text stringlengths 8 6.05M |
|---|
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return a bool
# def isBadVersion(version):
class Solution:
    def firstBadVersion(self, n):
        """Return the first bad version in 1..n via binary search.

        :type n: int
        :rtype: int
        """
        lo, hi = 1, n
        while lo < hi:
            mid = (lo + hi) // 2
            if isBadVersion(mid):
                hi = mid        # mid may itself be the first bad version
            else:
                lo = mid + 1    # first bad version is strictly after mid
        return lo
|
from amath.constants import inf
from .mean import mean
def slope(x1, y1, x2, y2):
    """Return the slope of the line through (x1, y1) and (x2, y2).

    The result is a Fraction; a vertical line yields ``inf``.
    """
    from amath.DataTypes.Fraction import Fraction
    run = x1 - x2
    rise = y1 - y2
    if run == 0:
        return inf  # vertical line: undefined slope
    return Fraction(rise, run)
def sum(f, i=None, maximum=None, step=1, l=None):
    """Sum ``f`` over an integer range or an explicit list of inputs.

    Supply either ``i``/``maximum`` (inclusive range walked by ``step``) or
    ``l`` (explicit inputs) — not all three, and not none of them.  The name
    intentionally mirrors the original API even though it shadows the builtin.

    :param f: one-argument function returning an int or float
    :param i: first range input (inclusive)
    :param maximum: last range input (inclusive)
    :param step: range increment
    :param l: explicit list of inputs (alternative to i/maximum)
    :raises ValueError: if ``f`` does not return an int/float
    :raises TypeError: if ``f`` is not a one-argument function, or the
        i/maximum/l combination is invalid
    """
    try:
        # probe once instead of twice — f may have side effects
        probe = f(2)
    except TypeError:
        raise TypeError("f must be a function with only one argument")
    if type(probe) != float and type(probe) != int:
        raise ValueError("Function must return float or integer value")
    if i is not None and maximum is not None and l is not None:
        raise TypeError("Invalid Argument")
    if i is None and maximum is None and l is None:
        raise TypeError("Invalid Argument")
    if l is None:
        x = 0
        previous_value = None
        while i <= maximum:
            value = f(i)
            # crude convergence check: stop once consecutive terms stop
            # changing.  Bug fixed: previous_value was never updated, so
            # this break could never trigger.
            if value == previous_value:
                break
            x += value
            previous_value = value
            i += step
        return x
    else:
        x = 0
        for y in l:
            x += f(y)
        return x
def product(f, i=None, maximum=None, step=1, l=None):
    """Multiply ``f`` over an integer range or an explicit list of inputs.

    Mirrors ``sum`` above: supply either ``i``/``maximum`` or ``l``.

    :param f: one-argument function returning an int or float
    :param i: first range input (inclusive)
    :param maximum: last range input (inclusive)
    :param step: range increment
    :param l: explicit list of inputs (alternative to i/maximum)
    :raises ValueError: if ``f`` does not return an int/float
    :raises TypeError: if ``f`` is not a function, or the argument
        combination is invalid
    """
    try:
        # probe once instead of twice — f may have side effects
        probe = f(2)
    except TypeError:
        raise TypeError("f must be a function")
    if type(probe) != float and type(probe) != int:
        raise ValueError("Function must return float or integer value")
    if i is not None and maximum is not None and l is not None:
        raise TypeError("Invalid Argument")
    if i is None and maximum is None and l is None:
        raise TypeError("Invalid Argument")
    if l is None:
        x = 1
        while i <= maximum:
            x *= f(i)
            i += step
        return x
    else:
        # Bug fixed: this branch was guarded by ``elif i is not None``, so a
        # call supplying only ``l`` silently returned None.
        x = 1
        for y in l:
            x *= f(y)
        return x
def linregress(inp, output):
    # type: (list, list) -> Function
    """Least-squares linear fit of ``output`` against ``inp``.

    Returns a Function object representing ``m*x + b``.
    """
    from amath.DataTypes.Function import Function
    if not isinstance(inp, list):
        raise TypeError("Input must be a list")
    if not isinstance(output, list):
        raise TypeError("Input must be a list")
    if len(inp) != len(output):
        raise TypeError("Lists must be of the same size")
    if inp == output:
        return Function("x", "1.0x + 0.0")  # identical lists: identity line
    x_mean = mean(inp)
    y_mean = mean(output)
    # numerator: covariance of inp and output (unnormalized)
    covariance = 0
    for x_val, y_val in zip(inp, output):
        covariance += (x_val - x_mean) * (y_val - y_mean)
    # denominator: variance of inp (unnormalized)
    variance = 0
    for x_val in inp:
        variance += (x_val - x_mean) ** 2
    m = covariance / variance
    b = y_mean - (m * x_mean)
    return Function("x", "{0}x + {1}".format(m, b))
def expregress(inp, output):
    """Exponential fit: regress log(output) linearly and exponentiate back.

    Returns a Function of the form ``a*(b**x)``.
    """
    from amath.lists.lists import applylist, anytrue
    from amath.Computation.power import ln
    from amath.testing.types import isnan, isinf
    from amath.DataTypes.Function import Function
    log_output = applylist(output, ln)
    # ln of a non-positive value shows up as nan/inf
    if anytrue(log_output, isnan) or anytrue(log_output, isinf):
        raise ValueError("output cannot be negative")
    fit = linregress(inp, log_output)
    parts = fit.function.split("x + ")
    parts = applylist(parts, float)
    parts = applylist(parts, Function("a", "e**a"))
    return Function("x", "{0}*({1}**x)".format(parts[1], parts[0]))
def isPro(x, y):
    """Return True if y is elementwise proportional to x (same ratio).

    Pairs that are both zero are ignored; a zero paired with a non-zero
    value breaks proportionality.
    """
    # exact type checks preserved (a list subclass must still be rejected)
    if type(x) != list:
        raise TypeError("x must be a list")
    if type(y) != list:
        raise TypeError("y must be a list")
    if len(x) != len(y):
        raise TypeError("length of lists must be same")
    if len(x) <= 1:
        raise ValueError("length of lists must be greater than 1")
    ratio = None
    for a, b in zip(x, y):
        if a == 0 and b == 0:
            continue            # 0:0 pairs carry no information
        if a == 0 or b == 0:
            return False        # only one side is zero: not proportional
        current = float(b) / a
        if ratio is None:
            ratio = current     # first informative pair fixes the ratio
        elif current != ratio:
            return False
    return True
|
import datetime
import pandas as pd
columns = ["datetime", "label", "logits", "entropy"]
file_path = "tracking/training_data.csv"
def reset_data():
    """Overwrite the tracking CSV with just the header row."""
    empty = pd.DataFrame({}, columns=columns)
    empty.to_csv(file_path, header=True, index=False)
def save_data(label, logits, entropy):
    """Append one timestamped row to the tracking CSV (no header)."""
    row = {
        "datetime": [datetime.datetime.now()],
        "label": [label],
        "logits": [logits],
        "entropy": [entropy],
    }
    pd.DataFrame(row, columns=columns).to_csv(
        file_path, mode="a", header=False, index=False)
def get_dataframes():
    # Load the accumulated tracking rows back as a DataFrame.
    return pd.read_csv(file_path)
|
# -*- coding: utf-8 -*-
'''
72. Edit Distance
Runtime: 232 ms
Memory Usage: 16.1 MB
'''
class Solution:
    def minDistance(self, word1: str, word2: str) -> int:
        """Return the Levenshtein edit distance between word1 and word2.

        Classic dynamic programming over a (len(word2)+1) x (len(word1)+1)
        table; allowed operations are insert, delete and replace.
        Helpful walkthrough: https://www.youtube.com/watch?v=MiqoA-yF-0M&t=217s
        """
        if word1 == word2:
            return 0  # identical strings need no edits
        width, height = len(word1) + 1, len(word2) + 1
        dp = [[0] * width for _ in range(height)]
        # base row/column: transforming against an empty prefix
        for col in range(width):
            dp[0][col] = col          # delete all of word1[:col]
        for row in range(height):
            dp[row][0] = row          # insert all of word2[:row]
        for row in range(1, height):
            for col in range(1, width):
                if word1[col - 1] == word2[row - 1]:
                    # characters match: no operation needed
                    dp[row][col] = dp[row - 1][col - 1]
                else:
                    dp[row][col] = 1 + min(
                        dp[row - 1][col - 1],  # replace
                        dp[row][col - 1],      # remove
                        dp[row - 1][col],      # insert
                    )
        return dp[height - 1][width - 1]
|
# Minimal packaging script for the KVM project.
# NOTE(review): distutils is deprecated (PEP 632) and removed in Python 3.12;
# migrating to setuptools is advisable but would change this file's imports.
from distutils.core import setup
setup(
    name='KVM',
    version='0.1',
    author='Warren Spits',
    author_email='warren@spits.id.au',
    url='https://github.com/spitsw/kvm',
    license='Creative Commons Attribution-Noncommercial-Share Alike license'
)
import sqlite3
def create_table(db_path="lite.db"):
    """Create the ``scoreboard`` table if it does not already exist.

    Connecting to an SQLite database file creates the file on demand.

    :param db_path: path of the SQLite database; the default preserves the
        original hard-coded "lite.db" behavior.
    """
    conn = sqlite3.connect(db_path)
    print("db created")
    try:
        cur = conn.cursor()
        cur.execute("CREATE TABLE IF NOT EXISTS scoreboard (name TEXT, score INT)")
        conn.commit()
        print("table created")
    finally:
        # always release the connection, even if the DDL fails
        conn.close()
create_table()
|
import asyncio
from collections import deque
from concurrent.futures import ThreadPoolExecutor
from random import choice
from aiohttp import web
from common.request import post_request, get_request
class Scraper:
    """aiohttp web service that accepts URLs via POST, scrapes them on a
    thread pool, and forwards each result to a randomly chosen parser
    endpoint."""

    def __init__(self, host, port, parser_endpoints):
        # host/port: listen address; parser_endpoints: candidate downstreams.
        self.host = host
        self.port = port
        self.pool = ThreadPoolExecutor(max_workers=20)
        self.loop = asyncio.get_event_loop()
        # scrape results waiting to be pushed to a parser
        self.urls_to_parser = deque([])
        self.parser_endpoints = parser_endpoints

    def scrape_callback(self, return_value):
        # Future done-callback: unwrap the result and queue it if truthy.
        return_value = return_value.result()
        if return_value:
            self.urls_to_parser.append(return_value)

    async def get_urls(self, request):
        """POST handler: schedule a blocking get_request on the pool."""
        data = await request.json()
        url = data.get('url')
        if url:
            t = self.loop.run_in_executor(self.pool, get_request, url)
            t.add_done_callback(self.scrape_callback)
        # NOTE(review): indentation was lost in this copy; the response is
        # assumed to be sent whether or not a url was supplied — confirm.
        return web.json_response({'Status': 'Dispatched'})

    async def process_queue(self):
        """Background task: drain the queue, posting each item to a randomly
        chosen parser endpoint; idle-sleep briefly when the queue is empty."""
        while True:
            if self.urls_to_parser:
                data_to_post = self.urls_to_parser.popleft()
                print('Sending URL to dispatcher')
                selected_host = choice(self.parser_endpoints)
                res = await post_request(selected_host, data_to_post)
            else:
                await asyncio.sleep(0.1)

    async def start_background_tasks(self, app):
        # Launch the queue drainer when the server starts.
        app['dispatch'] = app.loop.create_task(self.process_queue())

    async def cleanup_background_tasks(self, app):
        # Cancel the drainer and await its termination on shutdown.
        app['dispatch'].cancel()
        await app['dispatch']

    async def create_app(self, loop):
        # Build the aiohttp application with a single POST route.
        app = web.Application()
        app.router.add_post('/', self.get_urls)
        return app

    def run_app(self):
        """Build the app, register startup/cleanup hooks, and serve forever."""
        loop = self.loop
        app = loop.run_until_complete(self.create_app(loop))
        app.on_startup.append(self.start_background_tasks)
        app.on_cleanup.append(self.cleanup_background_tasks)
        web.run_app(app, host=self.host, port=self.port)
# Script entry point: listen locally and dispatch to one parser endpoint.
if __name__ == '__main__':
    s = Scraper(host='127.0.0.1', port=5002, parser_endpoints=['http://127.0.0.1:5003/'])
    s.run_app()
from numpy import loadtxt
import numpy as np
from plotData import plotData
from computeCost import computeCost
from gradientDescent import gradientDescent
'''
%% Machine Learning Online Class - Exercise 1: Linear Regression
% Instructions
% ------------
%
% This file contains code that helps you get started on the
% linear exercise. You will need to complete the following functions
% in this exericse:
%
% plotData.py
% gradientDescent.py
% computeCost.py
% gradientDescentMulti.py
% computeCostMulti.py
% featureNormalize.py
% normalEqn.py
%
% For this exercise, you will not need to change any code in this file,
% or any other files other than those mentioned above.
%
% x refers to the population size in 10,000s
% y refers to the profit in $10,000s
%
'''
# ======================= Part 2: Plotting =======================
print('Plotting Data ...')
data = loadtxt('ex1data1.txt', delimiter=',')
X = data[:, 0]  # X.shape = (m,)
y = data[:, 1]
m = len(y)  # number of training examples
# reshape into explicit column vectors for the matrix math below
X = X.reshape(m, 1)
y = y.reshape(m, 1)
# % Plot Data
# % Note: You have to complete the code in plotData.py
plotData(X, y)
input('Program paused. Press ENTER to continue\n')
# ===================== Part 2: Cost and Gradient descent=====================
X = np.column_stack((np.ones(m), X))  # Add a column of ones to x (intercept term)
theta = np.zeros((2, 1))  # initialize fitting parameters
# Some gradient descent settings
iterations = 1500
alpha = 0.01  # learning rate
print('\nTesting the cost function ...')
# compute and display initial cost
J = computeCost(X, y, theta)
print('With theta = [0 ; 0]\nCost computed = {}'.format(J))
print('Expected cost value (approx) 32.07\n')
# further testing of the cost function
J = computeCost(X, y, np.array([[-1], [2]]))
print('With theta = [-1 ; 2]\nCost computed = {}'.format(J))
print('Expected cost value (approx) 54.24\n')
input('Program paused. Press ENTER to continue\n')
print('\nRunning Gradient Descent ...')
# % run gradient descent
theta = gradientDescent(X, y, theta, alpha, iterations)
# % print theta to screen
print('Theta found by gradient descent:')
print(theta)
print('Expected theta values (approx)')
print(' -3.6303\n 1.1664\n\n')
# predictions for populations of 35,000 and 70,000 (x is in units of 10,000s)
predict1 = np.dot(np.array(([1, 3.5])), theta)
print('For population = 35,000, we predict a profit of %f' %
      (predict1 * 10000))
predict2 = np.dot(np.array(([1, 7])), theta)
print('For population = 70,000, we predict a profit of %f' %
      (predict2 * 10000))
|
from collections import defaultdict
from math import ceil
def solution(fees, records):
    """Compute the parking fee per car from entry/exit records.

    :param fees: [default_minutes, default_fee, unit_minutes, unit_fee]
    :param records: strings of the form "HH:MM CARID IN|OUT"
    :return: list of fees, ordered by car id ascending

    Cars still inside at end of day are charged until 23:59 (minute 1439).
    Improvement over the original: entry times are tracked in a dict instead
    of three O(n) ``list.index`` scans per OUT record.
    """
    default_time, default_fee, unit_time, unit_fee = fees
    in_time = {}                      # car id -> minute it last entered
    total = defaultdict(int)          # car id -> accumulated parked minutes
    for record in records:
        hhmm, car, action = record.split()
        minutes = int(hhmm[:2]) * 60 + int(hhmm[3:5])
        if action == 'IN':
            in_time[car] = minutes
        else:
            total[car] += minutes - in_time.pop(car)
    # cars without a matching OUT stay until 23:59 (= minute 1439)
    for car, entered in in_time.items():
        total[car] += 1439 - entered
    answer = []
    for car in sorted(total):
        parked = total[car]
        if parked <= default_time:
            answer.append(default_fee)
        else:
            answer.append(default_fee +
                          ceil((parked - default_time) / unit_time) * unit_fee)
    return answer
# -*- coding: utf-8 -*-
import requests
import os
import json
import os.path
import PixivNotifier
import PixivUtil
def getFileExt(path):
    """Return the extension of *path*, including the dot ('' if none)."""
    _, ext = os.path.splitext(path)
    return ext
def getFileDir(file):
    """Return the directory component of *file* ('' if none)."""
    head, _ = os.path.split(file)
    return head
class imgCache:
    """Simple on-disk cache for downloaded images.

    Files are stored under ``cacheDir``; when no explicit name is given the
    cache key is derived from the URL's hash plus its file extension.
    """

    def __init__(self, d = 'imgCache/'):
        self.setCacheDir(d)

    def setCacheDir(self, dir):
        """Set (and create, if missing) the cache directory."""
        # dir = PixivNotifier.path + '/' + dir
        self.cacheDir = dir
        if not os.path.isdir(dir):
            os.mkdir(dir)

    def _cachePath(self, url, name):
        """Return the on-disk path used to cache *url* (shared helper;
        the original duplicated this logic in find/get/update).

        NOTE(review): Python 3 salts str hash() per process, so hash-derived
        cache names are not stable across runs — consider hashlib. Left
        unchanged here to preserve behavior.
        """
        if name is None:
            return self.cacheDir + hex(abs(hash(url))) + getFileExt(url)
        return self.cacheDir + name + getFileExt(url)

    def downloadImg(self, url, fileName, headers = {}):
        """Fetch *url* via PixivUtil and write the body to *fileName*.

        Retries forever on errors (deliberate best-effort loop in the
        original; kept as-is).
        """
        while 1:
            try:
                ir = PixivUtil.get(
                    PixivUtil.pixiv.getServer(), url, headers = headers)
                break
            except Exception:
                # was Python-2-only ``except Exception, e`` syntax; the bound
                # name was unused, so this Py2/Py3-compatible form is equivalent
                continue
        if ir.status_code == 200:
            # close the handle instead of leaking it (was a bare open().write())
            with open(fileName, 'wb') as fp:
                fp.write(ir.content)

    def find(self, url = "", name = None):
        """Return the cached file path for *url*, or None on a cache miss."""
        fname = self._cachePath(url, name)
        if os.path.isfile(fname):
            return fname
        return None

    def get(self, url, name = None, headers = {}):
        """Return the cached path for *url*, downloading it on a miss."""
        fname = self._cachePath(url, name)
        if not os.path.isfile(fname):
            self.downloadImg(url, fname, headers)
        return fname

    def update(self, url, name = None, headers = {}):
        """Re-download *url* unconditionally and return the cached path."""
        fname = self._cachePath(url, name)
        self.downloadImg(url, fname, headers)
        return fname
image = imgCache()
class dataCache:
    """JSON-file-backed key/value store with optional named sections."""

    def __init__(self, d = 'userData/config.json'):
        self.setFileName(d)

    def setFileName(self, s):
        """Set the backing file path, creating its directory if needed."""
        # s = PixivNotifier.path + '/' + s
        self.fileName = s
        d = os.path.split(s)[0]
        # guard against an empty dirname (file in the current directory):
        # os.mkdir('') would raise
        if d and not os.path.isdir(d):
            os.mkdir(d)

    def read(self, list, section = None):
        """Return {key: value} for each requested key present in the store.

        :param list: keys to look up (parameter name kept for interface
            compatibility even though it shadows the builtin)
        :param section: optional sub-dict to read from
        """
        result = {}
        if os.path.isfile(self.fileName):
            with open(self.fileName, 'r') as f:
                data = json.load(f)
            for x in list:
                if section is None:
                    if x in data:
                        result[x] = data[x]
                else:
                    if section in data and x in data[section]:
                        result[x] = data[section][x]
        return result

    def write(self, map, section = None):
        """Merge *map* into the store (under *section* if given) and save.

        Bug fixed: the original called ``f.close`` without parentheses after
        reading, so the read handle was never closed; ``with`` blocks now
        manage both handles.
        """
        data = {}
        if os.path.isfile(self.fileName):
            with open(self.fileName, 'r') as f:
                data = json.load(f)
        if section is None:
            for x in map:
                data[x] = map[x]
        else:
            if not section in data:
                data[section] = {}
            for x in map:
                data[section][x] = map[x]
        with open(self.fileName, 'w') as f:
            json.dump(data, f)
config = dataCache()
# src = "https://i.pximg.net/c/128x128/img-master/img/2017/08/15/04/09/26/64422822_p0_square1200.jpg"
# print cache.getImg(src)
|
import uvicorn
# NOTE(review): uvicorn.run() requires an application argument (e.g.
# uvicorn.run("module:app")); as written this raises at startup — confirm
# which ASGI app was intended.
uvicorn.run()
|
#! -*- coding:utf8 -*-
import os
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
from gensqlalorm.utils import (
format_for_hump
)
from gensqlalorm.db import (
desc_table,
show_tables,
show_create_table
)
def show_all_tables(project_name):
    # Thin wrapper: list the tables of the given project's database.
    return show_tables(project_name)
def gen_table_model(project_name, table_name):
    """Fetch the table description and derive a CamelCase model name.

    NOTE(review): both values are computed but nothing is returned or
    emitted — the function looks unfinished; confirm the intended output.
    """
    table_desc = desc_table(project_name, table_name)
    table_model_name = format_for_hump(table_name)
|
# Flask application configuration.
CSRF_ENABLED = True  # enable CSRF protection for forms
# NOTE(review): the secret key and DB credentials are hard-coded in source
# control — consider loading them from the environment instead.
SECRET_KEY = 'CZ3003_Extinguisher'
SQLALCHEMY_DATABASE_URI = 'mysql://extinguisher:extinguisher@127.0.0.1/subscription'
|
# Read a string and report whether it and both of its halves are palindromes
# (the middle character of an odd-length string belongs to neither half).
s = input()
ps1 = s[:len(s) // 2]
# Bug fix: (len(s) + 2) // 2 skipped the first character of the second half
# for even-length strings; (len(s) + 1) // 2 starts at the correct index
# (and still skips the middle character for odd lengths).
ps2 = s[(len(s) + 1) // 2:]
if s == s[::-1] and ps1 == ps1[::-1] and ps2 == ps2[::-1]:
    print("Yes")
else:
    print("No")
import torch
import numpy as np
import torch.nn as nn
import copy
import math
from torch.nn import functional as F
def Linear(inputdim, outputdim, bias=True):
    """Factory for a fully connected layer (thin nn.Linear wrapper)."""
    return nn.Linear(inputdim, outputdim, bias)
def clone(module, N):
    """Return a ModuleList of N independent deep copies of *module*."""
    copies = [copy.deepcopy(module) for _ in range(N)]
    return nn.ModuleList(copies)
class MultiHeadedAttention(nn.Module):
    """Multi-head scaled dot-product attention.

    Splits a d_model-wide representation into ``head_num`` heads of width
    ``d_k = d_model // head_num``, attends per head, then re-projects.
    """

    def __init__(self, head_num, d_model, dropout=0.0):
        super(MultiHeadedAttention, self).__init__()
        assert d_model % head_num == 0  # model width must split evenly across heads
        self.d_k = d_model // head_num  # per-head feature dimension
        self.head = head_num
        # four projections: query, key, value, and the final output projection
        self.linears = clone(Linear(d_model, d_model), 4)
        self.attn = None
        self.dropout = None
        if dropout > 0:
            self.dropout = nn.Dropout(dropout)

    def attention(self, q, k, v, mask=None):
        """Scaled dot-product attention on per-head tensors (b, head, t, d_k)."""
        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
        # when searching, target mask is not needed
        if mask is not None:
            # broadcast mask b 1 t -> b 1 1 t -> b head t t, then blank the
            # masked positions with a large negative before the softmax
            mask = mask.unsqueeze(1).expand_as(scores)
            scores.masked_fill_(mask == 0, -1e9)
        p_att = F.softmax(scores, -1)
        if self.dropout:
            p_att = self.dropout(p_att)
        return torch.matmul(p_att, v)

    def forward(self, query, key, value, mask=None):
        # q k v : B T H
        nbatches = query.size(0)
        # project q/k/v, then reshape to (batch, head, time, d_k)
        query, key, value = [l(x).view(nbatches, -1, self.head, self.d_k).transpose(1, 2)
                             for l, x in zip(self.linears, (query, key, value))]
        x = self.attention(query, key, value, mask)
        # merge heads back: (batch, time, head * d_k)
        x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.head * self.d_k)
        # final output projection; returns b t dim
        x = self.linears[-1](x)
        return x
class PositionwiseFeedForward(nn.Module):
    """Position-wise feed-forward block: Linear -> ReLU -> (Dropout) -> Linear."""

    def __init__(self, d_model, d_ff, dropout=0.0):
        super(PositionwiseFeedForward, self).__init__()
        # nn.Linear with its default bias is exactly what the file's
        # Linear helper constructs — inlined here
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout) if dropout > 0 else None

    def forward(self, x):
        hidden = F.relu(self.w_1(x), inplace=True)
        if self.dropout:
            hidden = self.dropout(hidden)
        return self.w_2(hidden)
class SublayerConnection(nn.Module):
    """Residual connection followed by layer norm: norm(x + dropout(sublayer(x)))."""

    def __init__(self, size, dropout):
        super(SublayerConnection, self).__init__()
        self.norm = nn.LayerNorm(size)
        self.dropout = nn.Dropout(dropout) if dropout > 0 else None

    def forward(self, x, sublayer):
        residual = sublayer(x)
        if self.dropout:
            residual = self.dropout(residual)
        return self.norm(x + residual)
|
from openpyxl.compat.strings import unicode
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from base import *
import clsTestService
import enums
from general import General
class FreeTrial(Base):
    """Page object for the free-trial instance-creation form."""
    driver = None
    clsCommon = None

    def __init__(self, clsCommon, driver):
        self.driver = driver
        self.clsCommon = clsCommon
    #=============================================================================================================
    #                                          Free Trial locators:                                              #
    #=============================================================================================================
    FREE_TRIAL_PARTNER_ID_TEXTBOX = ('id', 'partnerId')
    FREE_TRIAL_ADMIN_SECRET_TEXTBOX = ('id', 'adminSecret')
    FREE_TRIAL_INSTANCEID_TEXTBOX = ('id', 'instanceId')
    FREE_TRIAL_COMPANY_NAME_TEXTBOX = ('id', 'company')
    FREE_TRIAL_HOSTNAME_TEXTBOX = ('id', 'hostname')
    FREE_TRIAL_APPLICATION_TEXTBOX = ('id', 'applicationProfile')
    FREE_TRIAL_SUBMIT_BUTTON = ('xpath', "//input[@id='saveForm' and @class='button_text']")
    FREE_TRIAL_CONFIRM_INSTANCE_CREATION = ('xpath', "//pre[contains(text(),'Creating site...\nSite created with ID')]")
    #============================================================================================================
    def createFreeTrialInctance(self, partnerId, adminSecret, instanceId, company, hostname, application):
        """Fill in and submit the free-trial form.

        Returns True once instance creation is confirmed on the page,
        False (with a log entry) on the first failed step.
        (Method name typo "Inctance" kept for caller compatibility.)
        """
        if self.send_keys(self.FREE_TRIAL_PARTNER_ID_TEXTBOX, partnerId) == False:
            writeToLog("INFO","FAILED to insert partner Id '" + partnerId + "'")
            return False
        if self.send_keys(self.FREE_TRIAL_ADMIN_SECRET_TEXTBOX, adminSecret) == False:
            writeToLog("INFO","FAILED to insert admin secret '" + adminSecret + "'")
            return False
        if self.send_keys(self.FREE_TRIAL_INSTANCEID_TEXTBOX, instanceId) == False:
            writeToLog("INFO","FAILED to insert instance Id '" + instanceId + "'")
            return False
        if self.send_keys(self.FREE_TRIAL_COMPANY_NAME_TEXTBOX, company) == False:
            writeToLog("INFO","FAILED to insert company '" + company + "'")
            return False
        if self.send_keys(self.FREE_TRIAL_HOSTNAME_TEXTBOX, hostname) == False:
            writeToLog("INFO","FAILED to insert host name '" + hostname + "'")
            return False
        if self.select_from_combo_by_text(self.FREE_TRIAL_APPLICATION_TEXTBOX, application) == False:
            # bug fix: this message logged the hostname instead of the application
            writeToLog("INFO","FAILED to choose application '" + application + "'")
            return False
        if self.click(self.FREE_TRIAL_SUBMIT_BUTTON, 20) == False:
            writeToLog("INFO","FAILED to click on submit button")
            return False
        # wait until the creating process is done
        if self.wait_visible(self.FREE_TRIAL_CONFIRM_INSTANCE_CREATION, 120) == False:
            writeToLog("INFO","FAILED to find create instance message confirm")
            return False
        writeToLog("INFO","Success, instance " + instanceId + " was created successfully")
        return True

    def setInstanceNumber(self, instanceNumberFilePath):
        """Read a 'prefix-N' counter file, increment N, and write it back.

        Returns the new counter string, or False on failure.
        """
        try:
            # read the current instance number
            with open(instanceNumberFilePath, 'r', encoding='utf8') as instanceFile:
                tempInstance = instanceFile.read()
            # raise the instance number by 1 and update the file
            tmp = tempInstance.split('-')
            number = int(tmp[1]) + 1
            instanceNumber = tmp[0] + "-" + str(number)
            instanceNumber = instanceNumber.encode('ascii', 'ignore').decode('utf-8', 'ignore')
            with open(instanceNumberFilePath, 'w', encoding='utf8') as instanceFile:
                instanceFile.write(instanceNumber)
            return instanceNumber
        except (OSError, ValueError, IndexError):
            # Bug fix: the original caught selenium's NoSuchElementException,
            # which file operations never raise, so real I/O or parse
            # failures escaped instead of returning False.
            writeToLog("INFO","FAILED to read / write from the instance file")
            return False
|
import glob, os, sys
import numpy as np
from random import*
import matplotlib.pyplot as plt
import matplotlib.cm as cmx
import matplotlib.colors as colors
def get_rand_color(val):
    # Generate `val` hex color strings by stepping around an HSV-like wheel.
    # NOTE(review): this is a Python 2 module (bare `print` in main below);
    # the slice-index trick `5**int(h)/3%3` relies on Py2 integer division
    # and would behave differently under Python 3.
    h,s,v = random()*6, 0.5, 243.2
    colors = []
    for i in range(val):
        h += 3.75#3.708
        # pick and reorder RGB channels from the value/saturation triple
        tmp = ((v, v-v*s*abs(1-h%2), v-v*s)*3)[5**int(h)/3%3::int(h)%2+1][:3]
        colors.append('#' + '%02x' *3%tmp)
        # every 5th color: desaturate and darken
        # (assumed both statements belong to this if; indentation was lost)
        if i%5/4:
            s += 0.1
            v -= 51.2
    return colors
def main(argv):
    """Plot the average SVD compute time per executor count, read from
    the ``*_results.csv`` files matching sys.argv[1].

    NOTE(review): Python 2 script (bare print statements). The ``argv``
    parameter is ignored — sys.argv is read directly.
    """
    logFileName = sys.argv[1]
    plotTitle = sys.argv[2]
    print logFileName
    # get all csv file from current directory
    resultsCSV = sorted(glob.glob(logFileName + '*_results.csv'))
    # read the individual csv files for logFileName
    nFiles = len(resultsCSV)
    cmap = get_rand_color(nFiles)
    markers = ['x', 'o', '^', '+', 's', 'v', '<', '>']
    xlabel = 'Number of Executors'
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # plot the time-line data
    width = 0
    for i in range(0, nFiles):
        # CSV columns assumed: [?, cores, total cores, time] — TODO confirm
        timeSVD = np.genfromtxt(resultsCSV[i], delimiter = ', ')
        numCores = np.unique(timeSVD[:, 1])
        # blockSize = np.unique(timeSVD[:, 2])
        numExecutors = np.empty(len(numCores), dtype=int)
        avgSVDTime = np.empty(len(numCores), dtype=float)
        for j in range(len(numCores)):
            numExecutors[j] = timeSVD[0, 2]/numCores[j]
            # average all runs that used this core count
            singleRunData = timeSVD[ np.where(timeSVD[:, 1] == numCores[j])[0], 3]
            avgSVDTime[j] = np.mean(singleRunData)
            print 'execs: ' + str(numExecutors[j]) + ', cores: ' + str(numCores[j]) + ', avg. time: ' + str(avgSVDTime[j])
            ax.bar(j + width, avgSVDTime[j], 0.5, bottom=0, align='center', color='b')
        width = width + 0.5
    # label the x axis with the executor counts of the last file processed
    runs = range(len(numExecutors))
    xTicks = [val for val in runs]
    plt.xticks(xTicks, map(str, map(int, numExecutors)))
    ax.set_xlabel(xlabel)
    ax.set_ylabel('Avg. Time to compute SVD [sec]')
    # ax.set_title('Compute times for different number of Executors (10M Data-set)')
    plt.show()
if __name__ == "__main__":
    # NOTE(review): main() ignores its parameter and reads sys.argv directly,
    # so passing sys.argv[1] here (rather than sys.argv) happens to be harmless.
    main(sys.argv[1])
|
# pip3 install --user QCustomPlot2
# change gui font size in linux: xrandr --output HDMI-0 --dpi 55
# https://pypi.org/project/QCustomPlot2/
# https://osdn.net/users/salsergey/pf/QCustomPlot2-PyQt5/scm/blobs/master/examples/plots/mainwindow.py
import PyQt5
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QVBoxLayout, QHBoxLayout, QLabel
from PyQt5.QtCore import QAbstractTableModel, Qt
from PyQt5.QtGui import QPen, QBrush, QColor
from QCustomPlot2 import *
import numpy as np
import collections
import pandas as pd
import pathlib
# %%
output_path="/dev/shm"
_code_git_version="xx"
_code_repository="https://github.com/plops/cl-py-generator/tree/master/example/17_qt_customplot/source/run_00_plot.py"
_code_generation_time="07:48:06 of Thursday, 2020-05-21 (GMT+1)"
class DataFrameModel(QtCore.QAbstractTableModel):
    # Boilerplate to render a pandas DataFrame through a QTableView.
    # https://learndataanalysis.org/ (original URL mangled in this copy)

    def __init__(self, df=pd.DataFrame(), parent=None):
        # NOTE(review): mutable default DataFrame is shared across calls;
        # harmless as long as it is never mutated — confirm.
        QAbstractTableModel.__init__(self)
        self._dataframe=df

    def rowCount(self, parent=None):
        return self._dataframe.shape[0]

    def columnCount(self, parent=None):
        return self._dataframe.shape[1]

    def data(self, index, role=QtCore.Qt.DisplayRole):
        # Cell contents, rendered as text, for the display role only.
        if ( index.isValid() ):
            if ( ((role)==(QtCore.Qt.DisplayRole)) ):
                return str(self._dataframe.iloc[index.row(),index.column()])
        return None

    def headerData(self, col, orientation, role):
        # DataFrame column names serve as the horizontal header labels.
        if ( ((((orientation)==(QtCore.Qt.Horizontal))) and (((role)==(QtCore.Qt.DisplayRole)))) ):
            return self._dataframe.columns[col]
        return None
# %% open gui windows: a table of sensor rows above a live plot
app=QApplication([""])
window=QWidget()
layout_h=QHBoxLayout(window)
layout=QVBoxLayout()
layout_h.addLayout(layout)
window.setWindowTitle("run_00_plot")
table=QtWidgets.QTableView(window)
# select whole row when clicking into table
table.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
custom_plot=QCustomPlot()
custom_plot.setFixedHeight(250)
graph=custom_plot.addGraph()
# NOTE(review): x is computed but never handed to the graph — confirm intent
x=np.linspace(-3, 3, 300)
graph.setPen(QPen(Qt.blue))
custom_plot.rescaleAxes()
# allow dragging/zooming the plot and selecting plottables
custom_plot.setInteractions(QCP.Interactions(((QCP.iRangeDrag) | (QCP.iRangeZoom) | (QCP.iSelectPlottables))))
layout.addWidget(table)
layout.addWidget(custom_plot)
window.show()
def selectionChanged(selected, deselected):
    """Table selection slot.

    NOTE(review): ``other_table`` and ``zip_table`` are not defined anywhere
    in this file, so this handler raises NameError as soon as a row is
    selected — likely leftovers from the code generator; confirm.
    """
    global other_table, df
    if ( not(other_table is None) ):
        # https://stackoverflow.com/questions/5889705/ (removing widgets from a QVBoxLayout)
        zip_table.setParent(None)
    row=df.iloc[selected.indexes()[0].row()]
# the only realtime data source i can think of: power consumption of my cpu
def read_from_file(fn):
    """Return the contents of *fn* as a single line (newlines removed)."""
    with open(fn, "r") as handle:
        text = handle.read()
    return text.replace("\n", "")
# Build a table of hwmon sensor files (CPU power on this machine): each row
# pairs a *_input file with its *_label file, plus a live value and a
# rolling history deque for plotting.
df=pd.DataFrame({("input_fn"):(list(map(str, pathlib.Path("/sys/devices/pci0000:00/0000:00:18.3/hwmon/hwmon0/").glob("*input"))))})
df["base_fn"]=df.input_fn.str.extract("(.*)_input")
df["label_fn"]=df.base_fn.apply(lambda x: ((x)+("_label")))
df["label"]=df.label_fn.apply(read_from_file)
df["value"]=df.input_fn.apply(read_from_file)
df["values"]=df.input_fn.apply(lambda x: collections.deque(maxlen=1000))
model=DataFrameModel(df)
table.setModel(model)
table.selectionModel().selectionChanged.connect(selectionChanged)
def update_values():
    """Timer slot: re-read every sensor, refresh the table model, replot."""
    global df, graph, custom_plot
    for (idx,row,) in df.iterrows():
        # NOTE(review): each sensor file is read twice per tick — once for
        # the displayed value, once for the history; probably unintended.
        df.loc[idx,"value"]=read_from_file(row.input_fn)
        row["values"].append(int(read_from_file(row.input_fn)))
    # rebuilding the model on every tick is simple but heavyweight
    model=DataFrameModel(df)
    table.setModel(model)
    # plot the rolling history of the second sensor row
    y=df.iloc[1]["values"]
    graph.setData(range(len(y)), y)
    custom_plot.rescaleAxes()
    custom_plot.replot()
# Poll the sensors every 10 ms.
timer=PyQt5.QtCore.QTimer()
timer.setInterval(10)
timer.timeout.connect(update_values)
timer.start()
def run0():
    # apparently i don't need to call this. without it i can interact with python -i console
    app.exec_()
from sklearn.svm import SVR
import matplotlib.pylab as plt
import mglearn
import numpy as np
# Fit SVR models with two gamma values on the wave dataset and plot both fits
# against the raw samples.
X, y = mglearn.datasets.make_wave(n_samples=100)
line = np.linspace(-3, 3, 1000, endpoint=False).reshape(-1, 1)
for gamma in [1, 10]:
    svr = SVR(gamma=gamma).fit(X,y)
    plt.plot(line, svr.predict(line), label='SVR gamma={}'.format(gamma))
# raw data as black circles
plt.plot(X[:,0],y,'o',c='k')
plt.ylabel("regression output")
plt.xlabel("feature")
plt.show()
|
import os
# Read mail credentials from the environment (None when unset).
x = os.environ.get('EMAIL_USER')
y = os.environ.get('EMAIL_PASS')
ID = "working"
# NOTE(review): printing credentials to stdout is a security risk —
# confirm this is a throwaway environment check.
print(x)
print(y)
print(ID)
|
class Movie(object):
    """A simple value object describing a single movie.

    Attributes:
        title (str): Title of the movie
        story_line (str): A short description for the movie
        poster_image_url (str): An URL of the poster image
        trailer_youtube_url (str): An URL of the youtube trailer
    """

    def __init__(self, title, story_line, poster_img_url, trailer_youtube_url):
        # plain attribute assignment; order is irrelevant
        self.trailer_youtube_url = trailer_youtube_url
        self.poster_image_url = poster_img_url
        self.story_line = story_line
        self.title = title
|
import lasagne
import logging
import sys
import numpy as np
from braindecode.analysis.stats import wrap_reshape_topo, corr
from braindecode.experiments.experiment import create_experiment
from braindecode.experiments.load import load_exp_and_model
from braindecode.results.results import ResultPool
from braindecode.veganlasagne.layer_util import compute_trial_acts
from braindecode.veganlasagne.layers import get_n_sample_preds
from braindecode.analysis.create_amplitude_perturbation_corrs import (
get_trials_targets)
log = logging.getLogger(__name__)
def create_unit_output_class_corrs_for_files(folder_name, params,
        start, stop, i_all_layers):
    """Run unit-output/class correlation for every experiment in a folder.

    ``start``/``stop`` slice the result-file list (None means from the
    beginning / to the end); ``i_all_layers`` selects which layers to analyze.
    """
    res_pool = ResultPool()
    res_pool.load_results(folder_name, params=params)
    res_file_names = res_pool.result_file_names()
    # experiment basenames are the result files minus their suffix
    all_base_names = [name.replace('.result.pkl', '')
        for name in res_file_names]
    start = start or 0
    stop = stop or len(all_base_names)
    for i_file, basename in enumerate(all_base_names[start:stop]):
        log.info("Running {:s} ({:d} of {:d})".format(
            basename, i_file + start + 1, stop))
        create_unit_output_class_corrs(basename, i_all_layers)
def create_unit_output_class_corrs(basename, i_all_layers):
    """For one experiment: correlate per-layer unit outputs with the class
    targets for both the trained model and a freshly initialized one, and
    save both arrays next to the experiment files."""
    exp, model = load_exp_and_model(basename)
    exp.dataset.load()
    train_set = exp.dataset_provider.get_train_merged_valid_test(
        exp.dataset)['train']
    # untrained counterpart rebuilt from the same yaml configuration
    rand_model = create_experiment(basename + '.yaml').final_layer
    for i_layer in i_all_layers:
        trained_corrs = unit_output_class_corrs(model, exp.iterator,
            train_set, i_layer)
        untrained_corrs = unit_output_class_corrs(rand_model, exp.iterator,
            train_set, i_layer)
        file_name_end = '{:d}.npy'.format(i_layer)
        trained_filename = '{:s}.unit_class_corrs.{:s}'.format(basename,
            file_name_end)
        untrained_filename = '{:s}.rand_unit_class_corrs.{:s}'.format(basename,
            file_name_end)
        log.info("Saving to {:s} and {:s}".format(trained_filename,
            untrained_filename))
        np.save(trained_filename, trained_corrs)
        np.save(untrained_filename, untrained_corrs)
def unit_output_class_corrs(model, iterator, train_set, i_layer):
    """Correlate activations of layer ``i_layer`` with the trial targets."""
    # only need targets, ignore trials
    # always get targets as from final layer
    this_final_layer = lasagne.layers.get_all_layers(model)[-1]
    _, targets = get_trials_targets(train_set,
        get_n_sample_preds(this_final_layer), iterator)
    trial_acts = compute_trial_acts(this_final_layer, i_layer, iterator,
        train_set)
    # only take those targets where we have predictions for
    # a bit hacky: we know targets are same for each trial, so we just
    # take last ones, eventhough they come form overlapping batches
    # targets are #trials x #samples x #classes
    # sanity check: constant-per-trial targets mean subtracting the
    # per-trial mean must leave all zeros
    unmeaned_targets = targets - np.mean(targets, axis=(1), keepdims=True)
    assert np.all(unmeaned_targets == 0), ("Each trial should only have one "
        "unique label")
    relevant_targets = targets[:,:trial_acts.shape[2]]
    unit_class_corrs = wrap_reshape_topo(corr, trial_acts, relevant_targets,
        axis_a=(0,2), axis_b=(0,1))
    return unit_class_corrs
def setup_logging():
    """ Set up a root logger so that other modules can use logging
    Adapted from scripts/train.py from pylearn"""
    from pylearn2.utils.logger import (CustomStreamHandler, CustomFormatter)
    root_logger = logging.getLogger()
    prefix = '%(asctime)s '
    formatter = CustomFormatter(prefix=prefix)
    handler = CustomStreamHandler(formatter=formatter)
    # replace (not append to) existing handlers to avoid duplicate output
    root_logger.handlers = []
    root_logger.addHandler(handler)
    root_logger.setLevel(logging.INFO)
if __name__ == "__main__":
    setup_logging()
    # optional positional args: start index (1-based) and stop index
    start = None
    stop = None
    if len(sys.argv) > 1:
        start = int(sys.argv[1]) - 1  # from 1-based to 0-based
    if len(sys.argv) > 2:
        stop = int(sys.argv[2])
    folder = 'data/models/paper/ours/cnt/deep4/car/'
    params = dict(cnt_preprocessors="$cz_zero_resample_car_demean")
    i_all_layers = [8,14,20,26,28]
    create_unit_output_class_corrs_for_files(folder,
        params=params, start=start,stop=stop, i_all_layers=i_all_layers)
|
import numpy
def rsi(prices, n=14):
    '''
    Compute the Relative Strength Index using Wilder's smoothing.

    params:
        prices: python list type, close price of list of time series candles
        n: rsi params, default is 14
    return:
        rsi: python list type, rsi value of prices — one value per candle
        after the initial n-period warm-up, i.e. len(prices) - n values
        (empty list if there are not enough prices).

    Implements the original stub: seed averages are simple means of the
    first n gains/losses, then Wilder smoothing (avg = (avg*(n-1)+x)/n).
    An all-gain window yields 100.0, an all-loss window 0.0.
    '''
    if len(prices) <= n:
        return []
    deltas = [prices[k + 1] - prices[k] for k in range(len(prices) - 1)]
    gains = [d if d > 0 else 0.0 for d in deltas]
    losses = [-d if d < 0 else 0.0 for d in deltas]

    def _to_rsi(avg_gain, avg_loss):
        # avoid division by zero when there were no losses in the window
        if avg_loss == 0:
            return 100.0
        rs = avg_gain / avg_loss
        return 100.0 - 100.0 / (1.0 + rs)

    avg_gain = sum(gains[:n]) / n
    avg_loss = sum(losses[:n]) / n
    values = [_to_rsi(avg_gain, avg_loss)]
    for k in range(n, len(deltas)):
        avg_gain = (avg_gain * (n - 1) + gains[k]) / n
        avg_loss = (avg_loss * (n - 1) + losses[k]) / n
        values.append(_to_rsi(avg_gain, avg_loss))
    return values
from django.shortcuts import render
from django.contrib.auth.models import User
from rest_framework import viewsets
from signin.serializers import UserSerializer
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
class UserViewSet(viewsets.ModelViewSet):
    """REST CRUD endpoints for Django users, newest accounts first."""
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
def home(request):
    """Render the landing page (index.html) with an empty context."""
    return render(request, 'index.html', {})
# @api_view(['GET', 'POST'])
# def user(request):
# if request.method == 'GET':
# users = User.objects.all()
# serializer = UserSerializer(users, many=True)
# return Response(serializer.data)
# elif request.method == 'POST':
# serializer = UserSerializer(data=request.data)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data, status=status.HTTP_201_CREATED)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
# insurance-project
# This is an insurance project from codecademy
# Add your code here
# Dictionary of insurance cost per person (name -> cost in dollars).
medical_costs = {}
# medical_costs["James"] = 3323.3
medical_costs.update({"Marina": 6607.0, "Vinay": 3225.0})
# NOTE(review): "Issac" here vs "Isaac" in medical_records below — likely a
# typo in one of the two; confirm intended spelling.
medical_costs.update({"Connie": 8886.0, "Issac": 16444.0, "Valentina": 6420.0})
print(medical_costs)
# Correct Vinay's cost.
medical_costs["Vinay"] = 3325.0
print(medical_costs)
# Average cost across everyone in the dictionary.
total_cost = 0
for cost in medical_costs.values():
    total_cost += cost
average_cost = total_cost / len(medical_costs)
print(f"Average Insurance Cost: {average_cost}")
# Build a name -> age lookup by zipping the two parallel lists.
names = ["Marina", "Vinay", "Connie", "Issac", "Valentina"]
ages = [27, 24, 43, 35, 52]
zipped_ages = zip(names, ages)
names_to_ages = {name : age for name, age in zipped_ages}
print(names_to_ages)
marina_age = names_to_ages.get("Marina", None)
print(f"Marina's age is {marina_age}")
# Nested records: one attribute dictionary per person.
medical_records = {}
medical_records["Marina"] = {"Age": 27, "Sex": "Female", "BMI": 31.1, "Children": 2, "Smoker": "Non-smoker", "Insurance_cost": 6607.0}
medical_records["Vinay"] = {"Age": 24, "Sex": "Male", "BMI": 26.9, "Children": 0, "Smoker": "Non-smoker", "Insurance_cost": 3225.0}
medical_records["Connie"] = {"Age": 43, "Sex": "Female", "BMI": 25.3, "Children": 3, "Smoker": "Non-smoker", "Insurance_cost": 8886.0}
medical_records["Isaac"] = {"Age": 35, "Sex": "Male", "BMI": 20.6, "Children": 4, "Smoker": "Smoker", "Insurance_cost": 16444.0}
medical_records["Valentina"] = {"Age": 52, "Sex": "Female", "BMI": 18.7, "Children": 1, "Smoker": "Non-smoker", "Insurance_cost": 6420.0}
print(medical_records)
print("Connie's insurance cost is " + str(medical_records["Connie"]["Insurance_cost"]) + " dollars.")
# Remove Vinay's record entirely.
medical_records.pop("Vinay")
|
import json
from bokeh.embed import components
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, CustomJS, Select
from bokeh.plotting import figure
from bokeh.resources import INLINE
from bokeh.util.string import encode_utf8
from flask import Flask, jsonify, request
from jinja2 import Template
app = Flask(__name__)
N_DATAPOINTS = 20
DEFAULT_VARIABLE = 'bar'
MY_DATABASE = {
'foo': [i**1 for i in range(N_DATAPOINTS)],
'bar': [i**2 for i in range(N_DATAPOINTS)],
'baz': [i**3 for i in range(N_DATAPOINTS)]
}
@app.route("/get_new_data", methods=['POST'])
def get_new_data():
    """AJAX endpoint: look up the POSTed variable name in MY_DATABASE and
    return its datapoints as JSON ({name: [values]}).

    NOTE(review): the form value is used directly as a MY_DATABASE key, so
    an unknown name raises KeyError (HTTP 500) — confirm whether a 400
    response is wanted instead.
    """
    app.logger.info("Browser sent the following via AJAX: %s",
                    json.dumps(request.form))
    variable_to_return = request.form['please_return_data_of_this_variable']
    return jsonify({variable_to_return: MY_DATABASE[variable_to_return]})
# Minimal page shell: jQuery first, then Bokeh's inlined JS/CSS resources,
# then the plot div and script produced by bokeh.embed.components.
SIMPLE_HTML_TEMPLATE = Template('''
<!DOCTYPE html>
<html>
<head>
<script src="https://code.jquery.com/jquery-3.1.0.min.js"></script>
{{ js_resources }}
{{ css_resources }}
</head>
<body>
{{ plot_div }}
{{ plot_script }}
</body>
</html>
''')
@app.route("/")
def simple():
    """Serve the demo page: a Bokeh line plot plus a variable selector whose
    browser-side CustomJS callback fetches fresh y-data from /get_new_data
    via jQuery AJAX and swaps it into the plot's data source.

    NOTE(review): uses pre-1.0 Bokeh APIs (Select(callback=...),
    source.trigger('change'), encode_utf8) — confirm the pinned Bokeh
    version before upgrading.
    """
    x = range(N_DATAPOINTS)
    y = MY_DATABASE[DEFAULT_VARIABLE]
    source = ColumnDataSource(data=dict(x=x, y=y))
    plot = figure(title="Flask + JQuery AJAX in Bokeh CustomJS")
    plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)
    # JS executed in the browser whenever the <select> value changes.
    callback = CustomJS(
        args=dict(source=source),
        code="""
        var selected_value = cb_obj.value;
        var plot_data = source.data;
        jQuery.ajax({
            type: 'POST',
            url: '/get_new_data',
            data: {"please_return_data_of_this_variable": selected_value},
            dataType: 'json',
            success: function (json_from_server) {
                // alert(JSON.stringify(json_from_server));
                plot_data['y'] = json_from_server[selected_value];
                source.trigger('change');
            },
            error: function() {
                alert("Oh no, something went wrong. Search for an error " +
                      "message in Flask log and browser developer tools.");
            }
        });
        """)
    select = Select(
        title="Select variable to visualize",
        value=DEFAULT_VARIABLE,
        options=list(MY_DATABASE.keys()),
        callback=callback)
    layout = column(select, plot)
    # Render the widgets/plot into script+div and inline them in the page.
    script, div = components(layout)
    html = SIMPLE_HTML_TEMPLATE.render(
        plot_script=script,
        plot_div=div,
        js_resources=INLINE.render_js(),
        css_resources=INLINE.render_css())
    return encode_utf8(html)
app.run(debug=True, host="127.0.0.1", port=8000)
|
# Read q pairs of numbers per line (presumably an id/grade and a score —
# TODO confirm). If the best score is below 8 the minimum mark was not
# reached; otherwise print the first value(s) of every pair holding the
# best score.
q = int(input())
a, na = [], []
for c in range(q):
    m, n = map(float, input().split(' '))
    a.append(m)
    na.append(n)
if max(na) < 8:
    print('Minimum note not reached')
else:
    for c in range(q):
        # NOTE(review): max(na) is recomputed on every iteration — hoisting
        # it would be cheaper with identical behavior.
        if max(na) == na[c]:
            print(int(a[c]))
|
import RPi.GPIO as GPIO
class Peltier:
    """Control a Peltier element via Raspberry Pi GPIO.

    Pins use the physical BOARD numbering scheme: 19 and 24 select the
    current direction (warm/cool, per the pin comments below) and 12 acts
    as an enable line — presumably driving an H-bridge; TODO confirm the
    wiring.
    """
    @staticmethod
    def init():
        """Configure the three control pins as outputs."""
        GPIO.setmode(GPIO.BOARD)
        # warm
        GPIO.setup(19,GPIO.OUT)
        # cool
        GPIO.setup(24,GPIO.OUT)
        # enable
        GPIO.setup(12,GPIO.OUT)
        # test
        #GPIO.setup(26,GPIO.OUT)
    @staticmethod
    def hot():
        """Drive current in the heating direction and raise the enable pin."""
        print("hot")
        GPIO.output(19,GPIO.LOW)
        GPIO.output(24,GPIO.HIGH)
        GPIO.output(12,GPIO.HIGH)
        #GPIO.output(26,GPIO.LOW)
    @staticmethod
    def cold():
        """Drive current in the cooling direction and raise the enable pin."""
        print("cold")
        GPIO.output(19,GPIO.HIGH)
        GPIO.output(24,GPIO.LOW)
        GPIO.output(12,GPIO.HIGH)
        #GPIO.output(26,GPIO.HIGH)
    @staticmethod
    def stop():
        """Drop both direction pins.

        NOTE(review): the enable pin (12) is left in its previous state —
        confirm whether it should also be driven LOW here.
        """
        print("stop")
        GPIO.output(19,GPIO.LOW)
        GPIO.output(24,GPIO.LOW)
        #GPIO.output(26,GPIO.HIGH)
|
# Copyright (c) 2018, EPFL/Human Brain Project PCO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from requests import Request
from openid_http_client.auth_client.auth_client import AbstractAuthClient
def http_requests(method_name, full_url, headers=None, params=None, data=None):
    """
    Generic http request
    :param method_name: HTTP verb, e.g. 'get' or 'post'
    :param full_url: absolute URL to call
    :param headers: optional dict of HTTP headers
    :param params: optional dict of query-string parameters
    :param data: optional request body (form data)
    :return: the requests.Response object
    """
    prepared = Request(method_name, full_url, headers,
                       params=params, data=data).prepare()
    return requests.session().send(prepared)
class SimpleRefreshTokenClient(AbstractAuthClient):
    """OpenID Connect client that authenticates with a long-lived refresh
    token and keeps a short-lived access token up to date.

    Endpoint URLs are discovered once, at construction time, from the
    provider's .well-known/openid-configuration document.
    """

    # Relative discovery path; replaced in __init__ by the absolute
    # endpoint URLs taken from the discovery document.
    endpoints = {
        'configuration': '.well-known/openid-configuration'
    }
    scope = 'openid profile offline_access'

    def __init__(self, openid_host, client_secret, client_id, refresh_token, refresh_token_method='get'):
        """
        :param openid_host: base URL of the OpenID provider
        :param client_secret: OAuth2 client secret
        :param client_id: OAuth2 client id
        :param refresh_token: long-lived refresh token used to mint access tokens
        :param refresh_token_method: HTTP method ('get' or 'post') for refresh calls
        """
        self.host = openid_host
        self.client_secret = client_secret
        self.client_id = client_id
        self.endpoints = self._fetch_endpoints()
        self.refr_token = refresh_token
        self.refr_token_method = refresh_token_method
        # Eagerly fetch a first access token so get_token() always has one.
        self.access_token = self.refresh_token()

    def _fetch_endpoints(self):
        """
        Fetching meaningful endpoints for Open ID calls
        :return dict: the endpoint URLs (absolute, from the discovery document)
        """
        res = http_requests('get', '{}/{}'.format(self.host, self.endpoints['configuration']))
        j = res.json()
        result = dict()
        result['userinfo'] = j['userinfo_endpoint']
        result['token'] = j['token_endpoint']
        return result

    def exchange_code_for_token(self, code, redirect_uri):
        """
        If no token are provided. We can request for one by providing a code and the redirect uri
        :param code: authorization code obtained from the login redirect
        :param redirect_uri: redirect URI registered for this client
        :return:
        """
        refresh_token, access_token = self.request_refresh_token(code, redirect_uri)
        self.refr_token = refresh_token
        self.access_token = access_token

    def request_refresh_token(self, code, redirect_uri):
        """
        Http request for a new refresh token
        :param code: authorization code to exchange
        :param redirect_uri: redirect URI used in the authorization request
        :return: (refresh_token, access_token) tuple
        """
        params = {
            'code': code,
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'redirect_uri': redirect_uri,
            'access_type': 'offline',
            'grant_type': 'authorization_code'
        }
        # Bug fix: endpoints['token'] is already the absolute token_endpoint
        # URL from the discovery document (see _fetch_endpoints, and how
        # refresh_token below uses it directly); prepending self.host
        # produced an invalid URL like "https://host/https://host/token".
        res = http_requests('get', self.endpoints['token'], params=params)
        if res.status_code == 200:
            return res.json()['refresh_token'], res.json()['access_token']
        else:
            raise Exception('Could not get the refresh token. {}'.format(res.content))

    def get_token(self):
        """Return the cached access token, refreshing it first if missing."""
        if self.access_token is None:
            self.refresh_token()
        return self.access_token

    def get_headers(self):
        """Return the Authorization header carrying the bearer token."""
        return {'Authorization': 'Bearer {}'.format(self.get_token())}

    def refresh_token(self, old_refresh_token=None):
        """
        To refresh the token through the refresh token
        :param old_refresh_token: optional refresh token overriding the stored one
        :return: the refreshed access token
        """
        params = {
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'refresh_token': self.refr_token if old_refresh_token is None else old_refresh_token,
            'grant_type': 'refresh_token'
        }
        if self.refr_token_method == 'get':
            res = http_requests('get', self.endpoints['token'], params=params)
        elif self.refr_token_method == 'post':
            res = http_requests('post', self.endpoints['token'], data=params)
        else:
            raise Exception('Unknown refresh_token_method: {}. Must be "get" or "post"'.format(self.refr_token_method))
        if res.status_code == 200:
            # The provider may rotate the refresh token; store the new one.
            self.refr_token = res.json()['refresh_token']
            self.access_token = res.json()['access_token']
            return self.access_token
        else:
            raise Exception('Could not refresh the token. {}'.format(res.content))
|
import argparse
import io
import re
import ebooklib
from bs4 import BeautifulSoup
from ebooklib import epub
from google.cloud import texttospeech
from google.oauth2 import service_account
from pydub import AudioSegment
from tqdm import tqdm
# max chunks to send each time
MAX_CHAR = 5000
blacklist = [
"[document]",
"noscript",
"header",
"html",
"meta",
"head",
"input",
"script",
]
def read(chap):
    """Extract readable text from one XHTML chapter: skip blacklisted tags,
    add a comma-pause after list items and a full stop after headings."""
    soup = BeautifulSoup(chap, "html.parser")
    pieces = []
    for item in soup.find_all(text=True):
        parent = item.parent.name
        if parent in blacklist:
            continue
        if parent == "li":
            # pause between items in list
            pieces.append(f"{item.strip()}, ")
        elif parent[0] == "h":
            # full stop after titles
            pieces.append(f"{item.strip()}. ")
        else:
            pieces.append(f"{item.strip()} ")
    return "".join(pieces)
def get_input(prompt, valid, other=[]):
    """Prompt until the user enters an int within [valid[0], valid[1]]
    (returned as int) or one of the strings in *other* (returned as-is)."""
    lo, hi = valid
    while True:
        answer = input(prompt)
        if answer in other:
            return answer
        try:
            number = int(answer)
            if lo <= number <= hi:
                return number
        except ValueError:
            pass
        print(f"Input valid chapter {lo}-{hi}!")
def get_text(file):
    """Interactively choose a chapter range of *file* (an epub) and return
    (text, start): the chapters' concatenated text and the chosen start
    chapter number."""
    book = epub.read_epub(file)
    #%%
    contents = [x.get_content() for x in book.get_items_of_type(ebooklib.ITEM_DOCUMENT)]
    # Map each document's file name to its position in `contents`.
    idx = {
        x.file_name: i
        for i, x in enumerate(book.get_items_of_type(ebooklib.ITEM_DOCUMENT))
    }
    #%%
    j = 0
    pos = [] # converts toc to pointers to locations in contents
    prev = ""
    def print_ch(items, n):
        # Recursively print the table of contents (indented by depth n)
        # while filling `pos` with the content index of each entry.
        nonlocal j
        nonlocal prev
        for x in items:
            if type(x) == tuple or type(x) == list:
                print_ch(x, n + 1)
            else:
                curr = x.href
                i = curr.find("#")
                if i >= 0:
                    # filter out jump link
                    curr = curr[0:i]
                # Skip consecutive TOC entries pointing at the same document.
                if curr == prev:
                    continue
                print(f"\033[92m{j}:\033[0m " + (" " * n) + x.title)
                pos.append(idx[curr])
                j += 1
                prev = curr
    print_ch(book.toc, 0)
    #%%
    start = get_input(f"Input start chapter 0-{j-1}: ", (0, j - 1))
    end = get_input(
        f"Input end chapter {start}-{j-1}: (Can press ENTER to select just the one start chapter) ",
        (start, j - 1),
        [""],
    )
    if end == "":
        end = start
    #%%
    chapters = []
    for i in range(pos[start], pos[end] + 1):
        text = read(contents[i])
        # Collapse newlines and runs of whitespace into single spaces.
        text = text.replace("\n", " ")
        text = " ".join(text.split())
        chapters.append(text)
    return "\n".join(chapters), start
def get_mp3(text, creds) -> bytes:
    """Synthesize *text* with Google Cloud TTS (en-US, neutral voice) and
    return the MP3 audio bytes. *creds* may be None to use the ambient
    GOOGLE_APPLICATION_CREDENTIALS environment."""
    # Instantiates a client
    client = texttospeech.TextToSpeechClient(credentials=creds)
    # Set the text input to be synthesized
    synthesis_input = texttospeech.SynthesisInput(text=text)
    # Build the voice request, select the language code ("en-US") and the ssml
    # voice gender ("neutral")
    voice = texttospeech.VoiceSelectionParams(
        language_code="en-US", ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL
    )
    # Select the type of audio file you want returned
    audio_config = texttospeech.AudioConfig(
        audio_encoding=texttospeech.AudioEncoding.MP3
    )
    # Perform the text-to-speech request on the text input with the selected
    # voice parameters and audio file type
    response = client.synthesize_speech(
        input=synthesis_input, voice=voice, audio_config=audio_config
    )
    return response.audio_content
def parse_args():
    """Build the command-line interface and parse sys.argv."""
    parser = argparse.ArgumentParser(
        description="Use Google Text-to-Speech to record epub chapters")
    parser.add_argument(
        "-k", "--key", type=str,
        help="path to google credentials json (if not using environment variable)")
    parser.add_argument(
        "-o", "--output", type=str,
        help="name of the mp3 output (default will be ch##.mp3 where ## is your selection)")
    parser.add_argument(
        "-d", "--dry", action="store_true",
        help="does a dry run and prints out your chapter as a string list rather than send to Google")
    parser.add_argument("file", type=str, help="path to epub")
    return parser.parse_args()
def main():
    """Entry point: pick chapters from an epub, synthesize them with Google
    TTS in chunks of at most MAX_CHAR characters, and concatenate the audio
    into a single mp3 file."""
    args = parse_args()
    # Bug fix: fall back to the GOOGLE_APPLICATION_CREDENTIALS environment
    # when no key file is given — previously `creds` was left undefined in
    # that case and get_mp3() crashed with NameError.
    creds = None
    if args.key is not None:
        creds = service_account.Credentials.from_service_account_file(args.key)
    text, sel = get_text(args.file)
    # Bug fix: honour -o/--output — previously `outfile` was only assigned
    # when the flag was ABSENT, so passing -o crashed with NameError at
    # export time.
    outfile = args.output if args.output is not None else f"ch{sel}.mp3"
    print(f"Char length: {len(text)}")
    # Split after sentence-ending periods so chunks break at sentence ends.
    sentences = re.split(r"(?<=\.) ", text)
    if args.dry:
        print(sentences)
        quit()
    output = AudioSegment.empty()
    current = ""
    for s in tqdm(sentences):
        # Flush the buffer before it would exceed the API character limit.
        if len(current) + len(s) + 1 > MAX_CHAR:
            output += AudioSegment.from_mp3(io.BytesIO(get_mp3(current, creds)))
            current = ""
        current += " " + s
    if current:
        output += AudioSegment.from_mp3(io.BytesIO(get_mp3(current, creds)))
    output.export(outfile, format="mp3")
if __name__ == "__main__":
    main()
|
# encoding: utf-8
from tastypie.constants import * |
import multiprocessing
import time
from object_recognition_multiprocessing.functions.evaluation import generate_json_evaluation
from object_recognition_multiprocessing.functions.final_filters import final_filter
from object_recognition_multiprocessing.functions.images_manager import ask_test_image, get_templates
from object_recognition_multiprocessing.functions.path_manager import setup_path
from object_recognition_multiprocessing.functions.plot_manager import setup_backend, setup_backend_for_saving, \
restore_backend, save_homographies
from object_recognition_multiprocessing.functions.process_functions import remove_overlaps
from object_recognition_multiprocessing.objects.homography import Homography
from object_recognition_multiprocessing.objects.process import ProcessHandler
from object_recognition_multiprocessing.objects.ratio import RatioList
from object_recognition_multiprocessing.objects.ratio_manager import RatioManager
if __name__ == '__main__':
    # 'spawn' gives each worker a fresh interpreter (required on Windows,
    # safer with matplotlib/OpenCV state).
    multiprocessing.set_start_method('spawn')
    # check if result path exists otherwise they are created
    setup_path()
    # setup backend of matplotlib for linux users
    setup_backend()
    # choose test image
    test_image = ask_test_image()
    # get all template
    templates = get_templates('./templates_test.json')
    ### START ANALYSIS
    # Timing experiment: re-run the whole pipeline with an increasing number
    # of templates and record the wall-clock time of each run.
    times = []
    full_templates = templates.copy()
    SINGLE_TEMPLATE = True
    NUM_TEMPLATE_IN_SCENE = 12
    for i in range(NUM_TEMPLATE_IN_SCENE, len(full_templates) + 1):
        templates = full_templates[:i]
        ### STOP ANALYSIS
        # list of processes
        template_processes: [ProcessHandler] = []
        setup_backend_for_saving()
        # templates = [templates[7]]
        print("Number of templates: {}".format(len(templates)))
        manager = multiprocessing.Manager()
        # create the dict to return the homography
        return_dict = manager.dict()
        # create the list to plot and save all homographies
        plot_dict = manager.dict()
        # create and register a ratio
        RatioManager.register("ratio_list", RatioList)
        ratio_manager = RatioManager()
        ratio_manager.start()
        ratio_list = ratio_manager.ratio_list(test_image.image)
        tic = time.time()
        # One worker per template; SINGLE_TEMPLATE=True serializes them by
        # joining each process right after starting it.
        for template in templates:
            process = ProcessHandler(test_image, template, return_dict, ratio_list, plot_dict)
            if process is not None:
                template_processes.append(process)
                process.start()
                if SINGLE_TEMPLATE:
                    process.join()
        for process in template_processes:
            process.join()
        toc = time.time()
        restore_backend()
        print('-' * 100)
        # list of total homographies found and number of homographies discarded
        total_homographies_found = []
        homographies_discarded = 0
        for process in template_processes:
            print(process.template.name, end='')
            try:
                homographies = return_dict[process.template.name]
                total_homographies_found += homographies
                if len(return_dict[process.template.name]) > 0:
                    save_homographies(process.test_image, return_dict[process.template.name], process.template)
                plots = plot_dict[process.template.name]
                # print("Number of items found in {}: {}".format(process.template.name, len(return_dict[process.template.name])))
                # print("Number of items discarded in {}: {}".format(process.template.name, len(plots)))
                homographies_discarded += len(plots)
                for plot in plots:
                    plot.save_plot(test_image.image)
                print(', completed!')
            # NOTE(review): bare except — it also swallows KeyboardInterrupt
            # and unrelated errors, not just missing-key cases; consider
            # narrowing to KeyError.
            except:
                print("\nERROR:")
                print(process.template.name+" not in dictionary")
                print(return_dict.keys())
                print("*"*20)
        before_overlaps = len(total_homographies_found)
        save_homographies(test_image, total_homographies_found, before_overlap=True)
        # Drop overlapping/duplicate detections before reporting.
        total_homographies_found = final_filter(total_homographies_found, test_image.image)
        generate_json_evaluation(total_homographies_found, test_image)
        save_homographies(test_image, total_homographies_found)
        print('=' * 100)
        print("Computational time: {}".format(round(toc - tic, 2)))
        print("Number of total items found: {}".format(len(total_homographies_found)))
        print("Number of total items found before overlap: {}".format(before_overlaps))
        print("Number of total items discarded: {}".format(homographies_discarded))
        print('=' * 100)
        ### START ANALYSIS
        times.append(round(toc - tic, 2))
    # Report and plot the per-run timings collected above.
    for i, t in enumerate(times):
        print("{}) {}".format(i, t))
    import matplotlib.pyplot as plt
    x = range(len(times))
    plt.clf()
    plt.plot(times)
    plt.show()
    ### STOP ANALYSIS
|
import itertools
import os
import urllib.request
import re
# PREWORK
# Download the exercise dictionary into the OS temp directory (TMP env var
# or /tmp) once at import time, then load it as a set of lowercased words
# for O(1) membership tests.
TMP = os.getenv("TMP", "/tmp")
DICT = 'dictionary.txt'
DICTIONARY = os.path.join(TMP, DICT)
urllib.request.urlretrieve(
    f'https://bites-data.s3.us-east-2.amazonaws.com/{DICT}',
    DICTIONARY
)
with open(DICTIONARY) as f:
    dictionary = set([word.strip().lower() for word in f.read().split()])
def get_possible_dict_words(draw):
    """Get all possible words from a draw (list of letters) which are
    valid dictionary words. Use _get_permutations_draw and provided
    dictionary"""
    return [word for word in _get_permutations_draw(draw)
            if word in dictionary]
def _get_permutations_draw(draw):
"""Helper to get all permutations of a draw (list of letters), hint:
use itertools.permutations (order of letters matters)"""
#p = re.compile('[^A-Z]*')
#draw = re.sub('[\W_]+', '',draw.lower())
draw = [_.lower() for _ in draw]
words = set()
for r in range(len(draw)):
for word in itertools.permutations(draw, r):
words.add(''.join(word))
return words
|
import pkg_resources
import sys
import warnings
import threading
import time
from logger.scream import say
try:
import MySQLdb as MSQL
except ImportError:
import _mysql as MSQL
IP_ADDRESS = "10.4.4.3" # Be sure to update this to your needs
threads = []
connection = None
def deprecated(func):
    '''This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
    when the function is used.'''
    import functools

    @functools.wraps(func)
    def new_func(*args, **kwargs):
        # stacklevel=2 points the warning at the caller, not this wrapper.
        warnings.warn("Call to deprecated function {}.".format(func.__name__),
                      category=DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)
    # functools.wraps copies __name__/__doc__/__dict__ (as the original did
    # by hand) plus __module__, __qualname__ and __wrapped__.
    return new_func
def init():
    # Open the module-level MySQL connection to the local GHTorrent copy
    # and also return it.
    # NOTE(review): pkg_resources.resource_string returns bytes; passing it
    # as user/passwd works on Python 2 (str == bytes) — confirm before any
    # Python 3 migration.
    global connection
    connection = MSQL.connect(host=IP_ADDRESS, port=3306, user=pkg_resources.resource_string('sources.gender_api', 'mysqlu.dat'),
                              passwd=pkg_resources.resource_string('sources.gender_api', 'mysqlp.dat'),
                              db="github", connect_timeout=5 * 10**7,
                              charset='utf8', init_command='SET NAMES UTF8',
                              use_unicode=True)
    return connection
def test_database(connection):
    # Smoke-test the connection (ping) and report how many tables the local
    # GHTorrent copy contains; returns an open cursor for further queries.
    # Python 2 module (print statements).
    print 'Pinging database: ' + (str(connection.ping(True)) if connection.ping(True) is not None else 'feature unavailable')
    cursor = connection.cursor()
    cursor.execute(r'SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = "%s"' % 'github')
    rows = cursor.fetchall()
    print 'There are: ' + str(rows[0][0]) + ' table objects in the local GHtorrent copy'
    return cursor
def check_database_consistency(cursor):
cursor.execute(r'SELECT table_name FROM information_schema.tables WHERE table_schema = "%s"' % 'github')
rows = cursor.fetchall()
if (u'users', ) and (u'users_ext', ) in rows:
print 'All neccesary tables are there.'
else:
print 'Your database does not fit a typical description of a GitHub Torrent copy..'
print 'Did you forgot to create users_ext table?'
print 'Program will exit now'
sys.exit(0)
def get_record_count(cursor, sample_tb_name, limit):
    # Count real (non-fake) user records whose name looks like one or two
    # alphabetic words.
    # NOTE(review): when `limit` is truthy a hard-coded "limit 500" is
    # appended — but LIMIT on a single-row COUNT(*) result has no effect;
    # confirm the intended semantics.
    cursor.execute(r'select count(*) from ' + str(sample_tb_name)
                   + ' where (type = "USR") and (fake <> 1) and (name rlike "[a-zA-Z]+( [a-zA-Z]+)?"){optional}'.format(optional=" limit 500" if limit else ""))
    rows = cursor.fetchall()
    return rows[0][0]
@deprecated
def batch_update_database(connection, names, is_locked_tb, sample_tb_name):
    # Persist gender classifications, one UPDATE per person, grouped by
    # name key. Deprecated — superseded by update_record_threaded.
    cursor = connection.cursor()
    for key in names.keys():
        collection = names[key]
        for fullname in names[key]['persons']:
            # Escape embedded double quotes so the name can be inlined into
            # the SQL string (Python 2: encode('utf-8') yields a byte str).
            update_query = r'UPDATE {table} SET gender = {gender} , accuracy = {accuracy} where full_name = "{fullname}"'.format(
                gender=collection['classification'],
                fullname=fullname.encode('utf-8').replace('"', '\\"'),
                table='users_ext' if is_locked_tb else sample_tb_name,
                accuracy=collection['accuracy'])
            say(update_query)
            cursor.execute(update_query)
    cursor.close()
def update_record_threaded(connection, classification, is_locked_tb=True, sample_tb_name="users"):
    # Spawn a background thread persisting one classification tuple
    # (fullname, accuracy, gender); threads are collected in the
    # module-level `threads` list.
    # NOTE(review): only tuples whose gender code equals 2 are written —
    # confirm what code 2 denotes before changing.
    if (classification[2] == 2):
        thread = threading.Thread(target=update_single_record, args=(connection, classification, is_locked_tb, sample_tb_name))
        threads.append(thread)
        thread.start()
    else:
        pass
def update_single_record(connection, classification, is_locked_tb, sample_tb_name):
    # Persist one (fullname, accuracy, gender) tuple, retrying every 5s
    # until the UPDATE succeeds.
    # NOTE(review): the bare except hides every error (not just lost
    # connections) and can loop forever — consider narrowing the exception
    # and bounding the retries.
    success = False
    while (not success):
        try:
            cursor = connection.cursor()
            fullname, accuracy, gender = classification
            update_query = r'UPDATE {table} t1 JOIN {sample_tb_name} t2 ON t1.id = t2.id SET t1.gender = {gender} , t1.accuracy = {accuracy} WHERE t2.name = "{fullname}"'.format(
                gender=gender, fullname=fullname.encode('utf-8').replace('"', '\\"'), table='users_ext' if is_locked_tb else sample_tb_name,
                accuracy=accuracy, sample_tb_name=sample_tb_name)
            say(update_query)
            cursor.execute(update_query)
            cursor.close()
            success = True
        except:
            say("Lost connection to MySQL? Update query failed")
            time.sleep(5)
    return
|
import os
# Get the current working directory
cwd = os.getcwd()
print(cwd)
# List all files and folders in the current directory
print(os.listdir()) # listdir(../)
# Check whether a path is a file / a folder
print(os.path.isfile('E:/sanfordpython/self_study/exercise/tests/path.py'))
print(os.path.isdir('E:/sanfordpython/self_study/exercise/tests'))
# Check whether a path exists
print(os.path.exists('E:/sanfordpython/self_study/exercise/test'))
# Split a path into directory and file name
[dirname, filename] = os.path.split('E:/sanfordpython/self_study/exercise/tests/path.py')
print(dirname, "\n", filename)
# Get the directory part of a path (without the file name)
print("get pathname:", os.path.dirname('E:/sanfordpython/self_study/exercise/tests/path.py'))
# Get the file name part of a path
print("get pathname:", os.path.basename('E:/sanfordpython/self_study/exercise/tests/path.py'))
|
from prediction.Tournament import *
RRR = "dsf" |
print 'Saludos'
|
# Generated by Django 3.1.5 on 2021-01-27 12:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Photo model (optional image uploaded
    to images/) backed by the 'Photos' database table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Photo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(blank=True, null=True, upload_to='images/')),
            ],
            options={
                'verbose_name': 'Photos',
                'verbose_name_plural': 'Photos',
                'db_table': 'Photos',
            },
        ),
    ]
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: ShengGW
@time: 19/07/28 16:08
@file: GetShapefileCover.py
@version: ??
@software: PyCharm
@contact: shenggw95@gmail.com
"""
from osgeo import gdal, gdalnumeric, ogr
from PIL import Image, ImageDraw
import os
import numpy as np
import DIPy.SpectralIndex as dsi
def clip_raster(rast, features_path, gt=None, nodata=-9999):
    """
    Clips a raster (given as either a gdal.Dataset or as a numpy.array
    instance) to a polygon layer provided by a Shapefile (or other vector
    layer). If a numpy.array is given, a "GeoTransform" must be provided
    (via dataset.GetGeoTransform() in GDAL). Returns an array. Clip features
    must be a dissolved, single-part geometry (not multi-part). Modified from:
    http://pcjericks.github.io/py-gdalogr-cookbook/raster_layers.html
    #clip-a-geotiff-with-shapefile

    Arguments:
        rast            A gdal.Dataset or a NumPy array
        features_path   The path to the clipping features
        gt              An optional GDAL GeoTransform to use instead
        nodata          The NoData value; defaults to -9999.

    Returns (clip, ulX, ulY, gt2): the masked array, the upper-left pixel
    offsets of the clip window in the source raster, and the window's
    geotransform.
    """
    def array_to_image(a):
        '''
        Converts a gdalnumeric array to a Python Imaging Library (PIL) Image.
        NOTE(review): unused within clip_raster (kept for symmetry with
        image_to_array); Image.fromstring was removed in modern Pillow
        (frombytes) — confirm the pinned PIL version.
        '''
        i = Image.fromstring('L',(a.shape[1], a.shape[0]),
                             (a.astype('b')).tostring())
        return i
    def image_to_array(i):
        """
        Converts a Python Imaging Library (PIL) array to a gdalnumeric image.
        """
        a = gdalnumeric.fromstring(i.tobytes(), 'b')
        a.shape = i.im.size[1], i.im.size[0]
        return a
    def world_to_pixel(geo_matrix, x, y):
        '''
        Uses a gdal geomatrix (gdal.GetGeoTransform()) to calculate
        the pixel location of a geospatial coordinate; from:
        http://pcjericks.github.io/py-gdalogr-cookbook/raster_layers.html#clip-a-geotiff-with-shapefile

        NOTE(review): the line calculation divides by xDist, implicitly
        assuming square pixels — confirm inputs always satisfy that.
        '''
        ulX = geo_matrix[0]
        ulY = geo_matrix[3]
        xDist = geo_matrix[1]
        yDist = geo_matrix[5]
        rtnX = geo_matrix[2]
        rtnY = geo_matrix[4]
        pixel = int((x - ulX) / xDist)
        line = int((ulY - y) / xDist)
        return (pixel, line)
    # Can accept either a gdal.Dataset or numpy.array instance
    if not isinstance(rast, np.ndarray):
        gt = rast.GetGeoTransform()
        rast = rast.ReadAsArray()
    # Create an OGR layer from a boundary shapefile
    features = ogr.Open(features_path)
    if features.GetDriver().GetName() == 'ESRI Shapefile':
        lyr = features.GetLayer(os.path.split(os.path.splitext(features_path)[0])[1])
    else:
        lyr = features.GetLayer()
    # Get the first feature
    poly = lyr.GetNextFeature()
    # Convert the layer extent to image pixel coordinates
    minX, maxX, minY, maxY = lyr.GetExtent()
    ulX, ulY = world_to_pixel(gt, minX, maxY)
    lrX, lrY = world_to_pixel(gt, maxX, minY)
    # Calculate the pixel size of the new image
    pxWidth = int(lrX - ulX)
    pxHeight = int(lrY - ulY)
    # If the clipping features extend out-of-bounds and ABOVE the raster...
    if gt[3] < maxY:
        # In such a case... ulY ends up being negative--can't have that!
        # iY remembers the (negative) offset; it is re-used in the matching
        # branch further below.
        iY = ulY
        ulY = 0
    # Multi-band image?
    try:
        clip = rast[:, ulY:lrY, ulX:lrX]
    except IndexError:
        clip = rast[ulY:lrY, ulX:lrX]
    # Create a new geomatrix for the image
    gt2 = list(gt)
    gt2[0] = minX
    gt2[3] = maxY
    # Map points to pixels for drawing the boundary on a blank 8-bit,
    # black and white, mask image.
    points = []
    pixels = []
    geom = poly.GetGeometryRef()
    pts = geom.GetGeometryRef(0)
    for p in range(pts.GetPointCount()):
        points.append((pts.GetX(p), pts.GetY(p)))
    # # 将所有要素覆盖范围内的点都添加到points中
    # while poly != None:
    #     geom = poly.GetGeometryRef()
    #     pts = geom.GetGeometryRef(0)
    #
    #     for p in range(pts.GetPointCount()):
    #         points.append((pts.GetX(p), pts.GetY(p)))
    #
    #     poly = lyr.GetNextFeature()
    for p in points:
        pixels.append(world_to_pixel(gt2, p[0], p[1]))
    raster_poly = Image.new('L', (pxWidth, pxHeight), 1)
    rasterize = ImageDraw.Draw(raster_poly)
    rasterize.polygon(pixels, 0) # Fill with zeroes
    # If the clipping features extend out-of-bounds and ABOVE the raster...
    if gt[3] < maxY:
        # The clip features were "pushed down" to match the bounds of the
        # raster; this step "pulls" them back up
        premask = image_to_array(raster_poly)
        # We slice out the piece of our clip features that are "off the map"
        mask = np.ndarray((premask.shape[-2] - abs(iY), premask.shape[-1]), premask.dtype)
        mask[:] = premask[abs(iY):, :]
        mask.resize(premask.shape) # Then fill in from the bottom
        # Most importantly, push the clipped piece down
        # NOTE(review): maxY - (maxY - gt[3]) simplifies to gt[3] — confirm
        # whether a different origin was intended here.
        gt2[3] = maxY - (maxY - gt[3])
    else:
        mask = image_to_array(raster_poly)
    # Clip the image using the mask
    try:
        clip = gdalnumeric.choose(mask, (clip, nodata))
    # If the clipping features extend out-of-bounds and BELOW the raster...
    except ValueError:
        # We have to cut the clipping features to the raster!
        rshp = list(mask.shape)
        if mask.shape[-2] != clip.shape[-2]:
            rshp[0] = clip.shape[-2]
        if mask.shape[-1] != clip.shape[-1]:
            rshp[1] = clip.shape[-1]
        mask.resize(*rshp, refcheck=False)
        clip = gdalnumeric.choose(mask, (clip, nodata))
    return (clip, ulX, ulY, gt2)
|
# Generated by Django 2.2.4 on 2019-08-31 14:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the 'codigo_unico' unique constraint on (encuesta, codigo) to
    the pregunta model."""

    dependencies = [
        ('encuestas', '0002_auto_20190831_1145'),
    ]

    operations = [
        migrations.AddConstraint(
            model_name='pregunta',
            constraint=models.UniqueConstraint(fields=('encuesta', 'codigo'), name='codigo_unico'),
        ),
    ]
|
#coding:utf-8
# Real-time edge detection from the default webcam.
# Import opencv-python
import cv2
# Import the scientific-computing library numpy
import numpy as np
# Open the camera; 0 selects the system default device (DirectShow backend)
cap = cv2.VideoCapture(0,cv2.CAP_DSHOW)
# Open the capture device
# NOTE(review): likely redundant right after VideoCapture(0) — confirm.
cap.open(0)
# Main capture loop
while cap.isOpened():
    # Grab one frame
    flag,frame = cap.read()
    if not flag:
        break
    # Run Canny edge detection (thresholds 50/100)
    frame = cv2.Canny(frame,50,100)
    # Stack the single channel three times into a 3-channel image
    frame = np.dstack((frame,frame,frame))
    cv2.imshow('my_window',frame)
    # Read the key pressed on the keyboard (waits up to 60 ms)
    key_pressed = cv2.waitKey(60)
    print('键盘被按下的按键是:',key_pressed)
    # 27 == ESC: quit the loop
    if key_pressed == 27:
        break
cap.release()
cv2.destroyAllWindows()
#!/usr/bin/env python3
from sys import getsizeof
from pathlib import Path
import subprocess
import os
import io
import re
class Exit:
    """Shell builtin `exit`: terminates the interpreter."""
    def __init__(self, arguments, instream):
        # Arguments are ignored; the incoming stream is simply closed.
        instream.close()
    def execute(self):
        exit()
class Cd:
    """Shell builtin `cd`: change the working directory.

    With no argument changes to the user's home directory; with one
    argument changes to that directory; more than one argument is an error.
    """

    def __init__(self, arguments, instream):
        self.arguments = arguments
        # cd consumes no piped input; close the incoming stream immediately.
        instream.close()

    def execute(self):
        """Perform the directory change; always returns an empty out-stream.

        Raises Exception when more than one argument is supplied.
        """
        outstream = io.StringIO()
        if len(self.arguments) > 1:
            print('cd: too many arguments.')
            raise Exception()
        if len(self.arguments) == 0:
            # Bug fix: the original read getvalue() from the stream it had
            # already closed in __init__ (ValueError) and then indexed
            # arguments[0] before handling the empty case (IndexError).
            # Plain `cd` now goes home, matching real shells.
            new_directory = Path.home()
        else:
            new_directory = Path(self.arguments[0].text)
        if new_directory.is_dir():
            os.chdir(new_directory)
        else:
            print('cd: {}: Not a directory.'.format(new_directory))
        return outstream
class Ls:
    """Shell builtin `ls`: list non-hidden entries of one or more
    directories (default: the current working directory)."""
    def __init__(self, arguments, instream):
        # ls consumes no piped input.
        instream.close()
        self.arguments = arguments
    def execute(self):
        """Return an out-stream with the listings.

        NOTE(review): on the first unreadable argument the method reports
        the error and returns immediately, silently skipping any remaining
        arguments — confirm this early-return is intended.
        """
        outstream = io.StringIO()
        lists_of_files = {}
        current_dir = os.getcwd()
        if len(self.arguments) == 0:
            dirlist = os.listdir(current_dir)
            lists_of_files.update({current_dir: dirlist})
        else:
            text_arguments = []
            for arg in self.arguments:
                text_arguments.append(arg.text)
            # `dir` shadows the builtin of the same name (local scope only).
            for dir in sorted(text_arguments):
                if dir == '~':
                    dir = Path.home()
                try:
                    dirlist = os.listdir(dir)
                except Exception:
                    print('ls: cannot access: {}: '
                          'No such file or directory.'.format(dir),
                          file=outstream, end=os.linesep)
                    return outstream
                lists_of_files.update({dir: dirlist})
        # Print directory headers only when listing more than one directory.
        plur = (len(lists_of_files) > 1)
        for (directory, files) in lists_of_files.items():
            if plur:
                print(directory, file=outstream, end=os.linesep)
            for file in files:
                # Skip hidden entries (leading dot).
                if file[0] != '.':
                    print(file, file=outstream, end=os.linesep)
            if plur:
                print(file=outstream)
        return outstream
class Wc:
    """Shell builtin `wc`: count lines, words and bytes.

    With no argument counts the piped input stream; otherwise counts the
    file named by the first argument.
    """

    def __init__(self, arguments, instream):
        self.input_stream = instream
        self.arguments = arguments

    @staticmethod
    def _count(lines):
        """Return (lines, words, bytes) totals for an iterable of lines."""
        count_line = 0
        count_word = 0
        count_byte = 0
        for line in lines:
            count_line += 1
            count_word += len(line.split())
            # Bug fix: real encoded byte length. The original used
            # sys.getsizeof() deltas, which only coincide with the byte
            # count for pure-ASCII strings and are wrong otherwise.
            count_byte += len(line.encode('utf-8'))
        return count_line, count_word, count_byte

    def execute(self):
        """Count the stream or file and return a formatted out-stream.

        Raises Exception when a named file cannot be opened.
        """
        # No argument: count the piped input instead of a file.
        if len(self.arguments) == 0:
            stream_value = self.input_stream.getvalue()
            self.input_stream.close()
            lines = stream_value.split('\n')
            # A trailing newline yields an empty final element; drop it.
            if lines[-1] == '':
                lines = lines[:-1]
            count_line, count_word, count_byte = self._count(lines)
            outstream = io.StringIO()
            print('{:>3} {:>3} {:>3}'.format(count_line,
                                             count_word, count_byte),
                  file=outstream, end='\n')
            return outstream
        filename = self.arguments[0].text
        try:
            file = open(filename, 'r')
        except IOError:
            print('wc: ' + filename + ': No such file')
            raise Exception()
        with file:
            # File iteration keeps each line's trailing newline, so newline
            # bytes are included here (as in the original file branch).
            count_line, count_word, count_byte = self._count(file)
        outstream = io.StringIO()
        print('{:>3} {:>3} {:>3} {:>6}'.format(count_line,
                                               count_word,
                                               count_byte,
                                               filename),
              file=outstream, end='\n')
        return outstream
class Cat:
    """`cat`-like command: copy input to an output stream."""

    def __init__(self, arguments, instream):
        self.input_stream = instream
        self.arguments = arguments

    def execute(self):
        """Return an io.StringIO with the contents of either the input
        stream (no arguments; the split on newlines drops them) or the
        file named by the first argument (newlines preserved).

        :raises Exception: when the named file cannot be opened.
        """
        out = io.StringIO()
        if not self.arguments:
            contents = self.input_stream.getvalue()
            self.input_stream.close()
            for piece in contents.split('\n'):
                print(piece, file=out, end='')
            return out
        filename = self.arguments[0].text
        try:
            handle = open(filename, 'r')
        except IOError:
            print('cat: ' + filename + ': No such file')
            raise Exception()
        with handle:
            for piece in handle:
                print(piece, file=out, end='')
        return out
class Pwd:
    """`pwd` command: report the current working directory."""

    def __init__(self, arguments, instream):
        instream.close()  # pwd ignores any piped-in input
        self.arguments = arguments

    def execute(self):
        """Return an io.StringIO holding the cwd followed by a newline."""
        stream = io.StringIO()
        print(os.getcwd(), file=stream, end='\n')
        return stream
class Echo:
    """`echo` command: write the arguments' text to the output."""

    def __init__(self, arguments, instream):
        instream.close()  # echo ignores any piped-in input
        self.arguments = arguments

    def execute(self):
        """Return an io.StringIO with each argument's text followed by a
        space, terminated by a newline."""
        stream = io.StringIO()
        for argument in self.arguments:
            print(argument.text, file=stream, end=' ')
        print(file=stream, end='\n')
        return stream
#from docopt import docopt
class Grep:
    """`grep`-like command.

    Supported keys (all keys must precede the positionals):
      -i    case-insensitive match
      -w    whole-word match
      -A n  also print n lines of trailing context after each match
    Remaining arguments are the pattern and, optionally, a file name;
    with no file name the input stream is searched.

    Fixes vs. the original: removed a leftover debug ``print(arguments)``
    from ``__init__``; the ``-w`` word-boundary wrapping is now applied
    once before the search loop (previously the pattern was re-wrapped in
    ``\\b...\\b`` on every iteration, growing without bound); dropped the
    ``except Exception: raise Exception`` wrappers that only destroyed the
    original traceback (``re.error`` already subclasses ``Exception``).
    """

    def __init__(self, arguments=None, instream=None):
        self.input_stream = instream
        self.arguments = arguments
        self.keys = ["-i", "-w", "-A"]

    def print_lines(self, i, line, outstream, text_for_search, n=None):
        """Print the matching line plus up to n lines of trailing context.

        :raises Exception: when n is negative.
        """
        print(line, file=outstream, end='')
        if n is not None:
            n = int(n)
            if n < 0:
                print('n must be non-negative')
                raise Exception
            for added_line in text_for_search[i + 1:min(len(text_for_search), i + n + 1)]:
                print(added_line, file=outstream, end='')

    def grep(self, pattern, text, i=False, w=False, n=None):
        """Search text (a list of lines) for pattern.

        :param pattern: regular expression to look for
        :param text: list of lines (each normally newline-terminated)
        :param i: case-insensitive when True
        :param w: whole-word match when True
        :param n: trailing-context line count, or None
        :return: io.StringIO with all matching lines (and context)
        """
        if w:
            # Hoisted out of the loop: wrap the pattern exactly once.
            pattern = r'\b' + pattern + r'\b'
        flags = re.IGNORECASE if i else 0
        outstream = io.StringIO()
        for j, line in enumerate(text):
            if re.search(pattern, line, flags):
                self.print_lines(j, line, outstream, text, n)
        return outstream

    def parse_input_string(self):
        """Split self.arguments into flags and positional arguments.

        :return: (flag_i, flag_w, n, remaining_arguments)
        :raises Exception: on a missing pattern, a key after a positional,
            or a non-integer operand for -A.
        """
        is_parsing_keys = True
        input_keys = []
        remaining_arguments = []
        pos_a = -1
        n = None
        # There is always at least one argument - the pattern.
        if len(self.arguments) == 0:
            print("Error: no input template")
            raise Exception
        for i, arg in enumerate(self.arguments):
            if arg.text in self.keys:
                if not is_parsing_keys:
                    print("All keys must be before template (and file)")
                    raise Exception
                if arg.text == '-A':
                    pos_a = i
                input_keys.append(arg.text)
            elif pos_a != -1 and i == pos_a + 1:
                # The token immediately after -A is its numeric operand.
                if not arg.text.isdigit():
                    print("-A requires int n")
                    raise Exception
                n = int(arg.text)
            else:
                is_parsing_keys = False
                remaining_arguments.append(arg.text)
        return "-i" in input_keys, "-w" in input_keys, n, remaining_arguments

    def execute(self):
        """Run grep over a named file (two positionals) or the input
        stream (one positional: just the pattern).

        :return: io.StringIO with the matches.
        :raises Exception: on argument errors.
        """
        flag_i, flag_w, n, args = self.parse_input_string()
        text = []
        if len(args) == 0:
            print("There is no template")
            raise Exception
        if len(args) == 1:
            # Search the piped-in stream, re-attaching newlines lost by split.
            stream_value = self.input_stream.getvalue()
            for line in stream_value.split('\n'):
                text.append(line + '\n')
            template = args[0]
        elif len(args) == 2:
            template = args[0]
            filename = args[1]
            with open(filename, 'r') as f:
                for line in f:
                    text.append(line)
        else:
            print("Too many arguments")
            raise Exception
        self.input_stream.close()
        return self.grep(template, text, flag_i, flag_w, n)
class ShellProcess:
    """
    Fallback command: when a pipeline command is not recognised, run it
    as an external shell process and capture its stdout.
    """

    def __init__(self, pipe_part, instream):
        self.command = pipe_part[0].text
        instream.close()  # the external process does not read the pipe
        self.arguments = pipe_part[1:]

    def execute(self):
        """Run the command with all arguments joined into one
        space-separated string; return stdout as an io.StringIO.

        :raises Exception: when the process exits with a non-zero status.
        """
        joined = ' '.join(arg.text for arg in self.arguments)
        argv = [self.command] if joined == '' else [self.command, joined]
        try:
            captured = subprocess.check_output(argv, universal_newlines=True)
        except subprocess.CalledProcessError:
            raise Exception
        stream = io.StringIO()
        print(captured, file=stream, end='')
        return stream
|
import uml
from labop import Primitive, Protocol, SampleArray, SampleData, SampleMap, SampleMask
def protocol_template():
    """
    Create a template instantiation of a protocol. Used for populating
    UI elements.

    :return: str
    """
    return ('protocol = labop.Protocol(\n'
            '\t"Identity",\n'
            '\tname="Name",\n'
            '\tdescription="Description")')
# Monkey-patch: expose the template factory as Protocol.template for UIs.
Protocol.template = protocol_template
def primitive_template(self):
    """
    Create a template instantiation of a primitive for writing a protocol.
    Used for populating UI elements.

    :param self: the Primitive being rendered
    :return: str
    """
    # Only input parameters appear in the generated call.
    inputs = (
        parameter.property_value
        for parameter in self.parameters
        if parameter.property_value.direction == uml.PARAMETER_IN
    )
    args = ",\n\t".join(value.template() for value in inputs)
    return f"step = protocol.primitive_step(\n\t'{self.display_id}',\n\t{args}\n\t)"
# Monkey-patch: expose the step-template factory as Primitive.template.
Primitive.template = primitive_template
def sample_array_str(self):
    """
    Create a human readable string for a SampleArray.

    :param self: SampleArray
    :return: str
    """
    return (
        "SampleArray(name={}, container_type={}, initial_contents={})"
        .format(self.name, self.container_type, self.initial_contents)
    )
# Monkey-patch: readable str() for SampleArray instances.
SampleArray.__str__ = sample_array_str
def sample_mask_str(self):
    """
    Create a human readable string for a SampleMask.

    :param self: SampleMask
    :return: str
    """
    return (
        "SampleMask(name={}, source={}, mask={})"
        .format(self.name, self.source, self.mask)
    )
# Monkey-patch: readable str() for SampleMask instances.
SampleMask.__str__ = sample_mask_str
def sample_data_str(self):
    """
    Create a human readable string for a SampleData.

    :param self: SampleData
    :return: str
    """
    return (
        "SampleData(name={}, from_samples={}, values={})"
        .format(self.name, self.from_samples, self.values)
    )
# Monkey-patch: readable str() for SampleData instances.
SampleData.__str__ = sample_data_str
def sample_map_plot(self):
    """
    Render the sample map using a matplotlib plot.

    NOTE(review): SampleMap.plot is bound to this function below, yet the
    body calls self.plot() - which after that binding is this very
    function - so invoking it recurses without bound unless labop resolves
    plot elsewhere first. Presumably it should delegate to an actual
    rendering helper; confirm the intended target.
    """
    self.plot()
# Binds the plot helper onto SampleMap (see the recursion note above).
SampleMap.plot = sample_map_plot
def sample_array_plot(self):
    """
    Render the sample array using a matplotlib plot.

    NOTE(review): SampleArray.plot is bound to this function below, yet
    the body calls self.plot() - which after that binding is this very
    function - so invoking it recurses without bound unless labop resolves
    plot elsewhere first. Confirm the intended rendering target.
    """
    self.plot()
# Binds the plot helper onto SampleArray (see the recursion note above).
SampleArray.plot = sample_array_plot
|
#-*- coding:utf8 -*-
# Copyright (c) 2020 barriery
# Python release: 3.7.0
# Create time: 2020-07-20
import sys
import json
import grpc
from .proto import schedule_service_pb2 as schedule_pb2
from .proto import schedule_service_pb2_grpc as schedule_service_pb2_grpc
class ScheduleClient(object):
    """gRPC client for the schedule service."""

    def __init__(self):
        # Channel and stub are created lazily by connect().
        self._channel = None
        self._stub = None

    def connect(self, endpoints):
        """Open an insecure, round-robin channel over the given endpoints.

        :param endpoints: iterable of "host:port" strings
        """
        options = [('grpc.max_receive_message_length', 512 * 1024 * 1024),
                   ('grpc.max_send_message_length', 512 * 1024 * 1024),
                   ('grpc.lb_policy_name', 'round_robin')]
        target = 'ipv4:{}'.format(','.join(endpoints))
        self._channel = grpc.insecure_channel(target, options=options)
        self._stub = schedule_service_pb2_grpc.ScheduleServiceStub(self._channel)

    def _pack_clusters(self, clusters):
        """Convert cluster dicts into ClusterInfo protobuf messages."""
        packed = []
        for cluster in clusters:
            info = schedule_pb2.ClusterInfo()
            info.name = cluster["name"]
            for node in cluster["nodes"]:
                node_info = schedule_pb2.NodeInfo()
                node_info.home = node["home"]
                node_info.storage = node["storage"]
                node_info.traffic = node["traffic"]
                info.nodes.append(node_info)
            packed.append(info)
        return packed

    def query_deployed_cluster(self, clusters, threshold, contract):
        """Ask the service which cluster the contract should deploy to.

        :return: the cluster name, or None on a service error.
        """
        req = schedule_pb2.QueryDeployedClusterRequest()
        req.threshold = threshold
        req.clusters.extend(self._pack_clusters(clusters))
        req.contract.storage = contract["storage"]
        req.contract.traffic = contract["traffic"]
        resp = self._stub.QueryDeployedCluster(req)
        return None if resp.error_code != 0 else resp.cluster_name

    def query_lb_by_nodes(self, clusters, threshold):
        """Request node-level load-balancing transfers.

        :return: list of transfer dicts, or None on a service error.
        """
        req = schedule_pb2.LoadBalancingByNodesRequest()
        req.threshold = threshold
        req.clusters.extend(self._pack_clusters(clusters))
        resp = self._stub.LoadBalancingByNodes(req)
        if resp.error_code != 0:
            return None
        return [{
            "contract_id": transfer.contract_id,
            "cluster_src": transfer.cluster_src,
            "cluster_dst": transfer.cluster_dst,
        } for transfer in resp.transfers]
if __name__ == "__main__":
    # Manual smoke test against a locally running schedule service.
    client = ScheduleClient()
    client.connect(["127.0.0.1:18080"])
    # Two sample clusters with a single node each.
    clusters = [{
        "name": "cluster1",
        "nodes": [{
            "home": "http://127.0.0.1:8080/SCIDE/SCManager",
            "storage": "1 GB",
            "traffic": "20 MB",
        }],
    }, {
        "name": "cluster2",
        "nodes": [{
            "home": "http://127.0.0.1:9090/SCIDE/SCManager",
            "storage": "135 MB",
            "traffic": "120 B",
        }],
    }]
    threshold = 0.8
    # Exercise both RPCs and dump the responses.
    transfers = client.query_lb_by_nodes(clusters, threshold)
    print(json.dumps(transfers, indent=4, separators=(',', ':')))
    contract = {
        "storage": "1 MB",
        "traffic": "1 B",
    }
    cluster_name = client.query_deployed_cluster(clusters, threshold, contract)
    print(cluster_name)
|
import os
import numpy as np
import pytest
try:
from unittest import mock
except ImportError:
import mock
from smalldataviewer import DataViewer
from smalldataviewer.files import offset_shape_to_slicing, FileReader, NORMALISED_TYPES
from .constants import OFFSET, SHAPE, INTERNAL_PATH
@pytest.mark.parametrize(
    "offset,shape,expected",
    [
        # offset, shape, expected per-axis slicing (None = open end)
        [None, None, Ellipsis],
        [(1, 1, 1), (2, 2, 2), (slice(1, 3), slice(1, 3), slice(1, 3))],
        [None, (2, 2, 2), (slice(None, 2), slice(None, 2), slice(None, 2))],
        [(1, 1, 1), None, (slice(1, None), slice(1, None), slice(1, None))],
        [
            (1, None, 1),
            (None, 2, None),
            (slice(1, None), slice(None, 2), slice(1, None)),
        ],
    ],
)
def test_offset_shape_to_slicing(offset, shape, expected):
    """offset/shape pairs must translate into the expected slice tuple."""
    assert offset_shape_to_slicing(offset, shape) == expected
def test_read_file(data_file, array):
    """FileReader should load the expected subvolume from every format."""
    path, has_internal = data_file
    internal = INTERNAL_PATH if has_internal else None
    result = FileReader(path, internal_path=internal, offset=OFFSET, shape=SHAPE).read()
    if path.endswith("swf"):
        pytest.xfail("swf comparison is hard due to compression and dimensions")
    assert np.allclose(result, array)
def test_read_file_raises_or_warns(data_file):
    """A required-but-missing internal path raises; a superfluous one warns."""
    path, has_internal = data_file
    if path.endswith(".json"):
        return  # json can have either internal path or not
    if not has_internal:
        with pytest.warns(UserWarning):
            FileReader(path, internal_path=INTERNAL_PATH).read()
    else:
        with pytest.raises(ValueError):
            FileReader(path).read()
def test_read_file_ftype_overrides(data_file, array):
    """An explicit ftype must override an uninformative file extension."""
    path, has_internal = data_file
    _, real_ext = os.path.splitext(path)
    disguised = path + ".txt"
    os.rename(path, disguised)
    loaded = FileReader(
        disguised,
        internal_path=INTERNAL_PATH if has_internal else None,
        offset=OFFSET,
        shape=SHAPE,
    ).read(real_ext.lstrip("."))
    if path.endswith("swf"):
        pytest.xfail("swf comparison is hard due to compression and dimensions")
    assert np.allclose(loaded, array)
@pytest.mark.parametrize(
    "method_name", sorted({"_read_" + tail for tail in NORMALISED_TYPES.values()})
)
def test_read_methods_exist(method_name):
    """Every normalised file type must map to an existing FileReader method."""
    assert hasattr(FileReader, method_name)
def test_dataviewer_from_file(data_file, array, subplots_patch):
    """DataViewer.from_file should expose the loaded subvolume as .volume."""
    path, has_internal = data_file
    kwargs = {"offset": OFFSET, "shape": SHAPE}
    if has_internal:
        kwargs["internal_path"] = INTERNAL_PATH
    viewer = DataViewer.from_file(path, **kwargs)
    if path.endswith("swf"):
        pytest.xfail("swf comparison is hard due to compression and dimensions")
    assert np.allclose(viewer.volume, array)
|
from django.http import HttpResponse,HttpResponseRedirect
from django.shortcuts import render_to_response
from LSMS.SM.libs import *
from LSMS.SM.models import *
import datetime
def msg(request):
    """Render a generic message page from mbody/mtype query parameters."""
    context = {'mbody': request.GET.get('mbody', ''),
               'mtype': request.GET.get('mtype', 'info')}
    return render_to_response('message.html', context)
def home(request):
    """Redirect a logged-in user to their home page; otherwise welcome."""
    utype = request.session.get('utype', False)
    if not utype:
        return render_to_response('welcome.html')
    destinations = {'S': '/stuhome', 'T': '/teahome', 'M': '/cmhome'}
    if utype in destinations:
        return HttpResponseRedirect(destinations[utype])
    return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Unexpected error'))
def stuHome(request):
    """Student home page; bounce disabled accounts and non-students."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact your class manager.'))
    if uinfo['utype'] != 'S':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You are not a student. Please go back to the proper UI.'))
    return render_to_response('stuhome.html', request.session)
def teaHome(request):
    """Teacher home page; bounce disabled accounts and non-teachers.

    Bug fix: the error redirects used ``'...%s...%s' % 'error',(msg)`` -
    the tuple parenthesis was misplaced, so the %-format received a single
    string for two placeholders and raised TypeError at runtime (and the
    message went to HttpResponseRedirect as a stray second argument).
    """
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] != 'T':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You are not a teacher. Please go back to the proper UI.'))
    return render_to_response('teahome.html', request.session)
def cmHome(request):
    """Class-manager home page; bounce disabled accounts and other types."""
    uinfo = getuinfo(request)
    state = uinfo['state']
    if state == 'not logon':
        return HttpResponseRedirect('/')
    if state == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] != 'M':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You are not a class manager. Please go back to the proper UI.'))
    return render_to_response('cmhome.html', request.session)
def register(request):
    """Show the registration form, or create the account from POST data."""
    if not request.POST:
        return render_to_response('register.html')
    if request.POST['password'] != request.POST['cpassword']:
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Please confirm you enter the same password twice!'))
    outcome = reg(request.POST['utype'], request.POST['username'],
                  request.POST['password'], request.POST['email'],
                  request.POST['internalid'])
    if outcome == 'OK':
        # Registration succeeded: log the new user straight in.
        return authorize(request)
    return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', outcome))
def modPass(request):
    """Change the logged-in user's password (form + POST handler)."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if not request.POST:
        return render_to_response('modpass.html')
    if request.POST['npassword'] != request.POST['cpassword']:
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Please confirm you enter the same password twice!'))
    if len(request.POST['npassword']) < 6:
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Password must have 6 or more characters!'))
    record = account.objects.get(userId=uinfo['uid'])
    record.userPwd = encrypt(uinfo['uname'], request.POST['npassword'])
    record.save()
    return HttpResponseRedirect('/')
def getPass(request):
    """Render the password-recovery page."""
    return render_to_response('getpass.html')
def disableUser(request):
    """Disable a user account (placeholder implementation).

    Bug fix: the parameter was misspelled ``requst``; Django passes the
    request positionally so it happened to work, but the name now matches
    every other view in this module.
    """
    return HttpResponse('disable user')
def logout(request):
    """Clear the whole session and return to the welcome page."""
    request.session.flush()
    return HttpResponseRedirect('/')
def authorize(request):
    """Authenticate posted credentials and populate the session."""
    uname = request.POST.get('username', '')
    upass = request.POST.get('password', '')
    auth_res = auth(uname, upass)
    if auth_res['auth_state'] == 'failed':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your username or password is invalid!'))
    if auth_res['auth_state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    user = auth_res['user_object']
    request.session['uid'] = user.userId
    request.session['uname'] = user.userName
    request.session['email'] = user.email
    request.session['utype'] = user.userType
    request.session['rid'] = user.roleId
    request.session.set_expiry(0)  # session expires when the browser closes
    rid = request.session['rid']
    # Resolve the display name from the role-specific table.
    if user.userType == 'S':
        request.session['rname'] = student.objects.get(stuId=rid).stuName
    elif user.userType == 'T':
        request.session['rname'] = teacher.objects.get(teaId=rid).teaName
    elif user.userType == 'M':
        request.session['rname'] = cmanager.objects.get(cmId=rid).cmName
    else:
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Unexpected error'))
    return HttpResponseRedirect('/')
def readStuRoll(request):
    """Show one student's roll; students see their own, others pass stuId."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    sid = uinfo['rid'] if uinfo['utype'] == 'S' else request.GET['stuId']
    context = getStuRoll(sid)
    context['pl'] = getPerf(sid)
    context['utype'] = uinfo['utype']
    return render_to_response('readroll.html', context)
def readStuScore(request):
    """Show a student's course scores for a term (default: all terms).

    Fix: removed the unreachable ``return HttpResponse('Student Score
    Read')`` that followed the returning if/else - dead scaffolding code.
    """
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    term = int(request.GET.get('term', -1))
    if uinfo['utype'] == 'S':
        # Students can only read their own scores.
        scores = getStuScore(uinfo['rid'], term)
    else:
        scores = getStuScore(request.GET['stuId'], term)
    return render_to_response('readscore.html', {'clist': scores})
def readStuPerf(request):
    """Placeholder for the student-performance detail page."""
    return HttpResponse('Student Performance Read')
def readStuNoti(request):
    """Show one notification to an entitled student or class manager."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] == 'T':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Teachers have no notification in student management system.'))
    nid = request.GET.get('nid', False)
    if not nid:
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Invalid entry of notification!'))
    if notification.objects.filter(id=nid).count() == 0:
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Invalid entry of notification!'))
    noti = notification.objects.get(id=nid)
    # Students may read notifications for their own class; managers may
    # read notifications for classes they manage.
    entitled_student = (uinfo['utype'] == 'S'
                        and noti.classId == student.objects.get(stuId=uinfo['rid']).classId)
    entitled_manager = (uinfo['utype'] == 'M'
                        and cclass.objects.filter(cmId=uinfo['rid'], classId=noti.classId).count() != 0)
    if entitled_student or entitled_manager:
        return render_to_response('readnoti.html', {'noti': noti})
    return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Invalid entry of notification!'))
def listStuRoll(request):
    """Class managers: list the students of one of their classes."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] != 'M':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    classes = getClassList(uinfo['rid'])
    if len(request.GET) == 0 or len(request.GET['cid']) == 0:
        return render_to_response('liststu.html', {'cl': classes})
    cid = request.GET['cid']
    return render_to_response('liststu.html',
                              {'cl': classes, 'sl': getStuList(cid), 'cid': int(cid)})
def listStuScore(request):
    """Teachers: list scores for one course/class pair they teach."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] != 'T':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    crid = request.GET['crid']
    clid = request.GET['clid']
    if course.objects.filter(teaId=uinfo['rid'], courseId=crid).count() == 0:
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You do not teach this course.'))
    order = request.GET.get('orderby', 'stuId')
    return render_to_response('listscore.html', getStuScoreList(crid, clid, order))
def listCourse(request):
    """Teachers: list their courses, then the classes attending one course.

    With ?crid and ?clid redirects to the score list; with only ?crid shows
    the classes whose students take that course; with neither, lists the
    teacher's own courses.

    Fix: the class lookup filtered ``student.objects.filter(classId__in=sl)``
    where ``sl`` is a list of *student* ids, comparing class ids against
    student ids; it now filters on ``stuId__in=sl`` so the distinct class
    ids of the enrolled students are returned.
    """
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] != 'T':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    if not request.GET.get('crid', False):
        return render_to_response('listcourse.html', {'crl': course.objects.filter(teaId=uinfo['rid'])})
    crid = request.GET['crid']
    if request.GET.get('clid', False):
        clid = request.GET['clid']
        return HttpResponseRedirect('/list/stuscore?crid=%s&clid=%s' % (crid, clid))
    # Students enrolled in the course -> their distinct classes.
    sl = courseOnStu.objects.filter(courseId=crid).values_list('stuId', flat=True)
    # NOTE(review): .distinct('classId') (DISTINCT ON) is only supported by
    # PostgreSQL; other backends raise NotSupportedError here.
    cidl = student.objects.filter(stuId__in=sl).distinct('classId').values_list('classId', flat=True)
    cl = [{'cid': c.classId, 'cname': str(c.classGrade) + '-' + c.className}
          for c in cclass.objects.filter(classId__in=cidl)]
    return render_to_response('listcourse.html', {'cl': cl, 'cr': crid})
def listStuPerf(request):
    """Class managers: list per-student performance for a class and term."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] != 'M':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    classes = getClassList(uinfo['rid'])
    if len(request.GET) == 0 or len(request.GET['cid']) == 0 or len(request.GET['term']) == 0:
        return render_to_response('listperf.html', {'cl': classes, 'term': int(request.GET.get('term', 0))})
    order = request.GET.get('orderby', 'stuId')
    return render_to_response('listperf.html',
                              {'cl': classes,
                               'spl': getPerfList(request.GET['cid'], int(request.GET['term']), order),
                               'cid': int(request.GET['cid']),
                               'term': int(request.GET['term'])})
def listStuNoti(request):
    """List notifications: managers pick a class, students see their own."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    utype = uinfo['utype']
    if utype == 'M':
        classes = getClassList(uinfo['rid'])
        if len(request.GET) == 0 or not request.GET.get('cid', False):
            return render_to_response('listnoti.html', {'cl': classes})
        scope = request.GET.get('nscope', 'all')
        cid = int(request.GET['cid'])
        return render_to_response('listnoti.html',
                                  {'cl': classes, 'cid': cid,
                                   'nl': getNotiList(cid, scope), 'ns': scope})
    if utype == 'S':
        scope = request.GET.get('nscope', 'all')
        cid = student.objects.get(stuId=uinfo['rid']).classId
        return render_to_response('listnoti.html',
                                  {'cid': cid, 'nl': getNotiList(cid, scope), 'ns': scope})
    return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Teacher do not have notification in the system'))
def listClass(request):
    """Class managers: list the classes they manage."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] != 'M':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    return render_to_response('listclass.html', {'cl': getClassList(uinfo['rid'])})
def newStuRoll(request):
    """Class managers: show the new-student form, or create the student."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] != 'M':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    if not request.POST:
        # Offer the manager's own classes as placement choices.
        choices = [{'cid': c.classId, 'cname': str(c.classGrade) + '-' + c.className}
                   for c in cclass.objects.filter(cmId=uinfo['rid'])]
        return render_to_response('newstu.html', {'cl': choices})
    outcome = saveStu(request.POST['sname'], request.POST['sbirth'],
                      request.POST['sgender'], request.POST['snative'],
                      request.POST['sclass'])
    if outcome == 'OK':
        mtype = 'info'
        outcome = 'The student is successfully created!'
    else:
        mtype = 'error'
    return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % (mtype, outcome))
def newStuEvent(request):
    """Class managers: record an event for a student in one of their classes.

    Bug fix: the existence guard compared the *method* ``count`` to 0
    (``...filter(...).count==0``), which is always False, so a missing
    student slipped past the check and crashed on the following ``.get()``.
    It now calls ``count()``.
    """
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] != 'M':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    if len(request.POST) == 0:
        sid = request.GET['sid']
        # The student must exist AND belong to a class this manager runs.
        if (student.objects.filter(stuId=sid).count() == 0) or (cclass.objects.filter(cmId=uinfo['rid'], classId=student.objects.get(stuId=sid).classId).count() == 0):
            return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'No such student in your class!'))
        return render_to_response('newevent.html', {'sid': sid, 'sname': student.objects.get(stuId=sid).stuName})
    res = newEvent(request.POST['sid'], request.POST['ebody'], request.POST['etype'],
                   request.POST['edate'], request.POST['eterm'], request.POST['epoint'])
    if res == 'OK':
        mt = 'info'
        res = 'The event is successfully created!'
    else:
        mt = 'error'
    return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % (mt, res))
def newStuScore(request):
    """Teachers: show the grading sheet for a course/class, or save the
    posted score sheet."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    elif uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    elif uinfo['utype'] == 'T':
        if len(request.POST) == 0:
            crid = request.GET['crid']
            clid = request.GET['clid']
            if course.objects.filter(teaId=uinfo['rid'], courseId=crid).count() == 0:
                return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You do not teach this course.'))
            else:
                return render_to_response('grade.html', getStuScoreList(crid, clid))
        else:
            # SECURITY(review): eval() on client-posted data executes
            # arbitrary Python on the server - replace with a safe parser
            # such as json.loads or ast.literal_eval.
            ssheet = eval(request.POST['ssheet'])
            saveScore(ssheet)
            #res=newNoti(request.POST['nclass'], request.POST['ntitle'], request.POST['nbody'], request.POST['edate'])
            #if res=='OK':
            #    mt='info'
            #    res='The notification is successfully created!'
            #else:
            #    mt='error'
            return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('info', 'Score sheet has been saved!'))
    else:
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
def newStuPerf(request):
    """Class managers: generate term performance records for a class."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] != 'M':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    if not request.POST:
        return render_to_response('newperf.html', {'cl': getClassList(uinfo['rid'])})
    outcome = genPerf(request.POST['cid'], request.POST['term'], int(request.POST['aweight']))
    if outcome == 'OK':
        return HttpResponseRedirect('/list/stuperf?cid=%s&term=%s' % (request.POST['cid'], request.POST['term']))
    return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', outcome))
def newStuNoti(request):
    """Class managers: show the new-notification form, or create one."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] != 'M':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    if not request.POST:
        choices = [{'cid': c.classId, 'cname': str(c.classGrade) + '-' + c.className}
                   for c in cclass.objects.filter(cmId=uinfo['rid'])]
        return render_to_response('newnoti.html', {'cl': choices})
    outcome = newNoti(request.POST['nclass'], request.POST['ntitle'],
                      request.POST['nbody'], request.POST['edate'])
    if outcome == 'OK':
        mtype = 'info'
        outcome = 'The notification is successfully created!'
    else:
        mtype = 'error'
    return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % (mtype, outcome))
def newClass(request):
    """Class managers: show the new-class form, or create the class."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] != 'M':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    if not request.POST:
        return render_to_response('newclass.html')
    outcome = saveClass('new', request.POST['cname'], request.POST['grade'], uinfo['rid'])
    if outcome == 'OK':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('info', 'Your class is created!'))
    return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', outcome))
def modStuRoll(request):
    """Class managers: edit a student's roll entry."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] != 'M':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    if not request.POST:
        context = getStuRoll(request.GET['sid'])
        context['cl'] = [{'cid': c.classId, 'cname': str(c.classGrade) + '-' + c.className}
                         for c in cclass.objects.filter(cmId=uinfo['rid'])]
        return render_to_response('modroll.html', context)
    outcome = saveStu(request.POST['sname'], request.POST['sbirth'],
                      request.POST['sgender'], request.POST['snative'],
                      request.POST['sclass'], request.POST['sid'])
    if outcome == 'OK':
        return HttpResponseRedirect('/read/sturoll?stuId=%s' % request.POST['sid'])
    return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', outcome))
def modClass(request):
    """Class managers: edit one of their classes."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] != 'M':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    if len(request.POST) == 0:
        target = cclass.objects.get(classId=request.GET['cid'])
        managers = cmanager.objects.all()
        return render_to_response('modclass.html', {'c': target, 'cm': managers})
    if uinfo['rid'] != cclass.objects.get(classId=request.POST['cid']).cmId:
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission on this class!'))
    outcome = saveClass(request.POST['cid'], request.POST['cname'],
                        request.POST['grade'], request.POST['cmid'])
    if outcome == 'OK':
        return HttpResponseRedirect('/list/class/')
    return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', outcome))
def delStuNoti(request):
    """Class managers: delete a notification belonging to one of their classes."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] != 'M':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    target = notification.objects.get(id=request.GET['nid'])
    if cclass.objects.filter(classId=target.classId, cmId=uinfo['rid']).count() == 0:
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    target.delete()
    return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('info', 'The notification is deleted!'))
def delStuEvent(request):
    """Class managers: delete a student event in one of their classes."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] != 'M':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    event = stuEvent.objects.get(eventId=request.GET['eid'])
    owner = student.objects.get(stuId=event.stuId)
    if cclass.objects.filter(classId=owner.classId, cmId=uinfo['rid']).count() == 0:
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    event.delete()
    return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('info', 'The event is deleted!'))
def delClass(request):
    """Delete a class, but only when it has no students and the requester is
    the manager ('M') owning it."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] != 'M':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    cid = request.GET['cid']
    # Refuse to delete a class that still has enrolled students.
    if student.objects.filter(classId=cid).count() != 0:
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'The class is not empty. Can not delete!'))
    if cclass.objects.filter(classId=cid, cmId=uinfo['rid']).count() == 0:
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    cclass.objects.get(classId=cid).delete()
    return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('info', 'The class is deleted!'))
def delStuRoll(request):
    """Remove a student entirely: events, course links, performance records,
    the student row and the matching login account. Manager ('M') only."""
    uinfo = getuinfo(request)
    if uinfo['state'] == 'not logon':
        return HttpResponseRedirect('/')
    if uinfo['state'] == 'disabled':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'Your account is disabled. Please contact the system administrator.'))
    if uinfo['utype'] != 'M':
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    sid = request.GET['sid']
    cid = student.objects.get(stuId=sid).classId
    if cclass.objects.filter(classId=cid, cmId=uinfo['rid']).count() == 0:
        return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('error', 'You have no permission!'))
    # Manual cascade: dependent rows first, then the student and its account.
    stuEvent.objects.filter(stuId=sid).delete()
    courseOnStu.objects.filter(stuId=sid).delete()
    performance.objects.filter(stuId=sid).delete()
    student.objects.filter(stuId=sid).delete()
    account.objects.filter(roleId=sid, userType='S').delete()
    return HttpResponseRedirect('/msg?mtype=%s&mbody=%s' % ('info', 'The student is deleted!'))
|
from datetime import datetime
from flask import Flask, render_template, url_for, request, session, redirect, flash
import data_manager
from util import json_response
# BUG fix: was Flask('__name__') — the literal string '__name__' instead of the
# module's __name__, which breaks Flask's root-path/resource detection.
app = Flask(__name__)
# app.secret_key = data_manager.random_api_key()
app.secret_key = '123'  # TODO: hard-coded secret key — use a random value in production
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route("/register", methods=['GET', 'POST'])
def register():
if 'user_id' in session:
return redirect(url_for("index"))
if request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
submission_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if data_manager.register_user(username, password, submission_time) is False:
flash('Not registered')
data_manager.register_user(username, password, submission_time)
flash('Successful registration. Log in to continue.')
return redirect(url_for("login"))
return render_template("register.html")
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in; GET renders the form, POST checks the credentials."""
    if 'user_id' in session:
        return redirect(url_for("index"))
    if request.method == 'POST':
        username = request.form.get('username')
        typed_password = request.form.get('password')
        user = data_manager.check_user(username)
        if user and data_manager.verify_password(typed_password, user['password']):
            session['user_id'] = user['id']
            session['username'] = username
            flash('User logged in!')
            return redirect('/')
        flash('User or Password do not match')
    return render_template('login.html')
@app.route('/logout', methods=['GET', 'POST'])
def logout():
    """Clear the session (if logged in) and return to the index page."""
    if 'user_id' not in session:
        flash('You are not logged in!')
    else:
        for key in ('user_id', 'username'):
            session.pop(key, None)
    return redirect(url_for('index'))
@app.route("/vote/<planetID>/<planetName>", methods=['GET', 'POST'])
@json_response
def vote(planetID, planetName):
if request.method == 'POST':
planet_id = planetID
planet_name = planetName
user_id = session['user_id']
submission_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
data_manager.vote_planet(planet_id, planet_name, user_id, submission_time)
flash(f'Voted on planet {planetName} successfully')
return render_template('index.html')
user_id = session['user_id']
date = data_manager.planets_votes(user_id)
return date
def main():
    """Run the Flask development server on port 8001."""
    # NOTE: debug=True must not be used in production.
    app.run(debug=True,
            port=8001)

if __name__ == '__main__':
    main()
|
import os
import shutil
from datetime import datetime
from django import forms
from django.core.files.storage import default_storage
from excelapp.models import Tm_Department, Tm_Service
from excelapp.utils.file_util import CustomFile
class ServiceForm(forms.Form):
    """Create/edit form for a Tm_Service record with an attached upload file.

    The optional ``details`` constructor kwarg carries the stored state of an
    existing record (expects 'file_url' / 'file_path' keys) so that editing
    does not force a re-upload.
    """
    department = forms.ModelChoiceField(
        label=Tm_Service._meta.get_field('department').verbose_name,
        queryset=Tm_Department.objects.all(),
        widget=forms.Select(attrs={'class':'form-control'}),
        empty_label='該当なし',
        required=True)
    service_name = forms.CharField(
        label=Tm_Service._meta.get_field('service_name').verbose_name,
        max_length=Tm_Service._meta.get_field('service_name').max_length,
        widget=forms.TextInput(attrs={'class':'form-control'}),
        required=True)
    upload_file = forms.FileField(
        label=Tm_Service._meta.get_field('upload_file').verbose_name,
        required=False)

    def __init__(self, *args, **kwargs):
        # Pop our custom kwarg before the base Form sees it.
        self.details = kwargs.pop('details') if 'details' in kwargs else None
        super().__init__(*args, **kwargs)

    def clean_upload_file(self):
        """Require an upload unless an existing stored file path is available."""
        infile = self.cleaned_data.get("upload_file")
        # BUG fix: the original condition was `not 'file_path' is self.details`,
        # an identity comparison that is effectively always True, so the
        # "upload required" error fired even when editing a record that
        # already has a stored file.  The intended check is membership.
        if infile is None and not (self.details and 'file_path' in self.details):
            raise forms.ValidationError('アップロードファイルは必須です。')
        return infile

    def upload_file_save(self):
        """Persist a newly uploaded file (removing the old stored directory),
        or reuse the stored url/path when nothing new was uploaded.

        Returns:
            (url, path) of the effective file.
        """
        infile = self.cleaned_data.get("upload_file")
        if infile:
            tmppath = datetime.now().strftime('upload/temp/%Y%m%d/%H%M%S/')
            url_, path_ = CustomFile.save_file(tmppath + infile.name, infile)
            if self.details and 'file_path' in self.details:
                # New file saved; drop the previously stored directory.
                CustomFile.remove_dir(os.path.dirname(self.details['file_path']))
        else:
            # NOTE(review): assumes self.details holds 'file_url'/'file_path'
            # whenever no file was uploaded (guaranteed by clean_upload_file).
            url_ = self.details['file_url']
            path_ = self.details['file_path']
        return (url_, path_)

    def get_upload_file(self):
        """Return the uploaded file, falling back to the stored file on disk."""
        infile = self.cleaned_data.get("upload_file")
        tmpfile = None
        if self.details and 'file_path' in self.details:
            tmpfile = CustomFile.localfile_to_filefield(self.details['file_path'])
        return infile if infile else tmpfile
# def get_upload_file(self):
# # cleaned_data = super(ServiceForm, self).clean()
# upload_file = self.cleaned_data.get("upload_file")
# if upload_file is None and self.details:
# file_path = self.details['file_path']
# upload_file = CustomFile.localfile_to_filefield(file_path)
# if default_storage.exists(file_path):
# dirname = os.path.dirname(file_path)
# shutil.rmtree(dirname)
# return upload_file
|
# coding=utf-8
import os
import sys
import json
import time
import wave
import base64
import signal
import pyaudio
import threading
#from apa102_pi.colorschemes import colorschemes
IS_PY3 = sys.version_info.major == 3  # Python 2/3 compatibility switch
WIDTH = 2            # sample width in bytes (16-bit audio)
CHANNELS = 1         # mono
RECORD_SECONDS = 5   # length of each recording
CHUNK = 1024         # frames per stream.read() call
if IS_PY3:
    from urllib.request import urlopen
    from urllib.request import Request
    from urllib.error import URLError
    from urllib.parse import urlencode
    timer = time.perf_counter
else:
    from urllib2 import urlopen
    from urllib2 import Request
    from urllib2 import URLError
    from urllib import urlencode
    if sys.platform == "win32":
        timer = time.clock
    else:
        # On most other platforms the best timer is time.time()
        timer = time.time
RATE = 16000  # sample rate in Hz
card = pyaudio.PyAudio()
# Open the input stream up front but leave it stopped; record() starts it.
stream = card.open(
    rate = RATE,
    format = card.get_format_from_width(WIDTH),
    channels = CHANNELS,
    input = True,
    start = False,)
class DemoError(Exception):
    """Generic error type used by this demo script."""
    pass
""" TOKEN start """
def record():
    """Record RECORD_SECONDS of audio from the global stream, write it to
    recordtest.wav, and play it back with aplay.

    Side effects: updates the global SYS_STATE (1=recording, 2=playback,
    0=idle) and closes the global stream — record() can only run once.
    """
    global KEY_STATE, strip, SYS_STATE, card
    SYS_STATE = 1
    stream.start_stream()
    print("* recording for 5 seconds")
    frames = []
    for _ in range(int(RATE / CHUNK * RECORD_SECONDS)):
        frames.append(stream.read(CHUNK))
    stream.stop_stream()
    stream.close()
    wf = wave.open("recordtest.wav", 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(card.get_sample_size(card.get_format_from_width(WIDTH)))
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    # BUG fix: was `wf.close` without parentheses — the method was never
    # called, so the WAV file was left open/unflushed.
    wf.close()
    SYS_STATE = 2
    print("Play recorded file")
    os.system("aplay recordtest.wav")
    SYS_STATE = 0
def sigint_handler(signum, frame):
    """SIGINT handler: flag exit, release the audio stream, terminate."""
    global FLAG_EXIT
    FLAG_EXIT = 1
    print('catched interrupt signal!')
    stream.stop_stream()
    stream.close()
    sys.exit(0)
if __name__ == '__main__':
    # Install the Ctrl-C handler, run one record/playback cycle, then exit.
    signal.signal(signal.SIGINT, sigint_handler)
    record()
    sys.exit(0)
|
__author__ = 'Greg Ziegan'
from .models import User
class PhoneAuthBackend(object):
    """Authentication backend that identifies users by phone number."""

    def authenticate(self, phone, password):
        """Return the user matching *phone* if *password* verifies, else None."""
        try:
            user = User.objects.get(phone=phone)
            if user.check_password(password):
                return user
        except User.DoesNotExist:
            return None

    def get_user(self, phone):
        """Return the active user for *phone*, or None.

        BUG fix: the original let User.DoesNotExist propagate for an unknown
        phone; a backend's get_user is expected to return None in that case.
        """
        try:
            user = User.objects.get(phone=phone)
        except User.DoesNotExist:
            return None
        return user if user.is_active else None
|
# -*- coding: utf-8 -*-
# Odoo module manifest for the POS Graph Customize addon.
{
    'name': 'POS Graph Customize',
    'summary': 'New Point of Sale Graph with new filters',
    'version': '0.1',
    'category': 'Point of sale',
    'description': """
POS Graph Customize
============================================================================================
Features:
---------
* New report graph for POS orders analysis
* New filters and grouping for/by brands, month, year, weeks
""",
    'author': 'Onawoo Soluciones C.A. (Alexander Rodriguez adrt271988@gmail.com)',
    'website': '',
    # Data files loaded on install: access rules + report views.
    'data': [
        'security/ir.model.access.csv',
        'report/pos_order_report_view.xml',
    ],
    'depends': [
        'trend_point_of_sale',
    ],
    'qweb': [
    ],
    "installable": True,
    "application": True,
    "auto_install": False,
}
|
from regionProposal import processing
import cv2
def outputVideo(clf, nonFiltered):
    """Run the region-proposal classifier over ../data/input.mpg and write an
    annotated copy to ../output/output.avi.

    Detections on even frames are cached in prevRect; on odd frames a new
    detection is drawn (and remembered in `printed`) only if it lies within
    60px of a cached one, which suppresses jitter between frames.

    Args:
        clf: classifier passed through to processing().
        nonFiltered: flag passed through to processing().
    Returns:
        None (also returned on error after releasing the writer).
    """
    print('Load video')
    vidcap = None
    out = None
    try:
        mpgFile = '../data/input.mpg'
        vidcap = cv2.VideoCapture(mpgFile)
        cnt = 0
        prevRect = []
        printed = []
        width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = vidcap.get(cv2.CAP_PROP_FPS)
        # BUG fix: VideoWriter expects the frame size as (width, height); the
        # original passed (height, width), producing a broken output video
        # for non-square frames.
        out = cv2.VideoWriter('../output/output.avi',
                              cv2.VideoWriter_fourcc(*'MJPG'), fps, (width, height))
        while True:
            if cnt % 2 == 0:
                del prevRect[:]
            ret, image = vidcap.read()
            if ret == 0:
                out.release()
                break
            if int(vidcap.get(1)):
                rect = processing(image, clf, nonFiltered)
                for (x1, x2, y1, y2, pred) in rect:
                    if pred == 1:
                        if cnt % 2 == 0:
                            # Even frame: cache the detection and re-draw the
                            # rectangles confirmed on the previous odd frame.
                            prevRect.append([x1, x2, y1, y2])
                            for (px1, px2, py1, py2) in printed:
                                cv2.rectangle(image, (px1, py1), (px2, py2), (0, 0, 255), 3)
                        else:
                            # Odd frame: draw only detections close to a cached one.
                            del printed[:]
                            for (px1, px2, py1, py2) in prevRect:
                                if (abs(px1 - x1) < 60 and abs(px2 - x2) < 60 and
                                        abs(py1 - y1) < 60 and abs(py2 - y2) < 60):
                                    cv2.rectangle(image, (x1, y1), (x2, y2), (0, 0, 255), 3)
                                    printed.append([x1, x2, y1, y2])
                                    break
                cnt += 1
                out.write(image)
                print('%d frame' % (cnt))
    except Exception as e:
        # Release the writer if it was created before the failure.
        if out is not None:
            out.release()
        print('Write Error')
        print(e)
    finally:
        if vidcap is not None:
            vidcap.release()
    return None
|
# Converted Python-2-only `print` statements to the function form, which is
# valid on both Python 2 and Python 3 for single-argument calls.
for i in range(0, 6):
    print(i)
a = 'a'
print(ord('A'))
print(chr(65))
import sys
import sdl2
import sdl2.ext
class MovementSystemAirHockey(sdl2.ext.Applicator):
    """Applies Velocity components to sprites each tick, clamping each sprite
    into its allowed horizontal band: paddle1 left of the midline, paddle2
    right of it, and everything else (the puck) to the whole table.
    """

    def __init__(self, minx, miny, maxx, maxy, midline):
        super(MovementSystemAirHockey, self).__init__()
        self.componenttypes = Velocity, sdl2.ext.Sprite
        self.minx = minx
        self.miny = miny
        self.maxx = maxx
        self.maxy = maxy
        self.midline = midline
        self.paddle1 = None
        self.paddle2 = None

    def _advance(self, sprite, velocity, minx, maxx):
        # Move the sprite by its velocity and clamp it to [minx, maxx] x
        # [miny, maxy], accounting for the sprite's own width/height.
        swidth, sheight = sprite.size
        sprite.x += velocity.vx
        sprite.y += velocity.vy
        sprite.x = max(minx, sprite.x)
        sprite.y = max(self.miny, sprite.y)
        if sprite.x + swidth > maxx:
            sprite.x = maxx - swidth
        if sprite.y + sheight > self.maxy:
            sprite.y = self.maxy - sheight

    def process(self, world, componentsets):
        # Deduplication: the original repeated the move-and-clamp logic three
        # times, differing only in the horizontal bounds.
        for velocity, sprite in componentsets:
            if sprite == self.paddle1:
                self._advance(sprite, velocity, self.minx, self.midline)
            elif sprite == self.paddle2:
                self._advance(sprite, velocity, self.midline, self.maxx)
            else:
                self._advance(sprite, velocity, self.minx, self.maxx)
class Velocity(object):
    """2-D velocity component: per-tick x/y displacement, initially at rest."""

    def __init__(self):
        super(Velocity, self).__init__()
        self.vx, self.vy = 0, 0
|
'''
具体思考见java解法,主要是要理清楚思路
'''
class Solution:
    def convert(self, s: str, numRows: int) -> str:
        """Read s in zigzag order over numRows rows, return it row by row
        (LeetCode 6). Inputs short enough to fit one column, or a single
        row, come back unchanged."""
        if s is None or len(s) <= numRows or numRows == 1:
            return s
        cycle = 2 * numRows - 2  # length of one full zigzag period
        chars = []
        for row in range(numRows):
            if row == 0 or row == numRows - 1:
                # Top/bottom rows advance by a fixed full period.
                chars.extend(s[j] for j in range(row, len(s), cycle))
            else:
                # Middle rows alternate between two strides summing to `cycle`.
                chars.append(s[row])
                step = cycle - 2 * row
                j = row + step
                while j < len(s):
                    chars.append(s[j])
                    step = cycle - step
                    j += step
        return ''.join(chars)
'''
added xavier_initializer
added dropout
'''
import numpy as np
import tensorflow as tf
import os
import sys
from tensorflow.examples.tutorials.mnist import input_data
# Load MNIST (downloads on first run) with one-hot labels.
mnist = input_data.read_data_sets('/home/mhkim/data/mnist', one_hot=True)
train_checkpoint = '/home/mhkim/data/checkpoint/mnist_nn/'
# Start from a clean checkpoint directory every run.
if tf.gfile.Exists(train_checkpoint):
    tf.gfile.DeleteRecursively(train_checkpoint)
tf.gfile.MakeDirs(train_checkpoint)
#learning_rate = 0.001
training_epochs = 15
display_step = 1   # log/save every `display_step` epochs
batch_size = 100
def xavier_init(n_inputs, n_outputs, uniform=True):
    """Xavier/Glorot weight initializer.

    Returns a uniform initializer on [-sqrt(6/(fan_in+fan_out)), +...] when
    `uniform` is True, otherwise a truncated-normal initializer with
    stddev sqrt(3/(fan_in+fan_out)).
    """
    fan_sum = n_inputs + n_outputs
    if uniform:
        limit = tf.sqrt(6.0 / fan_sum)
        return tf.random_uniform_initializer(-limit, limit)
    stddev = tf.sqrt(3.0 / fan_sum)
    return tf.truncated_normal_initializer(stddev=stddev)
# 784-256-256-256-10 fully connected net with ReLU + dropout.
x = tf.placeholder('float', [None, 784], name='X')
y = tf.placeholder('float', [None, 10], name='Y')
W1 = tf.get_variable("W1", shape=[784, 256], initializer=xavier_init(784, 256))
W2 = tf.get_variable("W2", shape=[256, 256], initializer=xavier_init(256, 256))
W3 = tf.get_variable("W3", shape=[256, 256], initializer=xavier_init(256, 256))
W4 = tf.get_variable("W4", shape=[256, 10], initializer=xavier_init(256, 10))
b1 = tf.Variable(tf.random_normal([256]), name='bias1')
b2 = tf.Variable(tf.random_normal([256]), name='bias2')
b3 = tf.Variable(tf.random_normal([256]), name='bias3')
b4 = tf.Variable(tf.random_normal([10]), name='bias4')
#activation = tf.nn.softmax(tf.matmul(x, W) + b)
#cost = tf.reduce_mean(-tf.reduce_sum(y* tf.log(activation) , reduction_indices=1))
# dropout_rate is the keep probability: 0.7 during training, 1.0 at eval.
dropout_rate = tf.placeholder("float", name='dropout_rate')
L1 = tf.nn.relu(tf.add(tf.matmul(x, W1,) , b1), name='relu1')
_L1 = tf.nn.dropout(L1, dropout_rate, name='relu1-dropout')
L2 = tf.nn.relu(tf.add(tf.matmul(_L1, W2,) , b2), name='relu2')
_L2 = tf.nn.dropout(L2, dropout_rate, name='relu2-dropout')
L3 = tf.nn.relu(tf.add(tf.matmul(_L2, W3,) , b3), name='relu3')
_L3 = tf.nn.dropout(L3, dropout_rate, name='relu3-dropout')
hypothesis = tf.add(tf.matmul(_L3, W4), b4, name='hypothesis')
# NOTE(review): positional args here are (logits, labels) per the old TF API;
# newer TF versions require keyword arguments — confirm the targeted version.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(hypothesis, y), name='cost')
#######
batch = tf.Variable(0, dtype=tf.float32, name='batch')
# NOTE(review): decay step is the TEST-set size; presumably the training-set
# size was intended — confirm.
train_size = mnist.test.labels.shape[0]
global_step = tf.Variable(0, trainable=False, name='global_step')
starter_learning_rate = 0.001
learning_rate = tf.train.exponential_decay(
    starter_learning_rate,  # Base learning rate.
    global_step,            # Current index into the dataset.
    train_size,             # Decay step.
    0.96,                   # Decay rate.
    staircase=True,
    name='decay_learning_rate')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train = optimizer.minimize(cost, name='train')
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess :
    tf.global_variables_initializer().run()
    saver = tf.train.Saver()
    for epoch in range(training_epochs) :
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        for i in range(total_batch):
            batch_xs , batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train, feed_dict={ x:batch_xs, y:batch_ys , dropout_rate:0.7})
            # NOTE(review): cost is re-evaluated in a second sess.run per batch,
            # doubling the forward passes — could be fetched alongside `train`.
            avg_cost += sess.run(cost, feed_dict={ x:batch_xs, y:batch_ys , dropout_rate:0.7}) / total_batch
        if epoch % display_step == 0:
            print ( "Epoch : ", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost) , '%f' % learning_rate.eval() )
            saver.save(sess=sess, save_path=os.path.join(train_checkpoint, 'save.ckpt'))
            sys.stdout.flush()
    print ("Optimization Finished!")
    correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy : ", accuracy.eval({x:mnist.test.images, y:mnist.test.labels , dropout_rate:1 }))
# encoding: utf-8
from tastypie.authorization import * |
"""
Ejercicio 2
Una empresa requiere cierto número de trabajadores que laboren durante 8 horas diarias en diferentes días de la semana.
Los trabajadores deben desempeñar sus cargos 5 días consecutivos y descansar 2 días. Por ejemplo, un trabajador que labora
de martes a sábado, descansaría el domingo y el lunes
"""
from __future__ import division
from pyomo.environ import *
from pyomo.opt import SolverFactory
# Workforce covering model: minimize total workers, where each worker starts
# a 5-consecutive-day shift and rests 2 days; daily demand must be met.
M = ConcreteModel()
dias = 7
M.dias = RangeSet(1, dias)

# Required workers per day (Trabajadores Por Dia).
M.TPD = Param(M.dias, mutable=True)
M.TPD[1] = 17
M.TPD[2] = 13
M.TPD[3] = 15
M.TPD[4] = 19
M.TPD[5] = 14
M.TPD[6] = 16
M.TPD[7] = 11

# Coverage matrix: T[i,j] = 1 when a shift starting on day j works on day i
# (day j and the four preceding start days, wrapping around the week).
M.T = Param(M.dias, M.dias, mutable=True)
for i in M.dias:
    for j in M.dias:
        M.T[i, j] = 0
for i in M.dias:
    M.T[i, i] = 1
    aux = [i - k for k in range(1, 5)]
    for j in aux:
        if j == 0:
            M.T[i, 7] = 1  # index 0 wraps to day 7
        else:
            M.T[i, (j % 7)] = 1

# x[j] = number of workers whose shift starts on day j.
# BUG fix: the domain was PositiveIntegers, which forces at least one worker
# to start every day; the covering formulation allows zero starts on a day.
M.x = Var(M.dias, domain=NonNegativeIntegers)
M.obj = Objective(expr=sum(M.x[i] for i in M.dias), sense=minimize)
M.res = ConstraintList()
for i in M.dias:
    # Workers on duty on day i must cover that day's demand.
    M.res.add(sum(M.x[j] * M.T[i, j] for j in M.dias) >= M.TPD[i])
SolverFactory('glpk').solve(M)
M.display()
import fnmatch
import os
import string
import shutil
searchStr = u"\u003F"  # '?' — NOTE(review): unused; kept for compatibility
BAD_CHARS = ('?', '"', '*')  # characters invalid in Windows directory names
for root, dirnames, filenames in os.walk('./wiki/commons/'):
    removed = []
    for dirname in dirnames:
        if any(ch in dirname for ch in BAD_CHARS):
            target = os.path.join(root, dirname)
            print(target)
            shutil.rmtree(target)
            removed.append(dirname)
    # BUG fix: prune the deleted directories from dirnames so os.walk does
    # not try to descend into trees that no longer exist.
    if removed:
        dirnames[:] = [d for d in dirnames if d not in removed]
# -*- coding: utf8 -*-
from jiendia.test.io import DATA_DIR
def test_read():
    """The sample SEQ archive parses into sequentially numbered, non-empty frames."""
    from jiendia.io.archive.seq import SeqArchive
    with SeqArchive(DATA_DIR + '/010_01_01_STAND_R.SEQ') as archive:
        assert len(archive.frames) > 0
        for expected_number, frm in enumerate(archive.frames, start=1):
            assert frm.number == expected_number
            assert len(frm.parts) > 0
|
# 优化前
class Solution:
    def plusOne(self, digits: List[int]) -> List[int]:
        """Return a NEW list equal to the decimal number `digits` plus one
        (input list is not modified)."""
        if not digits:
            return []  # an empty list stays empty
        out = []
        carry = 1  # the "+1" enters as the initial carry
        for d in reversed(digits):
            carry, value = divmod(d + carry, 10)
            out.append(value)
        if carry:
            out.append(carry)
        out.reverse()
        return out
# 优化后
class Solution:
    def plusOne(self, digits: List[int]) -> List[int]:
        """Increment the decimal number `digits` in place and return it."""
        acc = 1
        for i in range(len(digits) - 1, -1, -1):
            t = digits[i] + acc
            if t == 10:
                acc = 1
                digits[i] = 0
            else:
                digits[i] = t
                acc = 0
        # BUG fix: the original had an unconditional `return digits` before
        # this carry check, making it unreachable — [9, 9] came back as [0, 0]
        # instead of [1, 0, 0].
        if acc:
            digits.insert(0, 1)
        return digits
|
from lib import settings
class Controller:
    """Abstract vehicle controller interface: steering, throttle and shutdown.

    All methods are no-op stubs here; concrete controllers override them.
    """
    def steer_left(self):
        """Turn the steering to the left."""
        pass
    def steer_right(self):
        """Turn the steering to the right."""
        pass
    def steer_neutral(self):
        """Center the steering."""
        pass
    def forward(self, power: int = 100):
        """Drive forward at `power` percent throttle."""
        pass
    def reverse(self, power: int = 100):
        """Drive backward at `power` percent throttle."""
        pass
    def neutral(self):
        """Stop driving (no throttle)."""
        pass
    def exit(self):
        """Release any resources held by the controller."""
        pass
|
import requests, ctypes, urllib.request
from unsplash import Unsplash
# Path for the image
# NOTE(review): placeholder — must be edited to a real absolute path before use.
path = "YourPath/unsplash.jpg"
# Instantiate Unsplash Object
u = Unsplash()
# Photo url
photo = ""
# -------------------
# Change Background
# -------------------
def change_desktop(photo):
    """Download the image at URL `photo` and set it as the Windows wallpaper.

    BUG fix: the original saved the download to "unsplash.jpg" in the current
    directory while the wallpaper call used the global `path`, so the freshly
    downloaded image was never the one applied.  Save straight to `path`.
    """
    urllib.request.urlretrieve(photo, path)
    print(photo)
    # 20 == SPI_SETDESKWALLPAPER
    ctypes.windll.user32.SystemParametersInfoW(20, 0, path, 0)
# -------------------
# Menu
# -------------------
def menu():
    """Print the interactive menu header and the four options."""
    print("Welcome!\nLet's change your background")
    print("1) Random Photo\n2) Search by term\n3) Search by collection\n4) Exit")
# -------------------
# Main
# -------------------
def main():
    """Interactive loop: show the menu, fetch a photo for the chosen mode,
    and apply it as wallpaper until the user exits."""
    running = True
    while running:
        menu()
        choice = input()
        if choice == "1":
            change_desktop(u.get_random_image())
        elif choice == "2":
            term = input("Search term: ")
            change_desktop(u.get_photo(term))
        elif choice == "3":
            name = input("Collection name: ")
            change_desktop(u.get_collection(name))
        elif choice == "4":
            running = False
            print("Exiting")

if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# coding: utf-8
from reconciliationICBC import DealExcelICBC,CheckICBC
from reconciliationABC import DealExcelABC,CheckABC
from reconciliationBOC import DealExcelBOC,CheckBOC
from reconciliationCCB import DealExcelCCB,CheckCCB
from reconciliationCEB import DealExcelCEB,CheckCEB
from reconciliationPOS import CheckPOS
import os
import consolechars
# from cfonts import say
if __name__=='__main__':
    # say("LEADING ssc", font='simple', size=(150, 30), colors=['candy'],
    #     background='transparent', align='center', letter_spacing=None,
    #     line_height=20, space=True,max_length=0)
    consolechars.say("LEADING SSC")
    go_on = True
    # Main REPL: pick a bank (or POS), run the matching reconciliation
    # pipeline, then ask whether to continue.
    while go_on:
        try:
            bank_name = input("请选择对账银行或其他功能(1.工行[icbc] 2.农行[abc] 3.中行[boc] 4.建行[ccb]) 5.光大[ceb] 6.POS)\n")
            # Strip quote characters a drag-and-dropped path may carry.
            nc_path = input("请输入NC/POS表路径:\n").strip(r'\'|\"')
            nc_file_name = os.path.basename(nc_path).split('.')[0]
            # POS mode needs no bank spreadsheet.
            if bank_name.lower()!='pos' and bank_name!='6':
                bank_path = input("请输入银行表路径:\n").strip(r'\'|\"')
                bank_file_name = os.path.basename(bank_path).split('.')[0]
            # Derive the output directory from the NC path.
            if os.path.isdir(nc_path):
                save_path = nc_path
            elif os.path.isfile(nc_path) and os.path.dirname(nc_path)!='':
                save_path = os.path.dirname(nc_path)
            elif os.path.dirname(nc_path)=='':
                save_path = os.getcwd()
            # Dispatch to the bank-specific deal/check pipeline; each branch
            # follows the same pattern: parse NC sheet, parse bank sheet,
            # then run the reconciliation checker.
            if bank_name.lower()=="icbc" or bank_name=='1' or bank_name == "工行":
                deal_excel = DealExcelICBC(nc_path=nc_path,bank_path=bank_path)
                nc_icbc = deal_excel.dealNC()
                icbc = deal_excel.dealBANK()
                check_icbc = CheckICBC(nc_icbc,icbc,nc_file_name,bank_file_name,save_path)
                check_icbc.doall()
            elif bank_name.lower()=='abc' or bank_name=='2' or bank_name=='农行':
                deal_excel = DealExcelABC(nc_path=nc_path,bank_path=bank_path)
                nc_abc = deal_excel.dealNC()
                abc = deal_excel.dealBANK()
                check_abc = CheckABC(nc_abc,abc,nc_file_name,bank_file_name,save_path)
                check_abc.doall()
            elif bank_name.lower()=='boc' or bank_name=='3' or bank_name=='中行':
                deal_excel = DealExcelBOC(nc_path=nc_path,bank_path=bank_path)
                nc_boc = deal_excel.dealNC()
                boc = deal_excel.dealBANK()
                check_boc = CheckBOC(nc_boc,boc,nc_file_name,bank_file_name,save_path)
                check_boc.doall()
            elif bank_name.lower() == 'ccb' or bank_name=='4' or bank_name=='建行':
                deal_excel = DealExcelCCB(nc_path=nc_path,bank_path=bank_path)
                nc_ccb = deal_excel.dealNC()
                ccb = deal_excel.dealBANK()
                check_ccb = CheckCCB(nc_ccb,ccb,nc_file_name,bank_file_name,save_path)
                check_ccb.doall()
            elif bank_name.lower() == 'ceb' or bank_name=='5' or bank_name=='光大':
                deal_excel = DealExcelCEB(nc_path=nc_path,bank_path=bank_path)
                nc_ceb = deal_excel.dealNC()
                ceb = deal_excel.dealBANK()
                check_ceb = CheckCEB(nc_ceb,ceb,nc_file_name,bank_file_name,save_path)
                check_ceb.doall()
            elif bank_name.lower() == 'pos' or bank_name == '6':
                checkPOS = CheckPOS(pos_path=nc_path,pos_file_name=nc_file_name,save_path=save_path)
                checkPOS.doall()
            # Empty input (Enter) continues; 'n' quits; anything else re-asks.
            choice = True
            while choice:
                go = input("continue/exit: [enter/n] ? ")
                if go.strip()=='':
                    choice = False
                elif go.lower()=='n':
                    choice = False
                    go_on = False
                else:
                    pass
        except Exception as e:
            raise e
            # print("ERROR:",e)
            # time.sleep(60)
    os.system('pause')
|
import numpy as np
import pandas as pd
import chartify
# Build three years of daily timestamps plus six noisy upward-trending
# series with increasing vertical offsets (0, 200, 500, 700, 800, 1000).
data = pd.DataFrame({'time': pd.date_range('2015-01-01', '2018-01-01')})
n_days = len(data)
data['1st'] = np.array(list(range(n_days))) + np.random.normal(
    0, 10, size=n_days)
data['2nd'] = np.array(list(range(n_days))) + np.random.normal(
    0, 10, size=n_days) + 200
data['3rd'] = np.array(list(range(n_days))) + np.random.normal(
    0, 10, size=n_days) + 500
data['4th'] = np.array(list(range(n_days))) + np.random.normal(
    0, 10, size=n_days) + 700
data['5th'] = np.array(list(range(n_days))) + np.random.normal(
    0, 10, size=n_days) + 800
data['6th'] = np.array(list(range(n_days))) + np.random.normal(
    0, 10, size=n_days) + 1000
print(data)
# Reshape wide -> long: one (time, grouping, y) row per series sample.
data = pd.melt(
    data,
    id_vars=['time'],
    value_vars=data.columns[1:],
    value_name='y',
    var_name=['grouping'])  # NOTE(review): newer pandas expects a scalar var_name — confirm version
print(data)
# Plot the data
ch = chartify.Chart(blank_labels=True, x_axis_type='datetime')
ch.style.set_color_palette(palette_type='sequential')
ch.plot.line(
    data_frame=data.sort_values('time'),
    x_column='time',
    y_column='y',
    color_column='grouping')
ch.set_title("Sequential color palette type")
ch.set_subtitle("Palette type for sequential ordered dimensions")
ch.show('html')
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Example 1: pitfalls when passing mutable arguments to functions -----------
def change(a, b):
    """
    Two ways arguments are passed:
    1. Immutable objects behave as if passed "by value".
    2. Mutable objects are passed "by reference": if the function mutates the
       argument in place (e.g. append, item assignment), the caller's object
       changes too.
    """
    a = 2
    b[0] = 'spam'
x = 1
y = [1, 2]
change(x, y)
print(x, y)
# This follows from how assignment binds names differently for mutable and
# immutable objects:
x = 1
y = x
y = 2
print(x, y)
print('两个对象引用了同一个简单变量,赋值语句即为简单的赋值!')
x = [1, 2]
y = x
y[0] = 'spam'
print(x, y)
print('两个对象引用了一个复杂变量,赋值语句为“指针”含义,x与y都指向同一个内存地址')
# To avoid the shared-mutation problem, pass a copy of the argument:
def change(a):
    a[0] = 'spam'
x = [1, 2]
change(x[:])
# ...or copy inside the function:
def change(a):
    a = a[:]
x = [1, 2]
change(x)
# ...or convert the data to an immutable object such as a tuple (not recommended):
change(tuple(x))
|
# -*- coding: utf-8 -*-
from typing import Text
# noinspection PyProtectedMember
from bs4 import SoupStrainer
from .base import FeedFetcher
class SmzdmFetcher(FeedFetcher):
    """Feed fetcher for smzdm.com that optionally keeps only items whose
    title contains one of the configured keywords."""

    FILTER = SoupStrainer('article', 'article-details')

    def __init__(self, keywords=None):
        super().__init__()
        self.keywords = keywords

    def url(self) -> Text:
        return 'http://feed.smzdm.com'

    def description(self, url) -> Text:
        """Return the 'buy' link block and the item box from the article page,
        concatenated as HTML."""
        soup = self.fetcher.soup(url, parse_only=self.FILTER)
        parts = []
        buy_div = soup.find('div', 'buy')
        if buy_div:
            parts.append(str(buy_div))
        item_box = soup.find('div', 'item-box')
        if item_box:
            parts.append(str(item_box))
        return ''.join(parts)

    # noinspection PyUnusedLocal
    def callback(self, result, item):
        """Keyword filter; also derives result['id'] from the link for kept items."""
        title = result['title'].upper()
        if self.keywords:
            goon = any(word in title for word in self.keywords)
        else:
            goon = True
        if goon:
            result['id'] = result['link'].split('/')[-2]
        return goon
|
# -*- coding: utf-8 -*-
from scrapy import Spider, Request
from linux_jobs.items import LinuxJobsItem
from bs4 import BeautifulSoup
import re
class LinuxJobSpider(Spider):
    """Crawl 51job's mobile search results for Linux jobs and scrape each
    job-detail page into a LinuxJobsItem."""
    name = 'linux_job'
    allowed_domains = ['51job.com']
    start_urls = ['https://m.51job.com/search/joblist.php?keyword=linux&keywordtype=2']

    def parse(self, response):
        """Yield detail-page requests for one listing page, then follow the
        next-page link if present."""
        url_lis = response.xpath('//div[@class="items"]/a/@href').extract()
        for url in url_lis:
            yield Request(url, callback=self.parse_detail)
        next_page = response.xpath('//*[@id="turnpage"]/div/a[2]/@href').extract()
        if next_page:
            yield Request(response.urljoin(next_page[0]), callback=self.parse)

    @staticmethod
    def _extract_or_default(response, xpath, default):
        # Extract an xpath; fall back to a one-element default list when empty.
        values = response.xpath(xpath).extract()
        return values if values else [default]

    def parse_detail(self, response):
        """Parse one job-detail page into a LinuxJobsItem.

        Note: the original built an unused BeautifulSoup object here — removed.
        """
        item = LinuxJobsItem()
        item['name'] = response.xpath('//div[@class="jt"]/p/text()').extract()
        item['city'] = response.xpath('//div[@class="jt"]/em/text()').extract()
        item['peops'] = self._extract_or_default(response, '//span[@class="s_r"]/text()', '暂无信息')
        item['salary'] = self._extract_or_default(response, '//p[@class="jp"]/text()', '暂无信息')
        item['experience'] = self._extract_or_default(response, '//span[@class="s_n"]/text()', '无要求')
        item['education'] = self._extract_or_default(response, '//span[@class="s_x"]/text()', '暂无信息')
        item['company'] = response.xpath('//*[@id="pageContent"]/div[2]/a[1]/p/text()').extract()
        item['com_info'] = response.xpath('//*[@id="pageContent"]/div[2]/a[1]/div/text()').extract()
        job_info = response.xpath('//div[@class="ain"]/article//text()').extract()
        item['job_info'] = [job_info]
        return item
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# otra_app.py
#
from Tkinter import *
def mi_funcion():
    # BUG fix: was the Python-2 statement `print esto`; use the function form.
    print(esto)

# BUG fix: the original line `mi funcion = "eje"` was a syntax error (space in
# the identifier).  Presumably this was meant to define the message the
# callback prints — TODO confirm the intended name/use.
esto = "eje"

app = Tk()
app.title("Aplicacion grafica en python")
etiqueta = Label(app, text="Hola mundo!!!")
boton = Button(app, text="OK!!")
etiqueta.pack()
boton.pack()
app.mainloop()
|
from app import app
from flask import render_template, jsonify, request
from flask import send_file
from response import *
import random
from lxml import etree
from urllib.request import urlopen
import json
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import requests
from pytz import timezone
from gingerit.gingerit import GingerIt
import os
import base64
from datetime import datetime
from models import *
import base64
import random
|
from abc import ABC, abstractmethod
import torch
from torch import nn
from torch import distributions as D
class BaseSPN(nn.Module, ABC):
'''
based on a RAT-SPN structure
batch: batch size
R: number of replicas in the RAT-SPN
xdim: number of dimensions of each data point
PN: number of nodes in each sector of the parent (current) layer
CN: number of nodes in each sector of the child layer
layer_width[i]: number of sectors in the i-th layer
'''
    def __init__(self, xdim, N, R=1, seed=0):
        """
        xdim: number of input dimensions; N: nodes per sector; R: replicas.
        seed: if nonzero, seeds torch's global RNG before drawing the per-replica
              leaf permutations (NOTE(review): a seed of 0 means "don't seed").
        """
        super(BaseSPN, self).__init__()
        self.xdim, self.N, self.R = xdim, N, R
        if seed: torch.random.manual_seed(seed)
        # One random permutation of the input dimensions per replica, plus its inverse.
        self.leaf_perms = torch.stack([torch.randperm( xdim ) for r in range(R) ]) # (R, xdim)
        self.inverse_leaf_perms = torch.stack([torch.argsort(leaf_perm) for leaf_perm in self.leaf_perms ]) # (R, xdim)
        # Sector counts per layer: halve repeatedly from xdim down to 1,
        # with an extra width-1 root level; reversed so index 0 is the root.
        self.layers = xdim.bit_length() + 1
        self.layer_widths = [xdim]
        while self.layer_widths[-1] != 1:
            width = self.layer_widths[-1] // 2
            self.layer_widths.append( width )
        self.layer_widths.append(1)
        self.layer_widths.reverse()
        assert(len(self.layer_widths) == self.layers)
        # Mixture weights (logits) over the R replicas.
        self.mix = nn.Parameter(torch.rand( R ))
    @abstractmethod
    def get_edges(self, r_idx, grp_idx):
        """Return edge parameters (logits) for the requested sectors; reshaped
        by callers to (..., N, N)."""
        # Gets the edge parameters for the sector of the RAT-SPN corresponding to replica r_idx and sector grp_idx.
        # The sector grp_idx is assigned based on a level-order traversal of the sectors.
        pass
    @abstractmethod
    def get_device(self):
        """Return the torch device on which sampling tensors should be allocated."""
        pass
    def forward(self, x):
        """Compute the log-likelihood of binary inputs under the RAT-SPN.

        Args:
            x: tensor unpacked as (batch, _, xdim) — NOTE(review): the middle
               dimension is discarded; confirm callers pass (batch, 1, xdim).
        Returns:
            (batch,) log-probabilities, mixed over the R replicas.
        """
        _, _, xdim = x.shape
        # Bernoulli leaves: row 0 (p=0) scores x==0, row 1 (p=1) scores x==1.
        p = torch.stack( [torch.zeros(xdim, device=x.device), torch.ones(xdim, device=x.device)], dim=0) # (2, xdim)
        log_y = D.Bernoulli(probs=p).log_prob( x ) # (batch, 2, xdim)
        # Apply each replica's input permutation.
        log_y = log_y[:,:,self.leaf_perms] # (batch, 2, R, xdim)
        log_y = log_y.transpose(1,2).transpose(0,1) # (R, batch, 2, xdim)
        # Sweep layers bottom-up (i indexes the PARENT layer; i==0 is the root).
        for i in reversed(range(self.layers - 1)):
            if i == self.layers-2: CN = 2 # 2 children in leaf layer (true/false leaves)
            else: CN = self.N
            if i == 0: PN = 1 # 1 parent in top layer (root of the spn)
            else: PN = self.N
            # Level-order sector indices of the child layer for get_edges().
            start_idx, end_idx = sum(self.layer_widths[:i+1]), sum(self.layer_widths[:i+2])
            grp_idx = torch.arange(start_idx, end_idx,device=x.device).unsqueeze(0).repeat(self.R,1) # (R, layer_width[i+1])
            r_idx = torch.arange(self.R,device=x.device).unsqueeze(1).repeat(1,self.layer_widths[i+1]) # (R, layer_width[i+1])
            edges = self.get_edges(r_idx,grp_idx).reshape(self.R, self.layer_widths[i+1], self.N, self.N)
            edges = edges[:,:,:PN,:CN] # (R, layer_width[i+1], PN, CN)
            # Normalize edge logits over children (sum nodes mix their children).
            edges = edges - edges.logsumexp(dim=3, keepdim=True) # (R, layer_width[i+1], PN, CN)
            cross_idx = torch.arange(CN).unsqueeze(0).repeat(PN,1)
            log_y = log_y[:, :, cross_idx, :] # (R, batch, PN, CN, layer_width[i+1])
            log_y = log_y + edges.unsqueeze(4).transpose(1,4)
            # Sum nodes: log-sum-exp over the children.
            log_y = torch.logsumexp(log_y, dim=3) # (R, batch, PN, layer_width[i+1])
            if i > 0:
                # Product nodes: add adjacent sectors pairwise; with an odd
                # count, fold the last sector into its neighbor first.
                if log_y.shape[-1] % 2:
                    log_y = torch.cat([log_y[...,:-2], log_y[...,-2:-1] + log_y[...,-1:]], dim=-1) # add together last 2 columns
                log_y = log_y[...,::2] + log_y[...,1::2]
        log_y = log_y[:,:,0,0] # (R, batch)
        # Mix replicas with normalized mixture weights.
        mix = self.mix - self.mix.logsumexp(dim=0)
        log_y = (log_y + mix.unsqueeze(1)).logsumexp(dim=0)
        return log_y # (batch)
def sample(self, batch):
    """Draw `batch` samples from the RAT-SPN.

    Descends the network top-down: picks one child per sum node with a
    Categorical over the (normalised) edge weights, concatenates children
    at product nodes, and finally picks a replica from the mixture
    weights.  Returns a (batch, xdim) tensor of 0/1 values in the
    original (un-permuted) variable order.
    """
    device = self.get_device()
    # The two possible leaf values (0 and 1) for every variable.
    p = torch.stack( [torch.zeros(self.xdim, device=device), torch.ones(self.xdim, device=device)], dim=0) # (2, xdim)
    p = p.unsqueeze(0).repeat(batch, 1, 1) # (batch, 2, xdim)
    p = p[:,:,self.leaf_perms] # (batch, 2, R, xdim)
    p = p.transpose(1,2).transpose(0,1) # (R, batch, 2, xdim)
    samp = p.unsqueeze(-1) # (R, batch, CN=2, xdim, D=1) last dimensions will grow as we merge samps
    # samp_tail carries the trailing (odd) sector that pairing would otherwise drop.
    samp_tail = samp[...,:1,:]
    for i in reversed(range(self.layers - 1)):
        if i == self.layers-2: CN = 2
        else: CN = self.N
        if i == 0: PN = 1
        else: PN = self.N
        # Same edge lookup/normalisation as in forward().
        start_idx, end_idx = sum(self.layer_widths[:i+1]), sum(self.layer_widths[:i+2])
        grp_idx = torch.arange(start_idx, end_idx,device=device).unsqueeze(0).repeat(self.R,1) # (R, layer_width[i+1])
        r_idx = torch.arange(self.R,device=device).unsqueeze(1).repeat(1,self.layer_widths[i+1]) # (R, layer_width[i+1])
        edges = self.get_edges(r_idx,grp_idx).reshape(self.R, self.layer_widths[i+1], self.N, self.N)
        edges = edges[:,:,:PN,:CN] # (R, layer_width[i+1], PN, CN)
        edges = edges - edges.logsumexp(dim=3, keepdim=True) # (R, layer_width[i+1], PN, CN)
        batch_edges = edges.unsqueeze(1).repeat(1,batch,1,1,1)
        # One child index per (replica, sample, sector, parent).
        samp_ch = D.categorical.Categorical(logits=batch_edges).sample() # (R, batch, layer_width[i+1], PN)
        samp_ch = samp_ch.transpose(2,3) # (R, batch, PN, layer_width[i+1])
        # Gather the chosen child's partial samples (and the tail copy).
        samp_ch_idx = samp_ch.unsqueeze(4).repeat(1,1,1,1,samp.size(4)) # (R, batch, PN, layer_width[i+1], D)
        samp = torch.gather(samp, 2, samp_ch_idx)
        samp_ch_idx = samp_ch.unsqueeze(4).repeat(1,1,1,1,samp_tail.size(4)) # (R, batch, PN, layer_width[i+1], D)
        samp_tail = torch.gather(samp_tail, 2, samp_ch_idx[...,-1:,:])
        if i > 0:
            # Product nodes: concatenate adjacent sectors' variable blocks.
            if samp.shape[-2] % 2:
                samp_tail = torch.cat([samp[...,-3:-2,:], samp[...,-2:-1,:], samp_tail], dim=-1)
                samp = samp[...,:-1,:]
                samp = torch.cat([samp[...,::2,:], samp[...,1::2,:]], dim=-1) # concat together adjacent children
            else:
                samp_tail = torch.cat([samp[...,-2:-1,:], samp_tail], dim=-1)
                samp = torch.cat([samp[...,::2,:], samp[...,1::2,:]], dim=-1) # concat together adjacent children
    # samp tail contains the samples
    # samp tail dim = (R, batch, 1, 1, xdim)
    samps = samp_tail.squeeze(2).squeeze(2) # (R, batch, xdim)
    # Undo each replica's leaf permutation to restore variable order.
    inv_perm_idx = self.inverse_leaf_perms.unsqueeze(1).repeat(1,batch,1) # (R, batch, xdim)
    inv_perm_idx = inv_perm_idx.to(device)
    samps = torch.gather(samps, 2, inv_perm_idx) # (R, batch, xdim) invert permutation
    # Pick one replica per sample according to the mixture weights.
    mix = self.mix - self.mix.logsumexp(dim=0)
    mix = mix.unsqueeze(0).repeat(batch, 1)
    mix_samp = D.categorical.Categorical(logits=mix).sample() # (batch)
    samps = samps[mix_samp, torch.arange(batch)]
    return samps
import logging
import random
from opencensus.trace import config_integration
from opencensus.trace.samplers import AlwaysOnSampler
from opencensus.trace.tracer import Tracer
from opencensus.stats import aggregation as aggregation_module
from opencensus.stats import measure as measure_module
from opencensus.stats import stats as stats_module
from opencensus.stats import view as view_module
from opencensus.tags import tag_map as tag_map_module
from opencensus.tags import tag_value as tag_value_module
# Azure Application Insights specific exporters
from opencensus.ext.azure.trace_exporter import AzureExporter
from opencensus.ext.azure.log_exporter import AzureLogHandler, AzureEventHandler
from opencensus.ext.azure import metrics_exporter
import config
# Demo script: wires OpenCensus tracing, logging and metrics to Azure
# Application Insights, then emits a sample span, log records, an exception
# and one custom metric measurement.
# make sure traces have proper trace and span id set
config_integration.trace_integrations(['logging'])
# AI: tracer with AzureExporter configured
tracer = Tracer(exporter=AzureExporter(connection_string=config.AI_CONNECTION_STR),sampler=AlwaysOnSampler())
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# AI: this will send the events to trace
logger.addHandler(AzureLogHandler(connection_string=config.AI_CONNECTION_STR))
# AI: this will send the events to customEvents
logger.addHandler(AzureEventHandler(connection_string=config.AI_CONNECTION_STR))
# Metrics plumbing: a single Int measure aggregated as a running sum.
stats = stats_module.stats
view_manager = stats.view_manager
stats_recorder = stats.stats_recorder
CARROTS_MEASURE = measure_module.MeasureInt("carrots",
"number of carrots",
"carrots")
CARROTS_VIEW = view_module.View("carrots_view",
"number of carrots",
[],
CARROTS_MEASURE,
aggregation_module.SumAggregation())
# AI: registing the metrics exporter with the view manager
view_manager.register_exporter(metrics_exporter.new_metrics_exporter(connection_string=config.AI_CONNECTION_STR))
view_manager.register_view(CARROTS_VIEW)
# Nested spans: log records inside inherit the active trace/span ids.
with tracer.span(name='hello world'):
    logger.warning('Before the inner span')
    with tracer.span(name='hello') as innerSpan:
        # NOTE(review): 'intkex' looks like a typo for 'intkey' -- the
        # annotation attribute will be recorded under the key 'intkex'.
        innerSpan.add_annotation("Some additional data here", textkey="textkey val", boolkey=False, intkex=31415926)
        logger.warning('In the inner span')
        logger.critical('error')
        try:
            result = 1 / 0 # generate a ZeroDivisionError
        except Exception:
            # custom_dimensions end up as queryable properties in AI.
            logger.exception('Captured an exception.', extra={'custom_dimensions': {'key_1': 'value_1', 'key_2': 'value_2'}})
    logger.warning('After the inner span')
# Record one tagged measurement of the carrots metric.
mmap = stats_recorder.new_measurement_map()
tmap = tag_map_module.TagMap()
tmap.insert("version", tag_value_module.TagValue("1.2.30"))
mmap.measure_int_put(CARROTS_MEASURE, 10+random.randrange(10))
mmap.record(tmap)
import numpy as np
import funcs
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as pltt
# Banner for the interactive script (Python 2 print statements).
print 'PSO routine to find minimum of defined objective function'
print '=========================================================\n'
#Standard PSO routine-------------------------------------------------------------------------------
def PSO(nparam,ndata,nswarm,objFunc,args,p):
    """Standard particle swarm optimisation.

    Minimises objFunc(particle, xdata) over `nparam` dimensions.

    Parameters:
        nparam  - number of parameters per particle
        ndata   - number of data points (unused here; kept for interface parity)
        nswarm  - number of particles
        objFunc - objective function f(params, xdata) -> scalar
        args    - objective function arguments; args[0] is xdata
        p       - (nswarm, nparam) array of initial positions (mutated in place)

    Returns the best swarm position found.

    BUG FIX: the previous-iteration objective values are now *copies*
    (`.copy()`); the original code aliased `Fobj_old = Fobj`, which made the
    "did this particle improve?" comparison trivially true every iteration.
    """
    # Organize parameters and arguments
    xdata = args[0]
    # PSO hyper-parameters
    k = 1           # iteration counter
    kmax = 100      # max iterations allowed
    c1 = 0.5        # weight of particle's best position distance
    c2 = 1.0        # weight of swarm's best position distance
    w = 0.5         # weight of particle's last velocity
    tol = 1e-20     # tolerance on the best objective value
    # Initialize solutions: seed the swarm best with the first particle.
    best_swarm_pos = np.array(p[0])
    best_swarm_obj = np.array(objFunc(p[0], xdata))
    best_particle_obj = np.empty((nswarm))
    best_particle_pos = np.empty((nswarm, nparam))
    Fobj = np.empty((nswarm))
    v = np.empty((nswarm, nparam))
    for i in range(0, nswarm):
        v[i] = np.random.rand(1, nparam)[0]   # random initial velocities
    for i in range(0, nswarm):
        best_particle_obj[i] = np.array(objFunc(p[i], xdata))
        best_particle_pos[i] = np.array(p[i])
    Fobj_old = best_particle_obj.copy()   # FIX: copy, not alias
    # MAIN LOOP
    while (k < kmax) and (best_swarm_obj > tol):
        # Evaluate all particles and update personal/swarm bests.
        for i in range(0, nswarm):
            Fobj[i] = objFunc(p[i], xdata)
            if Fobj[i] <= Fobj_old[i]:
                best_particle_obj[i] = Fobj[i]
                for j in range(0, nparam):
                    best_particle_pos[i][j] = p[i][j]
            if Fobj[i] <= best_swarm_obj:
                best_swarm_obj = Fobj[i]
                for j in range(0, nparam):
                    best_swarm_pos[j] = p[i][j]
        Fobj_old = Fobj.copy()   # FIX: snapshot this iteration's values
        # Update velocities and positions.
        for i in range(0, nswarm):
            for j in range(0, nparam):
                v[i][j] = w*v[i][j] + c1*np.random.rand()*(best_particle_pos[i][j]-p[i][j]) + c2*np.random.rand()*(best_swarm_pos[j]-p[i][j])
                p[i][j] = p[i][j] + v[i][j]
        k = k + 1
        # Single-string print works identically under Python 2 and 3.
        print('k %s %s %s' % (k, best_swarm_pos, best_swarm_obj))
    # Final swarm state dump.
    for i in range(0, nswarm):
        print('%s %s' % (p[i], Fobj[i]))
    return best_swarm_pos
#===================================================================================================
#Lennard-Jones PSO routine-------------------------------------------------------------------------------
def LJPSO(nparam,ndata,nswarm,objFunc,args,p):
    """Lennard-Jones-inspired PSO: particles repel each other so the swarm
    spreads over multiple minima; converged particles are frozen.

    Parameters as in PSO(); `p` (nswarm, nparam) is mutated in place.
    Returns the best swarm position. Plotting assumes nparam >= 2 (it uses
    the first two coordinates) and writes 'xyk.png' / 'xy.png'.

    BUG FIXES vs the original:
      * plotting used the undefined name `plt` (the module imports
        matplotlib.pyplot as `pltt`), which raised NameError at k=20;
      * `Fobj_old` aliased `Fobj`/`best_particle_obj` instead of copying,
        making the improvement test trivially true.
    """
    xdata = args[0]
    # PSO hyper-parameters
    k = 1            # iteration counter
    kmax = 10000     # max iterations allowed
    c1 = 0.5         # weight of particle's best position distance
    c2 = 1.0         # weight of swarm's best position distance
    w = 0.5          # weight of particle's last velocity
    tol = 1e-5       # per-particle convergence tolerance
    rc = 1.0         # cutoff radius for the inter-particle force
    m = np.linspace(1.0, 1.0, nswarm)  # particle masses
    flagcount = 0    # number of converged (frozen) particles
    # Initialize solutions
    best_swarm_pos = np.array(p[0])
    best_swarm_obj = np.array(objFunc(p[0], xdata))
    best_particle_obj = np.empty((nswarm))
    best_particle_pos = np.empty((nswarm, nparam))
    Fobj = np.empty((nswarm))
    v = np.empty((nswarm, nparam))
    a = np.zeros((nswarm, nparam))
    flag = np.empty((nswarm))          # convergence flags (stored as 0.0/1.0)
    for i in range(0, nswarm):
        v[i] = np.random.rand(1, nparam)[0]
        flag[i] = False
    for i in range(0, nswarm):
        best_particle_obj[i] = abs(np.array(objFunc(p[i], xdata)))
        best_particle_pos[i] = np.array(p[i])
    Fobj_old = best_particle_obj.copy()   # FIX: copy, not alias
    # MAIN LOOP: stop when enough particles have converged.
    while (k < kmax) and (flagcount <= nswarm/2):
        for i in range(0, nswarm):
            if flag[i] == False:
                Fobj[i] = abs(objFunc(p[i], xdata))
                # Update this particle's best position.
                if Fobj[i] <= Fobj_old[i]:
                    best_particle_obj[i] = Fobj[i]
                    for j in range(0, nparam):
                        best_particle_pos[i][j] = p[i][j]
                    if best_particle_obj[i] < tol:
                        # Particle converged: freeze it at its best position.
                        flag[i] = True
                        p[i] = best_particle_pos[i]
                        flagcount = flagcount + 1
                # Update the swarm best position.
                if Fobj[i] <= best_swarm_obj:
                    best_swarm_obj = Fobj[i]
                    for j in range(0, nparam):
                        best_swarm_pos[j] = p[i][j]
                # Newly converged particle: pull the swarm toward it.
                if flag[i] == True:
                    for j in range(0, nparam):
                        best_swarm_pos[j] = p[i][j]
        Fobj_old = Fobj.copy()   # FIX: snapshot this iteration's values
        # Standard PSO velocity/position update for active particles.
        for i in range(0, nswarm):
            if flag[i] == False:
                for j in range(0, nparam):
                    v[i][j] = w*v[i][j] + c1*np.random.rand()*(best_particle_pos[i][j]-p[i][j]) + c2*np.random.rand()*(best_swarm_pos[j]-p[i][j])
                    p[i][j] = p[i][j] + v[i][j]
        # Pairwise repulsive forces for particles closer than the cutoff.
        F = np.zeros((nswarm, nparam))
        for i in range(0, nswarm):
            for l in range(i + 1, nswarm):
                if flag[i] == True and flag[l] == True:
                    continue  # both frozen - no force between them
                d = 0
                for j in range(0, nparam):
                    d = d + (p[i][j] - p[l][j]) ** 2
                rij = d ** 0.5
                if rij < rc:
                    for j in range(0, nparam):
                        # Randomised cosine repulsion kernel (LJ force kept in
                        # the original as commented-out alternatives).
                        Fij = np.random.rand()*3.14/rc*math.cos(3.14*rij/rc)*(p[i][j]-p[l][j])
                        F[i][j] = F[i][j] + Fij
                        F[l][j] = F[l][j] - Fij
        # Apply accelerations to active particles.
        for i in range(0, nswarm):
            if flag[i] == False:
                for j in range(0, nparam):
                    a[i][j] = F[i][j] / m[i]
                    p[i][j] = p[i][j] + a[i][j]
        # Snapshot plot every 20 iterations (FIX: pltt, not undefined plt).
        if k % 20 == 0:
            xa = np.empty((nswarm))
            ya = np.empty((nswarm))
            xc = np.empty((flagcount))
            yc = np.empty((flagcount))
            qq = 0
            for ww in range(0, nswarm):
                xa[ww] = best_particle_pos[ww][0]
                ya[ww] = best_particle_pos[ww][1]
                if flag[ww] == True:
                    xc[qq] = best_particle_pos[ww][0]
                    yc[qq] = best_particle_pos[ww][1]
                    qq = qq + 1
            fig = pltt.figure()
            pltt.plot(xa, ya, 'r.')
            pltt.plot(xc, yc, 'g^')
            pltt.xlim(-10, 10)
            pltt.ylim(-10, 10)
            pltt.savefig('xyk.png')
        k = k + 1
        print('k %s %s %s %s %s' % (k, best_swarm_pos, best_swarm_obj, np.amax(best_particle_pos), flagcount))
    # Final report and plot of the converged particles.
    for i in range(0, nswarm):
        print('%s %s' % (best_particle_pos[i], best_particle_obj[i]))
    x = np.empty((flagcount))
    y = np.empty((flagcount))
    idx = 0
    for i in range(0, nswarm):
        if flag[i] == True:
            x[idx] = best_particle_pos[i][0]
            y[idx] = best_particle_pos[i][1]
            idx = idx + 1
    fig = pltt.figure()
    pltt.plot(x, y, 'r.')
    pltt.savefig('xy.png')
    return best_swarm_pos
#===================================================================================================
#----------data to create optimum data---------
# NOTE(review): `best` is populated but never read in this script.
best = []
best.append(7.39)
best.append(-3.39)
xdata = np.array([1,2,3,4,5,6,7,8,9,10])
#ydata = funcs.quad12(5,5)
# Reference output generated from the known optimum (5, 5).
ydata = funcs.quad12(5,5)
#Particles and PSO definitions
nparameter = 2
ndata = 10
nswarm = 200
data = []
data.append(xdata)
# Random initial particle positions in [0, 10)^nparameter.
p = np.empty((nswarm,nparameter))
for i in range(0,nswarm):
    p[i] = np.random.rand(1,nparameter)[0]*10
print 'The data is:'
print 'x = ',xdata
print 'y = ',ydata
# Sanity check: objective value at the known optimum.
opt_par = np.empty((nparameter))
opt_par[0] = 5
opt_par[1] = 5
yobj = funcs.quad12_obj(opt_par,xdata)
print 'yobj',yobj
# Blocks until the user presses Enter (Python 2 raw_input).
raw_input('.....')
par_opt = LJPSO(nparameter,ndata,nswarm,funcs.quad12_obj,data,p)
ycal = funcs.quad12(par_opt[0],par_opt[1])
ycal_obj = funcs.quad12_obj(par_opt,xdata)
print 'y',ycal,ycal_obj
|
from pyasn1.type.constraint import ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, \
ValueSizeConstraint
# Sentinels for unbounded constraint endpoints: an endpoint equal to
# MIN/MAX is stored as None by the constraint wrappers below.
MIN = float('-inf')
MAX = float('inf')
class NoConstraint(ConstraintsIntersection):
    """Placeholder for an absent PER constraint.

    Exposes the same endpoint/extension attributes as the other wrappers so
    callers can treat every constraint uniformly; repr() yields 'None' to
    signal that no constraint applies.
    """
    def __init__(self):
        self.lowerEndpoint = self.upperEndpoint = None
        self.extensionMarker = False
        super(NoConstraint, self).__init__()

    def __repr__(self):
        return str(None)
class ExtensionMarker(ConstraintsIntersection):
    """Records only whether an ASN.1 extension marker ('...') is present."""
    def __init__(self, present=False):
        self.lowerEndpoint = self.upperEndpoint = None
        self.extensionMarker = present
        super(ExtensionMarker, self).__init__()
class SequenceOfValueSize(ConstraintsIntersection):
    """Size constraint intended for SEQUENCE OF types.

    For an ASN.1 schema such as::

        MySeqOf ::= SEQUENCE (SIZE(1..2)) OF OCTET STRING

    declare the Python type as::

        class MySeqOf(SequenceOfType):
            subtypeSpec = SequenceOfValueSize(1, 2)
            componentType = OctetStringType()

    This class exists because asn1PERser's 'ValueSize' and pyasn1's
    'ValueSizeConstraint' apply the size check to the componentType rather
    than to the SEQUENCE OF itself, so no exception would be raised when a
    3rd (4th, 5th, ...) OCTET STRING is appended to MySeqOf.
    """
    def __init__(self, lower_endpoint, upper_endpoint, extensionMarker=False):
        self.extensionMarker = extensionMarker
        self.lowerEndpoint = lower_endpoint if lower_endpoint != MIN else None
        self.upperEndpoint = upper_endpoint if upper_endpoint != MAX else None
        super(SequenceOfValueSize, self).__init__()
class ValueRange(ValueRangeConstraint):
    """Value-range constraint that is skipped when an extension marker is set.

    MIN/MAX endpoints are normalised to None (unbounded).
    """
    def __init__(self, lower_endpoint, upper_endpoint, extensionMarker=False):
        self.extensionMarker = extensionMarker
        self.lowerEndpoint = lower_endpoint if lower_endpoint != MIN else None
        self.upperEndpoint = upper_endpoint if upper_endpoint != MAX else None
        super(ValueRange, self).__init__(lower_endpoint, upper_endpoint)

    def _testValue(self, value, idx):
        if self.extensionMarker:
            return None
        return super(ValueRange, self)._testValue(value, idx)
class SingleValue(SingleValueConstraint):
    """Single-value constraint that is skipped when an extension marker is set.

    Note the unusual signature: the extension flag comes first, followed by
    the permitted values.
    """
    def __init__(self, extension_marker=False, *values):
        self.extensionMarker = extension_marker
        self.lowerEndpoint = min(values)
        self.upperEndpoint = max(values)
        super(SingleValue, self).__init__(*values)

    def _testValue(self, value, idx):
        if self.extensionMarker:
            return None
        return super(SingleValue, self)._testValue(value, idx)
class ValueSize(ValueSizeConstraint):
    """Size constraint that is skipped when an extension marker is set.

    MIN/MAX endpoints are normalised to None (unbounded).
    """
    def __init__(self, lower_endpoint, upper_endpoint, extensionMarker=False):
        self.extensionMarker = extensionMarker
        self.lowerEndpoint = lower_endpoint if lower_endpoint != MIN else None
        self.upperEndpoint = upper_endpoint if upper_endpoint != MAX else None
        super(ValueSize, self).__init__(lower_endpoint, upper_endpoint)

    def _testValue(self, value, idx):
        if self.extensionMarker:
            return None
        return super(ValueSize, self)._testValue(value, idx)
class ConstraintOr(ConstraintsUnion):
    """Union (OR) of constraints, skipped when an extension marker is set.

    Endpoints are the overall min/max of the member constraints' endpoints.
    """
    def __init__(self, extensionMarker=False, *constraints):
        self.extensionMarker = extensionMarker
        self.lowerEndpoint = min(c.lowerEndpoint for c in constraints)
        self.upperEndpoint = max(c.upperEndpoint for c in constraints)
        super(ConstraintOr, self).__init__(*constraints)

    def _testValue(self, value, idx):
        if self.extensionMarker:
            return None
        return super(ConstraintOr, self)._testValue(value, idx)
class ConstraintAnd(ConstraintsIntersection):
    """Intersection (AND) of constraints, skipped when an extension marker is set.

    Endpoints are the overall min/max of the member constraints' endpoints.
    """
    def __init__(self, extensionMarker=False, *constraints):
        self.extensionMarker = extensionMarker
        self.lowerEndpoint = min(c.lowerEndpoint for c in constraints)
        self.upperEndpoint = max(c.upperEndpoint for c in constraints)
        super(ConstraintAnd, self).__init__(*constraints)

    def _testValue(self, value, idx):
        if self.extensionMarker:
            return None
        return super(ConstraintAnd, self)._testValue(value, idx)
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.template import RequestContext
from app.authhelper import get_signin_url, get_token_from_code
import requests
import urllib
def home(request):
    """Site entry point: forward every visitor to the groups listing."""
    groups_url = reverse('groups')
    return HttpResponseRedirect(groups_url)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The following are routes specific to the web
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def groups(request):
    """List the signed-in user's joined Office 365 groups.

    Redirects to the login view when no web access token is in the session.
    """
    access_token = request.session.get('access_token_web')
    if access_token is None:  # FIX: identity check for None, not ==
        return HttpResponseRedirect(reverse('login'))
    # Send these headers with all API calls.
    headers = { 'User-Agent' : 'python_tutorial/1.0',
                'Authorization' : 'Bearer {0}'.format(access_token),
                'Accept' : 'application/json' }
    # Perform the GET for groups using the O365 Unified API.
    response = requests.get("https://graph.microsoft.com/beta/me/joinedgroups", headers = headers, params = None)
    payload = response.json()  # parse once instead of twice
    # API root (includes the tenant), used by the template to build detail links.
    meta = payload['@odata.context']
    meta = meta[:meta.rfind('/')]
    return render(
        request,
        'app/index.html',
        context_instance = RequestContext(request,
        {
            'title':'My Groups',
            'request': request,
            'groups': payload['value'],
            'apiroot': meta
        }))
def detail(request, group_id):
    """Show the member list for one group (web flow).

    Requires a web access token in the session and a 'root' query parameter
    carrying the API root; otherwise redirects appropriately.
    """
    access_token = request.session.get('access_token_web')
    if access_token is None:  # FIX: identity check for None, not ==
        return HttpResponseRedirect(reverse('login'))
    apiroot = request.GET.get('root', '')
    if apiroot == '':
        # Can't build the members URL without the API root - back to the list.
        return HttpResponseRedirect(reverse('groups'))
    # Send these headers with all API calls.
    headers = { 'User-Agent' : 'python_tutorial/1.0',
                'Authorization' : 'Bearer {0}'.format(access_token),
                'Accept' : 'application/json' }
    # Perform the GET for the group's members using the O365 Unified API.
    url = apiroot + '/groups/' + group_id + '/members'
    response = requests.get(url, headers = headers, params = None)
    return render(
        request,
        'app/detail.html',
        context_instance = RequestContext(request,
        {
            'title':'My Group Membership',  # FIX: was misspelled 'Memebership'
            'request': request,
            'members': response.json()['value']
        }))
def login(request):
    """Web sign-in flow.

    Without an OAuth 'code' query parameter, renders the login page with a
    sign-in URL; with one, exchanges it for an access token, stores the token
    in the session, and redirects to the groups view.
    """
    auth_code = request.GET.get('code', '')
    redirect_uri = request.build_absolute_uri(reverse('login'))
    if auth_code == '':
        # First visit: show the sign-in link.
        sign_in_url = get_signin_url(redirect_uri)
        return render(
            request,
            'app/login.html',
            context_instance = RequestContext(request,
            {
                'title':'Login',
                'request': request,
                'redirect': sign_in_url,
                'isAddin': False
            }))
    # Returning from authorize: exchange the code for a token.
    access_token = get_token_from_code(auth_code, redirect_uri, "https://graph.microsoft.com")
    request.session['access_token_web'] = access_token
    return HttpResponseRedirect(reverse('groups'))
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The following are routes specific add-ins
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def addingroups(request):
    """List the user's joined groups for the add-in flow.

    Same as groups() but uses the add-in session token and template.
    """
    access_token = request.session.get('access_token_addin')
    if access_token is None:  # FIX: identity check for None, not ==
        return HttpResponseRedirect(reverse('addinlogin'))
    # Send these headers with all API calls.
    headers = { 'User-Agent' : 'python_tutorial/1.0',
                'Authorization' : 'Bearer {0}'.format(access_token),
                'Accept' : 'application/json' }
    # Perform the GET for groups using the O365 Unified API.
    response = requests.get("https://graph.microsoft.com/beta/me/joinedgroups", headers = headers, params = None)
    payload = response.json()  # parse once instead of twice
    # API root (includes the tenant), used to build detail links.
    meta = payload['@odata.context']
    meta = meta[:meta.rfind('/')]
    return render(
        request,
        'app/addin_index.html',
        context_instance = RequestContext(request,
        {
            'title':'My Groups',
            'request': request,
            'groups': payload['value'],
            'apiroot': meta
        }))
def addindetail(request, group_id):
    """Show one group's member list for the add-in flow.

    Also re-fetches the groups list so the add-in template can render the
    group picker alongside the members.
    """
    access_token = request.session.get('access_token_addin')
    if access_token is None:  # FIX: identity check for None, not ==
        return HttpResponseRedirect(reverse('addinlogin'))
    apiroot = request.GET.get('root', '')
    if apiroot == '':
        return HttpResponseRedirect(reverse('addingroups'))
    # Send these headers with all API calls.
    headers = { 'User-Agent' : 'python_tutorial/1.0',
                'Authorization' : 'Bearer {0}'.format(access_token),
                'Accept' : 'application/json' }
    # First get the groups data again...could be cached instead.
    groupsResponse = requests.get("https://graph.microsoft.com/beta/me/joinedgroups", headers = headers, params = None)
    groupsPayload = groupsResponse.json()
    meta = groupsPayload['@odata.context']
    meta = meta[:meta.rfind('/')]
    # Perform the GET for the group's members.
    url = apiroot + '/groups/' + group_id + '/members'
    membershipResponse = requests.get(url, headers = headers, params = None)
    return render(
        request,
        'app/addin_detail.html',
        context_instance = RequestContext(request,
        {
            'title':'My Group Membership',  # FIX: was misspelled 'Memebership'
            'request': request,
            # NOTE(review): unlike detail(), this passes the raw response body
            # (bytes) rather than parsed JSON - presumably what
            # addin_detail.html expects; confirm before changing.
            'members': membershipResponse.content,
            'groups': groupsPayload['value'],
            'apiroot': meta
        }))
def addinlogin(request):
    """Add-in sign-in flow.

    Mirrors login() but stores the token under the add-in session key and
    redirects to the add-in groups view.
    """
    auth_code = request.GET.get('code', '')
    redirect_uri = request.build_absolute_uri(reverse('addinlogin'))
    if auth_code == '':
        # First visit: show the sign-in link.
        sign_in_url = get_signin_url(redirect_uri)
        return render(
            request,
            'app/login.html',
            context_instance = RequestContext(request,
            {
                'title':'Login',
                'request': request,
                'redirect': sign_in_url,
                'isAddin': True
            }))
    # Returning from authorize: exchange the code for a token.
    access_token = get_token_from_code(auth_code, redirect_uri, "https://graph.microsoft.com")
    request.session['access_token_addin'] = access_token
    return HttpResponseRedirect(reverse('addingroups'))
import re
class Config:
    """Reads ./config_file.txt ('name:value' per line) into a typed dict.

    Values are converted according to the key: a few keys stay strings,
    learning-rate keys become floats, and everything else becomes int.

    FIXES vs the original: the 40-branch elif chain is replaced by key-type
    sets; the bare `except:` (which printed a stale or unbound `variable`
    when a line had no ':' separator) now skips separator-less lines and
    only reports genuine conversion failures; values may contain ':' thanks
    to split(':', 1).
    """

    # Keys whose values are kept as raw strings.
    _STRING_KEYS = frozenset([
        'dataset',
        'loss_function_autoencoder', 'loss_function_classifier',
        'optimizer_autoencoder', 'optimizer_classifier',
    ])
    # Keys parsed as floats (learning rates).
    _FLOAT_KEYS = frozenset([
        'mnist_la_autoencoder', 'mnist_la_autoencoder_classifier',
        'mnist_la_simple_classifier',
        'fashion_mnist_la_autoencoder', 'fashion_mnist_la_autoencoder_classifier',
        'fashion_mnist_la_simple_classifier',
        'cifar10_la_autoencoder', 'cifar10_la_autoencoder_classifier',
        'cifar10_la_simple_classifier',
        'digits_la_autoencoder', 'digits_la_autoencoder_classifier',
        'digits_la_simple_classifier',
    ])

    def __init__(self):
        # All recognised settings; any key not in the sets above parses as int.
        self.data = {
            'dataset' : None,
            'mnist_la_autoencoder' : None,
            'mnist_la_autoencoder_classifier' : None,
            'mnist_la_simple_classifier' : None,
            'fashion_mnist_la_autoencoder' : None,
            'fashion_mnist_la_autoencoder_classifier' : None,
            'fashion_mnist_la_simple_classifier' : None,
            'cifar10_la_autoencoder' : None,
            'cifar10_la_autoencoder_classifier' : None,
            'cifar10_la_simple_classifier' : None,
            'digits_la_autoencoder' : None,
            'digits_la_autoencoder_classifier' : None,
            'digits_la_simple_classifier' : None,
            'loss_function_autoencoder' : None,
            'loss_function_classifier' : None,
            'optimizer_autoencoder' : None,
            'optimizer_classifier' : None,
            'mnist_latent_size' : None,
            'fashion_mnist_latent_size' : None,
            'cifar10_latent_size' : None,
            'digits_latent_size' : None,
            'mnist_autoencoder_epochs' : None,
            'fashion_mnist_autoencoder_epochs' : None,
            'cifar10_autoencoder_epochs' : None,
            'digits_autoencoder_epochs' : None,
            'mnist_classifier_epochs' : None,
            'fashion_mnist_classifier_epochs' : None,
            'cifar10_classifier_epochs' : None,
            'digits_classifier_epochs' : None,
            'num_reconstructions': None,
            'plot_tSNE': None,
            'plot_learning': None,
            'freeze' : None,
            'mnist_D1D2_fraction' : None,
            'mnist_D2_training_fraction' : None,
            'fashion_mnist_D1D2_fraction' : None,
            'fashion_mnist_D2_training_fraction' : None,
            'cifar10_D1D2_fraction' : None,
            'cifar10_D2_training_fraction' : None,
            'digits_D1D2_fraction' : None,
            'digits_D2_training_fraction' : None
        }

    def get_config(self):
        """Re-read the config file and return the settings dict."""
        self.read_config()
        return self.data

    def read_config(self):
        """Parse ./config_file.txt.

        Lines without a ':' separator are ignored; lines whose value fails
        conversion are reported and skipped.
        """
        with open("./config_file.txt", "r") as f:
            config_data = f.readlines()
        for line in config_data:
            line = line.strip("\n")
            try:
                variable, value = line.split(":", 1)
            except ValueError:
                continue  # no separator on this line - not a setting
            if variable in self.data:
                try:
                    self.parse_data(variable, value)
                except ValueError:
                    print(variable + " is missing a value.")

    def parse_data(self, variable, data):
        """Convert `data` per the key's type and store it in self.data.

        Unknown keys are ignored (matching the original elif chain, which
        simply fell through). Raises ValueError on a bad int/float literal.
        """
        if variable not in self.data:
            return
        if variable in self._STRING_KEYS:
            self.data[variable] = data
        elif variable in self._FLOAT_KEYS:
            self.data[variable] = float(data)
        else:
            self.data[variable] = int(data)
|
# Print a short two-line greeting.
for greeting in ("This is Anjar", "Welcome to Python Tutorial by Anjar"):
    print(greeting)
from django.db import models
from django.utils.timezone import now
class forecast_img(models.Model):
    # Uploaded forecast image (stored under MEDIA_ROOT/forecast_img).
    img = models.ImageField(upload_to='forecast_img')
    # Display name for the image.
    name = models.CharField(max_length=100)
class forecastol_img(models.Model):
    '''
    Disabled draft fields kept for reference:
    name = models.CharField(max_length=50, default="")
    time = models.DateTimeField(default=now())
    material = models.CharField(max_length=50, default="")
    origin = models.CharField(max_length=50, default="")
    ifRot = models.CharField(max_length=50, default="")
    ifRot_result=models.CharField(max_length=50, default="")
    '''
    # NOTE(review): if the 'time' field above is ever enabled, use
    # default=now (the callable), not default=now(), which would freeze the
    # timestamp at class-definition time.
    # Path (or URL root) of the stored forecast image.
    imgroot=models.CharField(max_length=100, default="")
|
import core.models as coremodels
# Remote damadam service endpoint and HTTP basic-auth credentials.
# SECURITY(review): credentials are hard-coded in source control - move them
# to environment variables or a secrets store.
damadam_url = 'http://apollo7788.pagekite.me'
damadam_user = 'aasanads'
damadam_pass = 'damadam1234'
import json
import unirest
def deleteTopup(tid, callback):
    """POST to /api/ad/delete/ for topup `tid`; `callback` receives the response."""
    payload = json.dumps({'tid': tid})
    unirest.post(damadam_url + "/api/ad/delete/", headers={ "Content-type": "application/json" },params=payload, auth=(damadam_user, damadam_pass), callback=callback)
def resumeTopup(tid, callback):
    """POST to /api/ad/resume/ for topup `tid`; `callback` receives the response."""
    payload = json.dumps({'tid': tid})
    unirest.post(damadam_url + "/api/ad/resume/", headers={ "Content-type": "application/json" },params=payload, auth=(damadam_user, damadam_pass), callback=callback)
def suspendTopup(topup, callback):
    """POST to /api/ad/suspend/ for topup id `topup`; `callback` receives the response."""
    payload = json.dumps({'tid': topup})
    unirest.post(damadam_url + "/api/ad/suspend/", headers={ "Content-type": "application/json" },params=payload, auth=(damadam_user, damadam_pass), callback=callback)
def sendAd(ad1, clicks, tid):
    # Push a live ad to the remote service with its click budget and topup id.
    # NOTE(review): assumes ad1.to_json() returns a dict (it is indexed
    # below) - confirm against the Ad model.
    print 'data'
    data = ad1.to_json()
    data['clicks'] = clicks
    data['tid'] = tid
    data = json.dumps(data)
    # NOTE(review): unirest.post with callback= is presumably non-blocking,
    # so `response` here may be an async handle rather than the HTTP
    # response; the real response arrives in ad_sent_callback.
    response = unirest.post(damadam_url + "/api/ad/live/", headers={ "Content-type": "application/json" },params=data, auth=(damadam_user, damadam_pass), callback=ad_sent_callback)
    print response
def ad_sent_callback(response):
    # Completion hook for sendAd: just logs the response body to stdout.
    print response.body
# -*- coding:utf-8 -*-
# 递归解法
'''
解决两个字符串的动态规划问题,一般都是用两个指针 i,j 分别指向两个字符串的最后,然后一步步往前走,缩小问题的规模。
'''
'''
dp(i, j)的定义:返回 s1[0..i] 和 s2[0..j] 的最小编辑距离
'''
def min_distance(s1, s2):
    """Minimum edit distance between s1 and s2 (plain recursion).

    dist(a, b) returns the edit distance between s1[0..a] and s2[0..b].
    Exponential without memoisation - see the memoised version below.
    """
    def dist(a, b):
        if a == -1:        # s1 exhausted: insert the remaining b+1 chars
            return b + 1
        if b == -1:        # s2 exhausted: delete the remaining a+1 chars
            return a + 1
        if s1[a] == s2[b]:
            return dist(a - 1, b - 1)
        insert = dist(a, b - 1)
        delete = dist(a - 1, b)
        replace = dist(a - 1, b - 1)
        return 1 + min(insert, delete, replace)
    return dist(len(s1) - 1, len(s2) - 1)
# 上面方法存在重叠子问题,可以用备忘录或DP table 进行优化
# 对于重叠子问题,优化方法无非是备忘录或者 DP table
#
# 备忘录优化
def min_distance(s1, s2):
    """Minimum edit distance between s1 and s2 (memoised top-down DP)."""
    cache = {}
    def dist(a, b):
        # Base cases: one string exhausted -> insert/delete the remainder.
        if a == -1:
            return b + 1
        if b == -1:
            return a + 1
        key = (a, b)
        if key not in cache:
            if s1[a] == s2[b]:
                cache[key] = dist(a - 1, b - 1)
            else:
                # insert / delete / replace, whichever is cheapest
                cache[key] = 1 + min(dist(a, b - 1), dist(a - 1, b), dist(a - 1, b - 1))
        return cache[key]
    return dist(len(s1) - 1, len(s2) - 1)
# DP table 优化
# DP table 是自底向上求解,递归解法是自顶向下求解
import numpy as np
def min_distance(s1, s2):
    """Edit distance via a bottom-up DP table; prints the table and returns
    dp[len(s1)][len(s2)] (a numpy float)."""
    rows, cols = len(s1), len(s2)
    dp = np.zeros((rows + 1, cols + 1))
    # First row/column: converting to/from the empty prefix costs its length.
    dp[:, 0] = np.arange(rows + 1)
    dp[0, :] = np.arange(cols + 1)
    for i in range(1, rows + 1):
        for j in range(1, cols + 1):
            if s1[i - 1] == s2[j - 1]:
                dp[i][j] = dp[i - 1][j - 1]
            else:
                # insert / delete / replace
                dp[i][j] = min(dp[i][j - 1] + 1, dp[i - 1][j] + 1, dp[i - 1][j - 1] + 1)
    print('dp table is:\n',dp)
    return dp[rows][cols]
def min_distance(s1, s2):
    """Edit distance DP that additionally records, in `op`, which operation
    produced each cell; prints the op table and returns the distance.

    Op codes: 0 = characters matched, 1 = insert, 2 = delete, 3 = replace
    (first minimum wins on ties)."""
    rows, cols = len(s1), len(s2)
    dp = np.zeros((rows + 1, cols + 1))
    op = np.zeros((rows + 1, cols + 1))
    dp[:, 0] = np.arange(rows + 1)
    dp[0, :] = np.arange(cols + 1)
    for i in range(1, rows + 1):
        for j in range(1, cols + 1):
            if s1[i - 1] == s2[j - 1]:
                dp[i][j] = dp[i - 1][j - 1]
                op[i][j] = 0
            else:
                # Candidates in insert / delete / replace order.
                candidates = [dp[i][j - 1] + 1, dp[i - 1][j] + 1, dp[i - 1][j - 1] + 1]
                best = candidates.index(min(candidates))
                dp[i][j] = candidates[best]
                op[i][j] = best + 1
    print('dp table is:\n',op)
    return dp[rows][cols]
# Demo: edit distance between 'mt' and 'ma' (one substitution), then stop.
s1 = 'mt'
s2 = 'ma'
r = min_distance(s1, s2)
print('the minimum value is: ',r)
exit()
'''
动态规划求莱文斯坦距离
求解思路:
将字符串a按列算,字符串b按行算,如下结构:
m t a c n u
m
i
t
c
m
u
莱文斯坦距离衡量的是字符串的差异化程度
'''
# 争哥版
import numpy as np
def levenshtein_distance(str_a, str_b):
    """Print and return the Levenshtein distance between str_a and str_b.

    str_b indexes rows and str_a indexes columns of the DP table (as in the
    original lecture code).

    Bug fixed: the original first-row/first-column initialisation only checked
    a character match at position 0, so e.g. ('xm', 'm') came out as 2 instead
    of 1. Using a (len_b+1) x (len_a+1) table with explicit empty prefixes
    gives the standard, correct recurrence. The value is now also returned
    (previously None), which is backward compatible.
    """
    len_a = len(str_a)
    len_b = len(str_b)
    states = np.zeros((len_b + 1, len_a + 1))
    # Converting an empty prefix costs as many insertions as the target length.
    for j in range(len_a + 1):
        states[0][j] = j
    for i in range(len_b + 1):
        states[i][0] = i
    for i in range(1, len_b + 1):
        for j in range(1, len_a + 1):
            cost = 0 if str_b[i - 1] == str_a[j - 1] else 1
            states[i][j] = min(states[i - 1][j] + 1,          # delete
                               states[i][j - 1] + 1,          # insert
                               states[i - 1][j - 1] + cost)   # match/replace
    print(states[len_b][len_a])
    return states[len_b][len_a]
# Demo: print the distance for the sample pair, then stop -- everything after
# the exit() below is unreachable reference code.
str_a = 'mtacnu'
str_b = 'mitcm'
levenshtein_distance(str_a, str_b)
exit()
len_a = len(str_a)
len_b = len(str_b)
states = np.zeros((len_b, len_a))
# 处理第一行,遍历字符串a
for j in range(len_a):
if str_a[j] == str_b[0] and j == 0:
states[0][0] = 0
else:
states[0][j] = states[0][j-1] + 1
# 处理第一列,遍历字符串b
for i in range(len_b):
if str_b[i] == str_a[0] and i == 0:
states[i][0] = 0
else:
states[i][0] = states[i-1][0] + 1
# 从第二行,第二列开始处理
for i in range(1, len_b):
for j in range(1, len_a):
if str_a[j] == str_b[i]:
# 状态转换方程
# 从操作层面来理解states[i-1][j]和states[i][j-1]为什么需要+1?
# 如果a[i]和b[j]不相等,可以删除a[i],然后递归考察a[i+1]和b[i],则操作+1
# 现在反过来想,a[i]和b[j]相等了,则在a[i-1]和b[j]状态时,势必进行了一次操作
states[i][j] = min(states[i-1][j]+1, states[i][j-1]+1, states[i-1][j-1])
else:
states[i][j] = min(states[i-1][j]+1, states[i][j-1]+1, states[i-1][j-1]+1)
print(states)
print(states[len_b-1][len_a-1])
|
def generateKey(string, key):
    """Return the Vigenere key stream for `string`: `key` cycled until it
    covers the whole message.

    Bug fixed: when the lengths matched, the original returned the key as a
    *list* of characters while every other path returned a string; the result
    is now always a string.
    """
    key = list(key)
    if len(string) == len(key):
        return ("".join(key))
    else:
        # Appending key[i % len(key)] while the list grows effectively cycles
        # the original key until the stream is len(string) characters long.
        for i in range(len(string) -
                       len(key)):
            key.append(key[i % len(key)])
    return ("".join(key))
def cipher_text(string, key):
    """Encrypt uppercase `string` with the Vigenere cipher using the
    per-character key stream `key` (A-Z output)."""
    encrypted = []
    for i, ch in enumerate(string):
        # Shift by the key character, wrap into the 26-letter alphabet.
        shifted = (ord(ch) + ord(key[i])) % 26 + ord('A')
        encrypted.append(chr(shifted))
    return ("".join(encrypted))
# Interactive demo: read message and key, then print the Vigenere ciphertext.
string = input("Enter a String: ")
string1 = string.upper()
keyword = input("Enter a key: ")
keyword1 = keyword.upper()
key = generateKey(string1, keyword1)
cipher = cipher_text(string1, key)
print("Cipher Text is: \n"+ cipher)
from datetime import datetime
from django import forms
from django.contrib import admin
from organisations.models import OrganisationDivision
class CurrentDivisionFilter(admin.SimpleListFilter):
    """Admin sidebar filter: limit divisions to those current as of today."""
    title = "Current Divisions"
    parameter_name = "is_current"
    def lookups(self, request, model_admin):
        # Single opt-in choice; leaving it unselected applies no filtering.
        return (("true", "Current Divisions"),)
    def queryset(self, request, queryset):
        if self.value() == "true":
            # filter_by_date: custom queryset method -- presumably defined on
            # the OrganisationDivision manager; confirm in organisations.models.
            return queryset.filter_by_date(datetime.today())
        return queryset
class TempIdFilter(admin.SimpleListFilter):
    """Admin sidebar filter exposing a single "With Temp ID" toggle."""
    title = "With Temp ID"
    parameter_name = "has_temp_id"
    def lookups(self, request, model_admin):
        # One opt-in choice; anything else leaves the queryset untouched.
        return (("true", "With Temp ID"),)
    def queryset(self, request, queryset):
        if self.value() != "true":
            return queryset
        return queryset.filter_with_temp_id()
class OrganisationDivisionAdminForm(forms.ModelForm):
    """Default ModelForm over every OrganisationDivision field."""
    class Meta:
        model = OrganisationDivision
        fields = "__all__"
class OrganisationDivisionAdmin(admin.ModelAdmin):
    """Admin for OrganisationDivision: identifier/name search plus the custom
    current-division and temp-id filters."""
    list_display = ("official_identifier", "name", "divisionset")
    ordering = ("divisionset", "name")
    search_fields = ("official_identifier", "name")
    list_filter = [CurrentDivisionFilter, TempIdFilter, "division_type"]
    form = OrganisationDivisionAdminForm
    readonly_fields = ["created", "modified"]
    def get_queryset(self, request):
        # Defer the (large) geography column so changelist queries stay light.
        return super().get_queryset(request).defer("geography")
|
#coding:utf-8
def script(s, player=None):
    """Post-script hook for quest objective "testobj1": fills the sensor's
    description template from the plant linked to the player's current quest."""
    # NOTE(review): Quest import appears unused -- confirm it has no import
    # side effects before removing.
    from NaoQuest.quest import Quest
    from NaoSensor.plant import Plant
    if not player:
        print("Error in execution of post_script \"testobj1_post\": player is None")
        return
    # Grab the plant tied to this quest to adjust what Nao must say.
    s.desc = s.desc.format(player.current_quest.plante_lie.get_data(Plant.PLANTATION)["recouvrir"])
|
from preprocessing.misc_processing import *
from preprocessing.evaluator import *
from preprocessing.dataset_loader import *
from preprocessing.dataset import *
from preprocessing.tweet_paraphrase import *
from preprocessing.mrpc import *
|
from RPi import GPIO
import datetime
import logging
import subprocess
from Lib import *
from pathlib import Path
import mysql.connector as mariadb
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("MAIN_LOG")
class Device:
    """Coffee-machine alarm clock controller (Raspberry Pi GPIO).

    Construction resets the GPIO, instantiates all peripherals, syncs the
    DS3121 RTC with system time, attaches an edge-detect callback to the RTC
    square-wave pin and arms the next alarm from the database.

    Fixes: brew() logged the *bound method* temp_sensor.get_value instead of
    calling it; play_alarm() now accepts the channel argument that
    RPi.GPIO edge callbacks pass (default None keeps old callers working).
    """
    def __init__(self):
        GPIO.cleanup()
        # pinmode BCM
        GPIO.setmode(GPIO.BCM)
        # set class parameters
        self.adc = MCP3008()  # shared SPI ADC for the analog sensors below
        self.water_sensor = Reed(12)  # reed switch on the water tank
        self.coffee_check = LDR(self.adc, 0)  # light sensor: coffee supply
        self.cup_check = LDR(self.adc, 1)  # light sensor: cup present
        self.temp_sensor = NTC(self.adc, 2)  # thermistor on the boiler
        self.boiler = Boiler(25)
        self.pump = WaterPump(20)
        self.dispenser = Dispenser(16)
        self.songpath = None
        self.player = None  # omxplayer subprocess while the alarm song plays
        self.alarm_coffee = 0  # scoops for the next alarm (0 = none armed)
        self.alarm_water = 0  # millilitres of water for the next alarm
        self.cup_val = 750 # TEST TO GET RIGHT VALUE
        # self.rtc = DS3121(0x68, 17, cb_on_alarm=self.play_alarm())
        self.rtc = DS3121(0x68, 17)
        self.rtc.set_datetime(datetime.datetime.now())
        log.info(self.rtc.get_datetime())
        # ADDING EDGE DETECTION
        log.debug("SQW VAL: {}".format(GPIO.input(self.rtc.sqw)))
        GPIO.add_event_detect(self.rtc.sqw, GPIO.FALLING, callback=self.play_alarm)
        self.update_alarm()
    def machine_ready(self):
        """Return [ok, message]; ok only if water, coffee and a cup are present."""
        if not self.water_sensor.check_value():
            return [False, "Refill water"]
        log.debug("COFFEE CHECK VALUE: {}".format(self.coffee_check.get_value()))
        if self.coffee_check.get_value() < 950: # TEST TO CALIBRATE
            return [False, "Refill coffee"]
        log.debug("CUP SENSOR VALUE: {}".format(self.cup_check.get_value()))
        if self.cup_check.get_value() < self.cup_val: # TEST TO CALIBRATE
            return [False, "Place cup"]
        return [True, "The machine is ready to use"]
    def brew(self, coffee, water):
        """Brew `coffee` scoops with `water` ml; True on success, else False."""
        if self.machine_ready()[0]: # ADD [0] TO ENABLE!!
            if isinstance(coffee, int) and coffee > 0 and isinstance(water, int) and water > 0:
                log.info("HEATING...")
                self.boiler.on()
                print("TEMP SENSOR VALUE: {}".format(self.temp_sensor.get_value()))
                # Busy-wait until the raw NTC reading drops below the threshold.
                while self.temp_sensor.get_value() > 200:
                    log.info("TEMP SENSOR VALUE: {}".format(self.temp_sensor.get_value()))
                self.boiler.off()
                log.info("ADDING {} SCOOPS OF COFFEE...".format(coffee))
                for i in range(coffee):
                    log.info("SCOOP {}".format(i))
                    self.dispenser.scoop()
                log.info("POORING {}ML OF WATER...".format(water))
                self.pump.pump_amount(water)
                log.info("ENJOY!")
                return True
        return False
    def update_alarm(self):
        """Arm the RTC with the next pending DB alarm, or clear it if none."""
        log.debug("UPDATING ALARM!")
        a = get_next_alarm()
        if a:
            # GET DATA
            alarm = get_alarm(a[0])
            coffee = get_coffee(alarm[0][5])
            song = get_song(alarm[0][4])
            # SET ALARM
            log.debug("ALARM UPDATED")
            self.rtc.reset()
            self.rtc.set_alarm(alarm[0][3], alarm[0][1], alarm[0][2])
            # SET ALARM PARAMETERS
            self.alarm_water = coffee[0][1]
            self.alarm_coffee = coffee[0][2]
            self.set_song(song[0][2])
            # LOGGING VALUES
            log.debug("NEXT ALARM: {}".format(a))
            log.debug("ALARM: {}".format(alarm))
            log.debug("ALARM SONG: {}".format(song))
            log.debug("ALARM COFFEE: {}".format(coffee))
        else:
            log.debug("NO ALARM ANYTIME SOON")
            self.rtc.clear_alarm()
        log.debug(self.rtc.get_datetime())
        self.rtc.log_alarm()
    def set_song(self, song_title):
        # Songs live under the web app's static directory.
        self.songpath = Path("web/static/songs/", song_title)
    def play_song(self):
        # Fire-and-forget omxplayer; stdin is kept so stop_song() can send 'q'.
        self.player = subprocess.Popen(["omxplayer", str(self.songpath)], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
    def stop_song(self):
        if self.player is not None:
            self.player.stdin.write(b"q")
    def play_alarm(self, channel=None):
        """Edge-detect callback: play the song and brew until the cup is taken."""
        log.debug("CALLED PLAY_ALARM()")
        if self.alarm_coffee != 0:
            log.debug("PLAYING ALARM")
            self.play_song()
            self.brew(self.alarm_coffee, self.alarm_water)
            # Keep the song running until the (full) cup leaves the sensor.
            while self.cup_check.get_value() > self.cup_val:
                continue
            self.stop_song()
            self.update_alarm()
        else:
            # The first FALLING edge fires during setup before an alarm is armed.
            log.debug("CALLBACK INITIALISATION")
def get_data(sql, params=None):
    """Run `sql` with optional `params` against the project1 MariaDB database
    and return the result set as a list of row-lists (empty on error).

    Fixes: cursor/connection are now closed in a `finally` block so they are
    released on any exit path, and the stray ')' in the error-log format
    string has been removed.
    """
    conn = mariadb.connect(database='project1', user='project1-sensor', password='sensorpassword')
    cursor = conn.cursor()
    records = []
    try:
        log.info(sql)
        cursor.execute(sql, params)
        result = cursor.fetchall()
        for row in result:
            records.append(list(row))
    except Exception as e:
        # Best effort: log and fall through to return whatever was collected.
        log.error("Error on fetching data: {0}".format(e))
    finally:
        cursor.close()
        conn.close()
    return records
def get_next_alarm():
    """Return [alarmID, hour, minutes, dayID] of the next active alarm due
    today or tomorrow, or False when none is pending."""
    # DB day ids appear to be 1..7 with weekday()+1 (wrapping 7 -> 1) --
    # verify against the alarmDays table. TODO confirm.
    today = datetime.datetime.today().weekday() + 1
    if today == 7:
        tomorrow = 1
    else:
        tomorrow = today + 1
    curr_hour = datetime.datetime.now().hour
    curr_min = datetime.datetime.now().minute
    next_alarm = False
    log.debug("NEXT ALARM: TODAY = {} AND TOMORROW = {}".format(today, tomorrow))
    # Active alarms for today/tomorrow, ordered day, hour, minute, id.
    next_alarms = get_data("select A.alarmID, A.hour, A.minutes, D.dayID from alarms as A "
                           "join alarmDays as D "
                           "on A.alarmID = D.alarmID "
                           "where (D.dayID = %s "
                           "or D.dayID = %s) "
                           "and A.active = 1 "
                           "order by D.dayID ASC, "
                           "A.hour ASC, "
                           "A.minutes ASC,"
                           "A.alarmID ASC",
                           [today, tomorrow, ])
    for a in next_alarms:
        log.debug("NEXT ALARM LOOP: {}-{}-{}".format(a[3], a[1], a[2]))
        if a[3] == today:
            # Today's alarms only count if they are still in the future.
            if a[1] > curr_hour or (a[1] == curr_hour and a[2] > curr_min):
                next_alarm = a
                break
            continue
        # First alarm of tomorrow wins otherwise.
        next_alarm = a
        break
    log.debug("FETCH NEXT ALARM: {}".format(next_alarm))
    return next_alarm
def get_alarm(alarm_id):
    """Fetch the full alarm row for `alarm_id` (list with one row-list, or empty)."""
    query = ("select alarmID, hour, minutes, active, songID, coffeeID, username "
             "from alarms "
             "where alarmID = %s")
    return get_data(query, [alarm_id, ])
def get_song(song_id):
    """Fetch the song row for `song_id` (list with one row-list, or empty)."""
    query = ("select title, artist, filename, public, username "
             "from songs "
             "where songID = %s")
    return get_data(query, [song_id, ])
def get_coffee(coffee_id):
    """Fetch the coffee recipe row for `coffee_id` (list with one row-list, or empty)."""
    query = "select name, amt_water, amt_coffee, username, public from coffees where coffeeID = %s;"
    return get_data(query, [coffee_id, ])
def main():
    # ONLY FOR TESTING PURPOSES
    """Hardware smoke test: repeatedly actuate the dispenser until Ctrl-C."""
    GPIO.setmode(GPIO.BCM)
    adc = MCP3008()
    water_sensor = Reed(12)
    coffee_check = LDR(adc, 0)
    cup_check = LDR(adc, 1)
    temp_sensor = NTC(adc, 2)
    boiler = Boiler(25)
    pump = WaterPump(20)
    dispenser = Dispenser(16)
    try:
        while 1:
            dispenser.scoop()
            # NOTE(review): `time` is not imported in this module directly --
            # presumably re-exported by `from Lib import *`; confirm.
            time.sleep(5)
    except KeyboardInterrupt:
        # Make sure the actuators are de-energised before releasing the pins.
        boiler.off()
        pump.off()
    finally:
        GPIO.cleanup()
if __name__ == '__main__':
    main()
|
from init import get_driver
from mattermostdriver.exceptions import ResourceNotFound
class Notifier:
    """Sends Mattermost direct messages from the "jarvisbot" account."""
    def __init__(self):
        # Log in once and remember the bot's own user id for DM channel names.
        self.client = get_driver()
        self.client.login()
        self.user_id = self.client.users.get_user_by_username("jarvisbot")["id"]
    def notify(self, message, email):
        """Send `message` as a DM to the user registered under `email`."""
        c_id = self.get_direct_channel_id(email)
        if c_id is None:
            print("Failed to send message to " + email)
            return
        self.client.posts.create_post(options={
            'channel_id': c_id,
            'message': message})
    def get_direct_channel_id(self, email):
        """Return the id of the bot<->user direct channel, or None if absent.

        Direct channel names are the two member ids joined by "__" in either
        order; both candidate names are tried (the original duplicated the
        whole lookup for each order).
        """
        other_id = self.client.users.get_user_by_email(email)["id"]
        team_id = self.client.teams.get_team_by_name("se501")["id"]
        for c_name in (other_id + "__" + self.user_id,
                       self.user_id + "__" + other_id):
            try:
                return self.client.channels.get_channel_by_name(team_id, c_name)["id"]
            except ResourceNotFound:
                continue
        return None
|
# Generated by Django 3.1.3 on 2020-11-16 00:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.3): adds the Profile model, repoints every
    author/subscriber FK from the removed User model to Profile, then drops
    User. Do not hand-edit the operations below."""
    dependencies = [
        ('rest_api', '0004_auto_20201115_1735'),
    ]
    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('birthday', models.DateField(verbose_name='Дата рождения')),
                ('description', models.TextField(blank=True, null=True, verbose_name='Описание')),
                ('avatar', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='rest_api.pictures', verbose_name='Аватарка')),
            ],
            options={
                'verbose_name': 'Пользователь',
                'verbose_name_plural': 'Пользователи',
            },
        ),
        # Repoint all authorship/subscription FKs at the new Profile model.
        migrations.AlterField(
            model_name='comment',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rest_api.profile', verbose_name='Автор'),
        ),
        migrations.AlterField(
            model_name='like',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rest_api.profile', verbose_name='Автор'),
        ),
        migrations.AlterField(
            model_name='pictures',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rest_api.profile', verbose_name='Автор'),
        ),
        migrations.AlterField(
            model_name='publication',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rest_api.profile', verbose_name='Автор'),
        ),
        migrations.AlterField(
            model_name='subscription',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='author', to='rest_api.profile', verbose_name='Автор'),
        ),
        migrations.AlterField(
            model_name='subscription',
            name='subscriber',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subscriber', to='rest_api.profile', verbose_name='Подписчик'),
        ),
        migrations.DeleteModel(
            name='User',
        ),
    ]
|
#!/usr/bin/env python
# encoding: utf-8
"""
hfetcher.py
Created by yang.zhou on 2012-10-29.
Copyright (c) 2012 zhouyang.me. All rights reserved.
"""
|
import pytest
def test_fail():
    # IDE inspection fixture: the <warning> markup below is test data for an
    # "unreachable code" inspection, not valid Python -- keep byte-identical.
    if True == False:
        pytest.fail()
        <warning descr="This code is unreachable">print("should be reported as unreachable")</warning>
    else:
        return 1
from rdflib.namespace import RDF
from source.utils import id2uri, g_add_with_valid
import csv
import json
def create_ttl(g, u, row: dict):
    """Add RDF triples for one bioassay CSV `row` to graph `g` and return it.

    `u` supplies the class/predicate terms (sid, cid, gid, pmid, protein,
    sid2cid, sid2pmid, gid2pmid). Only the sid/cid/geneid/pmid/protacxn
    columns are used; g_add_with_valid presumably skips invalid values --
    verify in source.utils. Sample row:

    baid: 99415175
    activity: Active
    aid: 47346
    sid: 103186613
    cid: 44273608
    geneid: 10203
    pmid: 9438028
    aidtype: Confirmatory
    aidmdate: 20181015
    hasdrc: 0
    rnai: 0
    protacxn: Q16602
    acname: IC50
    acqualifier: =
    acvalue: 5
    aidsrcname: ChEMBL
    aidname: CGRP1 receptor affinity on human neuroblastoma cells SK-N-MC, which selectively express the human CGRP1 receptor.
    cmpdname: Yvptdvgseaf
    targetname: CALCRL - calcitonin receptor like receptor (human)
    targeturl: /gene/10203
    ecs: NULL
    repacxn: Q16602
    taxids: NULL
    cellids: NULL
    targettaxid
    """
    # Mint URIs for each identifier column.
    sid = id2uri(row["sid"], "sid")
    cid = id2uri(row["cid"], "cid")
    gid = id2uri(row["geneid"], "gid")
    pmid = id2uri(row["pmid"], "pmid")
    protein = id2uri(row["protacxn"], "protein")
    # Type each node and link substance -> compound/publication, gene -> publication.
    g_add_with_valid(g, sid, RDF.type, u.sid)
    g_add_with_valid(g, sid, u.sid2cid, cid)
    g_add_with_valid(g, sid, u.sid2pmid, pmid)
    g_add_with_valid(g, cid, RDF.type, u.cid)
    g_add_with_valid(g, gid, RDF.type, u.gid)
    g_add_with_valid(g, gid, u.gid2pmid, pmid)
    g_add_with_valid(g, pmid, RDF.type, u.pmid)
    g_add_with_valid(g, protein, RDF.type, u.protein)
    return g
if __name__ == "__main__":
    # NOTE(review): smoke call passes ints, but create_ttl subscripts
    # row["sid"], so running this module directly raises TypeError -- confirm intent.
    create_ttl(0,0,0)
|
# Read three pairwise distances from stdin and print the cheapest of the four
# candidate walking routes (competitive-programming one-liner).
d1, d2, d3 = map(int, input().split())
print(min([2*(d1+d2), d1+d2+d3, 2*(d1+d3), 2*(d2+d3)]))
|
# -*- coding: utf-8 -*-
'''
Created on 24-08-2013
@author: Krzysztof Langner
'''
from sloppy.interpreter import Runtime
import os.path
import unittest
COMMANDS_FILE = os.path.join(os.path.dirname(__file__), 'testdata/commands.txt')
PARSER_KEYWORDS = [
['count'],
['events']
]
class CommandsMockup(object):
    """Test double: each handler records which command ran (and its argument)
    in `result` so the tests can assert on the dispatch."""
    def __init__(self):
        self.result = ''
    def loadData(self, features):
        self.result = "load " + features
    def printEventCount(self, features):
        self.result = "event count"
class Test(unittest.TestCase):
    """Drives the sloppy Runtime against the fixtures in testdata/commands.txt."""
    def testNoCommand(self):
        # Wire a Runtime to the mockup handlers, then replay every
        # "input -> expected result" pair from the fixture file.
        runtime = Runtime()
        commands = CommandsMockup()
        for keywords in PARSER_KEYWORDS:
            runtime.parser.add_keywords(keywords)
        runtime.add_mapping([('count', 'KEYWORD'),('event', 'KEYWORD')],
                            commands.printEventCount)
        runtime.add_mapping([('load', 'KEYWORD'),('', 'DATE')],
                            commands.loadData)
        counter = 0
        with open(COMMANDS_FILE, "r") as commands_file:
            for line in commands_file:
                line = line.strip()
                # Skip blank lines and '#' comments in the fixture file.
                if len(line) > 0 and line[0] != '#':
                    tokens = line.split('->')
                    self.assertEqual(2, len(tokens))
                    runtime.execute(tokens[0])
                    self.assertEqual(tokens[1].strip(), commands.result)
                    counter += 1
        print('Processed %d commands' % counter)
    def testContainsFalse(self):
        # No token overlap at all.
        runtime = Runtime()
        tokens = [('ala', ''), ('ma', ''), ('kota', '')]
        words = [('alexandra', '')]
        self.assertFalse(runtime._contains(tokens, words))
    def testContainsFalse2(self):
        # Same word with a different tag must not match.
        runtime = Runtime()
        tokens = [('ala', ''), ('ma', ''), ('kota', 'NN')]
        words = [('ala', ''), ('kota', 'NA')]
        self.assertFalse(runtime._contains(tokens, words))
    def testContainsTrue(self):
        # Every (word, tag) pair is present in the token list.
        runtime = Runtime()
        tokens = [('ala', ''), ('ma', ''), ('kota', 'NN')]
        words = [('ala', ''), ('kota', 'NN')]
        self.assertTrue(runtime._contains(tokens, words))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testTokenizer1']
unittest.main() |
def solution(food):
    """Build the longest symmetric dinner menu ("dinner menu" style problem).

    food[i] is the stock of dish i (index 0 is skipped). Each dish can appear
    food[i] // 2 times on each side of the central '0'.

    Simplification: the original branched on parity, but (n - 1) // 2 == n // 2
    for odd n, so a single floor division covers both cases identically.
    """
    half = ''.join(str(i) * (count // 2) for i, count in enumerate(food) if i > 0)
    return half + '0' + half[::-1]
from collections import defaultdict, deque, Counter
import sys
from decimal import *
from heapq import heapify, heappop, heappush
import math
import random
import string
from copy import deepcopy
from itertools import combinations, permutations, product
from operator import mul, itemgetter
from functools import reduce, lru_cache
from bisect import bisect_left, bisect_right
def input():
    # Shadow builtin input() with a faster stdin reader (contest boilerplate).
    return sys.stdin.readline().rstrip()
def getN():
    # Read one integer line.
    return int(input())
def getNM():
    # Read a line of space-separated integers as a map (for unpacking).
    return map(int, input().split())
def getList():
    # Read a line of space-separated integers as a list.
    return list(map(int, input().split()))
def getArray(intn):
    # Read `intn` lines, one integer each.
    return [int(input()) for i in range(intn)]
# Contest-wide constants: deep recursion limit, modulus, and 4-neighbour deltas.
sys.setrecursionlimit(1000000000)
mod = 10 ** 9 + 7
INF = float('inf')
dx = [1, 0, -1, 0]
dy = [0, 1, 0, -1]
#############
# Main Code #
#############
N = 5
# 木グラフ
que = [
[1, 2],
[1, 4],
[2, 3],
[2, 5]
]
# 重みつき
que_dis = [
[1, 2, 2],
[1, 4, 1],
[2, 3, 2],
[2, 5, 1]
]
# 重みなし
def build_tree(n, edge_list):
    """Build an adjacency list for an undirected graph of n nodes from
    1-indexed [a, b] edge pairs (result is 0-indexed)."""
    adj = [[] for _ in range(n)]
    for a, b in edge_list:
        adj[a - 1].append(b - 1)
        adj[b - 1].append(a - 1)
    return adj
# 重みつき
def build_tree_dis(n, edge_list):
    """Build a weighted adjacency list ([neighbor, weight] entries) from
    1-indexed [a, b, w] edge triples (result is 0-indexed)."""
    adj = [[] for _ in range(n)]
    for a, b, w in edge_list:
        adj[a - 1].append([b - 1, w])
        adj[b - 1].append([a - 1, w])
    return adj
# 木の建設
G1 = build_tree(N, que)
G2 = build_tree_dis(N, que_dis)
# 木を探索
def search(n, edges, sta):
    """BFS over the n-node adjacency list `edges` from `sta`; returns the
    visit order (e.g. [0, 1, 3, 2, 4] for the sample tree).

    Bug fixed: the visited array was sized with the module-level global N
    instead of the `n` parameter, so the function broke (or silently
    misbehaved) for any graph whose size differed from N. Returning the visit
    order is a backward-compatible extension (it previously returned None).
    """
    visited = [0] * n
    visited[sta] = 1
    queue = deque([sta])
    order = []
    while queue:
        u = queue.popleft()
        order.append(u)
        for v in edges[u]:
            if visited[v] == 0:
                visited[v] = 1
                queue.append(v)
    return order
# [0, 1, 3, 2, 4]
search(N, G1, 0)
# staからの距離
# staからの距離を求めてそのもっとも遠い店からまた距離を求めた時のもっとも遠い店が木の直径
def distance(n, edges, sta):
    """BFS-accumulated distances from `sta` over the weighted adjacency list
    `edges` ([neighbor, weight] entries); unreached nodes stay -1.

    Bug fixed: the distance array was sized with the module-level global N
    rather than the `n` parameter. Note this is first-visit BFS, so the
    values are true shortest distances on trees, not on arbitrary weighted
    graphs (use Dijkstra there).
    """
    dist = [-1] * n  # use float('inf') instead when "unreachable" must compare large
    dist[sta] = 0
    queue = deque([sta])
    while queue:
        u = queue.popleft()
        for v, w in edges[u]:
            if dist[v] == -1:
                dist[v] = dist[u] + w
                queue.append(v)
    return dist
def distance(n, edges, sta):
    """BFS hop distances from `sta` over the unweighted adjacency list
    `edges`; unreached nodes stay -1.

    Bug fixed: the distance array was sized with the module-level global N
    rather than the `n` parameter.
    """
    dist = [-1] * n  # use float('inf') instead when "unreachable" must compare large
    dist[sta] = 0
    queue = deque([sta])
    while queue:
        u = queue.popleft()
        for v in edges[u]:
            if dist[v] == -1:
                dist[v] = dist[u] + 1
                queue.append(v)
    return dist
# [0, 2, 4, 1, 3]
print(distance(N, G2, 0))
# staからendまでのルート
def router(n, sta, end):
pos = deque([sta])
ignore = [0] * n
path = [0] * n
path[sta] = -1
while pos[0] != end:
u = pos.popleft()
ignore[u] = 1
for i in dist[u]:
if ignore[i] != 1:
path[i] = u
pos.append(i)
route = deque([end])
while True:
next = path[route[0]]
route.appendleft(next)
if route[0] == sta:
break
return list(route)
# staからbfsして親要素を記録
def parents(n, sta, dist):
pos = deque([sta])
ignore = [0] * n
path = [0] * n # 親要素
path[sta] = -1
d = [[] for i in range(n)] # 有向辺
while pos:
u = pos.popleft()
ignore[u] = 1
for i in dist[u]:
if ignore[i] != 1:
path[i] = u
d[u].append(i)
pos.append(i)
return path
# dfsで子要素の部分木の大きいを求める
def dfs(u, par):
res = 1 # 自身のサイズ
for v in E[u]:
if v != par:
size_c = dfs(v, u) # 子方向の部分木のサイズ
# print(u, size_c, 'c')
res += size_c
size_p = N - res # 親方向に伸びる部分木のサイズ
# print(u, size_p, 'p')
return res
dfs(0, -1) # 実行
cnt = 0
size = [0] * N # 部分木のサイズ
parents = [-1] * N # 0を根としたときの親
depth = [0] * N # 深さ
# 親と深さと部分木のサイズを保持しておく
def dfs(u, p):
global cnt, size, parents, depth
size[u] -= cnt
cnt += 1
for v in E[u]:
if v != p:
depth[v] = depth[u] + 1
parents[v] = u
dfs(v, u)
size[u] += cnt
ans = 0
# 部分木の色ぬり + 閉路検出
def search(sta, dist):
global ans
# 現在の位置とparent
pos = deque([[sta, -1]])
ignore[x] = 1
flag = True
while pos:
u, parent = pos.popleft()
for i in dist[u]:
if i != parent:
if ignore[i] == 1:
flag = False
continue
ignore[i] = 1
pos.append([i, u])
if flag:
ans += 1
# ABC021 C - 正直者の高橋くん
# 最短経路の本数
N = getN()
a, b = getNM()
M = getN()
dist = [[] for i in range(N)]
for i in range(M):
x, y = getNM()
dist[x - 1].append(y - 1)
dist[y - 1].append(x - 1)
# スタートからの最短距離測定
def distance(sta):
# 木をstaから順にたどる(戻るの禁止)
pos = deque([sta])
ignore = [-1] * N
ignore[sta] = 0
while len(pos) > 0:
u = pos.popleft()
for i in dist[u]:
if ignore[i] == -1:
ignore[i] = ignore[u] + 1
pos.append(i)
return ignore
d = distance(a - 1)
# スタートから特定の点まで最短距離で行く通りの数
def counter(sta):
pos = deque([sta])
ignore = [0] * N
cnt = [0] * N
cnt[sta] = 1
while len(pos) > 0:
u = pos.popleft()
if ignore[u] == 0:
ignore[u] = 1
# d[i] == d[u] + 1を満たすuの子ノード全てに
# 「スタートからuまでの通りの数」をプラス(他のルートからも来る)
for i in dist[u]:
if d[i] == d[u] + 1:
cnt[i] += cnt[u]
pos.append(i)
return cnt
print(counter(a - 1)[b - 1] % mod)
# 赤黒木
# 距離が3とか5離れている頂点を探せる?
color = [-1] * N
color[s] = 1
que = deque([s]) # 赤スタート
while que:
u = que.popleft()
for v in E[u]:
if color[v] != -1:
continue
# 親のmodが1なら2を入れる
if color[u] == 1:
color[v] = 2
else:
color[v] = 1
que.append(v)
# dfsによる連結成分分解 木でなくてもいい
# groupを書き込む
ignore = [0] * N
group = [[] for i in range(N)]
def dfs(u, g):
ignore[u] = 1
group[g].append(u)
for v in E[u]:
if not ignore[v]:
dfs(v, g)
g = -1
for u in range(N):
if not ignore[u]:
g += 1
dfs(u, g)
# オイラーツアー
N = getN()
E = [[] for i in range(N)]
for _ in range(N - 1):
a, b = getNM()
E[a - 1].append(b - 1)
E[b - 1].append(a - 1)
for i in range(N):
E[i].sort()
ans = []
def dfs(u, p):
ans.append(u)
for v in E[u]:
if v != p:
dfs(v, u)
ans.append(u)
dfs(0, -1)
# bfsでも一回頂点から流す→葉から探索でdfsができる
# codeforces round 693 G. Moving to the Capital
T = getN()
for _ in range(T):
_ = input()
N, M = getNM()
dis1 = [float('inf')] * N
dis1[0] = 0
E = [[] for i in range(N)]
for _ in range(M):
u, v = getNM()
E[u - 1].append(v - 1)
# 距離の計測
q = deque([0])
# トポソ順
order = []
while q:
u = q.popleft()
order.append(u)
for v in E[u]:
if dis1[v] > dis1[u] + 1:
dis1[v] = dis1[u] + 1
q.append(v)
dis2 = [float('inf')] * N
# 葉から探索
while order:
u = order.pop()
for v in E[u]:
# 逆行
if dis1[v] <= dis1[u]:
dis2[u] = min(dis2[u], dis1[v])
else:
dis2[u] = min(dis2[u], dis2[v])
print(*[min(dis1[i], dis2[i]) for i in range(N)])
# Educational Codeforces Round 112 (Rated for Div. 2)
# D. Say No to Palindromes
# 完全二分木のトーナメントの作り方 逆にしていく
# bi(試合数) = 2 ** K - 1
# ind(試合のコード) = bi - (試合のindex)
# dp = [0] * (bi + 1)
# indの子要素(前の試合)はind * 2とind * 2 + 1
K = getN()
S = list(input())
bi = 2 ** K - 1
dp = [0] * (bi + 1)
def rec(mat, result):
ind = bi - mat# reverse
S[bi - ind] = result # rewrite
while ind >= 1:
# first game
if ind > bi // 2:
if S[bi - ind] == '0' or S[bi - ind] == '1':
dp[ind] = 1
else:
dp[ind] = 2
# second, third...
else:
if S[bi - ind] == '0':
dp[ind] = dp[ind * 2 + 1]
elif S[bi - ind] == '1':
dp[ind] = dp[ind * 2]
else:
dp[ind] = dp[ind * 2 + 1] + dp[ind * 2]
ind //= 2
for i in range(bi):
rec(i, S[i])
Q = getN()
for _ in range(Q):
m, r = input().split()
rec(int(m) - 1, r)
print(dp[1])
# bfs探索した木の復元 もっとも高さが低いもの
N = getN()
A = [i - 1 for i in getList()]
res = 1
E = [[] for i in range(N)] # 1-indexで返す
prev, next, rev = [0], [], 0 # 前の段の要素の数, 現在の段の要素の数、と現在の段の反転数
for i in range(1, N):
# 反転している prevの個数 - 1までは許される
if A[i - 1] > A[i]:
# これ以上は持てないので段を変える
if rev + 1 == len(prev):
res += 1
prev = next # これは1以上
rev = 0
next = []
else:
rev += 1
# 置く
next.append(A[i])
E[prev[rev]].append(A[i])
# 葉から探索していく
T = getN()
for _ in range(T):
_ = input()
N, K = getNM()
E = [[] for i in range(N)]
for i in range(N - 1):
u, v = getNM()
E[u - 1].append(v - 1)
E[v - 1].append(u - 1)
depth = [1] * N
# 葉
q = deque([i for i in range(N) if len(E[i]) == 1])
# 子要素の数
order = [len(E[i]) for i in range(N)]
while q:
u = q.popleft()
for v in E[u]:
order[v] -= 1
if order[v] == 1:
q.append(v)
depth[v] = depth[u] + 1
print(sum([(d > K) for d in depth]))
# 非再帰dfs
def euler_tour(N, E, sta):
    """Iterative (stack-based) DFS over the N-node adjacency list E from `sta`,
    with explicit pre-order and post-order phases; as written it computes and
    returns the size of every node's subtree."""
    stack = deque([[sta, 1]])  # flag 1 = entering (pre-order), 0 = leaving
    dis = [-1] * N
    dis[sta] = 0
    par = [-1] * N
    size = [1] * N  # every node counts itself
    while stack:
        node, entering = stack.pop()
        if entering:
            ### pre-order hook goes here ###
            # Schedule the matching post-order visit before the children.
            stack.append([node, 0])
            # For weighted graphs iterate `for nxt, w in E[node]`.
            for nxt in E[node]:
                if dis[nxt] == -1:
                    dis[nxt] = dis[node] + 1  # weighted: dis[node] + w
                    par[nxt] = node
                    stack.append([nxt, 1])
                    ### per-child hook goes here ###
        else:
            ### post-order hook: fold this subtree into its parent ###
            if node != sta:
                size[par[node]] += size[node]
    return size
|
from django.shortcuts import render
def home_index(request):
    """Render the landing page template with the 'home' nav entry active."""
    return render(request, 'home_index.html', {'nav': 'home'})
from django import forms
from django.forms import ModelForm
from .models import User,Patient,Doctor,Service,Appointment
class LoginForm(forms.Form):
    """Plain email + password login form (no model binding)."""
    email = forms.EmailField(required=True)
    password = forms.CharField(required=True)
class SignUpForm(forms.Form):
    """Registration form: email plus a password/confirmation pair."""
    email = forms.EmailField(required=True)
    password = forms.CharField(required=True)
    confirm_password = forms.CharField(required=True)
    def clean_email(self):
        # Reject emails already registered, naming the existing account's role.
        email = self.cleaned_data.get("email")
        user = User.objects.filter(email__iexact=email).first()
        if user:
            if user.is_staff:
                user_role = "Staff"
            else:
                user_role = "Patient"
            raise forms.ValidationError(
                "{} with this email already exists, use another email.".format(
                    user_role
                )
            )
        return email
    def clean_password(self):
        # Enforce minimum length and that both password fields agree.
        # NOTE(review): the mismatch error is attached to `password`, not
        # `confirm_password` -- confirm that is the intended UX.
        password = self.cleaned_data.get("password")
        if len(password) < 6:
            raise forms.ValidationError("Password should be minimum 6 characters long")
        if password != self.data.get("confirm_password"):
            raise forms.ValidationError("Passwords do not match")
        return password
class DateInput(forms.DateInput):
    # Use the HTML5 date picker instead of Django's default text input.
    input_type = 'date'
class AppointmentForm(ModelForm):
    """Booking form for Appointment; the date field uses the HTML5 date widget."""
    class Meta:
        model = Appointment
        fields = ('doctor', 'date', 'time', 'name',)
        widgets = {
            'date': DateInput(),
        }
class PatientAddForm(forms.ModelForm):
    """Full Patient form exposing every model field."""
    class Meta:
        model = Patient
        exclude = ()
        widgets = {
            'dateofbirth': DateInput(attrs={'type': 'datetime-local'}, format='%Y-%m-%d')
        }
    def __init__(self, *args, **kwargs):
        super(PatientAddForm, self).__init__(*args, **kwargs)
        # NOTE(review): this replaces the Meta widget above with a plain
        # DateInput(), discarding the datetime-local attrs/format -- confirm
        # which widget configuration is actually wanted.
        self.fields['dateofbirth'].widget = DateInput()
class DoctorAddForm(forms.ModelForm):
    """Full Doctor form exposing every model field."""
    class Meta:
        model = Doctor
        exclude = ()
class DoctorForm(forms.ModelForm):
    """Doctor form without the details/speciality fields (basic info only)."""
    class Meta:
        model = Doctor
        exclude = ('details','speciality')
class PatientForm(forms.ModelForm):
    """Full Patient form exposing every model field."""
    class Meta:
        model = Patient
        exclude = ()
|
import sys
# Make the sibling exercise directories importable (paths are relative to the
# current working directory, so run this from the expected location).
sys.path.append('../1/')
sys.path.append('../2/')
import sys
import random
train_file = 'train.tsv'
dev_file = 'dev.tsv'
train_data_ratio = 0.7 # train.tsvのデータとdev.tsvのデータの比率
random.seed(0)
def read_file(input_file):
    """Read `input_file` as UTF-8 and return its lines with '\n' stripped
    (newline='' keeps any '\r' characters, matching universal-newlines-off reads)."""
    with open(input_file, 'r', encoding='utf-8', newline='') as fr:
        raw_lines = fr.readlines()
    return [line.replace('\n', '') for line in raw_lines]
def make_data(input_file, label):
    """Read `input_file` and convert every line into a labelled CoLA-style row."""
    text = read_file(input_file)
    data = make_cola_data(input_file, label, text)
    return data
# colaデータの作成
def make_cola_data(input_file, label, text):
    """Format each line of `text` as a CoLA-style TSV row:
    source file, label, '*' placeholder, sentence."""
    template = '{}\t{}\t{}\t{}'
    return [template.format(input_file, label, '*', sentence) for sentence in text]
# データセットの作成
def create_dataset(output_file, data_a, data_b):
    """Shuffle the union of data_a and data_b (module-level RNG state) and
    write one record per line to `output_file` (UTF-8)."""
    records = data_a + data_b
    random.shuffle(records)
    with open(output_file, 'w', encoding='utf-8', newline='') as fw:
        for record in records:
            fw.writelines(record + '\n')
def main():
    """CLI entry point: build train.tsv/dev.tsv from two labelled input files.

    Bug fixed: the usage message mixed a %-style placeholder ('%s') with
    str.format, so it printed the literal '%s'; it now uses '{}'.
    """
    args = sys.argv
    argc = len(args)
    if argc != 3:
        print('Usage: python {} filename filename'.format(args[0]))
        exit()
    global input_file_a, input_file_b
    input_file_a = args[1]
    input_file_b = args[2]
    label_a = '0'
    label_b = '1'
    data_a = make_data(input_file_a, label_a)
    data_b = make_data(input_file_b, label_b)
    # Split each class separately so train/dev keep the original label balance.
    train_size_a = int(len(data_a) * train_data_ratio)
    train_size_b = int(len(data_b) * train_data_ratio)
    create_dataset(
        train_file, data_a[:train_size_a], data_b[:train_size_b])
    create_dataset(
        dev_file, data_a[train_size_a:], data_b[train_size_b:])
if __name__ == "__main__":
main()
|
#!/usr/bin/python3
try:
some codes
except(RuntimeError, TypeError, NameError):
exception handlers
except:
default handler
else:
print("something will be done if no exception")
finally:
print("finally clause: do some clean-up actions")
# some exceptions
* SyntaxError
* ImportError
* NameError name not defined
* TypeError can not convert from one type to another
* OSError
* StopIteration
|
import RPi.GPIO as GPIO
from itertools import repeat
from datetime import datetime,time
from time import sleep
from pytz import timezone
import logging,pdb, sys
'''initial var'''
RELAIS_4_GPIO = 22
water_time = 1800 #30 min
tz = 'Rome'
'''logging config'''
logging.basicConfig(
level=logging.INFO,
filename='water.log',
format='[%(asctime)s] %(levelname)s:%(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
'''functions'''
def water_on():
    """Open the water valve and log it (always returns True)."""
    # NOTE(review): relay appears active-low (LOW switches it on) -- confirm wiring.
    GPIO.output(RELAIS_4_GPIO, GPIO.LOW)
    logging.debug('Water On')
    return True
def water_off():
    """Close the water valve and log it (always returns True)."""
    # NOTE(review): HIGH de-energises the (apparently active-low) relay -- confirm wiring.
    GPIO.output(RELAIS_4_GPIO, GPIO.HIGH)
    logging.debug('Water Off')
    return True
def water_status():
    """Read back the relay pin and log/print whether the water is ON or OFF."""
    pin_status = GPIO.input(RELAIS_4_GPIO)
    print('pin status: ', pin_status, type(pin_status))
    # Pin HIGH (1) means the relay is released, i.e. water OFF.
    if pin_status == 1:
        logging.info('Water is OFF')
        print('Water is OFF')
    else:
        logging.info('Water is ON')
        print('Water is ON')
    return True
def main():
    """Open the valve for `water_time` seconds, close it again, then exit;
    any interruption also closes the valve before returning."""
    logging.info('Starting up ')
    '''GPIO settings'''
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    #GPIO.setup(RELAIS_4_GPIO, GPIO.OUT, initial=GPIO.HIGH)
    GPIO.setup(RELAIS_4_GPIO, GPIO.OUT)
    try:
        '''main program'''
        water_on()
        water_status()
        for i in range(water_time):
            sleep(1)
        water_off()
        water_status()
        sys.exit()
    except KeyboardInterrupt:
        logging.info('shutting down for keyboard interrupt')
        water_off()
        water_status()
    except Exception:
        # Bug fixed: the original bare `except:` also swallowed the SystemExit
        # raised by sys.exit() above, logging a spurious "other interrupt"
        # shutdown on every normal run. Catching Exception lets it propagate.
        logging.info('shutting down for other interrupt')
        water_off()
        water_status()
    #GPIO.cleanup()
if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.