text stringlengths 8 6.05M |
|---|
from math import sqrt
def is_prime(n):
    """Return True if n is prime, using trial division up to sqrt(n).

    BUG FIX: the original returned True for n < 2 (0, 1 and negatives);
    those are now correctly rejected. Callers that start at 2 are
    unaffected.
    """
    if n < 2:
        return False
    for i in range(2, int(sqrt(n))+1):
        if n % i == 0:
            return False
    return True
def gen_prime(n):
    """Print every prime in [2, n], one per line, then print their sum."""
    running_sum = 0
    for candidate in range(2, n + 1):
        if is_prime(candidate):
            running_sum += candidate
            print(candidate)
    print(running_sum)

gen_prime(2000000)
|
from rdflib import URIRef
from prompt_toolkit.shortcuts import radiolist_dialog
class UserDisambiguation:
    """Interactive console dialogs letting a user refine entity types
    and clusters over an RDF graph."""

    def __init__(self, graph):
        self.g = graph

    def S(self, uri):
        """Render a URI in N3 shorthand using the graph's namespace manager."""
        if isinstance(uri, str):
            uri = URIRef(uri)
        return uri.n3(self.g.g.namespace_manager)

    def ask(self, classes, entity):
        """Ask the user to pick the correct class for `entity`; returns the
        chosen class, or None if the dialog is cancelled."""
        values = [(c, c) for c in classes]
        dialog = radiolist_dialog(values=values, text=f"Choose correct type for {entity}")
        res = dialog.run()
        return res

    def do_recluster(self, cluster):
        """Return True when the user chooses to recluster `cluster`."""
        values = [("recluster", "Recluster"), ("keep", "Keep Cluster, Refine Types")]
        dialog = radiolist_dialog(values=values, text=f"Choose how to handle cluster {cluster}")
        res = dialog.run()
        return res == "recluster"

    def recluster(self, bad_cluster):
        """Interactively split `bad_cluster` into sub-clusters.

        The user repeatedly types whitespace-separated item indices; the
        selected items form one new cluster and are removed, until none
        remain. A 2-element cluster is split into singletons automatically.

        BUG FIX: the typed index list is now split on whitespace; the
        original iterated the raw string character by character, so any
        multi-digit index or separator crashed int().
        """
        clusters = []
        if len(bad_cluster) == 2:
            return [[bad_cluster[0]], [bad_cluster[1]]]
        while len(bad_cluster) > 0:
            print("-"*10)
            for i, c in enumerate(bad_cluster):
                print(f"{i}) {c}")
            print(f"Currently have {len(clusters)}")
            inp_cluster = input("List items in cluster: ")
            ids = [int(i.strip()) for i in inp_cluster.split()]
            # Ignore out-of-range indices; an empty selection re-prompts.
            values = [bad_cluster[i] for i in ids if i < len(bad_cluster)]
            if len(values) == 0:
                continue
            clusters.append(values)
            for val in values:
                bad_cluster.remove(val)
        return clusters
|
import pandas
from flask import Flask, request, render_template
from flask.json import jsonify
from flask_socketio import SocketIO, emit
from influxdb import InfluxDBClient
from plotly.io import to_json
from plotly import graph_objs
from functools import wraps
from datetime import datetime
# Flask app plus a Socket.IO layer used to push live sensor readings.
app = Flask(__name__)
socketio = SocketIO(app)
# UI toggle states, 0 = off / 1 = on. Names are Russian:
# "On/Remote control", "Lamps", "Pump".
light_state = [{'name': lamp, 'val': 0} for lamp in ('Вкл.\nПульт', 'Лампы', 'Насос')]
# Accumulated motor command delta; consumed by the device via /get_command.
motors_state = 0
def work_with_base(function):
    """Decorator simplifying database access: opens an InfluxDB client,
    selects the 'test' database, injects the client as the `client`
    keyword argument, and closes the connection afterwards."""
    @wraps(function)
    def wrapper(*args, **kwargs):
        db_client = InfluxDBClient(host='localhost', port=8086)
        db_client.switch_database('test')
        outcome = function(*args, client=db_client, **kwargs)
        db_client.close()
        return outcome
    return wrapper
# Костыль для того, чтобы никто не ругался,
# что javascript нельзя ответы на запросы получать
# Workaround so browser JavaScript is allowed to read our responses
# (permissive CORS on every response).
@app.after_request
def add_cors_headers(response):
    # '*' allows any origin; tighten this if the API ever leaves the LAN.
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
@app.route('/data', methods=['POST'])
@work_with_base
def post(client=None):
    """Store posted sensor readings in InfluxDB and broadcast them to
    all connected real-time-graph clients.

    BUG FIX: the original signature was ``post(self, client=None)`` —
    Flask calls view functions with no positional arguments, so every
    request raised TypeError. The stray ``self`` is removed.
    """
    if not client:
        print('Error with connection to DB!')
        return 'Error with DB.'
    data = request.json
    # InfluxDB line protocol expects a nanosecond timestamp.
    now = int(datetime.today().timestamp() * 10**9)
    for key, value in data.items():
        client.write(f'{key} value={value} {now}')
    # Push the fresh readings to every currently connected client.
    emit('update', (datetime.today().strftime('%Y-%m-%d %H:%M:%S'), data), namespace='/real-time-graph', broadcast=True)
    return 'Ok.'
@app.route('/command', methods=['POST'])
def add_commands():
    """Accept a control command from the UI and update shared state.

    Commands: 'A'/'D' switch lamp `arg` on/off; 'R'/'L' accumulate a
    motor delta of `arg`.

    BUG FIX: the guard used ``and``, so a request missing only one of
    'command'/'arg' slipped past it and crashed below; it now rejects
    the request when either key is absent.
    """
    data = request.json
    print(data)
    if 'command' not in data or 'arg' not in data:
        return 'Failed.'
    if data['command'] in ('A', 'D'):
        # 'A' activates, 'D' deactivates the lamp at index arg.
        light_state[int(data['arg'])]['val'] = int(data['command'] == 'A')
    elif data['command'] in ('R', 'L'):
        global motors_state
        motors_state += int(data['arg'])
    return 'Success!'
@app.route('/get_command')
def get_command():
    """Hand the device its pending commands: lamp states, the remote
    control flag, and the accumulated motor delta (consumed on read)."""
    global motors_state
    commands = {
        'light': [bulb['val'] for bulb in light_state[1:]],
        'control': light_state[0]['val'],
    }
    if motors_state:
        commands['motors'] = motors_state
        motors_state = 0  # the delta is delivered exactly once
    return jsonify(commands)
@app.route('/bulbs')
def bulbs_update():
    """Report every bulb's on/off value keyed bulb0..bulbN."""
    state = {}
    for num, bulb in enumerate(light_state):
        state[f'bulb{num}'] = bulb['val']
    return jsonify(state)
@app.route('/view')
@work_with_base
def show_last_state(client=None):
    """Render the most recent value of every measurement in the database."""
    if not client:
        print('Error with connection to DB!')
        return 'Error with DB.'
    measurements = [point['name'] for point in client.query('SHOW measurements').get_points()]
    raw = client.query(f'SELECT last(value) from {", ".join(measurements)}')
    latest = {}
    for idx, point in enumerate(raw.get_points()):
        latest[measurements[idx]] = point["last"]
    return render_template('data.html', data=latest)
@app.route('/graph')
@work_with_base
def graph(client=None):
    """Plot the full history of one measurement as a Plotly line chart.

    The measurement is chosen via the ?plot= query parameter; unknown
    names fall back to the first available measurement.
    """
    if not client:
        print('Error with connection to DB!')
        return 'Error with DB.'
    measurements = [point['name'] for point in client.query('SHOW measurements').get_points()]
    measurement = measurements[0]
    if request.args.get('plot') in measurements:
        measurement = request.args['plot']
    points = client.query(f"SELECT value FROM {measurement}")
    df = pandas.DataFrame(list(points.get_points()))
    df['time'] = pandas.to_datetime(df['time'])
    print(df)
    figure = graph_objs.Figure()
    figure.add_trace(graph_objs.Scatter(x=df['time'], y=df['value']))
    figure.update_layout(title=f"График {measurement}",
                         xaxis_title="Дата",
                         yaxis_title=measurement,)
    return render_template('graph.html', plot=to_json(figure), filter=measurement)
@socketio.on('connect', namespace='/real-time-graph')
def handle_message():
    # Log each websocket client joining the live-graph namespace.
    print('client connected')
@socketio.on('disconnect', namespace='/real-time-graph')
def handle_json():
    # Log each websocket client leaving the live-graph namespace.
    print('client disconnected')
@app.route('/')
@app.route('/index')
def index():
    # Home page: lists each lamp with its index and current state.
    return render_template('index.html', data=enumerate(light_state))
if __name__ == '__main__':
    # Development entry point; use the commented host for LAN access.
    socketio.run(app, debug=True)
    # socketio.run(app, host='0.0.0.0')
|
# GRU hyperparameters for the multi-task (rumor + stance) model.
input_length = 400  # the size of each embedded tweet (the size of the input vector)
hidden_length_rumors = 400  # the size of the hidden vectors of the rumor detection task GRU
hidden_length_stances = 400  # the size of the hidden vectors of the stance detection task GRU
hidden_length_shared = 200  # the size of the hidden vectors of the shared GRU
output_dim_rumors = 3  # output size for rumor detection (True rumor, False rumor, Unverified)
output_dim_stances = 4  # output size for stance classification (Support, Deny, Query, Comment)
task_stances_no = 1  # task id of the stance-detection head
task_rumors_no = 2  # task id of the rumor-detection head
|
def howManyLightsabersDoYouOwn(name='Luke'):
    """Return 18 when asked about 'Zach', otherwise 0.

    (how_many_lightsabers_do_you_own would be PEP8; the mixedCase name
    is forced by CodeWars.)
    """
    if name == 'Zach':
        return 18
    return 0
|
import ctypes
import os
def gotoxy(x,y):
    """Move the Windows console cursor to column x, row y.

    Packs (y, x) into the COORD dword expected by
    SetConsoleCursorPosition; -11 is STD_OUTPUT_HANDLE.
    Windows-only (uses ctypes.windll).
    """
    return ctypes.windll.kernel32.SetConsoleCursorPosition(ctypes.windll.kernel32.GetStdHandle(-11),(((y&0xFFFF)<<0x10)|(x&0xFFFF)))
def startScreen():
    """Clear the console, draw the program title and team roster at fixed
    coordinates, wait for Enter, then clear again. Windows-only ('cls')."""
    os.system('cls')
    gotoxy(30,5)
    print("<Exercise Objective Setting and Achievement Program>")
    gotoxy(70,16)
    print("201711370 Kang\tYunseok")
    gotoxy(70,17)
    print("201911156 Kim\tByeolchan")
    gotoxy(70,18)
    print("201911175 Park\tJinyoung")
    gotoxy(70,19)
    print("201911192 Yang\tSukjoon")
    gotoxy(70,20)
    print("201910514 Jo\tJaehun")
    # Block until the user presses Enter.
    input()
    os.system('cls')
# Tutorial script demonstrating Python's core collection types.
# Python List: different types is an option
# -----------------------------------------
a = [5, 10, 15, 20, 25, 30, 35, 40]
# a[2] = 15
print("a[2] = ", a[2])
# a[0:3] = [5, 10, 15]
print("a[0:3] = ", a[0:3])
# a[5:] = [30, 35, 40]
print("a[5:] = ", a[5:])
# Lists are mutable: changeable in place
a[2] = 14
print("a[2] was changed: ", a)
# Python Tuple: Immutable Lists
# -----------------------------
t = (5, 'program', 1+3j)
# t[1] = 'program'
print("t[1] = ", t[1])
# t[0:3] = (5, 'program', (1+3j))
print("t[0:3] = ", t[0:3])
# assigning to a tuple element would raise TypeError
print("t[0] = 10 will generate an error!")
print("t[0] = 10")
# Python Set: unordered collection of unique items
# ------------------------------------------------
a = {5, 2, 3, 2, 5, 5, 3, 1, 4}
# data type of variable a
print(type(a))
# sets eliminate duplicates (bare expression: visible only in a REPL)
a
# slicing is not an option: sets are unordered, so they have no index
print("slicing a set is not possible, it has no indeces")
print("a[1]")
# Python Dictionary: unordered collection of key-value pairs
# ----------------------------------------------------------
d = {1:'value', 'key':2}
print(type(d))
print("d[1] = ", d[1])
print("d['key'] = ", d['key'])
# Accessing a missing key raises KeyError
# Only keys are retrievable; values can only be accessed via their key
print("Generates error")
print("print('d[2] = ', d[2])")
|
import sys
import argparse
import os
import re
import time
def argument_setting():
    """Parse command-line options: -c (required cpp file name), optional
    -a (answer file) and -i (sample input file)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", required=True, help='Just cpp file name')
    parser.add_argument("-a", required=False, help='Answer file name (File must in "answers" directory)')
    parser.add_argument("-i", required=False, help='Sample input file name (File must in "inputs" directory')
    return parser.parse_args()
def make_answer_name(week, problem_num):
    """Build the expected-answer file name: sample-<week>_<problem>.1.out"""
    return f"sample-{week}_{problem_num}.1.out"
def make_input_name(week, problem_num):
    """Build the sample-input file name: sample-<week>_<problem>.1.in"""
    return f"sample-{week}_{problem_num}.1.in"
def score(code_file, answer_file, input_file):
    """Compile `code_file` with g++, run it on the sample input and grade
    stdout line-by-line against the answer file.

    Prints the percentage of matching lines and the wrong line numbers.
    Returns -1 when a required file is missing, otherwise None.
    """
    output_file = code_file.replace('.cpp' ,'')
    # Derive week/problem ids from a name like 'week1_2.cpp'.
    file_name_split = code_file.split('_')
    week_str = file_name_split[0]
    week = week_str[0]
    num = re.sub(r'[^0-9]', '', week_str)
    week += num
    # NOTE(review): this splits code_file (not output_file), so problem_num
    # still carries the '.cpp' suffix (e.g. '2.cpp') — confirm the sample
    # file naming convention actually matches make_answer_name's output.
    problem_num = file_name_split[1]
    if answer_file == None:
        answer_file = make_answer_name(week, problem_num)
    if input_file == None:
        input_file = make_input_name(week, problem_num)
    if code_file not in os.listdir():
        print("Execution File is not exist!")
        return -1
    # Ensure the working directories exist before compiling/running.
    if "outputs" not in os.listdir():
        os.mkdir("outputs")
    if "programs" not in os.listdir():
        os.mkdir("programs")
    if "answers" not in os.listdir():
        os.mkdir("answers")
    if "inputs" not in os.listdir():
        os.mkdir("inputs")
    if answer_file not in os.listdir("./answers"):
        print("Answer File is not exist!")
        return -1
    if input_file not in os.listdir("./inputs"):
        print("Input File is not exist!")
        return -1
    print("Now I am Compiling your cpp file...\n")
    os.system(f'g++ -o ./programs/{output_file} {code_file}')
    # start/end bracket only the program run (the elapsed time is
    # currently measured but never reported).
    start = time.time()
    os.system(f'./programs/{output_file} < ./inputs/{input_file} > ./outputs/{output_file}.out')
    end = time.time()
    outputs = open("./outputs/"+output_file+".out").readlines()
    answers = open("./answers/"+answer_file).readlines()
    wrong_list = []
    score = 0
    # Compare pairwise; zip() silently ignores extra lines in either file.
    for idx, (output, answer) in enumerate(zip(outputs, answers)):
        output = output.strip()
        answer = answer.strip()
        if output == answer:
            score += 1
        else:
            print(f"{idx + 1} line : {output} \t {answer} -------------------------> wrong line")
            wrong_list.append(idx+1)
    sc = round(score/len(outputs) * 100, 2)
    wrong_list = str(wrong_list).lstrip("[").rstrip("]")
    print(f"Your Problem Solving Score : {sc}")
    print(f"Wrong Answer Lines : {wrong_list}\n")
if __name__ == '__main__':
    # Parse CLI options, show the ASCII-art banner, then grade the file.
    args = argument_setting()
    print(" _____ _ ")
    print(" | __ \\ | | ")
    print(" | |__) | __ ___ | | ___ _ __ ___ ")
    print(" | ___/ '__/ _ \| |/ _ \ '_ ` _ \ ")
    print(" | | | | | (_) | | __/ | | | | | ")
    print(" |_| |_|__\___/|_|\___|_| |_| |_| ")
    print(" / ____| | | (_) ")
    print(" | (___ ___ | |_ ___ _ __ __ _ ")
    print(" \___ \ / _ \| \ \ / / | '_ \ / _` |")
    print(" ____) | (_) | |\ V /| | | | | (_| |")
    print(" |_____/ \___/|_| \_/ |_|_| |_|\__, |")
    print(" __/ |")
    print(" |___/ \n")
    print("Hello! This is Problem Solving Example Scoring Program\n")
    score(args.c, args.a, args.i)
|
import unittest
import time
from selenium import webdriver
class testBaidu(unittest.TestCase):
    """Selenium smoke tests: search Baidu and check the result page title."""

    @classmethod
    def setUpClass(cls) -> None:
        # Runs once before all cases: start one shared Chrome session.
        cls.driver = webdriver.Chrome()
        cls.base_url = 'https://www.baidu.com'

    def setUp(self) -> None:
        # Runs before each case.
        print('我没用了吗,开始')

    def baidu_search(self,search_key):
        """Open the home page, type `search_key` and submit the search."""
        self.driver.get(self.base_url)
        self.driver.find_element_by_id('kw').send_keys(search_key)
        self.driver.find_element_by_id('su').click()
        # Crude fixed wait for results; prefer WebDriverWait in real code.
        time.sleep(10)

    def test_search_key_selenium(self):
        search_key = 'selenium'
        self.baidu_search(search_key)
        self.assertEqual(self.driver.title,search_key+"_百度搜索")

    def test_search_key_unittest(self):
        search_key = 'unittest'
        self.baidu_search(search_key)
        self.assertEqual(self.driver.title, search_key + "_百度搜索")

    def tearDown(self) -> None:
        # Runs after each case.
        print('我没用了吗,那我走了')

    @classmethod
    def tearDownClass(cls) -> None:
        # Runs once after all cases: shut the browser down.
        cls.driver.quit()
if __name__ == '__main__':
    unittest.main()
# Author's notes (kept verbatim, Chinese): setUp/tearDown run around each
# case; setUpClass/tearDownClass need @classmethod and run once per class.
"""
1.用setUp与setUpClass区别
setup():每个测试case运行前运行
teardown():每个测试case运行完后执行
setUpClass():必须使用@classmethod 装饰器,所有case运行前只运行一次
tearDownClass():必须使用@classmethod装饰器,所有case运行完后只运行一次
2.@是修饰符,classmethod是python里的类方法
"""
from .UQRSAnalysis import UQRSAnalysis
from .UQAnalysis import UQAnalysis
from .ResponseSurfaces import ResponseSurfaces
from .RSAnalyzer import RSAnalyzer
from .SensitivityAnalysis import SensitivityAnalysis
from .Common import Common
class RSSensitivityAnalysis(UQRSAnalysis):
    """Response-surface based Sobol' sensitivity analysis driven by PSUADE."""

    # PSUADE commands indexed by subType:
    # first-order, second-order, total-order Sobol' indices.
    psuadeNames = ['rssobol1','rssobol2','rssoboltsi']

    def __init__(self, ensemble, output, subType, responseSurface, rsOptions = None,
                 userRegressionFile = None, xprior = None):
        super(RSSensitivityAnalysis, self).__init__(ensemble, output, UQAnalysis.RS_SENSITIVITY,
                                                    responseSurface, subType, rsOptions,
                                                    userRegressionFile, xprior)
        # Error bars are currently disabled for every subtype.
        sa_bars = [False, False, False]
        self.showErrorBars = sa_bars[self.subType]

    @staticmethod
    def getSubTypeFullName(num):
        # Human-readable name for the analysis subtype.
        return SensitivityAnalysis.fullNames[num]

    def analyze(self):
        """Write the ensemble to a PSUADE file, run the SA command, and
        archive the resulting MATLAB file. Returns its name or None."""
        data = self.ensemble
        fnameRS = Common.getLocalFileName(RSAnalyzer.dname, data.getModelName().split()[0], '.rsdat')
        index = ResponseSurfaces.getEnumValue(self.responseSurface)
        # USER response surfaces need fixed inputs written as variables.
        fixedAsVariables = index == ResponseSurfaces.USER
        data.writeToPsuade(fnameRS, fixedAsVariables=fixedAsVariables)
        cmd = RSSensitivityAnalysis.psuadeNames[self.subType]
        mfile = RSAnalyzer.performSA(fnameRS, self.outputs[0], cmd, self.showErrorBars,
                                     self.responseSurface, self.rsOptions,
                                     self.userRegressionFile, self.xprior)
        if mfile is not None:
            self.archiveFile(mfile)
        return mfile

    def showResults(self):
        """Restore the archived MATLAB results file and plot it."""
        cmd = RSSensitivityAnalysis.psuadeNames[self.subType]
        cmd_ = cmd
        if self.showErrorBars:
            # PSUADE appends 'b' to the matlab file name when bars are on.
            cmd = cmd + 'b'
        mfile = 'matlab' + cmd + '.m'
        self.restoreFromArchive(mfile)
        RSAnalyzer.plotSA(self.ensemble, self.outputs[0], self.responseSurface,
                          cmd_, self.showErrorBars, mfile)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
from web_backend.nvlserver.module import nvl_meta
from sqlalchemy import BigInteger, String, Column, Boolean, DateTime, Table
from sqlalchemy.sql.functions import func
# Audit table recording each API request/response pair.
request_logger = Table(
    'request_logger',
    nvl_meta,
    Column('id', BigInteger, primary_key=True),
    # Endpoint path that was hit.
    Column('route', String(500)),
    Column('request_data', String(12000)),
    Column('response_data', String(12000)),
    # Soft-delete / visibility flags.
    Column('active', Boolean, default=True, nullable=False),
    Column('deleted', Boolean, default=False, nullable=False),
    # Timestamps are set DB-side (func.now) and are timezone-aware.
    Column('created_on', DateTime(timezone=True), server_default=func.now(), nullable=False),
    Column('updated_on', DateTime(timezone=True),
           server_default=func.now(), onupdate=func.now(), nullable=False)
)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import argparse
import os
import sys
import numpy as np
import tensorflow as tf
import math
from skimage.measure import find_contours,approximate_polygon
import deeplab_model
from utils import preprocessing
from utils import dataset_util
from PIL import Image
import matplotlib.pyplot as plt
class Deeplabv3plus():
    """Inference wrapper around a DeepLabv3+ TensorFlow Estimator checkpoint."""

    def __init__(self, modelPath):
        self.modelPath = modelPath
        self.output_stride = 16
        # Input images may differ in size, so prediction must run one at a time.
        self.batch_size = 1
        self.base_architecture = 'resnet_v2_101'
        self._NUM_CLASSES = 2

    def predict(self, image_files):
        """Build the estimator and yield raw predictions for `image_files`."""
        model = tf.estimator.Estimator(
            model_fn=deeplab_model.deeplabv3_plus_model_fn,
            model_dir=self.modelPath,
            params={
                'output_stride': self.output_stride,
                # BUG FIX: the batch size was hard-coded to 12 even though
                # self.batch_size (and the original comment) says it must
                # be 1 because the images' size may differ.
                'batch_size': self.batch_size,
                'base_architecture': self.base_architecture,
                'pre_trained_model': None,
                'batch_norm_decay': None,
                'num_classes': self._NUM_CLASSES,
            })
        predictions = model.predict(
            input_fn=lambda: preprocessing.eval_input_fn(image_files))
        return predictions

    def infere(self, image_files, imageId=None, debug=False):
        """Run inference and return a list of per-object dicts with keys
        classId, label, area, perimetr, verts (and objId when `imageId`
        is given).

        NOTE(review): the loop scans labels 0..12 although _NUM_CLASSES
        is 2 — confirm the intended label range.
        """
        predictions = self.predict(image_files)
        result = []
        for data in predictions:
            for i in range(13):
                label = i
                img = data['decoded_labels']
                img = img[:, :, 0]
                if np.all(img == 0):
                    continue
                # 255 marks ignore pixels; clear them before masking class i.
                img = np.where(img == 255, 0, img)
                cat_img = np.where(img == i, 1, 0)
                mask = cv2.resize(cat_img.astype(np.uint8), (512, 512))
                area, perimetr, cv2Poly = self.getMaskInfo(mask, (10, 10))
                if cv2Poly is None:
                    print("Warning: Object is recognized, but contour is empty!")
                    continue
                verts = cv2Poly[:, 0, :]
                r = {'classId': data['classes'][0][i],
                     'label': label,
                     'area': area,
                     'perimetr': perimetr,
                     'verts': verts}
                if imageId is not None:
                    r['objId'] = "{}_obj-{}".format(imageId, i)
                result.append(r)
        return result

    def getMaskInfo(self, img, kernel=(10, 10)):
        """Return (area, perimeter, contour) for the largest blob in `img`,
        or [None, None, None] when no contour is found.

        A morphological open erases small speckles and a close fills
        little holes before contour extraction.
        """
        kernel = np.ones(kernel, np.uint8)
        thresh = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
        thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=4)
        thresh = thresh.astype('uint8')
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        maxArea = 0
        maxContour = None
        # Keep only the largest-area contour.
        for cnt in contours:
            a = cv2.contourArea(cnt)
            if a > maxArea:
                maxArea = a
                maxContour = cnt
        if maxContour is None:
            return [None, None, None]
        perimeter = cv2.arcLength(maxContour, True)
        # aproximate contour with the 1% of squared perimiter accuracy
        # approx = cv2.approxPolyDP(maxContour, 0.01*math.sqrt(perimeter), True)
        return maxArea, perimeter, maxContour
|
# marketplace/marketplace.py
import os
from flask import Flask, render_template
import grpc
from recommendations_pb2 import BookCategory, RecommendationRequest
from recommendations_pb2_grpc import RecommendationsStub
# creates a Flask app to render a web page for the user
app = Flask(__name__)
# create the gRPC channel and stub; the host defaults to localhost but can
# be overridden via the RECOMMENDATIONS_HOST environment variable.
recommendations_host = os.getenv("RECOMMENDATIONS_HOST", "localhost")
recommendations_channel = grpc.insecure_channel(
    f"{recommendations_host}:50051"
)
recommendations_client = RecommendationsStub(recommendations_channel)
# create render_homepage() to be called when the user visits the home page of your app
@app.route("/")
def render_homepage():
recommendations_request = RecommendationRequest(
user_id=1, category=BookCategory.MYSTERY, max_results=3
)
recommendations_response = recommendations_client.Recommend(
recommendations_request
)
return render_template(
"homepage.html",
recommendations=recommendations_response.recommendations,
) |
'''
Created on Jan 24, 2016
@author: Andrei Padnevici
@note: This is an exercise: 9.2
'''
# Count how many mail messages were received per weekday by scanning
# "From <addr> <day> ..." header lines of a mailbox file.
try:
    file = open(input("Enter file name: "))
except OSError:
    # FIX: was a bare except, which also swallowed KeyboardInterrupt/SystemExit.
    print("Invalid file")
    exit()
daysDict = dict()
day = None
for line in file:
    words = line.split()
    if len(words) == 0 or words[0] != 'From': continue
    try:
        day = words[2]
    except IndexError:
        # FIX: narrowed from a bare except; only a short 'From' line lacks words[2].
        print("Cannot parse this '%s' line and get the day name" % line.strip())
    if day is not None: daysDict[day] = daysDict.get(day, 0) + 1
print(daysDict)
|
############### adaptive gain configuration for FAST Multi-beam backend -- Roach2s ###########
### This script finds a proper gain value for 8-bit output data: it adjusts the gain
### step by step until the average of the received spectrum falls into the preset range.
### NOTE: Python 2 script (print statements, corr library).
#################
#!/usr/bin/python
import time,struct, socket
from corr import katcp_wrapper, log_handlers
import numpy as np
#roach2 = 'r2d021403.s6.pvt' # (10.0.1.169) mounted on asa2
roach2 = '10.128.2.7' # for China machine
katcp_port = 7147
#IP1 = "10.10.12.2" #bind on IP addresses
IP1 = "192.168.1.127" #for China GPU server
PORT = 12345
N_FREQ = 4096  # number of frequency channels per polarization
thres_low = 10 # low threshold
thres_high = 30 # high threshold
gain_step = 0x0100 # step gain
if __name__ == '__main__':
data1_tmp = np.zeros(N_FREQ)
data2_tmp = np.zeros(N_FREQ)
unit = 'u0'
# connect to roach2
print('Connecting to server %s on port %i... ' % (roach2, katcp_port)),
fpga = katcp_wrapper.FpgaClient(roach2)
time.sleep(0.1)
if fpga.is_connected():
print('ok')
else:
print('ERROR connecting to server %s on port %i.\n' % (roach2,katcp_port))
exit_fail()
# read the original gain value
gain = fpga.read_int(unit+'_gain')
print('original gain from %s is: 0x%X ...') % (unit,gain)
while True:
# receive data from 10 GbE port, close the socket after packets receiving
sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock1.bind((IP1, PORT))
data1, addr1 = sock1.recvfrom(4104)
data2, addr2 = sock1.recvfrom(4104)
sock1.close()
# extract the xx and yy spectrum from 2 packets
header1 = struct.unpack('<Q', data1[0:8])[0]
header2 = struct.unpack('<Q', data2[0:8])[0]
data1_tmp = np.fromstring(data1[8:],dtype=np.uint8)
data2_tmp = np.fromstring(data2[8:],dtype=np.uint8)
xx = np.append(data1_tmp [0::2],data2_tmp [0::2])
yy = np.append(data1_tmp [1::2],data2_tmp [1::2])
# calculate the average value of the spectrum
avg_xx = np.average(xx)
avg_yy = np.average(yy)
max_xx = np.amax(xx)
max_yy = np.amax(yy)
print 'max. value of xx is: %d, max. value of yy is: %d' % (max_xx,max_yy)
print 'average value of xx is: %f, average value of yy is: %f' % (avg_xx,avg_yy)
# if the average data falls into the preset range then quit
if((avg_xx > thres_low) & (avg_xx < thres_high)):
print('Adaptive gain configuration for xx is done!')
# if yy heven't finsh then only adjust yy
if((avg_yy > thres_low) & (avg_yy < thres_high)):
print('Adaptive gain configuration for yy is done!')
print('Adaptive gain configuration is done!')
break
elif(avg_yy < thres_low):
gain += gain_step <<16
elif(avg_yy > thres_high):
gain -= gain_step <<16
elif(avg_xx < thres_low):
gain += gain_step <<16 | gain_step
elif(avg_xx > thres_high):
gain -= gain_step <<16 | gain_step
print('Configuring spectrometer "%s" scale coefficients, gain=0x%X ... ' % (unit, gain)),
fpga.write_int(unit + '_gain', gain) # in 16_8-16_8 format
print('done')
print('waitting to refresh data ...')
time.sleep(1)
print('done')
|
"""
수 정렬하기
N <1,000,000
"""
import sys
n = int(sys.stdin.readline())
data = []
for _ in range(n):
data.append(int(sys.stdin.readline()))
data.sort()
for d in data:
print(d) |
#!/usr/bin/python
#-*- coding: UTF-8 -*-
# creat by lilin
# Python 2 crawler: scrape image URLs from a search result page and download them.
import os
import re
import sys
import json
import urllib
# Example legacy search URLs (sogou/baidu):
# http://pic.sogou.com/pics?query=%C6%FB%B3%B5&mode=1&start=144&reqType=ajax&reqFrom=result&tn=0
#http://pic.sogou.com/pics?query=%B4%F2%B9%FE%C7%B7&w=05002100&p=40030500&_asf=pic.sogou.com&_ast=1493904488&sc=index&oq=dahaqian&ri=0&sourceid=sugg&sut=7487&sst0=1493904487711
baseurl = 'https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=index&fr=&hs=0&xthttps=111111&sf=1&fmq=&pv=&ic=0&nc=1&z=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&word=%E5%95%8A%E5%93%88'
#baseurl = 'https://image.baidu.com/pics?query='
#j_1 = '&mode=1&start='
#j_2 = '&reqType=ajax&reqFrom=result&tn=0'
start = 0 # start index
end = 48 * 20 # total count , about 20 pages pic
save_path = 'sg_pic_down/'
if os.name == 'nt':
    save_path = 'sg_pic_down\\'
if not os.path.exists(save_path):
    os.makedirs(save_path)
pic_index = 0
print 'cur encoding:' + sys.stdin.encoding
# Re-encode the query string to GBK (legacy engine expectation) before quoting.
query_words = '人打哈欠'.decode('utf8').encode('gbk')
# URL-quote so the bytes are safe inside a URL and accepted by the server.
query_item = urllib.quote(query_words)
print 'find[%s][%s]'%(query_words,query_item)
def format_url(query_str,start):
    # NOTE(review): `start` (page offset) is ignored since the switch to the
    # hard-coded Baidu URL, so pagination appears broken — confirm intent.
    return baseurl + query_str
pic_url_start = '"pic_url":"'
pic_url_start_len = len(pic_url_start)
def find_one_pic(page,start):
m = page.find(pic_url_start,start) #字符串中查找子字符串,如果找到返回首字母位置,如果找不到返回-1
if m >= 0:
n = page.find('"',m+pic_url_start_len)
if n >= 0:
s = page[m+pic_url_start_len:n]
urls = s.split('/')
if len(urls) > 0:
return (s,n,urls[-1])
return None
def split_picurl(page):
    """Collect every (url, filename) pair embedded in the page markup."""
    found = []
    cursor = 0
    while True:
        hit = find_one_pic(page, cursor)
        if hit is None:
            break
        cursor = hit[1]
        found.append((hit[0], hit[2]))
    return found
def sg_pic_down(index):
    """Fetch one result page starting at `index`, download every picture
    found on it, and return how many were found (None when the page
    fetch itself fails)."""
    global pic_index  # module-wide download counter
    u = format_url(query_item,index)
    #print 'get pic list from[%s]'%(u)
    try:
        f = urllib.urlopen(u)
        page = f.read()  # read the whole result page
        pics = split_picurl(page)
        for item in pics:
            local_f = save_path + item[1]
            #with open(local_f,'wb') as lf:
            # pic_f = urllib.urlopen(item[0])
            # lf.write(pic_f.read())
            #print 'pic[%s][%s]'%(s,urls[-1])
            try:
                urllib.urlretrieve(item[0],local_f)
                print 'down[%s]->[%s]'%(item[0],local_f)
            except Exception as ex:
                # A single failed download must not abort the page.
                print 'Exception.urlretrieve:'+str(ex)
        return len(pics)
    except UnicodeDecodeError as ude:
        print "UnicodeDecodeError:"+str(ude)
    except Exception as ex:
        print 'Exception:'+str(ex)
if __name__ == '__main__':
    # Walk result pages until `end`, advancing by the number of pictures
    # found per page. NOTE(review): sg_pic_down returns None on error, and
    # `index += None` raises TypeError (not caught here) — confirm intent.
    index = start
    while True:
        if index >= end:
            break
        try:
            index += sg_pic_down(index)
        except KeyboardInterrupt as ki:
            print 'KeyboardInterrupt Close'
            break
|
__author__ = 'bobby'
import struct
import socket
import dicts
def tcp(ptk):
    """Decode and print the 20-byte TCP header (frame bytes 34..54);
    dumps the payload as HTTP when a well-known web port is the source."""
    tcpHeader = ptk[0][34:54]
    tcp_hdr = struct.unpack("!HHLL1sBHHH", tcpHeader) # HH 2 short integers 1 for each port, 16B for trash
    print 'TCP Header:'+'\n'+'Source port: {} Destination port: {} Sequence Number: {} ACK Number: {} '.format(*tcp_hdr[:4])
    print 'TCP Flags: '+dicts.flag_dict[str(hex(tcp_hdr[5]))]
    print 'Window: {} Checksum: {} Urgent Pointer: {}'.format(*tcp_hdr[6:])
    # HTTP data (NOTE(review): port 443 traffic is TLS, so printing it as
    # plain HTTP text will show ciphertext)
    if tcp_hdr[0] == 80 or tcp_hdr[0] == 443: # or tcp_hdr[1] == 80 or tcp_hdr[1] == 443
        data = ptk[0][58:]
        print "HTTP (user-agent and data) : "+'\n'+data+'\n'+'-'*25
def udp(ptk):
udp_datagram = ptk[0][34:38]
udp_hdr = struct.unpack("!HHHH", udp_datagram)
print "UDP Header:"+'\n'+"Source port: {} Destiny port: {} length: {} checksum: {}".format(*udp_hdr)+'\n'+'-'*25
def icmp(ptk):
    """Decode and print the 4-byte ICMP header plus 4 payload bytes."""
    icmp_header = ptk[0][34:38]
    icmp_hdr = struct.unpack("!BBH", icmp_header)
    print "ICMP Header:"+'\n'+'Type: {} Code: {} checksum: {}'.format(*icmp_hdr)+'\n'+'Data: '+ptk[0][38:42]+'\n'+\
        '-'*25
# Captures the incoming and outgoing connections.
# Create a new socket using the given address family, socket type and protocol number, 0x0003 -> all protocols
try:
rawSocket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket.htons(0x0003)) # 0x0800 -> IP (only incoming)
while 1:
# Receive data from the socket
ptk = rawSocket.recvfrom(65565) # returns the data into a tuple
'The Ethernet header'
ethernetHeader = ptk[0][0:14] # 14 Bytes Ethernet header (first element, the first 14 bytes of it)
eth_hdr = struct.unpack("!6s6sH", ethernetHeader) # 6B dst_mac address , 6B src_mac address, 2B ether_type
if socket.ntohs(eth_hdr[2]) == 8: # IP
'The IP header'
ipHeader = ptk[0][14:34] # 20 Bytes IP header
ip_hdr = struct.unpack("!9sB2s4s4s", ipHeader) # 12+4+4 = 20-> 12 trash, 4B srcAdrss, 4B dstAdrr
if ip_hdr[1] == 6: # TCP
'The TCP header'
tcp(ptk)
elif ip_hdr[1] == 16: # UDP
'The UDP header'
udp(ptk)
elif ip_hdr[1] == 1: # ICMP
'The ICMP header'
icmp(ptk)
rawSocket.close()
except socket.error:
print "Need to be root to run the socket!"
|
import glob
import numpy as np
import torch
from torch.utils.data import Dataset
from torchvision import transforms
def get_transform():
    """Compose the dataset transform pipeline (currently just ToTensor)."""
    return transforms.Compose([transforms.ToTensor()])
class GraphData(Dataset):
    """Node-position samples paired with one fixed adjacency matrix.

    Loads every '<name>_data*' array under `folder` (only the first file
    when `sample` is True) plus the shared '<name>_adj.npy'.
    """

    def __init__(self, name, folder, sample=False):
        self.files = glob.glob(folder + name + '_data*')  # [:1000]
        self.adj = np.load(folder + name + '_adj.npy', allow_pickle=True)
        chunks = []
        for path in self.files:
            loaded = np.load(path, allow_pickle=True)
            chunks.append(loaded)
            print(path, loaded.shape)
            if sample:
                break
        self.data = np.concatenate(chunks, axis=0)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        positions = self.data[idx]
        return torch.tensor(positions).type(torch.FloatTensor), torch.tensor(self.adj).type(torch.FloatTensor)
|
"""models"""
from django.db import models
class Board(models.Model):
"""Database model for each task"""
user_id = models.CharField(max_length=255)
title = models.CharField(max_length=100)
content = models.TextField(default='')
isComplete = models.BooleanField(default=False)
|
from api.api.models import Player

# One-off maintenance script: refresh cached stats for every player.
for player in Player.objects.all():
    player.calculate_stats()
|
import numpy as np
from ahrs.common.orientation import q2R, ecompass, acc2q
from ahrs.common.mathfuncs import cosd, sind, skew
class EKF:
def __init__(self,
             gyr: np.ndarray = None,
             acc: np.ndarray = None,
             mag: np.ndarray = None,
             frequency: float = 100.0,
             frame: str = 'NED',
             **kwargs):
    """Set up the EKF: sensor arrays, sample period, reference frames,
    measurement-noise covariance, and (immediately) the batch estimate.

    Accepted kwargs include Dt, q0, noises, var_gyr/var_acc/var_mag and
    magnetic_ref.
    """
    self.gyr = gyr
    self.acc = acc
    self.mag = mag
    self.frequency = frequency
    self.frame = frame # Local tangent plane coordinate frame
    # Sample period; defaults to 1/frequency unless Dt is given explicitly.
    self.Dt = kwargs.get('Dt', 1.0/self.frequency)
    self.q0 = kwargs.get('q0')
    self.P = np.identity(4) # Initial state covariance
    self.R = self._set_measurement_noise_covariance(**kwargs)
    self._set_reference_frames(kwargs.get('magnetic_ref'), self.frame)
    # Process of data is given
    # if self.gyr is not None and self.acc is not None:
    # NOTE(review): _compute_all is always called even though the guard
    # above is commented out — constructing with gyr/acc None will raise.
    self.Q = self._compute_all(self.frame)
def _set_measurement_noise_covariance(self, **kw) -> np.ndarray:
self.noises = np.array(kw.get('noises', [0.3**2, 0.5**2, 0.8**2]))
if 'var_gyr' in kw:
self.noises[0] = kw.get('var_gyr')
if 'var_acc' in kw:
self.noises[1] = kw.get('var_acc')
if 'var_mag' in kw:
self.noises[2] = kw.get('var_mag')
self.g_noise, self.a_noise, self.m_noise = self.noises
return np.diag(np.repeat(self.noises[1:], 3))
def _set_reference_frames(self, mref: float, frame: str = 'NED') -> None:
    """Set the gravity (a_ref) and magnetic (m_ref) reference vectors for
    the chosen local tangent frame ('NED' or 'ENU').

    mref may be None (use the WMM model at Munich), a magnetic dip angle
    in degrees (int/float), or a full 3-vector. Raises ValueError for an
    unknown frame.
    """
    if frame.upper() not in ['NED', 'ENU']:
        raise ValueError(f"Invalid frame '{frame}'. Try 'NED' or 'ENU'")
    # Magnetic Reference Vector
    if mref is None:
        # Local magnetic reference of Munich, Germany
        # NOTE(review): confirm these constants really live in
        # ahrs.common.mathfuncs for the installed ahrs version.
        from ahrs.common.mathfuncs import MUNICH_LATITUDE, MUNICH_LONGITUDE, MUNICH_HEIGHT
        from ahrs.utils.wmm import WMM
        wmm = WMM(latitude=MUNICH_LATITUDE, longitude=MUNICH_LONGITUDE, height=MUNICH_HEIGHT)
        self.m_ref = np.array([wmm.X, wmm.Y, wmm.Z]) if frame.upper() == 'NED' else np.array([wmm.Y, wmm.X, -wmm.Z])
    elif isinstance(mref, (int, float)):
        # Scalar dip angle (degrees) -> unit vector in the local frame.
        cd, sd = cosd(mref), sind(mref)
        self.m_ref = np.array([cd, 0.0, sd]) if frame.upper() == 'NED' else np.array([0.0, cd, -sd])
    else:
        self.m_ref = np.copy(mref)
    self.m_ref /= np.linalg.norm(self.m_ref)
    # Gravitational Reference Vector (unit, pointing along local vertical)
    self.a_ref = np.array([0.0, 0.0, -1.0]) if frame.upper() == 'NED' else np.array([0.0, 0.0, 1.0])
def _compute_all(self, frame: str) -> np.ndarray:
    """
    Estimate the quaternions given all sensor data.
    Attributes ``gyr``, ``acc`` MUST contain data. Attribute ``mag`` is
    optional.
    Returns
    -------
    Q : numpy.ndarray
        M-by-4 Array with all estimated quaternions, where M is the number
        of samples.
    """
    # if self.acc.shape != self.gyr.shape:
    # raise ValueError("acc and gyr are not the same size")
    num_samples = len(self.gyr)
    Q = np.zeros((num_samples, 4))
    Q[0] = self.q0
    if self.mag is not None:
        ###### Compute attitude with MARG architecture ######
        # Without a given q0, bootstrap from the first acc/mag sample.
        if self.q0 is None:
            Q[0] = ecompass(self.acc[0], self.mag[0], frame=frame, representation='quaternion')
        Q[0] /= np.linalg.norm(Q[0])
        # EKF Loop over all data
        # NOTE(review): updateAll is defined outside this view — confirm
        # its keyword signature matches both call forms below.
        for t in range(1, num_samples):
            if self.acc is None:
                Q[t] = self.updateAll(q=Q[t-1], gyr=self.gyr[t], mag=self.mag[t], mode="mag")
            else:
                Q[t] = self.updateAll(Q[t-1], self.gyr[t], self.acc[t], self.mag[t], mode="mag")
        return Q
    ###### Compute attitude with IMU architecture ######
    # Without a given q0, bootstrap from the first accelerometer sample.
    if self.q0 is None:
        Q[0] = acc2q(self.acc[0])
    Q[0] /= np.linalg.norm(Q[0])
    # EKF Loop over all data
    for t in range(1, num_samples):
        Q[t] = self.updateAll(Q[t-1], self.gyr[t], self.acc[t], mode="accel")
    return Q
def Omega(self, x: np.ndarray) -> np.ndarray:
"""Omega operator.
Given a vector :math:`\\mathbf{x}\\in\\mathbb{R}^3`, return a
:math:`4\\times 4` matrix of the form:
.. math::
\\boldsymbol\\Omega(\\mathbf{x}) =
\\begin{bmatrix}
0 & -\\mathbf{x}^T \\\\ \\mathbf{x} & \\lfloor\\mathbf{x}\\rfloor_\\times
\\end{bmatrix} =
\\begin{bmatrix}
0 & -x_1 & -x_2 & -x_3 \\\\
x_1 & 0 & x_3 & -x_2 \\\\
x_2 & -x_3 & 0 & x_1 \\\\
x_3 & x_2 & -x_1 & 0
\\end{bmatrix}
This operator is constantly used at different steps of the EKF.
Parameters
----------
x : numpy.ndarray
Three-dimensional vector.
Returns
-------
Omega : numpy.ndarray
Omega matrix.
"""
return np.array([
[0.0, -x[0], -x[1], -x[2]],
[x[0], 0.0, x[2], -x[1]],
[x[1], -x[2], 0.0, x[0]],
[x[2], x[1], -x[0], 0.0]])
def f(self, q: np.ndarray, omega: np.ndarray) -> np.ndarray:
    """Linearized process model (Prediction step).

    Integrates the quaternion one step with the first-order transition

    .. math::
        \\mathbf{f}(\\mathbf{q}_{t-1}) =
        \\Big(\\mathbf{I}_4 + \\frac{\\Delta t}{2}\\boldsymbol\\Omega_t\\Big)\\mathbf{q}_{t-1}

    Parameters
    ----------
    q : numpy.ndarray
        A-priori quaternion.
    omega : numpy.ndarray
        Angular velocity, in rad/s.

    Returns
    -------
    q : numpy.ndarray
        Linearized estimated quaternion in **Prediction** step.
    """
    # Build the state-transition matrix, then apply it to the quaternion.
    transition = np.identity(4)
    transition = transition + 0.5 * self.Dt * self.Omega(omega)
    return transition @ q
def dfdq(self, omega: np.ndarray) -> np.ndarray:
    """Jacobian of the linearized predicted state.

    .. math::
        \\mathbf{F} = \\frac{\\partial\\mathbf{f}(\\mathbf{q}_{t-1})}{\\partial\\mathbf{q}}
        = \\mathbf{I}_4 + \\frac{\\Delta t}{2}\\boldsymbol\\Omega(\\boldsymbol\\omega)

    Parameters
    ----------
    omega : numpy.ndarray
        Angular velocity in rad/s.

    Returns
    -------
    F : numpy.ndarray
        Jacobian of state.
    """
    half_step = 0.5 * self.Dt
    return np.identity(4) + self.Omega(half_step * omega)
def h_both(self, q: np.ndarray) -> np.ndarray:
    """Measurement model: rotate the reference vector(s) into the sensor frame.

    Uses the transpose of the rotation matrix of ``q``. When the current
    measurement ``self.z`` has fewer than 4 elements only the gravity
    reference is predicted; otherwise gravity and magnetic references are
    stacked into one 6-vector.
    """
    C = q2R(q).T
    if len(self.z) < 4:
        return C @ self.a_ref
    return np.r_[C @ self.a_ref, C @ self.m_ref]
def h_mag(self, q: np.ndarray) -> np.ndarray:
    """Magnetometer-only measurement model: the magnetic reference vector
    expressed in the sensor frame of ``q``."""
    C = q2R(q).T
    return C @ self.m_ref
def dhdq_both(self, q: np.ndarray, mode: str = 'normal') -> np.ndarray:
    """Jacobian dh/dq of the measurement model for the gravity (and,
    when ``self.z`` holds six elements, magnetic) reference vectors.

    Returns a 3x4 matrix (accelerometer only) or 6x4 (accelerometer and
    magnetometer).

    NOTE(review): ``mode`` is validated but never used below — the
    'refactored' variant appears to be unimplemented; confirm.
    """
    if mode.lower() not in ['normal', 'refactored']:
        raise ValueError(f"Mode '{mode}' is invalid. Try 'normal' or 'refactored'.")
    qw, qx, qy, qz = q
    # v stacks the navigation-frame references: [a_ref ; m_ref].
    v = np.r_[self.a_ref, self.m_ref]
    # Partial derivatives of C(q).T @ a_ref w.r.t. (qw, qx, qy, qz).
    H = np.array([[-qy*v[2] + qz*v[1], qy*v[1] + qz*v[2], -qw*v[2] + qx*v[1] - 2.0*qy*v[0], qw*v[1] + qx*v[2] - 2.0*qz*v[0]],
                  [ qx*v[2] - qz*v[0], qw*v[2] - 2.0*qx*v[1] + qy*v[0], qx*v[0] + qz*v[2], -qw*v[0] + qy*v[2] - 2.0*qz*v[1]],
                  [-qx*v[1] + qy*v[0], -qw*v[1] - 2.0*qx*v[2] + qz*v[0], qw*v[0] - 2.0*qy*v[2] + qz*v[1], qx*v[0] + qy*v[1]]])
    if len(self.z) == 6:
        # Same derivative pattern applied to the magnetic reference (v[3:6]).
        H_2 = np.array([[-qy*v[5] + qz*v[4], qy*v[4] + qz*v[5], -qw*v[5] + qx*v[4] - 2.0*qy*v[3], qw*v[4] + qx*v[5] - 2.0*qz*v[3]],
                        [ qx*v[5] - qz*v[3], qw*v[5] - 2.0*qx*v[4] + qy*v[3], qx*v[3] + qz*v[5], -qw*v[3] + qy*v[5] - 2.0*qz*v[4]],
                        [-qx*v[4] + qy*v[3], -qw*v[4] - 2.0*qx*v[5] + qz*v[3], qw*v[3] - 2.0*qy*v[5] + qz*v[4], qx*v[3] + qy*v[4]]])
        H = np.vstack((H, H_2))
    return 2.0*H
def dhdq_mag(self, q: np.ndarray, mode: str = 'normal') -> np.ndarray:
    """Jacobian dh/dq (3x4) for the magnetometer-only measurement model.

    NOTE(review): unlike ``dhdq_both``, ``mode`` is neither validated nor
    used here.
    """
    qw, qx, qy, qz = q
    v = self.m_ref
    # Partial derivatives of C(q).T @ m_ref w.r.t. (qw, qx, qy, qz).
    H = np.array([[-qy*v[2] + qz*v[1], qy*v[1] + qz*v[2], -qw*v[2] + qx*v[1] - 2.0*qy*v[0], qw*v[1] + qx*v[2] - 2.0*qz*v[0]],
                  [ qx*v[2] - qz*v[0], qw*v[2] - 2.0*qx*v[1] + qy*v[0], qx*v[0] + qz*v[2], -qw*v[0] + qy*v[2] - 2.0*qz*v[1]],
                  [-qx*v[1] + qy*v[0], -qw*v[1] - 2.0*qx*v[2] + qz*v[0], qw*v[0] - 2.0*qy*v[2] + qz*v[1], qx*v[0] + qy*v[1]]])
    return 2.0*H
def updateAll(self, q: np.ndarray, gyr: np.ndarray, acc: np.ndarray = None, mag: np.ndarray = None, mode: str = "both") -> np.ndarray:
    """
    Perform one EKF predict/correct step of the state.

    Parameters
    ----------
    q : numpy.ndarray
        A-priori orientation as quaternion (must be unit-norm).
    gyr : numpy.ndarray
        Sample of tri-axial Gyroscope in rad/s.
    acc : numpy.ndarray
        Sample of tri-axial Accelerometer in m/s^2 (modes "both"/"accel").
    mag : numpy.ndarray
        Sample of tri-axial Magnetometer in uT (modes "both"/"mag").
    mode : str
        Which measurements drive the correction: "both", "mag" or "accel".

    Returns
    -------
    q : numpy.ndarray
        Estimated a-posteriori orientation as quaternion.

    Raises
    ------
    ValueError
        If ``q`` is not unit-norm or ``mode`` is unknown.
    """
    if not np.isclose(np.linalg.norm(q), 1.0):
        raise ValueError("A-priori quaternion must have a norm equal to 1.")
    # Current Measurements
    g = np.copy(gyr)    # Gyroscope data as control vector
    a = np.copy(acc)
    # Build the normalized measurement vector z and the diagonal of the
    # measurement noise covariance R in one place. Noises are ordered
    # (gyro, accel, mag); "both" yields a 6x6 R, the others 3x3.
    # (The previous version also computed an R here that was immediately
    # overwritten — that dead assignment has been removed.)
    if mode == "both":
        a_norm = np.linalg.norm(a)
        m_norm = np.linalg.norm(mag)
        self.z = np.r_[a/a_norm, mag/m_norm]
        diagonal = np.repeat(self.noises[1:], 3)
    elif mode == "mag":
        m_norm = np.linalg.norm(mag)
        self.z = np.r_[mag/m_norm]
        diagonal = np.repeat(self.noises[2], 3)
    elif mode == "accel":
        a_norm = np.linalg.norm(a)
        self.z = np.r_[a/a_norm]
        diagonal = np.repeat(self.noises[1], 3)
    else:
        raise ValueError("mode must be 'both', 'mag' or 'accel', got %r" % (mode,))
    self.R = np.diag(diagonal)  # Measurement Noise Covariance
    # ----- Prediction -----
    q_t = self.f(q, g)          # Predicted State
    F = self.dfdq(g)            # Linearized Fundamental Matrix
    W = 0.5*self.Dt * np.r_[[-q[1:]], q[0]*np.identity(3) + skew(q[1:])]  # Jacobian W = df/dω
    Q_t = 0.5*self.Dt * self.g_noise * W@W.T    # Process Noise Covariance
    P_t = F@self.P@F.T + Q_t    # Predicted Covariance Matrix
    # ----- Correction -----
    if mode == "mag":
        y = self.h_mag(q_t)     # Expected Measurement function
        H = self.dhdq_mag(q_t)  # Linearized Measurement Matrix
    else:
        y = self.h_both(q_t)    # Expected Measurement function
        H = self.dhdq_both(q_t)  # Linearized Measurement Matrix
    v = self.z - y              # Innovation (Measurement Residual)
    S = H@P_t@H.T + self.R      # Measurement Prediction Covariance
    K = P_t@H.T@np.linalg.inv(S)    # Kalman Gain
    self.P = (np.identity(4) - K@H)@P_t
    self.q = q_t + K@v          # Corrected state
    self.q /= np.linalg.norm(self.q)
    return self.q
|
import numpy as np
''' Some kinds of Loss Functions
'''
class MSE:
    """Sum-of-squares cost: C = sum((a - y)^2).

    ``batchA`` holds the network activations, ``batchY`` the targets.
    """

    def loss(self, batchA, batchY):
        """Return the summed squared error over the batch."""
        diff = batchA - batchY
        return np.sum(diff * diff)

    def delta(self, batchA, batchY):
        """Gradient dC/da of the cost w.r.t. the activations."""
        return (batchA - batchY) * 2
class CE:
    """Binary cross-entropy cost:

        C = -sum(y*log(a) + (1-y)*log(1-a))

    ``batchA`` are activations expected in the open interval (0, 1),
    ``batchY`` the targets. The previous version only contained stubs
    that returned ``None``; both methods are implemented here.
    """

    def loss(self, batchA, batchY):
        """Return the summed cross-entropy over the batch."""
        return -np.sum(batchY * np.log(batchA) + (1 - batchY) * np.log(1 - batchA))

    def delta(self, batchA, batchY):
        """Gradient dC/da: (a - y) / (a * (1 - a))."""
        return (batchA - batchY) / (batchA * (1 - batchA))
# Ready-to-use singleton instances of each cost function.
mse = MSE()
ce = CE()
# -*- coding: ISO-8859-1 -*-
import json
from ...constantes import *
from util import *
def create_chart(conf, entries):
    """
    Update the Highcharts configuration ``conf`` in place from ``entries``.

    Fills each series' data from the entries, adds phase plot-bands and
    ignition labels, sets the title/subtitle, and returns the mutated conf.
    ``entries`` items are expected to expose ``dt``, ``phase``, ``event``,
    ``datatolist`` and ``next()``.
    """
    serie_index = 0
    for serie in conf['series']:
        data = []
        for entry in entries:
            if entry is not None:
                data.append(entry.datatolist(str(serie['db'])))
        conf['series'][serie_index]['data'] = data
        serie_index += 1
    """ Add PlotBands """
    plotBands = []
    last_entry = len(entries)-1
    n = 1
    # Scan runs of consecutive entries sharing the same phase; each run
    # becomes one colored plot-band.
    # NOTE(review): entries[n].phase is read before the entries[n] is not
    # None test — a None entry would raise AttributeError here; confirm.
    while n < last_entry and\
            entries[n].phase is not None and\
            entries[n] is not None and\
            entries[n].next().phase is not None:
        begin = entries[n].dt
        phase = entries[n].phase
        n += 1
        # Advance to the end of the current phase run.
        while entries[n] is not None and\
                entries[n].phase is not None and\
                entries[n].phase == phase and\
                n < last_entry:
            n += 1
        end = entries[n].dt
        plotBand = {
            'color': PhaseColor[phase],
            'from': datetime_to_timestamp(begin),
            'to': datetime_to_timestamp(end)
        }
        plotBands.append(plotBand)
    conf['xAxis']['plotBands'] = plotBands
    """ Add Labels """
    condition_flag_allumage = '((prec.phase is not None) and (prec.phase is not PHASE_ALLUMAGE))'
    condition_next_is_not_maintien = '((next.phase is not None) and (next.phase is not PHASE_MAINTIEN))'
    labels = json.loads(json.dumps(ChartLabel))  # make a deep copy of the original object
    labels['name'] = 'Labels'
    for entry in entries:
        if entry is not None and entry.phase is not None:
            # "Allumage" (ignition) label on every entry that carries an event.
            if entry.event is not None:
                data = {
                    "x": datetime_to_timestamp(entry.dt),
                    "title": 'Allumage'
                }
                labels['data'].append(data)
            """
            # Label Combustion
            if entry.phase == PHASE_COMBUSTION and\
            entry.prec() is not None and\
            entry.prec().phase is not PHASE_COMBUSTION and\
            entry.all_next_verify_condition(5, condition_next_is_not_maintien):
                data = {
                    "x": datetime_to_timestamp(entry.dt),
                    "title": 'Combustion'
                }
                labels['data'].append(data)
            """
    conf['series'].append(labels)
    """ Add Subtitle (plotbands legend) """
    #conf["subtitle"] = ChartLegend
    """ Add Title (date begin date end) """
    if len(entries) > 3:
        begin = pretty_date(entries[0].dt)
        end = pretty_date(entries[len(entries)-1].dt)
        #conf["title"]["text"] = 'Monitoring Chaudière du {0} au {1}'.format(begin, end)
        conf["title"]["text"] = 'Monitoring Chaudière'
        conf["subtitle"]["text"] = ' du {0} au {1}'.format(begin, end)
    else:
        conf["title"]["text"] = 'Monitoring Chaudière'
    """ Return new conf """
    return conf
# Highstock configuration: full four-pane historical view (boiler/flue
# temperatures, primary fan, feed, ignition), raw-resolution data.
# NOTE(review): 'inputEnabled': 'false' is the *string* 'false' (truthy in
# JS) in several configs below — confirm whether the boolean was intended.
static_conf_raw = {
    "chart": {"defaultSeriesType": 'spline'},
    "credits": {"enabled": False},
    "exporting": {"filename": 'chaudiere'},
    'rangeSelector' : {
        'inputEnabled': 'false',
        'selected' : 2,
        'buttons': [
            {
                'type': 'minute',
                'count': 15,
                'text': '15m'
            },{
                'type': 'hour',
                'count': 1,
                'text': '1h'
            },{
                'type': 'hour',
                'count': 2,
                'text': '2h'
            },{
                'type': 'hour',
                'count': 4,
                'text': '4h'
            },{
                'type': 'hour',
                'count': 6,
                'text': '6h'
            },{
                'type': 'all',
                'text': 'All'
            }]
    },
    'title': {
        'text': 'Chaudière'
    },
    # Four stacked panes; 'top'/'height' percentages lay them out vertically.
    'yAxis': [
        {
            'labels': {'align': 'right','x': -3},
            'title': {'text': 'Température'},
            'softMin': 55,
            'softMax': 70,
            'top': str((100/4)*0+3*0)+'%',
            'height': '25%',
            'lineWidth': 1,
        },
        {
            'labels': {'align': 'right','x': -3},
            'title': {'text': 'Vent'},
            'softMin': 0,
            'softMax': 20,
            'top': str((100/4)*1+3*1)+'%',
            'height': '25%',
            'offset': 0,
            'lineWidth': 1,
        },
        {
            'labels': {'align': 'right','x': -3},
            'title': {'text': 'Alimentation'},
            'softMin': 0,
            'softMax': 30,
            'top': str((100/4)*2+3*2)+'%',
            'height': '25%',
            'offset': 0,
            'lineWidth': 1,
        },
        {
            'labels': {'align': 'right','x': -3},
            'title': {'text': 'Allumage'},
            'softMin': 0,
            'softMax': 100,
            'top': str((100/4)*3+3*3)+'%',
            'height': '12%',
            'offset': 0,
            'lineWidth': 1,
        }
    ],
    'tooltip': {
        'shared': True,
        'split': False,
        'crosshairs': True
    },
    # 'db' names the database field that create_chart() reads per series.
    "series": [
        {
            "name": InputName[TEMP_CHAUDIERE],
            "db": InputDb[TEMP_CHAUDIERE],
            "data": [],
            "yAxis": 0,
            "tooltip": {"valueDecimals": 1}
        },
        {
            "name": InputName[TEMP_FUMEE],
            "db": InputDb[TEMP_FUMEE],
            "data": [],
            "yAxis": 0,
            "tooltip": {"valueDecimals": 1}
        },
        {
            "name": InputName[VENT_PRIMAIRE],
            "db": InputDb[VENT_PRIMAIRE],
            "data": [],
            "yAxis": 1,
            "tooltip": {"valueDecimals": 0}
        },
        {
            "name": InputName[ALIMENTATION],
            "db": InputDb[ALIMENTATION],
            "data": [],
            "yAxis": 2,
            "tooltip": {"valueDecimals": 0}
        },
        {
            "name": InputName[ALLUMAGE],
            "db": InputDb[ALLUMAGE],
            "data": [],
            "yAxis": 3,
            "tooltip": {"valueDecimals": 0}
        }
    ]
}
# Same four-pane layout for minute-aggregated data ('All' selected by default).
static_conf_minute_full = {
    "chart": {"defaultSeriesType": 'spline'},
    "credits": {"enabled": False},
    "exporting": {"filename": 'chaudiere'},
    'rangeSelector' : {
        'inputEnabled': 'false',
        'selected' : 5,
        'buttons': [
            {
                'type': 'minute',
                'count': 15,
                'text': '15m'
            },{
                'type': 'hour',
                'count': 1,
                'text': '1h'
            },{
                'type': 'hour',
                'count': 2,
                'text': '2h'
            },{
                'type': 'hour',
                'count': 4,
                'text': '4h'
            },{
                'type': 'hour',
                'count': 6,
                'text': '6h'
            },{
                'type': 'all',
                'text': 'All'
            }
        ]
    },
    'title': {'text': 'Chaudière'},
    'xAxis': {
        'plotBands': None
    },
    'yAxis': [
        {
            'labels': {'align': 'right','x': -3},
            'title': {'text': 'Température'},
            'softMin': 55,
            'softMax': 70,
            'top': str((100/4)*0+3*0)+'%',
            'height': '25%',
            'lineWidth': 1,
        },
        {
            'labels': {'align': 'right','x': -3},
            'title': {'text': 'Vent'},
            'softMin': 0,
            'softMax': 20,
            'top': str((100/4)*1+3*1)+'%',
            'height': '25%',
            'offset': 0,
            'lineWidth': 1,
        },
        {
            'labels': {'align': 'right','x': -3},
            'title': {'text': 'Alimentation'},
            'softMin': 0,
            'softMax': 30,
            'top': str((100/4)*2+3*2)+'%',
            'height': '25%',
            'offset': 0,
            'lineWidth': 1,
        },
        {
            'labels': {'align': 'right','x': -3},
            'title': {'text': 'Allumage'},
            'softMin': 0,
            'softMax': 15,
            'top': str((100/4)*3+3*3)+'%',
            'height': '12%',
            'offset': 0,
            'lineWidth': 1,
        },
    ],
    'tooltip': {
        'shared': True,
        'split': False,
        'crosshairs': True
    },
    "series": [
        {
            "name": InputName[TEMP_CHAUDIERE],
            "db": InputDb[TEMP_CHAUDIERE],
            "data": [],
            "yAxis": 0,
            "tooltip": {"valueDecimals": 1}
        },
        {
            "name": InputName[TEMP_FUMEE],
            "db": InputDb[TEMP_FUMEE],
            "data": [],
            "yAxis": 0,
            "tooltip": {"valueDecimals": 1}
        },
        {
            "name": InputName[VENT_PRIMAIRE],
            "db": InputDb[VENT_PRIMAIRE],
            "data": [],
            "yAxis": 1,
            "tooltip": {"valueDecimals": 0}
        },
        {
            "name": InputName[ALIMENTATION],
            "db": InputDb[ALIMENTATION],
            "data": [],
            "yAxis": 2,
            "tooltip": {"valueDecimals": 0}
        },
        {
            "name": InputName[ALLUMAGE],
            "db": InputDb[ALLUMAGE],
            "data": [],
            "yAxis": 3,
            "tooltip": {"valueDecimals": 0}
        }
    ]
}
# Compact single-pane minute view (temperatures only) with a floating legend.
static_conf_minute = {
    "chart": {"defaultSeriesType": 'spline'},
    "subtitle": {
        "text": '',
        "useHTML": True,
        "verticalAlign": 'top',
        "y": 40,
    },
    "credits": {"enabled": False},
    "exporting": {"filename": 'chaudiere'},
    "legend" : {
        "enabled": True,
        "align": 'left',
        "layout": 'vertical',
        "verticalAlign": 'top',
        "x": 10,
        "y": 80,
        "floating": True,
        "borderWidth": 1,
        "backgroundColor": '#FFFFFF'
    },
    'rangeSelector' : {
        'inputEnabled': 'false',
        'selected' : 5,
        'buttons': [
            {
                'type': 'minute',
                'count': 15,
                'text': '15m'
            },{
                'type': 'hour',
                'count': 1,
                'text': '1h'
            },{
                'type': 'hour',
                'count': 2,
                'text': '2h'
            },{
                'type': 'hour',
                'count': 4,
                'text': '4h'
            },{
                'type': 'hour',
                'count': 6,
                'text': '6h'
            },{
                'type': 'all',
                'text': 'All'
            }
        ]
    },
    'navigator': {
        'enabled': True
    },
    'title': {'text': 'Chaudière'},
    'xAxis': {
        'plotBands': None
    },
    'yAxis': [
        {
            'labels': {'align': 'right','x': -3},
            'title': {'text': 'Température'},
            'softMin': 55,
            'softMax': 70,
            'top': str((0))+'%',
            'height': '100%',
            'lineWidth': 1,
        },
    ],
    'tooltip': {
        'shared': True,
        'split': False,
        'crosshairs': True
    },
    "series": [
        {
            "name": InputName[TEMP_CHAUDIERE],
            "db": InputDb[TEMP_CHAUDIERE],
            "data": [],
            "yAxis": 0,
            "tooltip": {"valueDecimals": 1}
        },
        {
            "name": InputName[TEMP_FUMEE],
            "db": InputDb[TEMP_FUMEE],
            "data": [],
            "yAxis": 0,
            "tooltip": {"valueDecimals": 1}
        },
    ]
}
# Live (websocket-fed) chart; 'load' names the JS callback requesting data.
live_conf = {
    "chart": {
        "renderTo": 'data-container',
        "defaultSeriesType": 'spline',
        "events": {
            "load": 'requestLastWatt0'
        }
    },
    "title": {
        "text": 'Live random data'
    },
    "xAxis": {
        "type": 'datetime',
    },
    "series": [{
        "name": 'Random data',
        "data": []
    }]
}
# Variant of static_conf_minute for a local kiosk display: range selector,
# navigator and scrollbar hidden, larger axis labels.
local_display_static_conf_minute = {
    "chart": {"defaultSeriesType": 'spline'},
    "subtitle": {
        "text": '',
        "useHTML": True,
        "verticalAlign": 'top',
        "y": 40,
    },
    "credits": {"enabled": False},
    "exporting": {"filename": 'chaudiere'},
    "legend" : {
        "enabled": True,
        "align": 'left',
        "layout": 'vertical',
        "verticalAlign": 'top',
        "x": 10,
        "y": 80,
        "floating": True,
        "borderWidth": 1,
        "backgroundColor": '#FFFFFF'
    },
    'rangeSelector': {
        'selected': 4,
        'inputEnabled': False,
        'buttonTheme': {
            'visibility': 'hidden'
        },
        'labelStyle': {
            'visibility': 'hidden'
        }
    },
    'navigator': {
        'enabled': False
    },
    'scrollbar': {
        'enabled': False
    },
    'title': {'text': 'chaudière'},
    'xAxis': {
        'plotBands': None
    },
    'yAxis': [
        {
            'labels': {'align': 'right','x': -3, 'style':{"fontSize": "20px"}},
            'title': {'text': 'Température'},
            'softMin': 55,
            'softMax': 70,
            'top': str((0))+'%',
            'height': '100%',
            'lineWidth': 1,
        },
    ],
    'tooltip': {
        'shared': True,
        'split': False,
        'crosshairs': True
    },
    "series": [
        {
            "name": InputName[TEMP_CHAUDIERE],
            "db": InputDb[TEMP_CHAUDIERE],
            "data": [],
            "yAxis": 0,
            "tooltip": {"valueDecimals": 1}
        },
        {
            "name": InputName[TEMP_FUMEE],
            "db": InputDb[TEMP_FUMEE],
            "data": [],
            "yAxis": 0,
            "tooltip": {"valueDecimals": 1}
        },
    ]
}
|
import string
import re
from numpy import array , argmax, random, take
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, LSTM, Embedding,Bidirectional, RepeatVector, TimeDistributed
from keras.preprocessing.text import Tokenizer
from keras.callbacks import ModelCheckpoint
from keras.preprocessing.sequence import pad_sequences
from keras.models import load_model
from keras import optimizers
import matplotlib.pyplot as plt
pd.set_option('display.max_colwidth',200)
def read_text(filename):
    """Read and return the entire contents of a UTF-8 text file.

    Uses a ``with`` block so the handle is closed even if reading raises
    (the original leaked the handle on error).
    """
    with open(filename, mode='rt', encoding='utf-8') as file:
        return file.read()
def to_lines(text):
    """Split raw corpus text into lines, then each line on tabs.

    Returns a list of lists, one ``[english, german, ...]`` record per
    line. The original version was missing the ``return`` statement and
    always produced ``None``, breaking every downstream step.
    """
    sents = text.strip().split('\n')
    sents = [i.split('\t') for i in sents]
    return sents
# Load the parallel corpus. The path MUST be a raw string: "\U" inside a
# normal literal is an invalid unicode escape and a SyntaxError on Python 3.
data = read_text(r"C:\Users\Senpai\Desktop\transilator.txt")
deu_eng = to_lines(data)
deu_eng = array(deu_eng)
# Keep only the first 50k sentence pairs to bound memory/training time.
deu_eng = deu_eng[:50000:]
deu_eng
# Collect per-sentence word counts for each language column.
eng_1 = []
deu_1 = []
for i in deu_eng[:, 0]:
    eng_1.append(len(i.split()))
for i in deu_eng[:, 1]:
    deu_1.append(len(i.split()))
length_df = pd.DataFrame({'eng': eng_1, 'deu': deu_1})
length_df.hist(bins=30)
plt.show()
def tokenization(lines):
    """Fit a fresh Keras ``Tokenizer`` on the given texts and return it."""
    fitted = Tokenizer()
    fitted.fit_on_texts(lines)
    return fitted
# Build one tokenizer per language; +1 reserves index 0 for padding.
eng_tokenizer = tokenization(deu_eng[:, 0])
eng_vocab_size = len(eng_tokenizer.word_index) + 1
eng_length = 8  # fixed (padded) English sequence length
print ('English Vocabulary Size: %d' % eng_vocab_size)
deu_tokenizer = tokenization(deu_eng[:, 1])
deu_vocab_size = len(deu_tokenizer.word_index) + 1
deu_length = 8  # fixed (padded) German sequence length
print('Deutch Vocabulary Size: %d' % deu_vocab_size)
def encode_sequences(tokenizer, length, lines):
    """Integer-encode ``lines`` with ``tokenizer`` and pad/truncate to ``length``.

    Note: the Keras API method is ``texts_to_sequences`` (plural); the
    original called the non-existent ``text_to_sequences``, which raised
    AttributeError at runtime.
    """
    seq = tokenizer.texts_to_sequences(lines)
    seq = pad_sequences(seq, maxlen=length, padding='post')
    return seq
from sklearn.model_selection import train_test_split

# 80/20 split. German (column 1) is the model input, English (column 0)
# the target. The original built every matrix from ``train[:, 1]`` with
# mismatched tokenizers, so the targets were German text and the test
# split was never encoded at all.
train, test = train_test_split(deu_eng, test_size=0.2, random_state=12)
trainX = encode_sequences(deu_tokenizer, deu_length, train[:, 1])
trainY = encode_sequences(eng_tokenizer, eng_length, train[:, 0])
testX = encode_sequences(deu_tokenizer, deu_length, test[:, 1])
testY = encode_sequences(eng_tokenizer, eng_length, test[:, 0])
def build_model(in_vocab, out_vocab, in_timesteps, out_timesteps, units):
    """Build an encoder-decoder LSTM for sequence-to-sequence translation.

    in_vocab/out_vocab: source/target vocabulary sizes;
    in_timesteps/out_timesteps: padded source/target lengths;
    units: embedding and LSTM width.
    """
    model = Sequential()
    model.add(Embedding(in_vocab, units, input_length=in_timesteps, mask_zero=True))
    model.add(LSTM(units))
    model.add(RepeatVector(out_timesteps))
    # return_sequences must be a boolean (the original passed 'softmax');
    # the decoder must emit one vector per output timestep.
    model.add(LSTM(units, return_sequences=True))
    # Project each timestep onto the output vocabulary; without this layer
    # the output dimension never matched the sparse integer targets.
    model.add(Dense(out_vocab, activation='softmax'))
    return model
model = build_model(deu_vocab_size, eng_vocab_size, deu_length, eng_length, 512)
rms = optimizers.RMSprop(lr=0.001)
# sparse_categorical_crossentropy accepts integer targets directly.
model.compile(optimizer=rms, loss='sparse_categorical_crossentropy')
filename = 'model.h1.24_transilator'
checkpoint = ModelCheckpoint(filename, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
# The original called fit() with inputs only (no targets) and reshaped X
# using Y's dimensions; targets must be supplied as (samples, timesteps, 1).
history = model.fit(trainX, trainY.reshape(trainY.shape[0], trainY.shape[1], 1),
                    epochs=5, batch_size=512, validation_split=0.2,
                    callbacks=[checkpoint], verbose=1)
model = load_model('model.h1.24_transilator')
preds = model.predict_classes(testX.reshape((testX.shape[0], testX.shape[1])))
def get_word(n, tokenizer):
    """Reverse tokenizer lookup: the word mapped to index ``n``, or None."""
    matches = (word for word, index in tokenizer.word_index.items() if index == n)
    return next(matches, None)
# Convert predicted index sequences back to text. Padding (index with no
# word -> None) becomes '', and a word identical to the previous prediction
# is suppressed (a common artefact of greedy decoding).
preds_text = []
for i in preds:
    temp = []
    for j in range(len(i)):
        t = get_word(i[j], eng_tokenizer)
        if j > 0:
            # Drop repeats of the previous word and unknown indices.
            if (t == get_word(i[j-1], eng_tokenizer)) or (t == None):
                temp.append('')
            else:
                temp.append(t)
        else:
            if (t == None):
                temp.append('')
            else:
                temp.append(t)
    preds_text.append(' '.join(temp))
# Side-by-side comparison of reference English vs. model output.
pred_df = pd.DataFrame({'actual': test[:, 0], 'predicted': preds_text})
pd.set_option('display.max_colwidth', 200)
pred_df.head(15)
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-21 15:10
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
    """Collapse the individual statement columns (date, first/last name,
    position, position_location) into a single JSON ``content`` field.

    NOTE: generated by Django; applied migrations should not be hand-edited.
    """

    dependencies = [
        ('statements', '0002_auto_20160810_0801'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='statement',
            name='date',
        ),
        migrations.RemoveField(
            model_name='statement',
            name='first_name',
        ),
        migrations.RemoveField(
            model_name='statement',
            name='last_name',
        ),
        migrations.RemoveField(
            model_name='statement',
            name='position',
        ),
        migrations.RemoveField(
            model_name='statement',
            name='position_location',
        ),
        migrations.AddField(
            model_name='statement',
            name='content',
            # The mutable default is only used once, to back-fill existing
            # rows: preserve_default=False drops it from the model afterwards.
            field=django.contrib.postgres.fields.jsonb.JSONField(default={'default': 'value'}),
            preserve_default=False,
        ),
    ]
|
import json
# TODO: Add an items() implementation
class PyJSON(object):
    """Wrap a JSON document (or dict) as nested attribute-accessible objects.

    Fixes over the previous version:
    - the bookkeeping ``_keys`` attribute was stored in ``__dict__`` and
      leaked into ``to_dict()``/``repr()``; it is gone (``keys()``/``items()``
      are implemented instead, resolving the old TODO),
    - ``to_dict()`` mutated internal lists in place, silently replacing
      nested ``PyJSON`` values with plain dicts inside ``self``,
    - ``from_dict()`` mutated the caller's lists in place,
    - ``isinstance`` is used instead of exact ``type`` comparison.
    """

    def __init__(self, d):
        """``d`` may be a JSON string or an already-parsed dict."""
        if isinstance(d, str):
            d = json.loads(d)
        self.from_dict(d)

    def from_dict(self, d):
        """Replace this object's contents with ``d``, recursing into nested
        dicts (including dicts inside lists)."""
        self.__dict__ = {}
        for key, value in d.items():
            if isinstance(value, dict):
                value = PyJSON(value)
            elif isinstance(value, list):
                # Wrap dict entries recursively without mutating the input list.
                value = [PyJSON(x) if isinstance(x, dict) else x for x in value]
            self.__dict__[key] = value

    def to_dict(self):
        """Inverse of ``from_dict``: plain nested dicts/lists; ``self`` is
        left untouched."""
        d = {}
        for key, value in self.__dict__.items():
            if isinstance(value, PyJSON):
                value = value.to_dict()
            elif isinstance(value, list):
                value = [x.to_dict() if isinstance(x, PyJSON) else x for x in value]
            d[key] = value
        return d

    def keys(self):
        """dict-like view of top-level keys."""
        return self.__dict__.keys()

    def items(self):
        """dict-like view of top-level (key, value) pairs; values may be
        nested ``PyJSON`` wrappers."""
        return self.__dict__.items()

    def __repr__(self):
        return str(self.to_dict())

    def __setitem__(self, key, value):
        self.__dict__[key] = value

    def __getitem__(self, key):
        return self.__dict__[key]
if __name__ == '__main__':
    # Only run if this is executed as a script: tiny smoke test of PyJSON.
    j = '{ "a": { "a1" : 1, "a2":3 }, "b": {"b1": "b_one", "b2": "b_two"}, "c": [3,4,5]}'
    d = PyJSON(j)
    print(d.c)
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 Ramil Nugmanov <stsouko@live.ru>
# This file is part of PREDICTOR.
#
# PREDICTOR is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
import uuid
from collections import defaultdict
from os import path
from .config import (UPLOAD_PATH, StructureStatus, TaskStatus, ModelType, TaskType, REDIS_HOST, REDIS_JOB_TIMEOUT,
REDIS_PASSWORD, REDIS_PORT, REDIS_TTL, StructureType)
from .models import Tasks, Structures, Additives, Models, Additivesets, Destinations, Users, Results
from .redis import RedisCombiner
from flask import Blueprint, url_for, send_from_directory, request
from flask_login import current_user
from flask_restful import reqparse, Resource, fields, marshal, abort, Api
from functools import wraps
from pony.orm import db_session, select, left_join
from validators import url
from werkzeug import datastructures
api_bp = Blueprint('api', __name__)
api = Api(api_bp)
redis = RedisCombiner(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD, result_ttl=REDIS_TTL,
job_timeout=REDIS_JOB_TIMEOUT)
class ModelTypeField(fields.Raw):
    """flask-restful field that marshals a stored integer back into its
    ModelType enum member."""
    def format(self, value):
        return ModelType(value)
# Marshalling schema for one structure in a task payload. Defaults: 298 K,
# 1 atm, todelete=False.
taskstructurefields = dict(structure=fields.Integer, data=fields.String, temperature=fields.Float(298),
                           pressure=fields.Float(1),
                           todelete=fields.Boolean(False),
                           additives=fields.List(fields.Nested(dict(additive=fields.Integer, amount=fields.Float))),
                           models=fields.List(fields.Nested(dict(model=fields.Integer, name=fields.String))))
# Marshalling schema for model registration payloads (RegisterModels.post);
# destination port defaults to 6379 (redis).
modelfields = dict(example=fields.String, description=fields.String, type=ModelTypeField, name=fields.String,
                   destinations=fields.List(fields.Nested(dict(host=fields.String, port=fields.Integer(6379),
                                                               password=fields.String, name=fields.String))))
@api_bp.route('/task/batch_file/<string:file>', methods=['GET'])
def batch_file(file):
    """Serve a previously uploaded batch file from UPLOAD_PATH."""
    return send_from_directory(directory=UPLOAD_PATH, filename=file)
def get_model(_type):
    """Return the first DB model of the given ModelType as a plain dict,
    including its destination connection info.

    NOTE(review): raises StopIteration when no model of that type exists —
    confirm whether callers rely on at least one always being registered.
    """
    with db_session:
        return next(dict(model=m.id, name=m.name, description=m.description, type=m.type,
                         destinations=[dict(host=x.host, port=x.port, password=x.password, name=x.name)
                                       for x in m.destinations])
                    for m in select(m for m in Models if m.model_type == _type.value))
def get_additives():
    """Return {additive_id: info-dict} for every additive in the database."""
    with db_session:
        return {a.id: dict(additive=a.id, name=a.name, structure=a.structure, type=a.type)
                for a in select(a for a in Additives)}
def get_models_list(skip_prep=True, skip_dest_and_example=False):
    """Return {model_id: info-dict} for models in the database.

    skip_prep: when True (default), list only MOLECULE_MODELING and
    REACTION_MODELING models, excluding preparation models.
    skip_dest_and_example: when True, omit the 'example' and 'destinations'
    keys from each info dict.
    """
    with db_session:
        res = {}
        for m in (select(m for m in Models if m.model_type in (ModelType.MOLECULE_MODELING.value,
                                                               ModelType.REACTION_MODELING.value))
                  if skip_prep else select(m for m in Models)):
            res[m.id] = dict(model=m.id, name=m.name, description=m.description, type=m.type)
            if not skip_dest_and_example:
                res[m.id]['example'] = m.example
                res[m.id]['destinations'] = [dict(host=x.host, port=x.port, password=x.password, name=x.name)
                                             for x in m.destinations]
        return res
def fetchtask(task, status):
    """Fetch a redis job and validate it for the current user.

    Aborts with 404 (unknown id), 500 (backend error), 512 (still running),
    406 (status differs from ``status``) or 403 (task owned by another
    user). Returns (result_dict, ended_at).
    """
    job = redis.fetch_job(task)
    if job is None:
        abort(404, message='Invalid task id. Perhaps this task has already been removed')
    if not job:
        abort(500, message='modeling server error')
    if not job['is_finished']:
        abort(512, message='PROCESSING.Task not ready')
    if job['result']['status'] != status:
        abort(406, message='Task status is invalid. Task status is [%s]' % job['result']['status'].name)
    if job['result']['user'] != current_user.id:
        abort(403, message='User access deny. You do not have permission to this task')
    return job['result'], job['ended_at']
def format_results(task, status):
    """Fetch a finished task and flatten every enum inside it to its raw
    value so the payload is JSON-serializable. Returns the mutated result
    dict (the internal 'jobs' entry is dropped)."""
    result, ended_at = fetchtask(task, status)
    result['task'] = task
    result['date'] = ended_at.strftime("%Y-%m-%d %H:%M:%S")
    result['status'] = result['status'].value
    result['type'] = result['type'].value
    result.pop('jobs')
    for structure in result['structures']:
        structure['status'] = structure['status'].value
        structure['type'] = structure['type'].value
        for model in structure['models']:
            # Connection details and examples are internal-only.
            model.pop('destinations', None)
            model.pop('example', None)
            model['type'] = model['type'].value
            for item in model['results']:
                item['type'] = item['type'].value
        for additive in structure['additives']:
            additive['type'] = additive['type'].value
    return result
def authenticate(func):
    """Decorator: reject unauthenticated requests with HTTP 401 before
    calling the wrapped view."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        # abort() raises, so execution only continues for logged-in users.
        if not current_user.is_authenticated:
            abort(401, message=dict(user='not authenticated'))
        return func(*args, **kwargs)
    return wrapper
class AuthResource(Resource):
    """Base resource: every HTTP method requires an authenticated user."""
    method_decorators = [authenticate]
class AdminResource(Resource):
    """Base resource for admin endpoints.

    NOTE(review): authentication is currently disabled (the decorator is
    commented out) — confirm this is intentional before deployment.
    """
    pass
    #method_decorators = [authenticate]
class AvailableModels(Resource):
    """GET: list every modeling-capable model, with destination details
    stripped and enum types flattened to raw values."""
    def get(self):
        models = []
        for info in get_models_list().values():
            del info['destinations']
            info['type'] = info['type'].value
            models.append(info)
        return models, 200
class AvailableAdditives(Resource):
    """GET: list every known additive with its enum type flattened."""
    def get(self):
        additives = []
        for info in get_additives().values():
            info['type'] = info['type'].value
            additives.append(info)
        return additives, 200
class RegisterModels(AdminResource):
    """POST: register models and their redis destinations.

    Unknown model names are created with all their destinations; for known
    names only the not-yet-registered destinations are appended. Returns a
    report of what was actually created.
    """
    def post(self):
        data = marshal(request.get_json(force=True), modelfields)
        models = data if isinstance(data, list) else [data]
        # Existing (host, port, name) destination triples per model name.
        available = {x['name']: [(d['host'], d['port'], d['name']) for d in x['destinations']]
                     for x in get_models_list(skip_prep=False).values()}
        report = []
        for m in models:
            if m['destinations']:
                if m['name'] not in available:
                    # Brand-new model: create it together with its destinations.
                    with db_session:
                        new_m = Models(type=m['type'], name=m['name'], description=m['description'],
                                       example=m['example'])
                        for d in m['destinations']:
                            Destinations(model=new_m, **d)
                        report.append(dict(model=new_m.id, name=new_m.name, description=new_m.description,
                                           type=new_m.type.value,
                                           example=new_m.example,
                                           destinations=[dict(host=x.host, port=x.port, name=x.name)
                                                         for x in new_m.destinations]))
                else:
                    # Known model: only add destinations we don't have yet.
                    tmp = []
                    with db_session:
                        model = Models.get(name=m['name'])
                        for d in m['destinations']:
                            if (d['host'], d['port'], d['name']) not in available[m['name']]:
                                tmp.append(Destinations(model=model, **d))
                        if tmp:
                            report.append(dict(model=model.id, name=model.name, description=model.description,
                                               type=model.type.value, example=model.example,
                                               destinations=[dict(host=x.host, port=x.port, name=x.name)
                                                             for x in tmp]))
        return report, 201
''' ===================================================
collector of modeled tasks (individually). return json
===================================================
'''
class ResultsTask(AuthResource):
    """Collector of modeled tasks (individually). Returns JSON.

    GET reads a task persisted in the database; POST persists a finished
    redis task into the database.
    """
    def get(self, task):
        """Return a saved task by database id (int)."""
        try:
            task = int(task)
        except ValueError:
            abort(404, message='invalid task id. Use int Luke')
        with db_session:
            result = Tasks.get(id=task)
            if not result:
                abort(404, message='Invalid task id. Perhaps this task has already been removed')
            if result.user.id != current_user.id:
                abort(403, message='User access deny. You do not have permission to this task')
            models = get_models_list(skip_dest_and_example=True)
            for v in models.values():
                v['type'] = v['type'].value
            additives = get_additives()
            # Three queries: the task's structures, their results, and their
            # additive amounts.
            s = select(s for s in Structures if s.task == result)
            r = left_join((s.id, r.model.id, r.key, r.value, r.result_type)
                          for s in Structures for r in s.results if s.task == result and r is not None)
            a = left_join((s.id, a.additive.id, a.amount)
                          for s in Structures for a in s.additives if s.task == result and a is not None)
            structures = {x.id: dict(structure=x.id, data=x.structure, temperature=x.temperature, pressure=x.pressure,
                                     type=x.structure_type, status=x.structure_status, additives=[], models=[])
                          for x in s}
            # NOTE(review): the loop variables below shadow the query names
            # (a, r, s); it works, but is easy to misread.
            for s, a, aa in a:
                tmp = dict(amount=aa)
                tmp.update(additives[a])
                structures[s]['additives'].append(tmp)
            # Group results per structure, then per model.
            tmp_models = defaultdict(dict)
            for s, m, rk, rv, rt in r:
                tmp_models[s].setdefault(m, []).append(dict(key=rk, value=rv, type=rt))
            for s, mr in tmp_models.items():
                for m, r in mr.items():
                    tmp = dict(results=r)
                    tmp.update(models[m])
                    structures[s]['models'].append(tmp)
            return dict(task=result.id, status=TaskStatus.DONE.value, date=result.date.strftime("%Y-%m-%d %H:%M:%S"),
                        type=result.task_type, user=result.user.id if result.user else None,
                        structures=list(structures.values())), 200

    def post(self, task):
        """Persist a finished (DONE) redis task into the database."""
        result, ended_at = fetchtask(task, TaskStatus.DONE)
        with db_session:
            _task = Tasks(type=result['type'], date=ended_at, user=Users[current_user.id])
            for s in result['structures']:
                _structure = Structures(structure=s['data'], type=s['type'], temperature=s['temperature'],
                                        pressure=s['pressure'], status=s['status'], task=_task)
                for a in s['additives']:
                    Additivesets(additive=Additives[a['additive']], structure=_structure, amount=a['amount'])
                for m in s['models']:
                    _model = Models[m['model']]
                    for r in m.get('results', []):
                        Results(model=_model, structure=_structure, type=r['type'], key=r['key'], value=r['value'])
            return dict(task=_task.id, status=TaskStatus.DONE.value, date=ended_at.strftime("%Y-%m-%d %H:%M:%S"),
                        type=result['type'].value, user=current_user.id), 201
class ModelTask(AuthResource):
    """API for task modeling.

    GET returns the modeled results; POST applies user edits to a PREPARED
    task and submits it for modeling.
    """
    def get(self, task):
        return format_results(task, TaskStatus.DONE), 200

    def post(self, task):
        data = marshal(request.get_json(force=True), taskstructurefields)
        result = fetchtask(task, TaskStatus.PREPARED)[0]
        # Index the prepared structures by id so edits can be merged in.
        prepared = {s['structure']: s for s in result['structures']}
        structures = data if isinstance(data, list) else [data]
        tmp = {x['structure']: x for x in structures if x['structure'] in prepared}
        if 0 in tmp:
            abort(400, message='invalid structure data')
        additives = get_additives()
        models = get_models_list()
        for s, d in tmp.items():
            if d['todelete']:
                prepared.pop(s)
            else:
                if d['additives'] is not None:
                    # Keep only known additives with amounts in (0, 1).
                    alist = []
                    for a in d['additives']:
                        if a['additive'] in additives and 0 < a['amount'] < 1:
                            a.update(additives[a['additive']])
                            alist.append(a)
                    prepared[s]['additives'] = alist
                if result['type'] != TaskType.MODELING:  # for search tasks assign compatible models
                    prepared[s]['models'] = [get_model(ModelType.select(prepared[s]['type'], result['type']))]
                elif d['models'] is not None and prepared[s]['status'] == StructureStatus.CLEAR:
                    # User-picked models, filtered for structure/task compatibility.
                    prepared[s]['models'] = [models[m['model']] for m in d['models']
                                             if m['model'] in models and
                                             models[m['model']]['type'].compatible(prepared[s]['type'],
                                                                                   TaskType.MODELING)]
                if d['temperature']:
                    prepared[s]['temperature'] = d['temperature']
                if d['pressure']:
                    prepared[s]['pressure'] = d['pressure']
        result['structures'] = list(prepared.values())
        result['status'] = TaskStatus.MODELING
        new_job = redis.new_job(result)
        if new_job is None:
            abort(500, message='modeling server error')
        return dict(task=new_job['id'], status=result['status'].value, type=result['type'].value,
                    date=new_job['created_at'].strftime("%Y-%m-%d %H:%M:%S"), user=result['user']), 201
class PrepareTask(AuthResource):
    """API for task preparation.

    GET returns the prepared results; POST applies user edits to a
    PREPARED task and resubmits it for preparation.
    """
    def get(self, task):
        return format_results(task, TaskStatus.PREPARED), 200

    def post(self, task):
        data = marshal(request.get_json(force=True), taskstructurefields)
        result = fetchtask(task, TaskStatus.PREPARED)[0]
        preparer = get_model(ModelType.PREPARER)
        # Index the prepared structures by id so edits can be merged in.
        prepared = {s['structure']: s for s in result['structures']}
        structures = data if isinstance(data, list) else [data]
        tmp = {x['structure']: x for x in structures if x['structure'] in prepared}
        if 0 in tmp:
            abort(400, message='invalid structure data')
        additives = get_additives()
        for s, d in tmp.items():
            if d['todelete']:
                prepared.pop(s)
            else:
                if d['additives'] is not None:
                    # Keep only known additives with amounts in (0, 1).
                    alist = []
                    for a in d['additives']:
                        if a['additive'] in additives and 0 < a['amount'] < 1:
                            a.update(additives[a['additive']])
                            alist.append(a)
                    prepared[s]['additives'] = alist
                if d['data']:
                    # New structure data resets the structure to RAW and
                    # schedules the preparer again.
                    prepared[s]['data'] = d['data']
                    prepared[s]['status'] = StructureStatus.RAW
                    prepared[s]['models'] = [preparer]
                # BUG FIX: `s` is the structure id (an int key), so the
                # original `s['status']` raised TypeError; the status lives
                # in prepared[s], exactly as in ModelTask.post.
                elif prepared[s]['status'] == StructureStatus.RAW:
                    prepared[s]['models'] = [preparer]
                if d['temperature']:
                    prepared[s]['temperature'] = d['temperature']
                if d['pressure']:
                    prepared[s]['pressure'] = d['pressure']
        result['structures'] = list(prepared.values())
        result['status'] = TaskStatus.PREPARING
        new_job = redis.new_job(result)
        if new_job is None:
            abort(500, message='modeling server error')
        return dict(task=new_job['id'], status=result['status'].value, type=result['type'].value,
                    date=new_job['created_at'].strftime("%Y-%m-%d %H:%M:%S"), user=result['user']), 201
class CreateTask(AuthResource):
    """ ===================================================
    api for task creation.
    ===================================================
    """
    def post(self, _type):
        """Create a new task of the given type from posted structure data.

        Every valid structure is marked RAW and routed through the PREPARER
        model. Returns the new job descriptor with HTTP 201, or aborts with
        400 (bad type / no valid structures) or 500 (queue failure).
        """
        # validate the integer task-type code from the URL
        try:
            _type = TaskType(_type)
        except ValueError:
            msg = 'invalid task type [%s]. valid values are %s' % (_type, ', '.join(str(e.value) for e in TaskType))
            abort(400, message=msg)
        data = marshal(request.get_json(force=True), taskstructurefields)
        additives = get_additives()
        preparer = get_model(ModelType.PREPARER)
        structures = data if isinstance(data, list) else [data]
        # 'data' is reused: from here on it accumulates the outgoing structure records
        data = []
        # s becomes the 1-based structure id, assigned in input order
        for s, d in enumerate(structures, start=1):
            if d['data']:
                alist = []
                # accept only known additives with amount in the open interval (0, 1)
                for a in d['additives'] or []:
                    if a['additive'] in additives and 0 < a['amount'] < 1:
                        a.update(additives[a['additive']])
                        alist.append(a)
                data.append(dict(structure=s, data=d['data'], status=StructureStatus.RAW, type=StructureType.UNDEFINED,
                                 pressure=d['pressure'], temperature=d['temperature'],
                                 additives=alist, models=[preparer]))
        if not data:
            abort(400, message='invalid structure data')
        new_job = redis.new_job(dict(status=TaskStatus.NEW, type=_type, user=current_user.id, structures=data))
        if new_job is None:
            abort(500, message='modeling server error')
        return dict(task=new_job['id'], status=TaskStatus.PREPARING.value, type=_type.value,
                    date=new_job['created_at'].strftime("%Y-%m-%d %H:%M:%S"), user=current_user.id), 201
# Request parser for UploadTask: the structures file can arrive as a remote
# URL ('file.url'), a server-side path from an NGINX upload ('file.path'),
# or a direct multipart file upload ('structures').
uf_post = reqparse.RequestParser()
uf_post.add_argument('file.url', type=str)
uf_post.add_argument('file.path', type=str)
uf_post.add_argument('structures', type=datastructures.FileStorage, location='files')
class UploadTask(AuthResource):
    """ ===================================================
    api for structures upload.
    ===================================================
    """
    def post(self, _type):
        """Create a task of the given type from an uploaded structures file.

        The file may arrive via a frontend-supplied URL ('file.url'), an
        NGINX-upload path ('file.path'), or a direct multipart upload
        ('structures'). Returns the new job descriptor with HTTP 201, or
        aborts with 400/500.
        """
        try:
            _type = TaskType(_type)
        except ValueError:
            msg = 'invalid task type [%s]. valid values are %s' % (_type, ', '.join(str(e.value) for e in TaskType))
            # CONSISTENCY FIX: CreateTask reports this same malformed-input
            # condition as 400 (Bad Request); 403 (Forbidden) was misleading.
            abort(400, message=msg)
        args = uf_post.parse_args()
        if args['file.url'] and url(args['file.url']):
            # smart frontend supplied a fetchable URL directly
            file_url = args['file.url']
        elif args['file.path'] and path.exists(path.join(UPLOAD_PATH, path.basename(args['file.path']))):
            # NGINX upload: file already sits in UPLOAD_PATH
            file_url = url_for('.batch_file', file=path.basename(args['file.path']))
        elif args['structures']:
            # plain flask multipart upload: persist under a fresh UUID name
            file_name = str(uuid.uuid4())
            args['structures'].save(path.join(UPLOAD_PATH, file_name))
            file_url = url_for('.batch_file', file=file_name)
        else:
            abort(400, message='invalid structure data')
        new_job = redis.new_job(dict(status=TaskStatus.NEW, type=_type, user=current_user.id,
                                     structures=[dict(data=dict(url=file_url), status=StructureStatus.RAW,
                                                      type=StructureType.UNDEFINED,
                                                      models=[get_model(ModelType.PREPARER)])]))
        if new_job is None:
            abort(500, message='modeling server error')
        return dict(task=new_job['id'], status=TaskStatus.PREPARING.value, type=_type.value,
                    date=new_job['created_at'].strftime("%Y-%m-%d %H:%M:%S"), user=current_user.id), 201
# URL routing: task lifecycle endpoints (create/upload -> prepare -> model ->
# results), shared resource lookups, and the admin model registry.
api.add_resource(CreateTask, '/task/create/<int:_type>')
api.add_resource(UploadTask, '/task/upload/<int:_type>')
api.add_resource(PrepareTask, '/task/prepare/<string:task>')
api.add_resource(ModelTask, '/task/model/<string:task>')
api.add_resource(ResultsTask, '/task/results/<string:task>')
api.add_resource(AvailableAdditives, '/resources/additives')
api.add_resource(AvailableModels, '/resources/models')
api.add_resource(RegisterModels, '/admin/models')
|
"""Django signal handlers for modoboa_dmarc."""
from django.urls import reverse
from django.utils.translation import gettext as _
from django.dispatch import receiver
from modoboa.admin import signals as admin_signals
from . import models
@receiver(admin_signals.extra_domain_actions)
def dmarc_domain_actions(sender, user, domain, **kwargs):
    """Expose a DMARC-report link as an extra admin action for *domain*.

    Returns an empty list when no DMARC record references the domain.
    """
    has_reports = models.Record.objects.filter(header_from=domain).exists()
    if not has_reports:
        return []
    report_action = {
        "name": "dmarc_report",
        "url": reverse("dmarc:domain_report", args=[domain.pk]),
        "title": _("Show DMARC report for {}").format(domain.name),
        "img": "fa fa-pie-chart"
    }
    return [report_action]
|
import sys
from threading import Thread
import Jetson.GPIO as GPIO
from user_event import detect_click as listener
from user_event import controll_sys as action
# Map the CLI pin-numbering-scheme argument to Jetson.GPIO constants.
pin_types = {
    'bcm' : GPIO.BCM,
    'board' : GPIO.BOARD,
}
# CLI click-type flag mapped to a boolean handed to PowerHandler.
# NOTE(review): presumably '1' = single click, '2' = double click — confirm
# against user_event.detect_click.PowerHandler.
click = {
    '1' : True,
    '2' : False,
}
# Positional arguments: <pin scheme: bcm|board> <input pin> <output pin> <click type: 1|2>
# (raises KeyError/IndexError/ValueError if arguments are missing or invalid)
args = sys.argv
pin_type = pin_types[args[1]]
input_pin = int(args[2])
output_pin = int(args[3])
click_type = args[4]
def main():
    # Blocks forever: listen for clicks on input_pin, blink the LED on output_pin.
    listener.PowerListener(listener.PowerHandler(action.LedBlink(pin_type, output_pin), click[click_type]), pin_type, input_pin).run()
if __name__ == '__main__':
    main()
|
import argparse,collections,copy,datetime,os,pandas,shutil,sys,time
import Wrangler
# Based on NetworkWrangler\scripts\build_network.py
#
USAGE = """
Builds a network using the specifications in network_specification.py, which should
define the variables listed below (in this script)
The [-c configword] is if you want an optional word for your network_specification.py
(e.g. to have multiple scenarios in one file). Access it via CONFIG_WORD.
"""
###############################################################################
# #
# Define the following in an input configuration file #
# #
###############################################################################
# MANDATORY. Set this to be the Project Name.
# e.g. "RTP2021", "TIP2021", etc
PROJECT = None
# MANDATORY. Set this to be the Scenario Name
# e.g. "Base", "Baseline"
SCENARIO = None
# MANDATORY. Set this to be the git tag for checking out network projects.
TAG = None
# MANDATORY. The network you are building on top of.
# This should be a clone of https://github.com/BayAreaMetro/TM1_2015_Base_Network
# NOTE: raises KeyError at import time if the TM1_2015_Base_Network
# environment variable is not set.
PIVOT_DIR = os.environ['TM1_2015_Base_Network']
# OPTIONAL. If PIVOT_DIR is specified, MANDATORY. Specifies year for PIVOT_DIR.
PIVOT_YEAR = 2015
# MANDATORY. Set this to the directory in which to write your outputs.
# "hwy" and "trn" subdirectories will be created here.
OUT_DIR = None
# MANDATORY. Should be a dictionary with keys in NET_MODES
# to a list of projects. A project can either be a simple string, or it can be
# a dictionary with keys 'name', 'tag' (optional), and 'kwargs' (optional)
# to specify a special tag or special keyword args for the projects apply() call.
# For example:
# {'name':"Muni_TEP", 'kwargs':{'servicePlan':"'2012oct'"}}
NETWORK_PROJECTS = None
# MANDATORY. This is the folder where the NetworkProjects (each of which is a
# local git repo) are stored.
# As of 2023 July, this is now on Box: https://mtcdrive.box.com/s/cs0dmr987kaasmi83a6irru6ts6g4y1x
# NOTE: raises KeyError at import time if TM1_NetworkProjects is not set.
NETWORK_BASE_DIR = os.environ['TM1_NetworkProjects']
# unused & vestigial (I think)
NETWORK_PROJECT_SUBDIR = None
NETWORK_SEED_SUBDIR = None
NETWORK_PLAN_SUBDIR = None
# OPTIONAL. A list of project names which have been previously applied in the
# PIVOT_DIR network that projects in this project might rely on. For example
# if DoyleDrive exists, then Muni_TEP gets applied differently so transit lines
# run on the new Doyle Drive alignment
APPLIED_PROJECTS = None
# OPTIONAL. A list of project names. For test mode, these projects won't use
# the TAG. This is meant for developing a network project.
TEST_PROJECTS = None
TRN_MODES = ['trn']
NET_MODES = ['hwy'] + TRN_MODES
THIS_FILE = os.path.realpath(__file__)
# standard subdirs for transit and roadway
TRN_SUBDIR = "trn"
HWY_SUBDIR = "hwy"
###############################################################################
###############################################################################
# #
# Helper functions #
# #
###############################################################################
def getProjectNameAndDir(project):
    """Split a project spec into (directory, name).

    *project* is either a plain name string (possibly "dir/name") or a dict
    carrying the name under 'name'. The name is split with os.path.split so
    any leading directory component is returned separately ("" when absent).
    """
    # Idiom fix: isinstance instead of comparing type() objects.
    if isinstance(project, dict):
        name = project['name']
    else:
        name = project
    (path, name) = os.path.split(name)
    return (path, name)
def getNetworkListIndex(project, networks):
    """Return the position of *project* in *networks*, or None if absent.

    Matches either the bare project name or the full "dir/name" path.
    """
    # FIX: iterate with enumerate instead of calling networks.index(proj).
    # .index() rescanned the list (O(n) per hit) and, for duplicate entries,
    # returned the first occurrence rather than the one actually matched.
    for idx, proj in enumerate(networks):
        (path, name) = getProjectNameAndDir(proj)
        if project == name or project == os.path.join(path, name):
            return idx
    return None
def getProjectMatchLevel(left, right):
    """Score how closely two project specs refer to the same project.

    Returns 2 for an identical dir/name path, 1 for the same name in a
    different directory, 0 for no match.
    """
    (left_path, left_name) = getProjectNameAndDir(left)
    (right_path, right_name) = getProjectNameAndDir(right)
    left_full = os.path.join(left_path, left_name)
    right_full = os.path.join(right_path, right_name)
    if left_full == right_full:
        return 2
    if left_name == right_name:
        return 1
    return 0
def getProjectYear(PROJECTS, my_proj, netmode):
    """
    PROJECTS is an OrderedDict, year -> netmode -> [ project list ]
    Returns the first year in which my_proj shows up in the netmode's project
    list, formatted as year.netmode.NN (e.g. "2020.hwy.02" for the second hwy
    project in 2020).
    Returns -1 if the project is not found.
    """
    for year, modes in PROJECTS.items():
        for position, proj in enumerate(modes[netmode], start=1):
            # An entry is either a plain name string or a dict with a 'name' key.
            proj_name = proj['name'] if type(proj) is dict else proj
            if proj_name == my_proj:
                return "{}.{}.{:0>2d}".format(year, netmode, position)
    return -1
def checkRequirements(REQUIREMENTS, PROJECTS, req_type='prereq'):
    """Resolve the build year of every required project and verify ordering.

    REQUIREMENTS: netmode -> project -> netmode -> [list of required projects].
    PROJECTS: OrderedDict, year -> netmode -> [project list] (see getProjectYear).
    Mutates REQUIREMENTS in place, replacing each requirement list with a dict
    req_project -> req_project_year, and returns (REQUIREMENTS, is_ok).
    Returns (None, None) for an unknown req_type.

    NOTE(review): is_ok is only ever set False in the 'prereq' branch (missing
    prerequisite, or one applied after the dependent project); for 'coreq' and
    'conflict' it always stays True — callers appear to rely on the logged
    table rather than the flag. Confirm this is intended.
    """
    if req_type not in ('prereq','coreq','conflict'):
        return (None, None)
    # Wrangler.WranglerLogger.debug("checkRequirements called with requirements=[{}] projects=[{}] req_typ={}".format(REQUIREMENTS, PROJECTS, req_type))
    is_ok = True
    # REQUIREMENTS: netmode -> project -> netmode -> [list of projects]
    for netmode in REQUIREMENTS.keys():
        for project in REQUIREMENTS[netmode].keys():
            project_year = getProjectYear(PROJECTS, project, netmode)
            # getProjectYear returns the int -1 when the project is absent
            if (type(project_year) == int) and (project_year == -1):
                Wrangler.WranglerLogger.warning('Cannot find the {} project {} to check its requirements'.format(netmode, project))
                continue # raise?
            Wrangler.WranglerLogger.info('Checking {} project {} ({}) for {}'.format(netmode, project, project_year, req_type))
            for req_netmode in REQUIREMENTS[netmode][project].keys():
                req_proj_list = REQUIREMENTS[netmode][project][req_netmode]
                req_proj_years = {}
                for req_proj in req_proj_list:
                    req_project_year = getProjectYear(PROJECTS, req_proj, req_netmode)
                    # req_project_year is a string, YYYY.[trn|hwy].[number]
                    # prereq
                    if req_type=="prereq":
                        if (type(req_project_year) == int) and (req_project_year < 0):
                            is_ok = False # required project must be found
                            Wrangler.WranglerLogger.warning("required project not found")
                        # lexicographic comparison of "YYYY.mode.NN" strings;
                        # NOTE(review): for equal years this also orders by mode
                        # name ("hwy" < "trn") — confirm cross-mode ordering is intended
                        elif req_project_year > project_year:
                            is_ok = False # and implemented before or at the same time as the project
                            Wrangler.WranglerLogger.warning("required project year {} > project year {}".format(req_project_year, project_year))
                    # save into proj_years
                    req_proj_years[req_proj] = req_project_year
                # sub out the list info with the project year info
                REQUIREMENTS[netmode][project][req_netmode] = req_proj_years
    return (REQUIREMENTS, is_ok)
def writeRequirements(REQUIREMENTS, PROJECTS, req_type='prereq'):
    """Log the requirement table produced by checkRequirements.

    Expects REQUIREMENTS already resolved to
    netmode -> project -> netmode -> req_proj -> req_proj_year.
    Returns None. NOTE(review): despite the name, this only writes to the
    log, not to a file (cf. the unused prFile/crFile/cfFile variables in
    preCheckRequirementsForAllProjects).
    """
    # translate the req_type code into a human-readable column label
    if req_type=='prereq':
        print_req = 'Pre-requisite'
    elif req_type=='coreq':
        print_req = 'Co-requisite'
    elif req_type=='conflict':
        print_req = 'Conflict'
    else:
        return None
    Wrangler.WranglerLogger.info("Requirement verification - {}".format(print_req))
    Wrangler.WranglerLogger.info(" Year {:50} {:50} Year".format("Project",print_req+" " + "Project"))
    # REQUIREMENTS: netmode -> project -> netmode -> req_proj -> req_proj_year
    for netmode in REQUIREMENTS.keys():
        for project in REQUIREMENTS[netmode].keys():
            project_year = getProjectYear(PROJECTS, project, netmode)
            for req_netmode in REQUIREMENTS[netmode][project].keys():
                for req_project in REQUIREMENTS[netmode][project][req_netmode].keys():
                    Wrangler.WranglerLogger.info("{} {} {:50} {} {:50} {}".format(netmode, project_year, project,
                                                                                 req_netmode, req_project, REQUIREMENTS[netmode][project][req_netmode][req_project]))
def getProjectAttributes(project):
    """Normalize a project spec into (project_name, project_type, tag, kwargs).

    *project* is either a plain name string or a dict with key 'name' and
    optional keys 'tag', 'type', and 'kwargs'. Defaults: type 'project',
    tag None (caller substitutes the global TAG), kwargs {}.

    Raises ValueError for an unrecognized spec structure. (Previously this
    case logged a fatal message and then crashed with UnboundLocalError on
    the return statement because project_name was never assigned.)
    """
    # Start with TAG if not build mode, no kwargs
    project_type = 'project'
    tag = None
    kwargs = {}
    # Use project name, tags, kwargs from dictionary
    if isinstance(project, dict):
        project_name = project['name']
        if 'tag' in project: tag = project['tag']
        if 'type' in project: project_type = project['type']
        if 'kwargs' in project: kwargs = project['kwargs']
    # Use Project name directly
    elif isinstance(project, str):
        project_name = project
    # Other structures not recognized
    else:
        Wrangler.WranglerLogger.fatal("Don't understand project %s" % str(project))
        raise ValueError("Don't understand project %s" % str(project))
    return (project_name, project_type, tag, kwargs)
def preCheckRequirementsForAllProjects(NETWORK_PROJECTS, TEMP_SUBDIR, networks, continue_on_warning, BUILD_MODE=None, TEST_PROJECTS=None):
    """Clone every project once and verify its requirements before building.

    For each project: clones the repo, warns when the applied commit is not
    the repository HEAD or is stale (> 5 years old), and collects prereqs,
    coreqs and conflicts. Then checks each requirement set and prompts the
    user to confirm (unless continue_on_warning). Exits the process via
    sys.exit(2) on unrecoverable errors or a negative user response.
    """
    PRE_REQS = {'hwy':{},'trn':{}}
    CO_REQS = {'hwy':{},'trn':{}}
    CONFLICTS = {'hwy':{},'trn':{}}
    # Network Loop #1: check out all the projects, check if they're stale, check if they're the head repository. Build completed
    # project list so we can check pre-reqs, etc, in loop #2.
    for netmode in NET_MODES:
        # Build the networks!
        Wrangler.WranglerLogger.info("Checking out %s networks" % netmode)
        clonedcount = 0
        for model_year in NETWORK_PROJECTS.keys():
            for project in NETWORK_PROJECTS[model_year][netmode]:
                (project_name, projType, tag, kwargs) = getProjectAttributes(project)
                if tag == None: tag = TAG
                # test mode - don't use TAG for TEST_PROJECTS
                if BUILD_MODE=="test" and type(TEST_PROJECTS)==type(['List']):
                    if project_name in TEST_PROJECTS:
                        Wrangler.WranglerLogger.debug("Skipping tag [%s] because test mode and [%s] is in TEST_PROJECTS" %
                                                      (TAG, project_name))
                        tag = None
                Wrangler.WranglerLogger.debug("Project name = %s" % project_name)
                cloned_SHA1 = None
                # if project = "dir1/dir2" assume dir1 is git, dir2 is the projectsubdir
                (head,tail) = os.path.split(project_name)
                if head:
                    cloned_SHA1 = networks[netmode].cloneProject(networkdir=head, projectsubdir=tail, tag=tag,
                                                                 projtype=projType, tempdir=TEMP_SUBDIR, **kwargs)
                    (prereqs, coreqs, conflicts) = networks[netmode].getReqs(networkdir=head, projectsubdir=tail, tag=tag,
                                                                            projtype=projType, tempdir=TEMP_SUBDIR)
                else:
                    cloned_SHA1 = networks[netmode].cloneProject(networkdir=project_name, tag=tag,
                                                                 projtype=projType, tempdir=TEMP_SUBDIR, **kwargs)
                    # NOTE(review): this branch still passes projectsubdir=tail
                    # (== project_name here) to getReqs while cloneProject got
                    # no projectsubdir — confirm that asymmetry is intended.
                    (prereqs, coreqs, conflicts) = networks[netmode].getReqs(networkdir=project_name, projectsubdir=tail, tag=tag,
                                                                            projtype=projType, tempdir=TEMP_SUBDIR)
                # find out if the applied project is behind HEAD
                # get the HEAD SHA1
                cmd = r"git show-ref --head master"
                # NOTE(review): join_subdir is only assigned for projType
                # 'project' or 'seed'; any other type reuses the previous
                # iteration's value (or NameError on the first) — confirm.
                if projType=='project':
                    join_subdir = Wrangler.Network.NETWORK_PROJECT_SUBDIR
                if projType=='seed':
                    join_subdir = Wrangler.Network.NETWORK_SEED_SUBDIR
                cmd_dir = os.path.join(Wrangler.Network.NETWORK_BASE_DIR, join_subdir, project_name)
                (retcode, retStdout, retStderr) = networks[netmode]._runAndLog(cmd, run_dir = cmd_dir)
                # Wrangler.WranglerLogger.debug("results of [%s]: %s %s %s" % (cmd, str(retcode), str(retStdout), str(retStderr)))
                if retcode != 0: # this shouldn't happen -- wouldn't cloneAndApply have failed?
                    Wrangler.WranglerLogger.fatal("Couldn't run cmd [%s] in [%s]: stdout=[%s] stderr=[%s]" % \
                                                  (cmd, cmd_dir, str(retStdout), str(retStderr)))
                    sys.exit(2)
                head_SHA1 = retStdout[0].split()[0]
                # if they're different, log more information and get approval (if not in test mode)
                if cloned_SHA1 != head_SHA1:
                    Wrangler.WranglerLogger.warning("Using non-head version of project of %s" % project_name)
                    Wrangler.WranglerLogger.warning(" Applying version [%s], Head is [%s]" % (cloned_SHA1, head_SHA1))
                    cmd = "git log %s..%s" % (cloned_SHA1, head_SHA1)
                    (retcode, retStdout, retStderr) = networks[netmode]._runAndLog(cmd, run_dir = cmd_dir)
                    Wrangler.WranglerLogger.warning(" The following commits are not included:")
                    for line in retStdout:
                        Wrangler.WranglerLogger.warning(" %s" % line)
                    # test mode => warn is sufficient
                    # non-test mode => get explicit approval
                    if continue_on_warning:
                        Wrangler.WranglerLogger.warning("Continuing (continue_on_warning)")
                    elif BUILD_MODE !="test" and not continue_on_warning:
                        Wrangler.WranglerLogger.warning(" Is this ok? (y/n) ")
                        response = input("")
                        Wrangler.WranglerLogger.debug(" response = [%s]" % response)
                        if response.strip().lower()[0] != "y":
                            sys.exit(2)
                # find out if the project is stale
                else:
                    cmd = 'git show -s --format="%%ct" %s' % cloned_SHA1
                    (retcode, retStdout, retStderr) = networks[netmode]._runAndLog(cmd, run_dir = cmd_dir)
                    applied_commit_date = datetime.datetime.fromtimestamp(int(retStdout[0]))
                    applied_commit_age = datetime.datetime.now() - applied_commit_date
                    # if older than STALE_YEARS year, holler
                    STALE_YEARS = 5
                    if applied_commit_age > datetime.timedelta(days=365*STALE_YEARS):
                        Wrangler.WranglerLogger.warning(" This project was last updated %.1f years ago (over %d), on %s" % \
                                                        (applied_commit_age.days/365.0,
                                                         STALE_YEARS, applied_commit_date.strftime("%x")))
                        if continue_on_warning:
                            Wrangler.WranglerLogger.warning("Continuing (continue_on_warning)")
                        elif BUILD_MODE !="test":
                            Wrangler.WranglerLogger.warning(" Is this ok? (y/n) ")
                            response = input("")
                            Wrangler.WranglerLogger.debug(" response = [%s]" % response)
                            if response.strip().lower() not in ["y", "yes"]:
                                sys.exit(2)
                clonedcount += 1
                # format: netmode -> project -> { netmode: [requirements] }
                if len(prereqs ) > 0: PRE_REQS[ netmode][project_name] = prereqs
                if len(coreqs ) > 0: CO_REQS[ netmode][project_name] = coreqs
                if len(conflicts) > 0: CONFLICTS[netmode][project_name] = conflicts
    # Check requirements
    # NOTE(review): these filenames are never used — writeRequirements only logs.
    prFile = 'prereqs.csv'
    crFile = 'coreqs.csv'
    cfFile = 'conflicts.csv'
    # Check prereqs
    (PRE_REQS, allPrereqsFound) = checkRequirements(PRE_REQS, NETWORK_PROJECTS, req_type='prereq')
    if len(PRE_REQS['trn'])>0 or len(PRE_REQS['hwy'])>0:
        writeRequirements(PRE_REQS, NETWORK_PROJECTS, req_type='prereq')
        if allPrereqsFound:
            Wrangler.WranglerLogger.debug('All PRE-REQUISITES were found. Are the PRE-REQUISITES matches correct? (y/n)')
        else:
            Wrangler.WranglerLogger.debug('!!!WARNING!!! Some PRE-REQUISITES were not found or ordered correctly. Continue anyway? (y/n)')
        response = input("")
        Wrangler.WranglerLogger.debug(" response = [%s]" % response)
        if response.strip().lower() not in ["y", "yes"]:
            sys.exit(2)
    # Check coreqs
    (CO_REQS, allCoreqsFound) = checkRequirements(CO_REQS, NETWORK_PROJECTS, req_type='coreq')
    if len(CO_REQS['trn'])>0 or len(CO_REQS['hwy'])>0:
        writeRequirements(CO_REQS, NETWORK_PROJECTS, req_type='coreq')
        if allCoreqsFound:
            Wrangler.WranglerLogger.debug('All CO-REQUISITES were found. Are the CO-REQUISITE matches correct? (y/n)')
        else:
            Wrangler.WranglerLogger.debug('!!!WARNING!!! Some CO-REQUISITES were not found. Continue anyway? (y/n)')
        response = input("")
        Wrangler.WranglerLogger.debug(" response = [%s]" % response)
        if response.strip().lower() not in ["y", "yes"]:
            sys.exit(2)
    # Check conflicts
    (CONFLICTS, anyConflictFound) = checkRequirements(CONFLICTS, NETWORK_PROJECTS, req_type='conflict')
    if len(CONFLICTS['trn'])>0 or len(CONFLICTS['hwy'])>0:
        writeRequirements(CONFLICTS, NETWORK_PROJECTS, 'conflict')
        # NOTE(review): checkRequirements never sets is_ok False for
        # 'conflict', so anyConflictFound is always True here — confirm.
        if anyConflictFound:
            Wrangler.WranglerLogger.debug('!!!WARNING!!! Conflicting projects were found. Continue anyway? (y/n)')
        else:
            Wrangler.WranglerLogger.debug('No conflicting projects were found. Enter \'y\' to continue.')
        response = input("")
        Wrangler.WranglerLogger.debug(" response = [%s]" % response)
        if response.strip().lower() not in ["y", "yes"]:
            sys.exit(2)
    # Wrangler.WranglerLogger.debug("NETWORK_PROJECTS=%s NET_MODES=%s" % (str(NETWORK_PROJECTS), str(NET_MODES)))
###############################################################################
if __name__ == '__main__':
    # ----- command line -----
    parser = argparse.ArgumentParser(description=USAGE, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("--configword", help="optional word for network specification script")
    parser.add_argument("--continue_on_warning", help="Don't prompt the user to continue if there are warnings; just warn and continue", action="store_true")
    parser.add_argument("--skip_precheck_requirements", help="Don't precheck network requirements, stale projects, non-HEAD projects, etc", action="store_true")
    parser.add_argument("--create_project_diffs", help="Pass this to create proejct diffs information for each project. NOTE: THIS WILL BE SLOW", action="store_true")
    parser.add_argument("project_name", help="required project name, for example NGF")
    parser.add_argument("--scenario", help="optional SCENARIO name")
    parser.add_argument("net_spec", metavar="network_specification.py", help="Script which defines required variables indicating how to build the network")
    parser.add_argument("--NGF_netvariant",
                        choices=[
                            "BlueprintSegmented",
                            "P1a_AllLaneTolling_ImproveTransit", "P1b_AllLaneTolling_Affordable",
                            "P2a_AllLaneTollingPlusArterials_ImproveTransit", "P2b_AllLaneTollingPlusArterials_Affordable",
                            "P3b_3Cordons_Affordable", "P3a_3Cordons_ImproveTransit",
                            "P4_NoNewPricing", "P1x_AllLaneTolling_PricingOnly"],
                        help="Specify which network variant network to create.")
    args = parser.parse_args()
    NOW = time.strftime("%Y%b%d.%H%M%S")
    BUILD_MODE = None # regular
    # NGF project gets a hard-coded pivot network and variant-driven scenario
    if (args.project_name == 'NGF'):
        PIVOT_DIR = r"L:\Application\Model_One\NextGenFwys\INPUT_DEVELOPMENT\Networks\NGF_Networks_NoProjectNoSFCordon_08\net_2035_NGFNoProjectNoSFCordon"
        PIVOT_YEAR = 2035
        TRN_NET_NAME = "transitLines"
        # some of the NGF NetworkProjects use geopandas (namely NGF_TrnFreqBoostsCordons and NGF_TrnExtendedServiceHours_Cordons)
        # doing this import here in order to catch installation issues early
        import geopandas
    TRANSIT_CAPACITY_DIR = os.path.join(PIVOT_DIR, "trn")
    # NOTE(review): TRN_NET_NAME is (re)assigned here unconditionally, making
    # the assignment inside the NGF branch above redundant.
    TRN_NET_NAME = "transitLines"
    HWY_NET_NAME = "freeflow.net"
    # Read the configuration
    NETWORK_CONFIG = args.net_spec
    PROJECT = args.project_name
    if args.scenario: SCENARIO = args.scenario
    if args.project_name == 'NGF':
        SCENARIO = args.NGF_netvariant
        NET_VARIANT = args.NGF_netvariant
    # OUT_DIR keeps a "{}" placeholder that is later .format()-ed with the model year
    OUT_DIR = "{}_network_".format(PROJECT) + "{}"
    if SCENARIO:
        OUT_DIR = "{}_{}_network_".format(PROJECT, SCENARIO) + "{}"
    LOG_FILENAME = "build%snetwork_%s_%s_%s.info.LOG" % ("TEST" if BUILD_MODE=="test" else "", PROJECT, SCENARIO, NOW)
    Wrangler.setupLogging(LOG_FILENAME,
                          LOG_FILENAME.replace("info", "debug"))
    # NOTE(review): executes the config file's code in this scope; it can
    # override any of the module-level configuration constants above.
    exec(open(NETWORK_CONFIG).read())
    # Verify mandatory fields are set
    if PROJECT==None:
        print("PROJECT not set in %s" % NETWORK_CONFIG)
        sys.exit(2)
    if SCENARIO==None:
        print("SCENARIO not set in %s" % NETWORK_CONFIG)
        # sys.exit(2)
    if TAG==None:
        print("TAG not set in %s" % NETWORK_CONFIG)
        sys.exit(2)
    if OUT_DIR==None:
        print("OUT_DIR not set in %s" % NETWORK_CONFIG)
        sys.exit(2)
    if NETWORK_PROJECTS==None:
        print("NETWORK_PROJECTS not set in %s" % NETWORK_CONFIG)
        sys.exit(2)
    if TRANSIT_CAPACITY_DIR:
        Wrangler.TransitNetwork.capacity = Wrangler.TransitCapacity(directory=TRANSIT_CAPACITY_DIR)
    # Create a scratch directory to check out project repos into
    SCRATCH_SUBDIR = "scratch"
    TEMP_SUBDIR = "Wrangler_tmp_" + NOW
    if not os.path.exists(SCRATCH_SUBDIR): os.mkdir(SCRATCH_SUBDIR)
    os.chdir(SCRATCH_SUBDIR)
    if args.project_name == 'NGF':
        os.environ["CHAMP_node_names"] = "M:\\Application\\Model One\\Networks\\TM1_2015_Base_Network\\Node Description.xls"
        print()
    else:
        os.environ["CHAMP_node_names"] = os.path.join(PIVOT_DIR,"Node Description.xls")
    # one Wrangler network object per mode, seeded from the pivot network
    networks = {
        'hwy' :Wrangler.HighwayNetwork(modelType=Wrangler.Network.MODEL_TYPE_TM1, modelVersion=1.0,
                                       basenetworkpath=os.path.join(PIVOT_DIR,"hwy"),
                                       networkBaseDir=NETWORK_BASE_DIR,
                                       networkProjectSubdir=NETWORK_PROJECT_SUBDIR,
                                       networkSeedSubdir=NETWORK_SEED_SUBDIR,
                                       networkPlanSubdir=NETWORK_PLAN_SUBDIR,
                                       isTiered=True if PIVOT_DIR else False,
                                       tag=TAG,
                                       tempdir=TEMP_SUBDIR,
                                       networkName="hwy",
                                       tierNetworkName=HWY_NET_NAME),
        'trn':Wrangler.TransitNetwork( modelType=Wrangler.Network.MODEL_TYPE_TM1, modelVersion=1.0,
                                       basenetworkpath=os.path.join(PIVOT_DIR,"trn"),
                                       networkBaseDir=NETWORK_BASE_DIR,
                                       networkProjectSubdir=NETWORK_PROJECT_SUBDIR,
                                       networkSeedSubdir=NETWORK_SEED_SUBDIR,
                                       networkPlanSubdir=NETWORK_PLAN_SUBDIR,
                                       isTiered=True if PIVOT_DIR else False,
                                       networkName=TRN_NET_NAME)
    }
    # For projects applied in a pivot network (because they won't show up in the current project list)
    if APPLIED_PROJECTS != None:
        for proj in APPLIED_PROJECTS:
            networks['hwy'].appliedProjects[proj]=TAG
    # Wrangler.WranglerLogger.debug("NETWORK_PROJECTS=%s NET_MODES=%s" % (str(NETWORK_PROJECTS), str(NET_MODES)))
    if args.skip_precheck_requirements:
        Wrangler.WranglerLogger.info("skip_precheck_requirements passed so skipping preCheckRequirementsForAllProjects()")
    else:
        preCheckRequirementsForAllProjects(NETWORK_PROJECTS, TEMP_SUBDIR, networks, args.continue_on_warning, BUILD_MODE, TEST_PROJECTS)
    # create the subdir for SET_CAPCLASS with set_capclass.job as apply.s
    SET_CAPCLASS = "set_capclass"
    SET_CAPCLASS_DIR = os.path.join(TEMP_SUBDIR, SET_CAPCLASS)
    os.makedirs(SET_CAPCLASS_DIR)
    source_file = os.path.join(os.path.dirname(THIS_FILE), "set_capclass.job")
    shutil.copyfile( source_file, os.path.join(SET_CAPCLASS_DIR, "apply.s"))
    # Network Loop #2: Now that everything has been checked, build the networks.
    for YEAR in NETWORK_PROJECTS.keys():
        projects_for_year = NETWORK_PROJECTS[YEAR]
        appliedcount = 0
        for netmode in NET_MODES:
            Wrangler.WranglerLogger.info("Building {} {} networks".format(YEAR, netmode))
            for project in projects_for_year[netmode]:
                (project_name, projType, tag, kwargs) = getProjectAttributes(project)
                if tag == None: tag = TAG
                Wrangler.WranglerLogger.info("Applying project [{}] of type [{}] with tag [{}] and kwargs[{}]".format(project_name, projType, tag, kwargs))
                if projType=='plan':
                    continue
                # save a copy of this network instance for comparison
                if args.create_project_diffs:
                    network_without_project = copy.deepcopy(networks[netmode])
                applied_SHA1 = None
                cloned_SHA1 = networks[netmode].cloneProject(networkdir=project_name, tag=tag,
                                                            projtype=projType, tempdir=TEMP_SUBDIR, **kwargs)
                (parentdir, networkdir, gitdir, projectsubdir) = networks[netmode].getClonedProjectArgs(project_name, None, projType, TEMP_SUBDIR)
                applied_SHA1 = networks[netmode].applyProject(parentdir, networkdir, gitdir, projectsubdir, **kwargs)
                appliedcount += 1
                # Create difference report for this project
                # TODO: roadway not supported yet
                if args.create_project_diffs and netmode!="hwy":
                    # difference information to be store in network_dir netmode_projectname
                    # e.g. BlueprintNetworks\net_2050_Blueprint\trn_BP_Transbay_Crossing
                    project_diff_folder = os.path.join("..", OUT_DIR.format(YEAR),
                                                       "{}_{}".format(HWY_SUBDIR if netmode == "hwy" else TRN_SUBDIR, project_name))
                    hwypath=os.path.join("..", OUT_DIR.format(YEAR), HWY_SUBDIR)
                    # the project may get applied multiple times -- e.g., for different phases
                    suffix_num = 1
                    project_diff_folder_with_suffix = project_diff_folder
                    while os.path.exists(project_diff_folder_with_suffix):
                        suffix_num += 1
                        project_diff_folder_with_suffix = "{}_{}".format(project_diff_folder, suffix_num)
                    Wrangler.WranglerLogger.debug("Creating project_diff_folder: {}".format(project_diff_folder_with_suffix))
                    # new!
                    networks[netmode].reportDiff(network_without_project, project_diff_folder_with_suffix, project_name,
                                                 roadwayNetworkFile=os.path.join(os.path.abspath(hwypath), HWY_NET_NAME))
                    del network_without_project
                # if hwy project has set_capclass override, copy it to set_capclass/apply.s
                set_capclass_override = os.path.join(TEMP_SUBDIR, project_name, "set_capclass.job")
                if os.path.exists(set_capclass_override):
                    dest_file = os.path.join(SET_CAPCLASS_DIR, "apply.s")
                    shutil.copyfile(set_capclass_override, dest_file)
                    Wrangler.WranglerLogger.info("Copied override {} to {}".format(set_capclass_override, dest_file))
        if appliedcount == 0:
            Wrangler.WranglerLogger.info("No applied projects for this year -- skipping output")
            continue
        # Initialize output subdirectories up a level (not in scratch)
        hwypath=os.path.join("..", OUT_DIR.format(YEAR),HWY_SUBDIR)
        if not os.path.exists(hwypath): os.makedirs(hwypath)
        trnpath = os.path.join("..", OUT_DIR.format(YEAR),TRN_SUBDIR)
        if not os.path.exists(trnpath): os.makedirs(trnpath)
        # apply set_capclass before writing any hwy network
        kwargs = {'MODELYEAR':'{}'.format(YEAR)}
        applied_SHA1 = networks['hwy'].applyProject(parentdir=TEMP_SUBDIR, networkdir=SET_CAPCLASS,
                                                    gitdir=os.path.join(TEMP_SUBDIR, SET_CAPCLASS), **kwargs)
        networks['hwy'].write(path=hwypath,name=HWY_NET_NAME,suppressQuery=True,
                              suppressValidation=True) # MTC TM1 doesn't have turn penalties
        # os.environ["CHAMP_node_names"] = os.path.join(PIVOT_DIR,"Node Description.xls")
        hwy_abs_path = os.path.abspath( os.path.join(hwypath, HWY_NET_NAME) )
        networks['trn'].write(path=trnpath,
                              name="transitLines",
                              writeEmptyFiles = False,
                              suppressQuery = True,
                              suppressValidation = False,
                              cubeNetFileForValidation = hwy_abs_path)
        # Write the transit capacity configuration
        Wrangler.TransitNetwork.capacity.writeTransitVehicleToCapacity(directory = trnpath)
        Wrangler.TransitNetwork.capacity.writeTransitLineToVehicle(directory = trnpath)
        Wrangler.TransitNetwork.capacity.writeTransitPrefixToVehicle(directory = trnpath)
    Wrangler.WranglerLogger.debug("Successfully completed running %s" % os.path.abspath(__file__))
|
# draw images
import random
from PIL import Image, ImageDraw
import PIL
# Create a white 1000x1000 canvas and a red 100x100 swatch.
img1 = Image.new('RGB', (1000, 1000), color = 'white')
img2 = Image.new('RGB', (100, 100), color = 'red')
# BUG FIX: the original called img.save(...) on the undefined name `img`
# (NameError). Save the main canvas; img2 is currently unused.
img1.save('img/image.png')
|
from itm import UCProtocol
from utils import waits, wait_for
import logging
log = logging.getLogger(__name__)
class Commitment_Prot(UCProtocol):
    """UC commitment protocol in the random-oracle (F_ro) hybrid model.

    The committer hashes (nonce, bit) through F_ro and sends the digest as
    the commitment; reveal() later sends the preimage so the receiver can
    recompute the digest and confirm the opening.
    """
    def __init__(self, k, bits, sid, pid, channels, poly, pump, importargs):
        # sid = (ssid, committer pid, receiver pid)
        self.ssid = sid[0]
        self.committer = sid[1]
        self.receiver = sid[2]
        self.iscommitter = pid == self.committer
        UCProtocol.__init__(self, k, bits, sid, pid, channels, poly, pump, importargs)
        self.bit = None        # committed bit (committer side only)
        self.nonce = None      # random k-bit nonce binding the commitment
        self.state = 1         # receiver: 1 = awaiting commitment, 2 = awaiting opening
        self.commitment = -1   # digest received from the committer

    def commit(self, bit):
        """Hash (nonce, bit) via F_ro and send the digest to the receiver."""
        self.nonce = self.sample(self.k)
        self.bit = bit
        self.write('p2f', ((self.sid, 'F_ro'), ('ro', (self.nonce, self.bit))), 2)
        m = wait_for(self.channels['f2p'])
        fro, (_, msg) = m.msg
        print('\nsending\n')
        self.write('p2f', ((self.sid, 'F_ro'), ('send', self.receiver, msg, 1)), 1)

    def reveal(self):
        """Open the commitment by sending the (nonce, bit) preimage."""
        self.write('p2f', ((self.sid, 'F_ro'), ('send', self.receiver, (self.nonce, self.bit), 0)), 1)

    def env_msg(self, m):
        """Dispatch environment inputs: 'commit' first, then 'reveal' (committer only)."""
        if self.bit is None and self.iscommitter and m.msg[0] == 'commit':
            _, bit = m.msg
            self.commit(bit)
        elif self.bit is not None and self.iscommitter and m.msg[0] == 'reveal':
            self.reveal()
        else:
            self.pump.write('')

    def check_commit(self, preimage):
        """Re-hash the revealed preimage and confirm it matches the stored digest."""
        print('writing to ro', (self.sid,self.pid))
        self.write('p2f', ((self.sid,'F_ro'), ('ro', preimage)), 1)
        m = wait_for(self.channels['f2p'])
        fro, (_, msg) = m.msg
        assert self.commitment == msg
        nonce, bit = preimage
        self.write('p2z', ('open', bit), 0)

    def func_msg(self, m):
        """Receiver-side handling of messages relayed through F_ro."""
        fro, msg = m.msg
        # BUG FIX: original used "self.state is 1" / "is 2"; identity
        # comparison with int literals is implementation-defined (and a
        # SyntaxWarning on Python >= 3.8). Use equality instead.
        if not self.iscommitter and msg[0] == 'send' and self.state == 1:
            # NOTE(review): other paths go through self.write('p2z', ...);
            # this writes to the channel directly — confirm intended.
            self.channels['p2z'].write( 'commit', 0 )
            self.commitment = msg[1]
            self.state = 2
        elif not self.iscommitter and msg[0] == 'send' and self.state == 2:
            print('\n***checking commit**\n')
            self.check_commit(msg[1])
        else:
            self.pump.write('')
|
# List of tuples
# Each tuple contains a test:
# - the first element is the list of inputs (fed to the program line by line),
# - the second is the expected output, line by line,
# - and the third is the message shown in case of an error
# To test another case, add another tuple
input_values = [
    # Test case 1: base 15, height 6 -> perimeter 42, area 90
    (
        ["15","6"],
        ["AREA DE UN RECTANGULO","=====================","Dame la base:","Dame la altura:","Perimetro=42","Area=90"],
        "Revisa los requisitos del ejercicio, entrada por entrada, salida por salida"
    )
]
|
# Polygon Path Extrusion Tool - Version 3.5
# Add-on registration metadata read by Blender (requires Blender 2.80+).
bl_info = {'name':'Path Extrude','category':'Object','blender':(2,80,0)}
import bpy
import math
import numpy as np
from mathutils import Matrix, Vector
class PathExtrude(bpy.types.Operator):
    # Operator: extrudes the previously selected mesh profile along the
    # active object's edge path (open or closed loop).
    bl_idname = 'object.pathextrude'
    bl_label = 'Extrude Along Path'
    bl_options = {'REGISTER','UNDO'}
    def execute(self, context):
        # NOTE(review): relies heavily on bpy.ops acting on the current
        # selection/active object — statement order matters throughout.
        bpy.ops.object.mode_set(mode='OBJECT')
        # Define the extrusion path as the most recently selected object
        extrusion_path = bpy.context.active_object
        # Detect if the extrusion path is closed
        # (a closed loop has exactly as many edges as vertices)
        path_closed = len(extrusion_path.data.edges) == len(extrusion_path.data.vertices)
        # Order Vertices
        path_vertex_list = extrusion_path.data.vertices
        path_edge_list = [(Edge.vertices[0],Edge.vertices[1]) for Edge in extrusion_path.data.edges]
        def getOtherIndex(edgeTuple, vertexIndex):
            # Opposite endpoint of edgeTuple; implicitly None when
            # vertexIndex is not part of the edge.
            if vertexIndex in edgeTuple:
                if edgeTuple[0] == vertexIndex:
                    return edgeTuple[1]
                else:
                    return edgeTuple[0]
        def findAdjacentVertices(edgeList, vertexIndex):
            # All neighbours of vertexIndex.
            return [getOtherIndex(edgeTuple, vertexIndex) for edgeTuple in edgeList if vertexIndex in edgeTuple ]
        def findNextVertex(edgeList, vertexIndex, lastVertex):
            # Neighbours of vertexIndex, excluding the edge back to lastVertex.
            return [getOtherIndex(edgeTuple, vertexIndex) for edgeTuple in edgeList if vertexIndex in edgeTuple and lastVertex not in edgeTuple ]
        # A "clean" path has no vertex with more than two incident edges.
        cleanPath = True
        edgeDict = dict()
        for i in range(len(path_vertex_list)):
            edgeDict[i]=0
        for edgeTuple in path_edge_list:
            edgeDict[edgeTuple[0]] +=1
            edgeDict[edgeTuple[1]] +=1
        for i in range(len(path_vertex_list)):
            if edgeDict[i] > 2:
                cleanPath = False
                break
        if cleanPath:
            # Walk outward from vertex 0 along each incident branch to
            # recover the traversal order of the path.
            pathOrigins = findAdjacentVertices(path_edge_list, 0)
            pathLists = []
            finalOrder = []
            indicesPassed = []
            for i in range(len(pathOrigins)):
                pathList = [pathOrigins[i]]
                previousVertex = 0
                indicesPassed.append(previousVertex)
                currentVertex = pathOrigins[i]
                nextVertex = findNextVertex(path_edge_list, currentVertex, previousVertex)
                while len(nextVertex) >0:
                    if nextVertex[0] not in indicesPassed:
                        pathList.append(nextVertex[0])
                        previousVertex = currentVertex
                        indicesPassed.append(previousVertex)
                        currentVertex = nextVertex[0]
                        nextVertex = findNextVertex(path_edge_list, currentVertex, previousVertex)
                    else:
                        break
                pathLists.append(pathList)
            if not path_closed:
                # Stitch the branch walks into one ordered vertex sequence
                # with vertex 0 in its proper interior/endpoint position.
                if len(pathLists) == 1:
                    finalOrder = [0] + pathLists[0]
                elif len(pathLists) == 2:
                    if len(pathLists[0]) > len(pathLists[1]):
                        pathLists[1].reverse()
                        finalOrder = pathLists[1] + [0] + pathLists[0]
                    else:
                        pathLists[0].reverse()
                        finalOrder = pathLists[0] + [0] + pathLists[1]
                else:
                    finalOrder = [0] + pathLists[0]
                path_vertex_list = [extrusion_path.data.vertices[a] for a in finalOrder]
        # Tabulate extrusion path vertices
        vertex_list = [np.array((extrusion_path.matrix_world@r.co).to_tuple()) for r in path_vertex_list]
        # Remove duplicate vertices only if adjacent in the extrusion order
        clean_vertex_list = []
        last_vertex = (np.NaN, np.NaN, np.NaN)
        for vertexTuple in vertex_list:
            if not np.all(np.isclose(np.array(last_vertex), np.array(vertexTuple))):
                clean_vertex_list.append(vertexTuple)
            last_vertex = vertexTuple
        vertex_list = clean_vertex_list
        # Double the first vertex if closed
        if path_closed:
            vertex_list.append(vertex_list[0])
        # Calculate difference vectors for translation
        # (entry 0 holds the absolute position of the first vertex).
        difference_list = []
        for i in range(len(vertex_list)):
            if i == 0:
                difference_list.append(vertex_list[i])
            else:
                difference_list.append(vertex_list[i]-vertex_list[i-1])
        # Calculate average normal vectors at each point
        normalized_differences = [vector/np.linalg.norm(vector) for vector in difference_list]
        average_list = []
        for i in range(len(normalized_differences)):
            if i == 0:
                if path_closed:
                    average_list.append(normalized_differences[i+1]+normalized_differences[-1])
                else:
                    average_list.append(normalized_differences[i+1])
            elif i != len(normalized_differences)-1:
                average_list.append(normalized_differences[i+1]+normalized_differences[i])
            else:
                if path_closed:
                    average_list.append(normalized_differences[i]+normalized_differences[1])
                else:
                    average_list.append(normalized_differences[i])
        average_list = [vector/np.linalg.norm(vector) for vector in average_list]
        # Deselect the path so only profile objects remain selected.
        for curve in bpy.context.selected_objects:
            if curve == extrusion_path:
                curve.select_set(state=False)
        for curve in bpy.context.selected_objects:
            if curve != extrusion_path:
                # The first selected curve (not the active object) is the extruded curve
                extruded_curve = curve
                # Make curve active
                bpy.context.view_layer.objects.active = curve
                # Delete faces in extruded mesh if the path is closed
                if path_closed:
                    bpy.ops.object.mode_set(mode='EDIT')
                    bpy.ops.mesh.select_mode(type = 'FACE')
                    bpy.ops.mesh.dissolve_faces()
                    bpy.ops.mesh.delete(type = 'ONLY_FACE')
                    bpy.ops.mesh.select_mode(type = 'VERT')
                    bpy.ops.object.mode_set(mode = 'OBJECT')
                # Move the extruded curve center to the first vertex of the extrusion path
                extruded_curve.location.x = vertex_list[0][0]
                extruded_curve.location.y = vertex_list[0][1]
                extruded_curve.location.z = vertex_list[0][2]
                # Tabulate the vertices as a numpy array
                extruded_curve_vertices = [(extruded_curve.matrix_world@r.co).to_tuple() for r in extruded_curve.data.vertices]
                extruded_curve_vertices = np.array(extruded_curve_vertices)
                # Calculate the normal vector of the best fit plane for the vertices
                # (eigenvector of the covariance with the smallest eigenvalue).
                centered_vertices = extruded_curve_vertices - np.average(extruded_curve_vertices, axis=0)
                eigenvalues, eigenvectors = np.linalg.eig(np.dot(centered_vertices.T, centered_vertices))
                initial_normal = eigenvectors[:,list(abs(eigenvalues)).index(min(abs(eigenvalues)))]
                if np.dot(initial_normal, average_list[1]) < 0:
                    initial_normal = -1*initial_normal
                # Scaling and rotation of extruded curve, keeping orientation for open path
                extrusion_path.select_set(state=False)
                if abs(np.dot(average_list[0],initial_normal)) != 1:
                    # Build an orthonormal frame with y along the path tangent.
                    orient_vectorz = np.cross(initial_normal, average_list[0])
                    orient_vectorz /= np.linalg.norm(orient_vectorz)
                    orient_vectory = average_list[0]
                    orient_vectorx = np.cross(orient_vectorz, orient_vectory)
                    orient_vectorx /= np.linalg.norm(orient_vectorx)
                    orientMatrix = Matrix(((orient_vectorx[0],orient_vectory[0],orient_vectorz[0]),
                                           (orient_vectorx[1],orient_vectory[1],orient_vectorz[1]),
                                           (orient_vectorx[2],orient_vectory[2],orient_vectorz[2])))
                    if not path_closed:
                        # Open path: keep the profile's orientation, only shear-scale it.
                        if np.dot(average_list[0],initial_normal) != 0:
                            factor0 = abs(1/np.dot(average_list[0],initial_normal))
                        else:
                            factor0 = 1
                        bpy.ops.transform.resize(value=(factor0,1,1), orient_matrix=orientMatrix)
                        average_list[0] = initial_normal
                    else:
                        # Closed path: rotate the profile so its plane normal
                        # matches the first average tangent.  The rotate API
                        # differs between Blender releases, hence the branches.
                        cos = np.dot(initial_normal,average_list[0])
                        if (bpy.app.version[0] == 2 and bpy.app.version[1] == 90) or bpy.app.version[0] > 2 :
                            bpy.ops.transform.rotate(value=math.acos(cos), orient_matrix=-1*orientMatrix)
                        elif bpy.app.version[0] == 2 and bpy.app.version[1] == 92:
                            # 2.92 workaround: decompose into Z/Y/X Euler rotations.
                            if -1*orient_vectorz[0] > 0:
                                zRotation = math.atan(-1*orient_vectorz[1]/orient_vectorz[0])
                            elif -1*orient_vectorz[0] < 0:
                                zRotation = math.pi + math.atan(-1*orient_vectorz[1]/orient_vectorz[0])
                            elif orient_vectorz[1] > 0:
                                zRotation = math.pi/2
                            elif orient_vectorz[1] < 0:
                                zRotation = -1*math.pi/2
                            else:
                                zRotation = 0
                            if orient_vectorz[0] != 0 or orient_vectorz[1] != 0:
                                yRotation = math.atan(orient_vectorz[2]/math.sqrt(orient_vectorz[0]**2 + orient_vectorz[1]**2))
                            elif orient_vectorz[2] > 0:
                                yRotation = math.pi/2
                            else:
                                yRotation = -1*math.pi/2
                            bpy.ops.transform.rotate(value= -1*zRotation, orient_axis = 'Z')
                            bpy.ops.transform.rotate(value= yRotation, orient_axis = 'Y')
                            bpy.ops.transform.rotate(value= math.acos(cos), orient_axis = 'X')
                            bpy.ops.transform.rotate(value= -1*yRotation, orient_axis = 'Y')
                            bpy.ops.transform.rotate(value= zRotation, orient_axis = 'Z')
                        else:
                            bpy.ops.transform.rotate(value=math.acos(cos), orient_matrix=orientMatrix)
                        if np.dot(average_list[0],normalized_differences[1]) != 0:
                            factor0 = abs(1/np.dot(average_list[0],normalized_differences[1]))
                        else:
                            factor0 = 1
                        bpy.ops.transform.resize(value=(factor0,1,1), orient_matrix=orientMatrix)
                else:
                    factor0 = 1
                bpy.ops.transform.translate(value=(0,0,0))
                new_extruded_curve_vertices = [(bpy.context.view_layer.objects.active.matrix_world@r.co).to_tuple() for r in bpy.context.view_layer.objects.active.data.vertices]
                if path_closed:
                    normalized_differences[0] = (normalized_differences[1]+normalized_differences[-1])/np.linalg.norm(normalized_differences[1]+normalized_differences[-1])
                else:
                    normalized_differences[0] = normalized_differences[1]
                # Per-joint scale compensation so the profile keeps its width
                # through each corner (1/sin of the half-angle).
                factor_list = []
                for i in range(len(normalized_differences)):
                    if i == 0:
                        factor_list.append(factor0)
                    elif i != len(normalized_differences)-1:
                        factor_list.append(1/math.sin(math.acos(np.dot(-1*normalized_differences[i],normalized_differences[i+1]))/2))
                    else:
                        if path_closed:
                            factor_list.append(factor0)
                        else:
                            factor_list.append(1)
                # Main extrusion loop: one extrude+transform per path segment.
                for i in range(1,len(vertex_list)):
                    bpy.ops.object.mode_set(mode='EDIT')
                    bpy.ops.mesh.select_mode(type = 'VERT')
                    if i == 1:
                        bpy.ops.mesh.select_all(action='SELECT')
                    if not path_closed or i !=len(vertex_list)-1:
                        bpy.ops.mesh.extrude_region_move()
                        bpy.ops.transform.translate(value=difference_list[i])
                        cos = np.dot(average_list[i-1],average_list[i])
                        if abs(cos) != 1:
                            orient_vectorz = np.cross(average_list[i-1],average_list[i])
                            orient_vectorz /= np.linalg.norm(orient_vectorz)
                            orient_vectory = average_list[i]
                            orient_vectorx = np.cross(orient_vectorz, orient_vectory)
                            orient_vectorx /= np.linalg.norm(orient_vectorx)
                            orientMatrix = Matrix(((orient_vectorx[0],orient_vectory[0],orient_vectorz[0]),
                                                   (orient_vectorx[1],orient_vectory[1],orient_vectorz[1]),
                                                   (orient_vectorx[2],orient_vectory[2],orient_vectorz[2])))
                            if (bpy.app.version[0] == 2 and bpy.app.version[1] == 90) or bpy.app.version[0] > 2:
                                bpy.ops.transform.rotate(value=math.acos(cos), orient_matrix=-1*orientMatrix)
                            elif bpy.app.version[0] == 2 and bpy.app.version[1] == 92:
                                if -1*orient_vectorz[0] > 0:
                                    zRotation = math.atan(-1*orient_vectorz[1]/orient_vectorz[0])
                                elif -1*orient_vectorz[0] < 0:
                                    zRotation = math.pi + math.atan(-1*orient_vectorz[1]/orient_vectorz[0])
                                elif orient_vectorz[1] > 0:
                                    zRotation = math.pi/2
                                elif orient_vectorz[1] < 0:
                                    zRotation = -1*math.pi/2
                                else:
                                    zRotation = 0
                                if orient_vectorz[0] != 0 or orient_vectorz[1] != 0:
                                    yRotation = math.atan(orient_vectorz[2]/math.sqrt(orient_vectorz[0]**2 + orient_vectorz[1]**2))
                                elif orient_vectorz[2] > 0:
                                    yRotation = math.pi/2
                                else:
                                    yRotation = -1*math.pi/2
                                bpy.ops.transform.rotate(value= -1*zRotation, orient_axis = 'Z')
                                bpy.ops.transform.rotate(value= yRotation, orient_axis = 'Y')
                                bpy.ops.transform.rotate(value= math.acos(cos), orient_axis = 'X')
                                bpy.ops.transform.rotate(value= -1*yRotation, orient_axis = 'Y')
                                bpy.ops.transform.rotate(value= zRotation, orient_axis = 'Z')
                            else:
                                bpy.ops.transform.rotate(value=math.acos(cos), orient_matrix=orientMatrix)
                        # Undo the previous joint's compensation, apply this one's.
                        bpy.ops.transform.resize(value=(1/factor_list[i-1],1,1), orient_matrix=orientMatrix)
                        bpy.ops.transform.resize(value=(factor_list[i],1,1), orient_matrix=orientMatrix)
                    else:
                        # Closed path, last segment: bridge the final ring back
                        # to the profile's original vertices instead of extruding.
                        bpy.ops.object.mode_set(mode = 'OBJECT')
                        for Vertex in bpy.context.active_object.data.vertices:
                            if (extruded_curve.matrix_world@Vertex.co).to_tuple() in new_extruded_curve_vertices:
                                Vertex.select = True
                        bpy.ops.object.mode_set(mode='EDIT')
                        bpy.ops.mesh.select_mode(type = 'EDGE')
                        bpy.ops.mesh.bridge_edge_loops()
                bpy.ops.object.mode_set(mode = 'OBJECT')
        return {'FINISHED'}
def menu_func(self, context):
    # Menu draw callback: adds the operator to the 3D Viewport Object menu.
    self.layout.operator(PathExtrude.bl_idname)
def register():
    """Register the add-on with Blender."""
    # Register the operator class before exposing it in the Object menu so
    # the menu entry never points at an unregistered operator.
    bpy.utils.register_class(PathExtrude)
    bpy.types.VIEW3D_MT_object.append(menu_func)
def unregister():
    """Unregister the add-on."""
    # Bug fix: the original never removed the menu entry added in register(),
    # leaving a dangling menu item pointing at an unregistered operator.
    bpy.types.VIEW3D_MT_object.remove(menu_func)
    bpy.utils.unregister_class(PathExtrude)
if __name__ == '__main__':
    register()
|
"""
данная программа находит верхний передел интегрирования по заданной функции и значению интеграла
интеграл вычисляется методом Гаусса с помощью составление квадратурной формулы Гаусса
по корням полинома Лежандра заданной степени
"""
from math import cos, pi, exp, sqrt
import numpy as np
# Relative tolerance shared by the Newton root polishing and the bisection.
eps = 0.0001
def f(t):
    """Integrand: the (unnormalised) standard normal density exp(-t^2 / 2)."""
    return exp(-0.5 * t * t)
def get_polinom_Legendre(n, x):
    """Evaluate Legendre polynomials P_0..P_n at x via the Bonnet recurrence.

    Returns the list [P_0(x), P_1(x), ..., P_n(x)].  Note it always holds at
    least the first two entries, matching the original behaviour for n < 2.
    """
    values = [1, x]
    for k in range(2, n + 1):
        values.append(((2 * k - 1) * x * values[k - 1] - (k - 1) * values[k - 2]) / k)
    return values
def get_deveration_polinom_Legendre(n, p, x):
    """Derivative P_n'(x) from the identity
    (1 - x^2) P_n'(x) = n * (P_{n-1}(x) - x * P_n(x)),
    where p holds the values [P_0(x), ..., P_n(x)]."""
    return n / (1 - x * x) * (p[n - 1] - x * p[n])
# Could be optimised by computing only the first half of the roots (symmetry).
def get_roots_Legendre(n):
    """Roots of the degree-n Legendre polynomial, polished by Newton's method.

    Starts from the Chebyshev-like estimates cos(pi*(4i-1)/(4n+2)); iterating
    i downwards yields the roots in ascending order.  Returns (roots, px, dpx)
    where px[i]/dpx[i] are the polynomial values / derivative at each root.
    Uses the module-level tolerance `eps`.
    """
    roots = [cos(pi * (4 * i - 1) / (4 * n + 2)) for i in range(n, 0, -1)]
    px = []
    dpx = []
    for i in range(n):
        while True:
            p = get_polinom_Legendre(n, roots[i])
            dp = get_deveration_polinom_Legendre(n, p, roots[i])
            step = p[n] / dp
            roots[i] -= step
            if abs(step) < eps:
                break
        px.append(p)
        dpx.append(dp)
    return roots, px, dpx
def get_coef_Gauss_formula(x, dp, n):
    """Gauss-Legendre weights A_i = 2 / ((1 - x_i^2) * P_n'(x_i)^2)
    for nodes x and derivative values dp."""
    return [2 / (1 - x[i] * x[i]) / (dp[i] * dp[i]) for i in range(0, n)]
# Works worse than the closed-form formula (ill-conditioned Vandermonde solve).
def get_coef_Gauss(x, n):
    """Quadrature weights from the moment conditions: solve the Vandermonde
    system sum_j w_j * x_j^i = moment_i, where the moments of 1 on [-1, 1]
    are 2/(i+1) for even i and 0 for odd i."""
    moments = [2 / (i + 1) if i % 2 == 0 else 0 for i in range(n)]
    matr = [[1 for _ in range(n)]]
    for i in range(1, n):
        matr.append([matr[i - 1][j] * x[j] for j in range(n)])
    return np.linalg.solve(matr, moments)
def F(x, alpha, t, weights):
    """Quadrature estimate of the integral of f over [0, x], minus alpha.

    Nodes t live on [-1, 1]; the affine map u = (x/2)*(t+1) carries them to
    [0, x] (modify the map if the lower limit is not 0)."""
    acc = 0
    for i in range(0, len(t)):
        acc += weights[i] * f((x / 2) * (t[i] + 1))
    return (x / 2) * acc - alpha
# a, b - endpoints of the search interval (a = 0 in the caller)
def find_limit_of_integration(a, b, alpha, t, weights, max_iter=100):
    """Solve F(x) = 0 by bisection on [a, b].

    a, b     -- bracket endpoints; swapped if needed so that F(a) <= 0
    alpha    -- target integral value (already scaled by the caller)
    t        -- quadrature nodes on [-1, 1]
    weights  -- quadrature weights
    max_iter -- safety cap on bisection steps.  Bug fix: the original
                declared a counter but never incremented it (the increment
                was commented out), so the loop could spin forever when the
                relative tolerance was never met.

    Returns the root approximation, or None when [a, b] does not bracket a
    sign change.
    """
    print(a, F(a, alpha, t, weights))
    print(alpha)
    # Orient the bracket so that F(a) <= 0 <= F(b).
    if F(a, alpha, t, weights) > 0:
        a, b = b, a
    if F(a, alpha, t, weights) > 0:
        print("Увеличить диапазон поиска!")
        return None
    tmp = 0
    for _ in range(max_iter):
        tmp = (a + b) / 2
        Ftmp = F(tmp, alpha, t, weights)
        print(tmp, Ftmp)
        # Relative convergence test on the bracket width (guard b == 0).
        if b != 0 and abs((b - tmp) / b) < eps:
            break
        if Ftmp < 0:
            a = tmp
        else:
            b = tmp
    return tmp
# ---- driver ---------------------------------------------------------------
# n     : degree of the Legendre polynomial (number of quadrature nodes)
# alpha : target integral value; scaled by sqrt(2*pi) so the user enters a
#         standard-normal probability
n = int(input("n = "))
alpha = float(input("a = "))
alpha *= sqrt(2 * pi)
x, px, dpx = get_roots_Legendre(n)
print("Корни полинома Лежандра:")
print(x)
print("Весовые коэф. по формуле:")
a = get_coef_Gauss_formula(x, dpx, n)
print(a)
print("Коэф. по решению системы:")
b = get_coef_Gauss(x, n)
print(b)
# Search the upper limit in [0, 5] using the linear-system weights.
res = find_limit_of_integration(0, 5, alpha, x, b)
print("x = ", res)
print(F(res, alpha, x, b))
"""
import matplotlib.pyplot as plt
tx = np.linspace(0, 20, 100)
y = [F(i, alpha, x, b) for i in tx]
plt.plot(tx, y)
plt.show()
"""
#!/usr/bin/python2.7
"""
Usage: python benchmarks.py [all] [cpu] [memory] [disk] [cpuExp]
"""
import subprocess
import sys
#'''To execute the input command '''
def execute(command):
    """Run *command* (an argv list), wait for completion and echo its stdout.

    NOTE(review): stderr is not piped, so `err` is always None here —
    presumably error reporting was intended via stderr=subprocess.PIPE.
    """
    subP = subprocess.Popen(command, stdout=subprocess.PIPE)
    subP.wait()
    out, err = subP.communicate()
    #sys.stdout.write(popen.stdout.read())
    if err is not None:
        # Bug fix: file.write() takes exactly one string argument; the old
        # two-argument call raised TypeError whenever this path was hit.
        sys.stdout.write("Error: " + str(err))
    sys.stdout.write(out)
#'''To execute a make command'''
def make(arg):
    # Build (or clean) the given make target, e.g. make('cpu').
    execute(['make', arg])
#To invoke cpu benchmark
def cpu(loop):
    """Build './cpu' and sweep operation type x thread count, printing the
    CSV-style header first; the whole sweep is repeated *loop* times."""
    operations = ['FLOPS','IOPS']
    threads = ['1','2','4']
    make('cpu')
    cpuStr = './cpu'
    runsArgs = []  # NOTE(review): unused — presumably leftover
    print "*******************************************************************"
    print "                           CPU BENCHMARK                           "
    print "*******************************************************************"
    print "OP_TYPE, N_THREAD, N_OPERATIONS, T_TAKEN_AVG, T_TAKEN_SD, SPEED"
    for i in range(loop):
        for op in operations:
            for th in threads:
                runArgs = [cpuStr,op,th]
                execute(runArgs)
#To invoke cpu experiment
def cpuExp(loop):
    """Build './ReadCPU' and run each operation type *loop* times (no banner)."""
    operations = ['FLOPS','IOPS']
    make('ReadCPU')
    cpuStr = './ReadCPU'
    runsArgs = []  # NOTE(review): unused — presumably leftover
    for i in range(loop):
        for op in operations:
            runArgs = [cpuStr,op]
            execute(runArgs)
#To invoke memory benchmark
def memory(loop):
    """Build './memory' and sweep access mode x block size x thread count,
    repeating the whole sweep *loop* times."""
    operations = ['SEQ', 'RAND']
    threads = ['1','2']
    size = ['1B','1KB','1MB']
    make('memory')
    cpuStr = './memory'
    runsArgs = []  # NOTE(review): unused — presumably leftover
    print "******************************************************************************************"
    print "                                    MEMORY BENCHMARK                                      "
    print "******************************************************************************************"
    print "ACCESS_MODE, NUM_THREAD, BLOCK_SIZE, TOTAL_BYTES, T_LATENCY_AVG, T_LATENCY_SD, THROUGHPUT"
    for i in range(loop):
        for op in operations:
            for sz in size:
                for th in threads:
                    runArgs = [cpuStr, op, sz, th]
                    execute(runArgs)
#To invoke disk benchmark
def disk(loop):
    """Build './disk' and sweep op x access mode x block size x thread count,
    repeating the whole sweep *loop* times."""
    operations = ['READ','WRITE']
    access = ['SEQ','RAND']
    threads = ['1','2']
    size = ['1B','1KB','1MB']
    make('disk')
    cpuStr = './disk'
    runsArgs = []  # NOTE(review): unused — presumably leftover
    print "*****************************************************************************************************"
    print "                                          DISK BENCHMARK                                             "
    print "*****************************************************************************************************"
    print "OPERATION, ACCESS_MODE, NUM_THREAD, BLOCK_SIZE, TOTAL_BYTES, T_LATENCY_AVG, T_LATENCY_SD, THROUGHPUT"
    for i in range(loop):
        for op in operations:
            for ac in access:
                for sz in size:
                    for th in threads:
                        runArgs = [cpuStr, op, ac, sz, th]
                        execute(runArgs)
#Main section form where the program begins
if __name__ == "__main__":
    # With no arguments, print the usage text (module docstring) and exit.
    if len(sys.argv) == 1:
        sys.stderr.write(__doc__)
        exit(1)
    #Default number of times the experiments to be run
    loop = 1
    for arg in sys.argv:
        if arg == 'cpu' or arg == 'all' :
            cpu(loop)
        if arg == 'memory' or arg == 'all' :
            memory(loop)
        if arg == 'disk' or arg == 'all' :
            disk(loop)
        if arg == 'cpuExp' :
            cpuExp(loop)
    # Remove build artifacts once all selected benchmarks have run.
    make('clean')
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bring in the shared Keras ResNet modules into this module.
The TensorFlow official Keras models are moved under
official/vision/image_classification
In order to be backward compatible with models that directly import its modules,
we import the Keras ResNet modules under official.resnet.keras.
New TF models should not depend on modules directly under this path.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from official.vision.image_classification import cifar_preprocessing
from official.vision.image_classification import common as keras_common
from official.vision.image_classification import imagenet_preprocessing
from official.vision.image_classification import resnet_cifar_main as keras_cifar_main
from official.vision.image_classification import resnet_cifar_model
from official.vision.image_classification import resnet_imagenet_main as keras_imagenet_main
from official.vision.image_classification import resnet_model
# Remove the __future__ names from the module namespace so they are not
# re-exported as part of this compatibility shim's public API.
del absolute_import
del division
del print_function
|
#-*- coding:utf-8 -*-
# Formatted-output demo: compute the cost of an apple purchase and a 10%
# discounted price, then print them with %-style placeholders.
score = 100
high = 170
applePrice = 3.5
appleWeight = 7.5
name = 'george'
totalCost = appleWeight * applePrice
discountCost = totalCost * 0.9
print('totalCost is: %f'%totalCost)        # %f renders the float value
print('Discounted price is: %d'%discountCost)  # %d truncates to an integer
print('Buyer name: %s'%name)
|
""" Setup script. """
import io
from setuptools import setup, find_packages
from src.permaviss.version import __version__
# Read the long description from the README shipped alongside this script.
with io.open("README.rst", "r", encoding="utf-8") as readme_file:
    README = readme_file.read()
setup(
    name="permaviss",
    version=__version__,
    description="Persistence Mayer Vietoris spectral sequence",
    long_description=README,
    long_description_content_type="text/x-rst",
    author_email="atorras1618@gmail.com",
    author="Alvaro Torras Casas",
    url="https://github.com/atorras1618/PerMaViss",
    download_url="https://github.com/atorras1618/PerMaViss/tarball/v0.1",
    # Bug fix: the setuptools keyword is 'license'; the misspelled 'licence'
    # was ignored, so no license metadata was published with the package.
    license="MIT",
    keywords=["spectral sequence", "persistent homology", "Mayer Vietoris"],
    packages=find_packages("src"),
    package_dir={"": "src"},
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import requests
from datetime import datetime
#GraphQL Database
from graphqlclient import GraphQLClient
# Graphcool project endpoints: the simple API serves queries/mutations,
# the file API serves uploads.
DB_URL = 'https://api.graph.cool/simple/v1/cjrfet7u94als0129de33wha3'
DB_UPLOAD_URL = 'https://api.graph.cool/file/v1/cjrfet7u94als0129de33wha3'
client = GraphQLClient(DB_URL)
#PyAudio
import pyaudio
import wave
import sys
#Variables
# Search term used by gqlGetKeyword() to pick a recording.
KEYWORD = 'brain'
#also modify file to show that it was accessed
#also add features to play the file once grabbed
#also add randomizer
#also add sentiment analysis
def gqlGetOrdered():
    """Fetch one not-yet-converted file record (file2 == null), skipping the
    most recent entry.  Returns the raw JSON response string."""
    result = client.execute('''
    query {
      allFiles(
        last: 1
        skip: 1,
        filter: {
          file2: null
        }) {
        id
        name
        url
      }
    }
    ''')
    return result
#QUERY TO GET FILE2_URL, searches by keyword and looks only for lyrebird converted files (file2null)
def gqlGetKeyword():
    """Return file records whose text contains KEYWORD and that still have no
    file2 attached.  Returns the raw JSON response string."""
    # json.dumps already yields a str, so no extra str() wrap is needed.
    payload = json.dumps({"text": KEYWORD})
    query = '''
    query ($text: String){
      allFiles(filter: {
        AND: [{
          text_contains: $text
        }, {
          file2: null
        }]
      }) {
        id
        name
        url
      }
    }
    '''
    return client.execute(query, payload)
###CHOOSE HERE WHETHER TO RUN AN ORDERED QUERY OR A KEYWORD QUERY, HOW TO RANDOM QUERY?
#result = gqlGetOrdered()
result = gqlGetKeyword()
# Unwrap the GraphQL response down to the first matching file record.
# NOTE(review): IndexError if no file matches KEYWORD — no guard here.
result = json.loads(result)
result = result['data']
result = result['allFiles']
result = result[0]
FILE2_ID = result['id']
FILE2_NAME = result['name']
FILE2_URL = result['url']
#FILE2_TEXT = result['text']
#print(FILE2_ID, FILE2_NAME, FILE2_URL)
#updates DB with STT text field
def gqlMutateText(fid):
    """Stamp the file record *fid* with the current (second-resolution,
    ISO-format) access time.

    NOTE(review): despite the name and the comment above, this mutation
    updates 'accessedAt', not a text field — confirm intended behaviour.
    """
    #sets any variables to pass to query, packs all the variables into a JSON, to feed to the GQLdb
    acc = datetime.now().replace(microsecond=0).isoformat() #.strftime("%m/%d/%Y, %I:%M:%S %p")
    print(acc)
    variables = {"id": fid, "accessedAt": acc}
    variables = json.dumps(variables)
    variables = str(variables)
    #makes the query call with variables and returns results
    result = client.execute('''
    mutation ($id: ID!, $accessedAt: [DateTime!]) {
      updateFile(
        id: $id
        accessedAt: $accessedAt
      ) {
        id
        name
        accessedAt
        url
      }
    }
    ''', variables) # , ) do i add variables here, a dictionary/string of them?
    #print("added text to DB")
    print(result)
    return result
gqlMutateText(FILE2_ID)
#download TTS Lyrebird file
def getFile(furl, fname):
    """Download *furl* and save it as Play/<fname>.

    Returns the number of bytes written.
    """
    r = requests.get(furl, allow_redirects=True)
    # Bug fix: the original left the file handle from open(...).write(...)
    # unclosed, so the data could remain unflushed when the file is
    # immediately reopened for playback below.
    with open('Play/' + fname, 'wb') as out_file:
        results = out_file.write(r.content)
    return results
getFile(FILE2_URL, FILE2_NAME)
#play file
def playFile(fname):
    """Stream the wave file Play/<fname> to the default audio output device."""
    CHUNK = 1024
    #if len(sys.argv) < 2:
    #    print('plays a wave file. usage: %s filename.wav' % sys.argv[0])
    #wf = wave.open(sys.argv[1], 'rb')
    wf = wave.open('Play/' + fname, 'rb')
    p = pyaudio.PyAudio()
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    #num_frames=wf.getnframes(),
                    output=True)
    data = wf.readframes(CHUNK)
    # Bug fix: readframes() returns bytes on Python 3, so the original test
    # "data != ''" was always true and the loop never terminated (it kept
    # writing empty buffers forever).  Stop when no frames remain.
    while len(data) > 0:
        stream.write(data, exception_on_underflow=False)
        data = wf.readframes(CHUNK)
    stream.stop_stream()
    stream.close()
    p.terminate()
    #exit()
playFile(FILE2_NAME)
#exit()
#exception_on_underflow pyaudio?????
#num_frames = int(len(frames) / (self._channels * width))
|
import model_template
import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops
import keras
class full_conv(model_template.ModelTemplate):
    """Four conv+maxpool blocks followed by global average pooling, dropout
    and a dense classification head (TF1 tf.layers API)."""
    def __init__(self,n_outputs,dropout_rate,**kwargs):
        super().__init__(**kwargs)
        self.dropout_rate = dropout_rate
        self.n_outputs = n_outputs
        # Tensor handles, populated by build().
        self.data = None
        self.conv1_1 = None
        self.conv1_1_bn = None
        self.relu1_1=None
        self.pool1 = None
        self.conv2_1 = None
        self.conv2_1_bn = None
        self.relu2_1=None
        self.pool2 = None
        self.conv3_1 = None
        self.conv3_1_bn = None
        self.relu3_1 = None
        self.pool3 = None
        self.conv4_1 = None
        self.conv4_1_bn = None
        self.relu4_1 = None
        self.pool4 = None
        self.global_average_pooling=None
        self.global_dropout = None
        self.logits = None
        #self.probs = None
    def spp_layer2(self, input_tensor, levels=[2, 1], name='SPP_layer'):
        '''Multiple Level SPP layer.
        Works for levels=[1, 2, 3, 6].

        For each level l the input is max-pooled down to an l x l grid
        (ceil/floor kernel/stride so the grid covers the whole map), then
        all grids are flattened and concatenated.
        '''
        self.sp_tensor = input_tensor
        with tf.variable_scope(name):
            pool_outputs = []
            for l in levels:
                pool = gen_nn_ops.max_pool_v2(
                    self.sp_tensor,
                    ksize=[1,
                           tf.math.ceil(tf.math.divide(tf.shape(self.sp_tensor)[1], l)),
                           tf.math.ceil(tf.math.divide(tf.shape(self.sp_tensor)[2], l)),
                           1],
                    strides=[1,
                             tf.math.floor(tf.math.divide(tf.shape(self.sp_tensor)[1], l)),
                             tf.math.floor(tf.math.divide(tf.shape(self.sp_tensor)[2], l)),
                             1],
                    padding='VALID')
                pool_outputs.append(tf.reshape(pool, [tf.shape(input_tensor)[0], -1]))
            spp_pool = tf.concat(pool_outputs, 1)
            # Static shape for levels=[2, 1] with 256 channels: 4*256 + 256.
            spp_pool = tf.reshape(spp_pool, (-1, 4 * 256 + 256))
        return spp_pool
    def build(self, input_tensor):
        """Assemble the graph on *input_tensor* and cache the layer handles."""
        if input_tensor is None:
            raise ValueError("input_tensor must be a valid tf.Tensor object")
        # input_tensor = tf.placeholder(tf.float32, shape=(None, 224, 224, 3), name='data')
        self.data = input_tensor
        conv1_1= tf.layers.conv2d(self.data, 32, 5, activation=tf.nn.relu,padding="same",name="conv1_1")
        pool1 = tf.layers.max_pooling2d(conv1_1, 4, 2, name="pool1")
        conv2_1 = tf.layers.conv2d(pool1, 64, 5, activation=tf.nn.relu, name='conv2_1',padding="same")
        pool2 = tf.layers.max_pooling2d(conv2_1, 4, 2, name="pool2")
        conv3_1 = tf.layers.conv2d(pool2, 128, 5, activation=tf.nn.relu, name='conv3_1',padding="same")
        pool3 = tf.layers.max_pooling2d(conv3_1, 4, 2, name="pool3")
        conv4_1 = tf.layers.conv2d(pool3, 256, 5, activation=tf.nn.relu, name='conv4_1',padding="same")
        pool4 = tf.layers.max_pooling2d(conv4_1, 4, 2, name="pool4")
        #spp = self.spp_layer2(pool4)
        global_average_pooling = tf.reduce_mean(pool4, axis=[1, 2],name="global_pooling")
        global_dropout = tf.layers.dropout(global_average_pooling, rate=self.dropout_rate, name='global_dropout')
        logits = tf.layers.dense(global_dropout, units=self.n_outputs, name="dense_1")
        self.conv1_1 = conv1_1
        self.pool1 = pool1
        self.conv2_1 =conv2_1
        self.pool2 =pool2
        self.conv3_1 =conv3_1
        self.pool3 = pool3
        self.conv4_1 =conv4_1
        self.pool4 =pool4
        #self.spp = spp
        # Bug fix: this was assigned to the misspelled 'global_average_pooing',
        # which left the attribute declared in __init__ forever None.  The
        # misspelled alias is kept for any external code that relied on it.
        self.global_average_pooling = global_average_pooling
        self.global_average_pooing = global_average_pooling
        self.global_dropout=global_dropout
        self.logits=logits
|
from __future__ import print_function
import urllib2
from bs4 import BeautifulSoup
# Scrape the "Vital articles" index pages and write every article link to a
# plain-text list, one href per line.
f = open('lists/ListEnWikiVital.txt','w')
#urls = ['http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Level/2'] # Single page where the 100 vital articles are linked
urls = ['http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/People','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/History','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/Geography','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/Arts','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/Philosophy_and_religion','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/Everyday_life','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/Society_and_social_sciences','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/Biology_and_health_sciences','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/Physical_sciences','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/Technology','http://en.wikipedia.org/wiki/Wikipedia:Vital_articles/Expanded/Mathematics'] #pulls the article links to the vital 9742, plus some misc. links
for url in urls:
    s = urllib2.urlopen(url).read()
    #with file('List of articles every Wikipedia should have_Expanded - Meta.html') as f:
    #    s = f.read()
    soup = BeautifulSoup(s)
    soup.prettify()
    for anchor in soup.findAll('a', href=True):
        if '/wiki/' in anchor['href'] and not ':' in anchor['href'] and not '//' in anchor['href'] and not 'Main_Page' in anchor['href']: # keeps the links mostly limited to Wikipedia articles
            print(anchor['href'],file=f)
# Bug fix: the original wrote 'f.close' without parentheses, referencing the
# bound method instead of calling it — the file was never explicitly closed.
f.close()
|
import torch.nn as nn
def mse_loss(output, target):
    """Mean-squared-error between two tensors.

    Bug fix: the original called nn.MSELoss(output, target), which passes the
    tensors to the *module constructor* and returns an unusable module object
    instead of a loss value.  Instantiate the criterion, then apply it.
    """
    loss = nn.MSELoss()(output, target)
    return loss
def find_best_orientation(kpts_gt, kpts_est, difference_thresh=0.5):
    """
    Same as flip_loss, but also returns the best orientation and the original vs min loss
    :param kpts_gt: Ground-truth keypoints
    :param kpts_est: Inferred keypoints
    :param difference_thresh: Minimal loss must be difference_thresh*original loss to be returned
    :return: Unflipped loss, flipped loss, recommended flipping operation

    NOTE(review): min_loss/original_loss/min_op are overwritten on every
    iteration, so only the LAST batch element's result is returned — confirm
    whether per-batch results were intended.  Coordinates are assumed to be
    normalised to [0, 1] and interleaved (x at even indices, y at odd) — the
    flips use 1 - coord; verify against the data pipeline.
    """
    for batch in range(0, len(kpts_gt), 1):
        tmp = kpts_est[batch].clone()
        # no flip
        min_loss = mse_loss(kpts_gt[batch], tmp)
        original_loss = min_loss
        min_op = "identity"
        # horizontal flip (mirror the x coordinates at even indices)
        tmp[0::2] = 1.0 - tmp[0::2]
        loss = mse_loss(kpts_gt[batch], tmp)
        if loss < (min_loss * difference_thresh):
            min_loss = loss
            min_op = "hor"
        # horizontal and vertical flip (now also mirror y)
        tmp[1::2] = 1.0 - tmp[1::2]
        loss = mse_loss(kpts_gt[batch], tmp)
        if loss < (min_loss * difference_thresh):
            min_loss = loss
            min_op = "hor_ver"
        # vertical flip (mirror x back, leaving only y flipped)
        tmp[0::2] = 1.0 - tmp[0::2]
        loss = mse_loss(kpts_gt[batch], tmp)
        if loss < (min_loss * difference_thresh):
            min_loss = loss
            min_op = "ver"
    return original_loss, min_loss, min_op
def flip_loss(kpts_gt, kpts_est, difference_thresh=0.5):
    """Flip-invariant keypoint loss: the MSE of the best flip orientation.

    :param kpts_gt: Ground-truth keypoints
    :param kpts_est: Inferred keypoints
    :param difference_thresh: a flipped loss must beat difference_thresh
        times the current best to be preferred
    :return: MSELoss of the best flip
    """
    _, best_loss, _ = find_best_orientation(kpts_gt, kpts_est, difference_thresh)
    return best_loss
# Greedy airport-gate assignment: each plane may dock at gates 1..num (its
# own limit) and takes the highest-numbered free gate; boarding stops at the
# first plane that cannot dock.
num_gate = int(input())
num_plane = int(input())
gates = [0 for _ in range(num_gate)]  # 0 = free, otherwise the docked plane's limit
num_fill = 0
cap = num_gate - 1  # highest gate index still worth scanning
for p in range(num_plane):
    num = int(input())
    # Scan downward from min(plane's limit, cap) looking for a free gate.
    for g in range(min(num - 1, cap), -1, -1):
        if gates[g] == 0:
            num_fill+=1
            gates[g] = num
            # NOTE(review): the cap only shrinks when the gate just below it
            # fills — presumably a scan-range optimisation; verify it cannot
            # skip free gates.
            if g == cap - 1:
                cap = g
            break
    else:
        # No free gate for this plane: no later plane is served either.
        break
print(num_fill)
|
import os
from flask import current_app
from .tasks import upload
def upload_file(file, user_id, video_id, field="url"):
    """Persist an uploaded file into the app's tmp dir, then queue the async
    upload task with the local path and the remote object key.

    :param file: incoming file object (Werkzeug FileStorage)
    :param user_id: owner id, used to namespace the remote key
    :param video_id: video record id the upload belongs to
    :param field: model field the task should store the resulting URL in
    """
    dir_path = f"{current_app.root_path}/tmp/"
    local_path = f"{dir_path}{file.filename}"
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    # Bug fix: save() was called without a destination; FileStorage.save
    # requires a target path — write to the local_path the task reads from.
    file.save(local_path)
    upload.apply_async(
        args=[
            video_id,
            local_path,
            f"videoblog/{user_id}/{video_id}-{file.filename}",
            field,
        ]
    )
|
import xgboost as xgb
import sys
# ---- command-line arguments (all arrive as strings) -----------------------
trainFile = sys.argv[1]
testFile = sys.argv[2]
max_depth = sys.argv[3]
eta = sys.argv[4]
subsample = sys.argv[5]
colsample_bytree = sys.argv[6]
colsample_bylevel = sys.argv[7]
min_child_weight = sys.argv[8]
gamma = sys.argv[9]
alpha = sys.argv[10]
lambdaParam = sys.argv[11]
numIterations = sys.argv[12]
evalMetric = sys.argv[13]
scale_pos_weight = sys.argv[14]
modelNam = sys.argv[15]
# Echo the configuration for the run log.
print(trainFile)
print(testFile)
print(max_depth)
print(eta)
print(subsample)
print(colsample_bytree)
print(colsample_bylevel)
print(min_child_weight)
print(gamma)
print(alpha)
print(lambdaParam)
print(numIterations)
print(evalMetric)
print(scale_pos_weight)
print(modelNam)
dtrain = xgb.DMatrix(trainFile)
dtest = xgb.DMatrix(testFile)
print(dtrain)
print(dtest)
param = {'scale_pos_weight':scale_pos_weight,'alpha':alpha,'lambda':lambdaParam,'gamma':gamma,'min_child_weight':min_child_weight,'colsample_bylevel':colsample_bylevel,'colsample_bytree':colsample_bytree,'subsample':subsample,'bst:max_depth':max_depth, 'bst:eta':eta, 'silent':1, 'objective':'binary:logistic' }
param['nthread'] = 10
param['eval_metric'] = evalMetric
evallist = [(dtest,'eval'), (dtrain,'train')]
# Materialize the items view so it can be passed to xgb.train on Python 3.
plst = list(param.items())
# Bug fix: numIterations was parsed and printed but never used — training
# always ran a hard-coded 50 rounds (and the earlier num_round = 30 was dead).
num_round = int(numIterations)
print("training")
bst = xgb.train( plst, dtrain, num_round, evallist )
bst.save_model(modelNam)
print(bst)
#evalHistory = xgb.cv( plst, dtrain,int(numIterations),100,['auc','error'])
#print(evalHistory);
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 16 14:46:01 2019
@author: chakra

Walkthrough of Python lists: indexing, slicing, mutation, iteration,
joining, and the built-in list methods.
"""
# PYTHON COLLECTIONS.
# Python has 4 built-in collection types:
# LIST, TUPLE, SET and DICTIONARY.
# A list is an ordered, changeable collection; duplicates are allowed.
# Lists are written with [] brackets.
x=["chakra","chahana", "indra", "sabhya"]
# Print the whole list.
print(x)
# Access by index; indexing starts at 0, so index 2 is the 3rd item.
# Prints "indra".
print(x[2])
# Items are replaceable: overwrite "indra" (index 2) with "indraMaya".
x[2]="indraMaya"
# Check the list again.
print(x)
# NEGATIVE INDEXING: -1 is the last item, -2 the one before, and so on.
print(x[-1])
# RANGE OF INDEXES (slicing).
thislist = ["apple", "banana", "cherry", "orange", "kiwi", "melon", "mango"]
# Index 2 is included, index 5 is not.
print(thislist[2:5])
# RANGE OF NEGATIVE INDEXES:
# -4 is included, -1 is not.
# Output: ['orange', 'kiwi', 'melon']
print(thislist[-4:-1])
# Loop through a list.  (NOTE: this reuses the name x from above.)
thelist = ["apple", "banana", "cherry"]
for x in thelist:
    print(x)
# Check whether an item exists.
if "apple" in thelist:
    print("yes apple exist")
else:
    print("no apple on list")
# List length: use the len() function.
print(len(thelist))
# ADD ITEMS:
# append() adds an item at the end of the list.
thelist.append("pineapple")
print(thelist)
# insert(index, item) adds at a specific position;
# insert at index 0, i.e. as the 1st item.
thelist.insert(0,"mango")
print(thelist)
# REMOVE ITEMS: remove() deletes the first matching value.
thelist.remove("banana")
print(thelist)
# pop() removes the item at the given index (or the last item by default).
thelist.pop(1)
print(thelist)
# The del statement can also remove a specific index.
del thelist[2]
print(thelist)
# clear() empties the list.
#thelist.clear()
#print(thelist)
'''#copy list
thislist = ["apple", "banana", "cherry"]
mylist=thislist.copy()
print(mylist)
#another way to copy
mylist=list(thislist)
print(mylist)
'''
# JOIN TWO LISTS with the + operator.
list1 = ["a", "b" , "c"]
list2 = [1, 2, 3]
list3 = list1 + list2
print(list3)
# Another way to join two lists: append all items from list2 into list1,
# one by one.
list1 = ["a", "b" , "c"]
list2 = [1, 2, 3]
for x in list2:
    list1.append(x)
print(list1)
# Or use the extend() method, whose purpose is to add the elements of one
# list (or any iterable) to the end of another list.
list1 = ["a", "b" , "c"]
list2 = [1, 2, 3]
list1.extend(list2)
print(list1)
# The list() constructor: note the double round brackets (a tuple is
# passed in), but the result still prints with [].
mynewlist=list(("cp","neopaney","pittsburgh","PA"))
print(mynewlist)
#---------------------------------------------------------------------------------------------------
'''List Methods
Python has a set of built-in methods that you can use on lists.
Method Description
append() Adds an element at the end of the list
clear() Removes all the elements from the list
copy() Returns a copy of the list
count() Returns the number of elements with the specified value
extend() Add the elements of a list (or any iterable), to the end of the current list
index() Returns the index of the first element with the specified value
insert() Adds an element at the specified position
pop() Removes the element at the specified position
remove() Removes the item with the specified value
reverse() Reverses the order of the list
sort() Sorts the list'''
#---------------------------------------------------------------------------------------------------
|
def get_vlans(services, host):
    """Map vlan id -> service name for every vlan that contains *host*.

    :param services: {service_name: {vlan_id: {'nodes': [...], ...}}}
    :param host: node name to look for in each vlan's 'nodes' list.
    :return: dict {vlan_id: service_name}
    """
    vlans = {}
    # BUG FIX: dict.iteritems() is Python 2 only; .items() iterates the
    # same way on both Python 2 and 3.
    for cl, cldata in services.items():
        for vid, vdata in cldata.items():
            if host in vdata['nodes']:
                vlans[vid] = cl
    return vlans
# Registry of helper functions dispatchable through vl_srv_helper.
helpers = {
    'get_vlans': get_vlans,
}
def vl_srv_helper(services, get_help, host):
    """Dispatch *get_help* to a registered helper, or report it missing."""
    helper = helpers.get(get_help)
    if helper is None:
        return "Helper {} not found".format(get_help)
    return helper(services, host)
class FilterModule(object):
    ''' URI filter '''

    def filters(self):
        """Expose the custom filters to Ansible."""
        return {'vl_srv_helper': vl_srv_helper}
|
class FlatAssociativeArray:
    """Flat (list-backed) associative array mapping keys to counts.

    Built from an input array, it behaves like a histogram: each distinct
    item is stored once as a (key, count) tuple, in insertion order.
    """

    def __init__(self, array):
        """Inits from array to histogram."""
        self.data_list = []
        self.keys = []
        if array:
            for item in array:
                if not self.search(item):
                    self.insert(item)
                else:
                    self.update(item)
            self.keys = self.getKeys()

    def insert(self, data):
        """Append (data, 1) to the end of the list and refresh keys.

        Params: data - key of thing to add
        *Should only be called if data not already in data_list.
        """
        self.data_list.append((data, 1))
        self.keys = self.getKeys()

    def update(self, data):
        """Increase the count of *data* (which must be present)."""
        index = self.getIndex(data)
        # BUG FIX: the original called getValue(index) although getValue
        # expects a *key*; read the stored count directly instead.
        self.data_list[index] = (self.getKey(index), self.data_list[index][1] + 1)

    def remove(self, data):
        """Remove the given (key, value) tuple.

        Params: data - elem to remove
        """
        key = data[0]
        index = self.getIndex(key)
        # BUG FIX: list.remove(index) searched for the *value* `index`;
        # delete by position, and keep the key cache in sync.
        if index is not None:
            del self.data_list[index]
            self.keys = self.getKeys()

    def search(self, key):
        """Return the (key, val) item for *key*, or None if absent."""
        index = self.getIndex(key)
        # BUG FIX: `if index:` treated position 0 as "not found", so the
        # first inserted key could never be located again.
        if index is not None:
            return self.data_list[index]
        return None

    def getIndex(self, key):
        """Return the index of *key*, or None if absent. O(n)."""
        for i, existing in enumerate(self.keys):
            if key == existing:
                return i
        return None

    def getKey(self, index):
        """Return the key stored at *index*."""
        return self.data_list[index][0]

    def getValue(self, key):
        """Return the count (frequency) stored for *key*, or None."""
        index = self.getIndex(key)
        if index is not None:
            return self.data_list[index][1]
        return None

    def getKeys(self):
        """Return all keys in data_list order."""
        return [item[0] for item in self.data_list]

    def __iter__(self):
        """Iterate over the stored (key, count) tuples."""
        for item in self.data_list:
            yield item

    def __getitem__(self, index):
        """Dict-style access: arr[key] -> (key, count) or None."""
        return self.search(index)

    def __name__(self):
        """Required for timeit benchmark."""
        return "FlatAssociativeArray"
|
class Employee:
    """Pair of numbers with component-wise addition (the + operator) and a
    'summ' property whose setter splits a number into tens/ones parts."""

    def __init__(self, a, b):
        self.first = a
        self.second = b

    def play(self, other):
        """Component-wise addition; also bound to + via __add__."""
        return Employee(self.first + other.first, self.second + other.second)
    __add__ = play

    def __repr__(self):
        return '{}-{}'.format(self.first, self.second)

    @property
    def summ(self):
        """Sum of both components."""
        return self.first + self.second

    @summ.setter
    def summ(self, num):
        # BUG FIX: int(num / 10) goes through float division, which loses
        # precision for large ints and disagrees with % for negatives;
        # floor division keeps first * 10 + second == num.
        self.second = num % 10
        self.first = num // 10
a = Employee(5, 6)
b = Employee(6, 7)


def world1():
    """Demo function; prints a greeting."""
    print("hi")


# BUG FIX: the original rebound the builtin name `str` to "world1" and then
# called print(str()), which raises TypeError ('str' object is not
# callable).  Use a non-shadowing name and print the value itself.
label = "world1"
print(label)
from core.optimizer.graph import Source, Graph, Edge, Node, Sink
from core.optimizer.graph_optimization import ford_best, get_min_cut, get_value

# Manual experiment: build a small flow network with two sources (a, b)
# feeding nodes c and d into a single sink t, then run ford_best on each
# connected subgraph.
# NOTE(review): the Node(...) numeric arguments and dependency dicts
# follow the project's Graph API; their semantics (capacities? weights?)
# are not verifiable from this file alone — confirm against core.optimizer.
#node_source = Source('s')
#node_a = Node("a", 10, {"a": 4})
#node_b = Node("b", 15, {"b": 4})
node_a = Source("a")
node_b = Source("b")
node_c = Node("c", 20, {"a": 4})
node_d = Node("d", 30, {"c": 1/3})
#node_e = Node("e", 10, {"b": 2, "d": 6})
node_sink = Sink('t', dependency={'d': 1})
#edge_sa = Edge(node_source, node_a)
#edge_sb = Edge(node_source,node_b)
edge_ac = Edge(node_a, node_c,capacity=2.)
edge_bd = Edge(node_b, node_d,capacity=3.)
#edge_de = Edge(node_d, node_e)
#edge_ce = Edge(node_c, node_e)
edge_ct = Edge(node_c, node_sink, capacity=1.)
edge_dt = Edge(node_d, node_sink, capacity=2.)
#print([str(edge) for edge in shortest_path])
graph = Graph([edge_ac, edge_bd, edge_dt, edge_ct])
# Process each connected component separately; index [2] of ford_best's
# result is printed — presumably the max-flow value; TODO confirm.
graphs = graph.get_subgraphs()
for graph in graphs:
    print(ford_best(graph)[2])
'''
res_graph = graph.residual_graph()
max_flow = 0
shortest_path = res_graph.shortest_path(node_source, node_sink)
while len(shortest_path) != 0:
    print([str(edge) for edge in shortest_path])
    print(res_graph.capacity)
    values = get_value(shortest_path)
    for i in range(len(shortest_path)):
        if shortest_path[i] in graph.edges():
            graph[shortest_path[i].node_1.id][shortest_path[i].node_2.id].flow += values[2]
        elif shortest_path[i] in res_graph.edges():
            graph[shortest_path[i].node_1.id][shortest_path[i].node_2.id].flow -= values[2]
    max_flow += values[2]
    print([(str(edge), edge.flow) for edge in graph.edges()])
    res_graph = graph.residual_graph()
    shortest_path = res_graph.shortest_path(node_source, node_sink)
min_cut = res_graph.bfs(node_source)
for node in list(res_graph.nodes.values()):
    print(node.distance)
print(max_flow)
print(values)
print([str(edge) for edge in shortest_path])
values = get_value(shortest_path)
#update flow
for i in range(len(shortest_path)):
    shortest_path[i].flow = values[2]
'''
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline to load compute backend services into Inventory.
This pipeline depends on the LoadProjectsPipeline.
"""
from google.cloud.security.common.data_access import project_dao as proj_dao
from google.cloud.security.common.util import log_util
from google.cloud.security.common.util import parser
from google.cloud.security.inventory.pipelines import base_pipeline
LOGGER = log_util.get_logger(__name__)
class LoadBackendServicesPipeline(base_pipeline.BasePipeline):
    """Load compute backend services for all projects."""

    RESOURCE_NAME = 'backend_services'

    def _transform(self, resource_from_api):
        """Create an iterator of backend services to load into database.

        Args:
            resource_from_api (dict): Forwarding rules, keyed by
                project id, from GCP API.

        Yields:
            iterator: backend service properties in a dict.
        """
        # BUG FIX: dict.iteritems() exists only on Python 2; .items()
        # iterates identically on both Python 2 and 3.
        for (project_id, backend_services) in resource_from_api.items():
            for backend_service in backend_services:
                yield {'project_id': project_id,
                       'id': backend_service.get('id'),
                       'creation_timestamp': parser.format_timestamp(
                           backend_service.get('creationTimestamp'),
                           self.MYSQL_DATETIME_FORMAT),
                       'name': backend_service.get('name'),
                       'description': backend_service.get('description'),
                       'affinity_cookie_ttl_sec': self._to_int(
                           backend_service.get('affinityCookieTtlSec')),
                       'backends': parser.json_stringify(
                           backend_service.get('backends', [])),
                       'cdn_policy': parser.json_stringify(
                           backend_service.get('cdnPolicy', {})),
                       'connection_draining': parser.json_stringify(
                           backend_service.get('connectionDraining', {})),
                       'enable_cdn': self._to_bool(
                           backend_service.get('enableCDN')),
                       'health_checks': parser.json_stringify(
                           backend_service.get('healthChecks', [])),
                       'iap': parser.json_stringify(
                           backend_service.get('iap', {})),
                       'load_balancing_scheme': backend_service.get(
                           'loadBalancingScheme'),
                       'port': self._to_int(backend_service.get('port')),
                       'port_name': backend_service.get('portName'),
                       'protocol': backend_service.get('protocol'),
                       'region': backend_service.get('region'),
                       'session_affinity': backend_service.get(
                           'sessionAffinity'),
                       'timeout_sec': backend_service.get('timeoutSec'),
                       'raw_backend_service':
                           parser.json_stringify(backend_service)}

    def _retrieve(self):
        """Retrieve backend services from GCP.

        Get all the projects in the current snapshot and retrieve the
        compute backend services for each.

        Returns:
            dict: Mapping projects with their backend services (list):
            {project_id: [backend_services]}
        """
        projects = (proj_dao
                    .ProjectDao(self.global_configs)
                    .get_projects(self.cycle_timestamp))
        backend_services = {}
        for project in projects:
            project_backend_services = self.safe_api_call(
                'get_backend_services', project.id)
            if project_backend_services:
                backend_services[project.id] = project_backend_services
        return backend_services

    def run(self):
        """Run the pipeline: retrieve, transform, load, count."""
        forwarding_rules = self._retrieve()
        loadable_rules = self._transform(forwarding_rules)
        self._load(self.RESOURCE_NAME, loadable_rules)
        self._get_loaded_count()
|
from django.db import migrations, models


class Migration(migrations.Migration):
    """Initial migration: creates the Aluno table."""

    initial = True

    dependencies = [
    ]

    # BUG FIX: Django looks for an attribute named `operations`; the
    # original spelled it `operation`, so the migration applied nothing.
    # NOTE(review): serialize=True on an auto-created pk is unusual —
    # Django's generated migrations use serialize=False; confirm intent.
    operations = [
        migrations.CreateModel(
            name="Aluno",
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=True, verbose_name='ID')),
                ('nome', models.CharField(max_length=50)),
                ('curso', models.CharField(max_length=30)),
                ('idade', models.IntegerField()),
            ],
        ),
    ]
|
# Simulation / DFS — the input is tiny, so exhaustive search is fine.
class Solution:
    # BUG FIX: the original annotated the return type as List[str] without
    # importing List from typing, which raises NameError when the method
    # is defined; the builtin `list` needs no import on Python 3.
    def letterCombinations(self, digits: str) -> list:
        """Return every letter string the digit string (digits 2-9) could
        spell on a phone keypad."""
        if not digits:
            return []
        res = set()

        def Search(s, n):
            # s = letters chosen so far, n = position in `digits`.
            if n == len(digits):
                res.add(s)
                return
            d = int(digits[n])
            if 2 <= d <= 6:
                # Digits 2-6 each map to 3 consecutive letters from 'a'.
                for i in range(3):
                    c = chr(ord('a') + 3 * (d - 2) + i)
                    Search(s + c, n + 1)
            elif d == 7:
                for item in ['p', 'q', 'r', 's']:
                    Search(s + item, n + 1)
            elif d == 8:
                for item in ['t', 'u', 'v']:
                    Search(s + item, n + 1)
            else:
                for item in ['w', 'x', 'y', 'z']:
                    Search(s + item, n + 1)

        Search("", 0)
        return list(res)
# Elegant comprehension-based solution from the discussion section.
class Solution:
    def letterCombinations(self, digits: str) -> list:
        """Phone-keypad letter combinations, built digit by digit."""
        keypad = {'2': ['a', 'b', 'c'],
                  '3': ['d', 'e', 'f'],
                  '4': ['g', 'h', 'i'],
                  '5': ['j', 'k', 'l'],
                  '6': ['m', 'n', 'o'],
                  '7': ['p', 'q', 'r', 's'],
                  '8': ['t', 'u', 'v'],
                  '9': ['w', 'x', 'y', 'z']}
        if not digits:
            return []
        combos = ['']
        # Each digit multiplies the partial results by its letter set.
        for digit in digits:
            combos = [prefix + letter
                      for prefix in combos
                      for letter in keypad[digit]]
        return combos
import RPi.GPIO as GPIO
import time

# Motorized door controller: two buttons drive a motor open/closed until
# the matching end-of-course switch trips.  French log messages: porte =
# door, fermeture/ouverture = closing/opening.
# Define Pinout
PIN_MOTOR_0 = 21
PIN_MOTOR_1 = 20
PIN_LIGHT = 16
PIN_MOTOR_3 = 12
PIN_BUTTON_0 = 25
PIN_BUTTON_1 = 24
PIN_OPEN = 8
PIN_CLOSED = 7
# Init variables (button and switch snapshots; i counts loop iterations)
pin_b0 = 0
pin_b1 = 0
pin_open = 0
pin_closed = 0
i = 0
# Init GPIO Input/Output
GPIO.setmode(GPIO.BCM) # set board mode to Broadcom
GPIO.setup(PIN_MOTOR_0, GPIO.OUT) # set up pin Motor 0
GPIO.setup(PIN_MOTOR_1, GPIO.OUT) # set up Pin Motor 1
GPIO.setup(PIN_BUTTON_0, GPIO.IN) # set up pin Button Closed
GPIO.setup(PIN_BUTTON_1, GPIO.IN) # set up Pin Button Open
GPIO.setup(PIN_OPEN, GPIO.IN) # set up Pin End Of Course Door Opened
GPIO.setup(PIN_CLOSED, GPIO.IN) # set up Pin End Of Course Door closed
GPIO.setup(PIN_LIGHT, GPIO.OUT) # set up pin infra red light
GPIO.output(PIN_LIGHT, GPIO.HIGH)
# NOTE(review): below, pin_open is read from PIN_CLOSED and pin_closed
# from PIN_OPEN — the swap is consistent throughout the loop, so it may
# reflect the actual wiring, but confirm against the hardware.
while 1 :
    #print('i = %d \n' % i)
    pin_b0 = GPIO.input(PIN_BUTTON_0)
    pin_b1 = GPIO.input(PIN_BUTTON_1)
    pin_open = GPIO.input(PIN_CLOSED)
    pin_closed = GPIO.input(PIN_OPEN)
    # Button 0 pressed (active low): close the door.
    if pin_b0 != 1:
        print('Fermeture de porte')
        # Drive the motor until the "closed" switch trips or the other
        # button is pressed.
        while pin_closed and pin_b1 :
            pin_b1 = GPIO.input(PIN_BUTTON_1)
            pin_closed = GPIO.input(PIN_OPEN)
            GPIO.output(PIN_MOTOR_0, GPIO.LOW)
            GPIO.output(PIN_MOTOR_1, GPIO.HIGH)
        # Stop the motor once the loop exits.
        GPIO.output(PIN_MOTOR_0, GPIO.LOW)
        GPIO.output(PIN_MOTOR_1, GPIO.LOW)
        if pin_closed !=1:
            print('Porte fermee')
            time.sleep(1)
    # Button 1 pressed (active low): open the door.
    if pin_b1 != 1:
        print('Ouverture de porte')
        while pin_open and pin_b0 :
            pin_b0 = GPIO.input(PIN_BUTTON_0)
            pin_open = GPIO.input(PIN_CLOSED)
            GPIO.output(PIN_MOTOR_0, GPIO.HIGH)
            GPIO.output(PIN_MOTOR_1, GPIO.LOW)
        # Stop the motor once the loop exits.
        GPIO.output(PIN_MOTOR_0, GPIO.LOW)
        GPIO.output(PIN_MOTOR_1, GPIO.LOW)
        if(pin_open!=1):
            print('Porte ouverte')
            time.sleep(1)
    # Idle: motor off, short poll interval.
    GPIO.output(PIN_MOTOR_0, GPIO.LOW)
    GPIO.output(PIN_MOTOR_1, GPIO.LOW)
    time.sleep(0.1)
    i+=1
# NOTE(review): unreachable — the `while 1` loop above never exits.
GPIO.output(PIN_LIGHT, GPIO.LOW)
|
from __future__ import absolute_import
from api.elasticsearchModel import elastic
from api.facegrouping import group_pics
from alchemyapi import AlchemyAPI
from celery import shared_task
from PIL import Image
import json
import os
# Shared AlchemyAPI client used by the tasks below.
al = AlchemyAPI()
@shared_task
def process_image(uuid, path, filename):
    """Resize, face-tag, crop, and index a freshly uploaded image."""
    manage_image_size(path)
    response = al.faceTagging('image', path)
    if response['status'] != 'OK':
        return
    response['uuid'] = uuid
    response['name'] = filename
    response.pop('usage')
    response.pop('NOTICE')
    response['files'] = crop_faces(response, path)
    elastic.save_metadata(response)
def manage_image_size(path):
    """Downscale the image at *path* in place if the file exceeds 1 MB.

    The width is clamped to 500 px, the height scaled proportionally, and
    the result re-saved as JPEG over the original file.
    """
    size = os.path.getsize(path)
    rez = 500
    if size > 10**6:
        im = Image.open(path)
        # BUG FIX (py3 compat): the original used Python 2 print
        # statements; print() works on both Python 2 and 3.
        print(im.size)
        size = (rez, int(im.size[1] * 1. * rez / im.size[0]))
        im = im.resize(size, Image.ANTIALIAS)
        print(im.size)
        im.save(path, "JPEG")
def crop_faces(response, path):
    """Cut 100x100 face thumbnails out of the tagged image.

    :param response: AlchemyAPI faceTagging response with 'imageFaces'.
    :param path: path of the source image (4-char suffix assumed).
    :return: list of paths of the saved crop files.
    """
    im = Image.open(path)
    faces = []
    for img_attr in response['imageFaces']:
        att = [img_attr['positionX'], img_attr['positionY'], img_attr['height'], img_attr['width']]
        # BUG FIX (py3 compat): map() returns an iterator on Python 3, so
        # the indexing below would fail; materialise it as a list.
        att = list(map(int, att))
        # NOTE(review): box adds height to x and width to y — looks
        # swapped, but kept as-is; confirm against the API's field order.
        box = (att[0], att[1], att[0] + att[2], att[1] + att[3])
        region = im.crop(box).resize((100, 100), Image.ANTIALIAS)
        faces.append(region)
    file_names = []
    for i, f in enumerate(faces):
        pth = '{}_{}_crop.jpg'.format(path[:-4], i)
        file_names.append(pth)
        f.save(pth, "JPEG")
    return file_names
@shared_task
def cluster_pics(uuid):
    """Group the cropped faces for *uuid* and persist the grouping."""
    elastic.save_group_info(group_pics(uuid), uuid)
|
# -*- coding:utf-8 -*-
"""
@author:zhouqiuhong
@file:urls.py
@time:2018/8/7 000718:13
"""
from django.conf.urls import url, include
from .views import OrgView, UserAskView, OrgHomeView, OrgCourseView, OrgDescView, OrgTeacherView, AddFavView, TeacherListView
from .views import TeacherDetailView
urlpatterns = [
    # Organization list
    url(r"^list/$", OrgView.as_view(), name="org_list"),
    # User consultation form
    url(r"^add_ask/$", UserAskView.as_view(), name="add_ask"),
    # Organization detail pages
    url(r"^home/(?P<org_id>\d+)/$", OrgHomeView.as_view(), name="org_home"),
    url(r"^course/(?P<org_id>\d+)/$", OrgCourseView.as_view(), name="org_course"),
    url(r"^desc/(?P<org_id>\d+)/$", OrgDescView.as_view(), name="org_desc"),
    url(r"^org_teacher/(?P<org_id>\d+)/$", OrgTeacherView.as_view(), name="org_teacher"),
    # User favorites
    url(r"^add_fav/$", AddFavView.as_view(), name="add_fav"),
    # Teacher list page
    url(r"^teacher/list/$", TeacherListView.as_view(), name="teacher_list"),
    # Teacher detail page
    url(r"^teacher/detail/(?P<teacher_id>\d+)/$", TeacherDetailView.as_view(), name="teacher_detail"),
]
import config
import os
from os import path
from fabric import Connection
from invoke import UnexpectedExit
from dotenv import load_dotenv
load_dotenv()
HOME_FOLDER = os.getenv('HOME_FOLDER')
REDIS_PASSWORD = os.getenv('REDIS_PASSWORD')
class ParkunDeploy:
    """Deploys the parkun stack: uploads configs and (re)starts it via make.

    The repeated upload boilerplate is factored into three private
    helpers; every public method keeps its original name and behavior.
    """

    def __init__(self):
        self.connection = Connection(
            host=config.SERVER,
            user=config.USERNAME,
            port=22,
            connect_kwargs={"password": config.PASSWORD})

    def _put(self, file, remote):
        """Upload a single file over SFTP and log the transfer."""
        result = self.connection.put(file, remote=remote)
        print(f'Uploaded {result.local} to {result.remote}')

    def _upload_dir(self, local_dir, remote_subdir):
        """Upload every file of *local_dir* into deploy/<remote_subdir>."""
        self.safe_run_command(f'mkdir -p deploy/{remote_subdir}')
        directory = path.join(os.getcwd(), local_dir)
        for filename in os.listdir(directory):
            self._put(os.path.join(directory, filename),
                      f'{HOME_FOLDER}/deploy/{remote_subdir}')

    def _upload_service_config(self, service):
        """Upload <service>/config.py into deploy/<service>."""
        self.safe_run_command(f'mkdir -p deploy/{service}')
        self._put(path.join(os.getcwd(), service, 'config.py'),
                  f'{HOME_FOLDER}/deploy/{service}')

    def run_parkun(self):
        """Start the stack on the remote host."""
        self.safe_run_command(
            f'export HOME_FOLDER={HOME_FOLDER} &&' +
            f'export REDIS_PASSWORD=\'{REDIS_PASSWORD}\' && ' +
            'make start')

    def stop_current(self):
        """Stop the currently running stack (best effort)."""
        self.safe_run_command(
            f'export HOME_FOLDER={HOME_FOLDER} && ' +
            f'export REDIS_PASSWORD=\'{REDIS_PASSWORD}\' && ' +
            'make stop')

    def upload_makefile(self):
        """Upload the Makefile to the remote home folder."""
        self._put(path.join(os.getcwd(), 'Makefile'), f'{HOME_FOLDER}')

    def upload_docker_compose(self):
        """Upload docker-compose.yml and .env into the deploy folder."""
        self._put(path.join(os.getcwd(), 'docker-compose.yml'),
                  f'{HOME_FOLDER}/deploy')
        self._put(path.join(os.getcwd(), '.env'), f'{HOME_FOLDER}/deploy')

    def safe_run_command(self, command: str) -> None:
        """Run a remote command, printing (not raising) on failure."""
        try:
            self.connection.run(command)
        except UnexpectedExit as e:
            print(e.result)

    def deploy(self):
        """Upload everything needed to run the stack."""
        self.create_deploy_folder()
        self.upload_configs()
        self.upload_makefile()
        self.upload_docker_compose()

    def upload_configs(self):
        """Upload per-service configs plus rabbit/redis configuration."""
        self.upload_parkun_bot_config()
        self.upload_appeal_sender_config()
        self.upload_broadcaster_config()
        self.upload_rabbit_config()
        self.upload_redis_config()

    def upload_redis_config(self):
        self._upload_dir('redis', 'redis')

    def upload_rabbit_config(self):
        # Local folder is 'rabbitmq' but the remote one is 'rabbit'.
        self._upload_dir('rabbitmq', 'rabbit')

    def upload_parkun_bot_config(self):
        self._upload_service_config('parkun_bot')

    def upload_appeal_sender_config(self):
        self._upload_service_config('appeal_sender')

    def upload_broadcaster_config(self):
        self._upload_service_config('broadcaster')

    def create_deploy_folder(self):
        """Ensure the top-level deploy folder exists on the server."""
        self.safe_run_command('mkdir -p deploy')

    def start(self) -> None:
        """Full cycle: stop, re-deploy files, start again."""
        self.stop_current()
        self.deploy()
        self.run_parkun()
if __name__ == "__main__":
    # Full cycle: stop the running stack, re-deploy configs, start again.
    ParkunDeploy().start()
|
import os
import pandas as pd
from powersimdata.network.usa_tamu.constants.zones import abv
def get_pv_tracking_data():
    """Load solar PV information from EIA860 for all plants installed in 2016.

    :return: (*pandas.DataFrame*) -- solar pv plant information as found in
        form EIA860
    """
    data_file = os.path.join(os.path.dirname(__file__), "data", "3_3_Solar_Y2016.csv")
    columns = [
        "Plant Code",
        "State",
        "Prime Mover",
        "Nameplate Capacity (MW)",
        "Single-Axis Tracking?",
        "Dual-Axis Tracking?",
        "Fixed Tilt?",
    ]
    all_plants = pd.read_csv(data_file, skiprows=range(1), usecols=columns)
    all_plants = all_plants.fillna("N")
    # Keep photovoltaic plants only; the prime-mover column is redundant after.
    pv_info = all_plants[all_plants["Prime Mover"] == "PV"].copy()
    pv_info.drop("Prime Mover", axis=1, inplace=True)
    return pv_info
def get_pv_tracking_ratio_state(pv_info, state):
    """Get solar PV tracking technology ratios for the query state in 2016 from EIA860

    :param pandas.DataFrame pv_info: solar pv plant information as found in
        form EIA860 as returned by :func:`get_pv_tracking_data`.
    :param list state: the query state(s).
    :return: (*tuple*) -- tracking technology proportion (fix, 1-axis, 2-axis)
        for the query state in 2016, or None if no PV plant in the state(s).
    :raise TypeError: if state is not a list.
    :raise ValueError: if state is invalid.
    """
    if not isinstance(state, list):
        raise TypeError("state must be a list")
    for s in state:
        if s not in abv:
            raise ValueError("Invalid State: %s" % s)
    pv_info_state = pv_info[pv_info["State"].isin(state)]
    if pv_info_state.empty:
        print("No solar PV plant in %s" % ", ".join(state))
        return
    # Vectorized tally instead of the original per-row .loc loop.  A plant
    # flagged for several technologies is (still) counted once per flag in
    # the total, matching the original accumulation.
    capacity = pv_info_state["Nameplate Capacity (MW)"]
    fix = capacity[pv_info_state["Fixed Tilt?"] == "Y"].sum()
    single = capacity[pv_info_state["Single-Axis Tracking?"] == "Y"].sum()
    dual = capacity[pv_info_state["Dual-Axis Tracking?"] == "Y"].sum()
    total_capacity = fix + single + dual
    return fix / total_capacity, single / total_capacity, dual / total_capacity
|
from selenium import webdriver
import time

# Smoke test: open baidu.com in Chrome, keep it visible for 6 s, then quit.
driver=webdriver.Chrome();
driver.get("https://www.baidu.com/")
time.sleep(6)
driver.quit()
|
# Ayman Mobin (am8wc)
# POTD 7

# Numeral fragments for each decimal digit, indexed by digit value, one
# table per place value.  Replaces the original 36-branch if chain.
_ONES = ('', 'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX')
_TENS = ('', 'X', 'XX', 'XXX', 'XL', 'L', 'LX', 'LXX', 'LXXX', 'XC')
_HUNDREDS = ('', 'C', 'CC', 'CCC', 'CD', 'D', 'DC', 'DCC', 'DCCC', 'CM')
_THOUSANDS = ('', 'M', 'MM', 'MMM')


def to_roman(value):
    """Return the Roman-numeral spelling of an int in [1, 3999]."""
    thousands, rem = divmod(value, 1000)
    hundreds, rem = divmod(rem, 100)
    tens, ones = divmod(rem, 10)
    return _THOUSANDS[thousands] + _HUNDREDS[hundreds] + _TENS[tens] + _ONES[ones]


if __name__ == "__main__":
    # Prompt typo fixed ("Enter and integer" -> "Enter an integer").
    integer_input = int(input("Enter an integer: "))
    if integer_input < 1 or integer_input > 3999:
        print("Input must be between 1 and 3999")
    else:
        print('In roman numerals,', integer_input, 'is', to_roman(integer_input))
"""
Author: Rokon Rahman
File: Network architecture for classifing cifer-10 data images
"""
import keras, os
from keras.layers import (Dense, Activation,
Flatten, Conv2D, MaxPooling2D, Dropout)
from keras.models import Sequential, load_model
from keras.callbacks import ModelCheckpoint, EarlyStopping
# project modules
from .. import config
# Where training checkpoints and the final model file are written.
model_checkpoint_dir = os.path.join(config.checkpoint_path(), "baseline.h5")
saved_model_dir = os.path.join(config.output_path(), "baseline.h5")
# Defining the CNN model.
def get_model():  # 90%+ CIFAR-10 model
    """Build the baseline CIFAR-10 CNN: two conv blocks plus a dense head."""
    model = Sequential()
    # Block 1: two 3x3 Conv(32) layers -> max-pool -> dropout.
    model.add(Conv2D(32, (3, 3), padding="same", input_shape=config.img_shape))
    model.add(Activation("relu"))
    model.add(Conv2D(32, (3, 3), padding="same", input_shape=config.img_shape))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(rate=0.20))
    # Block 2: two 3x3 Conv(64) layers -> max-pool -> dropout.
    model.add(Conv2D(64, (3, 3), padding="same", input_shape=config.img_shape))
    model.add(Activation("relu"))
    model.add(Conv2D(64, (3, 3), padding="same", input_shape=config.img_shape))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(rate=0.20))
    # Classifier head: L2-regularised dense layer, then softmax output.
    model.add(Flatten())
    model.add(Dense(384, kernel_regularizer=keras.regularizers.l2(0.01)))
    model.add(Activation("relu"))
    model.add(Dropout(rate=0.30))
    model.add(Dense(config.nb_classes))
    model.add(Activation("softmax"))
    return model
def read_model():
    """Load the previously saved baseline model from disk."""
    return load_model(saved_model_dir)
def save_model_checkpoint():
    """Checkpoint callback: keep only the best model by validation loss."""
    return ModelCheckpoint(
        model_checkpoint_dir,
        monitor="val_loss",
        verbose=2,
        save_best_only=True,
        save_weights_only=False,
        mode="auto",
        period=1,
    )
def set_early_stopping():
    """Stop training after 40 epochs without val_loss improvement."""
    return EarlyStopping(monitor="val_loss", patience=40, verbose=2, mode="auto")
if __name__ == "__main__":
    # Quick sanity check: build the model and print its layer summary.
    m = get_model()
    m.summary()
|
class CheckLargest:
    """Read three numbers from stdin and report the largest.

    BUG FIX: on invalid input the original constructed a *new* throwaway
    CheckLargest() recursively and then continued with the attribute
    unset, so Check() later raised AttributeError.  Each value is now
    re-prompted in a loop until it parses.
    """

    def __init__(self):
        self.x = self._read_number("first number")
        self.y = self._read_number("second number")
        self.z = self._read_number("third number")

    @staticmethod
    def _read_number(prompt):
        """Prompt until the user enters a valid float; return it."""
        while True:
            raw = input(prompt)
            try:
                return float(raw)
            except ValueError:
                print("the input is not a number")

    def Check(self):
        """Print which of x, y, z is the largest."""
        largest = max(self.x, self.y, self.z)
        print(f"{largest} is the largest number")
|
########################### TrackingModule ###########################
import numpy as np
from numpy.linalg import inv, cholesky
import math as mt
from matplotlib import pyplot as plt
class track:
    # Initialization
    def __init__(self, centroid, box, frame_num, classification, laneresult):
        """Create a track from a first detection.

        centroid: measurement vector; indices 0,1 are used as position and
            index 2 is read as a yaw angle below — assumed layout, confirm.
        box: bounding box of the detection.
        frame_num: frame the track was born in.
        classification: detected class (1 -> small object limits, else
            car-sized limits).
        laneresult: lane assignment of the detection.
        """
        # State vector becomes [x, y, v, yaw, yaw_rate] after the two
        # inserts; the small non-zero seeds (0.001, 0.05) keep later
        # divisions such as v / yaw_rate finite.
        self.state = centroid
        self.state = np.insert(self.state,2,0.001)
        self.state = np.insert(self.state,4,0.05)
        self.box = box
        # P: initial state covariance.
        self.P = np.array([[0.3, 0, 0, 0, 0],
                           [0, 0.3, 0, 0, 0],
                           [0, 0, 1, 0, 0],
                           [0, 0, 0, 0.3, 0],
                           [0, 0, 0, 0, 0.3]])
        # Q: process noise covariance.
        self.Q = np.array([[0.2, 0, 0, 0, 0],
                           [0, 0.2, 0, 0, 0],
                           [0, 0, 0.4, 0, 0],
                           [0, 0, 0, 0.01, 0],
                           [0, 0, 0, 0, 0.1]])
        # R: measurement noise covariance (4 observed dimensions).
        self.R = np.array([[0.01, 0, 0, 0],
                           [0, 0.01, 0, 0],
                           [0, 0, 0.2, 0],
                           [0, 0, 0, 0.1]])
        # Gating extents by class (metres, presumably — confirm units).
        if classification == 1:
            self.width_max = 1
            self.length_max = 2
        else:
            self.width_max = 2
            self.length_max = 4
        # Bookkeeping flags and counters for the tracker lifecycle.
        self.processed = 0
        self.kappa = 0          # UT spread parameter (overwritten to 20 in sigma_points)
        self.alpha = 0.3
        self.Age = 1
        self.classification = classification
        #self.ClusterID = cluster_id
        # For kitti -> self.Start = frame_num/2 / else -> self.Start = frame_num
        self.Start = frame_num
        self.Activated = 0
        self.DelCnt = 0
        self.history_state = np.empty([0,6])
        self.history_box = np.empty([0,3])
        self.dead_flag = 0
        self.motionPredict = 0
        ##
        self.yaw_angle = centroid[2]
        self.laneresult = laneresult
def sigma_points(self,P):
# Should We take kappa len - 3 because of wid, len, hei term ??
# wid, len, hei term -> not in the kalman filter but in the LPF ??
n = len(self.state)
Xi = np.zeros((n, 2*n+1))
W = np.zeros(2*n+1)
self.kappa=20
Xi[:, 0] = self.state
W[0] = self.kappa / (n + self.kappa)
U = cholesky((n + self.kappa)*P)
for i in range(n):
Xi[:, i+1] = self.state + U[:, i]
Xi[:, n+i+1] = self.state - U[:, i]
W[i+1] = 1 / (2*(n+self.kappa))
W[n+i+1] = W[i+1]
return Xi, W
def UT(self,Xi, W, noiseCov):
mean = np.sum(W * Xi, axis=1)
cov = W * (Xi - mean.reshape(-1, 1)) @ (Xi - mean.reshape(-1, 1)).T
return mean, cov + noiseCov
def fx(self,Xi, dt):
'''
cosy=mt.cos(self.state[2])
siny=mt.sin(self.state[2])
A=np.array([[1,0,dt*cosy,0,0,0,0,0],
[0,1,dt*siny,0,0,0,0,0],
[0,0,1,0,0,0,0,0],
[0,0,0,1,dt,0,0,0],
[0,0,0,0,1,0,0,0],
[0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,1,0],
[0,0,0,0,0,0,0,1]])
'''
# Revised CTRV Model
##################################
# Use state of sigma points
##################################
Xi_pred = np.zeros([5,11])
for i in range(0,len(Xi.T)):
coeff = Xi[2][i]/Xi[4][i]
add_part = np.array([coeff * (mt.sin(Xi[3][i] + Xi[4][i]*dt) - mt.sin(Xi[3][i])),
coeff * (-mt.cos(Xi[3][i] + Xi[4][i]*dt) + mt.cos(Xi[3][i])),
0,
Xi[4][i] * dt,
0])
Xi_pred[:,i] = Xi[:,i] + add_part
'''coeff = self.state[2]/self.state[4]
add_part = np.array([coeff * (mt.sin(self.state[3] + self.state[4]*dt) - mt.sin(self.state[3])),
coeff * (-mt.cos(self.state[3] + self.state[4]*dt) + mt.cos(self.state[3])),
0,
self.state[4] * dt,
0,
0,
0,
0])'''
#return Xi + add_part.reshape(-1,1)
return Xi_pred
def hx(self,Xi):
# B = np.array([[1,0,0,0,0],
# [0,1,0,0,0],
# [0,0,1,0,0],
# [0,0,0,1,0],
# [0,0,0,0,1]])
B = np.array([[1,0,0,0,0],
[0,1,0,0,0],
[0,0,1,0,0],
[0,0,0,1,0]])
return B @ Xi
    #def unscented_kalman_filter(self, z_meas, box_meas, car_list, z_processed, dt):
    def unscented_kalman_filter(self, car_centroid, car_box, car_processed, else_centroid, else_box, else_processed, dt):
        """One UKF predict/associate/update cycle for this track.

        Sigma points are propagated through the CTRV model, then a measurement
        is chosen from the car/else cluster lists via two rotated rectangular
        gates (tight gate first, looser second). A matched measurement updates
        the state and box; no match coasts on the prediction and bumps the
        deletion counter.

        car_processed / else_processed are mutated in place to mark consumed
        measurements. `temp` is the matched measurement index (-1 = none);
        `useWhatBox` selects which list it came from (0 = car, 2 = else).
        """
        temp = -1
        useWhatBox = -1
        """Unscented Ksalman Filter Algorithm."""
        # (1) Sample Sigma Points and Weights.
        Xi, W = self.sigma_points(self.P)
        # (2) Predict Mean and Error Covariance of States.
        fXi = self.fx(Xi, dt)
        x_pred, P_x = self.UT(fXi, W, self.Q)
        ######################################################################################
        # (3) Data Association
        ######################################################################################
        # 1) Car
        if self.classification == 0:
            # A) Use Car measurement
            # First Gate
            for i in range(0, len(car_centroid)):
                if car_processed[i] == 1:
                    continue
                # Express the candidate in the track frame: translate to the
                # predicted position, then rotate by the track yaw.
                z_meas_trans = np.array([0,0])
                z_meas_trans[0] = car_centroid[i,0] - x_pred[0]
                z_meas_trans[1] = car_centroid[i,1] - x_pred[1]
                Rot_inverse = np.array([[mt.cos(self.state[3]), mt.sin(self.state[3])],
                                        [-mt.sin(self.state[3]), mt.cos(self.state[3])]])
                z_meas_rot = Rot_inverse @ z_meas_trans
                if -self.width_max * 0.3 <= z_meas_rot[0] <= self.width_max * 0.3 and -self.length_max * 0.4 <= z_meas_rot[1] <= self.length_max * 0.4:
                    self.processed = 1
                    car_processed[i] = 1
                    temp = i
                    useWhatBox = 0
                    break
            # Second Gate (When Self Track is Not updated by the First gate)
            if self.processed == 0:
                for i in range(0, len(car_centroid)):
                    if car_processed[i] == 1:
                        continue
                    z_meas_trans = np.array([0,0])
                    z_meas_trans[0] = car_centroid[i,0] - x_pred[0]
                    z_meas_trans[1] = car_centroid[i,1] - x_pred[1]
                    # z_meas_trans[0] = clusters[i].res[0] - self.state[0]
                    # z_meas_trans[1] = clusters[i].res[1] - self.state[1]
                    Rot_inverse = np.array([[mt.cos(self.state[3]), mt.sin(self.state[3])],
                                            [-mt.sin(self.state[3]), mt.cos(self.state[3])]])
                    z_meas_rot = Rot_inverse @ z_meas_trans
                    if -self.width_max * 0.5 <= z_meas_rot[0] <= self.width_max * 0.5 and -self.length_max * 1.0 <= z_meas_rot[1] <= self.length_max * 1.0:
                        self.processed = 1
                        car_processed[i] = 1
                        temp = i
                        useWhatBox = 0
                        break
            # B) Use Else measurement
            # First Gate
            for i in range(0, len(else_centroid)):
                if else_processed[i] == 1:
                    continue
                z_meas_trans = np.array([0,0])
                z_meas_trans[0] = else_centroid[i,0] - x_pred[0]
                z_meas_trans[1] = else_centroid[i,1] - x_pred[1]
                Rot_inverse = np.array([[mt.cos(self.state[3]), mt.sin(self.state[3])],
                                        [-mt.sin(self.state[3]), mt.cos(self.state[3])]])
                z_meas_rot = Rot_inverse @ z_meas_trans
                if -self.width_max * 0.3 <= z_meas_rot[0] <= self.width_max * 0.3 and -self.length_max * 0.4 <= z_meas_rot[1] <= self.length_max * 0.4:
                    self.processed = 1
                    else_processed[i] = 1
                    temp = i
                    useWhatBox = 2
                    break
            # Second Gate (When Self Track is Not updated by the First gate)
            if self.processed == 0:
                for i in range(0, len(else_centroid)):
                    if else_processed[i] == 1:
                        continue
                    z_meas_trans = np.array([0,0])
                    z_meas_trans[0] = else_centroid[i,0] - x_pred[0]
                    z_meas_trans[1] = else_centroid[i,1] - x_pred[1]
                    # z_meas_trans[0] = clusters[i].res[0] - self.state[0]
                    # z_meas_trans[1] = clusters[i].res[1] - self.state[1]
                    Rot_inverse = np.array([[mt.cos(self.state[3]), mt.sin(self.state[3])],
                                            [-mt.sin(self.state[3]), mt.cos(self.state[3])]])
                    z_meas_rot = Rot_inverse @ z_meas_trans
                    if -self.width_max * 0.5 <= z_meas_rot[0] <= self.width_max * 0.5 and -self.length_max * 1.0 <= z_meas_rot[1] <= self.length_max * 1.0:
                        self.processed = 1
                        else_processed[i] = 1
                        temp = i
                        useWhatBox = 2
                        break
        # 2) classification 1 (presumably pedestrian — TODO confirm): no association implemented.
        elif self.classification==1:
            pass
        ######################################################################################
        # 3) Else : use car > pedestrian > else
        elif self.classification == 2:
            # A) Use Car measurement
            # First Gate
            for i in range(0, len(car_centroid)):
                if car_processed[i] == 1:
                    continue
                z_meas_trans = np.array([0,0])
                z_meas_trans[0] = car_centroid[i,0] - x_pred[0]
                z_meas_trans[1] = car_centroid[i,1] - x_pred[1]
                Rot_inverse = np.array([[mt.cos(self.state[3]), mt.sin(self.state[3])],
                                        [-mt.sin(self.state[3]), mt.cos(self.state[3])]])
                z_meas_rot = Rot_inverse @ z_meas_trans
                if -self.width_max * 0.3 <= z_meas_rot[0] <= self.width_max * 0.3 and -self.length_max * 0.4 <= z_meas_rot[1] <= self.length_max * 0.4:
                    self.processed = 1
                    car_processed[i] = 1
                    temp = i
                    useWhatBox = 0
                    break
            # Second Gate (When Self Track is Not updated by the First gate)
            if self.processed == 0:
                for i in range(0, len(car_centroid)):
                    if car_processed[i] == 1:
                        continue
                    z_meas_trans = np.array([0,0])
                    z_meas_trans[0] = car_centroid[i,0] - x_pred[0]
                    z_meas_trans[1] = car_centroid[i,1] - x_pred[1]
                    # z_meas_trans[0] = clusters[i].res[0] - self.state[0]
                    # z_meas_trans[1] = clusters[i].res[1] - self.state[1]
                    Rot_inverse = np.array([[mt.cos(self.state[3]), mt.sin(self.state[3])],
                                            [-mt.sin(self.state[3]), mt.cos(self.state[3])]])
                    z_meas_rot = Rot_inverse @ z_meas_trans
                    if -self.width_max * 0.5 <= z_meas_rot[0] <= self.width_max * 0.5 and -self.length_max * 1.0 <= z_meas_rot[1] <= self.length_max * 1.0:
                        self.processed = 1
                        car_processed[i] = 1
                        temp = i
                        useWhatBox = 0
                        break
            # # C) Use Else measurement
            # # First Gate
            # for i in range(0, len(else_centroid)):
            #     if else_processed[i] == 1:
            #         continue
            #     z_meas_trans = np.array([0,0])
            #     z_meas_trans[0] = else_centroid[i,0] - x_pred[0]
            #     z_meas_trans[1] = else_centroid[i,1] - x_pred[1]
            #     Rot_inverse = np.array([[mt.cos(self.state[3]), mt.sin(self.state[3])],
            #                             [-mt.sin(self.state[3]), mt.cos(self.state[3])]])
            #     z_meas_rot = Rot_inverse @ z_meas_trans
            #     if -self.width_max * 0.3 <= z_meas_rot[0] <= self.width_max * 0.3 and -self.length_max * 0.4 <= z_meas_rot[1] <= self.length_max * 0.4:
            #         self.processed = 1
            #         else_processed[i] = 1
            #         temp = i
            #         useWhatBox = 2
            #         break
            # # Second Gate (When Self Track is Not updated by the First gate)
            # if self.processed == 0:
            #     for i in range(0, len(else_centroid)):
            #         if else_processed[i] == 1:
            #             continue
            #         z_meas_trans = np.array([0,0])
            #         z_meas_trans[0] = else_centroid[i,0] - x_pred[0]
            #         z_meas_trans[1] = else_centroid[i,1] - x_pred[1]
            #         # z_meas_trans[0] = clusters[i].res[0] - self.state[0]
            #         # z_meas_trans[1] = clusters[i].res[1] - self.state[1]
            #         Rot_inverse = np.array([[mt.cos(self.state[3]), mt.sin(self.state[3])],
            #                                 [-mt.sin(self.state[3]), mt.cos(self.state[3])]])
            #         z_meas_rot = Rot_inverse @ z_meas_trans
            #         if -self.width_max * 0.5 <= z_meas_rot[0] <= self.width_max * 0.5 and -self.length_max * 1.0 <= z_meas_rot[1] <= self.length_max * 1.0:
            #             self.processed = 1
            #             else_processed[i] = 1
            #             temp = i
            #             useWhatBox = 2
            #             break
        # (4) Measurement Update
        # No measurement
        if temp == -1:
            # Coast on the prediction; DelCnt drives eventual track deletion.
            self.state = x_pred
            self.P = P_x
            self.DelCnt += 1
        # Measurement associated
        else:
            hXi = self.hx(fXi)
            z_pred, P_z = self.UT(hXi, W, self.R)
            # Calculate Off Diagonal Elements of Error Covariance and Kalman Gain.
            Pxz = W * (fXi - x_pred.reshape(-1, 1)) @ (hXi - z_pred.reshape(-1, 1)).T
            K = Pxz @ inv(P_z)
            # Validation Check : Yaw angle
            #heading_angle =
            if useWhatBox == 0:
                measured_state = car_centroid[temp]
            elif useWhatBox == 1:
                pass
            elif useWhatBox == 2:
                measured_state = else_centroid[temp]
            # measured_state = centroid[temp]
            # self.yaw_angle = centroid[temp][2]
            # if self.length_max > box[temp][1]:
            #     if measured_state[0] > 5:
            #         measured_state[0] += (self.length_max - box[temp][1]) * mt.cos(self.state[3])
            #         measured_state[0] += (self.length_max - box[temp][1]) * mt.sin(self.state[3])
            #     elif measured_state[0] < -5:
            #         measured_state[0] -= (self.length_max - box[temp][1]) * mt.cos(self.state[3])
            #         measured_state[0] -= (self.length_max - box[temp][1]) * mt.sin(self.state[3])
            #print("measured angle:", measured_state[2])
            # if 80 * mt.pi/180 <= mt.fabs(measured_state[2] - self.state[3]) < 100*mt.pi/180:
            #     if self.state[3] >= 0:
            #         measured_state[2] += mt.pi/2
            #     elif self.state[3] < 0:
            #         measured_state[2] -= mt.pi/2
            # if mt.fabs(measured_state[2] - self.state[3]) >= 160*mt.pi/180:
            #     if self.state[3] >= 0:
            #         measured_state[2] -= mt.pi
            #     elif self.state[3] < 0:
            #         measured_state[2] += mt.pi
            # '''if measured_state[2] > mt.pi/2:
            #     measured_state[2] -= mt.pi
            # elif measured_state[2] < -mt.pi/2:
            #     measured_state[2] += mt.pi'''
            # if measured_state[2] > mt.pi/2 or measured_state[2] < -mt.pi/2:
            #     print("check")
            #     measured_state[2] = self.state[3]
            # Heading derived from displacement between last state and the
            # measurement (overwrites the measured yaw slot).
            measured_state[2] = mt.atan((measured_state[1] - self.state[1]) / (measured_state[0] - self.state[0]))
            # if 80 * mt.pi/180 <= mt.fabs(measured_state[2] - self.state[3]) < 100*mt.pi/180:
            #     if self.state[3] >= 0:
            #         measured_state[2] += mt.pi/2
            #     elif self.state[3] < 0:
            #         measured_state[2] -= mt.pi/2
            # Insert a signed speed estimate (displacement / dt) as element 2,
            # so the measurement matches the [x, y, v, yaw] layout of hx().
            measured_state = np.insert(measured_state, 2, (measured_state[0] - self.state[0])/mt.fabs(measured_state[0] - self.state[0])*mt.sqrt((self.state[0] - measured_state[0])**2 + (self.state[1] - measured_state[1])**2)/dt)
            #measured_state = np.insert(measured_state, 4, (measured_state[2] - self.state[3])/dt)
            self.state = x_pred + K @ (measured_state - z_pred)
            self.P = P_x - K @ P_z @ K.T
            # Low-pass-filter the bounding box from whichever list matched.
            if useWhatBox == 0:
                self.update_box(car_box[temp])
            elif useWhatBox == 1:
                pass
            elif useWhatBox == 2:
                self.update_box(else_box[temp])
            else:
                print("What's the matter??")
            self.Age += 1
            self.DelCnt = 0
            #self.ClusterID = cluster_id[temp]
            # An unclassified track inherits the class of the matched source.
            if self.classification == 2:
                self.classification = useWhatBox
        # (5) Get max width and length box
        if self.width_max < self.box[0]:
            self.width_max = self.box[0]
        if self.length_max < self.box[1]:
            self.length_max = self.box[1]
        # (6) Store History
        state_ = np.insert(self.state,5,self.laneresult)
        self.history_state = np.append(self.history_state, [state_], axis = 0)
        self.history_box = np.append(self.history_box, [self.box], axis = 0)
def update_box(self, box_meas):
self.box = (1 - self.alpha) * self.box + self.alpha * box_meas |
import re
import urllib
import time
import json
import io
import base64
import ast
from datetime import datetime,date
from django.contrib.auth.models import User
from django.shortcuts import redirect
from django.core.mail import EmailMessage
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rauth import OAuth1Service
from .tasks import store_health_data
from .serializers import UserGarminDataEpochSerializer,\
UserGarminDataSleepSerializer,\
UserGarminDataBodyCompositionSerializer,\
UserGarminDataDailySerializer,\
UserGarminDataActivitySerializer,\
UserGarminDataManuallyUpdatedSerializer,\
UserGarminDataStressDetailSerializer,\
UserGarminDataMetricsSerializer,\
UserGarminDataMoveIQSerializer,\
UserLastSyncedSerializer
from .models import UserGarminDataEpoch,\
UserGarminDataSleep,\
UserGarminDataBodyComposition,\
UserGarminDataDaily,\
UserGarminDataActivity,\
UserGarminDataManuallyUpdated,\
UserGarminDataStressDetails,\
UserGarminDataMetrics,\
UserGarminDataMoveIQ,\
GarminConnectToken, \
GarminFitFiles,\
UserLastSynced,\
GarminConnectToken
from users.models import GarminToken
from hrr.tasks import create_hrrdata
class UserGarminDataEpochView(generics.ListCreateAPIView):
    """List and create Garmin epoch records for authenticated users."""
    permission_classes = (IsAuthenticated,)
    queryset = UserGarminDataEpoch.objects.all()
    serializer_class = UserGarminDataEpochSerializer

    def perform_create(self, serializer):
        # Stamp the record with the requesting user so ownership cannot be spoofed.
        serializer.save(user=self.request.user)
class UserGarminDataSleepView(generics.ListCreateAPIView):
    """List and create Garmin sleep records for authenticated users."""
    permission_classes = (IsAuthenticated,)
    queryset = UserGarminDataSleep.objects.all()
    serializer_class = UserGarminDataSleepSerializer

    def perform_create(self, serializer):
        # Stamp the record with the requesting user.
        serializer.save(user=self.request.user)
class UserGarminDataBodyCompositionView(generics.ListCreateAPIView):
    """List and create Garmin body-composition records for authenticated users."""
    permission_classes = (IsAuthenticated,)
    queryset = UserGarminDataBodyComposition.objects.all()
    serializer_class = UserGarminDataBodyCompositionSerializer

    def perform_create(self, serializer):
        # Stamp the record with the requesting user.
        serializer.save(user=self.request.user)
class UserGarminDataDailyView(generics.ListCreateAPIView):
    """List and create Garmin daily-summary records for authenticated users."""
    permission_classes = (IsAuthenticated,)
    queryset = UserGarminDataDaily.objects.all()
    serializer_class = UserGarminDataDailySerializer

    def perform_create(self, serializer):
        # Stamp the record with the requesting user.
        serializer.save(user=self.request.user)
class UserGarminDataActivityView(generics.ListCreateAPIView):
    """List and create Garmin activity records for authenticated users."""
    permission_classes = (IsAuthenticated,)
    queryset = UserGarminDataActivity.objects.all()
    serializer_class = UserGarminDataActivitySerializer

    def perform_create(self, serializer):
        # Stamp the record with the requesting user.
        serializer.save(user=self.request.user)
class UserGarminDataManuallyUpdatedView(generics.ListCreateAPIView):
    """List and create manually-updated Garmin records for authenticated users."""
    permission_classes = (IsAuthenticated,)
    queryset = UserGarminDataManuallyUpdated.objects.all()
    serializer_class = UserGarminDataManuallyUpdatedSerializer

    def perform_create(self, serializer):
        # Stamp the record with the requesting user.
        serializer.save(user=self.request.user)
class UserGarminDataStressDetailsView(generics.ListCreateAPIView):
    """List and create Garmin stress-detail records for authenticated users."""
    permission_classes = (IsAuthenticated,)
    queryset = UserGarminDataStressDetails.objects.all()
    serializer_class = UserGarminDataStressDetailSerializer

    def perform_create(self, serializer):
        # Stamp the record with the requesting user.
        serializer.save(user=self.request.user)
class UserGarminDataMetricsView(generics.ListCreateAPIView):
    """List and create Garmin metrics records for authenticated users."""
    permission_classes = (IsAuthenticated,)
    queryset = UserGarminDataMetrics.objects.all()
    serializer_class = UserGarminDataMetricsSerializer

    def perform_create(self, serializer):
        # Stamp the record with the requesting user.
        serializer.save(user=self.request.user)
class UserGarminDataMoveIQView(generics.ListCreateAPIView):
    """List and create Garmin MoveIQ records for authenticated users."""
    permission_classes = (IsAuthenticated,)
    queryset = UserGarminDataMoveIQ.objects.all()
    serializer_class = UserGarminDataMoveIQSerializer

    def perform_create(self, serializer):
        # Stamp the record with the requesting user.
        serializer.save(user=self.request.user)
class GarminPing(APIView):
    '''
    This view will receive Health PING API data and
    call the celery taks to store that data in database
    '''
    def post(self, request, format="json"):
        # Hand the payload to the celery worker asynchronously; the endpoint
        # acknowledges immediately so Garmin does not retry.
        store_health_data.delay(request.data)
        return Response(status=status.HTTP_200_OK)

    def get(self, request, format="json"):
        # Garmin verifies the endpoint with a GET; always acknowledge.
        return Response(status = status.HTTP_200_OK)
class UserLastSyncedItemview(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update/destroy the requesting user's single last-synced record."""
    permission_classes = (IsAuthenticated,)
    serializer_class = UserLastSyncedSerializer
    queryset = UserLastSynced.objects.all()

    def get_object(self):
        # Look the record up by the authenticated user rather than a URL pk;
        # None signals "never synced" instead of raising 404.
        qs = self.get_queryset()
        try:
            last_synced_obj = qs.get(user=self.request.user)
            return last_synced_obj
        except UserLastSynced.DoesNotExist as e:
            return None

    def get(self,request, format=None):
        # Return an empty object (200) when the user has no sync record yet.
        last_synced = self.get_object()
        if last_synced:
            serializer = UserLastSyncedSerializer(last_synced)
            return Response(serializer.data)
        else:
            return Response({})
class GarminConnectPing(APIView):
    """Receive FIT files pushed by Garmin Connect and store them per user."""

    def _handle_received_file(self,f):
        # NOTE(review): Django's uploaded-file `name` is an attribute, not a
        # method — f.name() would raise TypeError. This helper is not called
        # from post(); confirm before reuse.
        with open(f.name()+'.fit', 'wb+') as destination:
            for chunk in f.chunks():
                destination.write(chunk)

    def post(self, request, format=None):
        '''
        This view will receive Health PING API data and
        store in database
        '''
        file = request.FILES['file']
        file2 = file.read()
        file_name = request.data['uploadMetaData']
        # The metadata arrives as a Python-literal string; parse it safely.
        oauthToken_fitfile = ast.literal_eval(file_name)
        file_oauth = oauthToken_fitfile['oauthToken']
        # NOTE(review): default is the int 0, but activity_id[0] below
        # subscripts it — a push without 'activityIds' would raise TypeError.
        activity_id = oauthToken_fitfile.get('activityIds',0)
        date_now = datetime.now()
        date_str = date_now.strftime("%Y-%m-%d")
        try:
            # Resolve the owning user from the OAuth token shipped with the file.
            user = User.objects.get(garmin_connect_token__token = file_oauth)
            # print(type(user))
        except User.DoesNotExist:
            user = None
        if user:
            if activity_id[0]:
                activities = UserGarminDataActivity.objects.filter(
                    user=user,summary_id=str(activity_id[0]))
                if activities:
                    for value in activities:
                        data = value.data
                        data_formated = ast.literal_eval(data)
                        # "strat_time" is a typo for start_time; kept as-is.
                        strat_time = data_formated.get("startTimeInSeconds",0)
                        activity_offset = data_formated.get("startTimeOffsetInSeconds",0)
                        start_time = strat_time + activity_offset
                        if start_time:
                            # Local date the activity (and so the FIT file) belongs to.
                            fitfile_belong_date = datetime.utcfromtimestamp(start_time)
                            fitfile_belong_date = fitfile_belong_date.date()
                        else:
                            fitfile_belong_date = None
                else:
                    fitfile_belong_date = None
            else:
                fitfile_belong_date = None
            fit_file_obj = GarminFitFiles.objects.create(
                user=user,fit_file=file2,
                meta_data_fitfile=oauthToken_fitfile,
                fit_file_belong_date=fitfile_belong_date)
            if fitfile_belong_date:
                year = str(fitfile_belong_date.year)
                month = str(fitfile_belong_date.month)
                day = str(fitfile_belong_date.day)
                date_str = year+'-'+month+'-'+day
                print(date_str,"date str")
                # Kick off heart-rate-recovery processing for that day.
                create_hrrdata.delay(
                    user.id,
                    date_str,
                    date_str
                )
        headers={"Location":"/"}
        return Response(status = status.HTTP_201_CREATED,headers=headers)

    def get(self, request, format=None):
        # Endpoint verification ping from Garmin.
        print("\n\nGARMIN CONNECT PUSH GET METHOD CALL\n\n",request.data,"\n\n")
        return Response(status = status.HTTP_200_OK)
def connect_request_token(request):
    '''
    Request for unauthorized request token and request token secret
    '''
    # OAuth1 endpoints of the Garmin Connect API.
    req_url = 'http://connectapi.garmin.com/oauth-service-1.0/oauth/request_token'
    authurl = 'http://connect.garmin.com/oauthConfirm'
    acc_url = 'http://connectapi.garmin.com/oauth-service-1.0/oauth/access_token'
    # NOTE(review): consumer key/secret are hard-coded in source control —
    # move them to settings/environment and treat these values as compromised.
    conskey = 'fc281870-3111-47fd-8576-fc90efef0fb1';
    conssec = 'hZPITG4SuEIXiFdInYs9or8TI9psvroqdGZ';
    session = request.session
    # NOTE(review): parenthesisation compares the result of the `and` to 1,
    # i.e. (bool and value) == 1 — confirm the intended precedence.
    if not 'auth_token' in session and ('state' in session and session['state'])==1:
        session['state'] = 0;
    service = OAuth1Service(
        consumer_key = conskey,
        consumer_secret = conssec,
        request_token_url = req_url,
        access_token_url = acc_url,
        authorize_url = authurl,
    )
    session = request.session
    request_token, request_token_secret = service.get_request_token()
    # Persist the request-token pair for the callback leg of the handshake.
    session['connect_request_token'] = request_token
    session['connect_request_token_secret'] = request_token_secret
    session['state'] = 1
    callback_string = urllib.parse.quote('https://app.jvbwellness.com/callbacks/garminconnect')
    # Send the user to Garmin's consent page; Garmin redirects back to the callback.
    return redirect(authurl + '?oauth_token={0}&oauth_callback={1}'.format(request_token,callback_string))
def connect_receive_token(request):
    '''
    Request for auth token and token secret. Save them in database for associated user
    '''
    # Same endpoints/credentials as connect_request_token; see the security
    # note there about hard-coded secrets.
    req_url = 'http://connectapi.garmin.com/oauth-service-1.0/oauth/request_token'
    authurl = 'http://connect.garmin.com/oauthConfirm'
    acc_url = 'http://connectapi.garmin.com/oauth-service-1.0/oauth/access_token'
    conskey = 'fc281870-3111-47fd-8576-fc90efef0fb1';
    conssec = 'hZPITG4SuEIXiFdInYs9or8TI9psvroqdGZ';
    session = request.session
    # oauth_token = request.GET['oauth_token']
    oauth_verifier = request.GET['oauth_verifier']
    service = OAuth1Service(
        consumer_key = conskey,
        consumer_secret = conssec,
        request_token_url = req_url,
        access_token_url = acc_url,
        authorize_url = authurl,
    )
    # Exchange the stored request token + verifier for a long-lived access token.
    access_token, access_token_secret = service.get_access_token(session['connect_request_token'],
        session['connect_request_token_secret'],method='POST',data={'oauth_verifier': oauth_verifier},
        header_auth=True)
    # Check if token and token secret exist. If exist then update otherwise
    # create new entry in the database
    try:
        token = GarminConnectToken.objects.get(user = request.user)
        if token:
            setattr(token, "token", access_token)
            setattr(token, "token_secret", access_token_secret)
            token.save()
    except GarminConnectToken.DoesNotExist:
        GarminConnectToken.objects.create(user=request.user,token=access_token,
            token_secret=access_token_secret)
    return redirect('/service_connect')
#!/usr/bin/python
import sys
import boto.ec2
#instance_id = sys.argv[1]
# Connect to EC2 in us-east-2. NOTE(review): credentials are blank here —
# supply them via environment variables or an IAM role rather than
# hard-coding secrets in source.
conn = boto.ec2.connect_to_region("us-east-2",
    aws_access_key_id="",
    aws_secret_access_key=""
)
# Launch one t2.micro from the fixed AMI in the default security group.
conn.run_instances(
    'ami-02e680c4540db351e',
    key_name='botoDemo',
    instance_type='t2.micro',
    security_groups=['default'])
|
import logging
import hashlib
import random
import datetime
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import Group, User
from django.contrib.auth import (login as app_login,
logout as app_logout)
from django.core.urlresolvers import reverse_lazy, reverse
from django.shortcuts import redirect, get_object_or_404, render
from django.utils.text import slugify
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.views.generic import FormView, RedirectView
from .forms import AuthenticationForm, RegistrationForm
from generic.functions.create_log_for_superuser import prepare_and_create_log
from generic.functions.send_email import SendEmailClass
from profiles.models import Profile
"""Variable for logging site"""
LOGGER = logging.getLogger(__name__)
class LoginView(FormView):
    """Authenticate a user and redirect to the task list on success.

    Extends:
        django.views.generic.FormView

    Attributes:
        form_class: authentication form used for validation
        template_name: login page template
        success_url: destination after a successful login
    """

    form_class = AuthenticationForm
    template_name = 'authorization/login.html'
    success_url = reverse_lazy('tasks:list')

    def form_valid(self, form):
        # The form has already authenticated the user; establish the session.
        user = form.get_user()
        app_login(self.request, user)
        return super(LoginView, self).form_valid(form)
class RegisterView(FormView):
    """Registration view.

    On successful registration the user is saved, a Profile row is created
    with a unique slug, an activation key and its expiry date, and a
    confirmation email is sent.

    Extends:
        django.views.generic.FormView
    """

    form_class = RegistrationForm
    template_name = 'authorization/registration.html'
    success_url = reverse_lazy('tasks:list')

    def generate_activation_key(self, user_info):
        """Return a SHA-1 activation key derived from a random salt and user_info.

        Bug fix: hashlib.sha1() requires bytes on Python 3; the original passed
        str and raised TypeError. Inputs are now encoded explicitly.

        Arguments:
            user_info {str} -- user-specific material (e.g. email) for the key

        Returns:
            str -- hex digest
        """
        salt = hashlib.sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
        return hashlib.sha1((salt + user_info).encode('utf-8')).hexdigest()

    def generate_key_expires(self, term_days):
        """Return the datetime after which the activation key is no longer valid.

        Arguments:
            term_days {int} -- validity period in days
        """
        return datetime.datetime.today() + datetime.timedelta(term_days)

    def form_valid(self, form):
        """Save the user and profile, then send the confirmation email.

        Returns:
            HttpResponse rendering the "thanks" page.
        """
        form.save()
        user_email = form.cleaned_data['email']
        activation_key = self.generate_activation_key(user_email)
        key_expires = self.generate_key_expires(settings.KEY_EXPIRE_TERM)
        user = User.objects.get(email=user_email)
        user.groups.add(Group.objects.get(name='customers'))
        slug = self.create_slug(user)
        new_profile = Profile(
            user=user,
            activation_key=activation_key,
            key_expires=key_expires,
            slug=slug
        )
        new_profile.save()
        email_data = {
            'username': user.username,
            'activation_key': activation_key
        }
        # NOTE(review): the confirmation mail is addressed to
        # settings.EMAIL_SUPERUSERS, not to the registrant — confirm this is
        # intentional (the docstring says the key is sent to the user's email).
        email = SendEmailClass(
            subject=_('Account Confirmation'),
            sender=settings.EMAIL_HOST_USER,
            to=settings.EMAIL_SUPERUSERS,
            template=settings.EMAIL_TEMPLATES['confirmation'],
            data=email_data
        )
        email.send()
        return render(
            self.request,
            settings.REGISTRATION_TEMPLATES['thanks'],
            context={
                'username': user.username,
                'email': user.email
            }
        )

    def create_slug(self, user, new_slug=None):
        """Return a unique slug for the user, suffixing the pk on collision.

        Arguments:
            user {obj} -- User

        Keyword Arguments:
            new_slug {str} -- candidate slug from a recursive retry (default: None)

        Returns:
            str -- unique slug
        """
        # 'edit' collides with the profile-edit URL, so remap it.
        if user.username not in ['edit']:
            slug = slugify(user.username)
        else:
            slug = slugify('cus_edit')
        if new_slug is not None:
            slug = new_slug
        qs = User.objects.filter(profile__slug=slug).order_by('-pk')
        exists = qs.exists()
        if exists:
            new_slug = '%s-%s' % (slug, qs.first().pk)
            return self.create_slug(user, new_slug=new_slug)
        return slug
class LogoutView(RedirectView):
    """Logout View

    If User is authenticated doing log out User from site

    Extends:
        django.views.generic.RedirectView

    Variables:
        url {str} -- redirect if success logout
    """
    url = reverse_lazy('auth:login')

    def get(self, request, *args, **kwargs):
        # is_authenticated() is callable here (pre-1.10 Django API used by this file).
        if request.user.is_authenticated():
            app_logout(request)
        return super(LogoutView, self).get(request, *args, **kwargs)
class ConfirmEmailView(RedirectView):
    """Confirmation Email

    If confirmation key and expire date are valid User's status
    is active = True

    Extends:
        django.views.generic.RedirectView
    """
    def get(self, request, activation_key, *args, **kwargs):
        # Already-logged-in users have nothing to confirm.
        if request.user.is_authenticated():
            return redirect(reverse('tasks:list'))
        user_profile = get_object_or_404(Profile,
                                         activation_key=activation_key)
        # NOTE(review): an expired key is only logged — execution falls
        # through and still activates the user below. The inline comment
        # suggests a new key should be generated and emailed instead;
        # confirm the intended behaviour before relying on expiry.
        if user_profile.key_expires < timezone.now():
            # generate new key and date and send to email
            LOGGER.error(_("User expires date less than now"))
        user = user_profile.user
        user.is_active = True
        user.save()
        prepare_and_create_log(4, '', user.profile.slug, '')
        messages.success(self.request,
                         _('Your activate you account, please login'))
        return redirect(reverse('auth:login'))
|
def geraMatriz(tam, tam_x, tam_y):
    """Print `tam` brace-wrapped rows of generated vectors.

    Each row delegates to geraLinha with a per-row offset of row*3 and a
    fixed spacing of 5. (tam_x and tam_y are accepted but unused, matching
    the original signature.)
    """
    for row in range(tam):
        print("{")
        offset = row * 3
        geraLinha(4, offset, offset, offset, 5)
        print("},")
def geraLinha(tam, val_x, val_y, val_z, dist):
    """Print `tam` comma-separated `<x, y, z>` vectors on one line.

    Each successive vector is shifted by `dist` on every axis; no trailing
    newline is emitted.
    """
    for i in range(tam):
        offset = i * dist
        print("<{}, {}, {}>, ".format(val_x + offset, val_y + offset, val_z + offset), end='')
# Endpoint coordinates — presumably the two borders of the mesh patch
# (a = bottom edge, b = top edge); TODO confirm against the POV-Ray output.
a_ini = [0, 0, 0]
a_fim = [15, 0, 0]
b_ini = [0, 15, 0]
b_fim = [15, 15, 0]
# Accumulator for generated rows; currently unused.
c = [[]]
def geraMalha(a_ini, a_fim, b_ini, b_fim):
    """Print a `partes` x `partes` grid of `[i|j]` cell labels, one row per line.

    Bug fix: the original computed ``(b_ini - a_ini) / partes`` on plain
    lists, which raises TypeError on any call; the step is now computed
    element-wise. ``salto`` is still unused by the printing loop (kept for
    the intended interpolation work).
    """
    partes = 4
    # Per-component step between the two border points.
    salto = [(b - a) / partes for a, b in zip(a_ini, b_ini)]
    for i in range(partes):
        for j in range(partes):
            print("[" + str(i) + "|" + str(j) + "]", end='')
        print("")
|
#!/usr/bin/python
def check(num):
    """Return True when str(num) is 17 characters long and its even-indexed
    characters spell out the digits 1 through 9 in order."""
    digits = str(num)
    if len(digits) != 17:
        return False
    return all(digits[2 * k] == str(k + 1) for k in range(9))
# Count down from the integer square root of the largest candidate until the
# square matches the 1_2_3_4_5_6_7_8_9 digit pattern checked by check().
ans = 138902663 # sqrt(19293949596979899)
while not check(ans ** 2):
    ans -= 1
# print() call form instead of the Python-2-only print statement, so the
# script also runs under Python 3; output is unchanged.
print(ans * 10)
|
#!/usr/bin/env python
"""
Python wrapper for priceline.com API
"""
from priceline.api import Api |
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
import argparse
from os import path, makedirs
from multiprocessing import Pool
import os
from scipy.spatial import distance
from datetime import datetime
# Module-level configuration, populated in __main__ and inherited by the
# worker processes that multiprocessing.Pool forks for match().
PROBE_FILE = None    # path of the probe list file
PROBE = None         # sorted array of probe feature-file paths
GALLERY_FILE = None  # path of the gallery list file
GALLERY = None       # sorted array of gallery feature-file paths
TWINS = None         # twin-ID table (ND dataset only)
ID_SIZE = None       # subject-ID prefix length in file names (-1 = use split rules)
DATASET = None       # upper-cased dataset name
METRIC = None        # 1 = cosine similarity, 2 = euclidean, otherwise chi-square
def match_features(output, group):
    """Match every probe against the gallery and write score/label files.

    Writes `{group}_authentic.txt`, `{group}_impostor.txt`, `{group}_labels.txt`
    (and `{group}_twins.txt` for the ND dataset) under `output`. When probe and
    gallery lists differ, gallery labels are written separately.

    Fix vs. original: the multiprocessing.Pool was never closed/joined; it is
    now used as a context manager so worker processes are reclaimed.
    """
    authentic_save = path.join(output, '{}_authentic.txt'.format(group))
    impostor_save = path.join(output, '{}_impostor.txt'.format(group))
    twins_save = path.join(output, '{}_twins.txt'.format(group))
    labels_save = path.join(output, '{}_labels.txt'.format(group))
    print(os.cpu_count())
    impostor_file = open(impostor_save, 'w')
    authentic_file = open(authentic_save, 'w')
    labels_file = []
    if DATASET == 'ND':
        twins_file = open(twins_save, 'w')
    # Fan the probe list out to worker processes; results stream back in
    # arbitrary order, which is fine since each row carries its own indices.
    with Pool(os.cpu_count()) as pool:
        for authentic, impostor, twins, label in pool.imap_unordered(match, PROBE):
            if impostor.shape[0] > 0:
                np.savetxt(impostor_file, impostor, delimiter=' ', fmt='%i %i %s')
            if authentic.shape[0] > 0:
                np.savetxt(authentic_file, authentic, delimiter=' ', fmt='%i %i %s')
            if twins.shape[0] > 0:
                # twins rows only occur for DATASET == 'ND', where twins_file exists.
                np.savetxt(twins_file, twins, delimiter=' ', fmt='%i %i %s')
            if label is not None:
                labels_file.append(label)
    if GALLERY_FILE != PROBE_FILE:
        # Distinct gallery: emit its index->label mapping as well.
        labels_gallery = path.join(output, '{}_labels_gallery.txt'.format(group))
        labels_gallery_file = []
        for j in range(len(GALLERY)):
            image_b_path = GALLERY[j]
            image_b = path.split(image_b_path)[1]
            label_bb = path.split(path.split(image_b_path)[0])[1] + '/' + image_b
            label = (j, label_bb[:-4])
            labels_gallery_file.append(label)
        np.savetxt(labels_gallery, labels_gallery_file, delimiter=' ', fmt='%s')
    impostor_file.close()
    authentic_file.close()
    labels_file = np.array(labels_file)
    np.savetxt(labels_save, labels_file, delimiter=' ', fmt='%s')
    if DATASET == 'ND':
        twins_file.close()
def chisquare(p, q):
    """Chi-square distance between two flattened feature vectors/histograms."""
    p = np.asarray(p).flatten()
    q = np.asarray(q).flatten()
    # eps in the denominator guards against empty bins (0 + 0).
    eps = np.finfo('float').eps
    per_bin = np.square(p - q) / (p + q + eps)
    return per_bin.sum()
def match(probe):
    """Score one probe feature file against the whole gallery (worker function).

    Returns (authentic_list, impostor_list, twins_list, label) where the score
    lists are (i, j, score) rows rounded to 6 decimals and label is the probe's
    (index, name) pair.

    Fix vs. original: ``np.int`` was an alias of the builtin ``int`` and has
    been removed from NumPy; the builtin is used directly.
    """
    authentic_list = []
    impostor_list = []
    twins_list = []
    image_a_path = probe
    image_a = path.split(image_a_path)[1]
    label_aa = path.split(path.split(image_a_path)[0])[1] + '/' + image_a
    features_a = np.load(image_a_path)
    if np.ndim(features_a) == 1:
        features_a = features_a[np.newaxis, :]
    # Index of this probe in the global (sorted) probe array.
    i = int(np.where(PROBE == image_a_path)[0])
    label = (i, label_aa[:-4])
    start = i
    if GALLERY_FILE != PROBE_FILE:
        # Distinct gallery: compare against everything; for self-matching,
        # start after i to avoid duplicate/symmetric pairs.
        start = -1
    for j in range(start + 1, len(GALLERY)):
        image_b_path = GALLERY[j]
        image_b = path.split(image_b_path)[1]
        label_bb = path.split(path.split(image_b_path)[0])[1] + '/' + image_b
        # Skip self-comparisons (by file name, or by full label for PUBLIC_IVS).
        if image_a == image_b and DATASET != 'PUBLIC_IVS':
            continue
        elif DATASET == 'PUBLIC_IVS' and label_aa == label_bb:
            continue
        features_b = np.load(image_b_path)
        if np.ndim(features_b) == 1:
            features_b = features_b[np.newaxis, :]
        if METRIC == 1:
            score = np.mean(cosine_similarity(features_a, features_b))
        elif METRIC == 2:
            score = distance.euclidean(features_a, features_b)
        else:
            score = chisquare(features_a, features_b)
        comparison = (i, j, score)
        # Per-dataset subject-ID extraction from the file/label names.
        if DATASET == 'CHIYA':
            image_a_label = image_a[:-5]
            image_b_label = image_b[:-5]
        elif DATASET == 'CHIYA_VAL':
            image_a_label = image_a[1:-4]
            image_b_label = image_b[1:-4]
        elif DATASET == 'PUBLIC_IVS':
            image_a_label = path.split(label_aa)[0]
            image_b_label = path.split(label_bb)[0]
        elif ID_SIZE > 0:
            image_a_label = image_a[:ID_SIZE]
            image_b_label = image_b[:ID_SIZE]
        else:
            image_a_label = image_a.split('_')[0]
            image_b_label = image_b.split('_')[0]
        if image_a_label == image_b_label:
            if DATASET == 'ND_GENDERS_V3':
                # Same-day captures of the same subject are excluded entirely.
                day_a = image_a.split('_')[4]
                day_b = image_b.split('_')[4]
                if day_a == day_b:
                    continue
            authentic_list.append(comparison)
        elif DATASET == 'ND':
            # Different subjects: classify as twin pair if both IDs share a
            # row of the TWINS table, impostor otherwise.
            i_a, j_a = np.where(TWINS == image_a[:ID_SIZE])
            i_b, j_b = np.where(TWINS == image_b[:ID_SIZE])
            if i_a >= 0 and i_a == i_b:
                twins_list.append(comparison)
            else:
                impostor_list.append(comparison)
        else:
            impostor_list.append(comparison)
    impostor_list = np.round(np.array(impostor_list), 6)
    authentic_list = np.round(np.array(authentic_list), 6)
    twins_list = np.round(np.array(twins_list), 6)
    return authentic_list, impostor_list, twins_list, label
if __name__ == '__main__':
    # Command-line entry point: parse arguments, configure the module-level
    # globals used by the worker processes, then run the matching.
    parser = argparse.ArgumentParser(description='Match Extracted Features')
    parser.add_argument('-probe', '-p', help='Probe image list.')
    parser.add_argument('-gallery', '-g', help='Gallery image list.')
    parser.add_argument('-output', '-o', help='Output folder.')
    parser.add_argument('-dataset', '-d', help='Dataset name.')
    parser.add_argument('-group', '-gr', help='Group name, e.g. AA')
    parser.add_argument('-metric', '-m', default=1,
                        help='Metric to use: (1) Cosine Similarity; (2) Euclidean Distance; (3) Chi Square')
    args = parser.parse_args()
    time1 = datetime.now()
    # No gallery means probe-vs-probe self matching.
    if args.gallery is None:
        args.gallery = args.probe
    if not path.exists(args.output):
        makedirs(args.output)
    DATASET = args.dataset.upper()
    METRIC = int(args.metric)
    # All supported datasets use ID_SIZE = -1 (ID extraction is handled by the
    # split rules in match()); ND additionally loads its twins table.
    # np.str was a removed NumPy alias of the builtin str; use str directly.
    if DATASET == 'ND':
        TWINS = np.loadtxt('/afs/crc.nd.edu/user/v/valbiero/ND_Dataset/Metadata/twins.txt', delimiter=' ', dtype=str)
        ID_SIZE = -1
    elif DATASET in ('MORPH', 'IJBB', 'CHIYA', 'CHIYA_VAL', 'ND_GENDERS_V3', 'PUBLIC_IVS', 'AFD'):
        ID_SIZE = -1
    else:
        raise Exception('NO FILE PATTERN FOR THE DATASET INFORMED.')
    PROBE_FILE = args.probe
    PROBE = np.sort(np.loadtxt(PROBE_FILE, dtype=str))
    GALLERY_FILE = args.gallery
    GALLERY = np.sort(np.loadtxt(args.gallery, dtype=str))
    match_features(args.output, args.group)
    print(PROBE_FILE)
    print(GALLERY_FILE)
    time2 = datetime.now()
    print(time2 - time1)
|
import pytest
import mockito
import time
from src import index
from mockito import when
from src.index import get_wait_status, is_job_complete
@pytest.mark.parametrize("wait_timeout, input_secs_before_timeout, expected", [
    (0, 0, {'seconds_before_timeout': 0, 'wait_status': 'JOB_WAIT_TIMEOUT'}),
    (0, 10, {'seconds_before_timeout': 0, 'wait_status': 'JOB_WAIT_TIMEOUT'}),
    (-1, 0, {'seconds_before_timeout': 0, 'wait_status': 'JOB_WAIT_TIMEOUT'}),
])
def test_get_wait_status_with_unlikely_time_waits(wait_timeout, input_secs_before_timeout, expected):
    """Zero or negative timeouts must report JOB_WAIT_TIMEOUT immediately."""
    when(index).is_job_complete().thenReturn(False)
    result = get_wait_status(wait_timeout, input_secs_before_timeout)
    assert result == expected
@pytest.mark.parametrize(
    "timeout, secs_before_timeout, expected_status",
    [(15, 8, 'KEEP_WAITING'), (15, 0, 'JOB_WAIT_TIMEOUT')],
)
def test_get_wait_status_expected_time_waits(timeout, secs_before_timeout, expected_status):
    """Status flips on whether the absolute deadline (now + margin) has passed."""
    when(index).is_job_complete().thenReturn(False)
    deadline = int(time.time()) + secs_before_timeout
    assert get_wait_status(timeout, deadline)['wait_status'] == expected_status
def test_get_wait_status_job_completed():
    """A finished job reports JOB_COMPLETED regardless of the deadline."""
    when(index).is_job_complete().thenReturn(True)
    status = get_wait_status(15, 0)['wait_status']
    assert status == 'JOB_COMPLETED'
|
from _typeshed import Incomplete
from collections.abc import Generator
# Type stub: yields edges in breadth-first order from `source`;
# `orientation` controls how edge direction is reported.
# NOTE(review): signature matches networkx's edge_bfs — confirm origin.
def edge_bfs(
    G, source: Incomplete | None = None, orientation: Incomplete | None = None
) -> Generator[Incomplete, None, Incomplete]: ...
|
import functools
import inspect
def argtypes_check(*argtypes):
    '''Decorator factory: validate positional-argument types at call time.

    Raises TypeError when the call-site arity differs from the declared
    types, or when any argument is not an instance of its declared type.
    '''
    def _argtypes_check(func):
        '''Take the function to wrap.'''
        @functools.wraps(func)
        def __argtypes_check(*func_args):
            '''Check the arguments, then delegate to the wrapped function.'''
            if len(argtypes) != len(func_args):
                raise TypeError('expected {} but get {} arguments'.format(
                    len(argtypes), len(func_args)))
            for argtype, func_arg in zip(argtypes, func_args):
                # BUG FIX: the original called isinstance(argtype, func_arg)
                # with the operands swapped, so every call raised
                # "isinstance() arg 2 must be a type" instead of validating.
                if not isinstance(func_arg, argtype):
                    raise TypeError('expected {} but get {}'.format(
                        argtypes, tuple(type(func_arg) for func_arg in func_args)))
            return func(*func_args)
        return __argtypes_check
    return _argtypes_check
@argtypes_check(int, int)
def add(x, y):
    '''Add two integers; the decorator raises TypeError for non-int args.'''
    return x + y
# version 2:
def checkargs(function):
    '''Decorator: enforce that each positional argument is an instance of
    the parameter's type annotation, raising TypeError on mismatch.

    Unannotated parameters are skipped (the original raised KeyError for
    them); the debug print of the argspec has been removed; functools.wraps
    preserves __name__/__doc__ instead of a manual __doc__ copy.
    '''
    @functools.wraps(function)
    def _f(*arguments):
        for index, argument in enumerate(inspect.getfullargspec(function)[0]):
            expected = function.__annotations__.get(argument)
            if expected is not None and not isinstance(arguments[index], expected):
                raise TypeError("{} is not of type {}".format(
                    arguments[index], expected))
        return function(*arguments)
    return _f
def coerceargs(function):
    '''Decorator: convert each positional argument to the parameter's
    annotated type before calling (e.g. 4.0 -> int(4.0) == 4).

    Uses functools.wraps (consistent with checkargs) so __name__ and
    __doc__ survive, instead of the original manual __doc__ copy.
    '''
    @functools.wraps(function)
    def _f(*arguments):
        new_arguments = []
        for index, argument in enumerate(inspect.getfullargspec(function)[0]):
            # Apply the annotation as a converter to the matching argument.
            new_arguments.append(function.__annotations__[
                argument](arguments[index]))
        return function(*new_arguments)
    return _f
if __name__ == "__main__":
@checkargs
def f(x: int, y: int):
"""
A doc string!
"""
return x, y
@coerceargs
def g(a: int, b: int):
"""
Another doc string!
"""
return a + b
print(f(1, 2))
try:
print(f(3, 4.0))
except TypeError as e:
print(e)
print(g(1, 2))
print(g(3, 4.0))
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 4 20:52:03 2019
@author: My
"""
from textblob import TextBlob
def identify_sentimental_words(text):
    """Collect sentiment-bearing words from a row-based dataset.

    `text` is an iterable of rows whose sixth field (rows[5]) holds the
    text to analyse. Returns every token POS-tagged as an adjective or
    comparative/superlative adverb (JJ/JJS/JJR/RBR/RBS) across ALL rows.
    """
    identified_words = []
    for rows in text:
        blob = TextBlob(rows[5])
        for words, tag in blob.tags:
            if (tag in ['JJ','JJS','JJR','RBR','RBS']):
                identified_words.append(words)
    # BUG FIX: the original returned inside the row loop (and reset the
    # accumulator per row), so only the first row was ever processed.
    return identified_words
|
from gevent.pywsgi import WSGIServer
from FlaskApp import app
if __name__ == '__main__':
    # http_server = WSGIServer(('0.0.0.0', 7000), app,keyfile='server.key',certfile='server.crt')
    # Serve the Flask app over HTTPS on localhost:7000 via gevent's
    # production WSGI server; expects server.key/server.crt in the CWD.
    http_server = WSGIServer(('127.0.0.1', 7000), app, keyfile='server.key', certfile='server.crt')
    http_server.serve_forever()  # blocks until interrupted
# app.run(host='0.0.0.0', port=80, debug=True)
|
class Solution:
    def generateParenthesis(self, n: int) -> "List[str]":
        """Return all well-formed combinations of n pairs of parentheses.

        Backtracking: `num_open` counts unmatched '(' in `sofar`; `remain`
        counts '(' still available to place.

        BUG FIX: `List` is never imported in this file, so the original
        unquoted `List[str]` annotation raised NameError at class-definition
        time; quoting makes it a lazy forward reference.
        """
        result = []

        def generate(sofar, num_open, remain):
            # Complete string: everything placed and everything closed.
            if remain == 0 and num_open == 0:
                result.append(sofar)
                return
            # Over-spent opens: prune this branch.
            if remain < 0:
                return
            # Close an open paren when one exists, then try opening another.
            if num_open > 0:
                generate(sofar + ')', num_open - 1, remain)
            generate(sofar + '(', num_open + 1, remain - 1)

        generate("", 0, n)
        return result
|
# Evaluate a pre-trained DQN agent on the 2048 game and log per-episode stats.
# NOTE(review): os/fnmatch/pickle and numpy are imported twice below —
# harmless but worth deduplicating.
import os, fnmatch, pickle
import numpy as np
import os, fnmatch, pickle
import csv
import numpy as np
import random
from DQN_2048.game2048 import Game2048Env # 2048 game logic
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Conv2D, Flatten, Input
from keras.layers.merge import concatenate
from keras.optimizers import Adam
import keras.backend as K
from rl.agents.dqn import DQNAgent
from rl.policy import EpsGreedyQPolicy, LinearAnnealedPolicy, GreedyQPolicy
from rl.memory import SequentialMemory
from rl.callbacks import FileLogger
from DQN_2048.callbacks2048 import TestCall2048
from DQN_2048.processors2048 import OneHotNNInputProcessor
# Create the environment for the DQN agent:
ENV_NAME = '2048'
env = Game2048Env()
NB_STEPS_TRAINING = int(5e5)
path = ''
data_filepath = 'data/'
if not os.path.exists(data_filepath):
    os.makedirs(data_filepath)
csv_filepath = data_filepath + 'test/test_steps_'+ str(NB_STEPS_TRAINING) +'.csv'
# Skip the run entirely if results for this training length already exist.
if os.path.exists(csv_filepath):
    exit()
# Fix all RNG seeds for reproducible evaluation episodes.
random.seed(123)
np.random.seed(123)
env.seed(123)
PREPROC="onehot2steps"
# One-hot board encoding; input shape suggests board + 2-step lookahead
# states (4 + 4*4 matrices) — TODO confirm against OneHotNNInputProcessor.
processor = OneHotNNInputProcessor(num_one_hot_matrices=16)
model = Sequential()
model.add(Flatten(input_shape=(1, 4+4*4, 16,) + (4, 4)))
model.add(Dense(units=1024, activation='relu'))
model.add(Dense(units=512, activation='relu'))
model.add(Dense(units=256, activation='relu'))
model.add(Dense(units=4, activation='linear'))  # Q-values, one per move
memory = SequentialMemory(limit=6000, window_length=1)
TRAIN_POLICY = LinearAnnealedPolicy(EpsGreedyQPolicy(), attr='eps', value_max=0.05, value_min=0.05, value_test=0.01, nb_steps=100000)
TEST_POLICY = EpsGreedyQPolicy(eps=.01)
dqn = DQNAgent(model=model, nb_actions=4, test_policy=TEST_POLICY, policy=TRAIN_POLICY, memory=memory, processor=processor,
               nb_steps_warmup=5000, gamma=.99, target_model_update=1000, train_interval=4, delta_clip=1.)
dqn.compile(Adam(lr=.00025), metrics=['mse'])
# Load trained weights, then run 500 evaluation episodes, logging to CSV.
weights_filepath = data_filepath + 'train/weights_steps_'+ str(NB_STEPS_TRAINING) +'.h5f'
dqn.load_weights(weights_filepath)
with open(csv_filepath, 'w', newline='') as file:
    writer = csv.writer(file, quoting=csv.QUOTE_NONNUMERIC, delimiter=';')
    writer.writerow(['episode', 'episode_steps', 'highest_score', 'max_tile'])
_callbacks = [TestCall2048(csv_filepath)]
dqn.test(env, nb_episodes=500, visualize=False, verbose=1, callbacks=_callbacks)
|
"""
You are given an array (which will have a length of at least 3, but could be very large)
containing integers. The array is either entirely comprised of odd integers or entirely comprised
of even integers except for a single integer N. Write a method that takes the array as an argument
and returns N.
For example:
[2, 4, 0, 100, 4, 11, 2602, 36]
Should return: 11
[160, 3, 1719, 19, 11, 13, -21]
Should return: 160
"""
def find_outlier(integers):
    """Return the single integer whose parity differs from all the rest."""
    parity_count = [0, 0]      # how many even / odd values were seen
    last_seen = [None, None]   # most recent even / odd value
    for value in integers:
        parity = value % 2
        parity_count[parity] += 1
        last_seen[parity] = value
    # Exactly one even number means the even one is the outlier;
    # otherwise the lone odd value is.
    return last_seen[0] if parity_count[0] == 1 else last_seen[1]
# test.assert_equals(find_outlier([2,6,8,10,3]), 3)
# Quick manual check: 3 is the lone odd value, so 3 should be printed.
print(find_outlier([2, 6, 8, 10, 3]))
|
#!/usr/bin/env python
import sys, time;
def iterative(base, exp):
    """Compute base**exp (exp >= 0) by repeated multiplication."""
    if exp == 0:
        return 1
    result = base
    # exp - 1 further multiplications accumulate base**exp.
    for _ in range(exp - 1):
        result *= base
    return result
def recursive(base, exp):
    """Compute base**exp (exp >= 0) by linear recursion."""
    if exp == 0:
        return 1
    if exp == 1:
        return base
    return base * recursive(base, exp - 1)
if __name__ == "__main__":
sys.setrecursionlimit(10000);
print("Exponent\tIterative\tRecursive");
for x in range(10000):
start_time_iterative = time.time();
iterative(3, x);
end_time_iterative = time.time();
start_time_recursive = time.time();
recursive(3, x);
end_time_recursive = time.time();
print("%i\t%g\t%g" % (x, end_time_iterative - start_time_iterative, end_time_recursive - start_time_recursive));
|
"""Identify if page is in single or multi-columns zone."""
from operator import itemgetter
def run_filter(page_index, year):
    """Determine whether or not to operate on incoming class file.

    Looks up the hand-curated (start_page, in_multicolumn_zone) transition
    list for `year` and returns the flag of the nearest transition that
    lies strictly before `page_index`.
    """
    transitions = {
        '1920': [[-1, False], [949, True], [1291, False]],
        '1921': [[-1, False], [665, True], [875, False], [1128, True], [1418, False]]
    }
    # Keep only transitions below page_index, keyed by their distance;
    # the smallest distance identifies the governing zone flag.
    candidates = [(page_index - start, flag)
                  for start, flag in transitions[year]
                  if page_index - start > 0]
    return min(candidates)[1]
|
#!/usr/bin/env python
'''
**********************************************************************
* Filename : steer.py
* Description : A driver to steer the front wheels left and right.
This is used in my self driving R/C car "tanis"
as the basic module to steer the car
* Author : Joe Kocsis
* E-mail : Joe.Kocsis3@gmail.com
* Website : www.github.com/jkocsis3/tanis
**********************************************************************
'''
from PWM_Control import PCA9685
from smbus import SMBus
import rospy
class Steer(object):
    """Drive the front-wheel steering servo through a PCA9685 PWM board."""
    def __init__(self, debug=True):
        # Set our limits. there are about 30 degrees of movement on each side of center.
        self._maxangle = {"left": 60, "straight": 90, "right": 120}
    def turn(self, anglein):
        """Point the wheels at `anglein` degrees (90 = straight ahead).

        Opens the I2C bus, programs the PCA9685 frequency, and writes the
        duty cycle derived from the angle. Side effects only; returns None.
        """
        print("turning to: " + str(anglein))
        # http://www.python-exemplary.com/drucken.php?inhalt_mitte=raspi/en/servomotors.inc.php
        fPWM = 50  # PWM frequency handed to the PCA9685
        i2c_address = 0x40 # (standard) adapt to your module
        channel = 15 # adapt to your wiring
        # a = 6.5 # adapt to your servo
        # b = 2 # adapt to your servo
        bus = SMBus(1) # Raspberry Pi revision 2
        pwm = PCA9685.PWM(bus, i2c_address)
        pwm.setFreq(fPWM)
        # duty = a / 180 * anglein + b
        duty = self.SetAngle(anglein)
        pwm.setDuty(channel, duty)
        print("direction =" + str(anglein) + "-> duty =" + str(duty))
        print("turn complete")
    def SetAngle(self, angle):
        # Linear map from degrees to PCA9685 duty value: 0 -> 2, 180 -> 12.
        return angle / 18 + 2
if __name__ == '__main__':
    # Construct the driver only; turn() must be called to actually steer.
    Steer()
|
import sys
import os
import subprocess
sys.path.insert(0, 'scripts')
import experiments as exp
def print_help():
    """Show the expected command-line usage for this script."""
    usage = "syntax: python generate_parsimony.py runname sequence model trees_number"
    print(usage)
if (len(sys.argv) != 5):
    print_help()
    sys.exit(0)
# Positional CLI arguments (see print_help for the expected order).
runname = sys.argv[1]
sequence = sys.argv[2]
model = sys.argv[3]
trees_number = int(sys.argv[4])
resultsdir = os.path.join("oldraxml", "generate_parsimony", runname)
resultsdir = exp.create_result_dir(resultsdir)
# Record which git revision of old RAxML produced these results.
result_msg = "old raxml git: \n" + exp.get_git_info(exp.oldraxml_root) + "\n"
exp.write_results_info(resultsdir, result_msg)
# One RAxML parsimony start tree per seed.
# NOTE(review): range(1, trees_number) produces trees_number - 1 trees —
# confirm whether this off-by-one is intended.
for parsimonySeed in range(1, trees_number):
    command = []
    command.append(exp.oldraxml_exec)
    command.append("-y") # stop after parsimony generation
    command.append("-m")
    command.append(model)
    command.append("-s")
    command.append(sequence)
    command.append("-p")
    command.append(str(parsimonySeed))
    command.append("-w")
    command.append(resultsdir)
    command.append("-n")
    command.append("parsi" + str(parsimonySeed))
    print("Running " + str(" ".join(command)))
    subprocess.check_call(command)
|
from django.db import models
class Users(models.Model):
    """Minimal credential record with a name and password."""
    # NOTE(review): password is stored as a plain CharField — confirm
    # whether Django's hashed auth system should be used instead.
    username = models.CharField(max_length=30, verbose_name="name")
    password = models.CharField(max_length=30, verbose_name="password")
# Create your models here.
|
######################################################################
############### Support Vector Machine Classifier ####################
######################################################################
import math
import numpy as np
import pandas as pd
from sklearn import datasets
import matplotlib.pyplot as plt
from matplotlib import style
style.use("ggplot")
# load in the sklearn wine dataset for examples (the original comment
# said "iris", but load_wine is what is actually called)
wine = datasets.load_wine()
wine = pd.DataFrame(np.c_[wine["data"], wine["target"]],
                    columns=wine["feature_names"]+["target"])
# Features: every column except the class label; y: the label column.
X = wine[wine.columns[wine.columns != "target"]].values
y = wine[wine.columns[wine.columns == "target"]].values
print(wine.head())
class Support_Vector_Machine:
    """Skeleton linear SVM classifier.

    fit() is not implemented yet (training was left unfinished in the
    original); predict() classifies points by sign(x . w + b) once
    `w` and `b` have been set.
    """
    def __init__(self, C=0.01, visualization=True):
        self.visualization = visualization
        # BUG FIX: the original line `self.colors {1: "r", -1: "b"}` was
        # missing '=' and did not parse.
        self.colors = {1: "r", -1: "b"}
        if self.visualization:
            self.fig = plt.figure()
            # BUG FIX: the original referenced plt.fig; the figure was
            # stored on self, not on the pyplot module.
            self.ax = self.fig.add_subplot(1, 1, 1)
    def fit(self, X, y):
        # Training is not implemented; placeholders replace the original
        # dangling `self.w =` / `self.b =` lines (SyntaxError) while
        # keeping the attribute interface stable.
        self.w = None
        self.b = None
    def predict(self, X):
        # sign(x dot w + b)
        y_pred = np.sign(np.dot(np.array(X), self.w) + self.b)
        return y_pred
|
import json
import numpy as np
from PIL import Image, ImageStat, ImageEnhance
path = "chest_xray"
outpath = "chest-xray-224"
path = "state-farm-distracted-driver-detection/train"
outpath = "state-farm-distracted-driver-detection/train-224"
def reduce(path, size=224, method=Image.LANCZOS, out=None):
    """Resize every image listed in {path}/db.json to size x size and write
    the images plus an updated db.json under the output directory.

    Improvement: the output directory used to be read from the module-level
    global `outpath` only; it is now the `out` parameter, defaulting to that
    global so existing callers behave identically.
    """
    if out is None:
        out = outpath  # backward-compatible fallback to the module global
    print(f"Open {path}/db.json")
    with open(f"{path}/db.json", "r") as f:
        db = json.loads(f.read())
    db["path"] = out
    for item in db["data"]:
        file = f"{item['path']}/{item['name']}"
        im = Image.open(file)
        # Entries whose path is flagged "small" are copied without resizing.
        if "small" not in item["path"]:
            im = im.resize((size, size), method)
        item["size"] = (size, size)
        # Rewrite the stored paths so db.json points into the output tree.
        item["path"] = item["path"].replace(path, out)
        file = file.replace(path, out)
        print(f"Create {file}")
        im.save(file)
    print(f"Create {out}/db.json")
    with open(f"{out}/db.json", "w") as f:
        f.write(json.dumps(db, indent=4))
if __name__ == '__main__':
    # Convert the dataset configured by the module-level path variables.
    reduce(path)
from django.shortcuts import render,redirect,reverse
from django.views import View
from db.login_mixin import LoginRequiredMixin
from resume.models import Resume
from interview.models import Interview
from job.models import Position
# Create your views here.
class MyInterviewView(LoginRequiredMixin, View):
    """Interview invitations for the logged-in user, split by the
    interview_status codes "YM" and "JM" (see the Interview model)."""
    def get(self, request):
        me = request.user
        context = {
            "interview_ym_list": Interview.objects.filter(interview_status="YM", user=me),
            "interview_jm_list": Interview.objects.filter(interview_status="JM", user=me),
        }
        return render(request, 'my_interviews.html', context)
class InterviewView(LoginRequiredMixin, View):
    """Interview invitation page for one resume (original docstring:
    面试邀请函, "interview invitation letter")."""
    def get(self, request, resume_id):
        resume = Resume.objects.get(id=resume_id)
        employer = request.user.enterprice_set.all()[0]
        positions = Position.objects.filter(enterprice_id=employer.id)
        return render(request, 'interview.html',
                      {"resume": resume, "position_list": positions})
class InvitationView(LoginRequiredMixin, View):
    """Invitation page: recruiters are redirected to their profile;
    applicants see toggle state reflecting whether their resume is
    public (is_public == 0 means hidden)."""
    def get(self, request):
        if request.user.role.name == "recruiter":
            return render(request, 'profile.html')
        resume = Resume.objects.get(user_id=request.user.id)
        hidden = resume.is_public == 0
        context = {
            "block": "block" if hidden else "none",
            "none": "none" if hidden else "block",
            "class_name": "plus open" if hidden else "plus",
        }
        return render(request, 'invitation.html', context)
class InterviewRecordView(LoginRequiredMixin, View):
    """Employer-side interview record: "JM" interviews first, then "YM",
    with each applicant's first resume collected in the same order."""
    def get(self, request):
        employer = request.user.enterprice_set.all()[0]
        jm_list = Interview.objects.filter(interview_status="JM", enterprice=employer)
        ym_list = Interview.objects.filter(interview_status="YM", enterprice=employer)
        resume_list = [entry.user.resume_set.all()[0]
                       for entry in list(jm_list) + list(ym_list)]
        return render(request, 'interviews_record.html',
                      {"interview_jm_list": jm_list,
                       "interview_ym_list": ym_list,
                       "resume_list": resume_list})
class InterviewResultView(LoginRequiredMixin, View):
    """Outcome of a single interview plus the applicant's first resume."""
    def get(self, request, interview_id):
        record = Interview.objects.get(id=interview_id)
        context = {
            "interview_detail": record,
            "resume_info": record.user.resume_set.all()[0],
        }
        return render(request, 'interview_result.html', context)
class InterviewDetailView(LoginRequiredMixin,View):
    '''Interview detail page (原文: 面试详情页面). Render-only; no context.'''
    def get(self,request):
        return render(request, 'interview_detail.html')
class InterviewCommentView(LoginRequiredMixin,View):
    '''Interview feedback page (原文: 面试评价). Render-only; no context.'''
    def get(self,request):
        return render(request, 'interview_comment.html')
#!/usr/bin/python
import os
import sys
import re
import ConfigParser
import subprocess
def main(config):
    # Python 2 script. Dispatch on the first CLI argument:
    # stop | kill | start | restart; anything else silently does nothing.
    if len(sys.argv) == 1:
        print 'No action specified'
        exit(1)
    elif sys.argv[1] == 'stop':
        stop(config)
    elif sys.argv[1] == 'kill':
        kill(config)
    elif sys.argv[1] == 'start':
        start(config)
    elif sys.argv[1] == 'restart':
        restart(config)
def stop(config):
    # Gracefully stop the configured container via `sudo docker stop`.
    print 'Stopping container ' + config.get('default', 'container name')
    subprocess.call(['sudo', 'docker', 'stop', config.get('default', 'container name')])
def kill(config):
    # Stop the container, then remove it entirely (`docker rm`).
    stop(config)
    print 'Removing container ' + config.get('default', 'container name')
    subprocess.call(['sudo', 'docker', 'rm', config.get('default', 'container name')])
def restart(config):
    # Full recycle: remove the old container and launch a fresh one.
    kill(config)
    start(config)
def start(config):
    # Launch the configured container with its volume mounts, then restart
    # the front-end proxy container so it picks up the new instance.
    # print 'Restarting dnsmasq'
    # subprocess.call(['sudo', '/etc/init.d/dnsmasq', 'restart'])
    # print 'Fixing permissions'
    # subprocess.call(['sudo', 'chmod', '0777', 'logs', '-Rf'])
    # print 'Generating site config files'
    # subprocess.call(['scripts/containerinfo.py'])
    print 'Running container %s' % (config.get('default', 'container name'), )
    # TODO: Put these in a settings file.
    call_parameters = ['sudo', 'docker', 'run']
    detached = config.getboolean('default', 'detached')
    if detached:
        call_parameters.append('-d')
    # Each [volumes] entry maps host path (value) -> container path (key);
    # '%(dir)' expands to the current working directory.
    for vol in config.items('volumes'):
        call_parameters.append('-v')
        call_parameters.append(vol[1].replace('%(dir)', os.getcwd()) + ':' + vol[0].replace('%(dir)', os.getcwd()))
    # NOTE(review): single-dash '-name' is legacy docker syntax — confirm
    # the target docker version still accepts it.
    call_parameters.append('-name')
    call_parameters.append(config.get('default', 'container name'))
    call_parameters.append(config.get('default', 'image name'))
    # print call_parameters
    subprocess.call(call_parameters)
    web_proxy_name = config.get('default', 'proxy')
    print 'Restarting container %s' % (web_proxy_name, )
    subprocess.call(['sudo', 'docker', 'restart', web_proxy_name])
if __name__ == '__main__':
    # Expect docker-loader.conf in the current working directory
    # (path rebuilt from the split of os.getcwd()).
    current_folder_path, current_folder_name = os.path.split(os.getcwd())
    config = ConfigParser.RawConfigParser()
    config.read(current_folder_path + '/' + current_folder_name + '/docker-loader.conf')
    main(config)
|
# from __future__ import absolute_import
#
# django-configurations bootstrap: importer.install() must run BEFORE the
# settings classes below are imported, so the import order here is load-bearing.
from configurations import importer
importer.install()
from common import Common
from local import Local
from prod import Prod
|
# Python program that prints a greeting.
import cs50
# NOTE(review): the cs50 import is unused here — presumably required by
# the course tooling; confirm before removing.
print('hello, world')
|
import numpy as np
import pandas as pd
from operator import itemgetter
from sklearn.metrics.pairwise import cosine_similarity
from functions import simple_process
# print recommended poems
def poem_printout(df, similar_poems):
    '''
    Print a stylized list of recommended poems.

    Input
    -----
    df : Pandas DataFrame
        Poem database with at least title, poet, genre, and
        poem_url columns.
    similar_poems : list (tup)
        (poem index, similarity fraction) tuples.

    Output
    ------
    Prints each poem's percent match, title/poet, genre, and URL,
    separated by horizontal rules.
    '''
    divider = '-' * 100
    print(divider)
    for idx, score in similar_poems:
        # Similarity as a percentage, one decimal place.
        print(f'{round(score*100,1)}% match')
        print(f'{df.loc[idx,"title"].upper()} by {df.loc[idx,"poet"]}')
        genre = df.loc[idx, "genre"]
        if genre == 'new_york_school_2nd_generation':
            # Title-casing would mangle "2nd", so this genre is special-cased.
            print('GENRE: New York School 2nd Generation')
        else:
            print(f'GENRE: {genre.replace("_", " ").title()}')
        print(f'URL: {df.loc[idx,"poem_url"]}')
        print(divider)
# search based on a keyword
def word_similarity(
word,
df,
model,
n=5,
to_print=True):
'''
Function to find the n-most-similar poems, based on
an established word vector.
Input
-----
word : str
Single word whose vector, if known, will be compared
to document vectors.
df : Pandas DataFrame
Database of all poems.
model : Doc2Vec model
Fitted Gensim Doc2Vec object.
`gensim.models.doc2vec.Doc2Vec`
Optional input
--------------
n : int
The number of poems to return (default=5).
to_print : bool
Whether to print poem similarities in stylized
format (default=True).
Output
------
similar_poems : list (tup)
List of similar poems with poem index as an integer
and percent similarity as a float.
'''
# if word in model's corpus
try:
# find vector for input word, if it exists within the model
vec = model[word]
# find poems that are most similar to that word vector
similar_poems = model.docvecs.most_similar([vec], topn=n)
# optional printout
if to_print:
poem_printout(df, similar_poems)
return similar_poems
# if word not in model's corpus
except KeyError:
print("I don't know that word; try again.")
# search based on a phrase
def phrase_similarity(
        text,
        df,
        model,
        n=5,
        to_print=True):
    '''
    Return the n poems most similar to a free-text phrase.

    The phrase is cleaned with `simple_process` (same preprocessing the
    model's documents received), embedded with model.infer_vector, and
    ranked against the poem document vectors.

    Input
    -----
    text : str — phrase to embed and compare.
    df : Pandas DataFrame — poem database (used only for printing).
    model : fitted `gensim.models.doc2vec.Doc2Vec` object.

    Optional input
    --------------
    n : int — number of poems to return (default=5).
    to_print : bool — pretty-print the matches (default=True).

    Output
    ------
    List of (poem index, similarity) tuples.
    '''
    tokens = simple_process(text).split()
    phrase_vec = model.infer_vector(tokens)
    matches = model.docvecs.most_similar([phrase_vec], topn=n)
    if to_print:
        poem_printout(df, matches)
    return matches
# search based on a poem in the dataframe
def poem_similarity(
title,
poet,
df_info,
df_vectors,
n=5,
to_print=True):
'''
Function to find the n-most-similar poems, based on
cosine similarity scores.
Input
-----
title : str
Title of input poem, for which to find the most
similar poems.
poet : str
Author of poem.
df_info : Pandas DataFrame
Database of poet, title, URL, and genre.
df_vectors : Pandas DataFrame
Database of poem data and embeddings
(Doc2Vec or Word2Vec).
Optional input
--------------
n : int
The number of poems to return (default=5).
to_print : bool
Whether to print poem similarities in stylized
format (default=True).
Output
------
similar_poems : list (tup)
List of similar poems with poem index as an integer
and percent similarity as a float.
'''
# find the index value for the input poem
# NOTE: since some poems have the same title but
# different poets, both fields are required
poem_id = df_info[(df_info.title == title) & \
(df_info.poet == poet)].index[0]
# calculate cosine similarities for that poem
# reshape vector to 1 x number_of_columns to plug into
# similarity function
# NOTE: index value should correspond to same poem in
# both dataframes
cos_sims = enumerate(cosine_similarity(
df_vectors.iloc[poem_id].values.reshape(1,-1),
df_vectors)[0]
)
# find and return poems that are most similar to the
# input poem
# NOTE: add one to the `n` value and slice off first
# result because the first result will always be
# the same as the input poem
similar_poems = sorted(cos_sims,
key=itemgetter(1),
reverse=True)[1:n+1]
# optional printout
if to_print:
poem_printout(df_info, similar_poems)
return similar_poems
# filter recommended poems based on various parameters
def poem_filter(
similar_poems,
df,
genre=None,
min_lines=None,
max_lines=None,
min_len_line=None,
max_len_line=None,
polarity=None,
end_rhyme=None,
to_print=True):
'''
Function to filter results based on various optional
parameters.
Input
-----
similar_poems : list (tup)
List of document tags (corresponding to dataframe index
values) and percentage of cosine similarity.
df : Pandas DataFrame
Database of poems and info.
Optional input
--------------
genre : str
Genre of returned poems.
One of ['beat', 'black_arts_movement', 'black_mountain',
'confessional', 'harlem_renaissance', 'imagist',
'language_poetry', 'modern', 'new_york_school',
'new_york_school_2nd_generation', 'objectivist',
'romantic', 'victorian'].
min_lines : int
Minimum number of lines in returned poem.
max_lines : int
Maximum number of lines in returned poem.
min_len_line : float
Minimum average number of words per line in returned
poem.
max_len_line : float
Maximum average number of words per line in returned
poem.
polarity : str
Sentiment of poem.
One of ['positive', 'neutral', 'negative'].
end_rhyme : str
Whether returned poems have few to no end rhymes (`no`)
or many end rhymes (`yes`).
One of ['no', 'yes'].
Output
------
similar_poems : list (tup)
Filtered list of tuples with poem index as an integer
and percent similarity as a float.
Prints a message if similar_poems is empty.
'''
# genre filter
if genre:
# limit dataframe to poems within input genre
df = df[df.genre == genre]
# poem length filter
if min_lines:
if max_lines:
# if user inputs both values
df = df[(df.num_lines >= min_lines) & \
(df.num_lines <= max_lines)]
else:
# if user only inputs minimum length of poem
df = df[df.num_lines >= min_lines]
# if user only inputs maximum length of poem
elif max_lines:
df = df[df.num_lines <= max_lines]
# line length filter
if min_len_line:
if max_len_line:
# if user inputs both values
df = df[(df.avg_len_line >= min_len_line) & \
(df.avg_len_line <= max_len_line)]
else:
# if user only inputs minimum length of line
df = df[df.avg_len_line >= min_len_line]
# if user only inputs minimum length of line
elif max_len_line:
df = df[df.avg_len_line <= max_len_line]
# sentiment filter
if polarity:
# limit dataframe to poems within input sentiment polarity
df = df[df.sentiment_polarity == polarity]
# end rhyme filter
# NOTE: input is 'no' or 'yes' for user readability
# convert to 0 or 1 and limite dataframe to poems within that end_rhyme value
if end_rhyme == False:
df = df[df.end_rhyme == 0]
elif end_rhyme == True:
df = df[df.end_rhyme == 1]
# re-create the original list using only poems that satisfy the filters (i.e. appear in the filtered dataframe)
similar_poems = [(i, pct) for i, pct in similar_poems \
if i in df.index]
# return poems if available
if similar_poems:
# optional printout
if to_print:
poem_printout(df, similar_poems)
return similar_poems
# return a message if the list is empty
else:
print('Filter too fine. Please retry.')
# create genre subsets for t-SNE visualizations
def make_tsne_subset(tsne_df, poetry_df, column, col_value):
'''
Function to create subsets to prepare for t-SNE
visualization.
Input
-----
tsne_df : Pandas DataFrame
Fit/transformed t-SNE object, with document tags
as the index.
poetry_df : Pandas DataFrame
Database of poems.
column : str
Name of column on which to create subset.
col_value : str
Value of column on which to create subset.
Output
------
style_subset : Pandas DataFrame
DataFrame subset.
[Modeled after]:
https://github.com/aabrahamson3/beer30/blob/master/functions.py
'''
# limit dataframe to column with column value
subset = poetry_df.loc[poetry_df[column] == col_value]
# create a set of subsets indices
subset_set = set(subset.index)
# match those indices with corresponding indices in
# TtSNE dataframe
match = set(tsne_df.index).intersection(subset_set)
# create and return tsne_df with corresonding indices
style_subset = tsne_df[tsne_df.index.isin(match)]
return style_subset
# search based on a poem in the dataframe
# if similarities have been calculated beforehand
# NOTE: faster but requires a large file (114MB)
def poem_similarity_precalculated(
        title,
        poet,
        df,
        similarities,
        n=5,
        to_print=True):
    '''
    Like poem_similarity, but reads pre-computed cosine scores instead of
    computing them (faster, at the cost of loading a large matrix).

    Input
    -----
    title, poet : str — both needed because titles are not unique.
    df : Pandas DataFrame — poem database.
    similarities : list (arr) — similarities[i] holds poem i's cosine
        scores against every poem; positions match df's index.

    Optional input
    --------------
    n : int — number of poems to return (default=5).
    to_print : bool — pretty-print the matches (default=True).

    Output
    ------
    List of (poem index, similarity) tuples; the input poem itself
    (always rank one) is dropped.
    '''
    match = (df.title == title) & (df.poet == poet)
    poem_id = df[match].index[0]
    ranked = sorted(enumerate(similarities[poem_id]),
                    key=itemgetter(1),
                    reverse=True)
    # Rank 0 is the poem itself, so keep ranks 1..n.
    top_matches = ranked[1:n + 1]
    if to_print:
        poem_printout(df, top_matches)
    return top_matches
import numpy as np
def knn_matrix(scale=20, k=2):
    """Build a (scale, scale, 2k+1, 2k+1) array of Gaussian-style weights.

    Each cell (y, x) receives a (2k+1) x (2k+1) patch of 2**(-0.5 * d**2)
    values, where d is the distance from the cell's (offset) grid position
    to each neighbour on the k-neighbourhood lattice.
    """
    window = 2 * k + 1
    weights = np.zeros((scale, scale, window, window))
    center = 2 * k
    for y in range(scale):
        for x in range(scale):
            # Offset coordinates, matching the lattice centred at 2k.
            gx = x + 2 * k * scale
            gy = y + 2 * k * scale
            patch = [
                2 ** (-0.5 * ((gx / scale - (center + i)) ** 2
                              + (gy / scale - (center + j)) ** 2))
                for j in range(-k, k + 1)
                for i in range(-k, k + 1)
            ]
            weights[y, x] = np.array(patch).reshape(-1, window)
    return weights
|
# Read a line from stdin and print every 3rd character (indices 0, 3, 6, ...).
st=input()
l=list(st)
st1=''
for i in range(len(l)):
    if(i%3==0):
        st1+=l[i]
print(st1)
|
from bs4 import BeautifulSoup
from nltk import data, word_tokenize
import random, re
tokenizer = data.load('tokenizers/punkt/english.pickle')
# Split the NYSK XML corpus into per-document train/test text files,
# one cleaned, lower-cased sentence per line.
with open('dataset/nysk.xml', 'r') as corpus:
    dataset = corpus.read()
soup = BeautifulSoup(dataset)
with open('parameters.txt', 'r') as params:
    totalFiles = int(params.readlines()[0].strip())
# Hold out ~1% of the documents (capped at 100) as the test split.
# BUG FIX: `totalFiles*1/100` is a float on Python 3 and random.sample
# needs an int sample size; use floor division.
testCount = min(100, totalFiles // 100)
fileNumber = 0
# BUG FIX: xrange does not exist on Python 3; range works everywhere.
test = set(random.sample(list(range(totalFiles)), testCount))
for node in soup.findAll('text'):
    fileNumber += 1
    if fileNumber in test:
        textdata = open('dataset/test/'+str(fileNumber)+'.txt', 'w')
    else:
        textdata = open('dataset/train/'+str(fileNumber)+'.txt', 'w')
    # Drop non-ASCII characters and newlines before sentence-splitting.
    text = node.text.encode('ascii', 'ignore')
    text = text.decode('utf-8')
    text = text.replace('\n', '')
    sentences = tokenizer.tokenize(text)
    for sentence in sentences:
        # Strip periods, keep word chars/whitespace/apostrophes,
        # collapse runs of spaces, lowercase.
        sentence = sentence.replace('.', '')
        # NOTE(review): this substitution is a no-op (apostrophe ->
        # apostrophe) — was it meant to decode &apos; entities?
        sentence = re.sub("'","'",sentence)
        sentence = re.sub(r"[^\w\s']|_","",sentence)
        sentence = re.sub(' +',' ',sentence)
        sentence = sentence.lower()
        textdata.write(sentence+'\n')
    textdata.close()
|
from django.shortcuts import render
from django.http import HttpResponse
# from .models import Post
import datetime
def current_datetime(request):
    """Return a plain HTML snippet showing the server's current time."""
    timestamp = datetime.datetime.now()
    return HttpResponse("It is now %s." % timestamp)
def index(request):
    # Simple smoke-test endpoint confirming the app is wired up.
    return HttpResponse("Yahoo! It works :D")
# def post_list(request):
# posts = Post.objects.order_by('published_date')
# return render(request, 'html/index.html', {posts}) |
# -*- coding: utf-8 -*-
import re
from xkeysnail.transform import *
# # Swap Alt/Super
# define_modmap({
# Key.LEFT_ALT: Key.LEFT_META,
# Key.LEFT_META: Key.LEFT_ALT,
# })
# define_multipurpose_modmap({
# Key.Q: [Key.Q, Key.LEFT_HYPER],
# Key.CAPSLOCK: [Key.ESC, Key.LEFT_CTRL],
# Key.LEFT_CTRL: [Key.ESC, Key.LEFT_CTRL],
# Key.SPACE: [Key.SPACE, Key.LEFT_SHIFT],
# Key.LEFT_META: [Key.HENKAN, Key.LEFT_META],
# })
# # Hyper key mappings
# define_keymap(None, {
# # hjkl move/selection
# K("Hyper-H"): K("Left"),
# K("Hyper-J"): K("Down"),
# K("Hyper-K"): K("Up"),
# K("Hyper-L"): K("Right"),
# K("Shift-Hyper-H"): K("Shift-Left"),
# K("Shift-Hyper-J"): K("Shift-Down"),
# K("Shift-Hyper-K"): K("Shift-Up"),
# K("Shift-Hyper-L"): K("Shift-Right"),
# K("Ctrl-Hyper-H"): K("Ctrl-Left"),
# K("Ctrl-Hyper-J"): K("Ctrl-Down"),
# K("Ctrl-Hyper-K"): K("Ctrl-Up"),
# K("Ctrl-Hyper-L"): K("Ctrl-Right"),
# K("Ctrl-Shift-Hyper-H"): K("Ctrl-Shift-Left"),
# K("Ctrl-Shift-Hyper-J"): K("Ctrl-Shift-Down"),
# K("Ctrl-Shift-Hyper-K"): K("Ctrl-Shift-Up"),
# K("Ctrl-Shift-Hyper-L"): K("Ctrl-Shift-Right"),
# # home/end
# K("Hyper-N"): K("Home"),
# K("Shift-Hyper-N"): K("Shift-Home"),
# K("Hyper-Semicolon"): K("End"),
# K("Shift-Hyper-Semicolon"): K("Shift-End"),
# # PageUp/PageDown
# K("Hyper-I"): K("Page_Up"),
# K("Hyper-M"): K("Page_Down"),
# # Insert
# K("Hyper-O"): K("Insert"),
# # Prev/Next Tab
# K("Hyper-Comma"): K("Ctrl-Shift-Tab"),
# K("Hyper-Dot"): K("Ctrl-Tab"),
# # Ctrl Shortcuts
# K("Hyper-Y"): K("Ctrl-C"), # Yank
# K("Hyper-X"): K("Ctrl-X"), # Cut
# K("Hyper-P"): K("Ctrl-V"), # Paste
# K("Hyper-A"): K("Ctrl-A"), # Select All
# K("Hyper-U"): K("Ctrl-Z"), # Undo
# K("Hyper-Slash"): K("Ctrl-F"), # Search
# # Function Keys
# K("Hyper-Key_1"): K("F1"),
# K("Hyper-Key_2"): K("F2"),
# K("Hyper-Key_3"): K("F3"),
# K("Hyper-Key_4"): K("F4"),
# K("Hyper-Key_5"): K("F5"),
# K("Hyper-Key_6"): K("F6"),
# K("Hyper-Key_7"): K("F7"),
# K("Hyper-Key_8"): K("F8"),
# K("Hyper-Key_9"): K("F9"),
# K("Hyper-Key_0"): K("F10"),
# K("Hyper-Minus"): K("F11"),
# K("Hyper-Equal"): K("F12"),
# }, "Hyper mapping")
# define_keymap(re.compile("gnome-terminal|kitty|code", re.IGNORECASE), {
# K("Esc"): [K("Muhenkan"), K("Esc")],
# }, "Disable IME")
# Remap clipboard shortcuts inside terminal emulators so Ctrl-C/X/V behave
# like GUI apps, while Ctrl-Shift-C takes over sending the interrupt.
define_keymap(re.compile("gnome-terminal|kitty", re.IGNORECASE), {
    # Copy/Cut/Paste: plain Ctrl chords -> the terminal's Ctrl-Shift variants
    K("Ctrl-C"): K("Ctrl-Shift-C"),
    K("Ctrl-X"): K("Ctrl-Shift-X"),
    K("Ctrl-V"): K("Ctrl-Shift-V"),
    # Cancel: Ctrl-Shift-C now delivers the terminal interrupt (Ctrl-C)
    K("Ctrl-Shift-C"): K("Ctrl-C"),
}, "Terminal")
|
import numpy as np
import copy
class Agent:
    """A player in the evolutionary altruism game.

    Each agent carries an evolvable strategy ``x`` and altruism weight
    ``alpha``, plus fixed game parameters ``k`` (externality) and ``m``
    (self-utility).  Payoffs follow eq. (1), utilities eq. (8), and the
    mutually-rational strategies eq. (12) of the underlying paper.
    """

    def __init__(self, k, m, mu=0.1, x=None, alpha=None, mutable_variables=None):
        """Create an agent for the Genetic Algorithm simulation.

        :param k: externality parameter; must satisfy -1 < k < 1
        :param m: self-utility parameter; must satisfy m > 0
        :param mu: mutation standard deviation; must satisfy mu > 0
        :param x: a-priori strategy (x >= 0); drawn randomly when None
        :param alpha: altruism weight in [0.5, 1]; drawn randomly when None
        :param mutable_variables: attribute names subject to mutation,
            a subset of {"alpha", "x"}
        """
        assert -1 < k < 1
        assert m > 0
        assert mu > 0
        if x is not None:
            assert x >= 0
        if alpha is None:
            # Random altruism weight over the allowed range.
            alpha = np.random.uniform(0.5, 1.0)
        else:
            assert 0.5 <= alpha <= 1
        if mutable_variables is None:
            mutable_variables = []
        # Evolvable parameters (may be mutated in offspring).
        self.alpha = alpha
        if x is None:
            # Log-uniform initial strategy in [1, e^2].
            x = np.exp(np.random.uniform(0, 2))
        self.x = x
        # Fixed hyperparameters (never mutated).
        self.k = k
        self.m = m
        self.payoff = 0
        self.utility = 0
        # Mutation configuration.
        for name in mutable_variables:
            assert name in ["alpha", "x"]
        self.mutable_params = mutable_variables
        self.mutation_std = mu

    def compute_payoff(self, x, y):
        """Set this agent's payoff for the strategy pair — eq. (1) from paper.

        :param x: strategy of this agent
        :param y: strategy of the interacting agent
        """
        self.payoff = (self.k * y + self.m - x) * x

    def compute_utility(self, u_1, u_2):
        """Blend own and partner payoffs by the altruism weight — eq. (8).

        :param u_1: payoff of this agent
        :param u_2: payoff of the interacting agent
        """
        weight = self.alpha
        self.utility = weight * u_1 + (1 - weight) * u_2

    def interact(self, agent_2):
        """Play one round against ``agent_2``; update payoff/utility of both.

        :param agent_2: the agent with whom the current agent interacts
        :type agent_2: Agent
        """
        if "x" in self.mutable_params:
            # Evolutionary mode: both agents play their inherited strategies.
            x, y = self.x, agent_2.x
        else:
            # Rational mode: strategies are the mutual best responses.
            x, y = self.rational_strategies(agent_2)
            self.x = x
            agent_2.x = y
        self.compute_payoff(x, y)
        agent_2.compute_payoff(y, x)
        self.compute_utility(self.payoff, agent_2.payoff)
        agent_2.compute_utility(agent_2.payoff, self.payoff)

    def reproduce(self):
        """Return a mutated deep copy of this agent.

        :return: a new Agent with random mutations applied to the
            mutable parameters.
        :rtype: Agent
        """
        offspring = copy.deepcopy(self)
        for name in self.mutable_params:
            noise = np.random.normal(loc=0.0, scale=self.mutation_std)
            # Relative (multiplicative) perturbation of the inherited value.
            if name == "x":
                offspring.x = offspring.x + noise * offspring.x
            elif name == "alpha":
                offspring.alpha = offspring.alpha + noise * offspring.alpha
        # Clip any out-of-range mutations back into the legal domain.
        offspring.check_param_constraints()
        return offspring

    def check_param_constraints(self):
        """Clamp mutable parameters to their closest legal values."""
        self.x = max(self.x, 0)
        self.alpha = min(max(self.alpha, 0.5), 1)

    def rational_strategies(self, agent_2):
        """Mutually-rational Nash strategies (eq. 12), alphas assumed known.

        :param agent_2: partner agent in the interaction
        :return: the optimal, rational strategies (x for self, y for agent_2)
        """
        a = self.alpha
        b = agent_2.alpha
        m, k = self.m, self.k

        def best_response(own, other):
            # Closed-form equilibrium strategy from eq. (12).
            return other * m * (2 * own + k) / (4 * own * other - k ** 2)

        return best_response(a, b), best_response(b, a)
# Print the positive multiples of 6 from 6 up to 504, space-separated on one line.
for multiple in range(6, 505, 6):
    print(multiple, end=' ')
# import flask dependencies
import requests
from flask import Flask, request, make_response, jsonify
# initialize the flask app
app = Flask(__name__)
# default route
@app.route('/')
def index():
    """Landing page: return the plain-text welcome banner."""
    welcome_banner = 'WELCOME TO WEATHER TEMPERATURE APP'
    return welcome_banner
# function for responses
def results():
    """Build the Dialogflow fulfillment response for the matched intent.

    Reads the webhook request JSON from the current Flask request context.
    For the 'temperature' intent, fetches the city's current temperature
    from the OpenWeatherMap API and reports it in Celsius and Fahrenheit.

    :return: a dict with a Dialogflow 'fulfillmentMessages' payload.
    """
    # build a request object from the incoming webhook payload
    req = request.get_json(force=True)
    # fetch the matched intent name from the Dialogflow payload
    intent_name = req.get('queryResult').get('intent').get('displayName')
    if intent_name == 'temperature':
        city = req.get('queryResult').get('parameters').get('city')
        # NOTE(security): hard-coded API key; move to an environment
        # variable or config before deploying.
        API_KEY = '3f3483997cd121c26d7ad4b025150044'  # initialize your key here
        # call API and convert response into Python dictionary
        url = f'http://api.openweathermap.org/data/2.5/weather?q={city}&APPID={API_KEY}'
        # timeout added so a slow upstream API cannot hang the webhook
        temp_object = requests.get(url, timeout=10).json()
        # OpenWeatherMap returns Kelvin by default; convert to Celsius.
        temp = float(temp_object["main"]["temp"]) - 273.15
        temp = round(temp, 2)
        fehren = (temp * 9 / 5) + 32
        fehren = round(fehren, 2)
        fulfilment_text = f"Temperature of {city} is {temp} °C & {fehren} °F"
    else:
        # BUGFIX: previously any other intent produced no response dict
        # (the function fell through and returned None), which made the
        # webhook emit an invalid null fulfillment. Return a graceful
        # fallback message instead.
        fulfilment_text = "Sorry, I can only report city temperatures."
    # return a fulfillment response
    return {
        'fulfillmentMessages': [
            {
                'text': {
                    'text': [
                        fulfilment_text
                    ]
                }
            }
        ]
    }
# create a route for webhook
@app.route('/webhook', methods=['GET', 'POST'])
def webhook():
    """Dialogflow webhook endpoint.

    Delegates to results() (which reads the current request's JSON body)
    and wraps the fulfillment dict in a JSON Flask response.
    """
    # return response
    return make_response(jsonify(results()))
# run the app
if __name__ == '__main__':
    # Flask development server (defaults to 127.0.0.1:5000); not for production.
    app.run()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.