import os
import json
from argparse import ArgumentParser
import corenlp
os.environ.setdefault('CORENLP_HOME', 'stanford-corenlp')
def main(args):
num_sentences = 0
with open(args.input, encoding='utf-8') as f, open(args.output, mode='w', encoding='utf-8') as out:
with corenlp.CoreNLPClient(annotators="tokenize ner".split(), endpoint="http://localhost:5000") as client:
            for line in f:
mrp_json = json.loads(line)
tok = []
ner = []
ann = client.annotate(mrp_json['input'], output_format='json')
for sentence in ann['sentences']:
for tokens in sentence['tokens']:
tok.append(tokens['word'])
ner.append(tokens['ner'])
if len(ner) != len(mrp_json['nodes']):
print(mrp_json['id'], " error!")
mrp_json['tok'] = tok
mrp_json['ner'] = ner
# for ner, nodes in zip(ner, mrp_json['nodes']):
# nodes['properties'].append('ner')
# nodes['values'].append(ner)
out.write(json.dumps(mrp_json) + '\n')
num_sentences += 1
if num_sentences % 1000 == 0:
print(f"Processed {num_sentences} sentences!")
if __name__ == '__main__':
argparser = ArgumentParser()
argparser.add_argument('--input', '-i', required=True)
argparser.add_argument('--output', '-o', required=True)
args = argparser.parse_args()
main(args)
|
import sys
sys.path.append('../pyastrohog/')
from astrohog2d import *
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from astropy.io import fits
import numpy as np
hdul=fits.open('../data/image1.fits')
image1=hdul[0].data
hdul.close()
hdul=fits.open('../data/image2.fits')
image2=hdul[0].data
hdul.close()
circstats, corrframe, smoothframe1, smoothframe2 = HOGcorr_frame(image1, image2)
print('Mean resultant vector (r) ', circstats[0])
print('Rayleigh statistic (Z) ', circstats[1])
print('Projected Rayleigh statistic (V) ', circstats[2])
print('Rayleigh statistic (ii) ', circstats[5], '+/-', circstats[6])
print('Mean angle ', circstats[7])
print('Alignment measure (AM) ', circstats[8])
hist, bin_edges = np.histogram(corrframe*180.0/np.pi, density=True, range=[-90.,90.], bins=40)
bin_center=0.5*(bin_edges[:-1]+bin_edges[1:])
fig=plt.figure()
ax1=plt.subplot(221)
plt.imshow(image1, cmap='bone', origin='lower')
ax1=plt.subplot(222)
plt.imshow(image2, cmap='copper', origin='lower')
ax1=plt.subplot(223)
im=plt.imshow(np.abs(corrframe)*180.0/np.pi, cmap='spring', origin='lower')
cb1=plt.colorbar(im) #,fraction=0.046, pad=0.04)
cb1.set_label(r'$|\phi|$ [deg]')
ax1=plt.subplot(224)
plt.step(bin_center, hist*100, color='red')
plt.ylabel('Histogram density [%]')
plt.xlabel(r'$\phi$ [deg]')
plt.xticks([-90.,-45.,0.,45.,90.])
plt.tight_layout()
plt.show()
#import pdb; pdb.set_trace()
|
from django.conf.urls.defaults import *
urlpatterns = patterns(
'trade.views',
url(r'^$', 'home', name='trade-home'),
url(r'^init_trade_request/$', 'init_trade_request', name='init-trade-request'),
)
|
import ev3dev.ev3 as ev3
from time import sleep
from classed_line_trace import LineTrace
ts = ev3.TouchSensor('in3')
lt = LineTrace()
if __name__ == "__main__":
while not (ts.value()):
lt.line_trace()
lt.stop()
|
#glassclient.py
#***********************************************
#Client program for RJGlass to display gauges
#***********************************************
import time, sys
import logging
def init_log():
level = logging.INFO
if '-debug' in sys.argv:
level = logging.DEBUG
logging.basicConfig(level=level, format='%(asctime)s.%(msecs)d %(levelname)s:%(message)s', datefmt='%H:%M:%S')
#Set up File log
logger = logging.getLogger()
handler = logging.FileHandler('GlassClient.log', mode='w')
#handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter('%(asctime)s.%(msecs)d %(levelname)s:%(message)s', '%H:%M:%S'))
logger.addHandler(handler)
#Initialize logging
init_log()
#Finish Imports
import display
import pyglet
import gauge
import client
class myEventLoop(pyglet.app.EventLoop):
def __init__(self):
super(myEventLoop, self).__init__()
self.FPS_clock = pyglet.clock.Clock()
self.fps_display = pyglet.clock.ClockDisplay(clock=self.FPS_clock)
def idle(self):
t = time.time()
pyglet.clock.tick(poll=True)
# display.win.dispatch_event('on_draw')
# display.win.flip()
# #print "IDLE"
# #print pyglet.clock.get_sleep_time(sleep_idle=True)
#time.sleep(0.01)
#print time.time()-t
#time.sleep((1/30.0)-t)
return pyglet.clock.get_sleep_time(sleep_idle=True)
def myDraw(self, dt):
self.FPS_clock.tick(poll=True)
display.win.dispatch_event('on_draw')
#Draw FPS Display
self.fps_display.draw()
display.win.flip()
def myDraw(dt):
#if c.VD_recv:
# c.VD_recv = False
#display.myDraw(dt)
pass
#print rx_count
#print c.rx_count
event_loop = myEventLoop()
#Start GlassClient program
c = client.client_c()
display = display.display_c('view.xml')
c.start()
pyglet.clock.schedule_interval(event_loop.myDraw, 1/30.0)
#pyglet.app.run()
#pyglet.clock.schedule_interval(myDraw, 1.0/60.0)
event_loop.run()
#Stop Client
c.stop()
|
import parmed as pmd
lib_file = 'GF2.lib'
mol2_red = '/home/haichit/research/rna_project/resp_fit/GF2/download_RED/P25049/Data-R.E.D.Server/Mol_MM/INTER/CT-A_m1-c1_m2-c1.mol2'
resname = 'GF2'
output = 'GF2_RED_update.lib'
# load off file and get 1st residue
res0 = pmd.load_file(lib_file)[resname]
res1 = pmd.load_file(mol2_red)
# mapping atom names to be replaced
atom_map = [
("O1P", "OP1"),
("O2P", "OP2"),
("F2'", "F"),
("H5'1", "H5'"),
("H5'2", "H5''"),
]
adict = dict(atom_map)
# print(adict)
# match atom name for mol2 file from R.E.D to tleap
# todo: idiom (a dict-based sketch follows after res0.save below)
for atom in res1:
if atom.name in adict:
atom.name = adict[atom.name]
# print (set(a.name for a in res0) ^ set(a.name for a in res1))
# update charge for off file
for atom0 in res0:
for atom1 in res1:
if atom1.name == atom0.name:
atom0.charge = atom1.charge
res0.save(output)
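# A dict-based sketch of the charge update above (illustrative, not called
# here; assumes atom names are unique within a residue, which the name
# matching above already relies on): build a name->charge dict once instead
# of the O(n^2) nested loop.
def update_charges_by_name(dst_res, src_res):
    charges = {atom.name: atom.charge for atom in src_res}
    for atom in dst_res:
        if atom.name in charges:
            atom.charge = charges[atom.name]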
|
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from keras.models import load_model
from pyAudioAnalysis import audioFeatureExtraction, audioBasicIO
import pandas as pd
import numpy as np
import argparse
import imutils
import pickle
import cv2
import os
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True,
help="Path to trained model")
ap.add_argument("-l", "--labelbin", required=True,
help="Path to label binarizer")
ap.add_argument("-i", "--file", required=True,
help="Path to input sound file")
args = vars(ap.parse_args())
FRAME_SIZE = 5.e-2  # seconds (50 ms window)
print("[INFO] Loading sound file")
[Fs, x] = audioBasicIO.readAudioFile(args['file'])
x = audioBasicIO.stereo2mono(x)
features, _ = audioFeatureExtraction.stFeatureExtraction(
x, Fs, FRAME_SIZE * Fs, FRAME_SIZE / 2 * Fs)
inputArray = np.expand_dims(features, axis=-1)  # add a trailing channel dimension
print("[INFO] loading network...")
model = load_model(args["model"])
lb = pickle.loads(open(args["labelbin"], "rb").read())
first_layer = model.get_layer(index=0)
required_input_shape = first_layer.get_config()['batch_input_shape'][1:]
print('[INFO] Required Shape:', required_input_shape)
print('[INFO] Actual shape:', inputArray.shape)
# Adjust input to match required shape
if required_input_shape[1] > inputArray.shape[1]:
zerosArray = np.zeros((required_input_shape[0], required_input_shape[1] - inputArray.shape[1], 1), dtype=inputArray.dtype)
inputArray = np.concatenate( (inputArray, zerosArray), axis = 1)
else:
inputArray = inputArray[:, :required_input_shape[1], :]
print('[INFO] Post processed actual shape:', inputArray.shape)
print("[INFO] classifying sound...")
proba = model.predict(np.expand_dims(inputArray, axis=0))[0]
idx = np.argmax(proba)
label = lb.classes_[idx]
label_with_predictions = {}
for i in range(len(proba)):
label_with_predictions[lb.classes_[i]] = proba[i]
print("[INFO] Probabilities:", label_with_predictions)
print("[INFO] Prediction {}".format(label))
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('response/<str:query>', views.graphData, name='response'),
path('list/<str:pq>',views.options,name='list')
]
|
def main():
    Read_character()
    line()
def Read_character():
    # count the characters in the essay file
    with open("/Users/again/Desktop/GIT/ProblemSolving/essay_file/Example.txt",'r') as infile:
        readcharacter = infile.read()
    read_charlen = len(readcharacter)
    print(read_charlen)
def line():
    # count the lines in the same file
    with open("/Users/again/Desktop/GIT/ProblemSolving/essay_file/Example.txt",'r') as infile:
        line1 = infile.read()
    result = len(line1.splitlines())
    print(result)
main()
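# A sketch that produces both counts in a single pass over the file
# (same path as above; illustrative, not called by main()):
def counts(path):
    with open(path, 'r') as infile:
        text = infile.read()
    return len(text), len(text.splitlines())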
|
# conftest.py: fixtures defined here are visible to every test file at or below this directory
# a project can contain multiple conftest files, at most one per directory
# fixtures from lower-level conftest files override same-named fixtures from higher-level ones
import pytest
@pytest.fixture()
def teardown_example():
a = 1
yield a
print(a)
# scope:
# session = entire pytest run
# module = 1 pytest file
# function = as name implies. 1 function
# class = as name implies. 1 class
#autouse:
# every test in the session will call the fixture automatically
@pytest.fixture(scope='session', autouse=False)
def fixture_example():
print('this is only called once per run because its scope is "session"')
# any test that invokes this fixture will be run 5 times. The tests will be reported with the names, "apple", "ball", "car", "dice", "elephant"
@pytest.fixture(params=[1, 2, 3, 4, 5], ids=['apple', 'ball', 'car', 'dice', 'elephant'])
def param_example(request):
return request.param
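# A hypothetical test file consuming the fixtures above (it would live in
# this directory or below it, e.g. test_example.py):
def test_teardown(teardown_example):
    # receives the yielded value; the print() after the yield runs at teardown
    assert teardown_example == 1
def test_param(param_example):
    # collected five times, reported as apple, ball, car, dice, elephant
    assert param_example in [1, 2, 3, 4, 5]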
|
# the socket module provides the socket programming API
import socket
# function simulating a server
def serverFunc():
    # 1. create the socket
    # socket.AF_INET: use the IPv4 protocol family
    # socket.SOCK_DGRAM: use UDP
    sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
    # 2. bind the ip and port
    # 127.0.0.1: this address means the local machine itself
    # 7852: an arbitrarily chosen port number
    # an address is a tuple (ip, port)
    addr = ("127.0.0.1",7852)
    sock.bind(addr)
    # receive the peer's message
    # recvfrom blocks until data arrives; there is no other outcome
    # recvfrom returns a tuple: (data, address)
    # the argument is the buffer size
    # rst = sock.recvfrom(500)
    data,addr = sock.recvfrom(500)
    print(data)
    print(type(data))
    # the received data is bytes; it must be decoded to get a str
    text = data.decode()
    print(type(text))
    print(text)
    # reply to the peer
    rsp = "wo bu e"
    # outgoing data must be encoded to bytes
    # the default encoding is utf-8
    data = rsp.encode()
    sock.sendto(data,addr)
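# A minimal matching client sketch (illustrative, not part of the original
# file): run it in a separate process while serverFunc() is waiting.
def clientFunc():
    sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
    # send one datagram to the server's address, then wait for the reply
    sock.sendto("ni hao".encode(), ("127.0.0.1",7852))
    data,addr = sock.recvfrom(500)
    print(data.decode())
    sock.close()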
if __name__ == '__main__':
import time
while 1:
try:
serverFunc()
except Exception as e:
print(e)
time.sleep(1)
|
# Generated by Django 2.1.7 on 2019-04-20 10:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('amazon', '0009_auto_20190420_1306'),
]
operations = [
migrations.CreateModel(
name='clothes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Product', models.CharField(max_length=50)),
('Price', models.IntegerField()),
],
options={
'db_table': 'clothes',
},
),
migrations.CreateModel(
name='footware',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Product', models.CharField(max_length=50)),
('Price', models.IntegerField()),
],
options={
'db_table': 'footware',
},
),
migrations.RenameModel(
old_name='women_shops',
new_name='accessories',
),
migrations.AlterModelTable(
name='accessories',
table='Accessories',
),
]
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import threading
import time
from pants.pantsd.service.store_gc_service import StoreGCService
from pants.testutil.rule_runner import RuleRunner
def test_run() -> None:
interval_secs = 0.1
# Start the service in another thread (`setup` is a required part of the service lifecycle, but
# is unused in this case.)
sgcs = StoreGCService(
RuleRunner().scheduler.scheduler,
period_secs=(interval_secs / 4),
lease_extension_interval_secs=interval_secs,
gc_interval_secs=interval_secs,
)
sgcs.setup(services=None) # type: ignore[arg-type]
t = threading.Thread(target=sgcs.run, name="sgcs")
t.daemon = True
t.start()
# Ensure that the thread runs successfully for long enough to have run each step at least once.
# TODO: This is a coverage test: although it could examine the internal details of the service
# to validate correctness, we don't do that yet.
time.sleep(interval_secs * 10)
assert t.is_alive()
# Exit the thread, and then join it.
sgcs.terminate()
t.join(timeout=interval_secs * 10)
assert not t.is_alive()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-10-01 17:57
from __future__ import unicode_literals
import benchmarklib.charts.models
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Chart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True)),
('xname', models.CharField(max_length=128)),
('xunit', models.CharField(blank=True, max_length=50, validators=[benchmarklib.charts.models.validate_unit])),
('yname', models.CharField(max_length=128)),
('yunit', models.CharField(blank=True, max_length=50, validators=[benchmarklib.charts.models.validate_unit])),
('legend', models.CharField(blank=True, max_length=20)),
('pub_date', models.DateTimeField(auto_now=True)),
('originator', models.CharField(default='guest', max_length=20)),
('reference', models.CharField(blank=True, max_length=500)),
('details', models.CharField(blank=True, max_length=1000)),
('filename', models.FileField(storage=benchmarklib.charts.models.OverwriteStorage(), upload_to=benchmarklib.charts.models.generate_filename)),
],
),
]
|
# Python Coroutines and Tasks.
# Coroutines declared with async/await syntax are the preferred way of writing asyncio applications.
#
# To actually run a coroutine, asyncio provides three main mechanisms:
#
# > The asyncio.run() function to run the top-level entry point “main()” function.
# > Awaiting on a coroutine.
# > The asyncio.create_task() function to run coroutines concurrently as asyncio Tasks.
# Sleeping:
# coroutine asyncio.sleep(delay, result=None, *, loop=None)
# Block for delay seconds.
# If result is provided, it is returned to the caller when the coroutine completes.
# sleep() always suspends the current task, allowing other tasks to run.
# The loop argument is deprecated and scheduled for removal in Python 3.10.
# Example of coroutine displaying the current date every second for 5 seconds:
#
import asyncio
import datetime
async def display_date():
loop = asyncio.get_running_loop()
end_time = loop.time() + 5.0
while True:
print(datetime.datetime.now())
if (loop.time() + 1.0) >= end_time:
break
await asyncio.sleep(1)
asyncio.run(display_date())
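# The mechanisms above also mention asyncio.create_task(); a minimal sketch
# (an illustration, not part of the original example) of running two
# coroutines concurrently as Tasks:
async def concurrent_demo():
    # both sleeps run concurrently, so this takes about 1 second, not 2
    t1 = asyncio.create_task(asyncio.sleep(1, result='first'))
    t2 = asyncio.create_task(asyncio.sleep(1, result='second'))
    print(await t1, await t2)
asyncio.run(concurrent_demo())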
|
# Generated by Django 2.1.5 on 2019-02-12 15:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('updd', '0003_auto_20190206_2149'),
]
operations = [
migrations.AddField(
model_name='personnel',
name='affectation',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='updd.Service'),
),
]
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains as AC
from selenium.common.exceptions import NoSuchElementException
from bs4 import BeautifulSoup
import time
import json
import os.path
import os
import sys
import tkinter as tk
from tkinter import ttk
import re
import subprocess
from lang import *
from switch import switch
__version__="v.1.4"
class Autocompleter():
def __init__(self) :
#PATH
self.PATH=os.path.dirname(__file__)
self.sellang()
self.chrome_options=webdriver.ChromeOptions()
self.chrome_options.add_argument('--disable-gpu')
self.chrome_options.add_argument('--hide-scrollbars')
self.chrome_options.add_argument('blink-settings=imagesEnabled=false')
fnull = open(os.devnull, 'w')
return1 = subprocess.call('ping 35.168.176.193', shell = True, stdout = fnull, stderr = fnull)
if return1:
self.error(self.get_dict_value(self.lang_dict,["error","Network_exception"]))
else:
fnull.close()
def get_dict_value(self,obj,pathlist):
obj=dict(obj)
for l in pathlist:
obj=obj[l]
return obj
def start(self):
self.msgbox()
    def date(self):
        # timestamp used to name screenshots, formatted as YYYYMMDD-HHMM-SS
        return time.strftime("%Y%m%d-%H%M-%S")
def exit(self):
try:
self.run.quit()
self._sellang.destroy()
self._msgbox.destroy()
self._close.destroy()
self._tkerror.destroy()
except:
pass
finally:
sys.exit(0)
def sellang(self):
with open(file="{}\\assest\\lang\\list.json".format(self.PATH),mode="r",encoding="utf-8") as f:
self.__langList__=dict(json.load(f))
self._sellang=tk.Tk()
self._sellang.title("Select Language")
self._sellang.iconbitmap(self.PATH+"\\assest\\ico\\war.ico")
frm=tk.Frame(self._sellang)
frm.pack(side="right",padx=5)
lab=tk.Frame(frm)
btn=tk.Frame(frm)
lab.pack()
btn.pack()
lab1=tk.Label(lab,text="Select Language",font=("微軟正黑體",12))
lab1.pack()
self.comlist=tk.StringVar()
com1=ttk.Combobox(lab,textvariable=self.comlist)
ll=[]
for i in self.__langList__.keys():
ll.append(self.__langList__[i])
com1["values"]=ll
com1.current(0)
com1.pack()
btn_Continue_execution=tk.Button(btn,text="Next",font=("微軟正黑體",12),command=self.next)
btn_Close=tk.Button(btn,text="Close",font=("微軟正黑體",12),command=self.exit)
btn_Continue_execution.pack(side="left",padx=5)
btn_Close.pack(side="right")
self._sellang.geometry()
self._sellang.mainloop()
def next(self):
self._sellang.destroy()
cl={}
for i in self.__langList__.keys():
cl[self.__langList__[i]]=i
for case in switch(cl[self.comlist.get()]):
if case("en_US"):
self.lang_dict=en_US().__langDict__
break
if case("zh_TW"):
self.lang_dict=zh_TW().__langDict__
break
if case("ja_JP"):
self.lang_dict=ja_JP().__langDict__
break
if case():
self.lang_dict=en_US().__langDict__
def msgbox(self):
#tk
self._msgbox=tk.Tk()
self._msgbox.title(self.get_dict_value(self.lang_dict,["GUI_msgbox","title"]))
self._msgbox.resizable(False, False)
self._msgbox.iconbitmap(self.PATH+"\\assest\\ico\\war.ico")
frm=tk.Frame(self._msgbox)
frm.pack(side="right",padx=5)
lab=tk.Frame(frm)
btn=tk.Frame(frm)
img=tk.Frame(self._msgbox)
img.pack(side="left",padx=5)
image_=tk.PhotoImage(file=self.PATH+"\\assest\\png\\war.png")
img1=tk.Label(img,image=image_)
img1.pack()
lab.pack()
lab1=tk.Label(lab,text=self.get_dict_value(self.lang_dict,["GUI_msgbox","lab1"]),font=("微軟正黑體",12))
lab2=tk.Label(lab,text=self.get_dict_value(self.lang_dict,["GUI_msgbox","lab2"]),font=("微軟正黑體",12))
lab3=tk.Label(lab,text=self.get_dict_value(self.lang_dict,["GUI_msgbox","lab3"]),font=("微軟正黑體",12))
lab1.pack()
lab2.pack()
lab3.pack()
btn.pack()
btn_Continue_execution=tk.Button(btn,text=self.get_dict_value(self.lang_dict,["GUI_msgbox","BTN_Continue_execution"]),font=("微軟正黑體",12),command=self.input_ep)
btn_Close=tk.Button(btn,text=self.get_dict_value(self.lang_dict,["GUI_msgbox","BTN_Close"]),font=("微軟正黑體",12),command=self.exit)
btn_Continue_execution.pack(side="left",padx=5)
btn_Close.pack(side="right")
self._msgbox.geometry()
self._msgbox.mainloop()
def input_ep(self):
try:
self._msgbox.destroy()
except:
pass
self._start=tk.Tk()
self._start.title(self.get_dict_value(self.lang_dict,["GUI_input_ep","title"]))
self._start.resizable(False, False)
        self._start.iconbitmap(self.PATH+"\\assest\\ico\\start.ico")
self.e_text=tk.StringVar()
self.p_text=tk.StringVar()
_input=tk.Frame(self._start)
_input.pack()
lab1=tk.Label(_input,text=self.get_dict_value(self.lang_dict,["GUI_input_ep","lab1"]),font=("微軟正黑體",12))
lab2=tk.Label(_input,text=self.get_dict_value(self.lang_dict,["GUI_input_ep","lab2"]),font=("微軟正黑體",12))
lab3=tk.Label(_input,text=self.get_dict_value(self.lang_dict,["GUI_input_ep","lab3"]),font=("微軟正黑體",12))
e_entry=tk.Entry(_input,font=("微軟正黑體",12),state=tk.NORMAL,textvariable=self.e_text,width=20)
p_entry=tk.Entry(_input,font=("微軟正黑體",12),state=tk.NORMAL,textvariable=self.p_text,width=20,show="\u25CF")
self.e_text.set(self.get_dict_value(self.lang_dict,["GUI_input_ep","email_input_default"]))
p_entry.bind("<Return>",self._data_login_e)
lab1.grid(row=0,column=1)
lab2.grid(row=1,column=0)
lab3.grid(row=2,column=0)
e_entry.grid(row=1,column=1,columnspan=2)
p_entry.grid(row=2,column=1,columnspan=2)
btn=tk.Frame(self._start)
btn.pack()
btn_Start=tk.Button(btn,text=self.get_dict_value(self.lang_dict,["GUI_input_ep","BTN_Start"]),font=("微軟正黑體",12),command=self._data_login)
btn_Start.pack()
self._start.geometry()
self._start.mainloop()
def restart(self):
self._tkerror.destroy()
self.input_ep()
def close(self) :
try:
self.run.quit()
except:
pass
self._close=tk.Tk()
self._close.title(self.get_dict_value(self.lang_dict,["GUI_close","title"]))
self._close.resizable(False, False)
        self._close.iconbitmap(self.PATH+"\\assest\\ico\\war.ico")
frm=tk.Frame(self._close)
frm.pack(side="right",padx=5)
lab=tk.Frame(frm)
btn=tk.Frame(frm)
img=tk.Frame(self._close)
img.pack(side="left",padx=5)
image_=tk.PhotoImage(file=self.PATH+"\\assest\\png\\war.png")
img1=tk.Label(img,image=image_)
img1.pack()
lab.pack()
lab1=tk.Label(lab,text=self.get_dict_value(self.lang_dict,["GUI_close","lab1"]),font=("微軟正黑體",12))
lab2=tk.Label(lab,text=self.get_dict_value(self.lang_dict,["GUI_close","lab2"]),font=("微軟正黑體",12))
lab3=tk.Label(lab,text=self.get_dict_value(self.lang_dict,["GUI_close","lab3"]),font=("微軟正黑體",12))
lab1.pack()
lab2.pack()
lab3.pack()
btn.pack()
btn_Continue_execution=tk.Button(btn,text=self.get_dict_value(self.lang_dict,["GUI_close","BTN_Continue_execution"]),font=("微軟正黑體",12),command=self.is_)
btn_Close=tk.Button(btn,text=self.get_dict_value(self.lang_dict,["GUI_close","BTN_Close"]),font=("微軟正黑體",12),command=self.exit)
btn_Continue_execution.pack(side="left",padx=5)
btn_Close.pack(side="right")
self._close.geometry()
self._close.mainloop()
def error(self,msg):
try:
self.run.quit()
except:
pass
self._tkerror=tk.Tk()
self._tkerror.title(self.get_dict_value(self.lang_dict,["GUI_error","title"]))
self._tkerror.resizable(False, False)
        self._tkerror.iconbitmap(self.PATH+"\\assest\\ico\\start.ico")
frm=tk.Frame(self._tkerror)
img=tk.Frame(self._tkerror)
img.pack(side="left",padx=5)
frm.pack(side="right")
_input=tk.Frame(frm)
_input.pack()
btn=tk.Frame(frm)
image_=tk.PhotoImage(file=self.PATH+"\\assest\\png\\error.png")
img1=tk.Label(img,image=image_)
img1.pack()
lab1=tk.Label(_input,text=self.get_dict_value(self.lang_dict,["GUI_error","lab1"]),font=("微軟正黑體",12))
lab2=tk.Label(_input,text=self.get_dict_value(self.lang_dict,["GUI_error","lab2_1"])+"\"{}\"".format(msg)+self.get_dict_value(self.lang_dict,["GUI_error","lab2_2"]),font=("微軟正黑體",12))
lab3=tk.Label(_input,text=self.get_dict_value(self.lang_dict,["GUI_error","lab3"]),font=("微軟正黑體",12))
lab4=tk.Label(_input,text=self.get_dict_value(self.lang_dict,["GUI_error","lab4"]),font=("微軟正黑體",12))
lab1.pack()
lab2.pack()
lab3.pack()
lab4.pack()
btn.pack()
btn_restart=tk.Button(btn,text=self.get_dict_value(self.lang_dict,["GUI_error","BTN_Restart"]),font=("微軟正黑體",12),command=self.restart)
btn_close=tk.Button(btn,text=self.get_dict_value(self.lang_dict,["GUI_error","BTN_Close"]),font=("微軟正黑體",12),command=self.exit)
btn_restart.pack(side="left",padx=5)
btn_close.pack(side="right")
self._tkerror.geometry()
self._tkerror.mainloop()
    def settings(self):
        self.set=tk.Tk()
def _data_login_e(self,event):
self._data_login()
def _data_login(self):
self._start.destroy()
self.ac=self.e_text.get()
self.pw=self.p_text.get()
self.login(self.ac,self.pw)
    def entry_word(self,css_selector,word,wait_sec) :
        # type the text into the element one character at a time
        entry_list=list(word)
self.run.find_element_by_css_selector(css_selector).clear()
for entry_key in entry_list :
self.run.find_element_by_css_selector(css_selector).send_keys(entry_key)
time.sleep(wait_sec)
def entry_key(self,word,WPM):
word_list=list(word)
sec=60/(WPM*5)
print(sec)
for wl in word_list :
AC(self.run).send_keys(wl).perform()
time.sleep(sec)
def login(self,email,password) :
        self.run=webdriver.Chrome(executable_path=self.PATH+"\\assest\\chromedriver.exe",chrome_options=self.chrome_options)
self.run.maximize_window()
self.run.set_page_load_timeout(10)
url="http://www.ratatype.com/login/"
self.run.get(url)
time.sleep(15)
self.run.execute_script('window.stop()')
time.sleep(3)
try:
self.run.find_element_by_css_selector("#email")
except NoSuchElementException :
self.error(self.get_dict_value(self.lang_dict,["error","Network_delay_is_too_high"]))
self.run.quit()
self.entry_word("#email",email,0.05)
time.sleep(0.5)
self.entry_word("#password",password,0.05)
time.sleep(0.5)
        try:
            self.run.find_element_by_css_selector("#fauth > div.form-group.btn-group-auth > button").click()
        except NoSuchElementException:
            self.error(self.get_dict_value(self.lang_dict,["error","Website_was_updated"]))
time.sleep(2)
try:
self.run.find_element_by_css_selector("body > div.center > div > div > div > div.rightSide > div > div:nth-child(3)")
except:
self.keystart()
self.error(self.get_dict_value(self.lang_dict,["error","Password_is_incorrect"]))
self.run.quit()
def keystart(self) :
time.sleep(2)
self.run.find_element_by_css_selector("body > div.center > div > div.rightSide > div > div:nth-child(7) > div.nextExercise > form > button").click()
time.sleep(3)
self.run.find_element_by_css_selector("#ui-id-1 > button").click()
time.sleep(2)
html_doc=self.run.page_source
soup = BeautifulSoup(html_doc, 'html.parser')
w=(soup.find("div",{"id":"str_in"}).string)
time.sleep(3)
        self.entry_key(w,105)
time.sleep(2)
self.run.save_screenshot(".\\assest\\finish image\\{}.png".format(self.date()))
time.sleep(1)
self.close()
#os.exit_(1)
def is_(self):
self._close.destroy()
self.login(self.ac,self.pw)
if __name__ == "__main__" :
pl=Autocompleter()
pl.start()
|
# Generated by Django 3.1.4 on 2020-12-13 10:10
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Users', '0002_auto_20201213_1005'),
]
operations = [
migrations.AlterField(
model_name='followlist',
name='followings',
field=models.ManyToManyField(related_name='followings', to=settings.AUTH_USER_MODEL),
),
]
|
import csv
import random
import datetime
import sys
__author__ = 'jambo'
import numpy as np
MAX_ITER = 100
class SVDModel:
def __init__(self, dataset, num_of_factors, regularization_constant, learning_rate):
self.dataset = dataset
self.average = self._average_rating()
self.b_users = np.zeros(dataset.n_users)
self.b_items = np.zeros(dataset.n_items)
self.p = np.random.random((dataset.n_users, num_of_factors)) - 0.5
self.q = np.random.random((dataset.n_items, num_of_factors)) - 0.5
self.regularization_constant = regularization_constant
self.learning_rate = learning_rate
self.validate_set_size = int(len(self.dataset.tests) * 0.2)
self.size = len(self.dataset.tests)
def predict(self, u, i):
return self.average + self.b_users[u] + self.b_items[i] + np.inner(self.p[u], self.q[i])
def fit_model(self):
self._sgd()
def rmse(self, cut=None):
if cut is None:
cut = self.size
estimate = np.array([self.predict(u, i) for u, i in self.dataset.tests])[:cut]
answers = self.dataset.answers[:cut]
return float(np.sqrt(np.mean((estimate - answers) ** 2)))
def _average_rating(self):
return np.average(self.dataset.ratings[self.dataset.ratings > 0])
def _error(self, u, i):
return self.dataset.ratings[(u, i)] - self.predict(u, i)
def validated_rmse(self):
return self.rmse(cut=self.validate_set_size)
def _sgd(self):
gamma = self.learning_rate
lam = self.regularization_constant
previous_rmse = None
        for _ in range(MAX_ITER):
random.shuffle(self.dataset.ratings_as_list)
for u, i, r in self.dataset.ratings_as_list:
error = self._error(u, i)
new_b_u = self.b_users[u] + gamma * (error - lam * self.b_users[u])
new_b_i = self.b_items[i] + gamma * (error - lam * self.b_items[i])
new_p_u = self.p[u] + gamma * (error * self.q[i] - lam * self.p[u])
new_q_i = self.q[i] + gamma * (error * self.p[u] - lam * self.q[i])
self.b_users[u], self.b_items[i], self.p[u], self.q[i] = new_b_u, new_b_i, new_p_u, new_q_i
new_rmse = self.validated_rmse()
print "validate rmse: %0.5f" % new_rmse
if previous_rmse is not None and previous_rmse - new_rmse < 5e-4:
break
previous_rmse = new_rmse
def grid_search(dataset):
"""
    Best-parameter search
"""
global results, learning_rate, factor_number, regularization_constant, model, time, rmse
results = []
for learning_rate in [0.005]:
for factor_number in [0, 5, 10, 50, 100]:
print "factor number = %d" % factor_number
for regularization_constant in [0.05, 0.1, 0.5, 1, 5]:
model = SVDModel(dataset, 50, regularization_constant, learning_rate)
time = datetime.datetime.now()
model.fit_model()
print "seconds_passed: %s" % (datetime.datetime.now() - time).total_seconds()
rmse = model.rmse()
print ("rmse for learning rate %0.4f and regularisation constant %0.4f: %0.5f"
% (learning_rate, regularization_constant, rmse))
results.append((rmse, factor_number, learning_rate, regularization_constant))
print "done"
for rmse, factor_number, learning_rate, regularization_constant in sorted(results):
print ("rmse for factor_number %d, learning rate %0.4f and regularisation constant %0.4f: %0.5f"
% (factor_number, learning_rate, regularization_constant, rmse))
|
# activate theano on gpu
import os;
#os.environ['THEANO_FLAGS'] = "device=gpu";
#import theano;
#theano.config.floatX = 'float32';
import numpy as np;
import sys, os;
import gzip;
from six.moves import cPickle;
from vae_conv import conv_variational_autoencoder;
from keras import backend as K;
import pdb
channels = 1;
batch_size = 32;
conv_layers = 3;
feature_maps = [128,128,128,128];
filter_shapes = [(3,3),(3,3),(3,3),(3,3)];
strides = [(1,1),(2,2),(1,1),(1,1)];
dense_layers = 1;
dense_neurons = [128];
dense_dropouts = [0];
latent_dim = 3;
epochs = 1;
nb_start = 0;
nb_end = 50;
# note: the assignments below override the values set above
epochs = 1
batch_size = 8
nb_start = 0
nb_end = 50
dim = 21
X_train = np.random.randn(2,dim,dim,1)
X_test = np.random.randn(1,dim,dim,1)
X_train = np.pad(X_train, ((0,0), (1,1), (1,1), (0,0)), 'constant')[:, 1:, 1:, :]
X_test = np.pad(X_test, ((0,0), (1,1), (1,1), (0,0)), 'constant')[:, 1:, 1:, :]
print(X_train)
feature_maps = feature_maps[0:conv_layers];
filter_shapes = filter_shapes[0:conv_layers];
strides = strides[0:conv_layers];
image_size = X_train.shape[1:];
autoencoder = conv_variational_autoencoder(image_size,channels,conv_layers,feature_maps,
filter_shapes,strides,dense_layers,dense_neurons,dense_dropouts,latent_dim);
for i in range (nb_start, nb_end):
if i == 0:
print("skipping - no previous saved file to load")
# load model;
else:
autoencoder.load("./model/model_%i" %i)
# train model;
    print(X_train.shape)
    print(X_test.shape)
pdb.set_trace()
autoencoder.train(X_train[0:],batch_size,epochs=epochs,
validation_data=(), checkpoint=False,filepath="./savedweights.dat");
# save model;
print "pass"
autoencoder.save(filepath="./model/model_%i" %(i+1));
# save loss over train & validation;
np.savetxt('./hist/history.losses_%i' %(i+1), autoencoder.history.losses, delimiter=',');
np.savetxt('./hist/history.val_losses_%i' %(i+1), autoencoder.history.val_losses, delimiter=',');
print('completed %i epochs' % ((i+1)*epochs));
|
from django.shortcuts import render
from django.http import HttpResponse,HttpResponseRedirect
from .models import BlogUser
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate,login,logout
from json import dumps
from .forms import UserForm,UserProfileForm
from django.contrib.auth.decorators import login_required
def index(request):
    '''
    Profile page.
    Shows the user's profile and related information when logged in;
    otherwise renders an empty page.
    '''
    if request.user.is_authenticated: #Django's built-in check for an authenticated request
        user=request.user
        userProfile=BlogUser.objects.get(user=user)
        hasLogin=True
    else:
        user=False
        userProfile=[]
        hasLogin=False
    data={'userProfile':userProfile,'hasLogin':hasLogin,'user':user}
    return render(request, 'accounts/index.html',data)
@csrf_exempt
def register(request):
    '''
    Register a new account
    '''
if request.method == 'GET':
if request.user.is_authenticated:
user=request.user
hasLogin=True
else:
hasLogin=False
user=False
userForm = UserForm()
profileForm = UserProfileForm()
data={'userForm':userForm,'profileForm':profileForm,'hasLogin':hasLogin,'user':user}
return render(request, 'accounts/register.html', data)
elif request.method == 'POST':
userForm = UserForm(request.POST)
userProfileForm = UserProfileForm(request.POST)
if userForm.is_valid() and userProfileForm.is_valid():
user=userForm.save()
user.set_password(user.password)
user.save()
profile=userProfileForm.save(commit=False)
profile.user = user
if 'mugshot' in request.FILES:
profile.mugshot = request.FILES['mugshot']
profile.save()
# user_login(request)
return HttpResponseRedirect('/user/')
else:
req={'message':'fail','reason':'未接收到正确的表单,或创建过程中出错'}
return HttpResponse(dumps(req),content_type="application/json")
@csrf_exempt
def user_login(request):
    '''
    User login page
    and the backend login logic
    '''
if request.method == 'GET':
if request.user.is_authenticated:
user=request.user
hasLogin=True
else:
hasLogin=False
user=False
data={'hasLogin':hasLogin,'user':user}
return render(request, 'accounts/login.html', data)
elif request.method == 'POST':
username=request.POST['username']
password=request.POST['password']
user=authenticate(username=username,password=password)
if user:
if user.is_active:
login(request,user)
req={'message':'success','reason':'登陆成功'}
return HttpResponse(dumps(req),content_type="application/json")
else:
req={'message':'fail','reason':'该用户以被禁止登陆'}
return HttpResponse(dumps(req),content_type="application/json")
else:
req={'message':'fail','reason':'错误的用户名和密码'}
return HttpResponse(dumps(req),content_type="application/json")
@login_required
def user_logout(request):
    '''
    Log the user out
    '''
logout(request)
return HttpResponseRedirect('/user/login')
@csrf_exempt
@login_required
def resetpassword(request):
    '''
    Reset the password
    '''
if request.method == 'GET':
if request.user.is_authenticated:
user=request.user
hasLogin=True
else:
hasLogin=False
user=False
data={'hasLogin':hasLogin,'user':user}
return render(request, 'accounts/resetpassword.html', data)
elif request.method == 'POST':
user=request.user
user.set_password(request.POST['password'])
user.save()
req={'message':'success','reason':'更改成功'}
return HttpResponse(dumps(req),content_type="application/json")
else:
        req={'message':'fail','reason':'请求方式错误'}
return HttpResponse(dumps(req),content_type="application/json")
@csrf_exempt
def getUserInfo(request):
    '''
    Get user info
    '''
return HttpResponse('todo')
|
#!/usr/bin/env python3
# # QT Buttons 1
#
# Converts all images in a dir to greyscale adding a prefix
from PIL import Image
import os
prefix='inactive'
included_extensions = ['jpg', 'bmp', 'png', 'gif']
file_names = [fn for fn in os.listdir() if any(fn.endswith(ext) for ext in included_extensions)]
for f in file_names:
img = Image.open(f).convert('LA')
img.save(prefix+'_'+f)
|
# Generated by Django 2.0.3 on 2018-11-11 08:23
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
atomic=False
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('orders', '0006_orders_checkedout'),
]
operations = [
migrations.RenameModel(
old_name='Orders',
new_name='Order',
),
]
|
import unittest
from header import Header
from packer import Packer
from unpacker import Unpacker
class TestHeader(unittest.TestCase):
def test_pack(self):
buf = bytearray(256)
packer = Packer(buf)
header = Header()
header.version = 2
header.length = 23
header.id = 42
packer.pack_message(header)
self.assertEqual(10, packer.offset)
def test_unpack(self):
buf = bytearray(256)
packer = Packer(buf)
header = Header()
header.version = 2
header.length = 23
header.id = 42
packer.pack_message(header)
self.assertEqual(10, packer.offset)
unpacker = Unpacker(bytes(buf[:packer.offset]))
hdr = Header()
hdr.unpack(unpacker)
self.assertEqual(header.version, hdr.version)
self.assertEqual(header.length, hdr.length)
self.assertEqual(header.id, hdr.id)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"Link Class mapping to SQL table"
from django.db import models
from django.utils.html import format_html
from labman2.data.subdata.ReST.models import ReST
#from labman2.data.models import rest_to_html
#==============================================================================
# A numerical value, e.g. a universal constant or a simple measurement
class NumValue(ReST):
"A simple numeric data entry"
num_value = models.FloatField(null=True, blank=True,
help_text="Use style like 1.234e-5")
def __unicode__(self):
try:
return format_html(u'{0}: {1}',
str(self.num_value),
ReST.__unicode__(self))
except StandardError as err:
print "NumValue.__unicode__", err
return u'%s: %s' % (str(self.num_value), u"ReST.__unicode__(self)")
def show_details(self):
'Return a list of the fields for detailed view'
info_dict = [('NumValue', str(self.num_value))]
info_dict.extend(ReST.show_details(self))
return info_dict
|
from model import AmbarFileMeta, AmbarFileContent
from datetime import datetime
from hashlib import sha256
from subprocess import call
from os import walk, path
import hashlib
import re
import io
class PstProcessor():
def __init__(self, Logger, ApiProxy):
self.logger = Logger
self.apiProxy = ApiProxy
self.tempPath = '/pst-temp'
self.pstFileName = 'archive.pst'
def CleanUpTemp(self):
retcode = -1
try:
retcode = call('rm -rf {0}/*'.format(self.tempPath), shell=True)
if retcode != 0:
self.logger.LogMessage('info', 'error cleaning temp dir, code: {0}'.format(retcode))
return False
except Exception as e:
self.logger.LogMessage('info', 'error cleaning temp dir')
return False
return True
def ExtractPstArchive(self):
retcode = -1
cmd = 'readpst -o {0} -D -j 1 -r -tea -u -w -e {0}/{1}'.format(self.tempPath, self.pstFileName)
try:
retcode = call(cmd, shell=True)
if retcode != 0:
self.logger.LogMessage('info', 'error extracting pst, code: {0}'.format(retcode))
return False
except Exception as e:
self.logger.LogMessage('error', 'error extracting pst {0}'.format(repr(e)))
return False
return True
def WriteFileData(self, FileData):
try:
f = open('{0}/{1}'.format(self.tempPath, self.pstFileName), 'wb')
f.write(FileData)
f.close()
except Exception as e:
self.logger.LogMessage('error', 'error writing file {0}'.format(repr(e)))
return False
return True
def ReadFileData(self, FilePath):
try:
f = open(FilePath, 'rb')
fileData = f.read()
f.close()
return fileData
except Exception as e:
self.logger.LogMessage('error', 'error reading file {0} {1}'.format(FilePath, e))
return None
def Process(self, FileData, FileMeta, SourceId):
self.logger.LogMessage('verbose', 'processing pst archive {0}'.format(FileMeta.full_name))
try:
if not self.CleanUpTemp():
return
if not self.WriteFileData(FileData):
return
if not self.ExtractPstArchive():
return
for (dirpath, dirnames, filenames) in walk(self.tempPath):
for fileName in filenames:
self.logger.LogMessage('verbose', 'enqueuing file {0} from pst archive {1}'.format(fileName, FileMeta.full_name))
fullNameInArchive = '{0}{1}'.format(FileMeta.full_name, path.join(dirpath.replace(self.tempPath,''), fileName))
fullNameInFs = path.join(dirpath, fileName)
fileData = self.ReadFileData(fullNameInFs)
if not fileData:
continue
sha = sha256(fileData).hexdigest()
size = len(fileData)
if size == 0:
continue
                    # checking whether the content already exists
apiResp = self.apiProxy.CheckIfParsedAmbarFileContentExists(sha)
if not apiResp.Success:
                        self.logger.LogMessage('error', 'error checking content existence {0} {1}'.format(fullNameInArchive, apiResp.message))
continue
if not (apiResp.Found or apiResp.NotFound):
                        self.logger.LogMessage('error', 'unexpected response on checking content existence {0} {1} {2}'.format(fullNameInArchive, apiResp.code, apiResp.message))
continue
if apiResp.NotFound:
self.logger.LogMessage(
'verbose', 'content not found {0}'.format(fullNameInArchive))
# creating content
createContentApiResp = self.apiProxy.CreateAmbarFileContent(fileData, sha)
if not createContentApiResp.Success:
self.logger.LogMessage('error', 'error creating content {0} {1}'.format(fullNameInArchive, createContentApiResp.message))
continue
if not (createContentApiResp.Found or createContentApiResp.Created):
self.logger.LogMessage('error', 'unexpected response on create content {0} {1} {2}'.format(fullNameInArchive, createContentApiResp.code, createContentApiResp.message))
continue
if createContentApiResp.Found:
self.logger.LogMessage('verbose', 'content found {0}'.format(fullNameInArchive))
if createContentApiResp.Created:
self.logger.LogMessage('verbose', 'content created {0}'.format(fullNameInArchive))
if apiResp.Found:
self.logger.LogMessage('verbose', 'content found {0}'.format(fullNameInArchive))
# sending meta back to queue
fileMeta = AmbarFileMeta.InitWithoutId(FileMeta.created_datetime, FileMeta.updated_datetime, fileName, fullNameInArchive, FileMeta.source_id, [{'key': 'from_container', 'value': 'true'}])
apiResp = self.apiProxy.EnqueueAmbarFileMeta(fileMeta, sha, SourceId)
if not apiResp.Success:
self.logger.LogMessage('error', 'error adding meta {0} {1}'.format(
fileMeta.full_name, apiResp.message))
continue
if apiResp.BadRequest:
self.logger.LogMessage('verbose', 'bad meta, ignoring... {0}'.format(fileMeta.full_name))
continue
if not apiResp.Ok:
self.logger.LogMessage('error', 'unexpected response on adding meta {0} {1} {2}'.format(fileMeta.full_name, apiResp.code, apiResp.message))
continue
self.logger.LogMessage('verbose', 'meta added {0}'.format(fileMeta.full_name))
self.CleanUpTemp()
except Exception as ex:
self.logger.LogMessage(
'info', 'unable to unpack {0} {1}'.format(FileMeta.full_name, ex))
|
#!/usr/bin/python
import pyOTDR
import sys
import os
import xlsxwriter
import re
import time
from config import *
import matplotlib.pyplot as plt
import kvCreateXLSReport
def processReports(filenames):
createXLSReports(filenames)
def convertPair(s):
return map(float, re.findall(r'(.*)\t(.*)\n', s)[0])
def createXLSReports(filenames):
print('Старт программы')
pathReport = os.path.join(os.path.dirname(os.path.normpath(filenames[0])), f'Report {len(filenames)} traces.xlsx')
print(f'Имя файла отчёта: {pathReport}')
print('Перед созданием файла')
# if not os.path.exists(pathReport) and os.access(pathReport, os.R_OK):
print('Создание книги')
workbook = xlsxwriter.Workbook(pathReport)
prop = {'font_name': 'Arial',
'font_size': '11'}
    # Set the formatting parameters for the workbook
cellFormatHeader = workbook.add_format(prop)
cellFormatHeader.set_font_size(16)
cellFormatHeader.set_bold(True)
cellFormatSubHeader = workbook.add_format(prop)
cellFormatSubHeader.set_bold(True)
prop_table = {'font_name': 'Arial',
'font_size': '11',
'border': 1,
'valign': 'center'}
cellFormatTableHeader = workbook.add_format(prop_table)
cellFormatTableDataCenter = workbook.add_format(prop_table)
cellFormatTableDataCenter.set_align('center')
cellFormatTableDataLeft = workbook.add_format(prop_table)
cellFormatTableDataLeft.set_align('left')
cellFormatTableDataRight = workbook.add_format(prop_table)
cellFormatTableDataRight.set_align('right')
START_EVENT_ROW = 42
cellFormatMainText = workbook.add_format(prop)
print('Перед прогоном файлов')
c = 1
width_columns = [9.14, 15, 18, 15, 15, 18.29, 5.29]
enum_widths = enumerate(width_columns)
for filename in filenames:
status, results, tracedata = pyOTDR.ConvertSORtoTPL(filename)
        # TODO: rework this function, since not all files are named with the addresses of both ends
Addr1, Port1, Addr2, Port2 = parseFilenameSOR(filename)
if str(results["FxdParams"]["unit"]) == "km (kilometers)":
unit = "км"
else:
unit = "ошибка"
        # Create a worksheet for the report
worksheet = workbook.add_worksheet(f'{c}')
c += 1
worksheet.set_portrait()
worksheet.set_paper(9)
        # set the column widths
enum_widths = enumerate(width_columns)
for col, width in enum_widths:
worksheet.set_column(col, col, width)
        # Report title
        worksheet.write('C2', f'Отчёт OTDR', cellFormatHeader)
        # Parameters subheader
        worksheet.write('C4', f'Параметры', cellFormatSubHeader)
        # Parameters, left column
worksheet.write('A5', f'Начало: {Addr1}', cellFormatMainText)
worksheet.write('A6', f'Кабель:', cellFormatMainText)
worksheet.write('A7', f'Диапазон: {results["FxdParams"]["range"]:6.3f} {unit}', cellFormatMainText)
worksheet.write('A8', f'Длина волны: {results["FxdParams"]["wavelength"]}', cellFormatMainText)
worksheet.write('A9', f'Порог потерь: {(results["FxdParams"]["loss thr"]).replace("dB", "дБ")}',
cellFormatMainText)
regexptime = r'\w+ \((.*)\ sec\)'
inttime = int(re.findall(regexptime, results["FxdParams"]["date/time"], re.IGNORECASE)[0])
dt = time.strftime('%d.%m.%Y %H:%M:%S', time.localtime(inttime))
worksheet.write('A10', f'Дата : {dt}', cellFormatMainText)
worksheet.write('A11', f'OTDR: {results["SupParams"]["OTDR"]} S/N: {results["SupParams"]["OTDR S/N"]}',
cellFormatMainText)
worksheet.write('A12', f'Модуль: {results["SupParams"]["module"]} S/N: {results["SupParams"]["module S/N"]}',
cellFormatMainText)
worksheet.write('A13', 'Заказчик: ПАО "Ростелеком', cellFormatMainText)
worksheet.write('A14', 'Подрядчик: АО "ТКТ-Строй', cellFormatMainText)
        # Parameters, right column
worksheet.write('D5', f'Конец: {Addr2}', cellFormatMainText)
worksheet.write('D6', f'Волокно: {Port2}', cellFormatMainText)
worksheet.write('D7', f'Импульс: {(results["FxdParams"]["pulse width"]).replace("ns", "нс")}',
cellFormatMainText)
worksheet.write('D8', f'Коэф. преломления: {results["FxdParams"]["index"]}', cellFormatMainText)
worksheet.write('D9', f'Порог отражения: {results["FxdParams"]["refl thr"]}', cellFormatMainText)
worksheet.write('D10', f'Файл: {results["filename"]}', cellFormatMainText)
        # Measurement results subheader
worksheet.write('C16', f'Результат измерений', cellFormatSubHeader)
numEvents = results["KeyEvents"]["num events"]
distance = results["KeyEvents"][f'event {numEvents}']['distance']
totalLoss = results["KeyEvents"]["Summary"]['total loss']
        lengthLoss = float(totalLoss) / float(distance)
        # Measurement results
        worksheet.write('A17', f'Длина волокна: \t{distance} {unit}', cellFormatMainText)
        worksheet.write('A18', f'Затухание: \t{lengthLoss:5.3f} дБ/{unit}', cellFormatMainText)
worksheet.write('E17', f'Полные потери: \t{totalLoss} дБ', cellFormatMainText)
        # Collect the events into a list for the plot and the table
events = []
for numEvent in range(numEvents):
event = results["KeyEvents"][f'event {numEvent + 1}']
spliceLoss = "---" if float(event["splice loss"]) == 0.00 else event["splice loss"]
reflectLoss = "---" if event["refl loss"] == "0.000" else event["refl loss"]
if numEvent + 1 == numEvents:
typeEvent = "Конец"
elif float(event["splice loss"]) < 0:
typeEvent = "Положит. дефект"
else:
typeEvent = "Потери"
events.append((numEvent + 1, typeEvent, event["distance"], spliceLoss, reflectLoss, event["slope"]))
        # The plot is drawn here
# path = os.path.normpath("D:\develop\python_projects\sorViewer\Гагарина 6а [2]-trace.dat")
resultTpl = [convertPair(elem) for elem in tracedata]
xs = []
ys = []
for x, y in resultTpl:
xs.append(x)
ys.append(y)
plt.grid(True)
# plt.plot([1.442, 1.442], [17, 15], label='1', color='red')
# plt.plot([3.332, 3.332], [17, 15], label='2', color='red')
plt.plot(xs, ys, linewidth=0.4, color='black')
plt.title('Рефлектограмма OTDR')
deltax = float(len(xs)*0.025*(xs[1] - xs[0]))
plt.axis([-deltax*0.3, max(xs), -0.05, max(ys)])
plt.xlabel('Длина, км')
plt.ylabel('дБ')
        # TODO: finish this function; tick marks should be placed depending on the events.
for i, event in enumerate(events):
f = False
for n, x in enumerate(xs):
if float(event[2]) < x:
f = True
break
d = n-int(len(ys)*0.002)
if f:
level = ys[d]
else:
level = 0.0
plt.text(xs[d], level-1.5, event[0])
plt.plot([xs[d], xs[d]], [level+1, level-1], label='1', color='red')
if i < numEvents-1:
continue
plt.arrow(xs[d], level+1, -deltax, 0, color='red', linewidth=0.5, shape='full', head_width=0.4, head_length=deltax*0.2)
plt.arrow(xs[d], level-1, -deltax, 0, color='red', linewidth=0.5, shape='full', head_width=0.4, head_length=deltax*0.2)
fname, = os.path.splitext(os.path.basename(filename))[:-1]
pngname = os.path.join(os.path.dirname(filename), fname + '.png')
plt.savefig(pngname, dpi=300)
plt.close()
worksheet.insert_image('A20', pngname, {'x_offset': 40, 'x_scale': 0.9, 'y_scale': 0.9})
        # The events table is drawn here
        worksheet.write('C41', 'Таблица событий', cellFormatSubHeader)
        # Draw the table header
worksheet.write_row(START_EVENT_ROW-1, 0,
('№', 'Тип', 'Дистанция', 'Потери, дБ', 'Отражение, дБ', 'Затухание, дБ/км'),
cellFormatTableHeader)
        # Fill the event data into the table
for n, curEvent in enumerate(events):
worksheet.write_row(START_EVENT_ROW + n, 0, curEvent, cell_format=cellFormatTableDataCenter)
        # Set the print area
worksheet.print_area('A1:G57')
worksheet.fit_to_pages(1, 1)
workbook.close()
print('Книга закрылась, запись удалась')
def parseFilenameSOR(filename):
regexp = r'(.*)\[(.*)\].*[!-](.*)\[(.*)\](.*)'
addressPackage = re.findall(regexp, os.path.split(filename)[-1], re.IGNORECASE)[0][:-1]
Addr1, Port1, Addr2, Port2 = addressPackage
return Addr1, Port1, Addr2, Port2
if __name__ == '__main__':
filenames = sys.argv[1:]
processReports(filenames)
|
"""A CNC-CR-DQN agent training on Atari.
"""
# pylint: disable=g-bad-import-order
import collections
import itertools
import sys
import typing
from absl import app
from absl import flags
from absl import logging
import dm_env
import haiku as hk
import jax
from jax.config import config
import jax.numpy as jnp
import numpy as np
import optax
from dqn_zoo import atari_data
from dqn_zoo import gym_atari
from dqn_zoo import networks
from dqn_zoo import parts
from dqn_zoo import processors
from dqn_zoo import replay as replay_lib
from typing import Any, Callable, Mapping, Text
import rlax
from rlax._src.value_learning import _quantile_regression_loss as rlax_quantile_regression_loss
import chex
Array = chex.Array
Numeric = chex.Numeric
#from dqn_zoo.cnc_cr_dqn.cnc_cr_dqn_parts import qr_atari_network, nc_qr_atari_network, symm_qr_atari_network, _batch_quantile_q_learning
from cnc_cr_dqn_parts import qr_atari_network, nc_qr_atari_network, symm_qr_atari_network, _batch_quantile_q_learning
class CrDqn:
"""Quantile Regression DQN agent, using Cramér loss"""
def __init__(
self,
preprocessor: processors.Processor,
sample_network_input: jnp.ndarray,
network: parts.Network,
#quantiles: jnp.ndarray,
optimizer: optax.GradientTransformation,
transition_accumulator: Any,
replay: replay_lib.TransitionReplay,
batch_size: int,
exploration_epsilon: Callable[[int], float],
min_replay_capacity_fraction: float,
learn_period: int,
target_network_update_period: int,
rng_key: parts.PRNGKey,
):
self._preprocessor = preprocessor
self._replay = replay
self._transition_accumulator = transition_accumulator
self._batch_size = batch_size
self._exploration_epsilon = exploration_epsilon
self._min_replay_capacity = min_replay_capacity_fraction * replay.capacity
self._learn_period = learn_period
self._target_network_update_period = target_network_update_period
# Initialize network parameters and optimizer.
self._rng_key, network_rng_key = jax.random.split(rng_key)
self._online_params = network.init(network_rng_key,
sample_network_input[None, ...])
self._target_params = self._online_params
self._opt_state = optimizer.init(self._online_params)
# Other agent state: last action, frame count, etc.
self._action = None
self._frame_t = -1 # Current frame index.
# Define jitted loss, update, and policy functions here instead of as
# class methods, to emphasize that these are meant to be pure functions
# and should not access the agent object's state via `self`.
def loss_fn(online_params, target_params, transitions, rng_key):
"""Calculates loss given network parameters and transitions."""
# Compute Q value distributions.
_, online_key, target_key = jax.random.split(rng_key, 3)
dist_q_tm1 = network.apply(online_params, online_key,
transitions.s_tm1).q_dist
q_target_t = network.apply(target_params, target_key,
transitions.s_t)
dist_q_target_t = q_target_t.q_dist
#this could be used instead of recomputing the mean
q_values_target_t = q_target_t.q_values
losses = _batch_quantile_q_learning(
dist_q_tm1,
transitions.a_tm1,
transitions.r_t,
transitions.discount_t,
dist_q_target_t, # No double Q-learning here.
dist_q_target_t,
q_values_target_t,
)
assert losses.shape == (self._batch_size,)
loss = jnp.mean(losses)
return loss
def update(rng_key, opt_state, online_params, target_params, transitions):
"""Computes learning update from batch of replay transitions."""
rng_key, update_key = jax.random.split(rng_key)
d_loss_d_params = jax.grad(loss_fn)(online_params, target_params,
transitions, update_key)
updates, new_opt_state = optimizer.update(d_loss_d_params, opt_state)
new_online_params = optax.apply_updates(online_params, updates)
return rng_key, new_opt_state, new_online_params
self._update = jax.jit(update)
def select_action(rng_key, network_params, s_t, exploration_epsilon):
"""Samples action from eps-greedy policy wrt Q-values at given state."""
rng_key, apply_key, policy_key = jax.random.split(rng_key, 3)
q_t = network.apply(network_params, apply_key, s_t[None, ...]).q_values[0]
a_t = rlax.epsilon_greedy().sample(policy_key, q_t, exploration_epsilon)
return rng_key, a_t
self._select_action = jax.jit(select_action)
#self._select_action = select_action
def step(self, timestep: dm_env.TimeStep) -> parts.Action:
"""Selects action given timestep and potentially learns."""
self._frame_t += 1
timestep = self._preprocessor(timestep)
if timestep is None: # Repeat action.
action = self._action
else:
action = self._action = self._act(timestep)
for transition in self._transition_accumulator.step(timestep, action):
self._replay.add(transition)
if self._replay.size < self._min_replay_capacity:
return action
if self._frame_t % self._learn_period == 0:
self._learn()
if self._frame_t % self._target_network_update_period == 0:
self._target_params = self._online_params
return action
def reset(self) -> None:
"""Resets the agent's episodic state such as frame stack and action repeat.
This method should be called at the beginning of every episode.
"""
self._transition_accumulator.reset()
processors.reset(self._preprocessor)
self._action = None
def _act(self, timestep) -> parts.Action:
"""Selects action given timestep, according to epsilon-greedy policy."""
s_t = timestep.observation
self._rng_key, a_t = self._select_action(self._rng_key, self._online_params,
s_t, self.exploration_epsilon)
return parts.Action(jax.device_get(a_t))
def _learn(self) -> None:
"""Samples a batch of transitions from replay and learns from it."""
logging.log_first_n(logging.INFO, 'Begin learning', 1)
transitions = self._replay.sample(self._batch_size)
#print(frame_t)
self._rng_key, self._opt_state, self._online_params = self._update(
self._rng_key,
self._opt_state,
self._online_params,
self._target_params,
transitions
)
@property
def online_params(self) -> parts.NetworkParams:
"""Returns current parameters of Q-network."""
return self._online_params
@property
def exploration_epsilon(self) -> float:
"""Returns epsilon value currently used by (eps-greedy) behavior policy."""
return self._exploration_epsilon(self._frame_t)
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves agent state as a dictionary (e.g. for serialization)."""
state = {
'rng_key': self._rng_key,
'frame_t': self._frame_t,
'opt_state': self._opt_state,
'online_params': self._online_params,
'target_params': self._target_params,
'replay': self._replay.get_state(),
}
return state
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets agent state from a (potentially de-serialized) dictionary."""
self._rng_key = state['rng_key']
self._frame_t = state['frame_t']
self._opt_state = jax.device_put(state['opt_state'])
self._online_params = jax.device_put(state['online_params'])
self._target_params = jax.device_put(state['target_params'])
self._replay.set_state(state['replay'])
@property
def statistics(self) -> Mapping[Text, float]:
"""Returns current agent statistics as a dictionary."""
return {}
########################################################################
# Relevant flag values are expressed in terms of environment frames.
FLAGS = flags.FLAGS
flags.DEFINE_string('environment_name', 'pong', '')
flags.DEFINE_integer('environment_height', 84, '')
flags.DEFINE_integer('environment_width', 84, '')
flags.DEFINE_bool('use_gym', False, '')
flags.DEFINE_integer('replay_capacity', int(1e6), '')
flags.DEFINE_bool('compress_state', True, '')
flags.DEFINE_float('min_replay_capacity_fraction', 0.05, '')
flags.DEFINE_integer('batch_size', 32, '')
flags.DEFINE_integer('max_frames_per_episode', 108000, '') # 30 mins.
flags.DEFINE_integer('num_action_repeats', 4, '')
flags.DEFINE_integer('num_stacked_frames', 4, '')
flags.DEFINE_float('exploration_epsilon_begin_value', 1., '')
flags.DEFINE_float('exploration_epsilon_end_value', 0.01, '')
flags.DEFINE_float('exploration_epsilon_decay_frame_fraction', 0.02, '')
flags.DEFINE_float('eval_exploration_epsilon', 0.001, '')
flags.DEFINE_integer('target_network_update_period', int(4e4), '')
flags.DEFINE_float('learning_rate', 0.00005, '')
flags.DEFINE_float('optimizer_epsilon', 0.01 / 32, '')
flags.DEFINE_float('additional_discount', 0.99, '')
flags.DEFINE_float('max_abs_reward', 1., '')
flags.DEFINE_float('max_global_grad_norm', 10., '')
flags.DEFINE_integer('seed', 1, '') # GPU may introduce nondeterminism.
flags.DEFINE_integer('num_iterations', 200, '')
flags.DEFINE_integer('num_train_frames', int(1e6), '') # Per iteration.
flags.DEFINE_integer('num_eval_frames', int(5e5), '') # Per iteration.
flags.DEFINE_integer('learn_period', 16, '')
flags.DEFINE_string('results_csv_path', '/tmp/results.csv', '')
flags.DEFINE_integer('num_quantiles', 201, '')
flags.DEFINE_integer('n_nodes', 512, '')
flags.DEFINE_integer('n_layers', 1, '')
flags.DEFINE_bool('nc', False, '')
flags.DEFINE_bool('symm', False, '')
def main(argv):
"""Trains CR-DQN agent on Atari."""
logging.info(FLAGS.flags_into_string())
del argv
logging.info('CR-DQN (Cramer-distance DQN) on Atari, running on %s.',
jax.lib.xla_bridge.get_backend().platform)
random_state = np.random.RandomState(FLAGS.seed)
rng_key = jax.random.PRNGKey(
random_state.randint(-sys.maxsize - 1, sys.maxsize + 1))
if FLAGS.results_csv_path:
writer = parts.CsvWriter(FLAGS.results_csv_path)
else:
writer = parts.NullWriter()
def environment_builder():
"""Creates Atari environment."""
env = gym_atari.GymAtari(
FLAGS.environment_name, seed=random_state.randint(1, 2**32))
return gym_atari.RandomNoopsEnvironmentWrapper(
env,
min_noop_steps=1,
max_noop_steps=30,
seed=random_state.randint(1, 2**32),
)
env = environment_builder()
logging.info('Environment: %s', FLAGS.environment_name)
logging.info('Action spec: %s', env.action_spec())
logging.info('Observation spec: %s', env.observation_spec())
num_actions = env.action_spec().num_values
#if FLAGS.nc and FLAGS.num_quantiles%2 != 0: #must be even
# print("num_quantiles must be even for nc")
# exit(1)
#num_quantiles = FLAGS.num_quantiles
#quantiles = (jnp.arange(0, num_quantiles) + 0.5) / float(num_quantiles)
if FLAGS.symm:
logging.info('Symm network')
network_fn = symm_qr_atari_network(num_actions, FLAGS.num_quantiles,
FLAGS.n_layers, FLAGS.n_nodes)
elif FLAGS.nc:
logging.info('NC network')
network_fn = nc_qr_atari_network(num_actions, FLAGS.num_quantiles,
FLAGS.n_layers, FLAGS.n_nodes)
else:
logging.info('Standard QR network')
network_fn = qr_atari_network(num_actions, FLAGS.num_quantiles)
network = hk.transform(network_fn)
def preprocessor_builder():
return processors.atari(
additional_discount=FLAGS.additional_discount,
max_abs_reward=FLAGS.max_abs_reward,
resize_shape=(FLAGS.environment_height, FLAGS.environment_width),
num_action_repeats=FLAGS.num_action_repeats,
num_pooled_frames=2,
zero_discount_on_life_loss=True,
num_stacked_frames=FLAGS.num_stacked_frames,
grayscaling=True,
)
# Create sample network input from sample preprocessor output.
sample_processed_timestep = preprocessor_builder()(env.reset())
sample_processed_timestep = typing.cast(dm_env.TimeStep,
sample_processed_timestep)
sample_network_input = sample_processed_timestep.observation
assert sample_network_input.shape == (FLAGS.environment_height,
FLAGS.environment_width,
FLAGS.num_stacked_frames)
exploration_epsilon_schedule = parts.LinearSchedule(
begin_t=int(FLAGS.min_replay_capacity_fraction * FLAGS.replay_capacity *
FLAGS.num_action_repeats),
decay_steps=int(FLAGS.exploration_epsilon_decay_frame_fraction *
FLAGS.num_iterations * FLAGS.num_train_frames),
begin_value=FLAGS.exploration_epsilon_begin_value,
end_value=FLAGS.exploration_epsilon_end_value)
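# Optionally compress stored frames: this trades a little CPU time for a much smaller replay memory footprint.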
if FLAGS.compress_state:
def encoder(transition):
return transition._replace(
s_tm1=replay_lib.compress_array(transition.s_tm1),
s_t=replay_lib.compress_array(transition.s_t))
def decoder(transition):
return transition._replace(
s_tm1=replay_lib.uncompress_array(transition.s_tm1),
s_t=replay_lib.uncompress_array(transition.s_t))
else:
encoder = None
decoder = None
replay_structure = replay_lib.Transition(
s_tm1=None,
a_tm1=None,
r_t=None,
discount_t=None,
s_t=None,
)
replay = replay_lib.TransitionReplay(FLAGS.replay_capacity, replay_structure,
random_state, encoder, decoder)
optimizer = optax.adam(
learning_rate=FLAGS.learning_rate, eps=FLAGS.optimizer_epsilon)
if FLAGS.max_global_grad_norm > 0:
optimizer = optax.chain(
optax.clip_by_global_norm(FLAGS.max_global_grad_norm), optimizer)
train_rng_key, eval_rng_key = jax.random.split(rng_key)
train_agent = CrDqn(
preprocessor=preprocessor_builder(),
sample_network_input=sample_network_input,
network=network,
#quantiles=quantiles,
optimizer=optimizer,
transition_accumulator=replay_lib.TransitionAccumulator(),
replay=replay,
batch_size=FLAGS.batch_size,
exploration_epsilon=exploration_epsilon_schedule,
min_replay_capacity_fraction=FLAGS.min_replay_capacity_fraction,
learn_period=FLAGS.learn_period,
target_network_update_period=FLAGS.target_network_update_period,
rng_key=train_rng_key,
)
eval_agent = parts.EpsilonGreedyActor(
preprocessor=preprocessor_builder(),
network=network,
exploration_epsilon=FLAGS.eval_exploration_epsilon,
rng_key=eval_rng_key,
)
# Set up checkpointing.
checkpoint = parts.NullCheckpoint()
state = checkpoint.state
state.iteration = 0
state.train_agent = train_agent
state.eval_agent = eval_agent
state.random_state = random_state
state.writer = writer
if checkpoint.can_be_restored():
checkpoint.restore()
while state.iteration <= FLAGS.num_iterations:
# New environment for each iteration to allow for determinism if preempted.
env = environment_builder()
logging.info('Training iteration %d.', state.iteration)
train_seq = parts.run_loop(train_agent, env, FLAGS.max_frames_per_episode)
num_train_frames = 0 if state.iteration == 0 else FLAGS.num_train_frames
train_seq_truncated = itertools.islice(train_seq, num_train_frames)
train_trackers = parts.make_default_trackers(train_agent)
train_stats = parts.generate_statistics(train_trackers, train_seq_truncated)
logging.info('Evaluation iteration %d.', state.iteration)
eval_agent.network_params = train_agent.online_params
eval_seq = parts.run_loop(eval_agent, env, FLAGS.max_frames_per_episode)
eval_seq_truncated = itertools.islice(eval_seq, FLAGS.num_eval_frames)
eval_trackers = parts.make_default_trackers(eval_agent)
eval_stats = parts.generate_statistics(eval_trackers, eval_seq_truncated)
# Logging and checkpointing.
human_normalized_score = atari_data.get_human_normalized_score(
FLAGS.environment_name, eval_stats['episode_return'])
capped_human_normalized_score = np.amin([1., human_normalized_score])
log_output = [
('iteration', state.iteration, '%3d'),
('frame', state.iteration * FLAGS.num_train_frames, '%5d'),
('eval_episode_return', eval_stats['episode_return'], '% 2.2f'),
('train_episode_return', train_stats['episode_return'], '% 2.2f'),
('eval_num_episodes', eval_stats['num_episodes'], '%3d'),
('train_num_episodes', train_stats['num_episodes'], '%3d'),
('eval_frame_rate', eval_stats['step_rate'], '%4.0f'),
('train_frame_rate', train_stats['step_rate'], '%4.0f'),
('train_exploration_epsilon', train_agent.exploration_epsilon, '%.3f'),
('normalized_return', human_normalized_score, '%.3f'),
('capped_normalized_return', capped_human_normalized_score, '%.3f'),
('human_gap', 1. - capped_human_normalized_score, '%.3f'),
]
log_output_str = ', '.join(('%s: ' + f) % (n, v) for n, v, f in log_output)
logging.info(log_output_str)
writer.write(collections.OrderedDict((n, v) for n, v, _ in log_output))
state.iteration += 1
checkpoint.save()
writer.close()
if __name__ == '__main__':
config.update('jax_platform_name', 'gpu') # Default to GPU.
config.update('jax_numpy_rank_promotion', 'raise')
config.config_with_absl()
app.run(main)
|
from ..base.handlers import BaseHandler
class RelativesIndexHandler(BaseHandler):
def get(self):
self.render('monitor/index.html')
class TemplatesIndexHandler(BaseHandler):
def get(self):
self.redirect("http://falcon-portal.nosa.me/templates")
class ExpressionsIndexHandler(BaseHandler):
def get(self):
self.redirect("http://falcon-portal.nosa.me/expressions")
class NodatasIndexHandler(BaseHandler):
def get(self):
self.redirect("http://falcon-portal.nosa.me/nodatas")
handlers = [
('/relatives', RelativesIndexHandler),
('/templates', TemplatesIndexHandler),
('/expressions', ExpressionsIndexHandler),
('/nodatas', NodatasIndexHandler)
]
|
from hylite import HyLibrary
import numpy as np
from scipy.spatial import ConvexHull
from tqdm import tqdm
def polynomial(data, degree = 1, method='div'):
"""
Detrend an image data array using a polynomial fit and np.polynomial.polynomial.polyfit( ... ).
*Arguments*:
- data = numpy array of the format image[x][y][b].
- degree = the degree of the polynomial to fit. Default is 1.
- method = 'divide' or 'subtract'. Default is 'divide'.
*Returns*:
- corr = the corrected (detrended) data.
- trend = the trend that was removed.
"""
#calculate trend
y = np.array(data.reshape( -1, data.shape[-1] )) #reshape array so each pixel is a column
y[np.logical_not(np.isfinite(y))] = 0 #kill NaNs
_x = np.arange(data.shape[-1])
fit = np.polynomial.polynomial.polyfit(_x, y.T, degree) # fit polynomial
t = np.polynomial.polynomial.polyval(_x, fit) # evaluate it
#apply correction
if 'div' in method:
y /= t
elif 'sub' in method.lower():
y -= t
y -= np.min(y) # shift so the minimum value is zero (map to positive)
return y.reshape(data.shape), t.reshape(data.shape)
def hull(spectra, div=True):
"""
Detrend a 1D spectrum by performing a hull correction. Note that this performs the correction in-situ.
*Arguments*:
- spectra = the 1D spectrum (numpy array) to correct.
- div = True if the spectrum should be divided by its hull (default). False if the hull should be subtracted.
*Returns*:
- corr = hull corrected spectrum.
- trend = the (hull) trend that was removed to give the corrected spectrum.
Returns an unchanged spectrum and trend = [0,0,...] if the spectrum contains nans or infs.
"""
# calculate convex hull
hull = ConvexHull(np.array([np.hstack([0, np.arange(len(spectra)), len(spectra) - 1]),
np.hstack([0, spectra, 0])]).T)
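# (zero-valued anchor points are added at both ends so the upper hull can be separated from the base)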
# remove unwanted simplices (e.g. along sides and base)
mask = (hull.simplices != 0).all(axis=1) & (hull.simplices != len(spectra) + 1).all(axis=1)
# build piecewise equations
x = np.arange(len(spectra), dtype=np.float32)
if not mask.any(): # edge case - convex hull is one simplex between first and last points!
y = spectra[0] + (spectra[-1] - spectra[0]) * x / x[-1] # straight line from first to last point
else:
grad = -hull.equations[mask, 0]
itc = -hull.equations[mask, 2]
dom = [(min(x[s[0]], x[s[1]]), max(x[s[0]], x[s[1]])) for s in (hull.simplices[mask] - 1)]
cl = [(x >= d[0]) & (x <= d[1]) for d in dom]
# evaluate piecewise functions
fn = [(lambda x, m=grad[i], c=itc[i]: m * x + c) for i in range(len(grad))]
y = np.piecewise(x, cl, fn)
# return
if div:
return spectra / y, y
else:
return 1 + spectra - y, y
def get_hull_corrected(data, band_range=None, method='div', vb=True):
"""
Apply a hull correction to an entire HyData instance (HyImage, HyCloud or HyLibrary). Returns a corrected copy of
the input dataset.
*Arguments*:
- data = the HyData instance (HyImage, HyCloud or HyLibrary) to correct.
- band_range = Tuple containing the (min,max) band indices or wavelengths to run the correction between. If None
(default) then the correction is run over the entire range.
- method = Trend removal method: 'divide' or 'subtract'. Default is 'divide'.
- vb = True if this should print output.
"""
# create copy containing the bands of interest
if band_range is None:
band_range = (0, -1)
else:
band_range = (data.get_band_index(band_range[0]), data.get_band_index(band_range[1]))
corrected = data.export_bands(band_range)
# convert integer data to floating point (we need floats for the hull correction)
comp = False
if corrected.is_int():
if np.nanmax( corrected.data ) > 100: # large values imply the data is stored in compressed (integer) form
corrected.decompress()
else:
corrected.data = corrected.data.astype(np.float32) # cast to float for hull correction
comp = True
method = 'div' in method # convert method to bool (for performance)
# get valid pixels
D = corrected.get_raveled()
nan = corrected.header.get_data_ignore_value()
valid = (np.isfinite(D) & (D != nan)).all(axis=1) # drop nans/no-data values
valid = valid & (D != D[:, 0][:, None]).any(axis=1) # drop flat spectra (e.g. all zeros)
if valid.any(): # if some valid points exist, do correction
X = D[valid]
upper = []
lower = []
loop = range(X.shape[0])
if vb:
loop = tqdm(loop, leave=False, desc='Applying hull correction')
for p in loop:
X[p, :], fac = hull(X[p, :], div=method)
# special case - also apply this correction to upper/lower spectra of the HyData instance
if isinstance(corrected, HyLibrary):
if corrected.upper is not None:
# also apply correction to bounds
if method:
upper.append(corrected.upper[valid][p, :] / fac)
else:
upper.append(corrected.upper[valid][p, :] - fac)
if corrected.lower is not None:
# also apply correction to bounds
if method:
lower.append(corrected.lower[valid][p, :] / fac)
else:
lower.append(corrected.lower[valid][p, :] - fac)
# copy data back into original array
D[valid] = X
corrected.set_raveled(D)
if len(upper) > 0:
corrected.upper[valid] = np.array(upper)
if len(lower) > 0:
corrected.lower[valid] = np.array(lower)
# convert back to integer if need be
if comp:
corrected.compress()
return corrected
|
from django.shortcuts import render
from .models import address
import csv
import codecs
from django.shortcuts import HttpResponse, render, render_to_response
from django.template import RequestContext
from django.contrib import messages
from .forms import AddForm#, UploadFileForm
from django.core.files.storage import FileSystemStorage
from django.template.context_processors import csrf
from django.http import HttpResponse
from django.core.urlresolvers import resolve
#import pdb; pdb.set_trace()
EmailCol = 0
NameCol = 1
def addressbook(request):
form = AddForm()
addressList = address.objects.all()
appname = request.resolver_match.app_name
return render(request, 'myapp/index.html', locals())
def add(request):
if request.method == 'POST':
form = AddForm(request.POST)
if form.is_valid():
name = form.cleaned_data['name']
email = form.cleaned_data['email']
if address.objects.filter(email=email).exists():
row = [email, name]
request.session["email"] = email
request.session["name"] = name
return askConfirm(row, 0, request)
else:
addRecord(email, name)
messages.info(request, 'New contact "%s" added.' % email)
return addressbook(request)
def upload(request):
if request.method == 'POST':
if 'file' in request.FILES:
try:
csvfile = request.FILES['file']
if not request.FILES['file'].name.split(".")[-1] == "csv":
raise ValueError("File extension is not csv.")
handle_uploaded_file(csvfile)
reader = csv.reader(codecs.iterdecode(csvfile, 'utf-8'))
except:
return HttpResponse("You need a proper csv file withe first line 'email,name'.")
else:
return HttpResponse("You need to select a csv file.")
count = 0
for row in reader:
if count==0 and row[0].lower()!='email':
messages.info(request, 'CSV format should be:"email, name"')
return addressbook(request)
if not handleRow(row, count, request):
return askConfirm(row, count, request)
count += 1
return addressbook(request)
def continueProcessCSV(request):
global EmailCol
global NameCol
count = 0
currentRow = int(request.POST["current"])
# single add through form
if currentRow==0:
if 'Yes' in request.POST:
email = request.session["email"]
name = request.session["name"]
updateRecord(email=email, name=name)
messages.info(request, 'Single record updated.')
elif 'No' in request.POST:
messages.info(request, 'Single record skipped.')
return addressbook(request)
# multiple add through csv
with open('addsave.csv') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if count==currentRow :
if 'Yes' in request.POST:
updateRecord(email=row[EmailCol], name=row[NameCol])
messages.info(request, 'Updated %s.' % row[EmailCol])
elif 'No' in request.POST:
messages.info(request, 'Skipped %s.' % row[EmailCol])
elif count > currentRow:
if not handleRow(row, count, request):
return askConfirm(row, count, request)
count += 1
messages.info(request, 'CSV processed.')
return addressbook(request)
def ifExist(row):
global EmailCol
return address.objects.filter(email=row[EmailCol]).count()>0
def handleRow(row, currentRowIndex, request):
global EmailCol
global NameCol
if currentRowIndex == 0: #is csv header
if row[0].lower()=='name':
EmailCol = 1
NameCol = 0
elif row[0].lower()=='email':
EmailCol = 0
NameCol = 1
return True
if ifExist(row): # record already exists
return False
else:
addRecord(email=row[EmailCol], name=row[NameCol]) # add new
return True
def askConfirm(row, currentRowIndex, request):
global EmailCol
message = 'Found existing record %s, overwrite?' % row[EmailCol]
current = currentRowIndex
action_link = "/myapp/continue/"
return render(request, 'myapp/confirm.html', locals())
def addRecord(email, name):
newItem = address(email=email, name=name)
newItem.save()
def updateRecord(email, name):
oldItem = address.objects.get(email=email)
oldItem.name = name
oldItem.save()
def handle_uploaded_file(f):
destination = open('./addsave.csv', 'wb+')
for chunk in f.chunks():
destination.write(chunk)
destination.close()
def downloadCSV(request):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=addressbook.csv'
writer = csv.writer(response)
writer.writerow(['email', 'name'])
allAddress = address.objects.all()
for row in allAddress:
writer.writerow([row.email, row.name])
return response
from django.db import connection, transaction
def truncateTable(request):
cursor = connection.cursor()
cursor.execute("TRUNCATE TABLE ADDRESS")
return addressbook(request)
|
# I didn't think of enumeration at first; I originally tried to simulate directly to the given time.
import sys
class Solution:
def minCostSetTime(self, startAt: int, moveCost: int, pushCost: int, targetSeconds: int) -> int:
a, res = [0] * 4, sys.maxsize
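# Enumerate every 4-digit display; a[0]a[1] are minutes and a[2]a[3] are seconds, so keep only displays encoding targetSeconds.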
for i in range(1, 10000):
a[0], a[1] = i // 1000, i//100 % 10
a[2], a[3] = i // 10 % 10, i % 10
if a[0]*600 + a[1]*60 + a[2]*10 + a[3] != targetSeconds:
continue
# skip leading zeros
j = 0
while j < 4 and not a[j]:
j += 1
temp, k = 0, startAt
while j < 4:
if k != a[j]:
k = a[j]
temp += moveCost
temp += pushCost
j += 1
res = min(res, temp)
return res
|
"""Extension management."""
from django.conf import settings
from django.urls import include
from django.urls import re_path
from django.utils.encoding import smart_str
class ModoExtension(object):
"""
Base extension class.
Each Modoboa extension must inherit from this class to be
considered as valid.
"""
name = None
label = None
version = "NA"
description = ""
needs_media = False
always_active = False
url = None
topredirection_url = None
def get_url(self):
"""Return extension base url."""
if self.url is None:
return self.name
return self.url
def infos(self):
"""Information about this extension."""
return {
"name": self.name, "label": self.label, "version": self.version,
"description": self.description, "url": self.get_url(),
"topredirection_url": self.topredirection_url,
"always_active": self.always_active
}
def load_initial_data(self):
"""Declare extension data in this method."""
pass
def load(self):
"""Add extension loading tasks in this method."""
pass
class ExtensionsPool(object):
"""The extensions manager"""
def __init__(self):
self.extensions = {}
def register_extension(self, ext, show=True):
"""Register an extension.
:param ext: a class inheriting from ``Extension``
:param show: list the extension or not
"""
self.extensions[ext.name] = {"cls": ext, "show": show}
def get_extension(self, name):
"""Retrieve the current instance of an extension."""
if name not in self.extensions:
return None
if "instance" not in self.extensions[name]:
self.extensions[name]["instance"] = self.extensions[name]["cls"]()
return self.extensions[name]["instance"]
def get_extension_infos(self, name):
"""Return information about the specified extension."""
instance = self.get_extension(name)
if instance is None:
return None
return instance.infos()
def load_extension(self, name):
"""Load a registered extension."""
__import__(name, locals(), globals(), [smart_str("modo_extension")])
extinstance = self.get_extension(name)
if extinstance is None:
return None
extinstance.load()
return extinstance
def load_all(self):
"""Load all defined extensions.
Each extension must be loaded in order to integrate with
Modoboa. Only enabled and special extensions are loaded but
urls are always returned. The reason is urls are imported only
once so must know all of them when the python process
starts. Otherwise, it would lead to unexpected 404 errors :p
:return: a list of url maps
"""
for ext in settings.MODOBOA_APPS:
self.load_extension(ext)
def get_urls(self, category="app"):
"""Get all urls defined by extensions."""
result = []
for ext_name in list(self.extensions.keys()):
ext = self.get_extension(ext_name)
if category == "api":
root = ""
pattern = "{}.urls_api"
else:
root = r"^{}/".format(ext.get_url())
pattern = "{}.urls"
try:
result.append(
re_path(root, include(pattern.format(ext_name)))
)
except ImportError:
# No urls for this extension
pass
return result
def list_all(self):
"""List all defined extensions."""
result = []
for extname, extdef in list(self.extensions.items()):
if not extdef["show"]:
continue
infos = self.get_extension_infos(extname)
infos["id"] = extname
result += [infos]
return sorted(result, key=lambda i: i["name"])
exts_pool = ExtensionsPool()
|
#!/usr/bin/python
from Emakefun_MotorDriver import PWM
import time
class Emakefun_StepperMotor:
MICROSTEPS = 8
MICROSTEP_CURVE = [0, 50, 98, 142, 180, 212, 236, 250, 255]
#MICROSTEPS = 16
# a sinusoidal curve NOT LINEAR!
#MICROSTEP_CURVE = [0, 25, 50, 74, 98, 120, 141, 162, 180, 197, 212, 225, 236, 244, 250, 253, 255]
def __init__(self, controller, num, steps=200):
self.MC = controller
self.revsteps = steps
self.motornum = num
self.sec_per_step = 0.1
self.steppingcounter = 0
self.currentstep = 0
num -= 1
if (num == 0):
#self.PWMA = 8
self.AIN2 = 13
self.AIN1 = 11
#self.PWMB = 13
self.BIN2 = 10
self.BIN1 = 8
elif (num == 1):
#self.PWMA = 2
self.AIN2 = 2
self.AIN1 = 4
#self.PWMB = 7
self.BIN2 = 5
self.BIN1 = 7
else:
raise NameError('MotorHAT Stepper must be between 1 and 2 inclusive')
def setSpeed(self, rpm):
self.sec_per_step = 60.0 / (self.revsteps * rpm)
self.steppingcounter = 0
def oneStep(self, dir, style):
pwm_a = pwm_b = 255
# first determine what sort of stepping procedure we're up to
if (style == Emakefun_MotorHAT.SINGLE):
if ((self.currentstep//(self.MICROSTEPS//2)) % 2):
# we're at an odd step, weird
if (dir == Emakefun_MotorHAT.FORWARD):
self.currentstep += self.MICROSTEPS//2
else:
self.currentstep -= self.MICROSTEPS//2
else:
# go to next even step
if (dir == Emakefun_MotorHAT.FORWARD):
self.currentstep += self.MICROSTEPS
else:
self.currentstep -= self.MICROSTEPS
if (style == Emakefun_MotorHAT.DOUBLE):
if not (self.currentstep//(self.MICROSTEPS//2) % 2):
# we're at an even step, weird
if (dir == Emakefun_MotorHAT.FORWARD):
self.currentstep += self.MICROSTEPS//2
else:
self.currentstep -= self.MICROSTEPS//2
else:
# go to next odd step
if (dir == Emakefun_MotorHAT.FORWARD):
self.currentstep += self.MICROSTEPS
else:
self.currentstep -= self.MICROSTEPS
if (style == Emakefun_MotorHAT.INTERLEAVE):
if (dir == Emakefun_MotorHAT.FORWARD):
self.currentstep += self.MICROSTEPS//2
else:
self.currentstep -= self.MICROSTEPS//2
if (style == Emakefun_MotorHAT.MICROSTEP):
if (dir == Emakefun_MotorHAT.FORWARD):
self.currentstep += 1
else:
self.currentstep -= 1
# go to next 'step' and wrap around
self.currentstep += self.MICROSTEPS * 4
self.currentstep %= self.MICROSTEPS * 4
pwm_a = pwm_b = 0
if (self.currentstep >= 0) and (self.currentstep < self.MICROSTEPS):
pwm_a = self.MICROSTEP_CURVE[self.MICROSTEPS - self.currentstep]
pwm_b = self.MICROSTEP_CURVE[self.currentstep]
elif (self.currentstep >= self.MICROSTEPS) and (self.currentstep < self.MICROSTEPS*2):
pwm_a = self.MICROSTEP_CURVE[self.currentstep - self.MICROSTEPS]
pwm_b = self.MICROSTEP_CURVE[self.MICROSTEPS*2 - self.currentstep]
elif (self.currentstep >= self.MICROSTEPS*2) and (self.currentstep < self.MICROSTEPS*3):
pwm_a = self.MICROSTEP_CURVE[self.MICROSTEPS*3 - self.currentstep]
pwm_b = self.MICROSTEP_CURVE[self.currentstep - self.MICROSTEPS*2]
elif (self.currentstep >= self.MICROSTEPS*3) and (self.currentstep < self.MICROSTEPS*4):
pwm_a = self.MICROSTEP_CURVE[self.currentstep - self.MICROSTEPS*3]
pwm_b = self.MICROSTEP_CURVE[self.MICROSTEPS*4 - self.currentstep]
# go to next 'step' and wrap around
self.currentstep += self.MICROSTEPS * 4
self.currentstep %= self.MICROSTEPS * 4
# only really used for microstepping, otherwise always on!
#self.MC._pwm.setPWM(self.PWMA, 0, pwm_a*16)
#self.MC._pwm.setPWM(self.PWMB, 0, pwm_b*16)
# set up coil energizing!
coils = [0, 0, 0, 0]
if (style == Emakefun_MotorHAT.MICROSTEP):
if (self.currentstep >= 0) and (self.currentstep < self.MICROSTEPS):
coils = [1, 1, 0, 0]
elif (self.currentstep >= self.MICROSTEPS) and (self.currentstep < self.MICROSTEPS*2):
coils = [0, 1, 1, 0]
elif (self.currentstep >= self.MICROSTEPS*2) and (self.currentstep < self.MICROSTEPS*3):
coils = [0, 0, 1, 1]
elif (self.currentstep >= self.MICROSTEPS*3) and (self.currentstep < self.MICROSTEPS*4):
coils = [1, 0, 0, 1]
else:
step2coils = [ [1, 0, 0, 0],
[1, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 1, 1],
[0, 0, 0, 1],
[1, 0, 0, 1] ]
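# one table entry per half-step: the coil energizing pattern over a full 8 half-step cycle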
coils = step2coils[self.currentstep//(self.MICROSTEPS//2)]
#print "coils state = " + str(coils)
self.MC.setPin(self.AIN2, coils[0])
self.MC.setPin(self.BIN1, coils[1])
self.MC.setPin(self.AIN1, coils[2])
self.MC.setPin(self.BIN2, coils[3])
return self.currentstep
def step(self, steps, direction, stepstyle):
s_per_s = self.sec_per_step
lateststep = 0
if (stepstyle == Emakefun_MotorHAT.INTERLEAVE):
s_per_s = s_per_s / 2.0
if (stepstyle == Emakefun_MotorHAT.MICROSTEP):
s_per_s /= self.MICROSTEPS
steps *= self.MICROSTEPS
print (s_per_s , " sec per step")
for s in range(steps):
lateststep = self.oneStep(direction, stepstyle)
time.sleep(s_per_s)
if (stepstyle == Emakefun_MotorHAT.MICROSTEP):
# this is an edge case, if we are in between full steps, lets just keep going
# so we end on a full step
while (lateststep != 0) and (lateststep != self.MICROSTEPS):
lateststep = self.oneStep(direction, stepstyle)
time.sleep(s_per_s)
class Emakefun_DCMotor:
def __init__(self, controller, num):
self.MC = controller
self.motornum = num
in1 = in2 = 0
self._speed = 0
if (num == 0):
in2 = 13
in1 = 11
elif (num == 1):
in2 = 8
in1 = 10
elif (num == 2):
in2 = 2
in1 = 4
elif (num == 3):
in2 = 5
in1 = 7
else:
raise NameError('MotorHAT Motor must be between 1 and 4 inclusive')
#self.PWMpin = pwm
self.IN1pin = in1
self.IN2pin = in2
def run(self, command):
if not self.MC:
return
if (command == Emakefun_MotorHAT.FORWARD):
self.MC.setPin(self.IN2pin, 0)
self.MC.setPWM(self.IN1pin, self._speed*16)
if (command == Emakefun_MotorHAT.BACKWARD):
self.MC.setPin(self.IN1pin, 0)
self.MC.setPWM(self.IN2pin, self._speed*16)
if (command == Emakefun_MotorHAT.RELEASE):
self.MC.setPin(self.IN1pin, 0)
self.MC.setPin(self.IN2pin, 0)
def setSpeed(self, speed):
if (speed < 0):
speed = 0
if (speed > 255):
speed = 255
#self.MC._pwm.setPWM(self.PWMpin, 0, speed*16)
self._speed = speed
class Emakefun_Servo:
def __init__(self, controller, num):
self.MC = controller
self.pin = [0, 1, 14, 15, 9, 12, 3, 6]
self.PWM_pin = self.pin[num]
self.currentAngle = 0
def writeServo(self, angle):
pulse = 4096 * ((angle*11)+500) / 20000
self.MC.setPWM(self.PWM_pin, pulse)
self.currentAngle = angle
def writeServoWithSpeed(self, angle, speed):
if (speed == 10):
pulse = 4096 * ((angle * 11) + 500) / 20000
self.MC.setPWM(self.PWM_pin, pulse)
else:
if angle < self.currentAngle:
for i in range(self.currentAngle, angle, -1):
time.sleep(4 * (10 - speed) / 1000)
pulse = 4096 * ((i * 11) + 500) / 20000
self.MC.setPWM(self.PWM_pin, pulse)
else:
for i in range(self.currentAngle, angle, 1):
time.sleep(4 * (10 - speed) / 1000)
pulse = 4096 * ((i * 11) + 500) / 20000
self.MC.setPWM(self.PWM_pin, pulse)
self.currentAngle = angle
def readDegrees(self):
return self.currentAngle
class Emakefun_MotorHAT:
FORWARD = 1
BACKWARD = 2
BRAKE = 3
RELEASE = 4
SINGLE = 1
DOUBLE = 2
INTERLEAVE = 3
MICROSTEP = 4
def __init__(self, addr = 0x60, freq = 50):
self._i2caddr = addr # default addr on HAT
self._frequency = freq # default @1600Hz PWM freq
self.servos = [ Emakefun_Servo(self, n) for n in range(8) ]
self.motors = [ Emakefun_DCMotor(self, m) for m in range(4) ]
self.steppers = [ Emakefun_StepperMotor(self, 1), Emakefun_StepperMotor(self, 2) ]
self._pwm = PWM(addr, debug=False)
self._pwm.setPWMFreq(self._frequency)
def setPin(self, pin, value):
if (pin < 0) or (pin > 15):
raise NameError('PWM pin must be between 0 and 15 inclusive')
if (value != 0) and (value != 1):
raise NameError('Pin value must be 0 or 1!')
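# writing 4096 into the OFF register forces the pin fully low; into the ON register, fully high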
if (value == 0):
self._pwm.setPWM(pin, 0, 4096)
if (value == 1):
self._pwm.setPWM(pin, 4096, 0)
def setPWM(self, pin, value):
if (value > 4095):
self._pwm.setPWM(pin, 4096, 0)
else:
self._pwm.setPWM(pin, 0, value)
def getStepper(self, steps, num):
if (num < 1) or (num > 2):
raise NameError('MotorHAT Stepper must be between 1 and 2 inclusive')
return self.steppers[num-1]
def getMotor(self, num):
if (num < 1) or (num > 4):
raise NameError('MotorHAT Motor must be between 1 and 4 inclusive')
return self.motors[num-1]
def getServo(self, num):
if (num < 1) or (num > 8):
raise NameError('MotorHAT Servo must be between 1 and 8 inclusive')
return self.servos[num-1]
|
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv(r'E:\csvdhf5xlsxurlallfiles\percent-bachelors-degrees-women-usa.csv')
year=df['Year']
physical_science=df['Physical Sciences']
computer_science=df['Computer Science']
plt.style.use('ggplot')
plt.subplot(2,2,1)
plt.plot(year, physical_science, color='blue')
plt.title('physical science')
plt.subplot(2,2,2)
plt.plot(year, computer_science, color='red')
plt.title('computer science')
#add annotation
cs_max=computer_science.max()
yr_max=year[computer_science.argmax()]
plt.annotate('maximum', xy=(yr_max, cs_max), xytext=(yr_max+5, cs_max+5), arrowprops=dict(facecolor='k'))
plt.show()
|
#!/usr/bin/env python
'''
Tests ParseTree and downstream ParseTreeNode.
'''
__author__ = 'Aditya Viswanathan'
__email__ = 'aditya@adityaviswanathan.com'
import os
import sys
import unittest
from parse_tree import ParseTree
# Append parent dir to $PYTHONPATH to import ReportTraverser, whose public
# methods have bindings into the ParseTreeNode.
my_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(my_path, os.pardir)))
import report_utils
class ParseTreeBasic(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
def test_basic(self):
answers = {
'Add(2)' : 2,
'Add(2,1)' : 3.0,
'Subtract(2,1)' : 1.0,
'Multiply(2,1)' : 2.0,
'Divide(2,1)' : 2.0,
'Multiply(2.5, 2.5)' : 6.25,
'Count(2.5, 2.5, 4)' : 3.0,
'Average(1, 2, 3)' : 2.0,
'Average(2, 2.5, 3)' : 2.5,
'Ceiling(2.3)' : 3.0,
'Round(2.156, 2)' : 2.16
}
for input_str, val in answers.iteritems():
self.assertEqual(ParseTree(input_str).evaluate_tree().val, val)
def test_nesting(self):
answers = {
'Add(Add(2,1), Add(3,1))' : 7.0,
'Subtract( Multiply( 2.5, 3.5), Add(3, 1))' : 4.75
}
for input_str, val in answers.iteritems():
self.assertEqual(ParseTree(input_str).evaluate_tree().val, val)
def test_varargs(self):
answers = {
'Add(1, 2.0, 3, 5, 7.5)' : 18.5,
'Subtract( Add(2, 3), Add (3,4), Add( 4,5))' : -11
}
for input_str, val in answers.iteritems():
self.assertEqual(ParseTree(input_str).evaluate_tree().val, val)
def test_comparators(self):
answers = {
'GreaterThan(1,1)' : 0,
'GreaterThan(1.1,1)' : 1,
'GreaterThan( 1 , 1.1 )' : 0,
'GreaterEqualThan(1,1)' : 1,
'GreaterEqualThan(1.1,1.1)' : 1,
'GreaterEqualThan(1.1,1.2)' : 0,
'LessThan(1,1)' : 0,
'LessThan(1.1,1)' : 0,
'LessThan( 1 , 1.1 )' : 1,
'LessEqualThan(1,1)' : 1,
'LessEqualThan(1.1,1.1)' : 1,
'LessEqualThan(1.1,1.2)' : 1,
'LessEqualThan(0.1, 10, 1, 100)' : 1 # TODO(aditya): clean up varargs definition.
}
for input_str, val in answers.iteritems():
self.assertEqual(ParseTree(input_str).evaluate_tree().val, val)
def test_if_else(self):
answers = {
'IfElse(GreaterThan(1,1), 1, -1)' : '-1',
'IfElse(GreaterThan(2,1), 1, -1)' : '1',
}
for input_str, val in answers.iteritems():
self.assertEqual(ParseTree(input_str).evaluate_tree().val, val)
class ParseTreeTraverser(unittest.TestCase):
@classmethod
def setUpClass(cls):
data_file = 'testdata/cashflow_test.csv' # no need to open the file just to get its name
axis_decision = report_utils.AxisDecision(data_file)
axis_decision.decide()
cls.traverser = report_utils.ReportTraverser(
data_file,
axis_decision.date_axis,
axis_decision.date_index,
axis_decision.title_axis,
axis_decision.title_index)
def test_traverser_bindings(self):
answers = {
'Count(get_dates(0))' : 14,
'Count( get_titles ( 0 ) )' : 51,
'get_cell_by_text (0, Late Fee, JAN 17 )' : '0.0',
'Add(get_cell_by_text (0, Late Fee, OCT 17 ), get_cell_by_index(0, 5, 11))' : 510,
'Ceiling(Average(get_cells_by_date(0, SEP 17)))' : 1122,
'Ceiling(Average(get_cells_by_date(0, JAN 17 )))' : 1268,
'Count(get_cells_by_date(0, JAN 17 ))' : 50,
'IfElse(GreaterThan(2,1), Count(get_dates(0)), Count(get_titles(0)))' : 14,
}
for input_str, val in answers.iteritems():
self.assertEqual(
ParseTree(input_str, [self.traverser]).evaluate_tree().val, val)
def test_if_else_list_response(self):
answers = {
'IfElse(GreaterThan(2,1), get_dates(0), get_titles(0))' : ['Account Name', 'JAN 17', 'FEB 17'],
}
for input_str, val in answers.iteritems():
res = ParseTree(input_str, [self.traverser]).evaluate_tree(is_list=True)
self.assertEqual([i.val for i in res][0:3], val)
answers = {
'Count(IfElse(GreaterThan(2,1), get_dates(0), get_titles(0)))' : 14,
'Count(IfElse(GreaterThan(1,2), get_dates(0), get_titles(0)))' : 51,
'Add(IfElse(GreaterThan(1,2), 1, Count(get_titles(0))), \
IfElse(GreaterThan(2,1), 1, Count(get_titles(0))))' : 52
}
for input_str, val in answers.iteritems():
self.assertEqual(
ParseTree(input_str, [self.traverser]).evaluate_tree().val, val)
def test_list_response(self):
answers = {
'get_dates(0)' : ['AUG 17', 'SEP 17', 'OCT 17'],
'get_cells_by_title(0, Discount/Promotion)' : ['0.0', '0.0', '-50.0'],
'get_cells_by_date(0, SEP 17)' : ['$4,600.00', '', '$9,234.00']
}
for input_str, val in answers.iteritems():
res = ParseTree(input_str, [self.traverser]).evaluate_tree(is_list=True)
self.assertGreaterEqual(len(res), 3)
self.assertEqual([i.val for i in res][-6:-3], val)
def test_title_annotations(self):
answers = {
'get_cells_by_date(0, JAN 17)' : [
'Income',
'Rent-Tempe',
'Discount/Promotion'
],
'get_cells_by_title(0, Discount/Promotion)' : [
'Discount/Promotion',
'Discount/Promotion',
'Discount/Promotion'
]
}
for input_str, val in answers.iteritems():
res = ParseTree(input_str, [self.traverser]).evaluate_tree(is_list=True)
self.assertGreaterEqual(len(res), 3)
self.assertEqual([i.title.val for i in res][:3], val)
def test_date_annotations(self):
answers = {
'get_cells_by_date(0, JAN 17)' : ['JAN 17', 'JAN 17', 'JAN 17'],
'get_cells_by_title(0, Discount/Promotion)' : ['JAN 17', 'FEB 17', 'MAR 17']
}
for input_str, val in answers.iteritems():
res = ParseTree(input_str, [self.traverser]).evaluate_tree(is_list=True)
self.assertGreaterEqual(len(res), 3)
self.assertEqual([i.date.val for i in res][:3], val)
def test_nested_annotations(self):
q = 'Add(get_cell_by_index(0, 2, 10), get_cell_by_index(0,3, 10))'
res = ParseTree(q, [self.traverser]).evaluate_tree()
self.assertEqual(res.val, 10309)
self.assertEqual(res.date.val, 'OCT 17')
# TODO(aditya): Establish and test well-defined behavior for differing
# annotations in arguments to functions with singleton response (as
# above). Currently, we merely use the annotation of args[0] element.
def test_vec(self):
vector_funcs = {
# TODO(aditya): Enable test below when divide by zero bug fix is implemented.
# 'VectorDivide' : (12800, 'Discount/Promotion'),
'VectorAdd' : (-166.2, 'Discount/Promotion'),
'VectorSubtract' : (66.2, 'Discount/Promotion'),
'VectorMultiply' : (1789440, 'Discount/Promotion')
}
fixed_index_check = 9
for func, out in vector_funcs.iteritems():
q0 = 'get_cells_by_title(0, Discount/Promotion)'
q1 = 'get_cells_by_date(0,OCT 17)'
q2 = 'get_cells_by_date(0,NOV 17)'
q = func + '(' + q0 + ', ' + q1 + ', ' + q2 + ')'
res = ParseTree(q, [self.traverser]).evaluate_tree(is_list=True)
self.assertEqual(round([i.val for i in res][fixed_index_check], 1),
out[0])
self.assertEqual([i.title for i in res][fixed_index_check].val, out[1])
class ParseTreeTraversers(unittest.TestCase):
@classmethod
def setUpClass(cls):
data_file = 'testdata/cashflow_test.csv' # no need to open the file just to get its name
axis_decision = report_utils.AxisDecision(data_file)
axis_decision.decide()
cls.traversers = [
report_utils.ReportTraverser(data_file,
axis_decision.date_axis,
axis_decision.date_index,
axis_decision.title_axis,
axis_decision.title_index),
report_utils.ReportTraverser(data_file,
axis_decision.date_axis,
axis_decision.date_index,
axis_decision.title_axis,
axis_decision.title_index)]
def test_eval_trees(self):
q1 = 'Add(get_cell_by_index(0, 2, 10), get_cell_by_index(0, 3, 10))'
q2 = 'Add(get_cell_by_index(0, 3, 10), get_cell_by_index(0, 4, 10))'
tree1 = ParseTree(q1, [self.traversers[0]])
tree2 = ParseTree(q2, [self.traversers[1]])
responses = ParseTree.evaluate_trees([tree1, tree2])
self.assertEqual(responses[0].val, 10309)
self.assertEqual(responses[1].val, -50)
def test_eval_manytrees(self):
q = 'Add(get_cell_by_index(0, 2, 10), get_cell_by_index(0, 3, 10))'
trees = [ParseTree(q, [self.traversers[0]])] * 100
responses = ParseTree.evaluate_trees(trees)
self.assertEqual(responses[0].val, 10309)
self.assertEqual(responses[99].val, 10309)
def test_eval_tree_by_index(self):
q = 'Add(get_cell_by_index(0, 2, 10), ' + \
'get_cell_by_index(1, 5, 10))'
res = ParseTree(q, self.traversers).evaluate_tree()
self.assertEqual(res.val, 10579)
class ParseTreeErrors(unittest.TestCase):
@classmethod
def setUpClass(cls):
data_file = 'testdata/cashflow_test.csv' # no need to open the file just to get its name
axis_decision = report_utils.AxisDecision(data_file)
axis_decision.decide()
cls.traverser = report_utils.ReportTraverser(
data_file,
axis_decision.date_axis,
axis_decision.date_index,
axis_decision.title_axis,
axis_decision.title_index)
def test_nonexistent_func(self):
nonexistents = [
'IDONTEXIST()',
'IDONTEXIST(1.1)'
]
for nonexistent in nonexistents:
with self.assertRaises(Exception):
ParseTree(nonexistent, [self.traverser]).evaluate_tree()
def test_invalid_argc(self):
invalids = [
'Floor(1.1, 2.1)',
'Floor(get_cell_by_index(0, 2, 10), get_cell_by_index(0, 3, 10))',
'get_cell_by_index(0, 2, 10, 20)'
]
for invalid in invalids:
with self.assertRaises(Exception):
ParseTree(invalid, [self.traverser]).evaluate_tree()
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
'''
Created on Oct 27, 2009
@author: sxin
'''
import MySQLdb
from MySQLdb.cursors import DictCursor
#import MySQLdb.cursors
class MySql:
_conn = None
def __init__(self, host, user, password, db, port=3306):
i = host.find(':')
if i >= 0:
host, port = host[:i], int(host[i+1:])
self._conn = MySQLdb.connect(user=user,
passwd=password,
db=db,
charset='utf8',
host=host,
port=port,
cursorclass=DictCursor,
connect_timeout=10)
def query(self, sql, params=None):
c = self._conn.cursor()
c.execute(sql, params)
return c
def commit(self):
""" 提交事务 """
self.query('COMMIT')
|
# @Time : 2018-9-10
# @Author : zxh
import threading
import traceback
import os
import datetime
import sys
_file_path = os.path.realpath(__file__)
_src_index = _file_path.rfind('src')
if _src_index == -1:
_file_path = sys.argv[0]
_src_index = _file_path.rfind('src')
if _src_index == -1:
PROJECT_PATH = None
else:
PROJECT_PATH = _file_path[0:_src_index]
print('PROJECT PATH:', PROJECT_PATH)
print()
def relative_project_path(*args):
if PROJECT_PATH is None:
raise Exception('PROJECT_PATH is None')
return os.path.join(PROJECT_PATH, *args)
# ...
def wrapper_mutex(instance, func, mutex_name='', timeout=1):
mutex_name = '_mutex_' + mutex_name
mutex = None
if mutex_name in dir(instance):
mutex = instance.__getattribute__(mutex_name)
else:
mutex = threading.Lock()
instance.__setattr__(mutex_name, mutex)
def new_func(*args, **kwargs):
if mutex.acquire(timeout=timeout):
r = None
try:
r = func(*args, **kwargs)
except:
traceback.print_exc()
finally:
mutex.release()
return r
else:
print(func.__name__, 'acquire timeout')
instance.__setattr__(func.__name__, new_func)
def get_sort_index(a, reverse=True): # descending order by default
b = [i for i in range(len(a))]
c = list(zip(a, b))
c.sort(key = lambda x: x[0], reverse=reverse)
_, b = zip(*c)
return b
def this_time():
now = datetime.datetime.now()
date = now.strftime('%Y-%m-%d')
t = now.strftime('%H-%M-%S')
return date, t
def today():
return datetime.datetime.now().strftime('%Y-%m-%d')
def yesterday():
return (datetime.datetime.now() - datetime.timedelta(days=1)).strftime('%Y-%m-%d')
__all__ = ['wrapper_mutex', 'relative_project_path', 'get_sort_index', 'today', 'yesterday', 'this_time']
|
from flask import Flask, render_template, request, jsonify
import pickle
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import MinMaxScaler
app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = True
@app.route('/', methods=['GET'])
def index():
return render_template('cardio.html')
with open('model.pickle', 'rb') as pickle_file:
model = pickle.load(pickle_file)
@app.route('/predict', methods=['POST'])
def predict():
user_data = request.json
age, gender, height, weight, pulse = user_data['age'], user_data['gender'], user_data['height'], user_data['weight'], user_data['pulse']
food, relative, smoke, heaviest = user_data['food'], user_data['relative'], user_data['smoke'], user_data['heaviest']
race, pressure, salt, supps, tv = user_data['race'], user_data['pressure'], user_data['salt'], user_data['supps'], user_data['tv']
age_smoke, milk, income = user_data['age_smoke'], user_data['milk'], user_data['income']
risk = _model_prediction(income, pulse, age_smoke, race, height, weight, supps, food, milk, gender, smoke, salt, pressure, tv, relative, heaviest, age)
return jsonify({'risk': risk})
def _model_prediction(income, pulse, age_smoke, race, height, weight, supps, food, milk, gender, smoke, salt, pressure, tv, relative, heaviest, age):
income, tv, heaviest, bmi, race1, race2, race3, race4, race6, race7 = _clean_data(income, tv, heaviest, height, weight, race)
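# NOTE: the feature order below must match the column order the model was trained on.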
X = np.array([income, pulse, age_smoke, race1, bmi, supps, food, race6, race4, race3, race2, milk, gender, race7, smoke, salt, pressure, tv, relative, heaviest, age]).reshape(1, -1)
y_hat = model.predict_proba(X)
if y_hat[0, 1] > 0.5: # probability threshold of 50%
return 'HIGH-RISK'
else:
return 'NOT HIGH-RISK'
def _clean_data(income, tv, heaviest, height, weight, race):
race_array = np.zeros(6)
race_array[race-1] = 1 #creates array: value of 1 at selected race, else 0
race1, race2, race3, race4, race6, race7 = race_array #deconstruct array
poverty_ratio = income/20000
tv = round(tv,1)
heaviest = round(heaviest,1)
bmi = (weight*0.453592)/(height*0.0254)**2
return poverty_ratio, tv, heaviest, bmi, race1, race2, race3, race4, race6, race7
if __name__ == '__main__':
app.run()
|
#!/usr/bin/python
import re
import sqlite3
from BeautifulSoup import BeautifulSoup
SELECT_TMPL = '''SELECT asin, html FROM fresh;'''
INSERT_TMPL = '''INSERT INTO fresh_categories VALUES (?,?,?)'''
conn = sqlite3.connect('../../data/data')
c = conn.cursor()
c.execute('''DELETE FROM fresh_categories where 1''')
conn.commit()
def loadDatabase(asin, categories):
zipped = zip([asin] * len(categories), categories, \
reversed(range(len(categories))))
c.executemany(INSERT_TMPL, zipped)
conn.commit()
def parseCategories(asin, soup):
category_parent_node = soup.find('div', {'class': 'productSims'})
if category_parent_node is not None:
categories = category_parent_node.findAll('span')
categories = map(lambda x: x.text, categories)[:(len(categories)-1)]
loadDatabase(asin, categories)
cs = conn.cursor()
cs.execute(SELECT_TMPL)
for row in cs:
asin = row[0]
html = row[1]
soup = BeautifulSoup(html.encode('ascii', 'replace'))
parseCategories(asin, soup)
|
# url https://www.cnblogs.com/wuzhanpeng/p/4261015.html#3767420
import pygame # import the pygame library
from pygame.locals import * # import some constants from pygame.locals
from sys import exit # import the exit function from sys
import time
from random import randint
class Enemy(pygame.sprite.Sprite):
def __init__(self, enemy_surface, enemy_init_pos):
pygame.sprite.Sprite.__init__(self)
self.image = enemy_surface
self.rect = self.image.get_rect()
self.rect.topleft = enemy_init_pos
self.speed = 2
self.down_index = 0
def update(self):
self.rect.top += self.speed
if self.rect.top >= SCREEN_HEIGHT:
self.kill()
class Bullet(pygame.sprite.Sprite):
def __init__(self, bullet_surface, bullet_init_pos):
pygame.sprite.Sprite.__init__(self)
self.image = bullet_surface
self.rect = self.image.get_rect()
self.rect.topleft = bullet_init_pos
self.speed = 8
def update(self):
self.rect.top -= self.speed
if self.rect.top <= - self.rect.height:
self.kill()
class Hero(pygame.sprite.Sprite):
def __init__(self, hero_surface, hero_init_pos):
pygame.sprite.Sprite.__init__(self)
self.image = hero_surface
self.rect = self.image.get_rect()
self.rect.topleft = hero_init_pos
self.speed = 6
self.is_hit = False
self.bullet1 = pygame.sprite.Group()
def single_shoot(self, bullet1_surface):
bullet1 = Bullet(bullet1_surface, self.rect.midtop)
self.bullet1.add(bullet1)
def move(self, offset):
x = self.rect.left + offset[pygame.K_RIGHT]- offset[pygame.K_LEFT]
y = self.rect.top + offset[pygame.K_DOWN] - offset[pygame.K_UP]
if x <0:
self.rect.left = 0
elif x > SCREEN_WIDTH - self.rect.width:
self.rect.left = SCREEN_WIDTH - self.rect.width
else:
self.rect.left = x
if y <0:
self.rect.top = 0
elif y > SCREEN_HEIGHT - self.rect.height:
self.rect.top = SCREEN_HEIGHT - self.rect.height
else:
self.rect.top = y
# define the window resolution
SCREEN_WIDTH = 480
SCREEN_HEIGHT = 640
ticks = 0
# define the frame rate
FRAME_RATE = 60
# define the animation cycle (in frames)
ANIMATE_CYCLE = 30
# initialize the game
pygame.init()
# initialize a window for display
screen = pygame.display.set_mode([SCREEN_WIDTH, SCREEN_HEIGHT])
# set the window title
pygame.display.set_caption('this is my first pygame-program')
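# per-arrow-key movement offset in pixels per frame: set on KEYDOWN, cleared on KEYUP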
offset = {pygame.K_LEFT:0, pygame.K_RIGHT:0, pygame.K_UP:0, pygame.K_DOWN:0}
# load the background image
background = pygame.image.load('resources/image/background.png')
# load the sprite sheet image
shoot_img = pygame.image.load('resources/image/shoot.png')
# crop the loaded image with subsurface
# hero1_rect = pygame.Rect(0,99,102, 126)
# hero2_rect = pygame.Rect(165,360,102,126)
# hero1 = shoot_img.subsurface(hero1_rect)
# hero2 = shoot_img.subsurface(hero2_rect)
hero_pos = [200,500]
bullet1_surface = shoot_img.subsurface(pygame.Rect(1004, 987, 9, 21))
enemy1_surface = shoot_img.subsurface(pygame.Rect(534, 612, 57, 43))
enemy1_down_surface = []
enemy1_down_surface.append(shoot_img.subsurface(pygame.Rect(267, 347, 57, 43)))
enemy1_down_surface.append(shoot_img.subsurface(pygame.Rect(873, 697, 57, 43)))
enemy1_down_surface.append(shoot_img.subsurface(pygame.Rect(267, 296, 57, 43)))
enemy1_down_surface.append(shoot_img.subsurface(pygame.Rect(930, 697, 57, 43)))
hero_surface = []
hero_surface.append(shoot_img.subsurface(pygame.Rect(0, 99, 102, 126)))
hero_surface.append(shoot_img.subsurface(pygame.Rect(165, 360, 102, 126)))
hero_surface.append(shoot_img.subsurface(pygame.Rect(165, 234, 102, 126)))
hero_surface.append(shoot_img.subsurface(pygame.Rect(330, 624, 102, 126)))
hero_surface.append(shoot_img.subsurface(pygame.Rect(330, 498, 102, 126)))
hero_surface.append(shoot_img.subsurface(pygame.Rect(432, 624, 102, 126)))
hero = Hero(hero_surface[0], hero_pos)
clock = pygame.time.Clock()
enemy_group = pygame.sprite.Group()
enemy_down_group = pygame.sprite.Group()
hero_down_index = 1
gameover = pygame.image.load('resources/image/gameover.png')
# event loop (main loop)
while True:
clock.tick(FRAME_RATE)
# draw the background
screen.blit(background, (0,0))
if hero.is_hit:
if ticks %(ANIMATE_CYCLE//2) ==0:
hero_down_index +=1
hero.image = hero_surface[hero_down_index]
if hero_down_index == 5:
break
else:
hero.image = hero_surface[(ticks % ANIMATE_CYCLE)//(ANIMATE_CYCLE//2)] # alternate between frames 0 and 1
# if ticks % 50 <25:
# screen.blit(hero1, hero_pos)
# else:
# screen.blit(hero2, hero_pos)
if ticks >= ANIMATE_CYCLE:
ticks = 0
# hero.image = hero_surface[ticks//(ANIMATE_CYCLE//2)]
if ticks %10 ==0:
hero.single_shoot(bullet1_surface)
hero.bullet1.update()
hero.bullet1.draw(screen)
if ticks %30 ==0:
enemy = Enemy(enemy1_surface, [randint(0, SCREEN_WIDTH-enemy1_surface.get_width()),- enemy1_surface.get_height()])
enemy_group.add(enemy)
enemy_group.update()
enemy_group.draw(screen)
enemy_down_group.add(pygame.sprite.groupcollide(enemy_group, hero.bullet1, True, True))
for enemy1_down in enemy_down_group:
screen.blit(enemy1_down_surface[enemy1_down.down_index], enemy1_down.rect)
if ticks % (ANIMATE_CYCLE//2) ==0:
if enemy1_down.down_index <3:
enemy1_down.down_index +=1
else:
enemy_down_group.remove(enemy1_down)
enemy_down_list = pygame.sprite.spritecollide(hero, enemy_group, True)
if len(enemy_down_list) >0:
enemy_down_group.add(enemy_down_list)
hero.is_hit = True
screen.blit(hero.image, hero.rect)
ticks += 1
# update the screen
pygame.display.update()
# handle quitting the game
# poll events from the message queue
for event in pygame.event.get():
if event.type == pygame.QUIT:
print("exit")
pygame.quit()
exit()
if event.type == pygame.KEYDOWN:
if event.key in offset:
offset[event.key] = 3
elif event.type == pygame.KEYUP:
if event.key in offset:
offset[event.key] =0
# offset_x = offset[pygame.K_RIGHT] - offset[pygame.K_LEFT]
# offset_y = offset[pygame.K_DOWN] - offset[pygame.K_UP]
# print(offset_x,"------", offset_y)
#
# time.sleep(1)
# hero_pos = [hero_pos[0] + offset_x, hero_pos[1] + offset_y]
hero.move(offset)
screen.blit(gameover, (0, 0))
# exit the game after the player crashes
while True:
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 18 11:51:58 2018
Code to test models of PSFs
@author: ppxee
"""
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
from scipy.stats import norm
from scipy.optimize import curve_fit
plt.close('all')
def radial_profile(data, center):
y, x = np.indices((data.shape)) #create coordinate grid
r = np.sqrt((x - center[0])**2 + (y - center[1])**2) #get radius values for grid
r = r.astype(int) # np.int is deprecated; use the builtin int
tbin = np.bincount(r.ravel(), data.ravel()) # counts number of times value
# of radius occurs in the psf
# weighted by the data
nr = np.bincount(r.ravel()) # counts number of radii values in psf
radialprofile = tbin / nr # as weighted is r*data then get profile by
# dividing by unweighted counts of r values.
return radialprofile
def FWHM2sigma_arcsecs(FWHM):
''' Function to convert the FWHM of a distribution into a sigma for that
distribution. It assumes the distribution is gaussian.
Input:
FWHM = Full width half maximum of a distribution (in my case usually
of an object from SExtractor)
Output:
sigma = standard deviation value of a gaussian distribution with the
given FWHM. This roughly equates to the psf of the object. '''
return FWHM/np.sqrt(8*np.log(2))
def normalise(array):
return array/np.nansum(array)
def getguass(sdata, sem):
colname = 'FWHM_WORLD_'
colnames = colname+sem
mag = sdata['MAG_APER_'+sem][:,4]
mask1 = mag > 15 #removes saturated
mask2 = mag < 20 #removes very faint stars
mask = mask1 * mask2
tempsdata = sdata[mask]
fwhm = np.median(tempsdata[colnames]) * 3600
print(sem+'='+str(fwhm))
sig = FWHM2sigma_arcsecs(fwhm)
guas = norm.pdf(r, 0, sig)
# guas = normalise(guas)
return guas, sig
def my_gaussian(xdata, mean, sigma):
guass = (1/(sigma*np.sqrt(2*np.pi))) * np.exp(-0.5 * ((xdata - mean)/sigma)**2)
return guass
def origin_gaussian(xdata, sigma):
guass = (1/(sigma*np.sqrt(2*np.pi))) * np.exp(-0.5 * ((xdata - 0)/sigma)**2)
return guass
def basic_gaussian(xdata, a, b, c):
guass = a * np.exp(-((xdata - b)**2)/(2*(c**2)))
return guass
sdata = fits.open('mag_flux_tables/stars_mag_flux_table_new_limited.fits')[1].data
oldsdata = fits.open('mag_flux_tables/stars_mag_flux_table.fits')[1].data
hdr08B = fits.getheader('Images/UDS_08B_K.fits') # random year (same in all)
const = -hdr08B['CD1_1'] # constant that defines unit conversion for FWHM
sems = ['10B']#['05B', '06B', '07B', '08B', '09B', '10B', '11B', '12B']
n=1
for sem in sems:
if sem == '10B':
psf = fits.getdata('PSFs/limited_'+sem+'_K_PSF.fits')
else:
psf = fits.getdata('PSFs/new_limited_'+sem+'_K_PSF.fits')
# find radial profiles
radialprofile = radial_profile(psf, [63,63])
# radialprofile = normalise(radialprofile)
sqrtrp = np.sqrt(radialprofile)
r = np.arange(0,90,1) * const * 3600 # define radius values
# find gaussian profile from FWHM
guas, sig1 = getguass(sdata, sem)
sqrtguas = np.sqrt(guas)
#
# # plot psf (logged so you can see it)
# plt.figure(1)
# plt.subplot(121)
# plt.imshow(np.log(psf))
# plt.title('new PSF')
#
# # plot radial profile on same plot with its model
# plt.subplot(322)
# plt.plot(r, radialprofile, label=sem)
# plt.xlabel('Radius (arcsecs)')
# plt.ylabel('Flux')
# plt.legend()
# plt.subplot(324)
# plt.plot(r, sqrtrp, label=sem)
# plt.xlabel('Radius (arcsecs)')
# plt.ylabel('sqrt(Flux)')
# plt.legend()
# plt.subplot(326)
# plt.plot(r, radialprofile, label=sem)
# plt.yscale('log')
# plt.xlabel('Radius (arcsecs)')
# plt.ylabel('log(Flux)')
# plt.legend()
# plt.tight_layout(pad=1)
# n+=1
# n += 3
### set up plots wit radial profile ###
plt.figure(1)
plt.subplot(211)
plt.title('My gaussian function')
plt.plot(r, sqrtrp, label=sem+' real')
plt.xlabel('Radius (arcsecs)')
plt.ylabel('sqrt(Flux)')
plt.xlim(xmin=0, xmax=1.5)
plt.legend()
plt.subplot(212)
plt.plot(r, radialprofile, label=sem+' after')
plt.xlabel('Radius (arcsecs)')
plt.ylabel('Flux')
plt.xlim(xmin=0, xmax=1.5)
plt.legend()
## Make test guassians so you can see how it changes ###
#sigs = np.linspace(sig1, sig1+1, 10)
#for sig in sigs:
#
# gaus2 = my_gaussian(r, 0, sig)
# sqrtgaus2 = np.sqrt(gaus2)
#
# plt.figure(1)
# plt.subplot(211)
# plt.plot(r, sqrtgaus2, '--', label=sem+' Model')
# plt.legend()
#
# plt.subplot(212)
# plt.plot(r, gaus2, '--', label=sem+' Model')
# plt.legend()
# Try curve_fit function
init_vals = [0, sig1]
popt, pcov = curve_fit(my_gaussian, r, radialprofile, p0=init_vals)
fitgaus = my_gaussian(r, popt[0], popt[1])
sqrtfitgaus = np.sqrt(fitgaus)
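# convert the fitted sigma back to a FWHM (FWHM = sigma * sqrt(8 ln 2))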
fitfwhm = popt[1] * np.sqrt(8*np.log(2))
plt.figure(1)
plt.subplot(211)
plt.plot(r, sqrtfitgaus, '--', label=sem+' 2 param fit')
plt.legend()
plt.subplot(212)
plt.plot(r, fitgaus, '--', label=sem+' 2 param fit')
plt.legend()
## Try curve_fit function with fixed mean
#popt, pcov = curve_fit(origin_gaussian, r, radialprofile, p0=sig1)
#
#fitgaus2 = origin_gaussian(r, popt)
#sqrtfitgaus2 = np.sqrt(fitgaus2)
#fitfwhm = popt * np.sqrt(8*np.log(2))
#
#plt.figure(1)
#plt.subplot(211)
#plt.plot(r, sqrtfitgaus2, '--', label=sem+' 1 param fit')
#plt.legend()
#
#plt.subplot(212)
#plt.plot(r, fitgaus2, '--', label=sem+' 1 param fit')
#plt.legend()
# Try curve_fit function with 3 params
init_vals = [1, 0, sig1]
popt, pcov = curve_fit(basic_gaussian, r, radialprofile, p0=init_vals)
fitgaus2 = basic_gaussian(r, popt[0], popt[1], popt[2])
sqrtfitgaus2 = np.sqrt(fitgaus2)
plt.figure(1)
plt.subplot(211)
plt.plot(r, sqrtfitgaus2, '--', label=sem+' 3 param fit')
plt.legend()
plt.subplot(212)
plt.plot(r, fitgaus2, '--', label=sem+' 3 param fit')
plt.legend()
|
def printList(value):
if not value: # nothing to print for an empty list
return
if len(value) == 1: # single item: no commas or 'and' needed
print(value[0])
return
print(value[0], end='')
for i in range(1, len(value)):
if i == (len(value) - 1):
print(', and ' + value[i])
else:
print(', ' + value[i], end='')
spam = ['apples', 'bananas', 'tofu', 'cats']
printList(spam)
|
import numpy as np
xs = []
ys = []
try:
while True:
x, y = map(float, input().split())
xs.append([1,x])
ys.append(y)
except (EOFError, ValueError): # stop reading at end of input
pass
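# Build a square Vandermonde system: row i is [1, x_i, x_i^2, ..., x_i^(n-1)] for the n sample points.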
for i in range(0, len(xs)):
for j in range(2, len(xs)):
xs[i].append(pow(xs[i][1], j))
ans = np.linalg.solve(xs,ys)
for i in range(0, len(ans)):
print('a'+str(i)+':', ans[i])
|
"""
Convert a Peglet grammar to a Parson one.
"""
import re
from parson import Grammar, alter
name = r'[A-Za-z_]\w*'
grammar = Grammar(r"""
grammar : _? rule* !/./.
rule : name _ '= ' :equ token* :'.' _?.
token : '|' :'|'
| /(\/\w*\/\s)/
| name !(_ '= ')
| '!' :'!'
| _ !(name _ '= ' | !/./)
| !('= '|name) /(\S+)/ :mk_regex.
name : /("""+name+""")/ !!(/\s/ | !/./).
_ : /(\s+)/.
""")
def mk_regex(s): return '/' + s.replace('/', '\\/') + '/'
def peglet_to_parson(text):
nonterminals = set()
def equ(name, space):
nonterminals.add(name)
return name, space, ': '
g = grammar(equ=alter(equ), mk_regex=mk_regex)
tokens = g.grammar(text)
return ''.join(':'+token if re.match(name+'$', token) and token not in nonterminals
else token
for token in tokens)
if __name__ == '__main__':
import sys
    print(peglet_to_parson(sys.stdin.read()))
|
#!/usr/bin/env python
"""
@file routeGenerator.py
@author Simon Box
@date 31/01/2013
Code to generate a routes file for the "simpleT" SUMO model.
"""
import random
routes = open("grid.rou.xml", "w")
print >> routes, """<routes>
<vType id="typeCar" accel="0.8" decel="4.5" sigma="0.5" length="5" minGap="2.5" maxSpeed="25" guiShape="passenger"/>
<vType id="typeBus" accel="0.8" decel="4.5" sigma="0.5" length="17" minGap="3" maxSpeed="25" guiShape="bus"/>
<route id="bottom0totop0" edges="bottom0to0/0 0/0to0/1 0/1totop0" />
<route id="bottom0totop1" edges="bottom0to0/0 0/0to1/0 1/0to1/1 1/1totop1" />
<route id="bottom0toright1" edges="bottom0to0/0 0/0to1/0 1/0to1/1 1/1toright1" />
<route id="left0toright1" edges="left0to0/0 0/0to1/0 1/0to1/1 1/1toright1" />
<route id="top0toright0" edges="top0to0/1 0/1to1/1 1/1to1/0 1/0toright0" />
<route id="top1toleft1" edges="top1to1/1 1/1to0/1 0/1toleft1" />
"""
N = 9000
peS = 1./30
peW = 1./10
pwS = 1./30
pwE = 1./10
psE = 1./50
psW = 1./50
lastVeh = 0
vehNr = 0
for i in range(N):
if random.uniform(0,1) < peS:
print >> routes, ' <vehicle id="%i" type="typeCar" route="bottom0totop0" depart="%i" />' % (vehNr, i)
vehNr += 1
lastVeh = i
if random.uniform(0,1) < peW:
print >> routes, ' <vehicle id="%i" type="typeCar" route="bottom0totop1" depart="%i" />' % (vehNr, i)
vehNr += 1
lastVeh = i
if random.uniform(0,1) < pwS:
print >> routes, ' <vehicle id="%i" type="typeCar" route="bottom0toright1" depart="%i" />' % (vehNr, i)
vehNr += 1
lastVeh = i
if random.uniform(0,1) < pwE:
print >> routes, ' <vehicle id="%i" type="typeCar" route="left0toright1" depart="%i" />' % (vehNr, i)
vehNr += 1
lastVeh = i
if random.uniform(0,1) < psE:
print >> routes, ' <vehicle id="%i" type="typeCar" route="top0toright0" depart="%i" />' % (vehNr, i)
vehNr += 1
lastVeh = i
if random.uniform(0,1) < psW:
print >> routes, ' <vehicle id="%i" type="typeCar" route="top1toleft1" depart="%i" />' % (vehNr, i)
vehNr += 1
lastVeh = i
print >> routes, "</routes>"
routes.close()
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from textwrap import dedent
import pytest
from pants.build_graph.address import Address, ResolveError
from pants.core import register
from pants.core.target_types import GenericTarget
from pants.testutil.rule_runner import RuleRunner, engine_error
from pants.version import PANTS_SEMVER
def test_get_with_version():
rule_runner = RuleRunner(aliases=[register.build_file_aliases()], target_types=[GenericTarget])
rule_runner.write_files(
{
"BUILD": dedent(
"""\
target(name=f'test{PANTS_VERSION}')
"""
),
}
)
tgt = rule_runner.get_target(Address("", target_name=f"test{PANTS_SEMVER}"))
assert tgt is not None
# NOTE: We stringify PANTS_SEMVER in parametrize to ensure the generated test name is understandable.
@pytest.mark.parametrize(
"comparator,comparand",
[
(">", "2.0"),
(">=", str(PANTS_SEMVER)),
("==", str(PANTS_SEMVER)),
("<=", str(PANTS_SEMVER)),
("<", "3.0"),
("!=", "1.0"),
],
)
def test_get_version_comparable(comparator, comparand):
rule_runner = RuleRunner(aliases=[register.build_file_aliases()], target_types=[GenericTarget])
rule_runner.write_files(
{
"BUILD": dedent(
f"""\
if PANTS_VERSION {comparator} "{comparand}":
target(name=f'test{{PANTS_VERSION}}')
"""
),
}
)
tgt = rule_runner.get_target(Address("", target_name=f"test{PANTS_SEMVER}"))
assert tgt is not None
@pytest.mark.parametrize(
"comparator,comparand",
[
(">", "3.0"),
(">=", "3.0"),
("==", "3.0"),
("<=", "1.0"),
("<", "1.0"),
("!=", str(PANTS_SEMVER)),
],
)
def test_get_version_not_comparable(comparator, comparand):
rule_runner = RuleRunner(aliases=[register.build_file_aliases()], target_types=[GenericTarget])
rule_runner.write_files(
{
"BUILD": dedent(
f"""\
if PANTS_VERSION {comparator} "{comparand}":
target(name=f'test{{PANTS_VERSION}}')
"""
),
}
)
with engine_error(ResolveError):
rule_runner.get_target(Address("", target_name=f"test{PANTS_SEMVER}"))
|
from turtle import *
def paint_4(length):
    if(length>5):
        circle(length)
        right(90)
        forward(5)
        paint_4(length-5)
def paint_Concentric_circle(radius):
if(radius>10):
paint_Concentric_circle(radius-20)
circle(radius)
penup()
right(90)
forward(20)
left(90)
pendown()
def paint_spiral(radius):
if(radius>10):
paint_spiral(radius-10)
circle(radius,180)
def paint_tree(length,level):
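    # binary fractal tree: each branch spawns two children scaled by 0.6 at +/-45 degrees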
if level<=0:
return
forward(length)
left(45)
paint_tree(length*0.6,level-1)
right(90)
paint_tree(length*0.6,level-1)
left(45)
backward(length)
return
if __name__ == "__main__":
#paint_4(120)
#paint_Concentric_circle(100)
#paint_spiral(120)
speed(1)
left(90)
paint_tree(60,3)
mainloop()
|
#!/usr/bin/python
# -*- coding:UTF-8 -*-
import urllib2
import re
import requests
from HTMLParser import HTMLParser
import time
from bs4 import BeautifulSoup
# re.S: treat the whole string as one line so '.' also matches newlines
# re.I: ignore case
# Python2.7
def crawl_joke_list(page=4):
url = "http://www.qiushibaike.com/8hr/page/" + str(page)
# request = urllib2.Request(url)
# request.add_header('User-Agent', 'fake-client')
# response = urllib2.urlopen(request)
# text = response.read().decode("utf-8")
text=requests.get(url).content.decode("utf-8")
pattern = re.compile("<div class=\"article block untagged mb15.*?<div class=\"content\">.*?</div>", re.S)
html_parser = HTMLParser()
body = html_parser.unescape(text).replace("<br/>", "")
m = pattern.findall(body)
user_pattern = re.compile("<div class=\"author clearfix\">.*?<h2>(.*?)</h2>", re.S)
content_pattern = re.compile("<div class=\"content\">.*?<span>(.*?)</span>", re.S)
strs=""
for joke in m:
user = user_pattern.findall(joke)
output = []
if len(user) > 0:
output.append(user[0])
content = content_pattern.findall(joke)
if len(content) > 0:
output.append(content[0])
strs="\n".join(output).encode("utf-8")
        print strs  # "\t" would join the list elements with tabs; "\n" joins them line by line
with open("hello","arw+") as f:
f.writelines(strs)
time.sleep(1)
# if __name__ == '__main__':
# crawl_joke_list()
# crawl images
def file_save_img(image_url, image_local_path):
r = requests.get(image_url, stream=True)
with open(image_local_path, "wb") as f:
f.write(r.content)
def craw_joke_img(page=1):
url = "http://www.qiushibaike.com/imgrank/page/" + str(page)
request = urllib2.Request(url)
request.add_header('User-Agent', 'fake-client')
response = urllib2.urlopen(request)
text = response.read().decode("utf-8")
# text=requests.get(url).content.decode("utf-8")
content_list = re.findall(r'<div class="thumb">(.*?)</div>', text, re.S)
for content in content_list:
image_list = re.findall(r'<img src="(.*?)"', content)
for img_url in image_list:
file_save_img("https:" + img_url, "./img/" + img_url.strip().split('/')[-1])
# beautifulsoup
def craw_beautifulsoup_img(page=1):
urls = "http://www.qiushibaike.com/imgrank/page/" + str(page)
res = requests.get(urls)
soup = BeautifulSoup(res.text, "html5lib")
joke_list = soup.find_all("div", class_="thumb")
for child in joke_list:
img_url = child.find("img").get("src")
file_save_img("https:" + img_url, "./img/" + img_url.strip().split('/')[-1])
# if __name__ == '__main__':
# craw_beautifulsoup_img()
|
from django import forms
from .models import *
from time import time
from django.core.exceptions import ValidationError
from django.utils.text import slugify
class TagForm(forms.ModelForm):
obj_id = None
#using when edit
class Meta:
model = Tags
fields = ['title']
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control'}),
}
labels = {
'title': 'Enter title',
}
def clean_title(self):
new_title = self.cleaned_data['title'].lower()
n_slug = slugify(new_title)
filt_slug = self.Meta.model.objects.filter(slug__iexact=n_slug)
if n_slug == 'create':
raise ValidationError('You shall not create this title #1"create"')
if filt_slug:
if self.obj_id:
if self.obj_id != filt_slug[0].id:
raise ValidationError('You shall not edit this title like you do #4"Unique"')
else:
raise ValidationError('You shall not create this title #2"Unique"')
if not n_slug:
raise ValidationError('You shall not create this title #3"Empty"')
return new_title
class PostForm(forms.ModelForm):
obj_id = None
#using when edit
class Meta:
model = Posts
fields = ['title', 'body', 'tags']
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control'}),
'body': forms.Textarea(attrs={'class': 'form-control', 'style': 'height: 250px; resize: none;'}),
'tags': forms.CheckboxSelectMultiple(attrs={'class': 'form-check'}),
}
labels = {
'title': 'Enter "title":',
'body': 'Enter text:',
'tags': 'Choose tags',
}
def clean_title(self):
new_title = self.cleaned_data['title'].lower()
n_slug = slugify(new_title)
filt_slug = self.Meta.model.objects.filter(slug__iexact=n_slug)
if n_slug == 'create':
raise ValidationError('You shall not create this title #1"create"')
if filt_slug:
if self.obj_id:
if self.obj_id != filt_slug[0].id:
raise ValidationError('You shall not edit this title like you do #4"Unique"')
else:
raise ValidationError('You shall not create this title #2"Unique"')
if not n_slug:
raise ValidationError('You shall not create this title #3"Empty"')
return new_title
class CommentForm(forms.ModelForm):
class Meta:
model = Comments
fields = ['comment']
widgets = {'comment': forms.Textarea(attrs={'class': 'form-control', 'style': 'height: 70px; resize: none;'})}
labels = {'comment': 'Enter your comment:'}
|
from sortedcontainers import SortedSet
from util import _get_hash
class GodelHashSet2:
    def __init__(self, iterable):
        if not isinstance(iterable, (str, list)):
            raise ValueError("value must be string or list")
        self.collision_prob = 0.2
        # one distinct SortedSet per bucket; [SortedSet()] * n would alias a single shared set
        self.container = [SortedSet() for _ in range(max(len(iterable) * 2, 1))]
        self.size = 0
        for item in iterable:
            self.add(item)
    def add(self, val) -> bool:
        # resize first so the bucket is looked up in the current container
        if self.size / len(self.container) > self.collision_prob:
            self._resize()
        _hash = _get_hash(val)
        idx = _hash % len(self.container)
        bucket: SortedSet = self.container[idx]
        if val in bucket:
            return False
        bucket.add(val)
        self.size += 1
        return True
    def remove(self, val) -> bool:
        _hash = _get_hash(val)
        idx = _hash % len(self.container)
        bucket: SortedSet = self.container[idx]
        if val in bucket:
            bucket.remove(val)
            self.size -= 1
            return True
        else:
            return False
    def contains(self, val) -> bool:
        _hash = _get_hash(val)
        idx = _hash % len(self.container)
        bucket: SortedSet = self.container[idx]
        return val in bucket
    def _add_to_bucket(self, val, hash_val: int):
        idx = hash_val % len(self.container)
        self.container[idx].add(val)
    def _resize(self):
        new_size = len(self.container) * 2
        old_container = self.container
        self.container = [SortedSet() for _ in range(new_size)]
        item_count = 0
        for bucket in old_container:
            for val in bucket:
                self._add_to_bucket(val, _get_hash(val))
                item_count += 1
        self.size = item_count
|
import numpy as np
from datetime import datetime, timedelta
from olanalytics.dt import DatetimeDescription
from olanalytics.generators import timeseries, CustomTimedCurve
def test_dt_generation():
X = timeseries(
start=datetime(2020, 1, 1),
end=datetime(2020, 1, 2),
step=timedelta(hours=1),
)
np.random.seed(seed=30)
curve = CustomTimedCurve(X)
curve.add_noise(0, 1, loc=DatetimeDescription(hour=[(0, 8), (18, 24)]))
curve.add_noise(0, 3, loc=DatetimeDescription(hour=[(8, 18)]))
curve.add_gaussian(8, center=DatetimeDescription(hour=12), stdev=1)
curve.set_zero(loc=DatetimeDescription(hour=[2, 14]))
np.testing.assert_almost_equal(
curve.Y,
[
0.644143536068335,
0.38074848963511654,
0.0,
0.16365072610275336,
0.96260781367442,
0.34666184056294447,
0.9917511141334456,
0.23508770883102223,
0.8574881179026603,
0.6761857426160136,
4.065263318784986,
5.557786770552034,
8.716959115827953,
6.739544770182674,
0.0,
2.15390512150755,
0.09607594627610168,
2.7075713429709816,
1.2659962072021014,
0.13623432220222573,
0.5441362876382811,
0.5181763468258455,
0.7668551062985054,
0.9338501433185797,
]
)
|
# Generated by Django 3.0.3 on 2020-03-26 01:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Batch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('startdate', models.DateTimeField(auto_now=True)),
('enddate', models.DateTimeField(blank=True, null=True)),
('size', models.IntegerField()),
('active', models.BooleanField(default=True)),
('startingGravity', models.FloatField()),
('estimatedEndGravity', models.FloatField()),
],
options={
'verbose_name_plural': 'batches',
},
),
migrations.CreateModel(
name='BatchAdditionItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Name of this addition item.', max_length=50)),
('maker', models.CharField(blank=True, help_text='Name of the company who made this item.', max_length=50)),
('lotid', models.CharField(blank=True, help_text='The lot or batch id of this item. Useful when looking for batches made with bad Lot', max_length=20)),
],
),
migrations.CreateModel(
name='BatchNoteType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='BatchStyle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='BatchTestType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=25)),
('shortid', models.SlugField(unique=True)),
],
),
migrations.CreateModel(
name='Unit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('identifier', models.CharField(help_text="Enter the unit identifier, i.e. 'mgL' or 'ph'", max_length=10)),
('label', models.CharField(help_text="Enter abbreviation label of the measured unit, i.e. 'mg/L'", max_length=25, null=True)),
('name', models.CharField(help_text='Descriptive Name of the measuring unit.', max_length=25, null=True)),
('category', models.SmallIntegerField(choices=[(0, 'Temperature'), (1, 'Concentration/Density'), (2, 'Weight/Mass'), (3, 'pH'), (4, 'Timing'), (5, 'Volume')])),
],
),
migrations.CreateModel(
name='Fermenter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=25)),
('max_size', models.IntegerField()),
('used_size', models.IntegerField(blank=True, null=True)),
('status', models.CharField(default='Clean/Ready', max_length=15)),
('max_size_units', models.ForeignKey(on_delete=models.SET('_del'), related_name='fermenter_max_size_units', to='batchthis.Unit')),
('used_size_units', models.ForeignKey(blank=True, null=True, on_delete=models.SET('_del'), related_name='fermenter_used_size_units', to='batchthis.Unit')),
],
),
migrations.CreateModel(
name='BatchTest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime', models.DateTimeField(auto_now=True)),
('value', models.FloatField()),
('description', models.CharField(blank=True, max_length=250)),
('batch', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='tests', to='batchthis.Batch')),
('type', models.ForeignKey(on_delete=models.SET('_del'), to='batchthis.BatchTestType')),
('units', models.ForeignKey(on_delete=models.SET('_del'), to='batchthis.Unit')),
],
),
migrations.CreateModel(
name='BatchNote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('date', models.DateTimeField(auto_now_add=True)),
('batch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notes', to='batchthis.Batch')),
('notetype', models.ForeignKey(on_delete=models.SET('_del'), to='batchthis.BatchNoteType')),
],
),
migrations.CreateModel(
name='BatchCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('bjcp_code', models.CharField(max_length=3)),
('style', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='batchthis.BatchStyle')),
],
options={
'verbose_name_plural': 'batch categories',
},
),
migrations.CreateModel(
name='BatchAddition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(blank=True, help_text='Add a brief description of this Addition item and why', max_length=250)),
('amount', models.FloatField()),
('batch', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='additions', to='batchthis.Batch')),
('name', models.ForeignKey(on_delete=models.SET('_del'), to='batchthis.BatchAdditionItem')),
('units', models.ForeignKey(on_delete=models.SET('_del'), to='batchthis.Unit')),
],
),
migrations.AddField(
model_name='batch',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=models.SET('_del'), to='batchthis.BatchCategory'),
),
migrations.AddField(
model_name='batch',
name='fermenter',
field=models.ManyToManyField(blank=True, related_name='batch', to='batchthis.Fermenter'),
),
migrations.AddField(
model_name='batch',
name='size_units',
field=models.ForeignKey(on_delete=models.SET('_del'), to='batchthis.Unit'),
),
]
|
import os
import sys
import csv
from pathlib import Path
try:
from pampers import log
except Exception as e:
sys.path.insert(0, str(Path(os.path.dirname(os.path.realpath(__file__))).parents[1]))
from pampers import log
from pampers import CHAIN, APP
def csv_writer(fname: str = 'airdrop', header: list = ['public_key', 'private_key', 'tx_hash']) -> csv.DictWriter:
stamp = CHAIN.get('TIMESTAMP')
fname = f"{APP.get('NET')}-{stamp}_{fname}"
if not fname.endswith('.csv'):
fname = f'{fname}.csv'
if os.path.isfile(fname):
log.critical('please look at %s and rename it', fname)
raise Exception('airdrop outfile already exists. Retrying too soon?')
log.debug('creating csv file %s with headers %s', fname, header)
csvfile = open(fname, "w", newline='')
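    # the file handle is deliberately left open so callers can keep writing rows via the returned writer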
writer = csv.DictWriter(csvfile, fieldnames=header, dialect='excel')
writer.writeheader()
return writer
|
import logging
import sys
import simpy
from entities.delivery_boy import DeliveryBoy
class DeliveryBoyManager:
def __init__(self, env, config, xy_generator):
speed = config['speed']
self.hireCount = config['hires']
self.algoConfig = config['algo']
self.env = env
self.freePool = simpy.FilterStore(env, self.hireCount)
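        # FilterStore lets deliverOrder fetch a specific boy by id instead of plain FIFO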
self.orderServed = 0
self.deliveryTimeTotal = 0
self.idleTimeTotal = 0
self.xy_generator = xy_generator
logging.critical(
"Delivery Manager: Hired %d boys with Speed %d, Algo: %s" % (self.hireCount, speed, self.algoConfig["type"]))
for i in range(self.hireCount):
x, y = self.xy_generator.next()
self.freePool.put(DeliveryBoy(self.env, i + 1, self, x, y, speed))
def deliverOrder(self, order):
# Get DB Id to be assigned or assign First Free
boyId = self.getDeliveryBoyId(order)
if boyId is None:
boy = yield self.freePool.get()
else:
boy = yield self.freePool.get(lambda boy: boy.id == boyId)
# Note down idle time and assign Order
self.idleTimeTotal += (self.env.now - boy.lastDeliveryTime)
self.env.process(boy.deliver(order))
def reportOrderServed(self, boy, deliveryTime):
self.orderServed += 1
self.deliveryTimeTotal += deliveryTime
self.freePool.put(boy)
def printSummary(self):
logging.critical("-------- Simulation Summary ------------")
logging.critical("Orders Served: %d" % self.orderServed)
logging.critical("Average Delivery Time: %f" % (self.deliveryTimeTotal / self.orderServed))
logging.critical("Average Idle Time: %f" % (self.idleTimeTotal / self.hireCount))
def getDeliveryBoyId(self, order):
if self.algoConfig["type"] == "LEAST_COST":
# Find DB with least cost free pool and return
            # float('inf') works on both Python 2 and 3; sys.maxint exists only on Python 2
            minCost, minId = float('inf'), None
for boy in self.freePool.items:
cost = self.getCost(order, boy)
if cost < minCost:
minCost = cost
minId = boy.id
return minId
else:
return None
def getCost(self, order, boy):
weight = self.algoConfig["weight"]
return boy.getCost(order, weight["restaurant"], weight["idle"])
|
from time import sleep
import xlrd
from xlutils.copy import copy
# from wrapt_timeout_decorator import *
import db
import tools
import importlib
import ip_test
import traceback
import Chrome_driver
import random
import Changer_windows_info as changer
import os
import json
import thread_tokill
import sys
import threadpool
import threading
import datetime
write_flag = 0
pool = threadpool.ThreadPool(5)
def makedir_account(path):
isExists=os.path.exists(path)
if isExists:
return
else:
os.makedirs(path)
def writelog(chrome_driver,submit):
    '''
    Save a screenshot and write a traceback log entry for this mission to the database.
    '''
path = r'..\log'
makedir_account(path)
path_ = r'..\log\pics'
makedir_account(path_)
path_ = os.path.join(path_,str(submit['Mission_Id']))
makedir_account(path_)
starttime = datetime.datetime.utcnow()
time_now = str(starttime).split('.')[0].replace(' ','').replace(':','')
pic_name = time_now+'.png'
pic = os.path.join(path_,pic_name)
print(pic)
try:
chrome_driver.save_screenshot(pic)
print('pic saved success')
except Exception as e:
print(str(e))
with open(pic,'rb') as f:
png = f.read()
Mission_Id = submit['Mission_Id']
traceback_ = traceback.format_exc()
db.write_log_db(Mission_Id,traceback_,png)
# write_flag = 0
def get_excel(path):
path_excel = path
workbook = xlrd.open_workbook(path_excel)
sheet = workbook.sheet_by_index(0)
return sheet,workbook
def get_one_data(sheet,Mission_Id,Country=''):
rows = sheet.nrows
print(rows)
# list_rows = random.sample(range(rows),rows)
badname = []
submit_ = {}
for i in range(rows):
print(i)
if i==0:
keys = sheet.row_values(i)
continue
values = sheet.row_values(i)
submit = dict(zip(keys,values))
# print(submit)
if Country != '':
if submit['Country'] != Country:
continue
key = 'Status_'+ str(Mission_Id)
flag_alpha = True
for key_ in submit:
submit[key_] = str(submit[key_]).replace('\t','').replace(' ','')
firstname = submit['firstname'].replace('\t','').replace(' ','')
lastname = submit['lastname'].replace('\t','').replace(' ','')
# print(submit[key])
# print(firstname)
# print(lastname)
if submit[key] == '':
if len(firstname) == 0:
submit['row'] = i
submit['badname'] = badname
submit_ = submit
break
if len(lastname) == 0:
submit['row'] = i
submit['badname'] = badname
submit_ = submit
break
for part in firstname:
a = tools.is_alphabet(part)
if a == False:
flag_alpha = a
print('not alpha:',part)
break
for part in lastname:
a = tools.is_alphabet(part)
if a == False:
print('not alpha:',part)
flag_alpha = a
break
if flag_alpha == True:
submit['row'] = i
submit['badname'] = badname
submit['lastname'] = lastname
submit['firstname'] = firstname
submit_ = submit
break
else:
badname.append(i)
# print('submit find:',submit)
return submit_
def change_ip(country):
for i in range(5):
try:
ip_test.ip_Test('',state = '',country=country )
return
# if zipcode != '' and zipcode != None:
# submit['zipcode'] = zipcode
# return submit
except:
pass
changer.restart()
def change_ip_dadao():
import urllib.request
opener = urllib.request.build_opener(urllib.request.ProxyHandler({'socks5':'socks5://51.15.13.163:2380'}))
# {'http':'http://192.168.30.131:24001'}))
# url_test = 'http://lumtest.com/myip.json'
# url_test = 'http://www.google.com'
res = str(opener.open('http://lumtest.com/myip.json').read(),encoding = "utf-8")
# res = json.loads(res)
print(res)
def write_status(path,workbook,submit,content):
book2 = copy(workbook)
sheet2 = book2.get_sheet(0)
col = int(str(submit['Mission_Id'])[-3:])+12
print(col)
sheet2.write(submit['Dadao']['row'],col,content)
book2.save(path)
# write_flag = 0
def mission(plans):
requests = threadpool.makeRequests(reg_part, plans)
[pool.putRequest(req) for req in requests]
pool.wait()
def get_write_content(submit):
submit_ = {}
if 'password' in submit:
submit_['password'] = submit['password']
if 'zipcode' in submit:
submit_['zipcode'] = submit['zipcode']
if 'status' in submit:
submit_['status'] = submit['status']
content = json.dumps(submit)
return content
# @timeout(600)
def reg_part(plan):
path = r'..\res\Dadao.xlsx'
global write_flag
while True:
if write_flag != 0:
sleep(3)
else:
write_flag = 1
break
sheet,workbook = get_excel(path)
submit_ = get_one_data(sheet,plan['Mission_Id'])
if submit_ == {}:
print('no data found')
write_flag = 0
return
submit = {}
submit['ID'] = plan['ID']
submit['Dadao'] = submit_
submit['Dadao']['path'] = path
submit['Site'] = plan['url_link']
submit['Mission_Id'] = plan['Mission_Id']
submit['count'] = plan['count']
submit['Mission_dir'] = plan['Mission_dir']
submit['Excels_dup'] = ['Dadao','']
submit['Country'] = plan['Country']
print('reg_part')
write_status(path,workbook,submit,'0')
write_flag = 0
# module = 'Mission_'+str(plan['Mission_Id'])
# Module = ''
# try:
# Module = importlib.import_module(module)
# except:
# pass
try:
Page_flags = db.get_page_flag(submit['Mission_Id'])
print(Page_flags)
if len(Page_flags) == 0:
print('No Page_flags found in db')
return
else:
chrome_driver = Chrome_driver.get_chrome(submit,pic=1)
submit['Page_flags'] = Page_flags
print('Page_flags found,use Record modern')
thread_tokill.web_submit(submit,chrome_driver,debug=0)
writelog(chrome_driver,submit)
# print(submit)
except Exception as e:
print(str(e))
a = traceback.format_exc()
print(a)
try:
writelog(chrome_driver,submit)
print('==========++++')
except Exception as e:
print(str(e))
# traceback.format_exc()
    print('mission finished')
# content = json.dumps(submit)
status = db.get_plan_status(plan['ID'])
while True:
if write_flag != 0:
print('threading ',submit['count'],'Global ',write_flag)
sleep(3)
else:
write_flag = 1
break
sheet,workbook = get_excel(path)
if str(status) == '0':
status = ''
write_status(path,workbook,submit,str(status))
write_flag = 0
print('write status finished')
# try:
# chrome_driver.close()
# chrome_driver.quit()
# except:
# pass
for i in submit['Dadao']['badname']:
submit['row'] = i
while True:
if write_flag != 0:
sleep(3)
else:
write_flag = 1
break
sheet,workbook = get_excel(path)
write_status(path,workbook,submit,'badname')
write_flag = 0
def check_version():
num_db = db.get_current_version()
    num_db = '.'.join(num_db)
file = r'ini\\VERSION.ini'
with open(file) as f:
num_native = f.readline()
print('db version:%s'%num_db)
print('native version:%s'%num_native)
flag = False
if num_native == num_db:
flag = True
# print(flag)
return flag
def change_update_file():
files = os.listdir('.')
print(files)
if 'Auto_update2.pyc' in files:
# print(modules)
file = os.path.join(os.getcwd(),'Auto_update.pyc')
file2 = os.path.join(os.getcwd(),'Auto_update2.pyc')
os.remove(file)
os.rename(file2,file)
def main(num):
try:
flag = check_version()
except Exception as e:
print(str(e))
print('get db failed,restart........')
changer.Restart()
if flag == False:
change_update_file()
command = '''start cmd /k "python Auto_update.pyc 1"{$name$:$qcy$}" "'''
os.system(command)
return
# while True:
for i in range(1):
account = db.get_account()
plan_id = account['plan_id']
# print('Plan_id:',plan_id,',connecting sql for plan info...')
try:
db.update_flag_use_all()
plans = db.read_plans(plan_id)
for k in range(len(plans)):
plans[k]['count'] = k
# print(len(plans_))
# print(plans)
# print(len(plans))
except Exception as e:
print(str(e))
print('get db failed,restart........')
changer.Restart()
if len(plans) == 0:
print('No plan for this computer!!!!!!')
return
# print(plans)
if num == 0:
try:
tools.killpid()
except Exception as e:
print(str(e))
change_ip(plans[0]['Country'])
mission(plans)
print('All Missions finished..............')
try:
print('try killing pids')
# tools.killpid()
            sleep(5000)
            print('kill pids finished')
            return
except Exception as e:
print(str(e))
pass
restart_time = random.randint(3,5)
print('Mission completed.........')
print('Sleep',restart_time,'minutes')
# sleep(restart_time*60)
changer.Restart()
sleep(200)
def test():
path = r'..\res\Dadao.xlsx'
row = 100
for length in range(row):
sheet,workbook = get_excel(path)
row = sheet.nrows
submit = get_one_data(sheet,11000)
return
submit['Mission_Id'] = 11000
write_status(path,workbook,submit)
print(submit['firstname'],submit['lastname'])
for i in submit['firstname']:
a = tools.is_alphabet(i)
print(i,a)
if a == False:
return
for i in submit['lastname']:
a = tools.is_alphabet(i)
print(i,a)
if a == False:
return
if __name__ == '__main__':
paras=sys.argv
i = int(paras[1])
main(i)
|
import json
import logging
from django.shortcuts import render
from django.http import HttpResponse
from forms import AccountRegisterForm, AccountLoginForm, AccountUpdateForm
from services import AccountService, AuthJWT, require_loggin
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
# Get an instance of a logger
logger = logging.getLogger(__name__)
@require_loggin
def home(request):
token = request.GET['token']
return render(request, 'account/home.html', dict(token=token))
def login(request):
if request.method == 'POST':
form = AccountLoginForm(request.POST)
if form.is_valid():
user_name = form.cleaned_data['user_name']
password = form.cleaned_data['password']
token = AuthJWT.authenticate(user_name, password)
if token:
url = "%s?token=%s" % (reverse('account:home'), token)
return HttpResponseRedirect(url)
else:
response = HttpResponse()
response.status_code = 404
return response
else:
pass
else:
form = AccountLoginForm()
return render(request, 'account/login.html', {'form': form})
@require_loggin
def logout(request):
return HttpResponse("Account Logout")
def register(request):
if request.method == 'POST':
form = AccountRegisterForm(request.POST)
if form.is_valid():
first_name = form.cleaned_data['first_name']
last_name = form.cleaned_data['last_name']
user_name = form.cleaned_data['user_name']
email = form.cleaned_data['email']
password = form.cleaned_data['password']
confirm_password = form.cleaned_data['confirm_password']
            if password != confirm_password:
                # mismatched passwords: re-display the form instead of creating the account
                return render(request, 'account/register.html', {'form': form})
AccountService.create_account(first_name=first_name,
last_name=last_name,
user_name=user_name,
email=email,
password=password)
return render(request, 'account/register.html', {'regards': 'Thank for your register'})
else:
form = AccountRegisterForm()
return render(request, 'account/register.html', {'form': form})
@require_loggin
def update(request):
logger.info("Update view")
token = None
if request.method == 'POST':
form = AccountUpdateForm(request.POST)
if form.is_valid():
token = request.META['QUERY_STRING'].split('=')[1]
address = form.cleaned_data['address']
city = form.cleaned_data['city']
country = form.cleaned_data['country']
zipcode = form.cleaned_data['zipcode']
phone = form.cleaned_data['phone']
user_id = AuthJWT.decode_token(token)
account = AccountService.get_account(user_id)
AccountService.update_account(account=account,
address=address,
city=city,
country=country,
zipcode=zipcode,
phone=phone)
else:
token = request.GET['token']
user_id = AuthJWT.decode_token(token)
account = AccountService.get_account(user_id)
form_data = dict(address=account.address,
city=account.city,
country=account.country,
zipcode=account.zipcode,
phone=account.phone)
form = AccountUpdateForm(initial=form_data)
    logger.debug("Account: %s", str(account))
return render(request, 'account/update.html', {'form': form, 'token': token})
|
"""
Base password hashers.
Contains weak hashers (the original ones) available with Modoboa.
"""
import base64
import crypt
import hashlib
import string
from random import Random
from django.utils.crypto import constant_time_compare
from django.utils.encoding import force_bytes, force_str
class MetaHasher(type):
"""
PasswordHasher Metaclass
Allow classmethod to be properties
"""
@property
def name(cls):
"""Returns the name of the hasher"""
        # str.rstrip removes a set of characters, not a suffix, so strip 'Hasher' explicitly
        name = cls.__name__
        return (name[:-len('Hasher')] if name.endswith('Hasher') else name).lower()
@property
def label(cls):
"""Returns the label of the hasher"""
return cls.name if not cls._weak else "{} (weak)".format(cls.name)
class PasswordHasher(metaclass=MetaHasher):
"""
Base class of all hashers.
"""
_weak = False
def __init__(self, target="local"):
self._target = target
def _encrypt(self, clearvalue, salt=None):
raise NotImplementedError
def _b64encode(self, pwhash):
"""Encode :keyword:`pwhash` using base64 if needed.
:param str pwhash: password hash
:return: base64 encoded hash or original hash
"""
if self._target == "ldap":
return base64.b64encode(pwhash)
return pwhash
def encrypt(self, clearvalue):
"""Encrypt a password.
The format used to store passwords is the same than dovecot's one::
{SCHEME}<hash>
<hash> may differ according to the used scheme.
:param str clearvalue: clear password
:rtype: str
:return: encrypted password
"""
pwhash = self._b64encode(self._encrypt(force_str(clearvalue)))
return "%s%s" % (self.scheme, force_str(pwhash))
def verify(self, clearvalue, hashed_value):
"""Verify a password against a hashed value.
:param str clearvalue: clear password
:param str hashed_value: password previously hashed
:return: True if passwords match, False otherwise
"""
return constant_time_compare(
self._b64encode(self._encrypt(clearvalue, hashed_value)),
hashed_value
)
def needs_rehash(self, hashed_value):
"""Check if the provided hash needs rehasing accoring to the current
parameters
:param str hashed_value: hased password
:rtype bool
:return: True if the password needs rehash, false otherwise
"""
return False
@classmethod
def get_password_hashers(cls):
"""Return all the PasswordHasher supported by Modoboa"""
return cls.__subclasses__()
class PLAINHasher(PasswordHasher):
"""
Plain (ie. clear) password hasher.
"""
_weak = True
@property
def scheme(self):
return "{PLAIN}"
def _encrypt(self, clearvalue, salt=None):
return clearvalue
class CRYPTHasher(PasswordHasher):
"""
crypt password hasher.
Uses python `crypt` standard module.
"""
_weak = True
@property
def scheme(self):
return "{CRYPT}"
def _encrypt(self, clearvalue, salt=None):
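        # classic DES crypt() uses a 2-character salt drawn from letters and digits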
if salt is None:
salt = "".join(
Random().sample(
string.ascii_letters +
string.digits,
2
)
)
return crypt.crypt(clearvalue, salt)
class MD5Hasher(PasswordHasher):
"""
MD5 password hasher.
Uses python `hashlib` standard module.
"""
_weak = True
@property
def scheme(self):
return "{MD5}"
def _encrypt(self, clearvalue, salt=None):
obj = hashlib.md5(force_bytes(clearvalue))
return obj.hexdigest()
class SHA256Hasher(PasswordHasher):
"""
SHA256 password hasher.
Uses python `hashlib` and `base64` standard modules.
"""
_weak = True
@property
def scheme(self):
return "{SHA256}"
def _encrypt(self, clearvalue, salt=None):
return hashlib.sha256(force_bytes(clearvalue)).digest()
def _b64encode(self, pwhash):
"""Encode :keyword:`pwhash` using base64.
:param str pwhash: password hash
:return: base64 encoded hash
"""
return base64.b64encode(pwhash)
|
# -*- coding: utf-8 -*-
'''
Get the justification requests a user can manage
@author Ivan
@example python3 getJustificationRequestsToManage.py userId group statusList
@example python3 getJustificationRequestsToManage.py 1 ROOT APPROVED PENDING CANCELED
'''
import sys
sys.path.insert(0, '../../../python')
import inject
import logging
import asyncio
import datetime
from asyncio import coroutine
from autobahn.asyncio.wamp import ApplicationSession
from model.config import Config
###### configuration #####
logging.getLogger().setLevel(logging.DEBUG)
def config_injector(binder):
binder.bind(Config, Config('server-config.cfg'))
inject.configure(config_injector)
config = inject.instance(Config)
class WampMain(ApplicationSession):
def __init__(self, config=None):
logging.debug('instanciando')
ApplicationSession.__init__(self, config)
self.serverConfig = inject.instance(Config)
@coroutine
def onJoin(self, details):
logging.debug('********** getJustificationRequestsToManage **********')
        ###### parameters #####
        if len(sys.argv) < 3:
            sys.exit("Parameter error")
userId = sys.argv[1]
group = sys.argv[2]
statusList = [] if len(sys.argv) < 3 else sys.argv[3:]
        ###### fetch data from the server ######
justificationRequests = yield from self.call('assistance.justifications.getJustificationRequestsToManage', userId, statusList, group)
for just in justificationRequests:
print(just)
sys.exit()
if __name__ == '__main__':
from autobahn.asyncio.wamp import ApplicationRunner
from autobahn.wamp.serializer import JsonSerializer
url = config.configs['server_url']
realm = config.configs['server_realm']
debug = config.configs['server_debug']
json = JsonSerializer()
runner = ApplicationRunner(url=url, realm=realm, debug=debug, debug_wamp=debug, debug_app=debug, serializers=[json])
runner.run(WampMain)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from test.Test import Test
from test.AllAssignmentsTest import AllAssignmentsTest
from test.AllConditionsTest import AllConditionsTest
from test.AllDefinitionsTest import AllDefinitionsTest
from test.AllDecisionsTest import AllDecisionsTest
from test.AllILoopsTest import AllILoopsTest
from test.AllKPathsTest import AllKPathsTest
from test.AllUsagesTest import AllUsagesTest
from test.AllDUPathsTests import AllDUPathsTests
from test.utils import merge_states
TESTS = {
'TA': AllAssignmentsTest,
'TD': AllDecisionsTest,
'TC': AllConditionsTest,
'k-TC': AllKPathsTest,
'i-TB': AllILoopsTest,
'TDef': AllDefinitionsTest,
'TU': AllUsagesTest,
'TDU': AllDUPathsTests
}
|
# Generated by Django 3.1.1 on 2020-10-11 10:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('BreastCancerApp', '0006_auto_20201011_1042'),
]
operations = [
migrations.AlterField(
model_name='stories',
name='imageUrl',
field=models.CharField(default='data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxMTEhUTExMVFRUVFhUVFRgWFxUVFRYXFRcYFxcXFRUYHSggGB0lGxgVITEhJSkrLi4uFyAzODMtNygtLisBCgoKDg0OGhAQGy0lHSUtLS0tLS0tLS0tLS0tLS01LS0tLS0tLS0tLS0tLS0vLS0tLS0tLS0vLS0tLS0rLS0tLf/AABEIALcBFAMBIgACEQEDEQH/xAAbAAACAwEBAQAAAAAAAAAAAAACAwABBAUGB//EAD0QAAEDAgQEBAMHAgYCAwEAAAEAAhEDIQQSMUEFUWFxEyKBkTJCoQYUUrHB0fBy8RUjYoKi4UOSM1OyB//EABkBAQEBAQEBAAAAAAAAAAAAAAABAgMEBf/EAC4RAAICAQMCBQIFBQAAAAAAAAABAhESAyFRMUEEExRh8HHBUpGh0eEFFSJCgf/aAAwDAQACEQMRAD8A982n0VOYnoS1LOWJnJVZlo8JEaZCtmcWY3XQQtvhqxTHJWyYGKEzaIWvL0QlqWXAxwiTXMKprEsziLyIvBKMyrbm7JYxQsUkQwxTGvO90x1TklmlBGc4c7FUGEbLY2TdMhLL5aOcXdFXiLoFg5JL6SWRwfJmDk1rJVGgrDI3SyJcjG0UQpc0GZFnUo3aKdRbyVZeiniq5QbFZVMqIFQqkYChVqFyMJMABFkVq5ULRTWolRchzIBkoXFAXKs6CyyohL1FSWjSqlHIUkLmdAc6ovRyENkBWZSVZCohUhUqjKKFeQqgWFITfDRZRyQGeFIWjIEDg0bIKFhiY2iN1A8ow7nCBJFgK0Occ1XijmqaDUQ+IOaoVQVBYUBTKFJ6FEEAqow7Ln42u5gBDc3mAdJAIabF1+XJdYkDWAOtlwMTxCm7Eii0tIc13iTNoAgA6f3UyS6nOaNrXTpuJHZIqY9jWvcSQKZyu8rtbG1r6jReK4nXq0Kj2tzZafkAktJpu+AMN80ROs2RVeMuc6m17yGiH1SbS4kaZNwG6k7crLk/EdqOZ7rDV2vAIOoDgNDlOhINwmlee4VxCmXVqhcHPAkZbF9MCfIM0OjnFt1swXGW1KDqpIaWyHiZDTeBJgErrGafctnTlSVg4XjRWph0idHRMT0O60uK2t9yOY3MpmWcuVF6tGfMHuehDknOpmVomY4uQ5kvMrzIMgpUQ5lEFnRBCKySCpK5Uemx1lYScysOShY0yhVZlUpQDBVh6BSEAwOVpUFVBVLY0pfoiaEUqEFk9kBplNcAhyKgA0UBoFOCIFCUjL4DlMruS1yolkxRlzu6rLiccXZWtq5HTLh5c8NGZzSDoY6J3FMR4THOg/C45rGCBay8VxPi7atFtQiKoaWZpgX1cQO5tO646uqok6Hf4lWpYsCmKrhJ88WytZLi7KdeXqvI13OpPP8AmDxATcWEhgMQYLZaWkxbW52ChxFzajS5rX6WyZmkXJEwIn99IS3cQc4vy6ycoGg0JFrFw23gbwvJPVvtuRuxnE8cXPL87nCMxJsfMDDQ2dj0HaLrMMOPiNXITEhrg6c3ll1tJ7RCyYrCl5JMtBDj5CHkyPmbPfSVnpuFm7MEEiTZugJgfy9ljG90SjqDxJAacx+BoguzDUZYMjew7bFacOwyGF0tqODS3N5QAbmHaRDh6LkcMxb2vJpkZg4uDjGjpJMj4be5K2UOIFtRjneYsl5FoIkECSIAN1reLp/ESj2vDy6gyqAzyNqNayC5zyXkSXDb4gYHP0XWY4lodlc2bw4QR3Gy83w3iBrPqEPFNjqgrEkNJBBAiXa2Ei1l3uK8RBpA0qjQ55AYTcSRPmG1ufRe/T1VV9iOCY1xSnOTMKwVD4gdLcoaBMgnUut7enVOfh13Ukzk9ORja5GXprqISKjFTNNFZ1XioTKzuBWqMtmzxRzUWFRMRmz0KqURIQloXnPeWIRBBkCNrQgRYCuFZaFXh9UspYJUkoC0qIBmdEwpUqSgHKoSwUWZAEpCGVcpYAe0oYdyTi5DTqgtDgZaRM7RzlLJQsE8iipVZEjQrFxXidNrQx0nxRAIEtANszjIgX5rh47EGjlbQf5Glroa/MWi5vT1IMGY59ysPUSIO+0rzX/yqYLshmQwuh05S0je15Gk+o+fcRZk/wAnMTlLvw3kkHofb3XrMX9osmYU3tIa8uLfk8xlwJtIHUT5jyheWxIcXS5uUOcSHZHBw1Ji8ERqfzXmlU5bEe5jfgczXOaHkN5ZcptPnc58gG0xPKysS1jnuDgfKQCIMTFg4y6+XSF1cO0UneFSPj06hJdbIXZWaNEwZ13+Gwmx9L91ouwdORSc9jAQSMzhLgQWlvwyXAC14mTqtuOxaPJB7IAFJ5afxEyA43kbbdFzqbHeJ4fhhxknLAcINxPOwH99V4zGzd1nbQPhvM3/AJotNHFtApufmhpykW8wJJgB3JcYxcevczTLq+VpDaLIi+UDNnBP4jLbbCxEWWSlxFwJMiZF4k6jVM4hUpPqlzW5Q+TlDSIMi7Tpe+kwszKI3eAIBBvJ6SG6x0Pquzgv9ty1ydNmJc8AtIYNLAtAj4RYcpvb8leF4m1pyvDXNItOUAkGYBboYIExssfDa0Asdl21kg3vIj2RYhjRmAmdm2IAPKIjt06LkklJoh7rDceFU0qNKkWUjlzw4ElsXGmkep0F7L1GHql0jIWNbAE2m2zdgLL5jwfFvp1A3w84aHWdlA8sEEun5Te/6r2vCONPyUxVh73+I4ljmudaSGtY3eOw6r06U+SNHccEt1IrJi+LNpMz1WlnmygSHTaZtoNdeSdgca2qzO0ODSSBmGUmLTB2XoyV0jFLuBUZCSR0W9xCBzluzm4IwFRaHgFUrZzxNkFXBTgoCuFnsxFo6ZTAVdks0okbUHNFI5pbmjkgLeShbNKjoWYgqAFBkMKlkIaUWUqguyqFUKQUAYUlBBRCUAwBcHj2EcwB9LxLSSfGLWNuPlc6LyV32hcD7Uuc4MpBxb4lojW4uXzbUCOvZYnugz53xnGPLokvJENg5nQ6+gHL8ysVLjDhDZcGixI5RBzW1Eld7BfZl7nukQWi/huHkvl2sCBcg87JjqbDUGFYxktMsrOcA4uLicwmQBAJuTbquC041uiKJ5bEYsl0sDnRoIEug6m8jv2WrD4/w3tL87YAOWoKhpk5jnaWyfLINwNZtZd+jwo0jWzvbTcc12loDgG/C2SJBcQPhBM6xJXKrOLmjxHnODek1rZgknMTqJBBlxv1vHRJdEaxHOxFLEVqPhl1MZ4cXuc4Oc+A7IHWgCx3IhMx2OdSNXDUqk05FRohrbRLoMg2dmhrtnDcJmK+zzKTWVKNfwZ0DpqBvXxYbc+WwBFtYXm3OfTeHkmpLfiEzAh2aNYtvEiUq7Qoz49wJnbbf39z7LXhcdLchaNuR31g2nuFr+0jWUxTyZvDcGvpB2U2ynPJAiQ8m3+tcujRIfdpg2PxC5BEXEgz+qSgnFJkaH45t7AWcSQ2CcvO
Nx+ixNdoS47ARY2PUJzJEloEtGu40bIg9R7hKa2QXczAIEDrA7R7pFUgjS/DB1xOVxF7G5AJk7Cf5ZbKlUZIyljxIImc0gwSToAddjKx4d77BgdDREx8Xof7p9WuHAZpY4aAjWZggH29lxd3TMbjMPcEOykkggagR7idfddbgmIdDix2TXOM+V5LYgNJmLjU72JEhcPJVc6WMLmt8pAHl10OXe3ffmVA6c2YkFo84yiYmJIF5HlkEK4O7FHpmY1jnirUGdpMvaSMxe3MGyAAM0ADkdTfT2WExNMtimRDQLC4EiQJFvYr5twCufGb4bMzfEBiGmXGQ2QR5ddRH7fVMo5RYbAaaBejRs5zQl1VLNVMfSB3S/u45r07HFqQGZRQ0iorZmmds05PT6qn4flPZFmRtcvMfQpGN4cNlTay35xulVMK0q2ZcX2M/iplKoBtKU7CCbEhV4JHzD2V2J/khxqKeKlNnchWXdUFjQ9WXpMdVRLuSFyHF8qApLSeSIOKBSHBNYk03pgKhoa0pdVgnNlBMRO8cp5dEYeeSoFAeb4tUxAdlpUGQCSCYPiGCYiCASRN9htqsNPB1nP8WvQplwcHEEVKmtgGszX1O8AC+y9aKcEkC5m5k6rHjQ8fDJzGDM7kARGkWvtCziDyfG+Dl3np0y5hLi5jjHmGaclEASI0aJJlsXmcnDuENc0mnRr0y3M41A2kyR8pms7NdmU2AiZ3XvPuDQDEgublLm6+5vHROpU2NAAAs3ILCzbWHIWHspiatHgaRZXpPw7q1QmkZpipDswJ+IgRmAJgmTpO64HEuHeA6m6pmrsDGtcX5REOLYZTa8FwABtJmL9PovHOAsrUsoOWoJLKmrmuOpneRYrx3EnvploxtEkgj/Ms+mWC0ZQJZYm+t9SlMzZzH4Ok3iGFFJ3i0XinVY1xLgxrsxLBm2GUkA+q1cUwoxPEXsaS4Mu54BNzcC3lmSG9csLzlDEDC4nO0sqBmYsLSSzztJb3AzCR0IX0T7F4AUMOX1nllSq/O9xIBMXynMNAM0+plaaKzi/a/g7MJQPlD31QKbHAQcxcXPOQaS05YEjRZv8AAiGMpGahNNwYGGQyq0S5psLSecczYLpPxTcdxNhac1DCjMDs5+ojnLsvowr0/EeFte8OaAM4c2sQYLmkWix0N9lMdjNo+VYnhtSi5gcIa6+uoN4I2MagrS/BPaQ8sJpjQ3LRJNjsRY/uveD7H0jTex7i5xcS2pq8CAACd9PqtnAfs+MO2HO8QiQCZs05bQSRsfdYwbZk8Di6dai0sa+pTY7KHCCGvDs0SdiIid77BYcfQc3z06pc9wDKggB3mBJB5/Bc+vVfSeN8PJaTTHmDs8SQHRJAMHmTpzXjf8GqGqwsp5cw8TK65YGmSBEDl9AulUVSNn2P4SyqHvNY5srQGtgZbGbgXGYAiIjRey4dhDTDs9Q1CSLkRC5+D4XTac9JrqBmHC0EACwBkRZdR9RahHk5SmG9oSX01PGQiqutHNuLEuKieagVK2YxXJ1UQKEhXl6rge0Iwibl6pYY7mruoWy3tGyA0gihSFQL+7jqh+7dSnQrSyYoz/df9R+iv7ufxfonhEISyYoziieZRilsjKuULSM72wdwrDp3WgOUSxiAwnn9FeZEJ5qjPRBRMyrOqk8lR7fRUBeMRsk1Hk6g+oRhw5BE6sN0I/qZ5j5UJqTq33Cf4oUL0JXufIvt1wunTreJTaQypM2DWipewjQHX0KHhuFdxDEht6TAxpqnMXE5YaSJ3cdOQG+/0D7X4Rr8HVGUeQeI2IEFl7R0keq8r/8Ay6oAazYFww7fKXA//oLJq9j23CuFUMOzJSaGt1O5J5uJ1K3Fjf4UbYI0CsMHILRmhPhjY/qrFOd00UxN2hM8JnKFLLiZzhxzKr7q3r7rU4ckKWxgjK7At5n3S/uA/EVuBVq5Mnlx4MH+HN/EUJ4XyefULoWVmmEzZPKhwcv/AAo/j+ii6mXqrVzkPJhwchnFXRdv7ohxQx8I915k417SZdYGL2jl3CsY9wJB3/fZfH9VPkmTPQO4s8QYAgkHtCo8VqTAy9ei4NWqXTe31kajqhrVHN0mR+RWH4rU5GTPQN4y/TKJ/UIHcbcPlC4wrOEnLcCexROxhbc07xflCeq1ORbOuePn8Kt32gI+S97X26rliu10QNRIP5T0/wC1byyQRyne909Vq8ltnTZx8nZvYhwKJ3GKxBIp0/8A2IPtC5AxFOZBJ2P89lf3tg1JEj+X9lqPjdRdaZLfJ0hxmv8A/SyP6kxnHKk+agPSoP2XN8amRGfUaFD4bG6E8tbj3W34+fCJvyd2nxhu9N46jK79Z+i10+IUz80f1Aj815YZWxLyZIHpyTmix8xN4g/L0ndRePnwaUmelfjKY1e33QjG0j/5G+6822mSbkHlOvr1Qik/kBYnrPRX18uC5HqBi6eviN9wrdiqf42+4XmGUnW0jU23mQiNAnVjevPvZX10vwjI9M/EUvxt9wgGKp/jb7hecDd7CZ0FrFE0A8rdIvr+Sq8c+C5HoxVpndvuFZfT1Bb7heaFJ0+WItE9TdD5oiNzPbRH458DL2PR4qnTexzfL5gW7biF8v8AsJh6tDGgPY4NcH0ySLTqL/1NHuvWht4NoP0/kK6jnCI5/up658DI9OHdlCvJBzjzieZBE/z6qqdao5pEuGoieWsK/wBxX4RketV5l5A4uoCRncCNJ3R0eKVRfNO4/uqv6jp90xkerLlJXnafG3iJg2m+ui0M42d2i2t+66x8dovuMjthygeuWOLs3DgnMxtM6P8Ae35rvHW05dJIZG4OCvxGrKHsPzj3Cp9Ro1dZddhbNPiBRc8YxtxIsY1CtS48jJnkHuDhpI97AAe+iqnWbAJ7TvE2n+bLYMCJOUkbxyP6rA+he+klptr2XwGzmaIbe99SP16JmHAuJsQIJ2P6JH3cXJtIEOntFo6KgQ1uoNwNdNlmwbzTdqDzUYSRzNoB0kiY/Ncr/ECIzSMoueY09dQrZxWBJgw6J3sdirYOrQwrQZdbkCdlYpsAOhGwOvVcerjZAgyIJE9SSETqgtJgHXpfXuFHIHWmmdYtfryQBtNxcCPhIA9Rb8yFlp0mybzIOh9/zTqcDXn+dxdSwF4YDg2BkLo9r/qi+7gnUxY25aR7JtOoMhMSQZ/6CHB0gRmDrm8Sbch2WQC2gIFyb2sDpP8APRaahhptqYvvpf3SzVDHGG2sddSZ9kqvisr3ToACOV9fYJdIDzREDUXHYTp3WiCd7iQeoO/5JFGZEXsLjQgdO6E1srgJNzrbQndVMppDNi7bRC9oOp1tM6RustWsc5B1EwY1nS/eEpuKl0OHWdDHdW0SzotbFyZHokurgGI5LKMYAMjog2kc435LLVrB8gGDB5ibQpkWzruI1BjYrM3FXjkCevlI/Q/RcjO4hzZI8ocOhif52WcvdlD5AIDfqf7+yXZLO2MaczhFjEHY7fso7F+aRdtp6Tr/ADqsdLFtc2TtE9OqniAPEERfsQf59VndCze7HtmNJ/Qf2QMxzCA
TpEdiefusD4GbLcWlu8cwUp7gJ5Oy5fr9REeiWLOoyuJbmuSwntEhwWosZAA3t9JH6rgsk5CXQ7K4Dl69wnOeQxs8r92GZ9vzTZdS2dR9ES6NrfkUJpxJ7fv+hWSniwCATObfew0/nJNxFe+U7QfQLLXAsVVxGx2N/oEh1Yy6NtOxE/srxLQAec27zEfksf3gCbEOLQAD2En2RRbBVTFEX1i/dLdXJG4HbTdBRpxO4BcL95H0IKe4GM38I1XoycdkzLRkcJJku9CotLqJN8oPqop5hrFnSocR80g/uZFp9kitirtBvlJI6yBv2hcajjMjgQARaPa4WivUBkt3+HoAJH0gH3UcWiHcbWD2mIkBuvMa/RYn4BrvLMZiDJvBgQB7LPwYG+Y/KfcRl+p+idSqOGaR8Lod3tlI/n5qbp7ARXwTzmAMAGI2giJ6Xn2Qs4a+cpB82zY8vL+y6bcYRMm8Wm4sR69UvxSwtJFjA1MCdRPf2VUwJbwgkEsfDxoP0jsnYalLQHtgg37/ALfkio4kh7vwuaHNI+EaCJ9Up+NBLTeHtMO0gg6FTLYGnEUQMryb8xz1EpNZ1nRo7LMbRqfyKMvBaGO+ZoAI2dlt9RqkcKcXi9iw+8jSPdZe+4GGpnsJmWgctJMdRBV+PAnlZ07zEo6dCMpZpOb0iI9LpWIblc6LgP0/qaC30N1igLxzy4tc0kgNB30J3CCrVJeZmIyz1P8AAn4dmUtBcYd5Aem1+YIQV+HO8x/C4kDoY16KgmFxVQMadhqTraf2TMXjJaHQC07wbEi4tfYoeItJ+HXIZA0ILZB+gVYJjmhlNxuXEjuAHCe7Z9lpAIYrMLkAkRPSZFx6X6Ks3wiXTJExa40JmRprCRhWf5mR7fI/OAY0cwZhceoW7CvAGUkj8LtNNWkbEeohHECWtIIabtLc0720I57j0SGUX2AIJY4kEHVjrjvcO91tdiGutmzGmZAy5XZXTIMet+fZINaC82IHxfiA1B9Rm9VHtsC81wTYGACZBB5H+boHUWyQbAwARcEOMtnsR9SiOKBGVwkG8jTmHD0ScS4EAAwHbjbl6aqWwLwUAltQEFpgPHqLgbfutT2gzAIi4PykdIWDD1SMx1IOV466hwRCrDHlkkGHgHbMLgfVbluyGlvyOBuCRA0LTf1Fwl1jt1zM9xY/zYpOFxLXEAbmQex39lnxFcuc03uSCDs9jwfyIhIwd0DTiHTSEHzNJv3bum4XEgzTm4BidA4ASBzGi57HAvqNOhBIP+5sH6LK6qQXuA1a228uiRHo8ei6x0slQN1dxbDnWIBa4f6pLf1+iXUxTi4F7hLmacmuBAMep+iVUxGZtMOJIuH75tCGyO0el0ptdziSWNL4LjmzkASQGjJsP0K7aelyiGg4/MYFw0knnGVxMeqRVxZgnpadtAR/OaQMRma4ZQDf4TYg2sfoiewkW2tytoZ7Aj2XTBRfQW+g7D1IMyYvJvAzNH1j8gjw+NyiC6YIjf5pv/tWagS0jWbzOpGxA9J9EdHR3lmG3jcaD8gszin1B1TWn4YAFrkKLn099dTEd1a5eWi2xPlObLPlsZFpItF51HLZbcFUY4iCA9jmug2EOaNOXYo8Nw/zzTdMg5muaQTEwb66/wAKU9kknKAYIfYZha0bPaTG2YSNVW0ynXo5R52/C45S12jSJGUHv+i0vezMDms5pY8H5gBaeotfosWBYK+HztPmc0FzSNx5Z9wd9OSujTDhkcC1403kgQIOxIIHVcZRabBdagMpHKAZv5SPpEg9uy57KxzGgdYMZtCbZfQ6f2Ti1w8wLrAi1iQy7hG5gkR26xOJYUudTrUry0A+k6zp/wBFIpd/jAfCauem+m60DyneBy6jzfRJFItflAkEyRrBiRHfY9loePKTBDruaRNn/MwjqDI7FE1pc4EiRl2EEtImJHIgx6I2gCysGw4WAAkG+WTIJ6TATqdIZs1NwE6i/Mx9SjxeEkipSOnleHfM2JB5EXb77Fc+g5zdTlMkNnScwLQSNjpP7LDVrYGrEVHZTs+mcwvGYcx3aPom0ce1wbnHxkNJ5OY7yyeUiOyS1ktIuWgeZpMOYTexOguP1WWphCxjgZIzZwTuCS1w6fEOyiSB0HBxZk3ZVkdQfMD7uK1MxgyZ9S2Q7nGpHcD6Bc2nXAkhx82UX2JpmO/mI9uyCjU8oe2PMC/LyeAJ9I/MrNPuU6fEqobkfMj4QRuDc6cgJ9XBXiSGtaCR8pa7ll17iD9VlFVrWWbmZnAaN220+h7yEOJeC5lMXazwy3nkygZu+bboqACHMqVQDEjN/S4AX/4z2K0VG5qTnNEPbBgd2ud6jKfZKxIaWB4nPSaC4TZ7JcP2PYhDjsQadRsD/LNJ49S4OH0DQr16fKBhr4jPSbUFjAaY3s4ZXdNvVY/vjm02uHxlzWEHR7JOZp5wZ/8AZahRAp1G6tLgP6TmBMHbWR/UVmxTjAaR5s1Sowg/M3KT7jOV6NNRbqu/6AZQxRDaZDiQ19SmZ3aMxAPUQ4T2S8PiywPpOB8sm20l0Ob7i3Xsk0W5aMnfxDbm1of6+UvTsSW53OyknwpPmERDxpHNh35LeMbar4n/ACBdHGlrWncPaHC12Fri0/8ArC0YrHGm5rdi9puCfKWnNHZcegCKjIvJg9m2I9ALdCtNXzENkSxzsn+ohxlntEdQBuuktGOXsKCfXayo0t+B8OjWzs0x7ptSscrabpMOaWn+kmWk9ojuuc1rclNp+IC2tmuykXHdx9Vo4i+HOc0zlLm9iHui3qb9AuktNOSXzboRo0YcA1jNpLo6lpOZo6xBhYW1HFoveWsN7nzGZA6Hf8KmIAyhwmS0uPdxLCenwkpNSDDiY8zpMbmNRr69V004Lr/z8gkNpDM4tGs7aGLi23L+6a5pgudIb8puCIuYI9e6ThaI3EkkQ0XMDUAC8zaSCEp2JgkOEA7G5HNpm62429hVvY0NfaGm4IJk6iSQQ2IiI31lbKGK1vExfrEabgysBZ5pAsGgAElxcAd55XG2yMsGW8jQGLxfLMdyPSPXnOMWRpHRw1QakC8xGhN+XI2tzVurDKDbQk84trz/AOiuXSlhDSJ1tLoIF3EwQYudwULy4EGIIZMTpldc+xWHopvqMTs+LBIF79tbqLk04ga3AOjjqAdQoub0ETE9jSqOqZKgAa9jhm0ggxJnWVpGSoXBwGa4IjkRefb3VqLwN9TRlw2HFLzMNh5X8pnUdZIMb803E1oktMfC6Nd7gTpfl/eKKZNvcBUarXF1vmIdpLXaBzT13SaNIkFgPm2MWc0mQHD3HqrUROnQCa9pc8ybgkjUGAHDXcSCD1VYOWODNmwWn/QdP1CtRRukBb3OZvLGy4c4aYP0cR/tSOIMBDgNI8zNiBHwnYgEEekyooqn0ZReHdkqDzS14awGDdr2AtJHrp1jZavEDQ1wdlDneHUabjPAAA1kGD+qii21bBzuI04qvbYHNTLY0a7Lt0ke3ZSmS3I4N0LZvsZzCOsf8QqUWpPZENr6Px0wYLS14I5s07yCgrvDmZmyDa
m7oS4QW9DDvZRRcV+36lLqujznSBn6tqNLXD3Dvok4ioZc2bgAt708s+7B/wAjyUUWoK/n0AviDwwPDQMvlmdTleGOJ9AQFkxT9I1pOfUb1B8N7Z9XEdgoovRora/r9kC61AeE8MJHh1A5k3OVwc0j2BSKRPjuE/8AiydbtJt/uJUUW9N2pX7/AGILe3LJB+EU6g9W5HfUNQYyhll06VHEdM2Y+8hqii7RbtfOCop1Muc18j/4m1HD/YLe6XXIuDchmb3uPWw91FFuG8q4HczNqeUZjYtEiJloeXEBKcARI+adedzoNNforUXsSrf3N0Si9wE3NOYdpEW0bKmIDZsYkHadIMqKIt5EXUN2ILRlJlp1B5Q0SOR0PutmUhouSOu18sehBjsoouOqkkn7mZdEXiYdMT5mQfTUn6+29ldPzGDAJmm7l5mz+/uoouL2iZFVWutl0ygHuLfkAoootJ7Cz//Z";', max_length=15000),
),
]
|
n = int(input())
card_numbers = list(map(int, input().split()))
result = [0, 0]
i = 0
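# players alternate turns; result[0] collects the first player's total, result[1] the second's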
while card_numbers:
    # take the larger end card; pop by position so an equal card in the middle is never removed
    if card_numbers[0] >= card_numbers[-1]:
        result[i % 2] += card_numbers.pop(0)
    else:
        result[i % 2] += card_numbers.pop()
    i += 1
print(*result)
|
print('===== Exercise 027 =====')
print("Write a program that reads a person's full name, then shows the first and last name separately")
nome = input('What is your full name? ').strip().split()
print('Your first name is {} and your last name is {}'.format(nome[0], nome[-1]))
|
import pygame
import time
import random
pygame.init()
wx = 600
wy = 600
WINDOW_SIZE = (wx,wy)
WINDOW_NAME = 'Slither'
#color definitions
white = (255,255,255)
orange = (255,100,50)
red = (255,0,0)
green = (10,155,50)
blue = (0,0,200)
window = pygame.display.set_mode(WINDOW_SIZE)
pygame.display.set_caption(WINDOW_NAME)
fps = 30
#produces centered graphics text on the window
def standard_text(txt,color,fontSize,cent_x = wx/2,cent_y = wy/2):
font = pygame.font.SysFont('comicsansms',fontSize)
text = font.render(txt,True,color)
window.blit(text,[cent_x - text.get_rect().width/2, cent_y + text.get_rect().height/2])
#produces centered graphics text on the window
#and enables the player to move up and down and choose
def menu_text(txt,currentPos,boldPos,isBold,color,fontSize,cent_x = wx/2,cent_y = wy/2):
if isBold[boldPos] and boldPos == currentPos:
font = pygame.font.SysFont('comicsansms',fontSize + 30)
text = font.render(txt,True,color)
window.blit(text,[cent_x - text.get_rect().width/2, cent_y])
else:
font = pygame.font.SysFont('comicsansms',fontSize)
text = font.render(txt,True,color)
window.blit(text,[cent_x - text.get_rect().width/2, cent_y + text.get_rect().height/2])
#displays the main menu
def main_menu():
    isBold = [] #bool 1-D array which decides if the current menu choice shall appear as bold
    isBold.append(True) #the first choice will initially be shown as bold
isBold.append(False)
isBold.append(False)
boldPos = 0
gameEnter = False
while not gameEnter:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
if boldPos == 0:
gameLoop()
elif boldPos == 1:
Instructions()
else:
pygame.quit()
elif event.key == pygame.K_DOWN and boldPos != 2:
boldPos += 1
isBold[boldPos] = True
isBold[boldPos - 1] = False
elif event.key == pygame.K_UP and boldPos != 0:
boldPos -= 1
isBold[boldPos] = True
isBold[boldPos + 1] = False
window.fill(green)
currentPos = 0
menu_text('Play',currentPos,boldPos,isBold,orange,30,wx/2,wy/4)
currentPos += 1
menu_text('Instructions',currentPos,boldPos,isBold,orange,30,wx/2,wy/2)
currentPos += 1
menu_text('Quit',currentPos,boldPos,isBold,orange,30,wx/2,3*wy/4)
pygame.display.update()
def Instructions():
mainMenu = False
while not mainMenu:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN or event.key == pygame.K_BACKSPACE:
main_menu()
window.fill(green)
standard_text("Use 'up', 'down', 'right', 'left' keys to guide the snake",white,20,wx/2,wy/8)
standard_text("Use 'spacebar' to pause",white,20,wx/2,wy/4)
standard_text("Your goal is to eat the apple as many times as possible",white,20,wx/2,3*wy/8)
standard_text("Do not crash yourself",white,20,wx/2,wy/2)
standard_text("Good luck!",white,20,wx/2,5*wy/8)
standard_text("Main menu",orange,40,wx/2,6*wy/8)
pygame.display.update()
def pause():
while True:
standard_text("pause",orange,25,wx/2,10)
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
return
def gameLoop():
clock = pygame.time.Clock()
#direction --> used in order to prevent the player from going backwards
direction = ''
#rectangle's and apple's dimensions --> dim_x = dim_y = dim
dim = 10
#rectangle's initial position --> random
head_x = round(random.randint(2*dim, wx - 3*dim)/10.0)*10.0
head_y = round(random.randint(2*dim, wy - 3*dim)/10.0)*10.0
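    # coordinates are snapped to multiples of dim (10 px) so snake and apple positions compare exactly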
#rectangle's step
dx = 0
dy = 0
#apple's initial position --> random
apple_x = round(random.randint(dim,wx - 2*dim)/10.0)*10.0
apple_y = round(random.randint(dim,wy - 2*dim)/10.0)*10.0
snakeList = []
snakeLength = 2
while True:
if head_x <= 0 or (head_x + dim) >= wx or head_y <= 0 or (head_y + dim) >= wy:
game_over_menu() #you touched the borders...
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
if event.type == pygame.KEYDOWN:
#break is used in order to prevent the player from
#pressing two keys simultaneously
if event.key == pygame.K_SPACE:
pause()
elif event.key == pygame.K_RIGHT and direction != 'left':
dx = dim
dy = 0
direction = 'right'
break
elif event.key == pygame.K_LEFT and direction != 'right':
dx = -dim
dy = 0
direction = 'left'
break
elif event.key == pygame.K_DOWN and direction != 'up':
dx = 0
dy = dim
direction = 'down'
break
elif event.key == pygame.K_UP and direction != 'down':
dx = 0
dy = -dim
direction = 'up'
break
head_x += dx
head_y += dy
#decide if the apple was eaten. If it was, then update its position
if head_x == apple_x and head_y == apple_y:
apple_x = round(random.randint(dim, wx - 2*dim)/10.0)*10.0
apple_y = round(random.randint(dim, wy - 2*dim)/10.0)*10.0
snakeLength += 1
window.fill(green)
pygame.draw.rect(window,red,[apple_x,apple_y,dim,dim]) #draw apple
snakeHead = []
snakeHead.append(head_x)
snakeHead.append(head_y)
snakeList.append(snakeHead)
if len(snakeList) >= snakeLength:
del snakeList[0]
#draw snake as a list of rectangles
for XnY in snakeList:
pygame.draw.rect(window,blue,[XnY[0],XnY[1],dim,dim])
for rectangle in snakeList[:-1]:
if rectangle == snakeHead:
game_over_menu()
pygame.display.update()
clock.tick(fps)
#displays the main menu
def game_over_menu():
    isBold = [] #bool 1-D array which decides if the current menu choice shall appear as bold
    isBold.append(True) #the first choice will initially be shown as bold
isBold.append(False)
boldPos = 0
entered = False
while not entered:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
if boldPos == 0:
gameLoop()
if boldPos == 1:
pygame.quit()
elif event.key == pygame.K_DOWN and boldPos != 1:
boldPos += 1
isBold[boldPos] = True
isBold[boldPos - 1] = False
elif event.key == pygame.K_UP and boldPos != 0:
boldPos -= 1
isBold[boldPos] = True
isBold[boldPos + 1] = False
window.fill(green)
standard_text('Game over',red,60,wx/2,wy/8)
currentPos = 0
menu_text('Play again',currentPos,boldPos,isBold,orange,30,wx/2,wy/2)
currentPos += 1
menu_text('Quit',currentPos,boldPos,isBold,orange,30,wx/2,6*wy/8)
pygame.display.update()
##################################
main_menu()
gameLoop()
#if you lose, then
game_over_menu()
pygame.quit()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def score(seq):
    """Greed dice scoring: a triple of 1s is worth 1000, any other triple is
    worth 100 * its face value, and leftover single 1s and 5s are worth 100
    and 50 respectively."""
    result = 0
    seq = list(seq)  # work on a copy so the caller's list is not mutated
    if len(seq) == 0:
        return result
    # score triples first, removing the dice they consume; iterating over a
    # set avoids mutating the list we are looping over
    for num in set(seq):
        while seq.count(num) >= 3:
            if num == 1:
                result += 1000
            else:
                result += num * 100
            for trash in range(3):
                seq.remove(num)
    # score the remaining single 1s and 5s
    for num in seq:
        if num == 1:
            result += 100
        elif num == 5:
            result += 50
    return result

print score([5])
print '^' * 30
print score([2, 5, 2, 2, 3])
print '^' * 30
print score([1, 5, 5, 1])
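# Worked example under the rules above (hypothetical roll, not a kata test):
#   score([1, 1, 1, 5, 1]) -> triple of 1s = 1000, then the leftover 1 and 5
#   add 100 + 50, for a total of 1150.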
|
from torch.utils.data import DataLoader
from .dataset_lmdb import Dataset
from .sampler import StratifiedSampler
def get_datasets(args):
    train_set = Dataset(args, 'train')
    val_set = Dataset(args, 'val')
    test_set = Dataset(args, 'test')
return train_set, val_set, test_set
def get_data_loaders(train_set,
val_set,
test_set,
train_batch_size,
test_batch_size,
num_workers=4,
                     rpos=1,
                     rneg=4,
                     random_state=1234):
    sampler = StratifiedSampler(train_set.get_labels(),
                                train_batch_size,
                                rpos=rpos,
                                rneg=rneg,
                                random_state=random_state)
train_loader = DataLoader(train_set,
batch_size=sampler.real_batch_size,
                              # no shuffle: DataLoader forbids shuffle together with a custom sampler
sampler=sampler,
num_workers=num_workers)
val_loader = DataLoader(val_set,
batch_size=test_batch_size,
shuffle=True,
num_workers=num_workers)
test_loader = DataLoader(test_set,
batch_size=test_batch_size,
shuffle=True,
num_workers=num_workers)
return train_loader, val_loader, test_loader
__all__ = ['Dataset', 'get_datasets', 'get_data_loaders', 'StratifiedSampler']
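# A minimal usage sketch (assumes an `args` namespace accepted by Dataset;
# the batch sizes are illustrative):
#   train_set, val_set, test_set = get_datasets(args)
#   train_loader, val_loader, test_loader = get_data_loaders(
#       train_set, val_set, test_set,
#       train_batch_size=32, test_batch_size=64)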
|
from itertools import chain
try:
    from itertools import izip  # Python 2
except ImportError:
    izip = zip  # Python 3: zip is already lazy
def isSolved(board):
""" is_solved == PEP8 (forced mixedCase by CodeWars) """
cats_game = True
diagonals = [[], []]
for i, row in enumerate(board):
current = set(row)
if current == {1}:
return 1
elif current == {2}:
return 2
if cats_game and 0 in current:
cats_game = False
diagonals[0].append(row[i]) # nw -> se
diagonals[1].append(row[2 - i]) # ne -> sw
for col in chain(izip(*board), diagonals):
current = set(col)
if current == {1}:
return 1
elif current == {2}:
return 2
return 0 if cats_game else -1
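# Example boards (illustrative, not from the kata's test suite):
#   isSolved([[1, 1, 1], [0, 2, 2], [0, 0, 0]])  ->  1  (row of 1s)
#   isSolved([[2, 1, 2], [2, 1, 1], [1, 2, 1]])  ->  0  (cat's game)
#   isSolved([[0, 0, 0], [0, 0, 0], [0, 0, 0]])  -> -1  (still in progress)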
|
from rest_framework.permissions import BasePermission
class IsLoggedUser(BasePermission):
"""
Only exposes the endpoint for the logged user
"""
def has_object_permission(self, request, view, obj):
return request.user == obj
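# A minimal usage sketch (hypothetical view; assumes the object being checked
# is the User instance itself):
#   class UserDetailView(RetrieveAPIView):
#       queryset = User.objects.all()
#       serializer_class = UserSerializer
#       permission_classes = [IsLoggedUser]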
|
# -*- coding: utf-8 -*-
"""
django_async_test.tests.testcase
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for :py:class:`django_async_test.TestCase`.
"""
import unittest
from django.test import TestCase
from django_async_test.tests.testapp.models import ModelWithBasicField
class TestCaseTestCase(TestCase):
def assertTests(self, tests):
suite = unittest.TestSuite()
suite.addTests(tests)
result = unittest.TestResult()
suite.run(result)
if len(result.errors) > 0:
for testcase, traceback in result.errors:
print(traceback)
if len(result.failures) > 0:
for testcase, traceback in result.failures:
print(traceback)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
def test_transaction_support(self):
"""
Test transaction support of :py:class:`django_async_test.TestCase`.
"""
from django_async_test.tests.testapp.util import DummyTestCase
self.assertTests([
DummyTestCase('test_transaction_support'),
DummyTestCase('test_transaction_support')]
)
self.assertEqual(ModelWithBasicField.objects.count(), 0)
def test_coroutine(self):
"""
Test coroutine support of :py:class:`django_async_test.TestCase`.
"""
from django_async_test.tests.testapp.util import DummyTestCase
self.assertTests([DummyTestCase('test_coroutine')])
def test_transactional_coroutine(self):
"""
        Test transactional coroutine support of :py:class:`django_async_test.TestCase`.
"""
from django_async_test.tests.testapp.util import DummyTestCase
self.assertTests([
DummyTestCase('test_transactional_coroutine'),
DummyTestCase('test_transactional_coroutine')]
)
self.assertEqual(ModelWithBasicField.objects.count(), 0)
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 24 10:40:34 2019
@author: andre
"""
import os
import sys
import h5py
import numpy as np
import pandas as pd
import copy
from scipy.signal import butter, lfilter
import scipy.ndimage
from sklearn import preprocessing as pre
from matplotlib import pyplot as plt
# Do some path and import setup
from os.path import expanduser
home = expanduser("~")
sys.path.append(os.path.join(home, "dev", "dev_utils"))  # '~' is not expanded inside sys.path entries
from tic_toc import tic,toc
# =============================================================================
# The main purpose of this script is to generate a csv file and data directory
# with the 2 raw signals and then the 'answer', i.e. the beamformed and
# processed signal, from the hdf5 files processed in MATLAB
# =============================================================================
tic()
"""Read in all the text files and hdf5 files"""
def read_raw_microphone_data(folderPath, fileType='.dat'):
fileList = [f for f in os.listdir(folderPath) if f.endswith(fileType)]
fileList.sort()
DatList = [np.loadtxt(os.path.join(folderPath, dat_file)) for dat_file in fileList]
DatList = np.array(DatList)
DatListExist = True
print('Raw .dat files read in')
return fileList, DatList, DatListExist
def read_hdf5(hdf5fullpath, fileType='.hdf5'):
'''This takes the hdf5 file that is produced in MATLAB and imports the field (file name)
Cleaned Acoustic Pressure, Raw Pressure, and the SmSpectrum (smoothed spectrum)
then plots onto a spectogram and saves as a png'''
hdfFileList = [f for f in os.listdir(hdf5fullpath) if f.endswith(fileType)]
df_list = []
    for i in range(len(hdfFileList)):
        # the [:] reads materialize each dataset, so the file can be closed
        with h5py.File(os.path.join(hdf5fullpath, hdfFileList[i]), 'r') as f:
            key_list = list(f)
            # print(key_list)
            CleanedAcousticPressure = f[key_list[0]][:]
            RawPressure = f[key_list[1]][:]
            SmSpectrum = f[key_list[2]][:]
            field = f[key_list[3]][:]
        # use a fresh name here so the loop index `i` is not shadowed
        field = [s.decode('utf-8') for s in field]
# Now, the next thing on the list is to create a csv file with all the data and
# label it with wall noise, or noise of object. The allocation from an hdf5 to
# a dict could be made general, but there should really only be the fields
# below for this specific research project.
data_dict = {key_list[3]: field,
key_list[0]: list(RawPressure),
key_list[1]: list(CleanedAcousticPressure),
key_list[2]: list(SmSpectrum),
}
keyList1 = [key_list[3],key_list[0],key_list[1],key_list[2]]
# print(keyList1)
df = pd.DataFrame(data_dict, columns=keyList1)
# print(df[key_list[0]].shape)
df_list.append(df)
return hdfFileList, df_list, key_list
#
#def datfiles_to_csv():
def folder_helper(dirName, parentPath):
if not os.path.exists(os.path.join(parentPath, dirName)):
os.makedirs(os.path.join(parentPath, dirName))
# print("Directory " , dirName , " Created ")
return dirName
else:
# print("Directory " , dirName , " already exists")
return dirName
def saveSpecPlots(Data, MicNum, parentPath, PlotDirectory=None, key_list=None,
hdf5plotFolder=None, fileList=None):
    ''' The main reason this function looks so complicated is that it is
    equipped to handle either the read-in hdf5 file or the raw dat files'''
if isinstance(Data, np.ndarray):
temp = folder_helper(PlotDirectory, parentPath)
        for i in range(len(Data)):
name = fileList[i][:9]+ '_{}_{}'.format(MicNum, i)
plotName = os.getcwd() + '\\'+ PlotDirectory +'\\' + name
plt.specgram(Data[i, :, MicNum], cmap='jet', Fs=48000)
plt.title(name)
plt.xlabel('Time [S]')
plt.ylabel('Frequency [Hz]')
plt.savefig(plotName)
plt.close()
elif isinstance(Data, list):
        for hdfFiles in range(len(Data)):
            # Take the pd.DataFrame out of the list passed in (not the global)
            data = Data[hdfFiles]
            # Make the main directory and one subdirectory per hdf5 key
subDIR = folder_helper(hdf5plotFolder, parentPath)
print(subDIR)
for folder in key_list:
if folder == 'field':
continue
else:
subsubDir = folder_helper(folder, subDIR)
for ind, row in data.iterrows():
name = data['field'][ind]
plotName = name[:9] + '_{}'.format(ind)
fileName = os.path.join(parentPath, subDIR, subsubDir)+ '\\' + plotName
plt.specgram(data[folder][ind], cmap='jet', Fs=48000)
plt.title(plotName)
plt.xlabel('Time [S]')
plt.ylabel('Frequency [Hz]')
plt.savefig(fileName)
plt.close()
else:
        raise Exception('saveSpecPlots accepts a 3D np.ndarray or a list of pd.DataFrame objects only')
# =============================================================================
# The next thing to do would be to import all the processed .hdf5 files from
# MATLAB and sync up the first and last (raw) microphone signals with the
# processed (finalized) signal, where it is then batch fed into the model
# (research this: csv/pandas dataframe? or numpy array?)
# =============================================================================
## INPUT COMMANDS BELOW
# Raw data into spec
'''Next thing to do is saveSpecPlots for the hdf5 file... need to figure out how to
save the cleaned spectrum. Maybe put each type into a different folder and then
generate a csv with the _1 and _6 signals, with the target in another column.
'''
# Most of the Spectrograms and Inversion are taken from: https://gist.github.com/kastnerkyle/179d6e9a88202ab0a2fe
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = lfilter(b, a, data)
return y
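# Example usage (illustrative values; this script's own cut-offs are set in the
# Parameters section below):
#   filtered = butter_bandpass_filter(signal, lowcut=1000, highcut=20000, fs=48000)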
def overlap(X, window_size, window_step):
"""
Create an overlapped version of X
Parameters
----------
X : ndarray, shape=(n_samples,)
Input signal to window and overlap
window_size : int
Size of windows to take
window_step : int
Step size between windows
Returns
-------
X_strided : shape=(n_windows, window_size)
2D array of overlapped X
"""
if window_size % 2 != 0:
raise ValueError("Window size must be even!")
# Make sure there are an even number of windows before stridetricks
append = np.zeros((window_size - len(X) % window_size))
X = np.hstack((X, append))
ws = window_size
ss = window_step
a = X
valid = len(a) - ws
nw = (valid) // ss
out = np.ndarray((int(nw),ws),dtype = a.dtype)
for i in range(int(nw)):
# "slide" the window along the samples
start = i * ss
stop = start + ws
out[i] = a[start : stop]
return out
def stft(X, fftsize=128, step=65, mean_normalize=True, real=False,
compute_onesided=True):
"""
Compute STFT for 1D real valued input X
"""
if real:
local_fft = np.fft.rfft
cut = -1
else:
local_fft = np.fft.fft
cut = None
if compute_onesided:
cut = fftsize // 2
if mean_normalize:
X -= X.mean()
X = overlap(X, fftsize, step)
size = fftsize
win = 0.54 - .46 * np.cos(2 * np.pi * np.arange(size) / (size - 1))
X = X * win[None]
X = local_fft(X)[:, :cut]
return X
def pretty_spectrogram(d,log = True, thresh= 5, fft_size = 512, step_size = 64):
"""
creates a spectrogram
log: take the log of the spectrgram
thresh: threshold minimum power for log spectrogram
"""
specgram = np.abs(stft(d, fftsize=fft_size, step=step_size, real=False, compute_onesided=True))
if log == True:
specgram /= specgram.max() # volume normalize to max 1
specgram = np.log10(specgram) # take log
specgram[specgram < -thresh] = -thresh # set anything less than the threshold as the threshold
else:
specgram[specgram < thresh] = thresh # set anything less than the threshold as the threshold
return specgram
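# A quick sanity check (synthetic tone, not project data): a pure 5 kHz sine
# sampled at 48 kHz should show up as one bright horizontal band.
#   tone = np.sin(2 * np.pi * 5000 * np.arange(48000) / 48000.0)
#   spec = pretty_spectrogram(tone, log=True, thresh=5, fft_size=512, step_size=64)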
# Also mostly modified or taken from https://gist.github.com/kastnerkyle/179d6e9a88202ab0a2fe
def invert_pretty_spectrogram(X_s, log = True, fft_size = 512, step_size = 512 // 4, n_iter = 10):
if log == True:
X_s = np.power(10, X_s)
X_s = np.concatenate([X_s, X_s[:, ::-1]], axis=1)
X_t = iterate_invert_spectrogram(X_s, fft_size, step_size, n_iter=n_iter)
return X_t
def iterate_invert_spectrogram(X_s, fftsize, step, n_iter=10, verbose=False):
"""
Under MSR-LA License
Based on MATLAB implementation from Spectrogram Inversion Toolbox
References
----------
D. Griffin and J. Lim. Signal estimation from modified
short-time Fourier transform. IEEE Trans. Acoust. Speech
Signal Process., 32(2):236-243, 1984.
Malcolm Slaney, Daniel Naar and Richard F. Lyon. Auditory
Model Inversion for Sound Separation. Proc. IEEE-ICASSP,
Adelaide, 1994, II.77-80.
Xinglei Zhu, G. Beauregard, L. Wyse. Real-Time Signal
Estimation from Modified Short-Time Fourier Transform
Magnitude Spectra. IEEE Transactions on Audio Speech and
Language Processing, 08/2007.
"""
reg = np.max(X_s) / 1E8
X_best = copy.deepcopy(X_s)
for i in range(n_iter):
if verbose:
print("Runnning iter %i" % i)
if i == 0:
X_t = invert_spectrogram(X_best, step, calculate_offset=True,
set_zero_phase=True)
else:
# Calculate offset was False in the MATLAB version
# but in mine it massively improves the result
# Possible bug in my impl?
X_t = invert_spectrogram(X_best, step, calculate_offset=True,
set_zero_phase=False)
est = stft(X_t, fftsize=fftsize, step=step, compute_onesided=False)
phase = est / np.maximum(reg, np.abs(est))
X_best = X_s * phase[:len(X_s)]
X_t = invert_spectrogram(X_best, step, calculate_offset=True,
set_zero_phase=False)
return np.real(X_t)
def invert_spectrogram(X_s, step, calculate_offset=True, set_zero_phase=True):
"""
Under MSR-LA License
Based on MATLAB implementation from Spectrogram Inversion Toolbox
References
----------
D. Griffin and J. Lim. Signal estimation from modified
short-time Fourier transform. IEEE Trans. Acoust. Speech
Signal Process., 32(2):236-243, 1984.
Malcolm Slaney, Daniel Naar and Richard F. Lyon. Auditory
Model Inversion for Sound Separation. Proc. IEEE-ICASSP,
Adelaide, 1994, II.77-80.
Xinglei Zhu, G. Beauregard, L. Wyse. Real-Time Signal
Estimation from Modified Short-Time Fourier Transform
Magnitude Spectra. IEEE Transactions on Audio Speech and
Language Processing, 08/2007.
"""
size = int(X_s.shape[1] // 2)
wave = np.zeros((X_s.shape[0] * step + size))
# Getting overflow warnings with 32 bit...
wave = wave.astype('float64')
total_windowing_sum = np.zeros((X_s.shape[0] * step + size))
win = 0.54 - .46 * np.cos(2 * np.pi * np.arange(size) / (size - 1))
est_start = int(size // 2) - 1
est_end = est_start + size
for i in range(X_s.shape[0]):
wave_start = int(step * i)
wave_end = wave_start + size
if set_zero_phase:
spectral_slice = X_s[i].real + 0j
else:
# already complex
spectral_slice = X_s[i]
# Don't need fftshift due to different impl.
wave_est = np.real(np.fft.ifft(spectral_slice))[::-1]
if calculate_offset and i > 0:
offset_size = size - step
if offset_size <= 0:
print("WARNING: Large step size >50\% detected! "
"This code works best with high overlap - try "
"with 75% or greater")
offset_size = step
offset = xcorr_offset(wave[wave_start:wave_start + offset_size],
wave_est[est_start:est_start + offset_size])
else:
offset = 0
wave[wave_start:wave_end] += win * wave_est[
est_start - offset:est_end - offset]
total_windowing_sum[wave_start:wave_end] += win
    wave = np.real(wave) / (total_windowing_sum + Pref)  # Pref guards against division by zero where the window sum vanishes
return wave
def xcorr_offset(x1, x2):
"""
Under MSR-LA License
Based on MATLAB implementation from Spectrogram Inversion Toolbox
References
----------
D. Griffin and J. Lim. Signal estimation from modified
short-time Fourier transform. IEEE Trans. Acoust. Speech
Signal Process., 32(2):236-243, 1984.
Malcolm Slaney, Daniel Naar and Richard F. Lyon. Auditory
Model Inversion for Sound Separation. Proc. IEEE-ICASSP,
Adelaide, 1994, II.77-80.
Xinglei Zhu, G. Beauregard, L. Wyse. Real-Time Signal
Estimation from Modified Short-Time Fourier Transform
Magnitude Spectra. IEEE Transactions on Audio Speech and
Language Processing, 08/2007.
"""
x1 = x1 - x1.mean()
x2 = x2 - x2.mean()
frame_size = len(x2)
half = frame_size // 2
corrs = np.convolve(x1.astype('float32'), x2[::-1].astype('float32'))
corrs[:half] = -1E30
corrs[-half:] = -1E30
offset = corrs.argmax() - len(x1)
return offset
### Parameters ###
fft_size = int(2**12) # window size for the FFT
step_size = int(fft_size/16) # distance to slide along the window (in time)
spec_thresh = 3 # threshold for spectrograms (lower filters out more noise)
lowcut = 1000 # Hz # Low cut for our butter bandpass filter
highcut = 48000/2 # Hz # High cut for our butter bandpass filter
sampleRate = 48000 # DAQ Sample rate (S/sec)
NS = 48000 # Number of samples
fn = sampleRate/2 # maximum resolvable (Nyquist) frequency
NFFT = 2**12 # 4096 point FFT
NF = NFFT/2 # no. of points for the power spectrum
Pref = 20e-6 # Reference pressure
c = 343 # Speed of sound
MicArrayElements = 7
arraySpacing = 0.00858 # Array spacing (cm)
folderPath = home + '\\Dropbox (CSU Fullerton)\\EGME597_AB\\ML_DATA\\RAWDATA'
parentPath = os.getcwd()
fileList, DatList, DatListExist = read_raw_microphone_data(folderPath)
saveSpecPlots(DatList, MicNum=1, PlotDirectory='RawSTFTPlots', parentPath=parentPath,
fileList=fileList)
#
saveSpecPlots(DatList, MicNum=6, PlotDirectory='RawSTFTPlots', parentPath=parentPath,
fileList=fileList)
# hdf5 file processing to stft in separate folders
hdf5fullpath = home + '\\Dropbox (CSU Fullerton)\\EGME597_AB\\ML_DATA'
hdfFileList, df_list, key_list = read_hdf5(hdf5fullpath)
saveSpecPlots(df_list, MicNum=1, parentPath=parentPath, key_list=key_list,
hdf5plotFolder='ProcessedSTFTPlots')
saveSpecPlots(df_list, MicNum=6, parentPath=parentPath, key_list=key_list,
hdf5plotFolder='ProcessedSTFTPlots')
print('STFT plots generated!')
data = df_list[0]
wav_spectrogram = pretty_spectrogram(data['SmSpecturm'][121], fft_size = fft_size,
step_size = step_size, log = True, thresh = spec_thresh)
fig, ax = plt.subplots(nrows=1,ncols=1, figsize=(20,4))
cax = ax.matshow(np.transpose(wav_spectrogram), interpolation='nearest', aspect='auto', cmap=plt.cm.jet, origin='lower')
fig.colorbar(cax)
plt.title('Raw Spectrogram')
#This recovery method is not working well right now. The original spectrum spans
# roughly -188 to 1400 in y, while the recovered one is very close to zero...
# Need to figure out what is causing this later.
recovered_audio_orig = invert_pretty_spectrogram(wav_spectrogram, fft_size = fft_size,
step_size = step_size, log = True, n_iter = 100)
fig1, ax1 = plt.subplots(nrows=1,ncols=1, figsize=(20,4))
plt.plot(np.linspace(0, recovered_audio_orig.shape[0], num=recovered_audio_orig.shape[0]), recovered_audio_orig)
plt.plot(np.linspace(0, 1400, num= 4094), pre.normalize(data['SmSpecturm'][121], axis=0))
plt.title('different plt')
toc()
# =============================================================================
# Next thing to do is save the data into a numpy array (csv style) to feed into LSTM
# =============================================================================
|
#!/usr/local/bin/python3
import socket
import subprocess
import sys
import argparse
from datetime import datetime
def scan_ports(remoteServer, start_port=1, end_port=1024):
    """
    Scan TCP ports start_port..end_port on remoteServer and report open ports.
    Arguments come from the command line (see the argparse setup in __main__).
    """
    # Clear the screen
    subprocess.call('clear', shell=True)
    start_port = int(start_port)
    end_port = int(end_port)
# Print a banner with information on which host we are about to scan
print("-" * 60)
print("Please wait, scanning remote host", remoteServer)
print("On ports: ", start_port, "to", end_port)
print("-" * 60)
print('\n')
# Check what time the scan started
begin_time = datetime.now()
# Take the user input of 'start_port' and 'end_port' numbers and place them in a range
# These are the port numbers to be scanned
try:
remoteServerIP = socket.gethostbyname(remoteServer)
openPorts = []
for port in range(start_port, end_port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
result = sock.connect_ex((remoteServerIP, port))
# print(result) - Can be used to test reply codes, 0=ok, 61=TCP RST, etc...
if result == 0:
print("Port {}: \t Open".format(port))
openPorts.append(port)
elif result == 61:
print("Port {}: \t Rejected by Host".format(port))
else:
print("Port {}: \t Timed Out".format(port))
sock.close()
# Error handling in the event host cannot be reached or no DNS available
except KeyboardInterrupt:
print("You pressed Ctrl+C")
sys.exit()
except socket.gaierror:
print('Hostname could not be resolved. Exiting')
sys.exit()
    except socket.error as err_msg:
        print('Socket creation failed. Error code: ' +
              str(err_msg.errno) + ' Error message: ' + str(err_msg.strerror))
        sys.exit()
# Check the time once scan is complete, and compare the start - end times.
end_time = datetime.now()
total = end_time - begin_time
# Print the scan time information
print('\n')
print('-' * 60)
print('Scanning Completed in: ', total)
print('Remote Host IP: ', remoteServerIP)
print('Open Ports:', openPorts)
print('-' * 60)
if __name__ == "__main__":
# Command line arguments
parser = argparse.ArgumentParser(description='Remote Port Scanner')
parser.add_argument('--remoteServer', action="store",
dest="remoteServerIP", default='localhost')
parser.add_argument('--start-port', action="store",
dest="start_port", default=1, type=int)
parser.add_argument('--end-port', action="store",
dest="end_port", default=100, type=int)
# Parse arguments
given_args = parser.parse_args()
remoteServerIP, start_port, end_port = given_args.remoteServerIP, given_args.start_port, given_args.end_port
scan_ports(remoteServerIP, start_port, end_port)
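# Example invocation (illustrative; use whatever name this file is saved under):
#   ./scanner.py --remoteServer localhost --start-port 1 --end-port 100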
|
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import collections
from copy import deepcopy
import json
import multiprocessing
import numpy as np
import os
import six
import tempfile
from six.moves import zip
from smqtk.algorithms.nn_index import NearestNeighborsIndex
from smqtk.exceptions import ReadOnlyError
from smqtk.representation import (
get_data_element_impls,
get_descriptor_index_impls,
get_key_value_store_impls,
)
from smqtk.representation.descriptor_element import elements_to_matrix
from smqtk.utils import plugin, merge_dict, metrics
# Requires FAISS bindings
try:
import faiss
except ImportError:
faiss = None
class FaissNearestNeighborsIndex (NearestNeighborsIndex):
"""
Nearest-neighbor computation using the FAISS library.
"""
@staticmethod
def gpu_supported():
"""
:return: If FAISS seems to have GPU support or not.
:rtype: bool
"""
# Test if the GPU version is available
if hasattr(faiss, "StandardGpuResources"):
return True
else:
return False
@classmethod
def is_usable(cls):
# if underlying library is not found, the import above will error
return faiss is not None
@classmethod
def get_default_config(cls):
"""
Generate and return a default configuration dictionary for this class.
This will be primarily used for generating what the configuration
dictionary would look like for this class without instantiating it.
By default, we observe what this class's constructor takes as
arguments, turning those argument names into configuration dictionary
keys. If any of those arguments have defaults, we will add those
values into the configuration dictionary appropriately. The dictionary
returned should only contain JSON compliant value types.
        It is not guaranteed that the configuration dictionary returned
from this method is valid for construction of an instance of this
class.
:return: Default configuration dictionary for the class.
:rtype: dict
"""
default = super(FaissNearestNeighborsIndex, cls).get_default_config()
data_element_default_config = plugin.make_config(
get_data_element_impls())
default['index_element'] = data_element_default_config
default['index_param_element'] = deepcopy(data_element_default_config)
di_default = plugin.make_config(get_descriptor_index_impls())
default['descriptor_set'] = di_default
kvs_default = plugin.make_config(get_key_value_store_impls())
default['idx2uid_kvs'] = kvs_default
default['uid2idx_kvs'] = deepcopy(kvs_default)
return default
@classmethod
def from_config(cls, config_dict, merge_default=True):
"""
Instantiate a new instance of this class given the configuration
JSON-compliant dictionary encapsulating initialization arguments.
        This method should not be called via super unless an instance of the
class is desired.
:param config_dict: JSON compliant dictionary encapsulating
a configuration.
:type config_dict: dict
:param merge_default: Merge the given configuration on top of the
default provided by ``get_default_config``.
:type merge_default: bool
:return: Constructed instance from the provided config.
        :rtype: FaissNearestNeighborsIndex
"""
if merge_default:
cfg = cls.get_default_config()
merge_dict(cfg, config_dict)
else:
cfg = config_dict
cfg['descriptor_set'] = plugin.from_plugin_config(
cfg['descriptor_set'], get_descriptor_index_impls()
)
cfg['uid2idx_kvs'] = plugin.from_plugin_config(
cfg['uid2idx_kvs'], get_key_value_store_impls()
)
cfg['idx2uid_kvs'] = plugin.from_plugin_config(
cfg['idx2uid_kvs'], get_key_value_store_impls()
)
if (cfg['index_element'] and
cfg['index_element']['type']):
index_element = plugin.from_plugin_config(
cfg['index_element'], get_data_element_impls())
cfg['index_element'] = index_element
else:
cfg['index_element'] = None
if (cfg['index_param_element'] and
cfg['index_param_element']['type']):
index_param_element = plugin.from_plugin_config(
cfg['index_param_element'], get_data_element_impls())
cfg['index_param_element'] = index_param_element
else:
cfg['index_param_element'] = None
return super(FaissNearestNeighborsIndex, cls).from_config(cfg, False)
def __init__(self, descriptor_set, idx2uid_kvs, uid2idx_kvs,
index_element=None, index_param_element=None,
read_only=False, factory_string='IVF1,Flat',
use_multiprocessing=True, use_gpu=False, gpu_id=0,
random_seed=None):
"""
Initialize FAISS index properties. Does not contain a queryable index
until one is built via the ``build_index`` method, or loaded from
existing model files.
:param descriptor_set: Index in which DescriptorElements will be
stored.
:type descriptor_set: smqtk.representation.DescriptorIndex
:param idx2uid_kvs: Key-value storage mapping FAISS indexed vector
index to descriptor UID. This should be the inverse of
`uid2idx_kvs`.
:type idx2uid_kvs: smqtk.representation.KeyValueStore
:param uid2idx_kvs: Key-value storage mapping descriptor UIDs to FAISS
indexed vector index. This should be the inverse of `idx2uid_kvs`.
:type uid2idx_kvs: smqtk.representation.KeyValueStore
:param index_element: Optional DataElement used to load/store the
index. When None, the index will only be stored in memory.
:type index_element: None | smqtk.representation.DataElement
:param index_param_element: Optional DataElement used to load/store
the index parameters. When None, the index will only be stored in
memory.
:type index_param_element: None | smqtk.representation.DataElement
:param read_only: If True, `build_index` will error if there is an
existing index. False by default.
:type read_only: bool
:param factory_string: String to pass to FAISS' `index_factory`;
see the documentation [1] on this feature for more details.
TODO(john.moeller): Flat indexes are not supported, so set the
default to 'IVF1,Flat', which is essentially a flat index.
:type factory_string: str | unicode
:param use_multiprocessing: Whether or not to use discrete processes
as the parallelization agent vs python threads.
:type use_multiprocessing: bool
:param use_gpu: Use a GPU index if GPU support is available. A
RuntimeError is thrown during instance construction if GPU support
is not available and this flag is true. See the following for
FAISS GPU documentation and limitations:
https://github.com/facebookresearch/faiss/wiki/Faiss-on-the-GPU
:type use_gpu: bool
:param gpu_id: If the GPU implementation is available for FAISS
(automatically determined) use the GPU with this device number /
ID.
:type gpu_id: int
:param random_seed: Integer to use as the random number generator
seed.
:type random_seed: int | None
[1]: https://github.com/facebookresearch/faiss/wiki/High-level-interface-and-auto-tuning#index-factory
"""
super(FaissNearestNeighborsIndex, self).__init__()
if not isinstance(factory_string, six.string_types):
raise ValueError('The factory_string parameter must be a '
'recognized string type.')
self._descriptor_set = descriptor_set
self._idx2uid_kvs = idx2uid_kvs
self._uid2idx_kvs = uid2idx_kvs
self._index_element = index_element
self._index_param_element = index_param_element
self.read_only = read_only
self.factory_string = str(factory_string)
self.use_multiprocessing = use_multiprocessing
self._use_gpu = use_gpu
self._gpu_id = gpu_id
self.random_seed = None
if random_seed is not None:
self.random_seed = int(random_seed)
# Index value for the next added element. Reset to 0 on a build.
self._next_index = 0
        # Placeholder for an optional GPU resources reference. It only needs
        # to exist for the lifetime of the index converted with it.
self._gpu_resources = None
if self._use_gpu and not self.gpu_supported():
raise RuntimeError("Requested GPU use but FAISS does not seem to "
"support GPU functionality.")
# Lock for accessing FAISS model components.
self._model_lock = multiprocessing.RLock()
# Placeholder for FAISS model instance.
self._faiss_index = None
# Load the index/parameters if one exists
self._load_faiss_model()
def get_config(self):
config = {
"descriptor_set": plugin.to_plugin_config(self._descriptor_set),
"uid2idx_kvs": plugin.to_plugin_config(self._uid2idx_kvs),
"idx2uid_kvs": plugin.to_plugin_config(self._idx2uid_kvs),
"factory_string": self.factory_string,
"read_only": self.read_only,
"random_seed": self.random_seed,
"use_multiprocessing": self.use_multiprocessing,
"use_gpu": self._use_gpu,
"gpu_id": self._gpu_id,
}
if self._index_element:
config['index_element'] = plugin.to_plugin_config(
self._index_element)
if self._index_param_element:
config['index_param_element'] = plugin.to_plugin_config(
self._index_param_element)
return config
def _convert_index(self, faiss_index):
"""
Convert the given index to a GpuIndex if `use_gpu` is True, otherwise
return the index given (no-op).
:param faiss_index: Index to convert.
:type faiss_index: faiss.Index
:return: Optionally converted index.
:rtype: faiss.Index | faiss.GpuIndex
"""
# If we're to use a GPU index and what we're given isn't already a GPU
# index.
if self._use_gpu and not isinstance(faiss_index, faiss.GpuIndex):
self._log.debug("-> GPU-enabling index")
# New resources
self._gpu_resources = faiss.StandardGpuResources()
            # reuse the resources object created above rather than allocating
            # a second one
            faiss_index = faiss.index_cpu_to_gpu(self._gpu_resources,
                                                 self._gpu_id, faiss_index)
return faiss_index
def _index_factory_wrapper(self, d, factory_string):
"""
Create a FAISS index for the given descriptor dimensionality and
factory string.
This *always* produces an index using the L2 metric.
:param d: Integer indexed vector dimensionality.
:type d: int
:param factory_string: Factory string to drive index generation.
:type factory_string: str
:return: Constructed index.
:rtype: faiss.Index | faiss.GpuIndex
"""
self._log.debug("Creating index by factory: '%s'", factory_string)
index = faiss.index_factory(d, factory_string, faiss.METRIC_L2)
return self._convert_index(index)
def _has_model_data(self):
"""
Check if configured model files are configured and not empty.
"""
with self._model_lock:
return (self._index_element and
self._index_param_element and
not self._index_element.is_empty() and
not self._index_param_element.is_empty())
def _load_faiss_model(self):
"""
Load the FAISS model from the configured DataElement
"""
with self._model_lock:
if self._has_model_data():
# Load the binary index
tmp_fp = self._index_element.write_temp()
self._faiss_index = self._convert_index(
# As of Faiss 1.3.0, only str (not unicode) is
# accepted in Python 2.7
faiss.read_index(str(tmp_fp))
)
self._index_element.clean_temp()
# Params pickle include the build params + our local state
# params.
state = json.loads(self._index_param_element.get_bytes())
self.factory_string = state["factory_string"]
self.read_only = state["read_only"]
self.random_seed = state["random_seed"]
self.use_multiprocessing = state["use_multiprocessing"]
self._next_index = state["next_index"]
# Check that descriptor-set and kvstore instances match up in
# size.
assert len(self._descriptor_set) == len(self._uid2idx_kvs) == \
len(self._idx2uid_kvs) == self._faiss_index.ntotal, \
"Not all of our storage elements agree on size: " \
"len(dset, uid2idx, idx2uid, faiss_idx) = " \
"(%d, %d, %d, %d)" \
% (len(self._descriptor_set), len(self._uid2idx_kvs),
len(self._idx2uid_kvs), self._faiss_index.ntotal)
def _save_faiss_model(self):
"""
Save the index and parameters to the configured DataElements.
"""
with self._model_lock:
# Only write to cache elements if they are both writable.
writable = (self._index_element and
self._index_element.writable() and
self._index_param_element and
self._index_param_element.writable())
if writable:
self._log.debug("Storing index: %s", self._index_element)
# FAISS wants to write to a file, so make a temp file, then
# read it in, putting bytes into element.
fd, fp = tempfile.mkstemp()
try:
# Write function needs a CPU index instance, so bring it
# down from the GPU if necessary.
if self._use_gpu and isinstance(self._faiss_index,
faiss.GpuIndex):
to_write = faiss.index_gpu_to_cpu(self._faiss_index)
else:
to_write = self._faiss_index
faiss.write_index(to_write, fp)
# Use the file descriptor to create the file object.
# This avoids reopening the file and will automatically
# close the file descriptor on exiting the with block.
# fdopen() is required because in Python 2 open() does
# not accept a file descriptor.
with os.fdopen(fd, 'rb') as f:
self._index_element.set_bytes(f.read())
finally:
os.remove(fp)
# Store index parameters used.
params = {
"factory_string": self.factory_string,
"read_only": self.read_only,
"random_seed": self.random_seed,
"use_multiprocessing": self.use_multiprocessing,
"next_index": self._next_index,
}
self._index_param_element.set_bytes(json.dumps(params))
def _build_index(self, descriptors):
"""
Internal method to be implemented by sub-classes to build the index
with the given descriptor data elements.
Subsequent calls to this method should rebuild the current index.
        This method shall not add to the existing index nor raise an exception,
        so as to protect the current index.
:param descriptors: Iterable of descriptor elements to build index
over.
:type descriptors:
collections.Iterable[smqtk.representation.DescriptorElement]
"""
if self.read_only:
raise ReadOnlyError("Cannot modify read-only index.")
self._log.info("Building new FAISS index")
# We need to fork the iterator, so stick the elements in a list
desc_list = list(descriptors)
data, new_uuids = self._descriptors_to_matrix(desc_list)
n, d = data.shape
idx_ids = np.arange(n) # restart IDs from 0.
# Build a faiss index but don't internalize it until we have a lock.
faiss_index = self._index_factory_wrapper(d, self.factory_string)
# noinspection PyArgumentList
faiss_index.train(data)
# TODO(john.moeller): This will raise an exception on flat indexes.
# There's a solution which involves wrapping the index in an
# IndexIDMap, but it doesn't work because of a bug in FAISS. So for
# now we don't support flat indexes.
# noinspection PyArgumentList
faiss_index.add_with_ids(data, idx_ids)
assert faiss_index.d == d, \
"FAISS index dimension doesn't match data dimension"
assert faiss_index.ntotal == n, \
"FAISS index size doesn't match data size"
with self._model_lock:
self._faiss_index = faiss_index
self._log.info("FAISS index has been constructed with %d "
"vectors", n)
self._log.debug("Clearing and adding new descriptor elements")
self._descriptor_set.clear()
self._descriptor_set.add_many_descriptors(desc_list)
assert len(self._descriptor_set) == n, \
"New descriptor set size doesn't match data size"
self._uid2idx_kvs.clear()
self._uid2idx_kvs.add_many(
dict(zip(new_uuids, idx_ids))
)
assert len(self._uid2idx_kvs) == n, \
"New uid2idx map size doesn't match data size."
self._idx2uid_kvs.clear()
self._idx2uid_kvs.add_many(
dict(zip(idx_ids, new_uuids))
)
assert len(self._idx2uid_kvs) == n, \
"New idx2uid map size doesn't match data size."
self._next_index = n
self._save_faiss_model()
def _update_index(self, descriptors):
"""
Internal method to be implemented by sub-classes to additively update
the current index with the one or more descriptor elements given.
If no index exists yet, a new one should be created using the given
descriptors.
:param descriptors: Iterable of descriptor elements to add to this
index.
:type descriptors:
collections.Iterable[smqtk.representation.DescriptorElement]
:raises RuntimeError: If a given descriptor is already present in this
index. Adding a duplicate descriptor would cause duplicates in
a nearest-neighbor return (no de-duplication).
"""
if self.read_only:
raise ReadOnlyError("Cannot modify read-only index.")
if self._faiss_index is None:
self._build_index(descriptors)
return
self._log.debug('Updating FAISS index')
# We need to fork the iterator, so stick the elements in a list
desc_list = list(descriptors)
data, new_uuids = self._descriptors_to_matrix(desc_list)
n, d = data.shape
with self._model_lock:
# Assert that new descriptors do not intersect with existing
# descriptors.
for uid in new_uuids:
if uid in self._uid2idx_kvs:
raise RuntimeError("Descriptor with UID %s already "
"present in this index.")
old_ntotal = self.count()
next_next_index = self._next_index + n
new_ids = np.arange(self._next_index, next_next_index)
self._next_index = next_next_index
assert self._faiss_index.d == d, \
"FAISS index dimension doesn't match data dimension"
self._faiss_index.add_with_ids(data, new_ids)
assert self._faiss_index.ntotal == old_ntotal + n, \
"New FAISS index size doesn't match old + data size"
self._log.info("FAISS index has been updated with %d"
" new vectors", n)
self._log.debug("Adding new descriptor elements")
self._descriptor_set.add_many_descriptors(desc_list)
assert len(self._descriptor_set) == old_ntotal + n, \
"New descriptor set size doesn't match old + data size"
self._uid2idx_kvs.add_many(
dict(zip(new_uuids, new_ids))
)
assert len(self._uid2idx_kvs) == old_ntotal + n, \
"New uid2idx kvs size doesn't match old + new data size."
self._idx2uid_kvs.add_many(
dict(zip(new_ids, new_uuids))
)
assert len(self._idx2uid_kvs) == old_ntotal + n, \
"New idx2uid kvs size doesn't match old + new data size."
self._save_faiss_model()
def _remove_from_index(self, uids):
"""
Internal method to be implemented by sub-classes to partially remove
descriptors from this index associated with the given UIDs.
:param uids: Iterable of UIDs of descriptors to remove from this index.
:type uids: collections.Iterable[collections.Hashable]
:raises KeyError: One or more UIDs provided do not match any stored
descriptors.
"""
if self.read_only:
raise ReadOnlyError("Cannot modify read-only index.")
with self._model_lock:
# Check that provided IDs are present in uid2idx mapping.
uids_d = collections.deque()
for uid in uids:
if uid not in self._uid2idx_kvs:
raise KeyError(uid)
uids_d.append(uid)
# Remove elements from structures
# - faiss remove_ids requires a np.ndarray of int64 type.
rm_idxs = np.asarray([self._uid2idx_kvs[uid] for uid in uids_d],
dtype=np.int64)
self._faiss_index.remove_ids(rm_idxs)
self._descriptor_set.remove_many_descriptors(uids_d)
self._uid2idx_kvs.remove_many(uids_d)
self._idx2uid_kvs.remove_many(rm_idxs)
self._save_faiss_model()
def _descriptors_to_matrix(self, descriptors):
"""
Extract an (n,d) array with the descriptor vectors in each row,
and a corresponding list of uuids from the list of descriptors.
:param descriptors: List descriptor elements to add to this
index.
:type descriptors: list[smqtk.representation.DescriptorElement]
:return: An (n,d) array of descriptors (d-dim descriptors in n
rows), and the corresponding list of descriptor uuids.
:rtype: (np.ndarray, list[collections.Hashable])
"""
new_uuids = [desc.uuid() for desc in descriptors]
sample_v = descriptors[0].vector()
n, d = len(new_uuids), sample_v.size
data = np.empty((n, d), dtype=np.float32)
elements_to_matrix(
descriptors, mat=data,
use_multiprocessing=self.use_multiprocessing,
report_interval=1.0,
)
self._log.info("data shape, type: %s, %s",
data.shape, data.dtype)
self._log.info("# uuids: %d", n)
return data, new_uuids
def count(self):
"""
:return: Number of elements in this index.
:rtype: int
"""
with self._model_lock:
# If we don't have a searchable index we don't actually have
# anything.
if self._faiss_index:
return self._faiss_index.ntotal
else:
return 0
def _nn(self, d, n=1):
"""
Internal method to be implemented by sub-classes to return the nearest
`N` neighbors to the given descriptor element.
When this internal method is called, we have already checked that there
is a vector in ``d`` and our index is not empty.
:param d: Descriptor element to compute the neighbors of.
:type d: smqtk.representation.DescriptorElement
:param n: Number of nearest neighbors to find.
:type n: int
:return: Tuple of nearest N DescriptorElement instances, and a tuple of
the distance values to those neighbors.
:rtype: (tuple[smqtk.representation.DescriptorElement], tuple[float])
"""
q = d.vector()[np.newaxis, :].astype(np.float32)
self._log.debug("Received query for %d nearest neighbors", n)
with self._model_lock:
s_dists, s_ids = self._faiss_index.search(q, n)
s_dists, s_ids = np.sqrt(s_dists[0, :]), s_ids[0, :]
uuids = [self._idx2uid_kvs[s_id] for s_id in s_ids]
descriptors = self._descriptor_set.get_many_descriptors(uuids)
self._log.debug("Min and max FAISS distances: %g, %g",
min(s_dists), max(s_dists))
descriptors = tuple(descriptors)
d_vectors = elements_to_matrix(descriptors)
d_dists = metrics.euclidean_distance(d_vectors, q)
self._log.debug("Min and max descriptor distances: %g, %g",
min(d_dists), max(d_dists))
        order = d_dists.argsort()
        # sort the descriptors along with the distances so the two returned
        # tuples stay aligned
        descriptors = tuple(descriptors[oidx] for oidx in order)
        d_dists = tuple(d_dists[oidx] for oidx in order)
        self._log.debug("Returning query result of size %d", len(descriptors))
        return descriptors, d_dists
NN_INDEX_CLASS = FaissNearestNeighborsIndex
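# A minimal configuration sketch (the plugin type names here are assumptions;
# the implementations actually available depend on the SMQTK plugins installed
# in your environment):
#   config = FaissNearestNeighborsIndex.get_default_config()
#   config['descriptor_set']['type'] = 'MemoryDescriptorIndex'
#   config['uid2idx_kvs']['type'] = 'MemoryKeyValueStore'
#   config['idx2uid_kvs']['type'] = 'MemoryKeyValueStore'
#   index = FaissNearestNeighborsIndex.from_config(config)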
|
from aiogram.types import ReplyKeyboardMarkup, KeyboardButton
main_keyboard = ReplyKeyboardMarkup(
keyboard=[
[
KeyboardButton(text='Главный раздел')
],
],
resize_keyboard=True
)
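# Typical handler usage (a sketch; assumes an aiogram Message object `message`):
#   await message.answer('Menu', reply_markup=main_keyboard)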
|
#cost function
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# switch from graph mode to eager execution
tf.compat.v1.enable_eager_execution()
X = np.array([1, 2, 3])
Y = np.array([1, 2, 3])
def cost_func(W, X, Y):
hypothesis = X * W
return tf.reduce_mean(tf.square(hypothesis - Y))
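# cost(W) = (1/m) * sum_i (W * x_i - y_i)^2 -- the mean squared error of the
# linear hypothesis h(x) = W * x (this example has no bias term).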
# 15 evenly spaced W values from -3 to 5
W_values = np.linspace(-3, 5, num=15)
cost_values = []
print(W_values)
for feed_W in W_values:
    # see how large curr_cost is for each value of W
curr_cost = cost_func(feed_W, X, Y)
cost_values.append(curr_cost)
print("{:6.3f} | {:10.5f}".format(feed_W, curr_cost))
plt.rcParams["figure.figsize"] = (8, 6)
plt.plot(W_values, cost_values, "b")
plt.ylabel('Cost(W)')
plt.xlabel('W')
plt.show()
|
import random;
import Queue;
import BayesianNetwork;
MAX_SAMPLES = 10;
class IdealUpdate:
def __init__(self, addition, vertex, parentInQuestion):
self.addition = addition;
self.vertex = vertex;
self.parentInQuestion = parentInQuestion;
class Sample:
def __init__(self, joint):
self.joint = joint;
self.randNum = random.random();
class SampledNetwork:
def __init__(self, topologicalOrdering):
self.topologicalOrdering = topologicalOrdering;
self.samples = {};
for vert in self.topologicalOrdering:
self.samples[vert.name] = {};
for val in vert.vals:
self.samples[vert.name][val] = Queue.PriorityQueue(maxsize = MAX_SAMPLES);
self.underlyingModel = BayesianNetwork.BayesianNetwork(topologicalOrdering);
def processNewSample(self, sample):
for vertex in self.topologicalOrdering:
vertVal = sample.joint[vertex.name];
if (self.samples[vertex.name][vertVal].full()):
oldSampleRandNum, oldSampleJoint = self.samples[vertex.name][vertVal].get();
if oldSampleRandNum > sample.randNum:
self.samples[vertex.name][vertVal].put((oldSampleRandNum, oldSampleJoint));
else:
self.samples[vertex.name][vertVal].put((sample.randNum, sample.joint));
else:
self.samples[vertex.name][vertVal].put((sample.randNum, sample.joint));
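    # Note (reading aid): each (vertex, value) bucket above holds at most
    # MAX_SAMPLES joints keyed by a uniform random number; keeping the largest
    # keys and evicting the smallest preserves a uniform random subsample of
    # every joint seen so far for that bucket.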
def getJointsFromPQueue(self, pQueue, pQueueCopy, joints):
while (not pQueue.empty()):
newDataPoint = pQueue.get();
pQueueCopy.put(newDataPoint);
randNum, joint = newDataPoint;
joints.append(joint);
print("The size of joints is " + str(len(joints)));
def updateCountsFromSample(self, vertex, vertexCPTTable):
for val in vertex.vals:
jointSamples = [];
copyPQueue = Queue.PriorityQueue(maxsize = MAX_SAMPLES);
self.getJointsFromPQueue(self.samples[vertex.name][val], copyPQueue, jointSamples);
self.samples[vertex.name][val] = copyPQueue;
for joint in jointSamples:
jointParentAssignment = '';
for parent in vertex.parents:
jointParentAssignment += str(joint[parent.name]);
            if jointParentAssignment not in vertexCPTTable:
                vertexCPTTable[jointParentAssignment] = {};
                # use a fresh name so the outer loop variable 'val' is not clobbered
                for v in vertex.vals:
                    vertexCPTTable[jointParentAssignment][v] = 0;
vertexVal = joint[vertex.name];
vertexCPTTable[jointParentAssignment][vertexVal] += 1;
def marginalizeDataCounts(self, vertexCPTTable, vertex):
for entry in vertexCPTTable:
totalCounts = 0;
for val in vertex.vals:
totalCounts += vertexCPTTable[entry][val];
for val in vertex.vals:
vertexCPTTable[entry][val] = float(vertexCPTTable[entry][val]) / float(totalCounts);
def updateVertexCPTs(self, vertex):
vertexCPTTable = {};
self.updateCountsFromSample(vertex, vertexCPTTable);
self.marginalizeDataCounts(vertexCPTTable, vertex);
vertex.currCPTTable = vertexCPTTable;
def getIdealUpdateModel(self):
idealChange = None;
largestIncrease = 0;
for vertex in self.topologicalOrdering:
self.updateVertexCPTs(vertex);
vertexSamplePoints = [];
for val in vertex.vals:
copyPQueue = Queue.PriorityQueue(maxsize = MAX_SAMPLES);
sampleJoints = [];
self.getJointsFromPQueue(self.samples[vertex.name][val], copyPQueue, sampleJoints);
self.samples[vertex.name][val] = copyPQueue;
print("The sample joints is " + str(sampleJoints));
for dataPoint in sampleJoints:
vertexSamplePoints.append(dataPoint);
currVertexScore = self.underlyingModel.vertexContributionToModelScore(vertex, vertexSamplePoints);
for parent in vertex.possibleParents:
addition = True;
if (parent in vertex.parents):
self.underlyingModel.removeEdge(parent, vertex);
addition = False;
else:
self.underlyingModel.addEdge(parent, vertex);
self.updateVertexCPTs(vertex);
newScore = self.underlyingModel.vertexContributionToModelScore(vertex, vertexSamplePoints);
if (newScore - currVertexScore) > largestIncrease:
largestIncrease = newScore - currVertexScore;
idealChange = IdealUpdate(addition, vertex, parent);
#backtrack
if (not addition):
self.underlyingModel.addEdge(parent, vertex);
else:
self.underlyingModel.removeEdge(parent, vertex);
return idealChange;
def processNewData(self, data):
for dataPoint in data:
print(dataPoint);
samp = Sample(dataPoint);
self.processNewSample(samp);
idealUpdateModel = self.getIdealUpdateModel()
        if (idealUpdateModel is None):
return;
if (idealUpdateModel.addition == True):
self.underlyingModel.addEdge(idealUpdateModel.parentInQuestion, idealUpdateModel.vertex);
else:
self.underlyingModel.removeEdge(idealUpdateModel.parentInQuestion, idealUpdateModel.vertex);
|
# Walk up the tree and return the youngest common ancestor
# of the given two descendant nodes.
class AncestralTree:
def __init__(self, name):
self.name = name
self.ancestor = None
def getYoungestCommonAncestor(topAncestor, descendantOne, descendantTwo):
oneDepth = getDepth(descendantOne, topAncestor)
twoDepth = getDepth(descendantTwo, topAncestor)
if oneDepth > twoDepth:
return backTrackAncestralTree(
descendantOne, descendantTwo, oneDepth - twoDepth
)
else:
return backTrackAncestralTree(
descendantTwo, descendantOne, twoDepth - oneDepth
)
def getDepth(descendant, ancestor):
depth = 0
while descendant != ancestor:
depth += 1
descendant = descendant.ancestor
return depth
def backTrackAncestralTree(lowerDescendant, upperDescendant, diff):
while diff > 0:
lowerDescendant = lowerDescendant.ancestor
diff -= 1
while lowerDescendant != upperDescendant:
lowerDescendant = lowerDescendant.ancestor
upperDescendant = upperDescendant.ancestor
return lowerDescendant
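# Example (hypothetical three-node chain where A is the root: A <- B <- C):
#   a = AncestralTree('A'); b = AncestralTree('B'); c = AncestralTree('C')
#   b.ancestor = a; c.ancestor = b
#   getYoungestCommonAncestor(a, b, c).name  # 'B'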
|
from spack import *
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class FreetypeToolfile(Package):
url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'
version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)
depends_on('freetype')
def install(self, spec, prefix):
values = {}
values['VER'] = spec['freetype'].version
values['PFX'] = spec['freetype'].prefix
fname = 'freetype.xml'
contents = str("""
<tool name="freetype" version="${VER}">
<lib name="freetype"/>
<client>
<environment name="FREETYPE_BASE" default="${PFX}"/>
<environment name="INCLUDE" default="$$FREETYPE_BASE/include"/>
<environment name="LIBDIR" default="$$FREETYPE_BASE/lib"/>
</client>
<runtime name="PATH" value="$$FREETYPE_BASE/bin" type="path"/>
<runtime name="ROOT_INCLUDE_PATH" value="$$INCLUDE" type="path"/>
<use name="root_cxxdefaults"/>
</tool>
""")
write_scram_toolfile(contents, values, fname, prefix)
|
import machine
from machine import Pin,I2C,SPI
import ssd1306
#i2c = I2C(scl=Pin(14), sda=Pin(2), freq=100000)
#display = ssd1306.SSD1306_I2C(128,64, i2c)
spi = SPI(baudrate=10000000, polarity=1, phase=0, sck=Pin(14,Pin.OUT), mosi=Pin(13,Pin.OUT), miso=Pin(12))
display = ssd1306.SSD1306_SPI(128, 64, spi, Pin(5),Pin(4), Pin(16))
def show():
try:
display.poweron()
display.init_display()
display.text('Hi, MicroPython!',1,16)
# Write display buffer
display.show()
    except Exception as ex:
        print('display error:', ex)  # surface the failure before powering off
        display.poweroff()
|
# System Imports
import pytest
from unittest import mock
from unittest.mock import MagicMock
# Framework / Library Imports
# Application Imports
from main import create_app
import config
# Local Imports
@mock.patch('comms_rabbitmq.get_connection')
def test_no_rootpatch(get_conn):
"""
Tests that a blank route returns a 404
"""
    # configure the mock injected by @mock.patch directly; rebinding the
    # get_conn name to a fresh MagicMock would leave the patched target
    # unconfigured
    get_conn.return_value = True
    get_conn.channel = MagicMock()
app = create_app()
app.config['TESTING'] = True
client = app.test_client()
rv = client.get('/')
assert rv.status_code == 404
@mock.patch('comms_rabbitmq.get_connection')
def test_healthcheck(get_conn):
"""
Tests that the healthcheck returns a 200
and a text response of 'OK'
"""
    get_conn.return_value = True  # configure the injected mock directly
    get_conn.channel = MagicMock()
app = create_app()
app.config['TESTING'] = True
client = app.test_client()
rv = client.get('/healthcheck')
assert rv.status_code == 200
assert b"OK" in rv.data
@mock.patch('comms_rabbitmq.publish_webhook')
@mock.patch('comms_rabbitmq.get_connection')
def test_rmq_runs_webhook(get_conn, mocked_method):
"""
Tests that when the config mode is RABBITMQ
    a publish_webhook event is run
"""
    get_conn.return_value = True  # configure the injected mock directly
    get_conn.channel = MagicMock()
mocked_method.return_value = True
app = create_app()
app.config['TESTING'] = True
client = app.test_client()
rv = client.post('/clockify/webhook/test')
mocked_method.assert_called_once()
assert rv.status_code == 200
# How to get Flask config
# print(client.application.config)
|
#!/usr/bin/env python3
#Aurel Onuzi
import csv
import os.path
import sys
#user input
names_file = input('Enter a text file with a list of names: ')
nickname_file = input('Enter a file with a list of nicknames, or just enter 0 to skip this step: ')
name_var = input('Select name variation, Single Line or Multiple Lines (enter single/0 or multiple/1): ')
#dictionary where firstname is the key and nicknames are the values, 1:N pair
nickname_dict = {}
#some error handling with file names
if not names_file.endswith('.txt'):
    names_file = names_file + '.txt'
if not os.path.isfile(names_file):
    raise SystemExit('No file found: ' + names_file)
#'0' means the nickname step is skipped, so don't turn it into '0.txt'
if nickname_file != '0' and not nickname_file.endswith('.txt'):
    nickname_file = nickname_file + '.txt'
if name_var.strip().lower() == 'single':
name_var = '0'
elif name_var.strip().lower() == 'multiple':
name_var = '1'
elif name_var.strip() != '0' and name_var.strip() != '1':
    raise SystemExit('Wrong selection was given')
def name_variations(first,last):
if name_var == '0':
print('(',end="")
nickname_single_variations(first,last)
print(')',end="")
if name_var == '1':
nickname_multi_variations(first,last)
def generate_single_var(first,last):
print('{0} {1} OR {1}, {0} OR {2}{0} OR {0} w/2 {1}'.format(first, last, last[:1]), end="")
#third variation generates first initial of last name followed by the first name similar to the example
#if you meant the first initial of the first name followed by the last name, change it to the line below
#print('{0} {1} OR {1}, {0} OR {2}{1} OR {0} w/2 {1}'.format(first, last, first[:1]), end="")
def generate_multi_var(first,last):
print('{0} {1}\n{1}, {0}\n{2}{0}\n{0} w/2 {1}'.format(first, last, first[:1]))
def populate_dict(file):
#creating a dictionary with first names as key to nicknames as values
with open(file) as f:
reader = csv.reader(f)
for row in reader:
            if row: #ignoring blank lines
                if row[0] in nickname_dict:
                    nickname_dict[row[0]].append(row[1])
                else:
                    nickname_dict[row[0]] = [row[1]]
def nickname_single_variations(firstname,last):
# initial name variation
generate_single_var(firstname,last)
#nickname variations
for key, val in nickname_dict.items():
if key == firstname:
for val in nickname_dict[key]:
print(' OR ',end="")
generate_single_var(val,last)
def nickname_multi_variations(firstname,last):
generate_multi_var(firstname,last)
for key, val in nickname_dict.items():
if key == firstname:
for val in nickname_dict[key]:
generate_multi_var(val,last)
def main():
if not nickname_file == '0': #nicknames file available
populate_dict(nickname_file)
base_file = os.path.basename(names_file)
new_file = os.path.splitext(base_file)[0] + '_Search.txt'
    original_stdout = sys.stdout #for reverting back to the original standard output
with open(names_file,'r') as input_file, open(new_file,'w') as output_file:
sys.stdout = output_file #sending data from standard output to the file instead
        for line in input_file:
            name = line.split()
            if len(name) >= 2: #skip blank or malformed lines
                name_variations(name[0], name[1])
    sys.stdout = original_stdout
if __name__ == "__main__":
main()
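# Example output for "John Smith" with the single-line variation (no nicknames):
#   (John Smith OR Smith, John OR SJohn OR John w/2 Smith)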
|
# coding: utf-8
class Digraph(object):
""" Digraph is a simple and more or less efficient implementation of a
directed graph. It aims to provide all necessary methods for digraphs and to
be simple to understand. Therefore, Digraph isn't efficient in any way. When
you are looking for an efficient digraph implementation, look at FastDigraph.
"""
def __init__(self, data=None):
self.arcs = set()
self.arc_weights = {}
if data is not None:
self.update(data)
def add(self, v1, v2, weight=None):
self.add_arc((v1, v2), weight)
def add_arc(self, arc, weight=None):
self.arcs.add(arc)
self.arc_weights[arc] = weight
def weight(self, v1, v2, default=None):
return self.arc_weights.get((v1, v2), default)
def remove(self, v1, v2):
self.remove_arc((v1, v2))
def remove_arc(self, arc):
self.arcs.remove(arc)
def neighbors(self, v):
for v1, v2 in self.arcs:
if v1 == v:
yield v2
def adjacent(self, v1, v2):
return (v1, v2) in self.arcs
def vertices(self):
s = set()
for vs in self.arcs:
for v in vs:
if v not in s:
yield v
s.add(v)
def update(self, arcs):
for t in arcs:
if len(t) == 3:
v1, v2, weight = t
elif len(t) == 2:
weight = None
v1, v2 = t
self.add(v1, v2, weight=weight)
def __iter__(self):
for v1, v2 in self.arcs:
yield (v1, v2, self.weight(v1, v2))
def __contains__(self, arc):
return arc in self.arcs
    def __hash__(self):
        # a set is not hashable, so hash an immutable snapshot of the arcs
        return hash(frozenset(self.arcs))
def __len__(self):
return len(self.arcs)
def arcs_dot(self):
for v1, v2 in self.arcs:
yield "\"{v1}\" -> \"{v2}\"".format(v1=hash(v1), v2=hash(v2))
def vertices_dot(self):
for v in self.vertices():
yield "\"{v}\" [label=\"{l}\"]".format(v=hash(v), l=str(v))
def dot(self):
yield from self.vertices_dot()
yield from self.arcs_dot()
def dijkstra(self, src, weight=None):
if weight is None: weight = self.weight
# Dijkstra algorithm state
distance = {}
previous = {}
unvisited = set()
# Define distance from source to source as 0
distance[src] = 0
# Define any other distance to infinity and create entry for previous
# object in hash table
        # every vertex, including the source, starts unvisited; the source
        # must be visited first so that its neighbors get relaxed
        for vertex in self.vertices():
            if vertex != src:
                distance[vertex] = float("inf")
                previous[vertex] = None
            unvisited.add(vertex)
while unvisited:
vertex = sorted(unvisited, key=lambda v: distance[v]).pop(0)
unvisited.remove(vertex)
for neighbor in self.neighbors(vertex):
dist = float(weight(vertex, neighbor, 0))
alt = distance[vertex] + dist
if alt < distance[neighbor]:
distance[neighbor] = alt
previous[neighbor] = vertex
return distance, previous
    def dijkstra_path(self, src, dst, weight=None):
        path = []
        dist, prev = self.dijkstra(src, weight=weight)
        # walk back from dst along the predecessor chain; an unreachable dst
        # has previous[dst] = None, in which case an empty path is returned
        while dst in prev and prev[dst] is not None:
            path.insert(0, dst)
            dst = prev[dst]
        if dst == src:
            path.insert(0, src)
        return path
def dijkstra_tree(self, src, weight=None):
if weight is None: weight = self.weight
dist, prev = self.dijkstra(src, weight=weight)
g = Digraph()
for k, v in prev.items():
g.add(v, k, weight(v, k))
return g
    def subtree(self, src, initial=()):
g = Digraph()
g.handle_neighbors = False
s = set(initial)
unvisited = set([src])
while unvisited:
v = unvisited.pop()
s.add(v)
for n in set(self.neighbors(v)) - s:
g.add(v, n, self.weight(v, n))
unvisited.add(n)
return g
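# A small usage sketch (hypothetical weighted graph):
#   g = Digraph([("a", "b", 1), ("b", "c", 2), ("a", "c", 5)])
#   g.dijkstra_path("a", "c")  # ["a", "b", "c"] -- cost 3 beats the direct arc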
|
from .base import BasePaymentMethod
class CardPaymentMethod(BasePaymentMethod):
pass
|
import json
from flask import Flask, request, jsonify
from os import path, makedirs
from subprocess import Popen, PIPE
from govr.util import is_dir_empty
from govr.test_runner import TestRunner
from govr.shields import update_shield
PROJECT_DIR_NAME = "project"
IMG_DIR_NAME = "img"
REPORTS_DIR_NAME = "reports"
COVERAGE_FILE = "coverage.json"
def routes(server):
server.app.add_url_rule("/", methods=["GET"], view_func=server.hello)
server.app.add_url_rule("/coverage", methods=["GET"], view_func=server.coverage)
server.app.add_url_rule("/hook", methods=["POST"], view_func=server.hook)
class Server:
FLASK_APP_NAME = "govr-server"
def __init__(self, args):
if args.server_git_repo == "":
raise Exception("Cannot run server without clonable git repo url, via --server-git-repo=$REPO")
self.git_repo = args.server_git_repo
self.host = args.server_host
self.port = args.server_port
self.debug = args.server_debug
self.state_dir = args.server_state_dir
self.coverage_file = path.join(self.state_dir, COVERAGE_FILE)
self.app = Flask(Server.FLASK_APP_NAME)
self.state_dirs = self._init_state_dirs()
checkout_project(self.git_repo, self.state_dirs[PROJECT_DIR_NAME])
self.test_runner = TestRunner(self.state_dirs[PROJECT_DIR_NAME])
# TODO: Checkout alternative branch to master if configured? Not impl yet.
self._init_coverage_file()
update_shield(self.state_dirs[IMG_DIR_NAME], self._read_total_coverage())
routes(self)
######################################################################
# ROUTE HANDLERS
######################################################################
def hello(self):
return "Break the hairpin\n"
def coverage(self):
return jsonify({"total_coverage": self._read_total_coverage()})
def hook(self):
        # TODO: Queue? Concurrent runs? Make sure most recent push is triggering?
new_master_push = \
request.json["ref"] == "refs/heads/master" and \
request.headers["X-Github-Event"] == "push"
# Pull
test_sha = request.json["after"]
self._update_project(test_sha)
        # If it's not a push event we care about, ignore
if not new_master_push:
return ("", 200)
self._update_coverage()
update_shield(self.state_dirs[IMG_DIR_NAME], self._read_total_coverage())
return ("Running update", 202)
######################################################################
def run(self):
self.app.run(
debug=self.debug,
host=self.host,
port=int(self.port)
)
def _init_state_dirs(self):
ret = {}
if not path.exists(self.state_dir):
makedirs(self.state_dir)
state_dirs = [PROJECT_DIR_NAME, IMG_DIR_NAME, REPORTS_DIR_NAME]
for state_dir, full_state_dir in [(dd, path.join(self.state_dir, dd)) for dd in state_dirs]:
if not path.exists(full_state_dir):
makedirs(full_state_dir)
ret[state_dir] = full_state_dir
return ret
def _init_coverage_file(self, overwrite=True):
if path.exists(self.coverage_file) and not overwrite:
return
self._update_coverage()
def _update_project(self, sha):
# TODO: Handle errors on these cmds
fetch_cmd = ["git", "fetch", "--all", ]
checkout_cmd = ["git", "checkout", sha]
fetch_p = Popen(fetch_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE,
cwd=self.state_dirs[PROJECT_DIR_NAME])
output, error = fetch_p.communicate()
checkout_p = Popen(checkout_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE,
cwd=self.state_dirs[PROJECT_DIR_NAME])
output, error = checkout_p.communicate()
def _update_coverage(self):
coverage = self.test_runner.run()
print "Writing coverage to file: %s" % self.coverage_file
with open(self.coverage_file, "w") as coverage_file:
coverage_file.write(json.dumps(coverage))
def _read_total_coverage(self):
with open(self.coverage_file) as coverage_file:
# TODO: Error handling?
weights = json.load(coverage_file)
return weights["total_coverage"]
def checkout_project(clone_url, path):
if is_dir_empty(path):
print "Cloning [ %s ] to [ %s ]" % (clone_url, path)
clone_cmd = ["git", "clone", clone_url, path]
clone_p = Popen(clone_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = clone_p.communicate()
print output
|
import pyrebase
import datetime
import time
config = { #configuration for the judge-prefs Firebase app
"apiKey": "apiKey",
"authDomain": "judge-prefs.firebaseapp.com",
"databaseURL": "https://judge-prefs.firebaseio.com/",
"storageBucket": "judge-prefs.appspot.com"
}
firebase = pyrebase.initialize_app(config) #initializes app
db = firebase.database() #sets db as database variable
for upload in db.child('user_uploads').get().each(): #iterates over database of user uploads
new = {} #initializes new judge dictionary
newcomments = {} #initializes judge comments dictionary
    firstName = upload.val()['firstName'] #judge's first and last name from the upload
    lastName = upload.val()['lastName']
jid = "doesnotexist" #assigns that judge does not exist
for jud in db.child('judges').order_by_child('first_name').equal_to(firstName).get().each(): #finds matching judge
if (jud.val()['last_name'] == lastName):
jid = jud.key() #assigns judge key to jid variable
if (jid != "doesnotexist"): #only processes if judge exists
        print(jid)
jold = firebase.database().child('judges').child(jid).get() #downloads judge
        com = False #whether this upload includes a comments field
        for field in db.child('user_uploads').child(upload.key()).get().each(): #checks whether the upload has a comments field
            if field.key() == 'comments':
                com = True
if (com):
if (upload.val()['comments'] != "-1"):
newcomments['fullName'] = firstName + " " + lastName #creates name for new judge
newcomments['comments'] = upload.val()['comments'] #adds comments
            ts = time.time() #current Unix timestamp for the comment
newcomments['timestamp'] = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') #timestamps comment
db.child('comments').push(newcomments) #updates comments
new['first_name'] = firstName #creates fields for new judge
new['last_name'] = lastName
new['phil'] = jold.val()['phil']
new['num_reviews'] = jold.val()['num_reviews'] + 1
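        # running-average update: new_mean = (old_mean * old_n + new_sample) / new_n;
        # the same incremental pattern is reused for every win-rate field below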
new['spreading'] = ((jold.val()['spreading']) * (jold.val()['num_reviews']) + float(upload.val()['speedPref'])) / (new['num_reviews'])
if (upload.val()['aff_type'] == 'aff_trad'):
new['trad_aff_num'] = (jold.val()['trad_aff_num']) + 1
if (upload.val()['winner'] == 'aff_win'):
new['trad_aff_wr'] = (((jold.val()['trad_aff_wr']) * (jold.val()['trad_aff_num'])) + 1) / (new['trad_aff_num'])
else:
new['trad_aff_wr'] = (((jold.val()['trad_aff_wr']) * (jold.val()['trad_aff_num']))) / (new['trad_aff_num'])
else:
new['k_aff_num'] = (jold.val()['k_aff_num']) + 1
if (upload.val()['winner'] == 'aff_win'):
new['k_aff_wr'] = (((jold.val()['k_aff_wr']) * (jold.val()['k_aff_num'])) + 1) / (new['k_aff_num'])
else:
new['k_aff_wr'] = (((jold.val()['k_aff_wr']) * (jold.val()['k_aff_num']))) / (new['k_aff_num'])
new['CP'] = {}
new['DA'] = {}
new['K'] = {}
new['T'] = {}
new['impact_turn'] = {}
if (upload.val()['neg_choice'] == 'k'):
new['DA'] = jold.val()['DA']
new['T'] = jold.val()['T']
new['impact_turn'] = jold.val()['impact_turn']
new['CP'] = jold.val()['CP']
new['K']['K_num'] = (jold.val()['K']['K_num']) + 1
if (upload.val()['winner'] == 'aff_win'):
new['K']['aff_wr'] = ((jold.val()['K']['aff_wr']) * (jold.val()['K']['K_num']) + 1) / (new['K']['K_num'])
else:
new['K']['aff_wr'] = ((jold.val()['K']['aff_wr']) * (jold.val()['K']['K_num'])) / (new['K']['K_num'])
if (upload.val()['rfd'] == 'framework'):
new['K']['framework_wr'] = ((jold.val()['K']['framework_wr']) * (jold.val()['K']['K_num']) + 1) / (new['K']['K_num'])
else:
new['K']['framework_wr'] = ((jold.val()['K']['framework_wr']) * (jold.val()['K']['K_num'])) / (new['K']['K_num'])
if (upload.val()['rfd'] == 'perm'):
new['K']['perm_wr'] = ((jold.val()['K']['perm_wr']) * (jold.val()['K']['K_num']) + 1) / (new['K']['K_num'])
else:
new['K']['perm_wr'] = ((jold.val()['K']['perm_wr']) * (jold.val()['K']['K_num'])) / (new['K']['K_num'])
if (upload.val()['rfd'] == 'impact_turn'):
new['K']['impact_turn_wr'] = ((jold.val()['K']['impact_turn_wr']) * (jold.val()['K']['K_num']) + 1) / (new['K']['K_num'])
else:
new['K']['impact_turn_wr'] = ((jold.val()['K']['impact_turn_wr']) * (jold.val()['K']['K_num'])) / (new['K']['K_num'])
if (upload.val()['rfd'] == 'no_alt'):
new['K']['no_alt_solvency_wr'] = ((jold.val()['K']['no_alt_solvency_wr']) * (jold.val()['K']['K_num']) + 1) / (new['K']['K_num'])
else:
new['K']['no_alt_solvency_wr'] = ((jold.val()['K']['no_alt_solvency_wr']) * (jold.val()['K']['K_num'])) / (new['K']['K_num'])
if (upload.val()['rfd'] == 'case_outweighs'):
new['K']['case_outweights_wr'] = ((jold.val()['K']['case_outweights_wr']) * (jold.val()['K']['K_num']) + 1) / (new['K']['K_num'])
else:
new['K']['case_outweights_wr'] = ((jold.val()['K']['case_outweights_wr']) * (jold.val()['K']['K_num'])) / (new['K']['K_num'])
if (upload.val()['rfd'] == 'condo'):
new['K']['condo_wr'] = ((jold.val()['K']['condo_wr']) * (jold.val()['K']['K_num']) + 1) / (new['K']['K_num'])
else:
new['K']['condo_wr'] = ((jold.val()['K']['condo_wr']) * (jold.val()['K']['K_num'])) / (new['K']['K_num'])
elif (upload.val()['neg_choice'] == 'cp'):
new['DA'] = jold.val()['DA']
new['T'] = jold.val()['T']
new['impact_turn'] = jold.val()['impact_turn']
new['K'] = jold.val()['K']
new['CP']['CP_num'] = (jold.val()['CP']['CP_num']) + 1
if (upload.val()['winner'] == 'aff_win'):
new['CP']['aff_wr'] = ((jold.val()['CP']['aff_wr']) * (jold.val()['CP']['CP_num']) + 1) / (new['CP']['CP_num'])
else:
new['CP']['aff_wr'] = ((jold.val()['CP']['aff_wr']) * (jold.val()['CP']['CP_num'])) / (new['CP']['CP_num'])
if (upload.val()['rfd'] == 'perm'):
new['CP']['perm_wr'] = ((jold.val()['CP']['perm_wr']) * (jold.val()['CP']['CP_num']) + 1) / (new['CP']['CP_num'])
else:
new['CP']['perm_wr'] = ((jold.val()['CP']['perm_wr']) * (jold.val()['CP']['CP_num'])) / (new['CP']['CP_num'])
if (upload.val()['rfd'] == 'theory'):
new['CP']['cp_theory_wr'] = ((jold.val()['CP']['cp_theory_wr']) * (jold.val()['CP']['CP_num']) + 1) / (new['CP']['CP_num'])
else:
new['CP']['cp_theory_wr'] = ((jold.val()['CP']['cp_theory_wr']) * (jold.val()['CP']['CP_num'])) / (new['CP']['CP_num'])
if (upload.val()['rfd'] == 'solvency_def'):
new['CP']['solvency_deficit'] = ((jold.val()['CP']['solvency_deficit']) * (jold.val()['CP']['CP_num']) + 1) / (new['CP']['CP_num'])
else:
new['CP']['solvency_deficit'] = ((jold.val()['CP']['solvency_deficit']) * (jold.val()['CP']['CP_num'])) / (new['CP']['CP_num'])
if (upload.val()['rfd'] == 'net_ben_offense'):
new['CP']['offense_on_net_benefit'] = ((jold.val()['CP']['offense_on_net_benefit']) * (jold.val()['CP']['CP_num']) + 1) / (new['CP']['CP_num'])
else:
new['CP']['offense_on_net_benefit'] = ((jold.val()['CP']['offense_on_net_benefit']) * (jold.val()['CP']['CP_num'])) / (new['CP']['CP_num'])
if (upload.val()['rfd'] == 'net_ben_links'):
new['CP']['links_to_net_benefit'] = ((jold.val()['CP']['links_to_net_benefit']) * (jold.val()['CP']['CP_num']) + 1) / (new['CP']['CP_num'])
else:
new['CP']['links_to_net_benefit'] = ((jold.val()['CP']['links_to_net_benefit']) * (jold.val()['CP']['CP_num'])) / (new['CP']['CP_num'])
if (upload.val()['rfd'] == 'condo'):
new['CP']['condo_wr'] = ((jold.val()['CP']['condo_wr']) * (jold.val()['CP']['CP_num']) + 1) / (new['CP']['CP_num'])
else:
new['CP']['condo_wr'] = ((jold.val()['CP']['condo_wr']) * (jold.val()['CP']['CP_num'])) / (new['CP']['CP_num'])
elif (upload.val()['neg_choice'] == 'da'):
new['K'] = jold.val()['K']
new['T'] = jold.val()['T']
new['impact_turn'] = jold.val()['impact_turn']
new['CP'] = jold.val()['CP']
new['DA']['DA_num'] = (jold.val()['DA']['DA_num']) + 1
if (upload.val()['winner'] == 'aff_win'):
new['DA']['aff_wr'] = ((jold.val()['DA']['aff_wr']) * (jold.val()['DA']['DA_num']) + 1) / (new['DA']['DA_num'])
else:
new['DA']['aff_wr'] = ((jold.val()['DA']['aff_wr']) * (jold.val()['DA']['DA_num'])) / (new['DA']['DA_num'])
if (upload.val()['rfd'] == 'case_outweighs'):
new['DA']['case_outweights_wr'] = ((jold.val()['DA']['case_outweights_wr']) * (jold.val()['DA']['DA_num']) + 1) / (new['DA']['DA_num'])
else:
new['DA']['case_outweights_wr'] = ((jold.val()['DA']['case_outweights_wr']) * (jold.val()['DA']['DA_num'])) / (new['DA']['DA_num'])
if (upload.val()['rfd'] == 'no_link_thumpers'):
new['DA']['no_link_wr'] = ((jold.val()['DA']['no_link_wr']) * (jold.val()['DA']['DA_num']) + 1) / (new['DA']['DA_num'])
else:
new['DA']['no_link_wr'] = ((jold.val()['DA']['no_link_wr']) * (jold.val()['DA']['DA_num'])) / (new['DA']['DA_num'])
if (upload.val()['rfd'] == 'link_turn'):
new['DA']['link_turn_wr'] = ((jold.val()['DA']['link_turn_wr']) * (jold.val()['DA']['DA_num']) + 1) / (new['DA']['DA_num'])
else:
new['DA']['link_turn_wr'] = ((jold.val()['DA']['link_turn_wr']) * (jold.val()['DA']['DA_num'])) / (new['DA']['DA_num'])
if (upload.val()['rfd'] == 'no_impact'):
new['DA']['no_impact_wr'] = ((jold.val()['DA']['no_impact_wr']) * (jold.val()['DA']['DA_num']) + 1) / (new['DA']['DA_num'])
else:
new['DA']['no_impact_wr'] = ((jold.val()['DA']['no_impact_wr']) * (jold.val()['DA']['DA_num'])) / (new['DA']['DA_num'])
if (upload.val()['rfd'] == 'impact_turn'):
new['DA']['impact_turn_wr'] = ((jold.val()['DA']['impact_turn_wr']) * (jold.val()['DA']['DA_num']) + 1) / (new['DA']['DA_num'])
else:
new['DA']['impact_turn_wr'] = ((jold.val()['DA']['impact_turn_wr']) * (jold.val()['DA']['DA_num'])) / (new['DA']['DA_num'])
if (upload.val()['rfd'] == 'condo'):
new['DA']['condo_wr'] = ((jold.val()['DA']['condo_wr']) * (jold.val()['DA']['DA_num']) + 1) / (new['DA']['DA_num'])
else:
new['DA']['condo_wr'] = ((jold.val()['DA']['condo_wr']) * (jold.val()['DA']['DA_num'])) / (new['DA']['DA_num'])
elif (upload.val()['neg_choice'] == 't'):
new['DA'] = jold.val()['DA']
new['K'] = jold.val()['K']
new['impact_turn'] = jold.val()['impact_turn']
new['CP'] = jold.val()['CP']
new['T']['T_num'] = (jold.val()['T']['T_num']) + 1
if (upload.val()['winner'] == 'aff_win'):
new['T']['aff_wr'] = ((jold.val()['T']['aff_wr']) * (jold.val()['T']['T_num']) + 1) / (new['T']['T_num'])
else:
new['T']['aff_wr'] = ((jold.val()['T']['aff_wr']) * (jold.val()['T']['T_num'])) / (new['T']['T_num'])
if (upload.val()['rfd'] == 'we_meet'):
new['T']['we_meet_p'] = ((jold.val()['T']['we_meet_p']) * (jold.val()['T']['T_num']) + 1) / (new['T']['T_num'])
else:
new['T']['we_meet_p'] = ((jold.val()['T']['we_meet_p']) * (jold.val()['T']['T_num'])) / (new['T']['T_num'])
if (upload.val()['rfd'] == 'aff_flex'):
new['T']['aff_flex_outweighs'] = ((jold.val()['T']['aff_flex_outweighs']) * (jold.val()['T']['T_num']) + 1) / (new['T']['T_num'])
else:
new['T']['aff_flex_outweighs'] = ((jold.val()['T']['aff_flex_outweighs']) * (jold.val()['T']['T_num'])) / (new['T']['T_num'])
if (upload.val()['rfd'] == 'reasonability'):
new['T']['reasonability_p'] = ((jold.val()['T']['reasonability_p']) * (jold.val()['T']['T_num']) + 1) / (new['T']['T_num'])
else:
new['T']['reasonability_p'] = ((jold.val()['T']['reasonability_p']) * (jold.val()['T']['T_num'])) / (new['T']['T_num'])
if (upload.val()['rfd'] == 'condo'):
new['T']['condo_p'] = ((jold.val()['T']['condo_p']) * (jold.val()['T']['T_num']) + 1) / (new['T']['T_num'])
else:
new['T']['condo_p'] = ((jold.val()['T']['condo_p']) * (jold.val()['T']['T_num'])) / (new['T']['T_num'])
elif (upload.val()['neg_choice'] == 'it'):
new['DA'] = jold.val()['DA']
new['T'] = jold.val()['T']
new['K'] = jold.val()['K']
new['CP'] = jold.val()['CP']
new['impact_turn']['it_num'] = (jold.val()['impact_turn']['it_num']) + 1
if (upload.val()['winner'] == 'aff_win'):
new['impact_turn']['aff_wr'] = ((jold.val()['impact_turn']['aff_wr']) * (jold.val()['impact_turn']['it_num']) + 1) / (new['impact_turn']['it_num'])
else:
new['impact_turn']['aff_wr'] = ((jold.val()['impact_turn']['aff_wr']) * (jold.val()['impact_turn']['it_num'])) / (new['impact_turn']['it_num'])
db.child('judges').child(jid).update(new) #updates judge database
db.child('user_uploads').child(upload.key()).remove() #removes judge from user uploads
|
########### stack (tumpukan)
# tumpukan = [1,2,3,4,5,6]
# print('current data : ',tumpukan)
#
# #push new data
# tumpukan.append(7)
# print('data pushed: ',7)
# tumpukan.append(8)
# print('data pushed: ',8)
#
# print('current data : ',tumpukan)
#
# out = tumpukan.pop()
# print('data popped : ',out)
# print('current data : ',tumpukan)
# print('\n \n')
# # queue (antrian)
# from collections import deque
#
# antrian = deque([1,2,3,4,5])
# print('current data : ', antrian)
###### enqueue data
# antrian.append(6)
# print('data pushed: ', 6)
# print('current data: ',antrian)
#
# #dequeue
# out = antrian.popleft()
# print('current data: ',antrian)
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from colors import red
from setuptools import Extension, setup
native_impl = Extension("native.impl", sources=["impl.c"])
setup(
name="native",
version="2.3.4",
packages=["native"],
namespace_packages=["native"],
package_dir={"native": "."},
ext_modules=[native_impl],
description=red("Proof that custom PEP-517 build-time requirements work"),
)
|
import grequests
import requests
from PIL import Image
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import time
import os
import logging
import random
import math
from io import BytesIO
from typing import List, Tuple
from dataclasses import dataclass
import progressbar
_GOOGLE_MAP_URL = 'http://www.google.cn/maps/vt?lyrs=s&x={}&y={}&z={}'
# _GOOGLE_MAP_URL = 'http://www.google.com/maps/vt?lyrs=s&x={}&y={}&z={}'
_USER_AGENTS = [
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36']
TILE_SIZE = 256
def _image_scale(zoom):
"""
根据缩放计算瓦片行(列)数
:param zoom: 瓦片等级
:return: 每行或每列的瓦片数
"""
return 1 << zoom
def _project(lat, lng):
"""
Web Mercator 投影
:param lat: 纬度
:param lng: 经度
:return: 投影坐标
"""
sin_y = math.sin(lat * math.pi / 180)
sin_y = min(max(sin_y, -0.9999), 0.9999) # Truncating to 0.9999 effectively limits latitude to 89.1897
return 0.5 + lng / 360, 0.5 - math.log((1 + sin_y) / (1 - sin_y)) / (4 * math.pi)
def _inverse(w_x, w_y):
"""
反投影
:param w_x: 世界坐标x
:param w_y: 世界坐标y
:return: 经纬度
"""
lat = math.atan(math.sinh(math.pi * (1 - 2 * w_y))) / math.pi * 180
lng = (w_x - 0.5) * 360
return lat, lng
def world_xy(lat, lng):
"""
经纬度转“世界坐标”
:param lat: 纬度
:param lng: 经度
:return: 世界坐标
"""
p_x, p_y = _project(lat, lng)
return TILE_SIZE * p_x, TILE_SIZE * p_y
def pixel_xy(lat, lng, zoom):
"""
经纬度转“像素坐标”
:param lat: 纬度
:param lng: 经度
:param zoom: 瓦片等级
:return: 像素坐标
"""
w_x, w_y = world_xy(lat, lng)
scale = _image_scale(zoom)
return math.floor(w_x * scale), math.floor(w_y * scale)
def tile_xy(lat: float, lng: float, zoom: int) -> Tuple[int, int]:
"""
经纬度转“瓦片坐标”
:param lat: 纬度
:param lng: 经度
:param zoom: 瓦片等级
:return: 瓦片坐标
"""
p_x, p_y = pixel_xy(lat, lng, zoom)
return math.floor(p_x / TILE_SIZE), math.floor(p_y / TILE_SIZE)
def tile_extents(t_x: int, t_y: int, zoom: int) -> Tuple[float, float, float, float]:
"""
获取指定瓦片四至经纬度
:param t_x: x
:param t_y: y
:param zoom: z
:return: 北南西东,四至经纬度
"""
scale = _image_scale(zoom)
unit = 1 / scale
x_min = unit * t_x
x_max = unit * (t_x + 1)
y_min = unit * t_y
y_max = unit * (t_y + 1)
lat_top, lng_left = _inverse(x_min, y_min)
lat_bottom, lng_right = _inverse(x_max, y_max)
return lat_top, lat_bottom, lng_left, lng_right
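# Quick sanity sketch (hypothetical values) for the helpers above: at zoom 1 the
# world is a 2x2 tile grid, so the equator/prime-meridian point (0, 0) sits on
# the corner shared by all four tiles and floor() assigns it to tile (1, 1):
#   pixel_xy(0.0, 0.0, 1)  ->  (256, 256)
#   tile_xy(0.0, 0.0, 1)   ->  (1, 1)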
def get_image_from_tiles(extents: (float, float, float, float), zoom, tiles_root):
"""
将指定范围的瓦片拼接成图片
:param extents: (top lat, bottom lat, left lng, right lng)
:param zoom: google zoom level
:param tiles_root: 瓦片根目录
:return: Image
"""
lat0, lat1, lng0, lng1 = extents
x_s, y_s = tile_xy(lat0, lng0, zoom)
x_e, y_e = tile_xy(lat1, lng1, zoom)
return merge_tiles(x_s, x_e, y_s, y_e, zoom, tiles_root)
def merge_tiles(x_s: int, x_e: int, y_s: int, y_e: int, zoom: int, tiles_root: str) -> Image:
"""
拼接影像
:param x_s: 起始x瓦片坐标
:param x_e: 截至x瓦片坐标
:param y_s: 起始y瓦片坐标
:param y_e: 截至y瓦片坐标
:param zoom: 瓦片等级
:param tiles_root: 瓦片存放根目录
:return: PIL.Image
"""
width = TILE_SIZE * (x_e - x_s)
height = TILE_SIZE * (y_e - y_s)
full_image = Image.new('RGB', (width, height))
tile_path_base = tiles_root + '/{}/{}/{}.jpg'
for x in range(x_s, x_e + 1):
for y in range(y_s, y_e + 1):
tile_path = tile_path_base.format(zoom, x, y)
if os.path.exists(tile_path):
tile_image = Image.open(tile_path)
full_image.paste(tile_image, ((x - x_s) * TILE_SIZE, (y - y_s) * TILE_SIZE))
return full_image
@dataclass
class Task:
    zoom: int  # task zoom level
    size: int  # number of tiles in the task
    # tiles: [(int, int)]  # task (x, y) pairs, generator or list
    x_range: (int, int)  # task x range
    y_range: (int, int)  # task y range
    re_list: List[Tuple[int, int]]
    name: str = 'DEFAULT'  # task name
@staticmethod
def from_father_tile(task_zoom, tile: (int, int, int), name=None):
x, y, z = tile
if task_zoom < z:
            raise ValueError('task_zoom must be greater than or equal to z')
if name:
task_name = name
else:
task_name = 'SUB_TILES FROM {}-{}-{}'.format(z, x, y)
task_scale = _image_scale(task_zoom)
father_scale = _image_scale(z)
n = int(task_scale / father_scale)
# x_range = range(x * n, (x + 1) * n)
# y_range = range(y * n, (y + 1) * n)
# task_size = len(x_range) * len(y_range)
# task_tiles = ((xx, yy) for xx in x_range for yy in y_range)
x_range = (x * n, (x + 1) * n)
y_range = (y * n, (y + 1) * n)
task_size = ((x + 1) * n - x * n) * ((y + 1) * n - y * n)
return Task(task_zoom, task_size, x_range, y_range, [], task_name)
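    # Example (hypothetical tile): subdividing tile (x=0, y=0, z=1) into zoom-2
    # sub-tiles gives n = 2, so x_range = (0, 2), y_range = (0, 2) and size = 4.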
@staticmethod
def from_rectangle(task_zoom, extents: (float, float, float, float), name=None):
if name:
task_name = name
else:
task_name = 'LEVEL {} TILES FROM ({}, {}, {}, {})'.format(task_zoom, *extents)
lat0, lat1, lng0, lng1 = extents
x_s, y_s = tile_xy(lat0, lng0, task_zoom)
x_e, y_e = tile_xy(lat1, lng1, task_zoom)
# x_range = range(x_s, x_e + 1)
# y_range = range(y_s, y_e + 1)
# task_size = len(x_range) * len(y_range)
# task_tiles = ((xx, yy) for xx in x_range for yy in y_range)
x_range = (x_s, x_e + 1)
y_range = (y_s, y_e + 1)
task_size = (x_e - x_s + 1) * (y_e - y_s + 1)
return Task(task_zoom, task_size, x_range, y_range, [], task_name)
@staticmethod
def from_point(task_zoom, latlng, buffer, name=None):
lat, lng = latlng
extents = lat + buffer, lat - buffer, lng - buffer, lng + buffer
return Task.from_rectangle(task_zoom, extents, name)
class ProgressbarCounter:
def __init__(self, max_value):
self._progress = 0
self._max = max_value
def update(self):
self._progress = self._progress + 1
print("\r" + "Task tiles downloading: {}/{}, {}%".format(self._progress,
self._max,
round((self._progress * 100 / self._max)), 2),
end='', flush=True)
class Downloader:
def __init__(self, store_path: str, task: Task = None, merge=False):
self._task = task
self._root_path = store_path
def run(self, coroutine_num=30):
task_start_time = time.time()
        # print task info at start
# print('Task name: {}'.format(self._task.name, ))
# print('Task tiles number: {}'.format(self._task.size))
        # progress bar counter
p = ProgressbarCounter(self._task.size)
        # download
async_down_tiles(self._task, self._root_path, p, coroutine_num)
        # print summary at the end
task_end_time = time.time()
# print('\n' + 'Task use time: {}s'.format(task_end_time - task_start_time))
# print('--------------------------------------')
        # if any tiles failed to download, retry them
if self._task.re_list and len(self._task.re_list) > 0:
# retry task
self._task.name = "RETRY:" + self._task.name
retry_downloader = Downloader(self._root_path, self._task)
retry_downloader.run()
def async_down_tiles(task: Task, store_path, progress_bar, req_limit=30):
# add retry
s = requests.Session()
retries = Retry(total=5, backoff_factor=0.2, status_forcelist=[500, 502, 503, 504], raise_on_redirect=True,
raise_on_status=True)
s.mount('http://', HTTPAdapter(max_retries=retries))
s.mount('https://', HTTPAdapter(max_retries=retries))
z = task.zoom
if task.re_list and len(task.re_list) > 0:
# if retry
task_urls = (grequests.get(_GOOGLE_MAP_URL.format(x, y, z),
session=s,
hooks={'response': save_tile_hook(zoom=z, x=x, y=y, path=store_path,
p=progress_bar, re_list=task.re_list)},
headers={'user-agent': random.choice(_USER_AGENTS)})
for x, y in task.re_list)
else:
task_urls = (grequests.get(_GOOGLE_MAP_URL.format(x, y, z),
session=s,
hooks={'response': save_tile_hook(zoom=z, x=x, y=y, path=store_path,
p=progress_bar, re_list=task.re_list)},
headers={'user-agent': random.choice(_USER_AGENTS)})
for x in range(*task.x_range)
for y in range(*task.y_range))
grequests.map(task_urls, size=req_limit)
def save_tile_hook(**kwargs):
def save_tile(response, *request_args, **request_kwargs):
zoom, x, y = kwargs['zoom'], kwargs['x'], kwargs['y']
path = kwargs['path']
p = kwargs['p']
re_list = kwargs['re_list']
if response.status_code not in (400, 404, 410):
try:
image = Image.open(BytesIO(response.content))
z_path = path + '/{}'.format(zoom)
if not os.path.exists(z_path):
os.mkdir(z_path)
x_path = z_path + '/{}'.format(x)
if not os.path.exists(x_path):
os.mkdir(x_path)
image_path = x_path + '/{}.jpg'.format(y)
image.save(image_path)
except Exception as e:
if re_list:
re_list.append((x, y))
msg = 'tile( x:{}, y:{}, z:{}) download fail, it will retry after task'
logging.warning(msg.format(x, y, zoom))
logging.exception(e)
p.update()
return save_tile
def read_tasks_file(file_path):
with open(file_path, 'r') as f:
return tuple((tuple(map(int, line.strip().split(','))) for line in f.readlines()))
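# read_tasks_file expects one "x,y,z" triple per line, e.g. (hypothetical):
#   222,103,8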
def download_image(image_path, task: Task, save_tile=True, tile_path=None):
    # TODO: implement whole-image download
    # download
    # delete the tile folder afterwards
if not save_tile:
pass
pass
if __name__ == '__main__':
    # Los Angeles  LT_xy:34.2757819620,-118.6073152800 RB_xy:33.4426799945,-116.8645182563
    # Changzhou:
    # LT_xy = tile_xy(31.8822313752,119.8790377320, tasks_zoom)
    # RB_xy = tile_xy(31.7026003847,120.0009849033, tasks_zoom)
    # Las Vegas  LT:36.3058948028,-115.3299402473 RB:36.0329247545,-115.0138758082
    # Paris  LT:48.9033085728,2.2891089475 RB:48.8169904853,2.4196910441
    # San Francisco  LT_xy:37.8027936169,-122.5235894369 RB:37.7117271669,-122.3511555838
tasks_zoom = 20
LT_xy = tile_xy(37.8027936169, -122.5235894369, tasks_zoom)
RB_xy = tile_xy(37.7117271669, -122.3511555838, tasks_zoom)
tiles_path = r"H:\Data\GoogleMap\SanFrancisco"
nPatchs = (RB_xy[0] - LT_xy[0] + 1) * (RB_xy[1] - LT_xy[1] + 1)
nPatch = 0
dataCheck = True
print(LT_xy, RB_xy, nPatchs)
with progressbar.ProgressBar(min_value=0, max_value=nPatchs) as bar:
for coord_x in range(LT_xy[0], RB_xy[0] + 1):
for coord_y in range(LT_xy[1], RB_xy[1] + 1):
if dataCheck:
if os.path.exists(os.path.join(tiles_path, '{zoom}/{x}/{y}.jpg'.format(zoom=tasks_zoom, x=coord_x, y=coord_y))):
print('\r FILE EXISTS', os.path.join(tiles_path, '{zoom}/{x}/{y}.jpg'.format(zoom=tasks_zoom, x=coord_x, y=coord_y)), end='',
flush=True)
nPatch += 1
bar.update(nPatch)
continue
else:
dataCheck = False
test_task = Task.from_father_tile(tasks_zoom, (coord_x, coord_y, tasks_zoom))
downloader = Downloader(tiles_path, test_task)
downloader.run()
nPatch += 1
bar.update(nPatch)
# start_time = time.time()
# tasks_xyz = read_tasks_file('./task_xyz_13_4_rest.txt')
# # tasks_xyz = [(222, 103, 8)]
# tasks_zoom = 19
# tiles_path = 'E:\\'
# t = 0
# T = len(tasks_xyz)
# for xyz in tasks_xyz:
# t = t + 1
# print('Current task: {}/{}'.format(t, T))
# test_task = Task.from_father_tile(tasks_zoom, xyz)
# downloader = Downloader(tiles_path, test_task)
# downloader.run()
# end_time = time.time()
# print('Total use time: {}s'.format(end_time-start_time))
#
# # narita = (35.764701843299996, 140.386001587)
# # lat0, lat1, lng0, lng1 = narita[0]+0.04, narita[0]-0.04, narita[1]-0.04, narita[1]+0.04
# # image = get_image_from_tiles((lat0, lat1, lng0, lng1), 15, 'D:/data/japan/gmap_tiles')
# # image.save('D:/data/narita_15.jpg')
|
import os, platform
import matplotlib
if matplotlib.rcParams['backend'] == 'nbAgg':
print('Use IPython notebook backend for matplotlib.')
elif platform.system() == 'Linux':
try:
os.environ['DISPLAY']
matplotlib.use('TkAgg')
print('Use TkAgg backend for matplotlib.')
except KeyError:
matplotlib.use('Agg')
print('Use Agg backend for matplotlib.')
else:
print('Use default backend defined in matplotlibrc: '
'{}'.format(matplotlib.rcParams['backend']))
#from __main__ import run
from api import set_logging, analysis, load, load_config, make_plots, save
set_logging('info')
__all__ = [
'analysis',
'load',
'load_config',
'make_plots',
    'save',
]
|
from rxdrug import drugs_list
ZERO = 0
class Patient:
def __init__(self, name, ailment, drugs):
self.name = name
self.ailment = ailment
self.drugs = drugs
    def __str__(self):
        return '{} is taking {} for {}'.format(self.name, ', '.join(self.drugs), self.ailment)
def main():
# call drugs_list()
rx_dictionary = drugs_list()
patients = []
# open prescriptions file
with open('prescriptions.txt') as prescriptions:
lines = prescriptions.readlines()
for line in lines:
info = line.replace('\n','').split('|')
patients.append(Patient(info[0],info[1],info[2].split(',')))
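    # each prescriptions.txt line is assumed to look like (hypothetical):
    #   Alice|migraine|ibuprofen,sumatriptan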
# iterate through each patient
for patient in patients:
print(patient.name,'is treating',patient.ailment)
print('Current prescription:',', '.join(patient.drugs))
try:
# check for a second drug
second_drug = patient.drugs[1]
interactions = rx_dictionary[second_drug].check_interaction(patient.drugs)
# if the drug list is not empty
# print warning
if len(interactions) > ZERO:
print('Warning: drug-drug interaction between',', '.join(patient.drugs),'\n')
else:
print('No interactions!\n')
        # if the patient has no second drug (or an unknown one), skip the check
        except (IndexError, KeyError):
            print(patient.name,'is not taking a second drug, no risk of interactions\n')
if __name__ == '__main__':
    main()
|
import uopy
class U2Message:
def __init__(self):
self.items = [
{'id': 1, 'name': 'Flight', 'barcode': '893212299897', 'price': 500},
{'id': 2, 'name': 'Flight', 'barcode': '123985473165', 'price': 1000},
{'id': 3, 'name': 'Tour', 'barcode': '231985128446', 'price': 150},
{'id': 4, 'name': 'Holiday', 'barcode': '231985128446', 'price': 750}
]
self.planitems = [
{'code': '1',
'type': 'Transfer',
'options': [{'description': 'From?', 'status': 'primary'},
{'description': 'To?', 'status': 'success'},
{'description': 'Date?', 'status': 'success'}
],
'actions': [{'description': 'add', 'status': 'primary'},
{'description': 'del', 'status': 'danger'}]
},
{'code': '2',
'type': 'Flight',
'options': [{'description': 'From?', 'status': 'primary'},
{'description': 'To?', 'status': 'success'},
{'description': 'Date?', 'status': 'success'}
],
'actions': [{'description': 'add', 'status': 'primary'},
{'description': 'del', 'status': 'danger'}
]
},
{'code': '3',
'type': 'Tour',
'options': [{'description': 'Where?', 'status': 'primary'},
{'description': 'Date?', 'status': 'success'}],
'actions': [{'description': 'add', 'status': 'primary'},
{'description': 'del', 'status': 'danger'}
]
},
{'code': '4',
'type': 'Hotel',
'options': [{'description': 'Where?', 'status': 'primary'},
{'description': 'Date?', 'status': 'success'}
],
'actions': [{'description': 'add',
'status': 'primary'
},
{'description': 'del',
'status': 'danger'
}
]
}
]
def get_items(self):
return self.items
def get_planitems(self):
return self.planitems
#config = {}
#config['service'] = 'udcs'
#config['account'] = '/unidata/UNIRAMA'
#config['host'] = 't01.astratis.com'
#config['user'] = 'manos'
#config['password'] = 'ursos'
#config['port'] = 31438
#ain_U2_session = uopy.connect(**config)
#cmd = uopy.Command('SORT TAIRPORTS WITH @ID NE "//M" A1')
#cmd.run()
#print(cmd.response)
|
play = True
while play:
operation = raw_input("Do you want to add, subtract, multiply, or divide?")
number1 = int(raw_input("Enter a number"))
number2 = int(raw_input("Enter another number"))
if operation == "add":
print number1 + number2
elif operation == "subtract":
print number1 - number2
elif operation == "multiply":
print number1 * number2
elif operation == "divide":
print float(number1)/float(number2)
else:
print "You did not provide a valid operation"
playAgain = raw_input("Do you want to play again?(y/n)")
if playAgain == "n":
play = False
elif playAgain == "y":
play = True
else:
play = False
|
#!/usr/bin/python
#coding=utf-8
#__author__:TaQini
from pwn import *
# rop1
for i in range(20):
p = remote('192.241.138.174',9998)
offset = 64+i
payload = 'A'*offset
print "[+] ",i
p.sendline(payload)
print p.recvall()
p.close()
# p.interactive()
|
# Write a Python program to find the list of words that are longer than n from a given list of words
def longerThanN(n, listprovided):
    for word in listprovided:
        if len(word) > n:
            print(word)
n = int(input("Please enter the value of n: "))
listprovided = ['Red', 'Green', 'White', 'Black', 'Pink', 'Yellow', '1', '2', '5', '45554']
longerThanN(n, listprovided)
|
#!/usr/bin/env python3
'''
Label image with Google Cloud Vision API.
Before using it, check "vision_api_test.sh" in the google-vision-setting directory. cred.json is required.
Usage : ./auto_labler.py --meme_dir=./meme_cut/ --output_dir=./output_xml/
'''
import sys
from subprocess import call
import subprocess
import json
import argparse
import io
import os
import re
from pathlib import Path
from google.cloud import vision
from google.cloud.vision import types
def get_args_parser():
parser = argparse.ArgumentParser(description='Directories for processing')
parser.add_argument('-i','--meme_dir', type=str, required=True, help='Directory of a input memes.')
parser.add_argument('-o','--output_dir', type=str, required=True, help='Directory of a output xml.')
parser.add_argument('--lang_hint', type=str, required=True,
help="""Google vision detect language hint. =ko for Korean,
=en English =ja Japanese =zh* Chinese
https://cloud.google.com/vision/docs/languages""")
parser.add_argument('-w','--overwrite', default=False, help='Overwrite xml.')
args = parser.parse_args()
return args
def json2xml(json_obj, line_padding=""):
result_list = list()
json_obj_type = type(json_obj)
if json_obj_type is list:
for sub_elem in json_obj:
result_list.append(json2xml(sub_elem, line_padding))
return "\n".join(result_list)
if json_obj_type is dict:
for tag_name in json_obj:
sub_obj = json_obj[tag_name]
result_list.append("%s<%s>" % (line_padding, tag_name))
result_list.append(json2xml(sub_obj, "\t" + line_padding))
result_list.append("%s</%s>" % (line_padding, tag_name))
return "\n".join(result_list)
return "%s%s" % (line_padding, json_obj)
def detect_text(path, hint):
"""Detects text in the file."""
client = vision.ImageAnnotatorClient()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.types.Image(content=content)
img_ctxt = vision.types.ImageContext()
img_ctxt.language_hints.append(hint)
response = client.text_detection(image=image, image_context=img_ctxt)
texts = response.text_annotations
res = ''
for text in texts:
res = '"{}"'.format(text.description)
break
return res
def run_tagger(args):
in_dir = os.path.abspath(args.meme_dir)+ '/'
out_dir = os.path.abspath(args.output_dir)+ '/'
hint = args.lang_hint
overwrite_flag = args.overwrite
if not os.path.exists(out_dir):
os.makedirs(out_dir)
episodes = os.listdir(in_dir)
episodes.sort()
# iterate meme dir.
for episode in episodes:
images = os.listdir(in_dir+'/'+episode)
# xml episode folders should not have whitespace in name.
xml_ep = episode.replace(' ', '_')
if not os.path.exists(out_dir+'/'+ xml_ep):
os.makedirs(out_dir + '/' + xml_ep)
if episode == '.ipynb_checkpoints':
continue
print('\n## Episode : ', episode)
images.sort()
for image in images:
img_path = in_dir + episode + '/' + image
if not img_path.lower().endswith(('.png', '.jpg', '.jpeg')):
continue
x_path = out_dir + xml_ep +'/' + image
pre, ext = os.path.splitext(x_path)
x_path = pre + '.xml'
xml_file = Path(x_path)
if xml_file.exists() and not overwrite_flag:
                print('xml already exists : %s ' %(x_path.rsplit('/',1)[1]))
continue
print('Label -> %s ' %(image))
with open(x_path, 'w') as f:
res_txt = detect_text(img_path, hint)
if hint == 'ko':
res_txt = re.sub(r'[^가-힣\s]', '', res_txt)
elif hint == 'en':
                    res_txt = re.sub(r'[^A-Za-z\s]', '', res_txt)  # the range A-z would also match punctuation
res_txt = re.sub(r'\t{1,}', ' ', res_txt)
res_txt = re.sub(r'\n{1,}', ' ', res_txt)
res_txt = re.sub(r'\s{1,}', ' ', res_txt)
res_txt = re.search(r'\s{0,}(.*)', res_txt).group(1)
print(': ' +res_txt)
s = '{"annotation" : {"folder" : "'+ episode +'", "filename" : "'+ image +'", "segmented": 0, "object" : {"name" : "'+ res_txt +'", "pose" : "Unspecified", "truncated" : 0, "occluded" : 0, "difficult" : 0, "vector" : 0} }}'
j = json.loads(s)
f.write(json2xml(j))
print('saved.')
def main():
args = get_args_parser()
print('## Start text detection, using google cloud vision..')
try :
run_tagger(args) # xml
print('\nLabeling & Generate .xml done.')
print('overwrite mode : %s' %(args.overwrite))
print('GCP detect language : %s\n' %(args.lang_hint))
except Exception as e:
print(e)
        print('\nAuto detection failed, check out the links below.')
print('https://cloud.google.com/vision/docs/libraries')
print('https://cloud.google.com/vision/docs/detecting-text\n')
sys.exit()
if __name__ == '__main__':
main()
|
import os
import json
import logging
logging.getLogger().setLevel(logging.INFO)
class DoProcessing() :
def __init__( self, JSON_FILE_URL, LOG_FILE_URL, STORAGE_DIR ) :
''' Constructor for this class '''
self.JSON_FILE_URL = JSON_FILE_URL
self.LOG_FILE_URL = LOG_FILE_URL
self.STORAGE_DIR = STORAGE_DIR
def getJsonFileContent( self, url ) :
''' Reads the content from filter json and returns it in a dictionary format '''
cmd = 'curl ' + url
jsonFileContent = os.popen( cmd ).read()
jsonFileDict = json.loads( jsonFileContent )
return jsonFileDict
def getLogFileContent( self, url ) :
''' Reads the content from log file and returns it in a line by line split list '''
cmd = 'curl ' + url
logFileContent = os.popen( cmd ).read()
logFileLineList = logFileContent.splitlines()
return logFileLineList
def getInfoFieldsFromLine( self, logFileLine ) :
''' Parses given line from the log file and returns a dictionary containing the extracted information from the line '''
infoDict = { }
splittedLine = logFileLine.split()
infoDict[ 'year' ] = splittedLine[ 0 ]
infoDict[ 'month' ] = splittedLine[ 1 ]
infoDict[ 'day' ] = splittedLine[ 2 ]
infoDict[ 'time' ] = splittedLine[ 3 ]
infoDict[ 'status' ] = splittedLine[ splittedLine.index( 'status:' ) + 1 ]
if infoDict[ 'status' ][ -1 ] == ',' :
infoDict[ 'status' ] = infoDict[ 'status' ][0:-1]
messageStartIndex = splittedLine.index( 'Message:' ) + 1
Message = ''
for i in range( messageStartIndex, len( splittedLine ) ) :
Message += splittedLine[ i ] + ' '
infoDict[ 'Message' ] = Message
return infoDict
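    # getInfoFieldsFromLine assumes log lines shaped like (hypothetical example):
    #   2021 07 15 10:32:01 ip-address: 10.0.0.1, status: ERROR, Message: disk is full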
def createDirectoryAndFiles( self ) :
''' Does the processing, creates the directory and stores the file with content '''
error = False
try :
jsonFileDict = self.getJsonFileContent( self.JSON_FILE_URL )
except Exception as e :
error = True
logging.error(e)
return error
try :
logFileLineList = self.getLogFileContent( self.LOG_FILE_URL )
except Exception as e :
error = True
logging.error(e)
return error
index = 0
dictIpAddress = { }
for logFileLine in logFileLineList :
splittedLine = logFileLine.split()
ipAddress = splittedLine[ splittedLine.index( 'ip-address:' ) + 1 ][:-1]
if ipAddress in dictIpAddress.keys() :
dictIpAddress[ ipAddress ].append( index )
else :
dictIpAddress[ ipAddress ] = [ index ]
index = index + 1
finalContentDict = {}
for dictionary in jsonFileDict[ 'data' ] :
ipAddress = dictionary[ 'ipAddress' ]
            debugFlagList = dictionary[ 'debugFlag' ]
if ipAddress in dictIpAddress.keys() :
indexList = dictIpAddress[ ipAddress ]
for index in indexList:
logFileLine = logFileLineList[ index ]
infoDict = self.getInfoFieldsFromLine( logFileLine )
date = ''
date = infoDict[ 'month' ] + '-' + infoDict[ 'day' ] + '-' + infoDict[ 'year' ]
directoryName = date
fileName = ipAddress + '_' + infoDict[ 'status' ] + '.log'
pathTuple = ( directoryName, fileName )
if pathTuple in finalContentDict.keys() :
finalContentDict[ pathTuple ].append( infoDict[ 'time' ] +' '+ infoDict[ 'Message' ] + '\n' )
else :
finalContentDict[ pathTuple ] = [ infoDict[ 'time' ] +' '+ infoDict[ 'Message' ] + '\n' ]
for pathTuple, messageList in finalContentDict.items() :
directoryName = pathTuple[ 0 ]
fileName = pathTuple[ 1 ]
directoryPath = os.path.join( self.STORAGE_DIR, directoryName )
filePath = os.path.join( directoryPath, fileName )
fullContentOfFile = ''
for message in messageList :
fullContentOfFile += message
            if not os.path.isdir( directoryPath ) :
os.mkdir( directoryPath )
file = open( filePath, "w+" )
file.write( fullContentOfFile )
file.close()
logging.info( "Created file with file name %s at path %s", fileName, directoryPath )
return error
class FindContentForFile() :
def __init__( self, STORAGE_DIR, folderName, fileName ) :
''' Constructor for this class '''
self.STORAGE_DIR = STORAGE_DIR
self.folderName = folderName
self.fileName = fileName
def getFileContent( self ) :
''' Gets the content of a file '''
filePath = self.STORAGE_DIR + '/' + self.folderName + '/' + self.fileName
error = False
message = ''
try :
file = open( filePath, "r" )
message = file.read()
file.close()
except FileNotFoundError as e :
error = True
logging.error(e)
except Exception as e :
error = True
logging.error(e)
return ( error, message )
class FindContentForAll() :
def __init__( self, STORAGE_DIR ) :
''' Constructor for this class '''
self.STORAGE_DIR = STORAGE_DIR
def getContentForAll( self ) :
''' Gets the content for all the files and returns a dictionary with file name, directory name and message '''
contentDictList = [ ]
folderNameList = os.listdir( self.STORAGE_DIR )
for folderName in folderNameList :
fileNameList = os.listdir( self.STORAGE_DIR + '/' + folderName )
for fileName in fileNameList :
contentDict = {}
contentDict[ 'folderName' ] = folderName
ipAddress = fileName[ 0 : fileName.index('_') ]
status = fileName[ fileName.index('_') + 1 : -4 ]
contentDict[ 'fileName' ] = ipAddress + '_' + status + '.log'
obj = FindContentForFile( self.STORAGE_DIR, folderName, fileName )
                error, message = obj.getFileContent()
                contentDict[ 'message' ] = message
contentDictList.append( contentDict )
return contentDictList
|
'''VoidFinder - Hoyle & Vogeley (2002)'''
################################################################################
#
# IMPORT MODULES
#
################################################################################
from voidfinder import filter_galaxies, find_voids
from astropy.io import fits
from astropy.table import Table
from absmag_comovingdist_functions import Distance
import pickle
################################################################################
#
# USER INPUTS
#
################################################################################
survey_name = 'DESI_dc17_'
# File header
in_directory = '/scratch/kdougla7/VoidFinder/DESI/'
out_directory = '/scratch/kdougla7/VoidFinder/DESI/'
# Input file names
in_filename = in_directory + 'DESI_sgc.fits' # File format: RA, dec, redshift, comoving distance, absolute magnitude
mask_filename = in_directory + 'dc17sgcmask.dat' # File format: RA, dec
# Output file names
galaxy_file_base = in_filename.split('/')[-1][:-5] # strip the input directory and '.fits' so outputs land inside out_directory
out1_filename = out_directory + galaxy_file_base + '_maximal.txt' # List of maximal spheres of each void region: x, y, z, radius, distance, ra, dec
out2_filename = out_directory + galaxy_file_base + '_holes.txt' # List of holes for all void regions: x, y, z, radius, flag (to which void it belongs)
'''out3_filename = 'out3_vollim_dr7.txt' # List of void region sizes: radius, effective radius, evolume, x, y, z, deltap, nfield, vol_maxhole
voidgals_filename = 'vollim_voidgals_dr7.txt' # List of the void galaxies: x, y, z, void region '''
#ngrid = 128 # Number of grid cells
max_dist = 3400. # z_ngc = 1.5--> 4158 h-1 Mpc # z_sgc = 1.1 --> 3374
#box = 630. # Size of survey/simulation box
dl = 5. # Cell side length [Mpc/h]
################################################################################
#
# OPEN FILES
#
################################################################################
'''
infile = Table.read(in_filename, format='ascii.commented_header')
maskfile = Table.read(mask_filename, format='ascii.commented_header')
'''
gal_file = fits.open(in_filename)
infile = Table(gal_file[1].data)
maskfile = Table.read(mask_filename, format='ascii.commented_header')
survey_name = 'DC17_SGC_' # overrides the survey name set above
################################################################################
#
# FILTER GALAXIES
#
################################################################################
#coord_min_table, mask = filter_galaxies(in_filename, mask_filename, ngrid, box, max_dist)
coord_min_table, mask, ngrid = filter_galaxies(infile, maskfile, dl, max_dist, survey_name)
################################################################################
#
# FIND VOIDS
#
################################################################################
find_voids(ngrid, dl, max_dist, coord_min_table, mask, out1_filename, out2_filename, survey_name)
|
import sys
from collections import defaultdict
import pyparsing as pp
def stripped_lines(filename):
with open(filename) as f:
for line in f.readlines():
yield line.strip()
def make_grammar():
color = pp.Combine(pp.Word(pp.alphas) + pp.Word(pp.alphas), adjacent=False, joinString=' ')
bag = color + pp.oneOf(['bag', 'bags'])
bags = pp.Group(
(pp.Word(pp.nums) + bag) |
('no other bags')
)
return bag + 'contain' + pp.delimitedList(bags) + '.'
def lex(filename):
grammar = make_grammar()
return [grammar.parseString(line) for line in stripped_lines(filename)]
def parse(filename):
all_tokens = lex(filename)
contained = defaultdict(list)
contains = defaultdict(list)
for tokens in all_tokens:
outer_color = tokens[0]
# 'bags' == tokens[1]
# 'contain' == tokens[2]
# '.' == tokens[-1]
inner_colors = []
for bags in tokens[3:-1]:
if bags[0] == 'no other bags':
continue
count = bags[0]
inner_color = bags[1]
# 'bag(s)' = bags[2]
contained[inner_color].append(outer_color)
contains[outer_color].append((inner_color, int(count)))
return (contained, contains)
def find_reachable(start, edges):
# depth first search
result = set()
for e in edges[start]:
result.add(e)
result = set.union(result, find_reachable(e, edges))
return result
def count_total_bags_inside(start, edges):
# depth first search
result = 0
for e, count in edges[start]:
result += count + count * count_total_bags_inside(e, edges)
return result
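# Worked example (hypothetical bags): if a shiny gold bag holds 2 dark red bags
# and each dark red bag holds 3 dark blue bags, the total inside shiny gold is
# 2 + 2 * 3 = 8, matching the count + count * recursive-count term above.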
def main(args):
contained, contains = parse(args[1])
p1_ans = len(find_reachable('shiny gold', contained))
print(f'part one: {p1_ans}')
p2_ans = count_total_bags_inside('shiny gold', contains)
print(f'part two: {p2_ans}')
# part one: 261
# part two: 3765
if __name__ == '__main__':
main(sys.argv)
|
from flask import g, jsonify,request,session
from flask_httpauth import HTTPBasicAuth  # flask.ext.* import paths were removed in modern Flask
from .response import unauthorized, forbidden
from . import api
from ..models import User
#initializes the HTTPBasicAuth library to be used for regular authentication
auth = HTTPBasicAuth()
#Login Error Handler
@auth.error_handler
def auth_error():
"""Decorator function for login failed attempt"""
return unauthorized("Invalid Credentials")
#login authentication verifier
@auth.verify_password
def verify_password(email_or_token, password):
""" Handles the login/token verificaiton
@params email/token, optional password
@sets global user object
@returns Bool True/False
"""
#checks if the password is empty
    if password == '':
#get the user with the assigned token and store it in the global variable object
g.current_user = User.verify_auth_token(email_or_token)
        #returns true if it's verified successfully
return g.current_user is not None
    #checks for the email and returns false if not available
user = User.query.filter_by(email=email_or_token).first()
if not user or not user.verify_password(password):
return False
g.current_user = user
return True
#Login endpoint
@api.route('/auth/login/', methods=['POST'])
def login():
""" Logs in A user """
email = request.json.get('email')
password = request.json.get('password')
# verifies and returns a token for the user
if not verify_password(email, password):
return unauthorized("Wrong combination of email and password")
token = get_auth_token()
response = jsonify({'token': token, 'message': 'successfully logged in'})
response.status_code = 200
return response
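# Example login request (hypothetical host and URL prefix, since the blueprint's
# url_prefix is configured elsewhere):
#   curl -X POST http://localhost:5000/auth/login/ \
#        -H "Content-Type: application/json" \
#        -d '{"email": "user@example.com", "password": "secret"}'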
# logout endpoint
@api.route('/auth/logout/', methods=['GET'])
@auth.login_required
def logout():
session.clear()
response = jsonify({'status': 'Logged Out'})
response.status_code = 201
return response
def get_auth_token():
""" Utility function to generate authentication token
@params None
@return token
"""
token = g.current_user.generate_auth_token()
return token.decode('ascii')
|
from . import onmt
from . import torchtext
|
from django.contrib import admin
from django.urls import path,include
from .views import *
urlpatterns = [
path('getauth',AuthURL.as_view()),
path('redirect',spotify_callback),
path('checkauth',IsAuthenticated.as_view()),
path('currentsong',CurrentSong.as_view()),
path('play',PlaySong.as_view()),
path('pause',PauseSong.as_view()),
path('skip',SkipSong.as_view()),
path('previous',PrevSong.as_view()),
path('usersong',UserSong.as_view()),
]
|