from random import randint, sample


def number_generator():
    # Six sorted numbers from 1..49 plus a trailing "Superzahl" digit (0..9).
    return sorted(sample(range(1, 50), 6)) + [randint(0, 9)]


def check_for_winning_category(your_numbers, winning_numbers):
    superzahl_match = your_numbers.pop() == winning_numbers.pop()
    matches = len(set(your_numbers).intersection(winning_numbers))
    try:
        return {6: 2, 5: 4, 4: 6, 3: 8}[matches] - superzahl_match
    except KeyError:
        return 9 if matches == 2 and superzahl_match else -1
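# A minimal usage sketch (not part of the original): draw a ticket and a
# winning combination, then print the resulting category (-1 means no win).
# Note that check_for_winning_category pops from its arguments, so pass copies.
if __name__ == "__main__":
    ticket = number_generator()
    draw = number_generator()
    print(check_for_winning_category(list(ticket), list(draw)))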
import pandas as pd
import csv

'''
The initial conditions of the application need a pre-defined empty csv file
named "Student_Data.csv" with column names
[name, age, branch, year, semester, prev_sem_score]
'''


def getStudentData():
    student_data = pd.read_csv("Student_Data.csv", index_col=False)
    if len(student_data) == 0:
        print("There is no data. Add some data and try again")
    return student_data


def addStudentData():
    try:
        name = input("Enter name: ")
        age = int(input("Enter age: "))
        branch = input("Enter branch: ")
        year = int(input("Enter year: "))
        semester = int(input("Enter semester: "))
        prev_sem_score = float(input("Previous semester score: "))
    except ValueError:
        print("Invalid details, please try again")
        return
    row = [name, age, branch, year, semester, prev_sem_score]
    # The with-statement closes the file; no explicit close() is needed.
    with open('Student_Data.csv', 'a', newline='') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerow(row)


def filter_score():
    # Sort data based on prev_sem_score.
    student_data = getStudentData()
    if len(student_data) != 0:
        student_data.sort_values('prev_sem_score', ascending=False, axis=0, inplace=True)
        print(student_data)


while True:
    print("1.Add data")
    print("2.Get data")
    print("3.Show filtered data")
    print("4.Exit")
    choice = int(input("Enter Choice:"))
    if choice == 1:
        addStudentData()
    elif choice == 2:
        print(getStudentData())
    elif choice == 3:
        filter_score()
    elif choice == 4:
        break
    else:
        print("Enter valid Choice")
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import cast, Date, and_, or_
import config
import bcrypt
import subprocess
import requests
import re
import logging
from datetime import date, datetime, timedelta
from database import match, User
from flask import Flask, render_template, request, redirect
from flask_login import LoginManager, login_user, logout_user
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from bs4 import BeautifulSoup as btf
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler

app = Flask(__name__)
app.secret_key = 'jdkljsalkd&#@!Ksdapg'
lm = LoginManager()
app.config.from_object(config.Database)
db = SQLAlchemy(app)
lm.init_app(app)

jobstores = {
    'default': SQLAlchemyJobStore(url=config.dburl)
}
executors = {
    'default': ThreadPoolExecutor(5)
}
job_defaults = {
    'coalesce': False,
    'max_instances': 3
}
sched = BackgroundScheduler(jobstores=jobstores, job_defaults=job_defaults, executors=executors)


def bigJob(matchid):
    # Schedule a recording job for the match at its start time.
    getmatch = db.session.query(match).filter_by(id=matchid).first()
    sched.add_job(func=recordTwitch, args=[getmatch.id], trigger='date',
                  run_date=getmatch.datetime, id=(str(matchid) + ' record'))
    getmatch.status = '<font color="orange">Pending</font>'
    db.session.commit()


def recordTwitch(matchid):
    getmatch = db.session.query(match).filter_by(id=matchid).first()
    link = getmatch.link
    mheaders = {'User-Agent': 'Mozilla/5.0 (Linux; U; Android 4.0.3; ko-kr; LG-L160L Build/IML74K) '
                              'AppleWebkit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30'}
    read = requests.get(link, headers=mheaders)
    soup = btf(read.text, "lxml")
    vodlink = soup.find('div', class_='panel-heading', string=re.compile('Streams')).find_next('a').get('href')
    sched.add_job(func=checkLive, trigger='interval', minutes=5, id=(str(matchid) + ' isLive'),
                  args=[link, matchid], replace_existing=True)
    getmatch.status = '<font color="red">Recording</font>'
    db.session.commit()
    # Output filename ends with the event name; the final %s is the stream URL.
    cmd = ["streamlink -o '%s/UN %s VS %s %s.mp4' %s best"
           % (config.cspath, getmatch.teamA, getmatch.teamB, getmatch.event, vodlink)]
    subprocess.call(cmd, shell=True)


def checkLive(link, matchid):
    dcap = dict(DesiredCapabilities.PHANTOMJS)
    dcap["phantomjs.page.settings.userAgent"] = (
        'Mozilla/5.0 (Linux; U; Android 4.0.3; ko-kr; LG-L160L Build/IML74K) '
        'AppleWebkit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30')
    driver = webdriver.PhantomJS(executable_path=config.homepath + '/phantomjs/bin/phantomjs',
                                 desired_capabilities=dcap,
                                 service_log_path=config.homepath + '/ghostdriver.log')
    driver.get(link)
    html = driver.page_source
    driver.quit()
    soup = btf(html, 'lxml')
    try:
        time = soup.find('span', class_="label-danger label").text
        if time == 'Match over':
            subprocess.call('pkill streamlink', shell=True)
            sched.remove_job(str(matchid) + ' isLive')
            getmatch = db.session.query(match).filter_by(id=matchid).first()
            getmatch.status = '<font color="green">Done</font>'
            db.session.commit()
    except AttributeError:
        # The "Match over" label is not on the page yet.
        pass


sched.start()


@lm.user_loader
def user_loader(user_id):
    return db.session.query(User).filter_by(id=user_id).first()


@app.route('/')
def index():
    dllm = db.session.query(match).filter(cast(match.datetime, Date) >= date.today()).order_by(match.datetime).all()
    return render_template("index.html", matches=dllm, checknow=datetime.now())


@app.route("/nip")
def nip():
    nipftw = db.session.query(match).filter(
        and_(or_(match.teamA == 'NiP', match.teamB == 'NiP'),
             match.datetime > datetime.now())).order_by(match.datetime).all()
    return render_template("nip.html", matches=nipftw, checknow=datetime.now())


@app.route("/today")
def today():
    onlytoday = db.session.query(match).filter(
        and_(cast(match.datetime, Date) == date.today(),
             match.datetime > datetime.now())).order_by(match.datetime).all()
    return render_template("today.html", matches=onlytoday, checknow=datetime.now())


@app.route("/past")
@app.route('/past/<datet>')
def past(datet=date.today() - timedelta(days=1)):
    pastt = db.session.query(match).filter(cast(match.datetime, Date) == datet).order_by(match.datetime).all()
    return render_template("past.html", matches=pastt, checknow=datetime.now())


@app.route('/login', methods=['GET', 'POST'])
def login():
    if request.method == 'GET':
        return render_template("login.html")
    username = request.form['username']
    user = user_loader(username)
    if user is not None and bcrypt.checkpw(request.form['password'].encode('utf-8'),
                                           user.password.encode('utf-8')):
        login_user(user, remember=True)
        return redirect('/')
    return redirect("http://www.google.com")


@app.route('/record')
def record():
    matchid = request.args.get('matchid')
    bigJob(matchid)
    return ('', 204)


@app.route('/delete')
def delete():
    matchid = request.args.get('deid')
    matchid = matchid.replace('del', '')
    sched.remove_job(matchid + ' record')
    getmatch = db.session.query(match).filter_by(id=matchid).first()
    getmatch.status = '-'
    db.session.commit()
    return ('', 204)


@app.route('/stop')
def stop():
    matchid = request.args.get('stopid')
    matchid = matchid.replace('del', '')
    cmd = ['pkill streamlink']
    subprocess.call(cmd, shell=True)
    sched.remove_job(matchid + ' isLive')
    getmatch = db.session.query(match).filter_by(id=matchid).first()
    getmatch.status = '-'
    db.session.commit()
    return ('', 204)


@app.route('/pending')
def pending():
    penn = db.session.query(match).filter(
        or_(match.status == '<font color="orange">Pending</font>',
            match.status == '<font color="red">Recording</font>')).all()
    return render_template('pending.html', matches=penn, checknow=datetime.now())


@app.route('/done')
def done():
    donee = db.session.query(match).filter_by(status='<font color="green">Done</font>').all()
    return render_template('pending.html', matches=donee, checknow=datetime.now())


@app.route('/logout')
def logout():
    logout_user()
    return redirect('/')


if __name__ == '__main__':
    logging.basicConfig(filename='apsched.log', level=logging.DEBUG)
    app.run(debug=True, use_reloader=False)
import requests
from api.wework import WeWork


class DepartmentManege(WeWork):
    # Department management
    secret = "ATvrnjJTv6Qu3zqUSDLXUJzmsPSBT_lHHd8pW68SVUs"

    # Create a department
    def create(self, name, parentid, **kwargs):
        data = {"name": name, "parentid": parentid}
        data.update(kwargs)
        url = "https://qyapi.weixin.qq.com/cgi-bin/department/create"
        r = requests.post(
            url,
            params={'access_token': self.get_token(self.secret)},
            json=data
        )
        self.format(r)
        return r.json()

    # Update a department
    def update(self, id, **kwargs):
        data = {"id": id}
        data.update(kwargs)
        url = "https://qyapi.weixin.qq.com/cgi-bin/department/update"
        r = requests.post(
            url,
            params={'access_token': self.get_token(self.secret)},
            json=data
        )
        self.format(r)
        return r.json()

    # Delete a department
    def delete(self, id):
        url = "https://qyapi.weixin.qq.com/cgi-bin/department/delete"
        params = {'access_token': self.get_token(self.secret), "id": id}
        r = requests.get(url, params=params)
        self.format(r)
        return r.json()

    # Get the department list
    def list(self, id):
        url = "https://qyapi.weixin.qq.com/cgi-bin/department/list"
        params = {'access_token': self.get_token(self.secret), "id": id}
        r = requests.get(url, params=params)
        self.format(r)
        return r.json()
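# A hedged usage sketch (assumes WeWork provides get_token/format as in
# api.wework, which is not shown here; the department name "QA" and
# parentid=1 are made-up values):
if __name__ == "__main__":
    dm = DepartmentManege()
    created = dm.create("QA", 1)          # create a department under the root
    dept_id = created["id"]               # the API echoes back the new id
    dm.update(dept_id, name="QA & Test")  # rename it
    print(dm.list(dept_id))               # list it and its children
    dm.delete(dept_id)                    # clean up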
# M, D = map(int, input().split())
line = input().split()
M = int(line[1])
D = int(line[0])
# Day names, offset so that month 1, day 1 falls on a Thursday.
mp = [
    "Thursday",
    "Friday",
    "Saturday",
    "Sunday",
    "Monday",
    "Tuesday",
    "Wednesday"
]
count = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
total = D - 1
for i in range(M - 1):
    total += count[i]
print(mp[total % 7])
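# Worked example (assuming the input order "D M" as parsed above): for the
# input "2 1", total = (2 - 1) + 0 = 1, so the program prints "Friday".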
import pandas as pd
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as uReq

testurl = "file:///C:/Users/vamsi/Desktop/BidAlert.htm"  # url to scrape
uClient = uReq(testurl)
page_html = uClient.read()
uClient.close()
page_soup = soup(page_html, "html.parser")

# Columns to be parsed
BidAlertNos = []
AgencyBidNos = []
Titles = []
ReceivedDates = []
CloseDates = []
PurchaseTypes = []
DeliveryPoints = []
DeliveryDates = []
IssuingAgencies = []
UsingAgencies = []
States = []
AgencyTypes = []
Contacts = []

# Logic to parse the nested html tables
tables = page_soup.find_all(class_='bidmatches')
for table in tables:
    innertables = table.find_all('table')
    for innertable in innertables:
        subinnertables = innertable.find_all('table')
        for sit in subinnertables:
            table_rows = sit.find_all('tr')
            for tr in table_rows:
                td = tr.find_all('td')
                row = [cell.text.strip().replace('\n', ' ') for cell in td]
                # print(row)
                if len(row) == 2:
                    if row[0].startswith('Bid Alert No.:'):
                        BidAlertNos.append(row[1])
                    elif row[0].startswith('Agency Bid No.:'):
                        AgencyBidNos.append(row[1])
                    elif row[0].startswith('Title:'):
                        Titles.append(row[1])
                    elif row[0].startswith('Received Date:'):
                        ReceivedDates.append(row[1])
                    elif row[0].startswith('Close Date:'):
                        CloseDates.append(row[1])
                    elif row[0].startswith('Delivery Point:'):
                        DeliveryPoints.append(row[1])
                    elif row[0].startswith('Issuing Agency:'):
                        IssuingAgencies.append(row[1])
                    elif row[0].startswith('Using Agency:'):
                        UsingAgencies.append(row[1])
                    elif row[0].startswith('State:'):
                        States.append(row[1])
                    elif row[0].startswith('Agency Type:'):
                        AgencyTypes.append(row[1])

# Creating the dataframe
BidInfoDF = pd.DataFrame(
    {
        "BidAlertNo": BidAlertNos,
        "AgencyBidNos": AgencyBidNos,
        "Titles": Titles,
        "ReceivedDates": ReceivedDates,
        "CloseDates": CloseDates,
        "DeliveryPoints": DeliveryPoints,
        "IssuingAgencies": IssuingAgencies,
        "UsingAgencies": UsingAgencies,
        "States": States,
        "AgencyTypes": AgencyTypes,
    }
)

# Writing the file to csv
BidInfoDF.to_csv("C:\\Users\\vamsi\\Desktop\\ETL\\BidInfo.csv", index=False)
import matplotlib.pyplot as plt
import pandas as pd

# Use a raw string so the backslashes are not treated as escapes.
df = pd.read_csv(r'E:\csvdhf5xlsxurlallfiles\percent-bachelors-degrees-women-usa.csv')
print(df.shape)

year = df['Year']
computer_science = df['Computer Science']
physical_science = df['Physical Sciences']

plt.plot(year, computer_science, color='red')
plt.plot(year, physical_science, color='blue')
plt.axis((1990, 2010, 0, 50))
# Save before show(): show() leaves an empty current figure behind.
plt.savefig('axis_limits.png')
plt.show()

plt.plot(year, computer_science, color='red', label='Computer science')
plt.plot(year, physical_science, color='blue', label='Physical science')
plt.legend(loc='lower right')

cs_max = computer_science.max()
yr_max = year[computer_science.idxmax()]
plt.annotate('maximum', xy=(yr_max, cs_max), xytext=(yr_max + 6, cs_max + 6),
             arrowprops=dict(facecolor='black'))

plt.subplot(2, 1, 1)
plt.plot(year, computer_science, color='red')
plt.xlabel('year')
plt.ylabel('Enrollment (%)')
plt.title('Undergraduate Enrollment of Women')
plt.show()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov  8 13:16:19 2019

Parameter scans

@author: fabio
"""
import numpy as np
from AtoU_model import simple
import multiprocessing
import time
import pickle
from scipy.optimize import curve_fit

time_start = time.time()
pool = multiprocessing.Pool(multiprocessing.cpu_count())

# Parameter grid: all global rate S(A->U) values and all local rate S(U->S)
# values (alternative ranges from earlier scans kept for reference):
# X = [1, 2, 3, 4, 5]
# Y = [70, 85, 100, 115, 130]
X = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200]
Y = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200]

# Coordinate arrays for every parameter pair: along y the Y range is repeated
# len(X) times, and for x each X value is repeated len(Y) times.
y = np.array(Y * len(X))
x = np.array([X[i] for i in range(len(X)) for j in range(len(Y))])

# All (system size, global rate, local rate) triples for this scan; the
# analogous lists for the other system sizes use 153, 191 and 203 instead
# of 182.
data_pairs_sm = [[182, i, j] for i in X for j in Y]

Timing = []
b_value_list = []
cenH_silenced_sm = 0
EcoRV_silenced_sm = 0
duration = 201  # 15


# Fitting function: exponential decay with time constant b.
def model(t, a, b):
    return a * np.exp(-t / b)


# Go through all data pairs (list of 400) and repeat the simulation reps times.
for i in range(len(data_pairs_sm)):
    reps = 1000  # how often each parameter pair is simulated
    repeat_sm = [data_pairs_sm[i]] * reps
    if __name__ == '__main__':
        # simulation results: one (cenH status, EcoRV status) pair per repeat
        status_m = pool.map(simple, repeat_sm)
        # two-dimensional arrays with dimensions reps x duration
        cenH_list_m = np.zeros([len(repeat_sm), duration])
        EcoRV_list_m = np.zeros([len(repeat_sm), duration])
        for elt in range(len(repeat_sm)):
            cenH_m = np.array(status_m[elt][0])
            EcoRV_m = np.array(status_m[elt][1])
            # switch the values (1 now marks timepoints when the reporter is on)
            cenH_m = 1 - cenH_m
            EcoRV_m = 1 - EcoRV_m
            cenH_list_m[elt] = cenH_m
            EcoRV_list_m[elt] = EcoRV_m
        # fraction of repeats with the reporter on, per timepoint
        cenH_total_m = sum(cenH_list_m) / reps
        ym = (sum(EcoRV_list_m) / reps)[3:]
        t_fit = np.array(range(duration - 3))
        # perform the curve fit and keep the fitted time constant b
        popt_m, pcov_m = curve_fit(model, t_fit, ym, p0=[1, 1], maxfev=5000)
        b_m = popt_m[1]
        print(b_m)
        b_value_list.append(b_m)
        # timing criterion: EcoRV silencing time constant within the chosen
        # interval (the other system sizes used 2.5-5.9, 35-60 and 90-150)
        if 14.5 < b_m <= 22.5:
            timing = 4
        else:
            timing = 1
        Timing.append(timing)

# save the list of fitted b values
with open('b_list_atf1.txt', 'wb') as F:
    pickle.dump(b_value_list, F)

# Plotting (disabled): scatter the Timing labels on the (x, y) parameter grid,
# axes 'S(A-->U) global rate' vs 'S(U-->S) local rate', figure saved as
# "fig_SAU_S300_1x.pdf".
print("Took {}".format(time.time() - time_start))
# Program to connect to the ActiveMQ broker (over MQTT), read workload values
# from the Toolkit/DataMessage/JSON topic and skill estimates from the measure
# topic, and then publish messages to MCITOPIC to be consumed by the phone.
import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish
import json
import time

broker = "localhost"  # the mqtt broker location (can be changed to an IP address if so desired)
port = 1883  # the mqtt broker port

WORKLOAD: int = 0

# Skill IDs mapped to their display names. This lookup table replaces the long
# per-skill if/elif cascade, which repeated the same priority logic for every
# skill ID.
SKILL_NAMES = {
    '2': 'COP Picture',
    '3': 'mIRC Chat',
    '4': 'Comm. Plan',
    '5': 'Comm. Standards',
    '6': 'Problem Solving',
    '7': 'Aircraft Monitoring',
    '8': 'Mission Timeline',
    '9': 'Flight Plan Routes',
    '10': 'Airspace',
    '11': 'Target Features',
    '12': 'Theater Environment',
    '13': 'Basic Piloting Skills',
    '14': 'Search Plans',
    '15': 'Geographic Features',
    '16': 'Target Area Awareness',
    '17': 'Sensor Application',
    '18': 'Airfield Conditions',
    '19': 'Divert',
}


def on_message_workload(client, userdata, msg):
    global WORKLOAD
    payload = dict(json.loads(msg.payload))
    WORKLOAD = int(payload['outputValues'][0]['value'])
    print(WORKLOAD)


def on_message_skills(client, userdata, msg):
    skill_dict = json.loads(msg.payload)
    skills: dict = {}
    for entry in skill_dict['ExpectedNextState']:
        skills[entry['ID']] = float(entry['Value'])
    print(skills)

    # Workload bands gate the alert priorities: low workload (<= 33) sends
    # high, medium, and low priority alerts; medium workload (34-67) sends
    # high and medium only; high workload (> 67) sends high priority only.
    msg_list = []
    for skill_id, value in skills.items():
        name = SKILL_NAMES.get(skill_id)
        if name is None:
            continue
        if 0 <= value < 2:
            msg_list.append('(H) ' + name)
            print("High priority alert for skill: ", skill_id)
        elif 2 <= value < 3.5 and WORKLOAD <= 67:
            msg_list.append('(M) ' + name)
            print("Med. priority alert for skill: ", skill_id)
        elif 3.5 <= value and WORKLOAD <= 33:
            msg_list.append('(L) ' + name)
            print("Low priority alert for skill: ", skill_id)

    # Publish the queued alerts one per second.
    for alert in msg_list:
        publish.single('MCITOPIC', alert, 0)
        time.sleep(1)


def on_connect(client, userdata, flags, rc):
    print("Connected with result code " + str(rc))
    client.subscribe("Toolkit/DataMessage/JSON")
    client.subscribe("measure")
    # this will be used to sync the WAD timer with the connection of this program
    publish.single("MCITOPIC", "(S)", 0)


client = mqtt.Client()
client.connect(broker, port)
client.on_connect = on_connect
client.message_callback_add("Toolkit/DataMessage/JSON", on_message_workload)
client.message_callback_add("measure", on_message_skills)
client.loop_forever()
import mine
import mysvg
from manim import *


def rate_dump():
    print(rate_functions.__all__)
    for rate_function in rate_functions.__all__:
        call = f"{rate_function}(.5)"
        bar = eval(call)
        # Plain rate functions return a float at 0.5; skip anything whose
        # result is itself callable (i.e. needs extra configuration).
        if not callable(bar):
            mysvg.ink_text_animation("Cross-smooth", "abcde", iterations=10, mode=0,
                                     rate_function=rate_function,
                                     output="/home/peter/dumps/rate_dumps")


# iterable : [(0, 'y', '-0.2'), (0, 'width', '1.2'), (0, 'height', '1.4'), (0, 'x', '-0.1'),
#             (1, 'radius', '2.4'), (2, 'numOctaves', '1'), (4, 'scale', '10.319410319410318'),
#             (5, 'flood-opacity', '1'), (6, 'radius', '3.8'), (7, 'stdDeviation', '2.4'),
#             (10, 'dy', '-7'), (10, 'dx', '-4.5'), (11, 'stdDeviation', '4.8')]
acceptable = ["stdDeviation", "radius", "scale"]


def mode_dump():
    filter_key = "Cross-smooth"
    filter_key = mysvg.get_filter_key(filter_key)
    iterable = mysvg.get_iterables(filter_key)
    for i, it in enumerate(iterable):
        if it[1] in acceptable:
            mysvg.ink_text_animation(filter_key, "abcde", iterations=10, mode=i,
                                     output="/home/peter/dumps/mode_modes")
    # mysvg.ink_text_animation("Protrusions__Fire", "abcde", iterations=10, mode=0, output="/home/peter/fire")


def all_dump():
    for filter_key in mine.iterate_file("/home/peter/setup/ink/style_keys"):
        filter_key = mysvg.get_filter_key(filter_key)
        iterable = mysvg.get_iterables(filter_key)
        try:
            for i, it in enumerate(iterable):
                if it[1] in acceptable:
                    mysvg.ink_text_animation(filter_key, "abcde", iterations=6, mode=i,
                                             output="/home/peter/dumps/all_styles")
        except Exception:
            pass


if __name__ == '__main__':
    # all_dump()
    # rate_dump()
    mode_dump()
    # all_gallery()
    # gallery("Protrusions__Fire", "abc")
    # iterable = get_iterables("Protrusions__Fire")
    # print(f"iterable : {iterable}")
    # mine.wslview(mysvg.ink_text_animation("Cross-smooth", "abc", iterations=4, mode=0, output="/tmp/dump"))
    # mine.wslview(ink_text_animation("Cross-smooth", "abc", iterations=10, mode=0, output="/tmp/dump2"))
    # mine.wslview(doManim("/tmp/dump2"))
# encoding=UTF-8
# Voice-recognition worker: picks recognize_tasks from the database, converts
# ogg to wav, splits the audio on silence, and runs Google Cloud Speech
# recognition on the chunks.
from dotenv import load_dotenv, find_dotenv
from pathlib import Path
import json
import os
import os.path
import pymysql
import traceback
import time
import sys
import re
import subprocess
import io
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
from scipy.io import wavfile
from pydub import AudioSegment
from pydub.silence import split_on_silence
from socketserver import TCPServer, StreamRequestHandler
import noisereduce as nr
import wave
import numpy as np
import concurrent.futures
import soundfile as sf

path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(path + "/../")
import reviewgramdb
from reviewgramlog import *
from pythonlanguage import PythonLanguage
from genericlanguage import GenericLanguage

file = os.path.dirname(os.path.abspath(__file__)) + "/recognize_pid.txt"
env_path = Path(path + "/../") / '.env'
load_dotenv(dotenv_path=env_path)
# os.chdir(path + "/../repos/")


def ogg2wav_convert(old, new):
    result = subprocess.run(['ffmpeg', "-hide_banner", "-loglevel", "panic", "-y", "-i", old, new],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return result.stderr.decode("UTF-8")


def try_recognize_voice(arg):
    hints = arg[0]
    fileName = arg[1]
    encoding = enums.RecognitionConfig.AudioEncoding.LINEAR16
    sample_rate_hertz = 48000
    language_code = 'en-US'
    model = 'command_and_search'
    config = {'encoding': encoding,
              'sample_rate_hertz': sample_rate_hertz,
              'language_code': language_code,
              'model': model,
              'speech_contexts': [speech.types.SpeechContext(phrases=hints)]}
    streaming_config = types.StreamingRecognitionConfig(config=config)
    with io.open(fileName, "rb") as f:
        content = f.read()
    stream = [content]
    requests = (types.StreamingRecognizeRequest(audio_content=chunk) for chunk in stream)
    client = speech.SpeechClient.from_service_account_file(os.getenv("GOOGLE_SPEECH_CREDENTIALS"))
    responses = client.streaming_recognize(streaming_config, requests)
    total_result = []
    for response in responses:
        for result in response.results:
            alternative = result.alternatives[0]
            total_result.append(alternative.transcript)
    return " ".join(total_result)


def try_recognize(fileName, table, lang, sourceFileContent):
    start = time.perf_counter()
    perfLogFileName = os.getenv("APP_FOLDER") + "/perf_log.txt"
    fileObject = open(perfLogFileName, 'at')
    hints = []
    for row in table:
        hints.append(row[1])
    if lang is not None:
        hints = hints + lang.getRecognitionHints()
    measure1 = time.perf_counter()
    fileObject.write("Making hints table: " + str(measure1 - start) + "\n")
    source = fileName
    rate, data = wavfile.read(fileName)
    length = len(data) / rate
    noise_length = 1
    dest = source
    reduce = False
    if length > 2.0:
        reduce = True
        rate, data = wavfile.read(source)
        data = data / 1.0
        noisy_part = data[0:rate * noise_length]
    measure2 = time.perf_counter()
    fileObject.write("Denoising: " + str(measure2 - measure1) + "\n")
    original = AudioSegment.from_wav(source)
    chunks = split_on_silence(original, min_silence_len=300, silence_thresh=-70)
    print("Split file " + source + " into " + str(len(chunks)))
    args = []
    for i, chunk in enumerate(chunks):
        my_dest = dest + "-" + str(i) + ".wav"
        chunk.export(my_dest, format="wav")
        # if reduce:
        #     rate, data = wavfile.read(my_dest)
        #     data = data / 1.0
        #     reduced_noise = nr.reduce_noise(audio_clip=data, noise_clip=noisy_part, verbose=True)
        #     wavfile.write(my_dest, rate, reduced_noise)
        data, samplerate = sf.read(my_dest)
        sf.write(my_dest, data, samplerate, subtype='PCM_16')
        args.append([hints, my_dest])
    measure3 = time.perf_counter()
    fileObject.write("Split on silence: " + str(measure3 - measure2) + "\n")
    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = [executor.submit(try_recognize_voice, arg) for arg in args]
        result = " ".join([future.result() for future in futures])
    measure4 = time.perf_counter()
    fileObject.write("Recognition: " + str(measure4 - measure3) + "\n")
    for arg in args:
        os.remove(arg[1])
    measure5 = time.perf_counter()
    fileObject.write("Removing files: " + str(measure5 - measure4) + "\n")
    if lang is None:
        lang = GenericLanguage()
    new_result = lang.recognizeStatement(result, table, sourceFileContent)
    measure6 = time.perf_counter()
    fileObject.write("recognizeStatement: " + str(measure6 - measure5) + "\n")
    fileObject.close()
    return new_result


def select_and_perform_task():
    expiration_time = 30 * 60
    start = time.perf_counter()
    con = reviewgramdb.connect_to_db()
    perfLogFileName = os.getenv("APP_FOLDER") + "/perf_log.txt"
    fileObject = open(perfLogFileName, 'at')
    with con:
        timestamp = int(time.time())
        request = "SELECT" \
                  " r.ID, " \
                  " r.FILENAME, " \
                  " r.RES," \
                  " r.LANG_ID," \
                  " r.LOG," \
                  " r.CONTENT," \
                  " r.REPO_ID " \
                  " FROM " \
                  "`recognize_tasks` AS r " \
                  " WHERE " \
                  " ((DATE_START IS NULL) OR (" + str(timestamp) + " - UNIX_TIMESTAMP(`DATE_START`) >= " + str(expiration_time) + ")) AND (DATE_END IS NULL) " \
                  " LIMIT 1 "
        row = reviewgramdb.select_and_fetch_one(con, request, [])
        if row is not None:
            id = row[0]
            fileName = row[1]
            if row[6] is not None:
                repoId = int(row[6])
            else:
                repoId = 0
            table = []
            rows = reviewgramdb.select_and_fetch_all(con, "SELECT FROM_TEXT, TO_TEXT FROM `replace_tables` WHERE `REPO_ID` = %s ORDER BY `ID` ASC", [repoId])
            for localRow in rows:
                table.append([localRow[0], localRow[1]])
            langId = 0
            if row[3] is not None:
                langId = int(row[3])
            lang = None
            if langId == 1:
                lang = PythonLanguage()
            content = ""
            if row[5] is not None:
                content = row[5]
            measure1 = time.perf_counter()
            fileObject.write("Fetching task from db: " + str(measure1 - start) + "\n")
            reviewgramdb.execute_update(con, "UPDATE `recognize_tasks` SET `DATE_START` = NOW() WHERE `ID` = %s", [id])
            if os.path.exists(fileName) and os.path.isfile(fileName):
                try:
                    print(fileName)
                    newFileName = fileName.replace("ogg", "wav")
                    errors = ogg2wav_convert(fileName, newFileName)
                    if len(errors) > 0:
                        print("Errors while converting ogg to wav:" + errors)
                        reviewgramdb.execute_update(con, "UPDATE `recognize_tasks` SET `RES` = %s, `LOG` = %s WHERE `ID` = %s", ['', 'Errors in running ffmpeg ' + errors, id])
                    else:
                        reviewgramdb.execute_update(con, "UPDATE `recognize_tasks` SET `RES` = %s, `LOG` = %s WHERE `ID` = %s", ['', 'Processed ogg 2 wav', id])
                        print("Recognizing...")
                        result = try_recognize(newFileName, table, lang, content)
                        measure2 = time.perf_counter()
                        fileObject.write("Total run for try_recognize: " + str(measure2 - measure1) + "\n")
                        reviewgramdb.execute_update(con, "UPDATE `recognize_tasks` SET `RES` = %s, `LOG` = %s WHERE `ID` = %s", [result, 'Successfully processed result', id])
                        measure3 = time.perf_counter()
                        fileObject.write("Updating result in DB: " + str(measure3 - measure2) + "\n")
                    fileObject.close()
                except Exception:
                    fileObject.close()
                    print('Exception: ' + traceback.format_exc())
                    reviewgramdb.execute_update(con, "UPDATE `recognize_tasks` SET `RES` = %s, `LOG` = %s WHERE `ID` = %s", ['', 'Exception: ' + traceback.format_exc(), id])
            else:
                fileObject.close()
                reviewgramdb.execute_update(con, "UPDATE `recognize_tasks` SET `RES` = %s, `LOG` = %s WHERE `ID` = %s", ['', 'Unable to find file ', id])
            reviewgramdb.execute_update(con, "UPDATE `recognize_tasks` SET `DATE_END` = NOW() WHERE `ID` = %s", [id])
            print("Performed")


class TCPHandler(StreamRequestHandler):
    def handle(self):
        select_and_perform_task()


def pid_exists(pid):
    if pid < 0:
        return False
    try:
        os.kill(pid, 0)
    except ProcessLookupError:
        return False
    except PermissionError:
        return True
    else:
        return True


if __name__ == "__main__":
    host = '127.0.0.1'
    port = 9090
    addr = (host, port)
    mypid = os.getpid()
    print("PID storage file " + file)
    notRunningAlready = True
    if os.path.isfile(file):
        with open(file, 'rt') as handle:
            pid = handle.read().replace('\n', '')
            try:
                pid = int(pid)
            except ValueError:
                pid = -1
            notRunningAlready = not pid_exists(pid)
    if notRunningAlready:
        print("Running with pid: " + str(mypid))
        with open(file, 'wt') as handle:
            handle.write(str(mypid))
        i = 0
        max_tasks = 20  # max performed tasks
        start_time = int(time.time())
        while i < max_tasks:
            i = i + 1
            select_and_perform_task()
        print("Done serving tasks, starting server")
        server = TCPServer(addr, TCPHandler)
        server.serve_forever()
    print("Done!")
import cv2 as cv
import os
import os.path
import numpy as np


def read_an_image_series(series='birds'):
    """
    Read a series of images
    :param series:
    :return:
    """
    # general path that contains all images of a series
    main_path = 'C:\\Users\\Home\\Dropbox\\codes\\CV\\96131125_HW06\\JPEGS_min\\JPEGS\\'
    # specific path of a series
    path_of_series = main_path + series + '\\'
    # number of images
    images_name = os.listdir(path_of_series)
    num_images = len(images_name)
    # read all images of the sequence as grayscale
    imgs = []
    for i in range(1, num_images + 1):
        name_of_image = 'frame_{}.jpg'.format(i)
        img = cv.imread(filename=path_of_series + name_of_image, flags=cv.IMREAD_GRAYSCALE)
        imgs.append(img)
    # return all images of the sequence as one stacked array
    imgs = np.stack(imgs)
    return imgs


# test function
if __name__ == '__main__':
    # read a sequence of images
    imgs = read_an_image_series(series='birds')
    cv.imshow('birds 1', imgs[0])
    cv.imshow('birds 10', imgs[10])
    cv.waitKey(0)
    cv.destroyAllWindows()
def hashify(string):
    result = {}
    # Append the first character so the last character wraps around to it.
    string = string + string[0]
    for a in range(len(string) - 1):
        k, v = string[a:a + 2]
        try:
            result[k].append(v)          # dictionary value is a list
        except AttributeError:
            result[k] = [result[k], v]   # dictionary value is NOT a list
        except KeyError:
            result[k] = v                # dictionary key does NOT exist
    return result
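# A small usage sketch (not in the original): each character maps to the
# character(s) that follow it, with the last character wrapping to the first.
print(hashify("banana"))  # {'b': 'a', 'a': ['n', 'n', 'b'], 'n': ['a', 'a']}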
from django.contrib import admin

from .models import WalletPlatform, Wallet


class WalletInLine(admin.TabularInline):
    """ Inline admin for wallet relation """
    model = Wallet


class WalletPlatformAdmin(admin.ModelAdmin):
    list_display = ('name', )
    inlines = [
        WalletInLine
    ]


admin.site.register(WalletPlatform, WalletPlatformAdmin)
#!/usr/bin/env python
import sdm
import sys
import random
from time import time
from numpy import array
from sdm import Bitstring, Hardlocation


def test_uniform_distribution(qty=10000):
    n = sdm.get_dimension()
    v = [0] * n
    for i in range(qty):
        a = Bitstring()
        for j in range(n):
            v[j] += a.bitsign(j)
    import pylab
    pylab.hist(v, bins=15)
    return v


def table_7_1():
    n = sdm.get_dimension()
    a = Bitstring()
    v = []
    for i in range(550):
        b = a.copy()
        b.bitrandomswap(i)
        v.append([i, sdm.thread_radius_count_intersect(a, b)])
        print(i)
        sys.stdout.flush()
    return v


def critical_distance(a, b, n, v, read=sdm.thread_read, debug=0):
    ret = []
    for i in range(a, b):
        ret2 = []
        for j in range(n):
            u = v.copy()
            u.bitrandomswap(i)
            w = read(u)
            d = v.distance_to(w)
            if debug > 1:
                print(' ', i, j, d)
            ret2.append(d)
        d = 1.0 * sum(ret2) / len(ret2)
        if debug > 0:
            print('#%d' % i, d)
            sys.stdout.flush()
        ret.append([i, d])
    return ret


def critical_distance2(a, b, n, v, iterated_reading=6, read=sdm.thread_read, debug=0):
    ret = []
    for i in range(a, b):
        ret2 = []
        for j in range(n):
            u = v.copy()
            u.bitrandomswap(i)
            w = read(u)
            for k in range(iterated_reading - 1):
                w = read(w)
            d = v.distance_to(w)
            if debug > 1:
                print(' ', i, j, d)
            ret2.append(d)
        d = 1.0 * sum(ret2) / len(ret2)
        if debug > 0:
            print('#%d' % i, d)
            sys.stdout.flush()
        ret.append([i, d])
    return ret


def write_random(qty, use_threads=True):
    w = sdm.write if not use_threads else sdm.thread_write
    t0 = time()
    for i in range(qty):
        a = Bitstring()
        print('#%d' % i, w(a, a))
    t1 = time()
    print('Time:', t1 - t0, 'seconds')


def sample():
    print('Initializing SDM')
    sdm.initialize()
    dimension = sdm.get_dimension()
    addr = Bitstring()
    dist = sdm.distance(addr)
    sdm.free()
    return dist


def sample_radius(rounds=1, verbose=0):
    if verbose > 0:
        print('Initializing SDM')
    sdm.initialize()
    dimension = sdm.get_dimension()
    v = []
    for i in range(rounds):
        if verbose > 0:
            print('Round #%d' % (i + 1))
        addr = Bitstring()
        dist = sdm.distance(addr)
        if verbose > 0:
            print(' Processing results')
        w = [0] * (dimension + 1)
        for d in dist:
            w[d] += 1
        u = [0] * (dimension + 1)
        for d, n in enumerate(w):
            for j in range(d, dimension + 1):
                u[j] += n
        v.append(u)
    sdm.free()
    return v


def chart_sample_radius(rounds=1, verbose=0):
    import pylab
    data = sample_radius(rounds=rounds, verbose=verbose)
    # for i in range(sdm.get_dimension() + 1):
    #     data[-1][i] += 100000
    dist = range(sdm.get_dimension() + 1)
    args = []
    kwargs = {'linewidth': 1.0}
    for v in data:
        args.extend([dist, v, 'k-'])
    pylab.plot(*args, **kwargs)
    pylab.xlabel('distance')
    pylab.ylabel('qty of hardlocations')
    pylab.title('Qty of hardlocations around random address')
    pylab.grid(True)
    pylab.show()
    arr = array(list(zip(*data)))
    stat = [(i, m, s) for i, (m, s) in enumerate(zip(arr.mean(axis=1), arr.std(axis=1)))]
    return data, stat
from .Basic import fac


def binomial(n, k):
    # "n choose k": zero outside the valid range.
    if k > n:
        return 0
    if k < 0:
        return 0
    return float(fac(n)) / float(fac(k) * fac(n - k))


def perm(n, k):
    # Number of k-permutations of n items.
    if k > n:
        return 0
    if k < 0:
        raise ValueError("k must be non-negative")
    return float(fac(n)) / float(fac(n - k))
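# Worked examples (assuming fac from .Basic is the integer factorial):
#   binomial(5, 2) == 10.0   since 5! / (2! * 3!) = 120 / 12
#   perm(5, 2)     == 20.0   since 5! / 3!        = 120 / 6
#   binomial(3, 5) == 0      since k > n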
jpy = 100 usd = jpy * 0.0094 eur = jpy * 0.0084 print(f"JPY={jpy}") print(f"小数点以下0桁:{jpy:.0f}") print(f"小数点以下1桁:{jpy:.1f}") print(f"小数点以下2桁:{jpy:.2f}") print() print(f"USD={usd}") print(f"小数点以下0桁:{usd:.0f}") print(f"小数点以下1桁:{usd:.1f}") print(f"小数点以下2桁:{usd:.2f}") print() print(f"EUR={eur}") print(f"小数点以下0桁:{eur:.0f}") print(f"小数点以下1桁:{eur:.1f}") print(f"小数点以下2桁:{eur:.2f}") #
""" Usage: call with <filename> <typename> """ import sys import clang.cindex def find_typerefs(node, typename): """ Find all references to the type named 'typename' """ if node.kind.is_reference(): ref_node = node.get_definition() if ref_node: if ref_node.spelling == typename: print (typename, node.location.line, node.location.column) # Recurse for children of this node for c in node.get_children(): find_typerefs(c, typename) # use the llvm come with visual studio installation clang.cindex.Config.set_library_path('C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/VC/Tools/Llvm/x64/bin/') index = clang.cindex.Index.create() tu = index.parse(sys.argv[1]) print ('Translation unit:', tu.spelling) find_typerefs(tu.cursor, sys.argv[2])
import re
from api.models import *
from api.serializers import *
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.views import APIView
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from django.contrib.auth import authenticate, login, logout
from rest_framework import status
from rest_framework_json_api.views import RelationshipView
from rest_framework_json_api import serializers

def update_solved(team, challenge):
    """ Updates the database points for a given team. """
    # Save the time that the challenge was solved.
    timestamp = ChallengeTimestamp.objects.create(team=team, challenge=challenge)
    timestamp.save()
    # Update the team points and last timestamp in the database.
    team.last_timestamp = timestamp.created
    team.save()
    challenge.save()

def check_flag(team, challenge, flag):
    """ Checks a given flag against the challenge flag. """
    # Check if the team has already solved the challenge.
    res = team.solved.filter(id=challenge.id)
    error = None
    # If the team has not solved the challenge, check the flag; otherwise the
    # team has already solved it, so return an error message.
    if not res:
        correct = re.compile(r'^{flag}$'.format(flag=challenge.flag)).match(flag)
        #correct = challenge.flag == flag
        # If the user input the correct flag, update the team's correct flag
        # count; otherwise update the wrong flags count and return an error.
        if correct:
            team.correct_flags = team.correct_flags + 1
            team.save()
            # update timestamps
            update_solved(team, challenge)
            return True, error
        else:
            error = 'Invalid flag'
            team.wrong_flags = team.wrong_flags + 1
            team.save()
            return False, error
    else:
        error = 'Already solved'
        return False, error

class FlagViewDetail(APIView):
    """ Manages flag submit and statistics by challenge id requests. """
    permission_classes = (AllowAny,)

    def get(self, request, challenge_id, format=None):
        """ Handles flag submit for challenge id """
        return Response(str(request.user))

    def post(self, request, challenge_id, format=None):
        """ Handles flag submit for challenge id """
        try:
            challenge = Challenge.objects.get(id=challenge_id)
        except ObjectDoesNotExist:
            return Response('Challenge not found')
        if 'flag' not in request.data or not request.data['flag']:
            return Response('Flag not given')
        flag = request.data['flag']
        try:
            if not request.user.team:
                return Response('No team associated with user')
        except ObjectDoesNotExist:
            return Response('No team associated with user')
        team = request.user.team
        success, error = check_flag(team, challenge, flag)
        if success:
            return Response('Correct flag')
        else:
            return Response(error)
#!/usr/bin/env python
import time, datetime
import sys

curDate = datetime.datetime(*(time.localtime(time.time()))[0:6])

for line in sys.stdin:
    line = line.strip()
    AthleteID, FirstName, LastName, DOB, Gender, Country = line.split('\t')
    if DOB != "":
        date1 = time.strptime(DOB, "%Y/%m/%d %H:%M:%S")
        date2 = datetime.datetime(*date1[0:6])
        red = curDate - date2
        print ",".join([AthleteID, FirstName, LastName, str((red.days)/365)])
    else:
        print ",".join([AthleteID, FirstName, LastName, "NULL"])
from django import forms from student.models import Student class FeeForm(forms.Form): roll_no = forms.IntegerField()
import numpy as np

def mutation_tags(tags, mutpb):
    """Mutation that flips each tag independently with probability mutpb."""
    # Number of guides
    n_guides = len(tags)
    new_tags = []
    for i in range(n_guides):
        rnd = np.random.rand()
        if rnd <= mutpb:
            new_tags.append(1 - tags[i])
        else:
            new_tags.append(tags[i])
    return new_tags
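# Usage sketch (illustrative): with a fixed seed the flip pattern is
# reproducible from run to run.
if __name__ == "__main__":
    np.random.seed(0)
    print(mutation_tags([0, 1, 0, 1], 0.5))  # each bit flipped with p = 0.5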
# coding: utf-8 """ Various import statements """ import os from material.data_types.types_tryout import globals_d from material.data_types.main import main as dt_main from material.functions import main as fn_main_aa
import numpy as np from numpy import linalg as la import math import os import sys trainlabel = [] testlabel = [] icurr = 0 icurrTest = 0 #arrTest = np.zeros((xaxis,yaxis,zmaxtesting)) #defines dimension of 3d array abnormalTraining = 72 abnormalTesting = 48 normalTraining = 500 normalTesting = 336 i = 572 #No of training samples j = 104 k = 6 t = 384 #No of testing samples r = 24 s = 6 def calc_eigen(J,e): U = [] a = [] w,v = la.eig(J) #print v b = w b.sort() #print "1" for x in range (len(b)): a.append(b[len(b)-x-1]) #print "2" for x in range(0,e): for j1 in range(0,len(a)): if (a[x]==w[j1]): U.append(v[:,j1]) U = np.array(U) U = U.transpose() # U = U.transpose() #print type(U) #print U return U def svd_2d(T,M,num): T_mean = np.zeros((j,k), dtype=np.float) New_T = [] New_T_trans = [] F = np.zeros((j,j),dtype = np.float) G = np.zeros((k,k),dtype = np.float) for y in range(0,j): for z in range(0,k): for x in range(0,num): T_mean[y][z] += T[x][y][z] T_mean /= num for x in range(0,num): New_T.append(T[x] - T_mean) New_T_trans.append(New_T[x].transpose()) for x in range(0,num): F += np.dot(New_T[x],New_T_trans[x]) for x in range(0,num): G += np.dot(New_T_trans[x],New_T[x]) F /= num G /= num U = calc_eigen(F,r) V = calc_eigen(G,s) U_trans = U.transpose() V_trans = V.transpose() for x in range(0,num): M[x] = np.dot(np.dot(U_trans,T[x]),V) return (M) def calc_distance(X,Y): sq_rt = 0.0 for x in range(0,s): term = 0.0 square = 0.0 for y in range(0,r): term = X[y][x] - Y[y][x] square += term**2 sq_rt += math.sqrt(square) return (sq_rt) def KNN_1(M,N): nearest = [] pos = 0 for x in range(0,t): value = 15000.0 result = 0.0 for y in range(0,i): result = calc_distance(N[x],M[y]) if value>result : value = result pos = y nearest.append(pos) return (nearest) def extractSeriesTesting(T,src_dir ,label , noOfSeries): global icurrTest,testlabel count = 0 j1=0 k1=0 i1=icurrTest if not os.path.exists(src_dir): print ("Source directory does not exist... ") sys.exit() files_in_directory = os.listdir(src_dir) files_in_directory.sort() for file in files_in_directory: if (count >= (noOfSeries-1)): i1 += 1 icurrTest = i1 break if (file[8] == 'b' or file[8] == 'a'): continue if (file[8:10] == '11'): j1=0 k1=0 i1=icurrTest #open file and do other parts src_path = os.path.join(src_dir, file) if not os.path.exists(src_path): print ("File does not exist") continue if (file[8] == '6'): k1 = 0 elif (file[8] == '7'): k1=1 elif (file[8] == '8'): k1=2 count += 1 i1 += 1 icurrTest = i1 testlabel.append(label) #count is increased to notify that all attribute values have been stored in a matrix elif (file[8:10] == "11"): k1=3 elif (file[8:10] == "12"): k1=4 elif (file[8:10] == "15"): k1=5 else: k1=0 continue temp = 0 fp = open (src_path , 'r') while (temp < j): text = fp.readline().split()[1] #arrTest[temp][y][z]=int(text) T[i1][temp][k1]=int(text) #print (str(temp)+" "+str(y)+" "+str(z)) temp += 1 fp.close() k1 = 0 icurrTest = i1 #reintitializing values icurrTest = i1 return T def function(): global testlabel,trainlabel trainlabel.append(1) trainlabel.append(1) testlabel.append(0) testlabel.append(1) def extractSeries(T,src_dir , label , noOfSeries): global icurr,trainlabel count = 0 j1=0 k1=0 i1=icurr if not os.path.exists(src_dir): print ("Source directory does not exist... 
") sys.exit() files_in_directory = os.listdir(src_dir) files_in_directory.sort() for file in files_in_directory: if (count >= (noOfSeries-1)): i1 += 1 icurr = i1 break if (file[8] == 'b' or file[8] == 'a'): continue if (file[8:10] == '11'): j1=0 k1=0 i1=icurr #open file and do other parts src_path = os.path.join(src_dir, file) if not os.path.exists(src_path): print ("File does not exist") continue if (file[8] == '6'): k1=0 elif (file[8] == '7'): k1=1 elif (file[8] == '8'): k1=2 count += 1 i1 += 1 icurr = i1 trainlabel.append(label) #count is increased to notify that all attribute values have been stored in a matrix elif (file[8:10] == "11"): k1=3 elif (file[8:10] == "12"): k1=4 elif (file[8:10] == "15"): k1=5 else: k1=0 continue temp = 0 fp = open (src_path , 'r') while (temp < j): text = fp.readline().split()[1] T[i1][temp][k1]=int(text) #print (str(i1)+" "+str(temp)+" "+str(k1)) temp += 1 fp.close() k1 = 0 icurr = i1 #reintitializing values icurr = i1 return T def main(): global testlabel,trainlabel T = np.zeros((i,j,k)) arrTest = np.zeros((i,j,k)) M = np.zeros((i,r,s),dtype = np.float) N = np.zeros((t,r,s),dtype = np.float) count_correct =0 count_wrong = 0 print ("Machine Learning Project on Multivariate Time Series Analysis Classification") print ("Input Source Directory for Abnormal Dataset:") src_dir = raw_input() T = extractSeries(T,src_dir , 0, abnormalTraining) #print zcurr print ("Input Source Directory for Normal Dataset :") src_dir = raw_input() T = extractSeries(T,src_dir , 1 , normalTraining) ################################################################################## #print "The i current value is ", #print icurr print ("\n\nTraining over...\n\n") ##################################################################################### M = svd_2d (T,M,i) print ("Input Source Directory for Testing Abnormal Dataset:") src_dir = raw_input() arrTest = extractSeriesTesting(arrTest,src_dir , 0, abnormalTesting) #0 is label for abnormal dataset print ("Input Source Directory for Normal Dataset :") src_dir = raw_input() #print icurrTest arrTest = extractSeriesTesting(arrTest,src_dir , 1 , normalTesting) function() N = svd_2d (arrTest,N,t) nearest = KNN_1(M,N) #print nearest for x in range(t): #if (nearest[x] >= 382) if (int(testlabel[x]) == int(trainlabel[nearest[x]])): count_correct += 1 else: count_wrong += 1 print "Accuracy:" print ((count_correct+0.0)/(count_correct+count_wrong)) if __name__ == '__main__': main()
# Generated by Django 2.2.4 on 2019-10-08 12:34 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('webapp', '0022_notice_images'), ] operations = [ migrations.RenameField( model_name='notice', old_name='files', new_name='pdf', ), ]
import logging logging.root.setLevel(logging.NOTSET) def get_custom_console_handler(): c_handler = logging.StreamHandler() c_handler.setLevel(logging.INFO) c_format = logging.Formatter('%(name)s - %(levelname)s: \n%(message)s\n--------------------') c_handler.setFormatter(c_format) return c_handler def get_custom_file_handler(directory): f_handler = logging.FileHandler(directory) f_handler.setLevel(logging.INFO) f_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s: \n%(message)s\n---------------------') f_handler.setFormatter(f_format) return f_handler
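# Wiring sketch (illustrative; the logger name and the "app.log" path are
# example values, not part of the module above): attach both handlers to a
# named logger and emit a record.
if __name__ == "__main__":
    logger = logging.getLogger("example")
    logger.addHandler(get_custom_console_handler())
    logger.addHandler(get_custom_file_handler("app.log"))
    logger.info("handlers configured")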
# For plotting.
# Count gene IDs per GO term and append a class column.
input_file = open("gene_swiss_GO.id")
output_file = open("go_gene_sum.count", "w")
go_gene_dict = {}
for line in input_file:
    text_list = line.strip().split("\t")
    gene_id = text_list[0]
    if len(text_list) >= 3:  # only lines that carry a GO annotation column
        go_all = text_list[2]
        go_list = go_all.split(";")
        for go_id in go_list:
            if go_id not in go_gene_dict:
                go_gene_dict[go_id] = []
            go_gene_dict[go_id].append(gene_id)
for go_id in go_gene_dict:
    gene_count = len(go_gene_dict[go_id])
    gene_all = ";".join(go_gene_dict[go_id])
    go_id = go_id.strip()
    output_file.write(go_id + "\t" + str(gene_count) + "\t" + gene_all + "\n")
input_file.close()
output_file.close()
# -*- coding: utf-8 -*- # Generated by Django 1.9.4 on 2016-03-21 00:04 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('choosing', '0001_initial'), ] operations = [ migrations.AlterField( model_name='choose', name='phase', field=models.IntegerField(choices=[(0, 'Waiting'), (1, 'Approved'), (2, 'Denied'), (3, 'Deleted')], default=0), ), ]
from bs4 import BeautifulSoup import CustomWebGen as cwg import logging import glob import yaml import os logging.basicConfig( level=logging.NOTSET, format="%(asctime)s:%(levelname)s:%(name)s:%(message)s", handlers=[logging.FileHandler("debug.log"), logging.StreamHandler()], ) root_path="." yaml_files = glob.glob(root_path+"/articles/*.yaml") html_files = glob.glob(root_path+"/*.html") html_list = [os.path.basename(i) for i in html_files] # Create new pages from content files def new_page_creation(): for yml in yaml_files: logging.info("CREATING NEW WEB PAGE.") logging.info("YAML Loading: {}".format(yml)) with open(yml, "r") as stream: logging.info("Opening {}".format(yml)) try: yam = yaml.safe_load(stream) except yaml.YAMLError as exc: logging.error(exc) # Fetching values from yaml files filename = yam["filename"] + ".html" title = yam["title"] description = yam["desc"] content = yam["content"] nav_project = yam["project_menu"] nav_other = yam["other_menu"] nav_sidebar = yam["sidebar"] if filename not in html_list: # Opening a base template file for modification # This modified base file then becomes the new web page logging.info("Loading Template Files for {}".format(filename)) with open(root_path+"/squeeznet-nano.html") as fp: soup = BeautifulSoup(fp, "html.parser") p = cwg.WebPageGenerator( soup, title, description, content, filename, ["cv.html"] ) # To prevent creation of new bugs # No navigation links for navbar is cretated # Only sidebar with outline button is created p.newpage() logging.info( "New HTML page has been created for {}.".format(filename) ) # p.addlinks(nav_other=True,nav_sidebar=True) # writes the new webpage to the root directory with open(root_path+"/" + filename, "w") as file: file.write(str(p.soup)) logging.info("Writing {} to disk.".format(filename)) # After creating the new web pages its links will be automatically # added to all other static html files logging.info("Link generation for static html files") logging.info("{} COMPLETED\n\n".format(filename)) def adding_links2pages(): # Adding menu and sidebar links html_files = glob.glob(root_path+"/*.html") for yml in yaml_files: logging.info("CREATING LINKS") with open(yml, "r") as stream: logging.info("Opening {}".format(yml)) try: yam = yaml.safe_load(stream) except yaml.YAMLError as exc: logging.error(exc) filename = yam["filename"] + ".html" title = yam["title"] description = yam["desc"] content = yam["content"] nav_project = yam["project_menu"] nav_other = yam["other_menu"] nav_sidebar = yam["sidebar"] for html in html_files: if "cv.html" in os.path.basename(html): logging.info("skipped cv.html file") continue logging.info("Link Loading {}".format(html)) with open(html) as fp: soup = BeautifulSoup(fp, "html.parser") page_link = cwg.WebPageGenerator( soup, title, description, content, filename, ["cv.html"] ) if "index.html" in os.path.basename(html): page_link.addlinks( nav_project=nav_project, nav_other=nav_other, nav_sidebar=False, ) elif filename == os.path.basename(html): page_link.addlinks( nav_project=nav_project, nav_other=nav_other, nav_sidebar=nav_sidebar, new=True, ) else: page_link.addlinks( nav_project=nav_project, nav_other=nav_other, nav_sidebar=nav_sidebar, ) logging.info("Links has been created for {}".format(filename)) with open(html, "w") as file: file.write(str(page_link.soup)) logging.info("Writing {} to disk.".format(html)) logging.info("All Process COMPLETED\n\n") def main(): print("------START OF PROGRAM------") new_page_creation() print("#" * 60) print() adding_links2pages() 
print("------END OF PROGRAM--------") if __name__ == "__main__": main()
# Import the random package to randomly generate cross-over points
import random

# Import the superclass (also called base class), which is an abstract class,
# to implement the subclass BlendCrossoverOperator
import GeneticOperator

# Import the Individual package
import Individual as IND

# The subclass that inherits from GeneticOperator
class BlendCrossoverOperator(GeneticOperator.GeneticOperator):

    # Constructor
    # aProbability: operator's probability
    # aMutationOperator: optional mutation operator (to mutate the newly created individual)
    def __init__(self, aProbability: float, aMutationOperator = None):
        # Apply the constructor of the abstract class
        super().__init__(aProbability)
        # Set the name of the new operator
        self.__name__ = "Blend crossover operator"
        # Save the mutation operator
        self.mutation_operator = aMutationOperator
        # Get a SystemRandom instance out of the random package
        self.system_random = random.SystemRandom()

    # Perform the operator's actual action
    def apply(self, anEA):
        self.use_count += 1
        # Select the parents from the population
        parent1_index = parent2_index = anEA.selection_operator.select(anEA.current_solution_set)
        # Make sure parent 1 is different from parent 2
        i = 0
        while parent2_index == parent1_index and i < 10:
            parent2_index = anEA.selection_operator.select(anEA.current_solution_set)
            i += 1
        # Perform the crossover: blend each pair of parent genes with a random weight
        child_gene = []
        for p1_gene, p2_gene in zip(anEA.current_solution_set[parent1_index].parameter_set,
                                    anEA.current_solution_set[parent2_index].parameter_set):
            alpha = self.system_random.uniform(0.0, 1.0)
            child_gene.append(alpha * p1_gene + (1.0 - alpha) * p2_gene)
        child = IND.Individual(
            anEA.current_solution_set[parent1_index].objective_function,
            child_gene
        )
        # Mutate the child
        if self.mutation_operator is not None:
            self.mutation_operator.mutate(child)
        return child
test_case = int(input()) for _ in range(test_case): x = int(input()) print((x + 7 - 1)//7)
#!/usr/bin/env python3.7 import os import sys import subprocess import re acceptinput = "Y" def environment_variables(): print("HOME: " + os.environ.get("HOME", "")) print("PATH: " + os.environ.get("PATH", "")) print("UNASSIGNED: " + os.environ.get("UNASSIGNED", "")) print(sys.argv) ## Prints the program name and the arguments as a list if acceptinput != "Y": values = input("Input a math equation to be evaulated:") try: print(values) print(eval(values)) except: print("Invalid math equation: " + values) # enviornment_variables() def running_subprocesses(): date = subprocess.run(["date"]) #Saves value AND displays value print("Type of subprocess.date: " + str(type(date)) + " Value as saved: " + str(date)) # subprocess.run(["sleep", '2']) ## Sleeps for two seconds subprocess.run(["ls", "-l", "data"], capture_output=False) #capture_output requires Python 3.7+ nofile= subprocess.run(["ls", "nonexistentfie.txt"], capture_output=True) print("\nValue of nofile: {}. Return code of ls: {}".format(nofile, nofile.returncode)) print(nofile.stdout) print(nofile.stderr) #capture_output requires Python 3.7+ result = subprocess.run(["host", "8.8.8.8"], capture_output=True) print("\nReturn code of host lookup: {}".format(result.returncode)) #result.stdout returns the captured output as a byte string and shows non-print chars print(result.stdout) #decode() decodes the captured output as UTF-8 print(result.stdout.decode().split()) # running_subprocesses() def setting_environments(): #Copy the system environment variables my_env = os.environ.copy() #Adds a new path the copy of the PATH environment my_env["PATH"] = os.pathsep.join(["/home/adouglasx/repos/python-practice", my_env["PATH"]]) #Executes the helloworld.py script using the copy of the environment result = subprocess.run(["helloworld.py"], env=my_env) # setting_environments() def read_logfiles(file): pattern = r"(.*\((\w+)\)$)" pat = re.compile(pattern) print(str(pat)) if os.path.exists(file): with open(file) as f: for line in f: # if "CRON" not in f: # continue result = re.search(pat, line) if result != None: # print(result.group(1)) print("Line: {} Para: {}".format(str(result.group(1).strip()), result.group(2))) #the sample file used here is not an ideal log file # read_logfiles("/var/log/mintsystem.log") print("\nEntering '$?' on the command line will display the exit code. NOTE: Don't sweat the 'command not found' message") sys.exit(3)
#!/bin/python import os import sys if int(sys.argv[1]) == 0x1a7: arg = "/bin/sh" gid = os.getegid() uid = os.geteuid() os.setresgid(gid, gid, gid) os.setresuid(uid, uid, uid) os.execv("/bin/sh", [arg]) else: print("No !")
# Exercício 9.2 - Livro import sys args = sys.argv if len(args) != 4: print(f'Dica: {args[0]} arquivo início fim') else: inicio = args[2] fim = args[3] nome = args[1] file = open(nome, 'r') for linha, dado in enumerate(file.readlines()): if (linha + 1) >= int(inicio) and (linha + 1) <= int(fim): print(dado) file.close()
class ListUtil:
    def __init__(self, the_list):
        self.the_list = the_list

    def flatten(self):
        """ Flatten an arbitrarily nested list """
        return self.__flatten(self.the_list)

    def __flatten(self, the_list, buf=None):
        if buf is None:
            buf = list()
        for item in the_list:
            if isinstance(item, list):
                buf = self.__flatten(item, buf)
            else:
                buf.append(item)
        return buf
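# Usage sketch (illustrative, appended to the module above):
if __name__ == "__main__":
    print(ListUtil([1, [2, [3, 4]], 5]).flatten())  # [1, 2, 3, 4, 5]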
import math from repartition_experiments.algorithms.utils import Volume, get_file_manager, get_blocks_shape from repartition_experiments.file_formats.hdf5 import HDF5_manager from repartition_experiments.algorithms.utils import get_opened_files def get_entity_sizes(cs, bytes_per_voxel, partition): bs = cs[0] * cs[1] * cs[2] * bytes_per_voxel # block size brs = bs * partition[2] # block row size bss = brs * partition[1] # block slice size return bs, brs, bss def get_strategy(buffer_mem_size, block_size, block_row_size, block_slice_size): """ Get clustered writes best load strategy given the memory available for io optimization. Returns: --------- strategy """ if buffer_mem_size < block_size: raise ValueError("Buffer size too small for one chunk") if math.floor(buffer_mem_size / block_slice_size) > 0: return 2 else: if math.floor(buffer_mem_size / block_row_size) > 0: return 1 else: return 0 def compute_buffers(buffer_mem_size, strategy, origarr_size, cs, block_size, block_row_size, block_slice_size, partition, R, bytes_per_voxel): """ partition: partition tuple of R by O = nb chunks per dimension """ def get_last_slab(): return buffers = dict() index = 0 if strategy == 2: slices_per_buffer = math.floor(buffer_mem_size / block_slice_size) buffer_shape = (slices_per_buffer * cs[0], R[1], R[2]) buffer_size = buffer_shape[0] * buffer_shape[1] * buffer_shape[2] * bytes_per_voxel nb_plain_buffers = math.floor(origarr_size / buffer_size) for i in range(nb_plain_buffers): lowcorner = (i * buffer_shape[0], 0, 0) upcorner = ((i + 1) * buffer_shape[0], buffer_shape[1], buffer_shape[2]) buffers[i] = Volume( i, lowcorner, upcorner) if nb_plain_buffers > 0: prev_buff = buffers[nb_plain_buffers-1] if prev_buff.p2[0] != (R[0]): buffers[nb_plain_buffers] = Volume(nb_plain_buffers, (nb_plain_buffers * buffer_shape[0], 0, 0), R) else: buffers[nb_plain_buffers] = Volume(nb_plain_buffers, (nb_plain_buffers * buffer_shape[0], 0, 0), R) elif strategy == 1: nb_block_slices = partition[0] nb_block_rows_per_buffer = math.floor(buffer_mem_size/block_row_size) buffer_size = nb_block_rows_per_buffer * block_row_size for i in range(nb_block_slices): nb_buffers_per_slice = math.floor(block_slice_size / buffer_size) for j in range(nb_buffers_per_slice): # a buffer is one or more block rows here lowcorner =(i*cs[0], j * nb_block_rows_per_buffer * cs[1], 0) upcorner = ((i+1)*cs[0], (j+1) * nb_block_rows_per_buffer * cs[1], R[2]) buffers[index] = Volume(index, lowcorner, upcorner) index += 1 prev_buff = buffers[index-1] if prev_buff.p2[1] != (R[1]): buffers[index] = Volume(index, (i * cs[0], nb_buffers_per_slice * cs[1], 0), ((i + 1) * cs[0], R[1], R[2])) index += 1 elif strategy == 0: for i in range(partition[0]): start_i, end_i = i*cs[0], (i+1)*cs[0] for j in range(partition[1]): start_j, end_j = j*cs[1], (j+1)*cs[1] nb_blocks_per_buff = math.floor(buffer_mem_size/block_size) buffer_size = nb_blocks_per_buff * block_size nb_buffer_per_row = math.floor(block_row_size / buffer_size) for k in range(nb_buffer_per_row): if k == 0: start_k = 0 else: start_k = buffers[index-1].p2[2] end_k = (k+1) * nb_blocks_per_buff * cs[2] buffer_volume = Volume(index, (start_i, start_j, start_k), (end_i, end_j, end_k)) buffers[index] = buffer_volume index += 1 prev_buff = buffers[index-1] if prev_buff.p2[2] != (R[2]): last_buffer = Volume(index, (start_i, start_j, prev_buff.p2[2]), (end_i, end_j, R[2])) buffers[index] = last_buffer index += 1 else: raise ValueError("Strategy does not exist") return buffers def read_buffer(arr, file_manager, 
buffer): p1, p2 = buffer.get_corners() # print("reading slices:", p1[0], p2[0], p1[1], p2[1], p1[2], p2[2]) return arr[p1[0]: p2[0], p1[1]: p2[1], p1[2]: p2[2]] def write_splits(file_manager, buffer, buffer_data, cs, outdir_path): p1, p2 = buffer.get_corners() first_index = (p1[0]/cs[0], p1[1]/cs[1], p1[2]/cs[2]) buffer_shape = (p2[0]-p1[0], p2[1]-p1[1], p2[2]-p1[2]) buff_partition = get_blocks_shape(buffer_shape, cs) _3d_index = first_index for i in range(buff_partition[0]): for j in range(buff_partition[1]): for k in range(buff_partition[2]): split_data = buffer_data[ i * cs[0]:(i+1) * cs[0], j * cs[1]:(j+1) * cs[1], k * cs[2]:(k+1) * cs[2]] region = ((0, cs[0]), (0, cs[1]), (0, cs[2])) file_manager.write_data(int(_3d_index[0] + i), int(_3d_index[1] + j), int(_3d_index[2] + k), outdir_path, split_data, region, cs) def clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path): """ Implementation of the clustered strategy for splitting a 3D array. Output file names are following the following regex: outdir_path/{i}_{j}_{k}.extension WARNING: this implementation loads the whole input array in RAM. We had 250GB of RAM for our experiments so we decided to use it. Arguments: ---------- R: original array shape m: memory available for the buffer cs: chunk shape bpv: number of bytes per voxel ff: file_format outdir_path: where to write the splits """ strategies = { 0: "blocks", 1: "block_rows", 2: "block_slices" } file_manager = get_file_manager(ff) partition = get_blocks_shape(R, cs) bs, brs, bss = get_entity_sizes(cs, bpv, partition) strategy = get_strategy(m, bs, brs, bss) origarr_size = R[0] * R[1] * R[2] * bpv buffers = compute_buffers(m, strategy, origarr_size, cs, bs, brs, bss, partition, R, bpv) origarr = file_manager.get_dataset(origarr_filepath, '/data') for buffer_index in range(len(buffers.values())): buffer = buffers[buffer_index] buffer_data = read_buffer(origarr, file_manager, buffer) write_splits(file_manager, buffer, buffer_data, cs, outdir_path) file_manager.close_infiles() get_opened_files()
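# Strategy-selection sketch (illustrative numbers only, appended to the module
# above): with 100^3-voxel chunks of 2-byte voxels in a 4x4x4 block grid, a
# 10 MB buffer holds at least one block row but no full block slice, so
# get_strategy picks the "block_rows" strategy (1).
if __name__ == "__main__":
    bs, brs, bss = get_entity_sizes((100, 100, 100), 2, (4, 4, 4))
    print(bs, brs, bss)                            # 2000000 8000000 32000000
    print(get_strategy(10_000_000, bs, brs, bss))  # 1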
#!/usr/bin/env python
import numpy as np
import csv
from model import NB_Classifier
import time

def run(Xtrain_file, Ytrain_file, test_data_file=None, pred_file=None):
    '''The function to run your ML algorithm on given datasets, generate the
    predictions and save them into the provided file path

    Parameters
    ----------
    Xtrain_file: string
        the path to the Xtrain csv file
    Ytrain_file: string
        the path to the Ytrain csv file
    test_data_file: string
        the path to the test data csv file
    pred_file: string
        the prediction file to be saved by your code. You have to save your
        predictions into this file path following the same format as Ytrain_file
    '''
    # read data from Xtrain_file and Ytrain_file
    start = time.time()
    X_train = np.genfromtxt(Xtrain_file, delimiter=',').astype(int)
    Y_train = np.genfromtxt(Ytrain_file, delimiter=',').astype(int)
    print "It took {} seconds to load files".format(time.time()-start)

    if test_data_file is None:
        return
    test = np.genfromtxt(test_data_file, delimiter=',').astype(int)

    start = time.time()
    model = NB_Classifier()
    model.fit(X_train, Y_train)
    print "It took {} seconds to train model".format(time.time()-start)

    start = time.time()
    results = model.predict(test)
    print "It took {} seconds to predict".format(time.time()-start)

    # save the predictions into pred_file, one label per row (same format as Ytrain)
    if pred_file is not None:
        with open(pred_file, 'w') as f:
            writer = csv.writer(f)
            for val in results:
                writer.writerow([val])

if __name__ == '__main__':
    run('../../data/Xtrain.csv', '../../data/Ytrain.csv', '../../data/Xtrain.csv', '../../data/pred_file.csv')
from random import randint

class Hospital(object):
    def __init__(self, name, capacity):
        self.patients = []
        self.name = name
        self.capacity = capacity

    def admit(self, patient):
        if len(self.patients) >= self.capacity:
            print "The hospital is full."
        else:
            # bed numbers are assigned by the hospital, not by the patient
            patient.bedNum = len(self.patients) + 1
            self.patients.append(patient)
            print "Confirming admission. Your bed number is: {}".format(patient.bedNum)

    def discharge(self, bedNum):
        for patient in self.patients:
            if patient.bedNum == bedNum:
                self.patients.remove(patient)
                print "Discharged. Bed number {} is now empty.".format(bedNum)
                return
        print "No patient found in bed number {}.".format(bedNum)

    def displayInfo(self):
        for patient in self.patients:
            patient.displayPatientInfo()

class Patient(object):
    def __init__(self, name, allergies):
        self.id = randint(1, 100)
        self.name = name
        self.allergies = allergies
        self.bedNum = None  # set by Hospital.admit

    def displayPatientInfo(self):
        print "Patient ID #: {}".format(self.id)
        print "Patient Name: {}".format(self.name)
        print "Patient Allergies: {}".format(self.allergies)
        print "Patient Bed #: {}".format(self.bedNum)

hospital1 = Hospital("Danger Free Zone", 5)
hospital1.admit(Patient("Jennifer", "Pollen"))
hospital1.admit(Patient("Mia", "Dog"))
hospital1.admit(Patient("Ming", "Soy"))
hospital1.displayInfo()
hospital1.discharge(2)
# GIF Creator A program that puts together multiple images (PNGs, JPGs, TIFFs) to make a smooth GIF that can be exported # Optional: Make the program convert small video files to GIFs as well. # import the library from appJar import gui from tkinter import * from PIL import Image, ImageTk import imageio # Handle Button Events def press(button): if button == "Upload": app.startLabelFrame("Images") # Set it up to where you can upload less than 6 images and not get an error user_input = app.getEntry("File Directory of Image 1") if user_input == "": pass else: photo1 = Image.open(user_input) photo11 = photo1.resize((200, 200), Image.ANTIALIAS) pic1 = ImageTk.PhotoImage(photo11) app.addImageData("pic 1", pic1, 0, 0, 3, 1, fmt="PhotoImage") usr_in = app.getEntry("File Directory of Image 2") if usr_in == "": pass else: photo2 = Image.open(usr_in) photo22 = photo2.resize((200, 200), Image.ANTIALIAS) pic2 = ImageTk.PhotoImage(photo22) app.addImageData("pic 2", pic2, 1, 0, 3, 1, fmt="PhotoImage") usr_in3 = app.getEntry("File Directory of Image 3") if usr_in3 == "": pass else: photo3 = Image.open(usr_in3) photo33 = photo3.resize((200, 200), Image.ANTIALIAS) pic3 = ImageTk.PhotoImage(photo33) app.addImageData("pic 3", pic3, 2, 0, 3, 1, fmt="PhotoImage") usr_in4 = app.getEntry("File Directory of Image 4") if usr_in4 == "": pass else: photo4 = Image.open(usr_in4) photo44 = photo4.resize((200, 200), Image.ANTIALIAS) pic4 = ImageTk.PhotoImage(photo44) app.addImageData("pic 4", pic4, 0, 1, 3, 1, fmt="PhotoImage") usr_in5 = app.getEntry("File Directory of Image 5") if usr_in5 == "": pass else: photo5 = Image.open(usr_in5) photo55 = photo5.resize((200, 200), Image.ANTIALIAS) pic5 = ImageTk.PhotoImage(photo55) app.addImageData("pic 5", pic5, 1, 1, 3, 1, fmt="PhotoImage") usr_in6 = app.getEntry("File Directory of Image 6") if usr_in6 == "": pass else: photo6 = Image.open(usr_in6) photo66 = photo6.resize((200, 200), Image.ANTIALIAS) pic6 = ImageTk.PhotoImage(photo66) app.addImageData("pic 6", pic6, 2, 1, 3, 1, fmt="PhotoImage") app.stopLabelFrame() elif button == "Create GIF": # After uploading the images, you click the "Create GIF" button to make a GIF out of the images # Somehow compile images into a GIF user_input = app.getEntry("File Directory of Image 1") usr_in = app.getEntry("File Directory of Image 2") usr_in3 = app.getEntry("File Directory of Image 3") usr_in4 = app.getEntry("File Directory of Image 4") usr_in5 = app.getEntry("File Directory of Image 5") usr_in6 = app.getEntry("File Directory of Image 6") filenames = [user_input, usr_in, usr_in3, usr_in4, usr_in5, usr_in6] images = [] for filename in filenames: images.append(imageio.imread(filename)) gif_name = app.getEntry("Name GIF") if gif_name == "": gif_name = "Cool_GIF" imageio.mimsave('C:\\Users\\cbort\\Pictures\\Gif Creator\\'+gif_name+'.gif', images) else: imageio.mimsave('C:\\Users\\cbort\\Pictures\\' + gif_name + '.gif', images) app.startSubWindow("GIF Created", modal=True) app.addLabel("l10", " The Gif has been saved in your \n Pictures folder.") app.stopSubWindow() app.showSubWindow("GIF Created") elif button == "Cancel": app.stop() app = gui("GIF Creator", "1400x1200") app.setBg("royalblue") app.setFont(18) app.addLabel("title", "Welcome to GIF Creator!",0,0,0,0) app.setLabelBg("title", "red") app.setLabelFg("title", "white") app.addLabel("l1", "Upload the Images you want to be turned into a GIF. 
Then name the gif and click the Create GIF button!",1,0,0,0) # add & configure widgets - widgets get a name, to help referencing them later app.startLabelFrame("Upload") app.addLabelEntry("File Directory of Image 1",0,0,1,0) app.addLabelEntry("File Directory of Image 2",1,0,1,0) app.addLabelEntry("File Directory of Image 3",2,0,1,0) app.addLabelEntry("File Directory of Image 4",0,1,1,0) app.addLabelEntry("File Directory of Image 5",1,1,1,0) app.addLabelEntry("File Directory of Image 6",2,1,1,0) app.stopLabelFrame() app.addLabelEntry("Name GIF", 3) app.setFocus("File Directory of Image 1") app.addButtons(["Upload", "Cancel", "Create GIF"], press) # start the GUI app.go()
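# Refactor sketch (illustrative, not wired into the app above): the six
# copy-pasted upload blocks collapse into one loop over the entry names, and
# skipping empty entries also covers the "fewer than 6 images" case noted in
# the original comments. gather_filenames is a hypothetical helper name.
def gather_filenames(app, n=6):
    names = ["File Directory of Image {}".format(i) for i in range(1, n + 1)]
    return [app.getEntry(name) for name in names if app.getEntry(name)]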
import json import os import sys import time from http import HTTPStatus from os import path from typing import List import requests import yaml from flask import Flask, Response, abort from flask import request import re from logging.config import dictConfig dictConfig({ 'version': 1, 'formatters': {'default': { 'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s', }}, 'handlers': {'wsgi': { 'class': 'logging.StreamHandler', 'stream': 'ext://flask.logging.wsgi_errors_stream', 'formatter': 'default' }}, 'root': { 'level': 'INFO', 'handlers': ['wsgi'] } }) app = Flask(__name__) HTTP_METHODS = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'CONNECT', 'OPTIONS', 'TRACE', 'PATCH'] ENDPOINT_SET_MOCKS_FOLDER = 'mocks_folder' ENDPOINT_SET_MOCK_OUTPUT = 'mock_output' ENDPOINT_WRITE_MOCK_FILE = 'mock_write' MOCK_OUTPUT_FILE_NAME = 'mock_output' MOCKS_FOLDER_FILE_NAME = 'mocks_folder' def set_mocks_folder(mock_list_folder: str): with open(MOCKS_FOLDER_FILE_NAME, "w") as text_file: text_file.write(mock_list_folder) def set_mock_output(mock_output: str): with open(MOCK_OUTPUT_FILE_NAME, "w") as text_file: text_file.write(mock_output) def get_response(filepath: str, current_request, origin_request): with open(filepath) as file: m = yaml.load(file, Loader=yaml.FullLoader) if m.get('method'): if not m.get('method') == current_request.get('method'): return None if m.get('path') is not None: if current_request.get('path') is None: return None if re.fullmatch(m.get('path'), current_request.get('path')) is None: return None if m.get('body'): if current_request.get('body') is None: return None if re.fullmatch(m.get('body'), current_request.get('body')) is None: return None delete = m.get('delete') if delete == True: os.remove(filepath) response = m.get('response') if response is None: reference = m.get('reference') if reference: method = origin_request.method url = reference if origin_request.headers: headers = {key: value for (key, value) in origin_request.headers if key != 'Host'} data = origin_request.get_data() cookies = origin_request.cookies allow_redirects = False resp = requests.request( method=method, url=url, headers=headers, data=data, cookies=cookies, allow_redirects=allow_redirects) current_request['reference'] = reference write_mock_yaml_file(filepath, current_request, resp.content.decode()) return resp.content return response def get_mocks_folder() -> str: if path.exists(MOCKS_FOLDER_FILE_NAME): with open(MOCKS_FOLDER_FILE_NAME, 'r') as file: mock_list_folder = file.read().replace('\n', '') return mock_list_folder else: return "mocks" def read_mock_list(mock_list_folder) -> List[str]: files = [] # r=root, d=directories, f = files for r, d, f in os.walk(mock_list_folder): for file in f: files.append(os.path.join(r, file)) return files @app.route('/', defaults={'path': ''}, methods=HTTP_METHODS) @app.route('/<path:path>', methods=HTTP_METHODS) def handler(path): # check default output if os.path.isfile(MOCK_OUTPUT_FILE_NAME): content = open(MOCK_OUTPUT_FILE_NAME, 'r').read() app.logger.info('default output found, response is %s', content) os.remove(MOCK_OUTPUT_FILE_NAME) app.logger.info('default output deleted') return content req = { 'method': request.method, } qs = request.query_string.decode() if qs: req['path'] = "{}?{}".format(path, qs) else: req['path'] = path body_content = '' if request.method != 'GET': body_content = request.data.decode() if body_content != 'null': req['body'] = body_content app.logger.info('request %s', req) # special command/request if req['method'] == 'PUT' 
and req['path'] == ENDPOINT_SET_MOCKS_FOLDER: app.logger.info('set mocks folder to %s', body_content) set_mocks_folder(body_content) return Response(response='{"msg":"ok"}', status=200, mimetype="application/json") if req['method'] == 'PUT' and req['path'] == ENDPOINT_SET_MOCK_OUTPUT: app.logger.info('set mock output to %s', body_content) set_mock_output(body_content) return Response(response='{"msg":"ok"}', status=200, mimetype="application/json") if req['method'] == 'PUT' and req['path'] == ENDPOINT_WRITE_MOCK_FILE: file_content = body_content yaml_parse = yaml.safe_load(file_content) location = yaml_parse.get('location') app.logger.info('try to write mock file %s', file_content) if not location or not location.endswith('.yaml') or location.startswith("/"): app.logger.info('location not valid, must end with yaml must not start with /') return Response(response='{"msg":"location not valid, must end with yaml, must not start with /"}', status=HTTPStatus.BAD_REQUEST, mimetype="application/json") write_raw_mock_yaml_file(location, file_content) return Response(response='{"msg":"ok"}', status=200, mimetype="application/json") mock_list_folder = get_mocks_folder() app.logger.info('mock list folder is %s', mock_list_folder) mock_list = read_mock_list(mock_list_folder) app.logger.info('mock list is %s', mock_list) for ml in mock_list: resp = get_response(ml, req, request) if resp: if resp == 'abort(504)': app.logger.info('return fake abort 504') abort(504) return Response(response=resp, status=200, mimetype="application/json") filename = get_mock_filename(path, mock_list_folder, request.method) response_text = "CHANGEME in file {}".format(filename) app.logger.info('filename is %s, response_text is %s', filename, response_text) # create new mock list try: app.logger.info('writing mock yaml file filename: %s, req: %s, response_text: %s', filename, req, response_text) write_mock_yaml_file(filename, req, response_text) except Exception as e: app.logger.info('{"msg":"fail to write file, please check mocks folder"}') return Response(response='{"msg":"fail to write file, please check mocks folder"}', status=HTTPStatus.INTERNAL_SERVER_ERROR, mimetype="application/json") return response_text def get_mock_filename(path: str, mock_list_folder: str, method: str) -> str: milliseconds = int(round(time.time() * 1000)) filename = "{}/{}_{}_{}.yaml".format(mock_list_folder, method, path.replace('/', '_'), str(milliseconds)) return filename def represent_int(s: str) -> bool: try: int(s) return True except ValueError: return False def write_mock_yaml_file(filename: str, req, response_text: str): try: with open(filename, "w") as text_file: req['response'] = response_text text_file.write(yaml.dump(req)) except Exception as e: raise e def write_raw_mock_yaml_file(filename: str, file_content: str): try: with open(filename, "w") as text_file: text_file.write(file_content) except Exception as e: raise e def main(): if len(sys.argv) > 1: if represent_int(sys.argv[1]): app.run(host='0.0.0.0', port=int(sys.argv[1])) else: print("Usage python main.py [port]") else: app.run(host='0.0.0.0', port=30000) def init(): if __name__ == '__main__': sys.exit(main()) init()
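# Client-side sketch (an assumption for illustration: the server above running
# on its default port 30000). It registers a mock through the special
# ENDPOINT_WRITE_MOCK_FILE PUT route; the location, path regex and response
# body are example values.
import requests, yaml

mock = {
    "location": "mocks/GET_users.yaml",   # must end with .yaml, no leading /
    "method": "GET",
    "path": "users/.*",                   # matched with re.fullmatch
    "response": '{"msg": "hello"}',
}
requests.put("http://localhost:30000/mock_write", data=yaml.dump(mock))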
class Estado():
    def __init__(self):
        print("current state: " + str(self))

    def __str__(self):
        return self.__class__.__name__

# state 1
class link_down(Estado):
    def on_event(self, event):
        if event == 'interface_ok':
            return send_start()
        print("current state: " + str(self))
        return self

# state 2
class send_start(Estado):
    def on_event(self, event):
        if event == 'pacote_ok_recebido':
            return receive_start()
        elif event == 'interface_nok':
            return link_down()
        return self

# state 3
class receive_start(Estado):
    def on_event(self, event):
        if event == 'pacote_ok_recebido':
            return link_ok()
        elif event == 'interface_nok':
            return link_down()
        elif event == 'nao_recebeu_resposta':
            return send_start()
        return self

# state 4
class link_ok(Estado):
    def on_event(self, event):
        if event == 'interface_nok':
            return link_down()
        elif event == 'pacote_nok_recebido' or event == 'nao_recebeu_resposta':
            return send_start()
        return self
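# Driving sketch (illustrative event sequence): each on_event call returns the
# next state object, so the machine advances by reassignment.
if __name__ == "__main__":
    estado = link_down()
    for evento in ['interface_ok', 'pacote_ok_recebido', 'pacote_ok_recebido', 'interface_nok']:
        estado = estado.on_event(evento)
        print("-> " + str(estado))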
# Output handler for solver statistics
import numpy as np
from ..DREAMException import DREAMException
from ..DataObject import DataObject
import DREAM.Settings.Solver

class Solver:
    def __init__(self, solverdata=None, output=None):
        """ Constructor. """
        self.solverdata = solverdata
        self.output = output

    def __contains__(self, item):
        """ Overrides the Python 'in' operator. """
        return (item in self.__dict__)

    def __getitem__(self, index):
        """ Direct access by name to the timing information. """
        return self.__dict__[index]

    def __repr__(self):
        """ Convert this object to an "official" string. """
        return self.__str__()

    def plot(self):
        """ Generic plotting routine """
        print("Method 'plot()' not implemented for this solver.")
from pydantic import BaseModel class UserModel(BaseModel): nanoid: str name: str username: str password: str class PostModel(BaseModel): nanoid: str post: str user: str replyTo: str isReply: bool
from django.shortcuts import render from .models import Film, Review from django.views.generic import ListView, DetailView, CreateView from django.contrib.auth.views import LoginView, LogoutView from django.views.generic import TemplateView from .forms import RegistrationForm, LoginForm from django.contrib.auth.models import User class AboutView(TemplateView): template_name = 'catalog/about.html' def get_context_data(self, *args, **kwargs): context = super(AboutView, self).get_context_data() context['active_page'] = '/about/' return context class FilmView(ListView): queryset = Film.objects.prefetch_related('actor__actors', 'genre', 'review_set').all() context_object_name = 'films' template_name = 'catalog/list_films.html' class FilmDetailView(DetailView): queryset = Film.objects.prefetch_related('actor__actors', 'genre', 'review_set').all() context_object_name = 'film' template_name = 'catalog/film_detail.html' class ReviewCreateView(CreateView): model = Review template_name = 'catalog/create_review.html' success_url = '/' fields = '__all__' class FilmCreateView(CreateView): model = Film template_name = 'catalog/create_film.html' success_url = '/' fields = '__all__' class UserCreateView(CreateView): model = User form_class = RegistrationForm success_url = '/' template_name = 'catalog/register.html' class LoginUserView(LoginView): form_class = LoginForm success_url = '/' template_name = 'catalog/login.html' class LogoutUserView(LogoutView): pass
# transcendental.py # Adrian Del Maestro # 09.13.2012 # A graphical solution of a transcendental equation import matplotlib.pyplot as plt import numpy as np plt.style.use('notebook'); x_sol = [] # ---------------------------------------------------------------------------- def trans(x,a): ''' A transcendental equation. ''' return np.tanh(x/a) def onclick(event): x_sol.append(event.xdata) # ---------------------------------------------------------------------------- # main program # ---------------------------------------------------------------------------- def main(): a = np.linspace(0.1,1.0,6) x = np.arange(0.0,1.21,0.01) # Show the graphical solution fig = plt.figure(1) plt.plot(x,x,'k-', label='_nolegend_') for i,ca in enumerate(a): label = 'a = %5.3f' % ca plt.plot(x,trans(x,ca),'-', label=label) plt.legend(prop={'size':14}, ncol=3, loc='lower right') plt.axis([-0.2,1.2,-0.2,1.2]) # Determine the graphical solution cid = fig.canvas.mpl_connect('button_press_event', onclick) plt.show() # visually identifying the solution sol = np.array(x_sol) # plot the identified solution plt.figure(2) plt.plot(a,sol,'o-', linewidth=1, markersize=5, markeredgecolor='gray') plt.xlabel(r'$a$') plt.ylabel(r'$x$') plt.ylim(0,1.1) plt.show() if __name__ == '__main__': main()
import os from definitions import NOMENCLATURES_DIR from definitions import TRAINING_DIR from definitions import TFIDF_NATIONALITIES_DIR import definitions import wsdm.ts.helpers.persons.persons as p_lib def init_dictionary(): nationalities = {} with open(os.path.join(NOMENCLATURES_DIR, 'nationalities.txt'), encoding='utf8', mode='r') as f: for line in f: nationality = line.rstrip() nationalities[nationality] = "" return nationalities def add_training_data(nationalities): with open(os.path.join(TRAINING_DIR, 'all_positive_nationality.train'), encoding='utf8', mode='r') as f: for i, line in enumerate(f): splitted = line.rstrip().split(' ') person = splitted[0] nationality = splitted[1] with open(os.path.join(definitions.PERSONS_DIR, p_lib.remove_spaces(person) + ".txt"), 'r', encoding='utf8') as pf: nationalities[nationality] += pf.read() + "\n" print(i, person) def write_files(nationalities): for nationality, text in nationalities.items(): with open(os.path.join(TFIDF_NATIONALITIES_DIR, nationality + ".txt"), encoding='utf8', mode='x') as f: f.write(text) def main(): nationalities = init_dictionary() add_training_data(nationalities) write_files(nationalities) if __name__ == '__main__': main()
import webapp2

class GameWorker(webapp2.RequestHandler):
    def post(self):
        players = {}
# Given an array `array`, we want the k-th number after slicing from the i-th
# to the j-th element and sorting the slice.
# For example, with array = [1, 5, 2, 6, 3, 7, 4], i = 2, j = 5, k = 3:
#   slicing array from the 2nd to the 5th element gives [5, 2, 6, 3],
#   sorting that slice gives [2, 3, 5, 6],
#   and the 3rd number of the sorted slice is 5.
# Given array and a 2D list commands whose elements are [i, j, k], write
# solution() so it returns the result of the operation above for every command.
def solution(array, commands):
    answer = []
    for i in commands:
        index = i[-1]
        a = sorted(array[i[0]-1:i[1]])
        answer.append(a[index-1])
    return answer

solution([1, 5, 2, 6, 3, 7, 4], [[2, 5, 3], [4, 4, 1], [1, 7, 3]])
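# Quick check sketch: the sample call above should evaluate to [5, 6, 3].
assert solution([1, 5, 2, 6, 3, 7, 4], [[2, 5, 3], [4, 4, 1], [1, 7, 3]]) == [5, 6, 3]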
from scrapUtil import get_pages, get_tables class Train(): def __init__(self, id): self.id = id
class Rectangulo:
    def __init__(self, base, altura):
        self.base = base
        self.altura = altura

    def area(self):
        # return the value instead of printing, so callers can use it
        return self.base * self.altura

baseRectangulo = int(input("Enter the base of the rectangle: "))
alturaRectangulo = int(input("Enter the height of the rectangle: "))
rectangulo = Rectangulo(baseRectangulo, alturaRectangulo)
print("The area of the rectangle is:", rectangulo.area())
# Accept two numbers from the user and print their GCD and LCM
def GCD_LCM(no1, no2):
    if no1 <= 0 or no2 <= 0:
        return
    lcm = 1
    i = 2
    temp1 = no1
    temp2 = no2
    # build the LCM from the union of the prime factorizations
    while no1 != 1 or no2 != 1:
        if no1 % i == 0 or no2 % i == 0:
            lcm *= i
        else:
            i += 1
            continue
        if no1 % i == 0:
            no1 //= i
        if no2 % i == 0:
            no2 //= i
    # build the GCD from the shared prime factors
    gcd = 1
    i = 2
    no1 = temp1
    no2 = temp2
    while no1 != 1 and no2 != 1:
        if no1 % i == 0 and no2 % i == 0:
            gcd *= i
            no1 //= i
            no2 //= i
        else:
            i += 1
        if i > min(temp1, temp2):
            break
    # gcd is already 1 for coprime inputs (e.g. 1,1  1,5  6,1  4,9),
    # so return unconditionally
    return lcm, gcd

# approach 2: Euclid's algorithm, then lcm = no1*no2 // gcd
def GCD_LCMX(no1, no2):
    if no1 <= 0 or no2 <= 0:
        return
    temp1 = no1
    temp2 = no2
    while no1 % no2 != 0:
        rem = no1 % no2
        no1, no2 = no2, rem
    gcd = no2
    no1, no2 = temp1, temp2
    lcm = (no1 * no2) // gcd
    return lcm, gcd

print("LCM and GCD ", GCD_LCM(36, 24))
print("LCM and GCD ", GCD_LCMX(36, 24))
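# Cross-check sketch against the standard library:
import math
assert GCD_LCMX(36, 24) == (36 * 24 // math.gcd(36, 24), math.gcd(36, 24))  # (72, 12)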
class HumanObject:
    def __init__(self, weight, cost):
        self.weight = weight
        self.cost = cost

    def __str__(self):
        result = f"HumanObject for backpack with weight of: {self.weight} and cost of: {self.cost}"
        return result

if __name__ == '__main__':
    size = 20
    # generating random objects
    from random import randint
    backpack_objects = [HumanObject(randint(2, 15), randint(5, 20)) for _ in range(size)]
    for backobj in backpack_objects:
        print(backobj)

    # greedy order for the fractional knapsack: best cost/weight ratio first
    order = sorted(range(size),
                   key=lambda idx: backpack_objects[idx].cost / backpack_objects[idx].weight,
                   reverse=True)
    print("Cost over weight ratios, sorted in greedy order:")
    print([backpack_objects[idx].cost / backpack_objects[idx].weight for idx in order])

    remaining_weight = 50
    solution = [0 for _ in range(size)]
    for index in order:
        obj = backpack_objects[index]
        if obj.weight <= remaining_weight:
            solution[index] = 1
            remaining_weight -= obj.weight
        else:
            # take the fraction that still fits, then stop: the backpack is full
            solution[index] = remaining_weight / obj.weight
            break

    print("Final solution:")
    print(tuple(solution))
# coding:utf-8 def handle(event, context): print("done")
# from multiprocessing import set_start_method # set_start_method("spawn") import tarfile import os import sys import pickle #import tensorflow as tf from datetime import datetime from multiprocessing import Pool from multiprocessing import Process import multiprocessing import getopt from itertools import repeat import psutil from shutil import copyfile import joblib sys.path.append('../../lib/') import return_type_lib import common_stuff_lib import tarbz2_lib import pickle_lib import disassembly_lib #import tfrecord_lib def print_one_pickle_list_item(pickle_file_content): item = next(iter(pickle_file_content)) if item: print(f'function-signature: {item[0]}') print(f'gdb-ptype: {item[1]}') print(f'function-name: {item[2]}') print(f'function-file-name: {item[3]}') print(f'disassembly-att: {item[4]}') print(f'disassembly-intel: {item[5]}') print(f'package-name: {item[6]}') print(f'binary-name: {item[7]}') else: print('Error item[0]') def serialize_example(feature0, feature1): """ Creates a tf.train.Example message ready to be written to a file. """ # Create a dictionary mapping the feature name to the tf.train.Example-compatible # data type. feature0 = feature0.numpy() feature1 = feature1.numpy() feature = { 'caller_callee': tf.train.Feature(bytes_list=tf.train.BytesList(value=[feature0])), 'label': tf.train.Feature(bytes_list=tf.train.BytesList(value=[feature1])), } # Create a Features message using tf.train.Example. example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def tf_serialize_example(f0,f1): tf_string = tf.py_function( serialize_example, (f0,f1), # pass these args to the above function. tf.string) # the return type is `tf.string`. return tf.reshape(tf_string, ()) # The result is a scalar def proc_build(tarbz2_file, work_dir, save_dir, config): # with get_context("spawn").Pool() as pool: tarbz2_lib.untar_file_to_path(tarbz2_file, work_dir) #untar_one_pickle_file(tarbz2_file, work_dir) pickle_file = work_dir + os.path.basename(tarbz2_file).replace('.tar.bz2', '') pickle_file_content = pickle_lib.get_pickle_file_content(pickle_file) #pickle_file_content = get_pickle_file_content(work_dir + os.path.basename(pickle_file).replace('.tar.bz2', '')) binaries = set() functions = set() for elem in pickle_file_content: binaries.add(elem[7]) functions.add(elem[2]) print(f'binaries >{binaries}<') counter = 0 dataset_list = list() ## 1. get one binary ## 2. get one function of this binary ## 3. get disassembly of this function ## 4. check if this disassembly calls another function ## 4.1 filter @plt ## 5. if yes: get disassembly of caller function ## 6. save caller, callee, func_signature ## 7. check again, if it calls another function ## 8. if yes: get disassembly of caller function ## 9. save caller, calle, func_signature ##10. get disassembly of next function of this binary ##11. check if .... 
for bin in binaries: for func in functions: ## search for bin and func for elem in pickle_file_content: ### if we found bin and func if elem[7] == bin and elem[2] == func: ## get att disassembly att_dis = elem[4] #print(f'att-dis >{att_dis}<') ## check every line if there is a call for item in att_dis: ## find call in disas if disassembly_lib.find_call_in_disassembly_line(item): ## if found, get callee name callee_addr = '' callee_name = disassembly_lib.get_callee_name_from_disassembly_line(item) callee_addr = disassembly_lib.get_callee_addr_from_disassembly_line(item) # print(f'item >{item}<') # print(f'callee_addr >{callee_addr}<') # print(f'callee_name >{callee_name}<') ## search for same bin, but callee func for elem2 in pickle_file_content: ### if we found it, get return type and disassembly if elem2[7] == bin and elem2[2] == callee_name: if (len(elem2[4]) > (int(config['tokenized_disassembly_length'])/2)) or (len(att_dis) > (int(config['tokenized_disassembly_length'])/2)) or (len(elem2[4]) < 1) or (len(att_dis) < 1): continue return_type_func_sign = return_type_lib.get_return_type_from_function_signature(elem2[0]) return_type = return_type_lib.get_return_type_from_gdb_ptype(elem2[1]) ###for debugging, what string is still unknown ?? should show nothing if return_type == 'unknown': print(f'string_before_func_name: {return_type_func_sign}') if return_type == 'unknown': #print('unknown found') #breaker = True #break pass elif return_type == 'delete': #print('delete found') ### no return type found, so delete this item pass elif return_type == 'process_further': print(f'ERRROOOORRRR---------------') else: tmp_att_dis = att_dis #print(f'len att-dis 1 >{len(tmp_att_dis)}<') tmp_att_dis = disassembly_lib.clean_att_disassembly_from_comment(tmp_att_dis) callee_dis = disassembly_lib.clean_att_disassembly_from_comment(elem2[4]) #print(f'len att-dis 1 >{len(tmp_att_dis)}<') #print(f'att-dis >{tmp_att_dis}<') dis1_str = ' '.join(tmp_att_dis) #dis2_str = ' '.join(elem2[4]) dis2_str = ' '.join(callee_dis) dis1_str = disassembly_lib.split_disassembly(dis1_str) dis2_str = disassembly_lib.split_disassembly(dis2_str) #dis1_str = dis_split(dis1_str) #dis2_str = dis_split(dis2_str) ##the max-seq-length blows memory (>160GB ram) with model.fit() if e.g. 
over 6million if (len(dis1_str) > (int(config['tokenized_disassembly_length'])/2)) or (len(dis2_str) > (int(config['tokenized_disassembly_length'])/2)) or (len(dis1_str) < 1) or (len(dis2_str) < 1): #print(f'tokenized_disassembly_length caller >{len(dis1_str)}<') #print(f'tokenized_disassembly_length callee >{len(dis2_str)}<') #print(f"package >{elem[2]}< bin >{elem[3]}< file >{elem[6]}< func >{elem[7]}<") #print(f"package >{elem2[2]}< bin >{elem2[3]}< file >{elem2[6]}< func >{elem2[7]}<") pass else: ##callee_addr is for NLP, so that it knows which of all the calls in caller ##disassembly is the right one callee_addr_split = [char if char != '0' else 'null' for char in callee_addr] callee_addr = ' '.join(callee_addr_split) #print(f'callee_addr >{callee_addr}<') dis_str = dis1_str + ' caller_callee_separator ' + callee_addr + ' ' + dis2_str #print(f'dis_str >{dis_str}<') dataset_list.append((dis_str, return_type)) counter += 1 break if dataset_list: if config['save_file_type'] == 'pickle': ret_file = open(config['save_dir'] + os.path.basename(pickle_file).replace('.tar.bz2', ''), 'wb+') pickle_list = pickle.dump(dataset_list, ret_file) ret_file.close() else: ## save as tfrecord dis_list = list() ret_list = list() for item in dataset_list: dis_list.append(item[0]) ret_list.append(item[1]) raw_dataset = tf.data.Dataset.from_tensor_slices( (dis_list, ret_list )) serialized_features_dataset = raw_dataset.map(tf_serialize_example) filename = config['save_dir'] + os.path.basename(tarbz2_file).replace('.pickle.tar.bz2','') + '.tfrecord' writer = tf.data.experimental.TFRecordWriter(filename) writer.write(serialized_features_dataset) #return counter def check_config(config): if config['base_dir'] == '': print(f'Please specify a base-dir (-b or --base-dir) , where all work is done. Check -h for help.') exit() if not os.path.isdir(config['git_repo_path']): print(f"There is no git repo at >{config['git_repo_path']}<") exit() if not os.path.isdir(config['base_dir']): print(f"Creating >{config['base_dir']}<") os.mkdir(config['base_dir']) if not os.path.isdir(config['pickle_dir']): print(f"Creating >{config['pickle_dir']}<") os.mkdir(config['pickle_dir']) if not os.path.isdir(config['work_dir']): print(f"Creating >{config['work_dir']}<") os.mkdir(config['work_dir']) if not os.path.isdir(config['save_dir']): print(f"Creating >{config['save_dir']}<") os.mkdir(config['save_dir']) if not os.path.isdir(config['tfrecord_save_dir']): print(f"Creating >{config['tfrecord_save_dir']}<") os.mkdir(config['tfrecord_save_dir']) print(f'config >{config}<') print() def copy_files_to_build_dataset(config): pickle_files = common_stuff_lib.get_all_filenames_of_type(config['pickle_dir'], '.tar.bz2') if len(pickle_files) > 0: decision = 'z' while( (decision != 'y') and (decision != 'n' ) ): decision = input(f"There are still files in >{config['pickle_dir']}< . Do you want to use them: Type in (y/n):") if decision == 'y': print(f'Using files still there') return pickle_path = config['git_repo_path'] + '/ubuntu-20-04-pickles/' pickle_files = common_stuff_lib.get_all_filenames_of_type(pickle_path, '.tar.bz2') counter = 0 for file in pickle_files: counter += 1 nr_files = 'z' while( not nr_files.isdecimal()): nr_files = input(f'In directory >{pickle_path}< are >{counter}< files.\nHow many files to use for dataset? 
Type in:') counter = 0 for file in pickle_files: print(f'Copy file >{file}< ', end='\r') copyfile(pickle_path + file, config['pickle_dir'] + file) counter += 1 if counter >= int(nr_files): break print(f'Copied >{nr_files}< files') print() def main(): config = common_stuff_lib.parseArgs() check_config(config) nr_of_cpus = psutil.cpu_count(logical=True) print(f'We got >{nr_of_cpus}< CPUs for threading\n') print() copy_files_to_build_dataset(config) ### get all pickle files #pickle_files = get_all_tar_filenames(config['pickle_dir']) pickle_files = common_stuff_lib.get_all_filenames_of_type(config['pickle_dir'], '.tar.bz2') ### print 5 files, check and debug pickle_lib.print_X_pickle_filenames(pickle_files, 5) ### build # p = Pool(nr_of_cpus) # #p = Pool(len(pickle_files)) # pickle_files = [config["pickle_dir"] + "/" + f for f in pickle_files] star_list = zip(pickle_files, repeat(config['work_dir']), repeat(config['save_dir']), repeat(config)) # # all_ret_types = p.starmap(proc_build, star_list) # p.close() # p.join() test = joblib.Parallel(n_jobs=-1, prefer="processes")(joblib.delayed(proc_build)(a, b, c, d) for a,b,c,d in star_list) print("Done. Run build_ret_type__vocab__seq_len.py next") if __name__ == "__main__": # mp.set_start_method("forkserver") main()
import tensorflow as tf from nolearn.lasagne import BatchIterator class Trainer: def __init__(self, graph_model, epochs, batch_size, logdir, save_path, val_epoch=100, save_epoch=200): self.graph_model = graph_model self.epochs = epochs self.val_epoch = val_epoch self.save_epoch = save_epoch self.batch_size = batch_size self.logdir = logdir self.save_path = save_path self.session = None def eval(self, x, y, tf_x, tf_y, tf_train, tf_loss): loss = self.session.run([tf_loss], feed_dict={ tf_x: x, tf_y: y, tf_train: False }) return loss def train(self, data): print 'Start training ...' x_train = data['train']['x'] y_train = data['train']['y'] x_val = data['val']['x'] y_val = data['val']['y'] x_test = data['test']['x'] y_test = data['test']['y'] graph, init_graph = self.graph_model.get_graph() optimizer = self.graph_model.optimizer x_placeholder, y_placeholder, is_training_placeholder = self.graph_model.get_placeholders() print 'Running a session ...' tf_config = tf.ConfigProto(device_count={'GPU': 1}) tf_config.gpu_options.allow_growth = True with tf.Session(graph=graph, config=tf_config) as self.session: self.session.run(init_graph) saver = tf.train.Saver() summary_op = tf.summary.merge_all() writer = tf.summary.FileWriter(logdir=self.logdir, graph=self.session.graph) for epoch in range(self.epochs): print '%s / %s th epoch, training ...' % (epoch, self.epochs) batch_iterator = BatchIterator(batch_size=self.batch_size, shuffle=True) for x_train_batch, y_train_batch in batch_iterator(x_train, y_train): _, summary = self.session.run([optimizer, summary_op], feed_dict={ x_placeholder: x_train_batch, y_placeholder: y_train_batch, is_training_placeholder: True }) if epoch % self.val_epoch == 0: print '[Validating Round]' loss_train = self.eval(x=x_train, y=y_train, tf_x=x_placeholder, tf_y=y_placeholder, tf_train=is_training_placeholder, tf_loss=self.graph_model.loss) loss_val = self.eval(x=x_val, y=y_val, tf_x=x_placeholder, tf_y=y_placeholder, tf_train=is_training_placeholder, tf_loss=self.graph_model.loss) print '%s th epoch:\n' \ ' train loss: %s' \ ' val loss: %s' \ % (epoch, loss_train, loss_val) writer.add_summary(summary, epoch) if (epoch % self.save_epoch == 0) or (epoch == self.epochs - 1): print '[Testing Round]' snapshot_path = saver.save(sess=self.session, save_path="%s_%s_" % (self.save_path, epoch)) print 'Snapshot of %s th epoch is saved to %s' % (epoch, snapshot_path) loss_test = self.eval(x=x_test, y=y_test, tf_x=x_placeholder, tf_y=y_placeholder, tf_train=is_training_placeholder, tf_loss=self.graph_model.loss) print '%s th epoch:\n' \ ' test loss: %s' \ % (epoch, loss_test) save_path = saver.save(self.session, self.save_path) print 'Training ended and model file is in here: ', save_path
import socket

IP = '10.2.2.243'  # change this to the peer's IP
port = 29529       # and port
address = (IP, port)

cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cli.connect(address)

while True:
    msg = input('type your msg: ')
    msg = 'C罗:{}'.format(msg)  # prefix each message with the sender's name
    cli.send(msg.encode('utf8'))
    remsg = cli.recv(1024)
    # recv() returns an empty bytes object (not None) once the peer closes
    if not remsg:
        cli.close()
        break
    print(remsg.decode('utf8'))
import os import re import shutil from PIL import Image from PIL import ImageOps from tqdm import tqdm import time def ResizeToSquare(path): desired_size = 1024 im = Image.open(path) old_size = im.size # old_size[0] is in (width, height) format ratio = float(desired_size) / max(old_size) new_size = tuple([int(x * ratio) for x in old_size]) im = im.resize(new_size, Image.ANTIALIAS) # new_im = Image.new("RGB", (desired_size, desired_size)) # new_im.paste(im, ((desired_size - new_size[0]) // 2, # (desired_size - new_size[1]) // 2)) # # new_im.show() delta_w = desired_size - new_size[0] delta_h = desired_size - new_size[1] padding = (delta_w // 2, delta_h // 2, delta_w - (delta_w // 2), delta_h - (delta_h // 2)) new_im = ImageOps.expand(im, padding, fill= (225, 225, 225)) #new_im.show() return new_im counter = 0 #dirPath = "D:\PythonProjectsDDrive\ClothesTryOnStage2\\traindata\\train" dirPath = "D:\\Ny mappe\\Ind_clothing_pieces\\Ind_clothing_pieces" #dirPath = "D:\DeepFashionDatsets\img_highres" for (root, dirs, files) in tqdm(os.walk(dirPath, topdown=False)): for i, file in enumerate(files): if file.endswith(".jpg") or file.endswith(".png"): #if file == "target.jpg":#bool(re.search("\d\.jpg", file)) or bool(re.search("\d_target\.jpg", file)): counter += 1 #print(root + dirs[0] + "\\" + file) #resChangeImg = Image.open(root + "\\" + file) paddedImage = ResizeToSquare(root + "\\" + file) #print(resChangeImg.size) #if resChangeImg.size != (192, 256): # print(resChangeImg.size, counter) paddedImage.thumbnail((1024, 1024), Image.ANTIALIAS) #print(resChangeImg.size) #print(paddedImage.size) paddedImage.save(f"Ind_clothing_pieces_squared\\{counter}.jpg", "JPEG")#"clothesDataset256\\{counter}.jpg", "JPEG") #shutil.copy(root + "\\" + file, f"SimplifiedDataset/{counter}.jpg")
import os from obfuscator_source.python_obfuscator import Obfuscator from shutil import copy import ntpath class File: def __init__(self, file_path, new_file_path): self.file_path = file_path self.extension = self.__extract_file_extension() self.file_name = self.__extract_file_name() self.new_file_path = new_file_path def is_python_file(self): return self.extension == '.py' def copy_to_new_path(self): copy(self.file_path, self.new_file_path) def __extract_file_extension(self): file_name = ntpath.basename(self.file_path) return ntpath.splitext(file_name)[1] def __extract_file_name(self): file_name = ntpath.basename(self.file_path) return ntpath.splitext(file_name)[0] class ObfuscationFileHandler: def __init__(self, obfuscated_folder_path): self.obfuscated_folder_path = obfuscated_folder_path def run_obfuscator_directory(self, target_path): current_dict = {} obfuscatable_list = self.__collect_all_files(target_path) import_list = list(map(lambda x: x.file_name, obfuscatable_list)) for file_object in obfuscatable_list: obfuscator = Obfuscator(import_list, current_dict) obf_code = obfuscator.run_obfuscator(self.__read_source(file_object.file_path)) current_dict = obfuscator.name_map self.__write_obfuscated_source(obf_code, file_object.new_file_path) def run_obfuscator_file(self, target_path): assert ntpath.split(target_path)[-1].endswith('.py'), "Not a Valid Python File" obfuscator = Obfuscator() obf_code = obfuscator.run_obfuscator(self.__read_source(target_path)) self.__write_obfuscated_source(obf_code, target_path) def __collect_all_files(self, target_path): # Initialize list that will later contain file names to determine what modules are obfuscatable obfuscatable_list = [] # Iterate through directories for dir_path, dir_names, file_names in os.walk(target_path): # obf_folder_path maintains the internal directory structure in the obfuscated folder # path that was present within the non-obfuscated directory obf_folder_path = ntpath.join(self.obfuscated_folder_path, dir_path[len(target_path) + 1:]) # checks if directory exists, creates directory if not self.__verify_directory(obf_folder_path) # Iterate through files for file_name in file_names: # Initialize file object for current file file_object = File(ntpath.join(dir_path, file_name), ntpath.join(obf_folder_path, file_name)) # Check if file is a python file, if it is append it the obfuscatable_list if file_object.is_python_file(): obfuscatable_list.append(file_object) else: # If the file isn't a valid python file, just copy it to the path with no obfuscation file_object.copy_to_new_path() return obfuscatable_list def __verify_directory(self, obf_folder_path): if not os.path.isdir(obf_folder_path): os.mkdir(obf_folder_path) def __read_source(self, file_path): with open(file_path, "rb") as source: source_str = source.read() return source_str def __write_obfuscated_source(self, obfuscated_code, target_path): with open(target_path, 'w', encoding='utf-8') as obf_file: obf_file.write(obfuscated_code)
# -*- coding: utf-8 -*- """ Created on Thu Jan 16 10:04:01 2020 @author: KelvinOX25 """ import pyvisa import time import logging import numpy as np import struct from qcodes import VisaInstrument, validators as vals from qcodes.instrument_drivers.Keysight.Keysight_B2962A import B2962A class B2962A_Isrc(B2962A): def __init__(self, name, address): super().__init__(name, address) self.ch1.source_mode.set('current') self.ch2.enable.set('off') self.add_parameter('I_limit', get_cmd = self.ch1.current_limit.get, get_parser=float, set_cmd= self.ch1.current_limit.set, unit='A') self.add_parameter('enable', get_cmd= self.ch1.enable.get, set_cmd= self.ch1.enable.set) self.add_parameter('I', label='Current', unit = 'A', get_cmd = self.ch1.source_current.get, set_cmd= self.ch1.source_current.set) self.ch2.enable.set('off') def set_R_Attn( self, R_bias, Attn ): pass ##Testing our codes #from qcodes.instrument.base import Instrument #try: # Instrument.close_all() #except KeyError: # pass #except NameError: # pass # #gen = AWG3252_Isrc('gen', 'TCPIP0::192.168.13.32::inst0::INSTR', R_bias = 1e9, Attn=1) #gen.I.set(1e-9) #we expected to see 1V from AWG #gen.set_R_bias (1e8, Attn=10) #time.sleep(1) #gen.I.set(0.3e-8) #we expected to see 3V from AWG #gen.set_R_bias (1e9, Attn=1) #time.sleep(1) #gen.I.set(0)
from sys import argv

if len(argv) != 3:
    print("usage: ./converter.py <in> <out>")
    exit(-1)

# read one 32-bit word per line, written as hex
data = []
with open(argv[1], "r") as f:
    for l in f:
        data.append(int(l, 16))

with open(argv[2], "w") as f:
    # drop the first two words, emit the rest as zero-padded hex
    for x in data[2:]:
        f.write("%08x\n" % x)
    # fixed epilogue words (RISC-V: `addi ra, x0, 12` then `jalr x0, 0(ra)`)
    f.write("00c00093\n")
    f.write("00008067\n")
import numpy
import cv2
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from scipy.special import erfc
import sys
import logging

logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)

#size of constellation (N symbols per frame; N frames per constellation)
N=64

image = cv2.imread('example.jpg', 0)  # load the jpg file
_, bw_img = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY)  # convert to a binary array
#cv2.imshow("Binary Image",bw_img)  # test display of the converted image

data_bin = numpy.empty([len(bw_img), len(bw_img[0])])
# map every 255 to 1
for i in range(len(bw_img)):
    for j in range(len(bw_img[i])):
        if bw_img[i][j] > 0:
            data_bin[i][j] = 1
        else:
            data_bin[i][j] = 0
print(data_bin)

data_len = len(data_bin)
print("Length of the transmitted bit sequence: ")
print(data_len * len(data_bin[0]))

# flatten the binary matrix into a 1-D symbol sequence
x = numpy.array(data_bin).flatten()
y = numpy.array(data_bin).flatten()
#x=2*numpy.random.random_integers(0,1,(N,N))-1  #real part of symbol matrix 's'
#y=2*numpy.random.random_integers(0,1,(N,N))-1  #imaginary part of symbol matrix 's'

s = x + 1j*y  #complex matrix (x+y) of QPSK symbols
t, w = (numpy.empty((N,N), dtype=complex) for i in range(2))  #generate two empty, NxN arrays for use later
logging.debug('s=%s', s)

# definitions for the plot axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left + width + 0.02

rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]

# start with a rectangular figure
plt.figure(1, figsize=(8,8))

# set up plots
axScatter = plt.axes(rect_scatter)
axHistx = plt.axes(rect_histx)
axHisty = plt.axes(rect_histy)

# no axis labels for box plots
axHistx.xaxis.set_major_formatter(NullFormatter())
axHisty.yaxis.set_major_formatter(NullFormatter())

# Generate SNR scale factor for AWGN generation:
error_sum = 0.0  # initialize counter to zero to be used in BER calculation
SNR_MIN = -10
SNR_MAX = 10
SNR = SNR_MAX  # desired SNR used to determine noise power
Eb_No_lin = 10**(SNR/10.0)  # convert SNR from dB to linear
logging.debug('Eb_No_lin=%s', Eb_No_lin)
No = 1.0/Eb_No_lin  # linear power of the noise; average signal power = 1 (0dB)
scale = numpy.sqrt(No/2)  # variable to scale random noise values in AWGN loop
logging.debug('No=%s', No)
logging.debug('scale=%s', scale)

# loop through each frame, modulate, add gaussian noise (AWGN) then decode back into symbols
for i in range(N):
    n = numpy.fft.ifftn(numpy.random.normal(scale=scale, size=N) + 1j*numpy.random.normal(scale=scale, size=N))  # array of noise
    #n=0  # uncomment here and comment above if you want to remove all noise
    logging.debug('n[%s]=\n%s', i, n)
    t[i] = numpy.fft.ifftn(s[i])
    w[i] = numpy.fft.fftn(t[i] + n)  # add noise here
    # decode received signal + noise back into bins/symbols
    z = numpy.sign(numpy.real(w[i])) + 1j*numpy.sign(numpy.imag(w[i]))
    logging.debug('z of loop %s=\n%s', i, z)
    logging.debug('z!=s[%s]=\n%s', i, z != s[i])
    # find errors
    err = numpy.where(z != s[i])
    logging.debug('err[%s]=\n%s', i, err)
    # add up errors per frame
    error_sum += float(len(err[0]))
    logging.debug('error_sum[%s]=\n%s', i, error_sum)

# show total error for entire NxN message
BER = error_sum/N**2
logging.debug('Final error_sum = %s out of a total possible %s symbols', error_sum, N**2)
logging.debug('Total BER=%s', BER)

# scatter plot:
axScatter.scatter(numpy.real(w), numpy.imag(w))

# draw axes at origin
axScatter.axhline(0, color='black')
axScatter.axvline(0, color='black')

# add title (at x-axis) to scatter plot
#title = 'Zero noise'
title = 'SNR = %sdB with a BER of %s' % (SNR, BER)
axScatter.xaxis.set_label_text(title)

# now determine nice limits by hand:
binwidth = 0.25  # width of histogram 'bins'
xymax = numpy.max([numpy.max(numpy.fabs(numpy.real(w))), numpy.max(numpy.fabs(numpy.imag(w)))])  # find abs max symbol value; nominally 1
lim = (int(xymax/binwidth) + 1) * binwidth  # create limit that is one 'binwidth' greater than 'xymax'

axScatter.set_xlim((-lim, lim))  # set the data limits for the x-axis -- autoscale
axScatter.set_ylim((-lim, lim))  # set the data limits for the y-axis -- autoscale

bins = numpy.arange(-lim, lim + binwidth, binwidth)  # create bins 'binwidth' apart between -lim and +lim -- autoscale
axHistx.hist(numpy.real(w), bins=bins)  # plot a histogram - x-axis are real values
axHisty.hist(numpy.imag(w), bins=bins, orientation='horizontal')  # plot a histogram - y-axis are imaginary values

axHistx.set_xlim(axScatter.get_xlim())  # set histogram axes to match scatter plot axes limits
axHisty.set_ylim(axScatter.get_ylim())  # set histogram axes to match scatter plot axes limits

plt.show()
import unittest from katas.kyu_7.radio_dj_helper_function import longest_possible class LongestPossibleTestCase(unittest.TestCase): def test_equals(self): self.assertEqual(longest_possible(215), 'For Reasons Unknown') def test_equals_2(self): self.assertEqual(longest_possible(270), 'YYZ') def test_false(self): self.assertFalse(longest_possible(13))
#!/usr/bin/env python3 import argparse import sys import os our_version = 10 def do_version(): return '{}.0.0'.format(our_version) def do_components(): return 'all all-targets analysis asmparser asmprinter binaryformat bitreader bitwriter codegen core coroutines coverage debuginfocodeview debuginfodwarf debuginfomsf debuginfopdb demangle dlltooldriver engine executionengine fuzzmutate globalisel gtest gtest_main instcombine instrumentation interpreter ipo irreader libdriver lineeditor linker lto mc mcdisassembler mcjit mcparser mirparser native nativecodegen objcarcopts object objectyaml option orcjit passes profiledata runtimedyld scalaropts selectiondag support symbolize tablegen target testingsupport transformutils vectorize windowsmanifest x86 x86asmparser x86asmprinter x86codegen x86desc x86disassembler x86info x86utils' def do_targets_built(): return 'X86 Aarch64' def get_includedir(): sysroot = os.environ['XBSTRAP_SYSROOT_DIR'] return sysroot + '/usr/include' def get_libdir(): sysroot = os.environ['XBSTRAP_SYSROOT_DIR'] return sysroot + '/usr/lib' def do_has_rtti(): return 'YES' def do_shared_mode(): return 'shared' def do_libs(): return '-lLLVM-{}'.format(our_version) def do_system_libs(): return '-lLLVM-{}'.format(our_version) def do_cppflags(): return '' def do_cxxflags(): return '' def do_ldflags(): return '-L' + get_libdir() parser = argparse.ArgumentParser() parser.add_argument('--version', action='append_const', dest='command', const=do_version) parser.add_argument('--targets-built', action='append_const', dest='command', const=do_targets_built) parser.add_argument('--components', action='append_const', dest='command', const=do_components) parser.add_argument('--includedir', action='append_const', dest='command', const=get_includedir) parser.add_argument('--libdir', action='append_const', dest='command', const=get_libdir) parser.add_argument('--has-rtti', action='append_const', dest='command', const=do_has_rtti) parser.add_argument('--shared-mode', action='append_const', dest='command', const=do_shared_mode) parser.add_argument('--link-shared', action='store_const', dest='link', const='shared') parser.add_argument('--cppflags', action='append_const', dest='command', const=do_cppflags) parser.add_argument('--cxxflags', action='append_const', dest='command', const=do_cxxflags) parser.add_argument('--ldflags', action='append_const', dest='command', const=do_ldflags) parser.add_argument('--libs', action='append_const', dest='command', const=do_libs) parser.add_argument('--system-libs', action='append_const', dest='command', const=do_system_libs) parser.add_argument('components', type=str, nargs='*') print("cross-llvm-config:", sys.argv, file=sys.stderr) args = parser.parse_args() for command in args.command: result = command() print("cross-llvm-config yields:", result, file=sys.stderr) print(result)
import unittest from katas.kyu_5.first_non_repeating_letter import first_non_repeating_letter class FirstNonRepeatingLetterTestCase(unittest.TestCase): def test_equal_1(self): self.assertEqual(first_non_repeating_letter('a'), 'a') def test_equal_2(self): self.assertEqual(first_non_repeating_letter('stress'), 't') def test_equal_3(self): self.assertEqual(first_non_repeating_letter('moonmen'), 'e') def test_equal_4(self): self.assertEqual(first_non_repeating_letter(''), '') def test_equal_5(self): self.assertEqual(first_non_repeating_letter('abba'), '') def test_equal_6(self): self.assertEqual(first_non_repeating_letter('aa'), '') def test_equal_7(self): self.assertEqual(first_non_repeating_letter('~><#~><'), '#') def test_equal_8(self): self.assertEqual(first_non_repeating_letter('hello world, eh?'), 'w') def test_equal_9(self): self.assertEqual(first_non_repeating_letter('sTreSS'), 'T') def test_equal_10(self): self.assertEqual(first_non_repeating_letter( "Go hang a salami, I'm a lasagna hog!"), ',')
# Day 2: I Was Told There Would Be No Math # <ryc> 2021 import re def inputdata(): with open('day_02_2015.input') as stream: data = stream.readlines() data = [ [ int(number) for number in re.findall('\d+', line) ] for line in data ] return data def get_wrapping_paper(data): count = 0 for dims in data: surfaces = [dims[0]*dims[1], dims[1]*dims[2], dims[0]*dims[2]] surfaces.sort() count += surfaces[0] * 3 + surfaces[1] * 2 + surfaces[2] * 2 return count def get_ribbon(data): count = 0 for dims in data: dims.sort() count += dims[0] * 2 + dims[1] * 2 + dims[0] * dims[1] * dims[2] return count def run(): print( '\nDay 2: I Was Told There Would Be No Math' ) data = inputdata( ) print('\nwrapping paper =', get_wrapping_paper(data)) print('\nribbon =', get_ribbon(data)) if __name__ == '__main__': run()
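# Worked numbers for the two formulas above, using the 2x3x4 box from the
# puzzle statement (a sketch, not part of the original solution): the sorted
# face areas are [6, 8, 12], so paper = 3*6 + 2*8 + 2*12 = 58 -- the full
# surface area 2*(6+8+12) plus the smallest face as slack. The sorted dims
# are [2, 3, 4], so ribbon = 2*2 + 2*3 + 2*3*4 = 34 -- the smallest
# perimeter plus the volume as bow.
assert get_wrapping_paper([[2, 3, 4]]) == 58
assert get_ribbon([[2, 3, 4]]) == 34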
import sys
import time


def lazy_evaluation(n):
    print("1 sec")
    time.sleep(1)
    return n


"""The difference between an Iterator and a Generator"""
# the list comprehension evaluates all five calls up front...
n_list = [lazy_evaluation(n) for n in range(1, 6)]
for i in n_list:
    print(i)

# ...while the generator expression evaluates one element per loop pass
n_generator = (lazy_evaluation(n) for n in range(1, 6))
for j in n_generator:
    print(j)

x_range = range(1, 100+1)
x_list = ["a", "b", "c", "d"]
x_set = {1, 2, 3, 4, 5}
x_str = "iterable"

x_list2 = [1, 2, 3, 4, 5, 6, 7]
print(x_list2)
x_list2 = iter(x_list2)  # lists are iterable, but they are not iterators themselves
print(type(x_list2))
print(next(x_list2))
print(next(x_list2))

"""
A FOR statement turns an iterable object (like a list or str -- they are
not iterators) into a temporary iterator and walks it with next().
"""
# building the generator is instant: no element is computed until requested
a = (i for i in range(1, 10000000000+1))
print("ASK")

"""An iterator over a list keeps every value in memory; a generator can be used far more efficiently"""


def generator():
    yield "hello"
    yield "python"
    yield 11


gen = generator()
print(next(gen))
print(next(gen))
print(next(gen))

# compare memory: the list stores all 10,000 items, the generator only its state
print(sys.getsizeof([i for i in range(1, 10000+1)]))
print(sys.getsizeof((j for j in range(1, 10000+1))))
from file.content import ContentManager from parser import ServerHttpRequest, ServerHttpResponse from conf.basic import SUPRESS_EXCEPTION def process_request((client_socket, addr), server): try: req_msg = read_socket(client_socket) http_req = ServerHttpRequest(req_msg) proto_version = http_req.get_proto_version() status, content = ContentManager(http_req.get_url(), server.get_doc_root()).read() http_res = ServerHttpResponse(status, content, proto_version) client_socket.send(http_res.generate_response()) except Exception, e: if not SUPRESS_EXCEPTION: print e.message def read_socket(input_socket): chunks = [] size = 128 buffer_size = 128 while size == buffer_size: chunk = input_socket.recv(buffer_size) chunks.append(chunk) size = len(chunk) return ''.join(chunks)
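# One caveat with read_socket above (a sketch, not a change to the server):
# the loop stops only when recv() returns fewer bytes than buffer_size, so a
# request whose length is an exact multiple of 128 leaves the loop issuing
# one more recv() that blocks until the client gives up. A variant that
# instead reads until the blank line ending the HTTP headers (assumes
# header-only requests with no body):
def read_socket_until_headers_end(input_socket, buffer_size=128):
    data = ''
    while '\r\n\r\n' not in data:
        chunk = input_socket.recv(buffer_size)
        if not chunk:  # peer closed the connection early
            break
        data += chunk
    return data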
operation = input("Which operation (multiplication, addition, subtraction, division): ")
first_number = float(input("First number: "))
second_number = float(input("Second number: "))

if operation == "multiplication":
    result = first_number * second_number
    print("The result is: {}".format(result))
elif operation == "addition":
    result = first_number + second_number
    print("The result is: {}".format(result))
elif operation == "subtraction":
    result = first_number - second_number
    print("The result is: {}".format(result))
elif operation == "division":
    if second_number == 0:
        print("Cannot divide by zero")
    else:
        result = first_number / second_number
        print("The result is: {}".format(result))
else:
    print("The operation was entered incorrectly")
# -*- coding=utf-8 -*- ''' Created on 20171031 @author: leochechen @summary: ctf全局变量 ''' import weakref import threading from functools import wraps # 线程级全局变量,该变量会存储CTF Server和Client连接中 CTFWorkerLocal = threading.local() # CTF全局字典 CTFGlobal = {} # CTF全局互斥锁 CTFLock = threading.Lock() # 获取当前线程的local变量的弱引用 def get_weakref_local_variate(name): return weakref.proxy(getattr(CTFWorkerLocal, name)) # 对操作进行加锁 def lock_self(func): @wraps(func) def wrapper(*args, **kwargs): try: CTFLock.acquire() return func(*args, **kwargs) finally: CTFLock.release() return wrapper
def main(): year = 1900 month = 1 months_30 = [4,6,9,11] day = 1 i = 1 sunday_counter = 0 while True: if month in months_30: limit = 30 elif month == 2: limit = 28 if year%4==0 and year % 100 !=0: limit = 29 if year%400 == 0: limit = 29 else: limit = 31 if i > limit: i = 1 month+=1 if month>12: month = 1 year+=1 if day==7: if i == 1: if year >=1901: sunday_counter +=1 day = 0 if year > 2000: break i+=1 day+=1 print(sunday_counter) if __name__ == '__main__': main()
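# The hand-rolled calendar in main() is easy to get subtly wrong, so here is
# a short cross-check (a sketch using the standard library, not part of the
# original solution): count the month-starts that fall on a Sunday in
# 1901-2000 and compare against sunday_counter.
import datetime

def sundays_via_datetime():
    count = 0
    for year in range(1901, 2001):
        for month in range(1, 13):
            if datetime.date(year, month, 1).weekday() == 6:  # Monday is 0, Sunday is 6
                count += 1
    return count

print(sundays_via_datetime())  # expected to match main()'s printed count (171)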
from pyscrap3.spiders import Spider from pyscrap3.spiders import Item from pyscrap3.spiders import ItemList
import time import pandas as pd import numpy as np from IPython.display import display CITY_DATA = { 'chicago': 'chicago.csv', 'new york city': 'new_york_city.csv', 'washington': 'washington.csv' } def get_filters(): """ Asks user to specify a city, month, and day to analyze. Returns: (str) city - name of the city to analyze (str) month - name of the month to filter by, or "all" to apply no month filter (str) day - name of the day of week to filter by, or "all" to apply no day filter """ print('Hello! Let\'s explore some US bikeshare data!') while True : city = input('Would you like to choose chicago,new york city or washington \n').lower() if city not in ['chicago','new york city','washington']: print('Please enter a correct city as shown') else: break months = ['january','febraury','march','april','may','june','all'] while True : month = input('Would you like to filter by a January,Febraury,March,April,May,June or All \n').lower() if month not in months: print('-'*40) print('Please enter a correct month or choose all') print('-'*40) else: break days = ['Saturday','Sunday','Monday','Tuesday','Wednesday','Thursday','Friday','All'] while True : day = input('Would you like to filter by Saturday,Sunday,Monday,........or All \n').lower().title() if day not in days: print('-'*40) print('Please enter a correct day or choose all') print('-'*40) else: break print('-'*40) return city, month, day def load_data(city, month, day): """ Loads data for the specified city and filters by month and day if applicable. Args: (str) city - name of the city to analyze (str) month - name of the month to filter by, or "all" to apply no month filter (str) day - name of the day of week to filter by, or "all" to apply no day filter Returns: df - Pandas DataFrame containing city data filtered by month and day """ df = pd.read_csv(CITY_DATA[city]) months_2 = ['january','febraury','march','april','may','june'] df['Start Time'] = pd.to_datetime(df['Start Time']) df['month'] = df['Start Time'].dt.month df['day'] = df['Start Time'].dt.day_name() if month != 'all': month = months_2.index(month)+1 df = df[df['month'] == month] if day != 'All': df= df[df['day']== day] return df def time_stats(df): """Displays statistics on the most frequent times of travel.""" print('\nCalculating The Most Frequent Times of Travel...\n') start_time = time.time() print('-'*40) if month != 'all': print('You filtered by {}'.format(month.title())) else: months_2 = ['january','febraury','march','april','may','june'] month_res = months_2[df['month'].mode()[0] - 1] print('Most Common month is : ' + month_res.title() ) if day !='All': print('You filtered by {}'.format(day)) else: day_res = df['day'].mode()[0] print('Most Common day is : ' + day_res) df['Start Time'] = pd.to_datetime(df['Start Time']) df['hour'] = df['Start Time'].dt.hour hour_res = df['hour'].mode()[0] if hour_res >12: print('Most Common hour is : ' + str(hour_res-12) + ' pm' ) else: print('Most Common hour is : ' + str(hour_res) + ' am') print('-'*40) return print("\nThis took %s seconds." 
% (time.time() - start_time)) print('-'*40) def station_stats(df): """Displays statistics on the most popular stations and trip.""" print('\nCalculating The Most Popular Stations and Trip...\n') start_time = time.time() print('-'*40) most_start = df['Start Station'].mode()[0] print('Most Common Start Station : ' + most_start) print('-'*40) most_end = df['End Station'].mode()[0] print('Most Common End Station : ' + most_end) print('-'*40) df['Trip SE'] = df['Start Station'] +' to ' +df['End Station'] most_trip = df['Trip SE'].mode()[0] print('Most Common Trip : From ' + most_trip) return print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def trip_duration_stats(df): """Displays statistics on the total and average trip duration.""" print('\nCalculating Trip Duration...\n') start_time = time.time() print('-'*40) total_travel = df['Trip Duration'].sum().round() print('Total Travel time is : ' + str(total_travel) ) avg_travel = float(round(df['Trip Duration'].mean())) print('Average Travel time is : ' + str(avg_travel) ) return print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def user_stats(df): """Displays statistics on bikeshare users.""" print('\nCalculating User Stats...\n') start_time = time.time() print('-'*40) user_type_counts = df['User Type'].value_counts() print('Number of Subscribers is : ' + str(user_type_counts['Subscriber'])) print('Number of Customers is : ' + str(user_type_counts['Customer'])) if city !='washington': print('-'*40) gender_counts = df['Gender'].value_counts() print('Number of Males is : ' + str(gender_counts['Male'])) print('Number of Females is : ' + str(gender_counts['Female'])) print('-'*40) earliest_year = df['Birth Year'].min() mostrecent_year = df['Birth Year'].max() most_common_year = df['Birth Year'].mode()[0] print('Earliest year of birth is : ' + str(earliest_year)) print('Most recent year of birth is : ' + str(mostrecent_year)) print('Most common year of birth is : ' + str(most_common_year)) return print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def display_five(df): rows= 0 while True: print('-'*40) x = input('Would you like to show 5 lines of the data (Yes or No) \n').lower() if x == 'yes': past_rows = rows rows += 5 if city !='washington': result = df.iloc[past_rows:rows,0:9] else: result = df.iloc[past_rows:rows,0:7] display(result) elif x == 'no': break else: print('-'*40) print('Please enter Yes or No') return def main(): while True: global city,month,day city, month, day = get_filters() df = load_data(city, month, day) time_stats(df) station_stats(df) trip_duration_stats(df) user_stats(df) display_five(df) restart = input('\nWould you like to restart? Enter yes or no.\n') if restart.lower() != 'yes': break if __name__ == "__main__": main()
import subprocess subprocess.call(['./first.sh'])
import uuid from datetime import datetime from django.db import models from django.template.defaultfilters import slugify from django.contrib.sites.shortcuts import get_current_site from django.utils.functional import SimpleLazyObject from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from django.utils.translation import gettext_lazy as _ from django.urls import reverse from ckeditor_uploader.fields import RichTextUploadingField from accounts.validators import validate_file_extension, validate_Image_extension from accounts.models import User, UserRole from track.models import UrlHit HELPCENTER_CHOICE = ( {'', 'Select Help Category'}, ('Getting Started', 'Getting Started'), ('Login', 'Login'), ('Account Settings', 'Account Settings'), ('Reading Stories', 'Reading Stories'), ('User Memebership', 'User Memebership'), ('Navigating', 'Navigating'), ('Managing posts', 'Managing posts'), ('Writing && editing', 'Writing & editing'), ('Distribution', 'Distribution'), ('Holy L.M Publication', 'Holy L.M Publication'), ('Comments', 'Comments'), ) USERREQUEST_CHOICE = ( {'', 'Select Help Category'}, ('Account issues', 'Account issues'), ('Publishing issues', 'Publishing issues'), ('Other', 'Other'), ) class Section(models.Model): name = models.CharField(max_length=100, null=True, blank=True) slug = models.SlugField(default=slugify(name), max_length=100) is_active = models.BooleanField(default=True) class Meta: ordering = ['id'] def save(self, *args, **kwargs): tempslug = slugify(self.name) if self.id: section = Section.objects.get(pk=self.id) if section.name != self.name: self.slug = create_section_slug(tempslug) else: self.slug = create_section_slug(tempslug) super(Section, self).save(*args, **kwargs) def __str__(self): return self.name def section_posts(self): return HelpCenter.objects.filter(section=self).count() def get_absolute_url(self): kwargs = { 'slug': self.slug, } return reverse('section_view', kwargs=kwargs) @property def hit_count(self): url, created = UrlHit.objects.get_or_create(url=self.get_absolute_url()) return url.hits def create_section_slug(tempslug): slugcount = 0 while True: try: Section.objects.get(slug=tempslug) slugcount += 1 tempslug = tempslug + '-' + str(slugcount) except ObjectDoesNotExist: return tempslug def MakeOTP(): import random,string allowed_chars = ''.join((string.ascii_letters, string.digits)) return ''.join(random.choice(allowed_chars) for _ in range(10)) class HelpCenter(models.Model): help_hex = models.CharField(max_length=10, unique=True, editable=False, default=MakeOTP) title = models.CharField(max_length=500, unique=True) slug = models.SlugField(default=slugify(title), max_length=500, unique=True) section = models.ForeignKey(Section, on_delete=models.CASCADE) content = RichTextUploadingField(blank=True, null=True) created_on = models.DateTimeField(auto_now_add=True) updated_on = models.DateField(auto_now=True) is_active = models.BooleanField(default=True) class Meta: ordering = ['id'] def save(self, *args, **kwargs): tempslug = slugify(self.title) if self.id: blogpost = HelpCenter.objects.get(pk=self.id) if blogpost.title != self.title: self.slug = create_helpcenter_slug(tempslug) else: self.slug = create_helpcenter_slug(tempslug) super(HelpCenter, self).save(*args, **kwargs) def __str__(self): return '{title}: {help_hex}'.format(title=str(self.title), help_hex=str(self.help_hex)) def is_deletable_by(self, user): if self.user == user or user.is_superuser: return True return False def get_absolute_url(self): kwargs = { 
'help_hex': self.help_hex
        }
        return reverse('articles_detail', kwargs=kwargs)

    @property
    def hit_count(self):
        url, created = UrlHit.objects.get_or_create(url=self.get_absolute_url())
        return url.hits

    @property
    def image_file(self):
        # assumes a featured_image field is defined on this model
        return self.featured_image.path


def create_helpcenter_slug(tempslug):
    slugcount = 0
    while True:
        try:
            HelpCenter.objects.get(slug=tempslug)
            slugcount += 1
            tempslug = tempslug + '-' + str(slugcount)
        except ObjectDoesNotExist:
            return tempslug


class UsersRequest(models.Model):
    # the default must be a callable (as with MakeOTP above): the original
    # uuid.uuid4().hex[:10] was evaluated once at import time, so every row
    # shared the same "unique" value and the second insert would fail
    help_hex = models.CharField(max_length=20, unique=True, editable=False, default=MakeOTP)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    description = models.TextField()
    status = models.CharField(max_length=50, choices=USERREQUEST_CHOICE)
    slug = models.SlugField(max_length=50, default='')  # required by save()/create_request_slug below
    created_on = models.DateTimeField(auto_now_add=True)

    def save(self, *args, **kwargs):
        tempslug = slugify(self.status)
        if self.id:
            user_request = UsersRequest.objects.get(pk=self.id)
            if user_request.status != self.status:
                self.slug = create_request_slug(tempslug)
        else:
            self.slug = create_request_slug(tempslug)
        super(UsersRequest, self).save(*args, **kwargs)

    def __str__(self):
        return self.status


def create_request_slug(tempslug):
    slugcount = 0
    while True:
        try:
            UsersRequest.objects.get(slug=tempslug)
            slugcount += 1
            tempslug = tempslug + '-' + str(slugcount)
        except ObjectDoesNotExist:
            return tempslug
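# The three create_*_slug helpers in this module repeat the same
# probe-and-suffix loop once per model. A hedged sketch of a single generic
# version (create_unique_slug is a hypothetical name, not part of the
# module); unlike the originals it keeps the base slug fixed, so repeated
# collisions yield 'foo-2' rather than the cumulative 'foo-1-2':
def create_unique_slug(model, tempslug):
    slugcount = 0
    candidate = tempslug
    while True:
        try:
            model.objects.get(slug=candidate)  # slug taken: try the next suffix
            slugcount += 1
            candidate = '{}-{}'.format(tempslug, slugcount)
        except ObjectDoesNotExist:
            return candidate
# e.g. self.slug = create_unique_slug(Section, tempslug) inside Section.save()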
aluno = dict()
aluno['name'] = str(input('Name: '))
aluno['average'] = float(input('Average: '))

if aluno['average'] >= 7:
    aluno['status'] = "approved"
else:
    aluno['status'] = "failed"

for x, y in aluno.items():
    print(f'- {x} is {y}')

print(aluno.keys())
print(aluno.values())
def main():
    print("This program generates usernames from a file of names")
    infilename = input("Which file has all the names: ")
    outfilename = input("Place usernames in this file: ")

    infile = open(infilename, "r")
    outfile = open(outfilename, "w")

    for i in infile:
        # strip the trailing newline so we don't write blank lines between names
        uname = i.strip().upper()
        print(uname, file=outfile)

    infile.close()
    outfile.close()
    print("These usernames have been written in:", outfilename)

main()
import os import json import requests from requests.auth import HTTPBasicAuth DEFAULT_API_URL = 'http://localhost:8080' class HttpClient(object): def __init__(self, username="", password="", api_url=DEFAULT_API_URL, create_user=True): self.api_url = api_url self.username = username self.user = None self.apikey = None self.secret = None if create_user: self.createUser(username=self.username, password=password) self._login(password) def do(self, payload): if 'params' not in payload: payload['params'] = {} if self.apikey: payload['params']['apikey'] = self.apikey print(json.dumps(payload)) return requests.post("{0}/api/v1".format(self.api_url), json=payload) def _login(self, password): resp = requests.post("{0}/login?format=json".format(self.api_url), auth=HTTPBasicAuth(self.username, password)) if 200 != resp.status_code: raise ValueError(resp.text) print(resp.text) respData = resp.json() self.user = respData['data']['user'] self.apikey = self.user['apikey'] self.secret = self.user['secret_token'] def createUser(self, username="", password=""): return self.do({ "method": "create_user", "params": { "username": username, "password": password } }) def createDevice(self, name="", type=""): return self.do({ "method": "create_device", "params": { "name": name, "type": type } }) def fetchDevices(self): return self.do({ "method": "get_devices" }) def createSensor(self, device_id="", name="", type=""): return self.do({ "method": "create_sensor", "params": { "device_id": device_id, "name": name, "type": type } }) def fetchSensors(self, device_id=""): return self.do({ "method": "get_sensors", "params": { "device_id": device_id } }) def createLocation(self, name="", longitude=0.0, latitude=0.0): return self.do({ "method": "create_location", "params": { "name": name, "longitude": longitude, "latitude": latitude } }) def fetchLocations(self): return self.do({ "method": "get_locations" }) def importMeasurements(self, device_id=None, location_id=None, data={}): return self.do({ "method": "import_measurements", "params": { "location_id": location_id, "device_id": device_id, "data": data } }) # def analyzeMeasurements(self, device_id=None, location_id=None, data={}): # return self.do({ # "method": "analyze", # # "location_id": location_id, # "device_id": device_id, # "data": data # })
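# A hedged usage sketch for HttpClient above. The credentials are
# placeholders and the response shape (resp.json()["data"]["device"]["id"])
# is an assumption inferred from this client's own _login() parsing, not a
# documented API:
if __name__ == "__main__":
    client = HttpClient(username="demo", password="secret")
    resp = client.createDevice(name="weather-station", type="station")
    device_id = resp.json()["data"]["device"]["id"]  # assumed response shape
    client.createSensor(device_id=device_id, name="outdoor-temp", type="temperature")
    print(client.fetchSensors(device_id=device_id).json())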
import random
import numpy as np
import cv2
import matplotlib.pyplot as plt

img = cv2.imread('test2.png', 0)
delete_freq = 5  # keep only every 5th keypoint to sparsify the feature set


def feature_sparsity(img):
    akaze = cv2.AKAZE_create(threshold=0.0007)  # smaller threshold, more points
    kp_akaze = akaze.detect(img, None)          # keypoints of AKAZE
    img_akaze = cv2.drawKeypoints(img, kp_akaze, img, color=(255, 0, 0))
    cv2.imshow('AKAZE', img_akaze)
    cv2.waitKey(0)
    pts = cv2.KeyPoint_convert(kp_akaze)        # (x, y) positions of keypoints
    pts_2 = []
    i = 0
    while i < len(kp_akaze) - 6:
        # cv2.circle(img, (pts[i][0], pts[i][1]), 2, (255, 0, 0), thickness=1)  # draw the kept point
        pts_2.append([pts[i][0], pts[i][1]])
        i = i + delete_freq
    print(pts_2)
    return pts_2
######################################################################
############### Naive Bayes Classifier ###############################
######################################################################
import math

import numpy as np
from sklearn import datasets
from sklearn.decomposition import PCA

# load in iris dataset for examples
iris = datasets.load_iris()
X = iris.data
y = iris.target
names = iris.target_names


def naive_bayes_classification(X, y, x_new):
    """Classify x_new with Gaussian naive Bayes fit on (X, y).

    Assumes continuous features that are conditionally independent given
    the class, each modelled as a univariate Gaussian (one reasonable way
    to fill in the original stub).
    """
    classes = np.unique(y)
    posteriors = []
    for c in classes:
        X_c = X[y == c]
        log_prior = math.log(X_c.shape[0] / X.shape[0])
        mean = X_c.mean(axis=0)
        var = X_c.var(axis=0) + 1e-9  # small floor avoids division by zero
        # sum of per-feature log Gaussian densities (independence assumption)
        log_likelihood = np.sum(
            -0.5 * np.log(2 * math.pi * var) - (x_new - mean) ** 2 / (2 * var)
        )
        posteriors.append(log_prior + log_likelihood)
    return classes[int(np.argmax(posteriors))]
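# A quick sanity check for the classifier above, run on the iris rows
# themselves (illustrative only -- a real evaluation would use a held-out
# test split):
predictions = [naive_bayes_classification(X, y, row) for row in X]
print("training accuracy:", np.mean(np.array(predictions) == y))
print("first prediction:", names[naive_bayes_classification(X, y, X[0])])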
# -*- coding: utf-8 -*- """ Created on Wed Oct 23 15:26:46 2019 @author: Valeria """ import iv_save_module as ivs import iv_utilities_module as ivu import matplotlib.pyplot as plt #%% Parameters this_filename = 'C:\\Users\\Valeria\\OneDrive\\Labo 6 y 7\\Análisis\\Potencia_M_20191018_10\\Resultados.txt' #%% Load data this_data, this_header, this_footer = ivs.loadTxt(this_filename) #%% Plot # Plot results for the different rods fig, ax1 = plt.subplots() # Frequency plot, right axis ax1.set_xlabel('Repetición') ax1.set_ylabel('Frecuencia (GHz)', color='tab:red') ax1.plot(this_data[:,1], 'ro') ax1.tick_params(axis='y', labelcolor='tab:red') # Quality factor, left axis ax2 = ax1.twinx() # Second axes that shares the same x-axis ax2.set_ylabel('Tiempo de decaimiento (ps)', color='tab:blue') ax2.plot(this_data[:,2], 'bx') ax2.tick_params(axis='y', labelcolor='tab:blue') fig.tight_layout() # otherwise the right y-label is slightly clipped plt.show() # Format graph plt.grid(which='both', axis='x') ax1.tick_params(length=2) ax1.grid(axis='x', which='both') # Save plot ivs.saveFig(this_filename) #%% Make table terms_heading = ["Repetición", "F (GHz)", "\u03C4 (ps)", "Q"] terms_heading = '\t'.join(terms_heading) terms_table = ['\t'.join([str(element) for element in row]) for row in this_data] terms_table = '\n'.join(terms_table) terms_table = '\n'.join([terms_heading, terms_table]) ivu.copy(terms_table)
''' Convert CSV to COCO (test) ''' import os import json import argparse import numpy as np import pandas as pd import glob import os import shutil from IPython import embed from sklearn.model_selection import train_test_split classname_to_id = {'Aortic enlargement':0, 'Atelectasis':1, 'Calcification':2, 'Cardiomegaly':3, 'Consolidation':4, 'ILD':5, 'Infiltration':6, 'Lung Opacity':7, 'Nodule/Mass':8, 'Other lesion':9, 'Pleural effusion':10, 'Pleural thickening':11, 'Pneumothorax':12, 'Pulmonary fibrosis':13 } class Csv2Coco: def __init__(self, img_dir, total_annot, arg): self.images = [] self.annotations = [] self.categories = [] self.img_id = 0 self.ann_id = 0 self.img_dir = img_dir self.total_annot = total_annot self.arg = arg def save_coco_json(self, instance, save_path): json.dump(instance, open(save_path,'w'), ensure_ascii=False, indent=2) def to_coco(self, keys): self._init_categories() for key in keys: shape = self.total_annot[key] self.images.append(self._image(key)) self.img_id += 1 instance = {} instance['info'] = 'AQX' instance['license'] = ['license'] instance['images'] = self.images instance['annotations'] = self.annotations instance['categories'] = self.categories return instance def _init_categories(self): for k, v in classname_to_id.items(): categories = {} categories['id'] = v categories['name'] = k self.categories.append(categories) def _image(self, path): image = {} image['height'] = self.arg.image_size image['width'] = self.arg.image_size image['id'] = path image['file_name'] = path + '.' + self.arg.file_type return image if __name__ == '__main__': parser = argparse.ArgumentParser(description='VinBigData_test') parser.add_argument('--image-size', type=int, default=1024, help='image size used for training') parser.add_argument('--file-type', type=str, default='png', help='image extension name') parser.add_argument('--fold-num', type=int, default=5, help='number of training folds') parser.add_argument('--save-path', type=str, default='datacoco', help='saved path') args = parser.parse_args() print(args) #read test data csv_file = '../data/csv/test.csv' image_dir = '' saved_coco_path = '../data/' + args.save_path total_annotation = {} test_rows = pd.read_csv(csv_file, header=None, skiprows=1).values for row in test_rows: test_key = row[0].split(os.sep)[-1] #image_id value = np.array(row[1:]) total_annotation[test_key] = value test_keys = list(total_annotation.keys()) print('Number of test: {}'.format(len(test_keys))) for fold in range(args.fold_num): print('Fold {}...'.format(fold)) annot_path = os.path.join(saved_coco_path, 'annotation_fold{}_{}'.format(fold, args.image_size)) #Convert test csv to json print('Converting Testset...') l2c_test = Csv2Coco(img_dir=image_dir, total_annot=total_annotation, arg=args) test_instance = l2c_test.to_coco(test_keys) l2c_test.save_coco_json(test_instance, os.path.join(annot_path, 'instances_test.json'))
import sys # methods def init_Snap(archived_pnt, value, trade_date, time,POSITIVE_DEV,NEGATIVE_DEV): prev_val = float(archived_pnt['value']) prev_time = int(archived_pnt['time_value']) time = int(time) value = float(value) Smax = (value+POSITIVE_DEV*value-prev_val)/(time-prev_time) Smin = (value-NEGATIVE_DEV*value-prev_val)/(time-prev_time) slope = (value-prev_val)/(time-prev_time) return { 'value' : value, 'trade_date' : trade_date, 'time': time, 'Smax': Smax, 'Smin': Smin, 'Slope' : slope } def snap2archive(snapshot, bool): return { 'value' : snapshot['value'], 'trade_date' : snapshot['trade_date'], 'time_value' : snapshot['time'], 'is_snap' : bool, } # SETUP STAGE if len(sys.argv) == 1: path = raw_input("Enter the path of the textfile: ") filename = raw_input("Enter the filename of the textfile: ") POSITIVE_DEV = float(raw_input("Enter the maximum positive deviation (in %): "))/100 NEGATIVE_DEV = float(raw_input("Enter the maximum negative deviation (in %): "))/100 metric = raw_input("Enter the metric (open, close, high, low, volume): ") elif len(sys.argv) == 6: print str(sys.argv) path = sys.argv[1] filename = sys.argv[2] POSITIVE_DEV = float(sys.argv[3])/100 NEGATIVE_DEV = float(sys.argv[4])/100 metric = sys.argv[5] else: raise Exception('run either with no arguements, or with arguements' ' path, filename, positive_dev, negative_dev, metric') output = '\nStarting Compression using the '+ metric \ + ' price \nwith positive deviation '+ str(POSITIVE_DEV) \ +'\nwith negative deviation '+ str(NEGATIVE_DEV) \ +'\nfor the file '+ path + filename print output CONVERSION = { 'symbol' : 0, 'date' : 1, 'open' : 2, 'high' : 3, 'low' : 4, 'close' : 5, 'volume' : 6, } SYMBOL = CONVERSION['symbol'] METRIC = CONVERSION[metric] DATE = CONVERSION['date'] # ARCHIVE array object format = [{value: value, date: trade_date, time_value: counter,},] ARCHIVE = [ ] # Array index of the next archive value archive_count = 0 # SNAPSHOT format = {value: value, date: trade_date, time_value: counter, Smax: smax, Smin: Smin, slope: slope} SNAPSHOT = { 'value' : None, 'trade_date' : None, 'time': None, 'Smax': None, 'Smin': None, 'Slope' : None } # INCOMING format = {value: value, date: trade_date, time_value: counter, Smax: smax, Smin: Smin} INCOMING = { 'value' : None, 'trade_date' : None, 'time': None, 'Smax': None, 'Smin': None, 'Slope' : None } f=open(path + filename,'r') counter = 0 # NOTE THIS ASSUMES THAT EVERY NEW LINE IN THE FILE IS 1 TRADE DAY AFTER THE PREVIOUS LINE for line in f.readlines(): data = line.split(',') value = data[METRIC] trade_date = data[DATE] if counter == 0: # This is the header so we skip this iteration pass elif counter == 1: # This is the first data point, always added into archive SYMBOL = data[SYMBOL] ARCHIVE = [{ 'value' : value, 'trade_date' : trade_date, 'time_value' : counter, 'is_snap' : False, }] archive_count += 1 elif counter == 2: # This is the first snapshot that we will recieved SNAPSHOT = init_Snap( ARCHIVE[archive_count-1], value, trade_date, counter, POSITIVE_DEV, NEGATIVE_DEV, ) else: # Set up incoming value INCOMING = init_Snap( ARCHIVE[archive_count-1], value, trade_date, counter, POSITIVE_DEV, NEGATIVE_DEV, ) if SNAPSHOT['Smin'] <= INCOMING['Slope'] <= SNAPSHOT['Smax']: # It is within the filtration bounds, edit the INCOMING and # set the SNAP. 
When editing INCOMING, make sure that the incoming
            # slopes are not bigger than the current SNAPSHOT's slopes
            INCOMING['Smax'] = min(SNAPSHOT['Smax'], INCOMING['Smax'])
            INCOMING['Smin'] = max(SNAPSHOT['Smin'], INCOMING['Smin'])
            SNAPSHOT = INCOMING
        else:
            # It is outside the bounds so we must archive the current SNAP
            # and init a new snap using this new archived point and INCOMING
            ARCHIVE.append(snap2archive(SNAPSHOT, False))
            archive_count += 1
            SNAPSHOT = init_Snap(
                ARCHIVE[archive_count-1],
                value,
                trade_date,
                counter,
                POSITIVE_DEV,
                NEGATIVE_DEV,
            )
    counter += 1

# Always add the latest point into the archive
ARCHIVE.append(snap2archive(SNAPSHOT, True))

temp = filename.split('.csv')
target = open(path+temp[0]+'_compressed.csv', 'w')
target.truncate()

# Create Header
target.write('SYMBOL,TRADE_DATE,'+metric.upper()+',IS_SNAP')
target.write("\n")

line_count = 1
for obj in ARCHIVE:
    line = SYMBOL+','+obj['trade_date']+','+str(obj['value'])+','+str(obj['is_snap'])
    target.write(line)
    target.write("\n")
    line_count += 1

print "Completed Compression of " + str(counter) + " lines to " + str(line_count) + " lines."
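# Worked numbers for the slope envelope maintained above (a sketch; the
# values are made up): take an archived point of value 100 at t=0 and a
# +/-2% deviation band.
prev_val, prev_t = 100.0, 0
POS_DEV = NEG_DEV = 0.02
v1, t1 = 101.0, 1
smax = (v1 + POS_DEV * v1 - prev_val) / (t1 - prev_t)   # 3.02, same formula as init_Snap
smin = (v1 - NEG_DEV * v1 - prev_val) / (t1 - prev_t)   # -1.02
v2, t2 = 103.0, 2
slope = (v2 - prev_val) / (t2 - prev_t)                 # 1.5
# 1.5 lies inside [-1.02, 3.02], so the point is absorbed and the envelope
# only narrows (min of the Smax values, max of the Smin values); a point
# whose slope escapes the envelope forces the current SNAPSHOT into ARCHIVE.
assert smin <= slope <= smax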
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # fmt: off # isort: skip_file from .object_meta_pb2 import ObjectMeta from .config_management_service_pb2 import GetTimeModelRequest, GetTimeModelResponse, SetTimeModelRequest, SetTimeModelResponse, TimeModel from .config_management_service_pb2_grpc import ConfigManagementServiceStub from .identity_provider_config_service_pb2 import CreateIdentityProviderConfigRequest, CreateIdentityProviderConfigResponse, DeleteIdentityProviderConfigRequest, DeleteIdentityProviderConfigResponse, GetIdentityProviderConfigRequest, GetIdentityProviderConfigResponse, IdentityProviderConfig, ListIdentityProviderConfigsRequest, ListIdentityProviderConfigsResponse, UpdateIdentityProviderConfigRequest, UpdateIdentityProviderConfigResponse from .identity_provider_config_service_pb2_grpc import IdentityProviderConfigServiceStub from .metering_report_service_pb2 import GetMeteringReportRequest, GetMeteringReportResponse from .metering_report_service_pb2_grpc import MeteringReportServiceStub from .package_management_service_pb2 import ListKnownPackagesRequest, ListKnownPackagesResponse, PackageDetails, UploadDarFileRequest, UploadDarFileResponse from .package_management_service_pb2_grpc import PackageManagementServiceStub from .participant_pruning_service_pb2 import PruneRequest, PruneResponse from .participant_pruning_service_pb2_grpc import ParticipantPruningServiceStub from .party_management_service_pb2 import AllocatePartyRequest, AllocatePartyResponse, GetParticipantIdRequest, GetParticipantIdResponse, GetPartiesRequest, GetPartiesResponse, ListKnownPartiesRequest, ListKnownPartiesResponse, PartyDetails, UpdatePartyDetailsRequest, UpdatePartyDetailsResponse from .party_management_service_pb2_grpc import PartyManagementServiceStub from .user_management_service_pb2 import CreateUserRequest, CreateUserResponse, DeleteUserRequest, DeleteUserResponse, GetUserRequest, GetUserResponse, GrantUserRightsRequest, GrantUserRightsResponse, ListUserRightsRequest, ListUserRightsResponse, ListUsersRequest, ListUsersResponse, RevokeUserRightsRequest, RevokeUserRightsResponse, Right, UpdateUserRequest, UpdateUserResponse, User from .user_management_service_pb2_grpc import UserManagementServiceStub __all__ = [ "AllocatePartyRequest", "AllocatePartyResponse", "ConfigManagementServiceStub", "CreateIdentityProviderConfigRequest", "CreateIdentityProviderConfigResponse", "CreateUserRequest", "CreateUserResponse", "DeleteIdentityProviderConfigRequest", "DeleteIdentityProviderConfigResponse", "DeleteUserRequest", "DeleteUserResponse", "GetIdentityProviderConfigRequest", "GetIdentityProviderConfigResponse", "GetMeteringReportRequest", "GetMeteringReportResponse", "GetParticipantIdRequest", "GetParticipantIdResponse", "GetPartiesRequest", "GetPartiesResponse", "GetTimeModelRequest", "GetTimeModelResponse", "GetUserRequest", "GetUserResponse", "GrantUserRightsRequest", "GrantUserRightsResponse", "IdentityProviderConfig", "IdentityProviderConfigServiceStub", "ListIdentityProviderConfigsRequest", "ListIdentityProviderConfigsResponse", "ListKnownPackagesRequest", "ListKnownPackagesResponse", "ListKnownPartiesRequest", "ListKnownPartiesResponse", "ListUserRightsRequest", "ListUserRightsResponse", "ListUsersRequest", "ListUsersResponse", "MeteringReportServiceStub", "ObjectMeta", "PackageDetails", "PackageManagementServiceStub", "ParticipantPruningServiceStub", "PartyDetails", "PartyManagementServiceStub", 
"PruneRequest", "PruneResponse", "RevokeUserRightsRequest", "RevokeUserRightsResponse", "Right", "SetTimeModelRequest", "SetTimeModelResponse", "TimeModel", "UpdateIdentityProviderConfigRequest", "UpdateIdentityProviderConfigResponse", "UpdatePartyDetailsRequest", "UpdatePartyDetailsResponse", "UpdateUserRequest", "UpdateUserResponse", "UploadDarFileRequest", "UploadDarFileResponse", "User", "UserManagementServiceStub", ]
#!/usr/bin/python import sys import csv from datetime import datetime ## Convert a string to datetime def toDatetime(date): return datetime.strptime(date[:-3], '%Y-%m-%d %H:%M:%S.%f') reader = csv.reader(sys.stdin, delimiter="\t") for entry in reader: author_id = entry[3] added_at = entry[8] ## If we have an exception, ignore this entry. try: print '{0}\t{1}\t{2}'.format(author_id, toDatetime(added_at).hour, 1) except: pass
class OutOfRange(Exception):
    """Raised when the resulting *progress* is more than the *total* value."""

    def __init__(self):
        super(OutOfRange, self).__init__(
            "resulting progress is out of range"
        )


class DoesNotExist(Exception):
    """Raised when the requested Celery task does not exist.

    :param task_id: Unique task identifier.
    :type task_id: str
    """

    def __init__(self, task_id):
        super(DoesNotExist, self).__init__(
            f"requested progressbar for task {task_id} does not exist"
        )
#! /usr/bin/env python3 import argparse, sys, os, time, urllib, urllib.request, urllib.parse, json from pyclustering.cluster import cluster_visualizer from pyclustering.cluster.optics import optics from pyclustering.utils import read_sample # return list of path files from argument string, space delimitted def filelist(files): files = files.split() for i in range(len(files)): files[i] = os.path.normpath(files[i]) return files # remove values outside of tolerances # tries to import file via pyclustering module # if it fails due to presence of headers then import as pandas df, remove header, save, reimport def data_truncate(data,minrt,maxrt,minint): while True: try: with open(data, 'r') as chrom_data: imported_data = [] for line in chrom_data: if line[0] != '#': if line[-1] == '\n': line = line.rstrip() imported_data.append(line) break except FileNotFoundError: print('One of more file(s) not found. Exit status 3.') sys.exit(3) curated_list = [] if maxrt != 0: for row in imported_data: row = row.split(sep='\t') row = [float(i) for i in row] if row[0] >= minrt: if row[0] <= maxrt: if row[2] >= minint: curated_list.append(row) else: for row in imported_data: row = row.split(sep='\t') row = [float(i) for i in row] if row[0] >= minrt and row[2] >= minint: curated_list.append(row) return curated_list def pyclustering_optics(curated_list,rt_toler,mz_toler,neighbors): mz_norm_coeff = rt_toler/mz_toler cluster_input = [] for row in curated_list: cluster_input.append([row[0],row[1]*mz_norm_coeff]) optics_instance = optics(cluster_input, rt_toler, neighbors) optics_instance.process() cluster_index_list = optics_instance.get_clusters() clusters = [] try: for cluster in cluster_index_list: clusters.append([]) for index in cluster: clusters[-1].append(curated_list[index]) for cluster in clusters: cluster.sort(key = lambda x: x[0]) except TypeError: print('\n\nNo clusters found') return clusters def cluster_apex(clusters): apex_list = [] for cluster in clusters: apex = 0 for index in range(len(cluster)): if cluster[index][2] > apex: apex = cluster[index][2] apex_index = index apex_list.append([cluster[apex_index],apex_index]) return apex_list # atom_dict = { minc, maxc, minh, maxh, minn, maxn, mino, maxo, mins, maxs, minp, maxp }, values must be strings # param_dict = { 'mindus', 'maxdus', 'msrange' } # returns a list of dictionaries containing mf, unsat, and ppm info for each def chemcalc( mass, atom_dict, param_dict, ppmax ): chemcalcURL = 'http://www.chemcalc.org/chemcalc/em' for key in atom_dict: atom_dict[key] = str(atom_dict[key]) param_dict['monoisotopicMass'] = mass mfRange = 'C{minc}-{maxc}H{minh}-{maxh}N{minn}-{maxn}O{mino}-{maxo}S{mins}-{maxs}P{minp}-{maxp}'.format(**atom_dict) param_dict['mfRange'] = mfRange while True: try: response = urllib.request.urlopen(chemcalcURL, urllib.parse.urlencode(param_dict).encode('utf-8')) break except ValueError: print('Waiting on server') time.sleep( 5 ) jsondata = response.read() rawdata = json.loads(jsondata.decode('utf-8')) cur_list_dict = [] for result in rawdata['results']: if abs( result['ppm'] ) <= ppmax and result['unsat'] >= param_dict['mindus'] and result['unsat'] <= param_dict['maxdus']: cur_list_dict.append({ 'molForm': result['mf'], 'unsat': result['unsat'], 'ppm': result['ppm'] }) return cur_list_dict
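# Why pyclustering_optics above rescales m/z before clustering (a sketch
# with illustrative tolerance values, not ones from the script): OPTICS
# accepts a single connectivity radius, but retention time and m/z have
# different tolerances. Multiplying m/z by rt_toler/mz_toler maps one m/z
# tolerance onto one rt tolerance, so the single radius rt_toler bounds
# both dimensions at once.
rt_toler, mz_toler = 0.5, 0.01   # hypothetical tolerances
mz_norm_coeff = rt_toler / mz_toler
print(0.01 * mz_norm_coeff)      # one full m/z tolerance -> 0.5, i.e. one rt tolerance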
import numpy as np from dataserver import get_file from demo_helpers import generate_rank1_data, generate_rank2_data f = get_file('accumulating_test.h5').get_numbered_child() f.create_dataset('line', rank=1) f.create_dataset('img', rank=2) for x, trace in zip(generate_rank1_data(), generate_rank2_data()): #time.sleep(.1) print "Enter to step", raw_input() f['line'].append(np.random.normal()) f['img'].append(np.random.normal(size=100)) print 'Done'
import os
import datetime

from dropbox.client import DropboxClient
from dropbox.datastore import DatastoreManager, DatastoreConflictError
from bottle import route, request, static_file, run, template, TEMPLATE_PATH

from config import DROPBOX_TOKEN, IMAGES_PATH, MAX_RETRIES
from Logger import _logger


@route('/')
def index_page():
    return static_file('pages/index.html', root='.')


@route('/delete')
def delete_page():
    return static_file('pages/delete.html', root='.')


@route('/deleteRecord', method='POST')
def do_delete():
    deleteR()
    return "All records deleted"


@route('/upload', method='POST')
def do_upload():
    begin = request.forms.get('beginDate')
    end = request.forms.get('endDate')
    upload = request.files.get('upload')
    if upload is not None:
        raw = upload.file.read()
        path = saveOnDisk(upload.filename, raw)
        saveOnDropbox(path, upload.filename, begin, end)
    else:
        return "Need to select an image, try again."
    return "Upload successful"


@route('/activationForm')
def activation_form():
    return static_file('pages/index2.html', root='.')


@route('/activation', method='POST')
def do_uploadData():
    phone = request.forms.get('phone')
    serial = request.forms.get('serial')
    date = request.forms.get('instDate')
    installation = request.forms.get('onoffswitch')
    saveDataOnDropbox(phone, serial, date, installation)
    return "Upload data successful"


@route('/galeria', method='GET')
def genera_galeria():
    return static_file('pages/galeria.tpl', root='.')


@route('/images/<filename:path>', method='GET')
def serve_files(filename):
    return static_file(filename, root='images/')


@route('/get_images', method='GET')
def get_images_of_this_month():
    client = DropboxClient(DROPBOX_TOKEN)
    manager = DatastoreManager(client)
    datastore = manager.open_default_datastore()
    offer_table = datastore.get_table('offers')
    offers = offer_table.query()
    # Compute these before the loop so they exist even when there are no offers
    # (the original computed them inside the loop and read them afterwards).
    current_month = '{:02d}'.format(datetime.datetime.now().month)
    year = '{:4d}'.format(datetime.datetime.now().year)
    images_to_show = []
    for offer in offers:  # dropbox.datastore.Record
        name = offer.get('offerName')
        begin = datetime.datetime.strptime(offer.get('begin'), "%Y-%m-%d").date()
        end = datetime.datetime.strptime(offer.get('end'), "%Y-%m-%d").date()
        begin_month = '{:02d}'.format(begin.month)
        end_month = '{:02d}'.format(end.month)
        if current_month == begin_month or current_month == end_month:
            # The offer belongs to the current month, so we show it.
            images_to_show.append(name)
    images_paths = download_and_save_images(images_to_show, year, current_month)
    TEMPLATE_PATH.insert(0, 'pages/')
    return template('galeria', images=images_paths)


def download_and_save_images(list_images, year, month):
    client = DropboxClient(DROPBOX_TOKEN)
    DESTINATION_FOLDER = 'images/' + year + month + '/'
    if not os.path.exists(DESTINATION_FOLDER):
        os.makedirs(DESTINATION_FOLDER)
    final_images = []
    for image in list_images:
        f, metadata = client.get_file_and_metadata(image)
        with open(DESTINATION_FOLDER + image, 'wb') as out:
            out.write(f.read())
        final_images.append(DESTINATION_FOLDER + image)
    return final_images


def saveOnDropbox(full_path, filename, begin, end):
    client = DropboxClient(DROPBOX_TOKEN)
    manager = DatastoreManager(client)
    datastore = manager.open_default_datastore()
    offer_table = datastore.get_table('offers')
    for _ in range(MAX_RETRIES):
        try:
            offer_table.insert(offerName=filename, begin=begin, end=end)
            datastore.commit()
            _logger.debug("data saved on offers table = (%s, %s, %s)" % (filename, begin, end))
            break
        except DatastoreConflictError:
            datastore.rollback()     # roll back local changes
            datastore.load_deltas()  # load new changes from Dropbox
    with open(full_path, 'rb') as image:
        client.put_file(filename, image)
    _logger.debug("%s saved on dropbox" % filename)


def saveDataOnDropbox(phone, serial, date, installation):
    client = DropboxClient(DROPBOX_TOKEN)
    manager = DatastoreManager(client)
    datastore = manager.open_default_datastore()
    devices_table = datastore.get_table('uk_devices')
    for _ in range(MAX_RETRIES):
        try:
            devices_table.insert(phoneNumber=phone, serialNumber=serial,
                                 dateActivate=date, instStatus=installation)
            datastore.commit()
            _logger.debug("data saved on uk_devices table = (%s, %s, %s, %s)"
                          % (phone, serial, date, installation))
            break
        except DatastoreConflictError:
            datastore.rollback()     # roll back local changes
            datastore.load_deltas()  # load new changes from Dropbox


def deleteR():
    client = DropboxClient(DROPBOX_TOKEN)
    manager = DatastoreManager(client)
    datastore = manager.open_default_datastore()
    tasks_table = datastore.get_table('uk_devices')

    def delete_active_records():
        tasks = tasks_table.query(instStatus='on')
        for task in tasks:
            print(task.get('serialNumber'))
            task.delete()

    # The original passed deleteR itself to datastore.transaction, recursing
    # forever; run only the deletion body inside the transaction, which also
    # handles the commit/retry cycle.
    datastore.transaction(delete_active_records, max_tries=4)


def saveOnDisk(filename, upload):
    if not os.path.exists(IMAGES_PATH):
        os.makedirs(IMAGES_PATH)
    file_path = IMAGES_PATH + filename
    with open(file_path, 'wb') as image:
        image.write(upload)
    _logger.debug("%s saved on disk" % file_path)
    return file_path


if __name__ == '__main__':
    run(host='localhost', port=8086)
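# Hedged usage sketch for the upload endpoint above, using requests (field
# names match the handlers; the file name and dates are placeholders):
#
# import requests
# requests.post('http://localhost:8086/upload',
#               data={'beginDate': '2015-01-01', 'endDate': '2015-01-31'},
#               files={'upload': open('offer.jpg', 'rb')})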
# -*- coding: utf-8 -*-


class UnionFind:
    def __init__(self, n):
        self.ids = list(range(n))
        self.sizes = [1] * n

    def root(self, i):
        while i != self.ids[i]:
            # Path halving: point i at its grandparent while walking up.
            self.ids[i] = self.ids[self.ids[i]]
            i = self.ids[i]
        return i

    def union(self, p, q):
        i, j = self.root(p), self.root(q)
        if i == j:
            return
        # Weighted union: attach the smaller tree under the larger one.
        if self.sizes[i] < self.sizes[j]:
            self.ids[i] = j
            self.sizes[j] += self.sizes[i]
        else:
            self.ids[j] = i
            self.sizes[i] += self.sizes[j]

    def find(self, p, q):
        return self.root(p) == self.root(q)


class Solution:
    def findRedundantConnection(self, edges):
        n = float("-inf")
        for u, v in edges:
            n = max(n, u, v)
        result = []
        union_find = UnionFind(n)
        for u, v in edges:
            # The last edge whose endpoints are already connected closes the cycle.
            if union_find.find(u - 1, v - 1):
                result = [u, v]
            union_find.union(u - 1, v - 1)
        return result


if __name__ == "__main__":
    solution = Solution()
    assert [2, 3] == solution.findRedundantConnection([[1, 2], [1, 3], [2, 3]])
    assert [1, 4] == solution.findRedundantConnection(
        [[1, 2], [2, 3], [3, 4], [1, 4], [1, 5]]
    )
    assert [5, 25] == solution.findRedundantConnection(
        [
            [6, 13], [15, 22], [10, 13], [12, 24], [3, 23], [19, 20], [3, 12],
            [2, 16], [19, 23], [2, 11], [18, 23], [1, 25], [2, 17], [4, 5],
            [14, 19], [2, 3], [1, 7], [4, 6], [9, 10], [8, 22], [7, 22],
            [13, 18], [13, 21], [15, 23], [5, 25],
        ]
    )
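# A quick standalone check of the UnionFind primitive itself, separate from the
# LeetCode asserts above (uses only the class as defined):
#
# uf = UnionFind(4)
# uf.union(0, 1)
# uf.union(1, 2)
# assert uf.find(0, 2)       # 0 and 2 are now connected
# assert not uf.find(0, 3)   # 3 is still in its own component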
import os

from telethon.sessions import StringSession
from telethon.sync import TelegramClient
from dotenv import load_dotenv

load_dotenv()

api_id1 = int(os.getenv("api_id1"))
api_hash1 = str(os.getenv("api_hash1"))

with TelegramClient(StringSession(), api_id1, api_hash1) as client:
    print("Copy the string for session 1")
    print(client.session.save())

api_id2 = int(os.getenv("api_id2"))
api_hash2 = str(os.getenv("api_hash2"))

with TelegramClient(StringSession(), api_id2, api_hash2) as client:
    print("Copy the string for session 2")
    print(client.session.save())
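# A hedged sketch of how a saved string would be used later. TelegramClient and
# StringSession are standard Telethon APIs; SAVED_STRING is a placeholder for
# the value printed above:
#
# with TelegramClient(StringSession(SAVED_STRING), api_id1, api_hash1) as client:
#     client.send_message("me", "logged in via string session")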
from PyQt5.QtWidgets import (
    QDialog, QDialogButtonBox, QLabel, QVBoxLayout, QMessageBox
)


class CustomDialog(QDialog):
    icon_dict = {
        'info': QMessageBox.Information,
        'question': QMessageBox.Question,
        'warning': QMessageBox.Warning,
        'critical': QMessageBox.Critical,
    }

    @property
    def title(self):
        return self._title

    @title.setter
    def title(self, title):
        self._title = title

    @property
    def text(self):
        return self._text

    @text.setter
    def text(self, text):
        self._text = text

    @classmethod
    def init(cls, win=None, title="Notice!", text="A dialog box"):
        instance = cls(win)
        instance.title = title
        instance.text = text
        return instance

    @classmethod
    def init_message(cls, title="Notice!", text="A dialog box", type='info'):
        msg = QMessageBox()
        msg.setWindowTitle(title)
        msg.setIcon(cls.icon_dict.get(type, QMessageBox.Information))
        msg.setText(text)
        msg.setStandardButtons(QMessageBox.Ok)
        return msg

    def show(self):
        self.setWindowTitle(self._title)
        QBtn = QDialogButtonBox.Ok | QDialogButtonBox.Cancel
        self.buttonBox = QDialogButtonBox(QBtn)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.layout = QVBoxLayout()
        message = QLabel(self._text)
        self.layout.addWidget(message)
        self.layout.addWidget(self.buttonBox)
        self.setLayout(self.layout)
        return self.exec_()

    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self._title = "Notice!"
        self._text = "A dialog box"

# def button_clicked(self, s):
#     print("click", s)
#
# # If you pass self, the dialog will be centered over the main window as before.
# dlg = CustomDialog()
# if dlg.exec_():
#     print("Success!")
# else:
#     print("Cancel!")
# Generated by Django 3.2.12 on 2022-03-20 12:40

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('osmcal', '0029_alter_event_description'),
    ]

    operations = [
        # This deletes already existing duplicates in the DB:
        migrations.RunSQL("""
            DELETE FROM osmcal_eventparticipation
            WHERE id IN (
                SELECT p.id
                FROM (SELECT MIN(id) AS min_id, event_id, user_id
                      FROM osmcal_eventparticipation
                      GROUP BY (user_id, event_id)
                      HAVING COUNT(*) > 1) s,
                     osmcal_eventparticipation p
                WHERE p.event_id = s.event_id
                  AND p.user_id = s.user_id
                  AND p.id != s.min_id
                ORDER BY p.added_on
            )"""),
        migrations.AlterUniqueTogether(
            name='eventparticipation',
            unique_together={('event', 'user')},
        ),
    ]
from unittest.mock import patch

import pytest

from Tested_Method.MethodToTest import working_function_3

TESTED_MODULE = 'Tested_Method.MethodToTest'


# Mocking just the public collaborators. For functions such as a DB connection
# that run in several tests, a fixture can do the pre-processing instead.
# Note: @patch decorators are applied bottom-up, so the mock arguments must be
# listed in the reverse order of the decorators (the original listed them in
# the wrong order and also took an undefined `login` fixture).
@patch(f'{TESTED_MODULE}.get_element_1', return_value=-10)
@patch(f'{TESTED_MODULE}.get_element_2', return_value=5)
@patch(f'{TESTED_MODULE}.sendAPI')
def test_working_function__apply_division_of_number1_by_number2_and_send(
    mock_sendAPI, mock_get_element_2, mock_get_element_1
):
    # Check that the exception is raised.
    with pytest.raises(ValueError):
        working_function_3()
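# For context, a hypothetical shape of working_function_3 consistent with the
# mocks above (the real Tested_Method.MethodToTest module is not shown; this is
# an illustrative assumption only):
#
# def working_function_3():
#     a = get_element_1()   # mocked to -10
#     b = get_element_2()   # mocked to 5
#     if a < 0:
#         raise ValueError("element 1 must be non-negative")
#     sendAPI(a / b)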
# Example directly sending a text string:
import requests

r = requests.post(
    "https://api.deepai.org/api/summarization",
    data={
        'text': 'YOUR_TEXT_HERE',
    },
    headers={'api-key': '79728e79-d56e-40bc-be98-ece560a7dd3c'}
)
print(r.json())
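# The JSON reply is assumed to carry the generated summary under an "output"
# key (hedged; consult the DeepAI docs for the exact response schema):
#
# summary = r.json().get("output")
# print(summary)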
import os
from unittest import TestCase

from graph_db.access import db
from graph_db.engine.api import EngineAPI
from graph_db.engine.error import GraphEngineError
from graph_db.engine.types import DFS_CONFIG_PATH


class ParserCase(TestCase):
    temp_dir = 'db/'

    queries = [
        'create graph: test_graph',
        'create node: Cat',
        'create node: Mouse',
        'create relationship: catches from Cat to Mouse',
        'create node: Cat',
        'match node: Cat',
        'match node: Mouse',
        'match relationship: catches',
        'create node: Jerry Animal:Mouse',
        'create node: Tom Animal:Cat',
        'create relationship: catches from Jerry to Tom Durability:2',
        'create relationship: fights from Tom to Jerry Time:10',
        'create node: system Type:PC CPU:Intel GPU:NVidia',
        'create relationship: plays from Tom To system Since:2016 Game:MadMax',
        'match graph: test_graph',
        'create node: boy age:20 sex:male',
        'create node: girl age:19 sex:female',
        'create relationship: loves from boy to girl since:2015',
        'match node: age>19',
        'match node: sex=male',
        'match node: age<100',
        'match relationship: since=2015',
        'match node: Cats',
        'match relationship: False',
        'create node: a',
        'create node: b',
        'create relationship: ab from a to b',
        'delete relationship: id:5',
        'match relationship: ab',
        'match relationship: id:5',
        'create relationship: catches from id:17 to id:18',
        'update node: id:3 color:brown',
    ]

    queries_invalid = [
        'create graph:test_graph',
        'create graph test_graph',
        'create graph',
        'create node:Cat',
        'create node: ',
        'create',
        'match node: ',
        'match node',
        'create relationship: catches',
        'create relationship: catches from Cat',
        'create relationship: catches from to id:4',
        'create node: Jerry Animal:',
        'create node: Tom Animal',
        'create node: Tom :Cat',
        'create relationship: catches from Jerry to Tom :2',
        'create node: system Type:PC CPU:Intel GPU:',
        'create node: system Type:PC CPU:Intel :',
        'create node: system Type:PC CPU:Intel:NVidia',
        'match graph: ',
        'match node: age>',
        'match node: =',
        'match node: <100',
        'match relationship: since= =2015',
    ]

    def setUp(self):
        self.db = db.connect(config_path=DFS_CONFIG_PATH)
        self.cursor = self.db.cursor()
        self.graph_engine: EngineAPI = self.db.get_engine()

    def tearDown(self):
        self.db.close()
        # deleting created temp stores
        for root, dirs, files in os.walk(self.temp_dir, topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
            for name in dirs:
                os.rmdir(os.path.join(root, name))
        os.removedirs(self.temp_dir)
        with self.assertRaises(FileNotFoundError):
            os.listdir(self.temp_dir)

    def test_queries(self):
        # Graph creation
        self.cursor.execute(self.queries[0])

        # Node creation #1
        self.cursor.execute(self.queries[1])
        self.assertEqual(1, self.db.get_stats()['NodeStorage'], 'Storage contains extra data')
        retrieved_node = self.graph_engine.select_node(node_id=0)
        self.assertEqual(0, retrieved_node.get_id(), 'Node id is incorrect')
        label = retrieved_node.get_label()
        self.assertEqual(0, label.get_id(), 'Label id is incorrect')
        self.assertEqual('Cat', label.get_name(), 'Label name is incorrect')

        # Node creation #2
        self.cursor.execute(self.queries[2])
        self.assertEqual(2, self.db.get_stats()['NodeStorage'], 'Storage contains extra data')
        retrieved_node = self.graph_engine.select_node(node_id=1)
        self.assertEqual(1, retrieved_node.get_id(), 'Node id is incorrect')
        label = retrieved_node.get_label()
        self.assertEqual(1, label.get_id(), 'Label id is incorrect')
        self.assertEqual('Mouse', label.get_name(), 'Label name is incorrect')

        # Relationship creation #1
        self.cursor.execute(self.queries[3])
        self.assertEqual(3, self.db.get_stats()['LabelStorage'], 'Label storage contains extra data')
        retrieved_relationship = self.graph_engine.select_relationship(rel_id=0)
        self.assertEqual(0, retrieved_relationship.get_id(), 'relationship id is incorrect')
        label = retrieved_relationship.get_label()
        self.assertEqual(2, label.get_id(), 'relationship label id is incorrect')
        self.assertEqual('catches', label.get_name(), 'relationship label name is incorrect')

        # Node creation with the same 'Cat' label
        self.cursor.execute(self.queries[4])
        self.assertEqual(3, self.db.get_stats()['NodeStorage'], 'Storage contains extra data')
        retrieved_node = self.graph_engine.select_node(node_id=2)
        self.assertEqual(2, retrieved_node.get_id(), 'Node id is incorrect')
        label = retrieved_node.get_label()
        self.assertEqual(0, label.get_id(), 'Label id is incorrect')
        self.assertEqual(3, self.db.get_stats()['LabelStorage'], 'Label storage contains extra data')
        self.assertEqual('Cat', label.get_name(), 'Label name is incorrect')

        # Match nodes with 'Cat' label
        self.cursor.execute(self.queries[5])
        result = self.cursor.fetch_all()
        self.assertEqual(2, self.cursor.count(), 'Number of nodes with label is incorrect')
        self.assertEqual(2, len(result), 'Number of nodes with label is incorrect')

        # Match 'Mouse' node
        self.cursor.execute(self.queries[6])
        result = self.cursor.fetch_all()
        self.assertEqual(1, len(result), 'Number of nodes with label is incorrect')
        label = result[0].get_label()
        self.assertEqual('Mouse', label.get_name(), 'Label of matched node is incorrect')

        # Match 'catches' relationship
        self.cursor.execute(self.queries[7])
        result = self.cursor.fetch_all()
        self.assertEqual(1, len(result), 'Number of relationships with label is incorrect')
        label = result[0].get_label()
        self.assertEqual('catches', label.get_name(), 'Label of matched relationship is incorrect')

        # Create nodes with property
        self.cursor.execute(self.queries[8])
        retrieved_node = self.graph_engine.select_node(node_id=3)
        self.assertEqual(3, retrieved_node.get_id(), 'Node id is incorrect')
        prop = retrieved_node.get_first_property()
        self.assertEqual('Animal', prop.get_key(), 'Key of property is incorrect')
        self.assertEqual('Mouse', prop.get_value(), 'Value of property is incorrect')

        self.cursor.execute(self.queries[9])
        retrieved_node = self.graph_engine.select_node(node_id=4)
        self.assertEqual(4, retrieved_node.get_id(), 'Node id is incorrect')
        prop = retrieved_node.get_first_property()
        self.assertEqual('Animal', prop.get_key(), 'Key of property is incorrect')
        self.assertEqual('Cat', prop.get_value(), 'Value of property is incorrect')

        # Create relationships with property
        self.cursor.execute(self.queries[10])
        retrieved_relationship = self.graph_engine.select_relationship(rel_id=1)
        self.assertEqual(1, retrieved_relationship.get_id(), 'relationship id is incorrect')
        prop = retrieved_relationship.get_first_property()
        self.assertEqual('Durability', prop.get_key(), 'Key of property is incorrect')
        self.assertEqual(2, prop.get_value(), 'Value of property is incorrect')

        self.cursor.execute(self.queries[11])
        retrieved_relationship = self.graph_engine.select_relationship(rel_id=2)
        self.assertEqual(2, retrieved_relationship.get_id(), 'relationship id is incorrect')
        prop = retrieved_relationship.get_first_property()
        self.assertEqual('Time', prop.get_key(), 'Key of property is incorrect')
        self.assertEqual(10, prop.get_value(), 'Value of property is incorrect')

        # Create a node with multiple properties
        self.cursor.execute(self.queries[12])
        retrieved_node = self.graph_engine.select_node(node_id=5)
        self.assertEqual(3, len(retrieved_node.get_properties()), 'Number of properties is incorrect')
        self.assertEqual('CPU', retrieved_node.get_properties()[1].get_key(), 'Retrieved key is incorrect')
        self.assertEqual('NVidia', retrieved_node.get_properties()[2].get_value(), 'Retrieved value is incorrect')

        # Create a relationship with multiple properties
        self.cursor.execute(self.queries[13])
        retrieved_relationship = self.graph_engine.select_relationship(rel_id=3)
        self.assertEqual(2, len(retrieved_relationship.get_properties()), 'Number of properties is incorrect')
        self.assertEqual('MadMax', retrieved_relationship.get_properties()[1].get_value(), 'Retrieved value is incorrect')
        self.assertEqual(9, self.graph_engine.get_stats()['PropertyStorage'], 'Incorrect number of properties')

        # Graph traverse with `match graph: graph`
        self.cursor.execute(self.queries[14])
        result = self.cursor.fetch_all()
        self.assertEqual(10, len(result), 'Number of objects in graph is incorrect')

        # Create 2 nodes and 1 relationship with properties to match
        for query in self.queries[15:18]:
            self.cursor.execute(query)

        # Match nodes by property
        self.cursor.execute(self.queries[18])
        result = self.cursor.fetch_all()
        self.assertEqual(1, len(result), 'Incorrect number of matched nodes')
        self.assertEqual(20, int(result[0].get_first_property().get_value()),
                         'Retrieved value of property is incorrect')

        # Node
        self.cursor.execute(self.queries[19])
        result = self.cursor.fetch_all()
        self.assertEqual(1, len(result), 'Incorrect number of matched nodes')

        # Node
        self.cursor.execute(self.queries[20])
        result = self.cursor.fetch_all()
        self.assertEqual(2, len(result), 'Incorrect number of matched nodes')

        # Relationship
        self.cursor.execute(self.queries[21])
        result = self.cursor.fetch_all()
        self.assertEqual(1, len(result), 'Incorrect number of matched relationships')

        # Not existing objects
        self.cursor.execute(self.queries[22])
        result = self.cursor.fetch_all()
        self.assertEqual(0, len(result), 'Incorrect number of matched nodes')

        self.cursor.execute(self.queries[23])
        result = self.cursor.fetch_all()
        self.assertEqual(0, len(result), 'Incorrect number of matched nodes')

        # Delete relationship
        for query in self.queries[24:28]:
            self.cursor.execute(query)
        self.cursor.execute(self.queries[28])
        result = self.cursor.fetch_all()
        self.assertEqual(0, len(result), 'Relationship was not deleted')
        with self.assertRaises(GraphEngineError):
            self.cursor.execute(self.queries[29])

        # Create relationship for invalid nodes
        with self.assertRaises(GraphEngineError):
            self.cursor.execute(self.queries[30])

        self.cursor.execute(self.queries[31])
        retrieved_node = self.graph_engine.select_node(node_id=3)
        prop = retrieved_node.get_first_property()
        self.assertEqual('Animal', prop.get_key(), 'Key of property is incorrect')
        self.assertEqual('Mouse', prop.get_value(), 'Value of property is incorrect')
        prop = retrieved_node.get_last_property()
        self.assertEqual('color', prop.get_key(), 'Key of property is incorrect')
        self.assertEqual('brown', prop.get_value(), 'Value of property is incorrect')

    def test_queries_invalid(self):
        # Graph creation
        for query in self.queries_invalid[0:3]:
            with self.assertRaises(SyntaxError):
                self.cursor.execute(query)
        self.cursor.execute(self.queries[0])

        # Node creation
        for query in self.queries_invalid[3:6]:
            with self.assertRaises(SyntaxError):
                self.cursor.execute(query)

        # Node match
        for query in self.queries_invalid[6:11]:
            with self.assertRaises(SyntaxError):
                self.cursor.execute(query)

        # Create nodes with property
        for query in self.queries_invalid[11:14]:
            with self.assertRaises(SyntaxError):
                self.cursor.execute(query)

        # Create relationships with property
        with self.assertRaises(SyntaxError):
            self.cursor.execute(self.queries_invalid[14])

        # Create a node with multiple properties
        for query in self.queries_invalid[15:18]:
            with self.assertRaises(SyntaxError):
                self.cursor.execute(query)

        # Graph traverse with `match graph: graph`
        with self.assertRaises(SyntaxError):
            self.cursor.execute(self.queries_invalid[18])

        # Match nodes by property
        for query in self.queries_invalid[19:23]:
            with self.assertRaises(SyntaxError):
                self.cursor.execute(query)
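# Hedged sketch of the same cursor API outside the test harness, using only
# the calls exercised above (connect/cursor/execute/fetch_all):
#
# connection = db.connect(config_path=DFS_CONFIG_PATH)
# cur = connection.cursor()
# cur.execute('create graph: demo')
# cur.execute('create node: Alice age:30')
# cur.execute('match node: age>20')
# print(cur.fetch_all())
# connection.close()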
__author__ = 'Justin'

import geojson
import networkx as nx
# Note: vincenty is available in geopy < 2.0; on geopy >= 2.0 use
# geopy.distance.geodesic instead.
from geopy.distance import vincenty as latlondist

# DESCRIPTION:
# This script converts geojson road geometry files to a tractable networkx object (.gexf).
# Geojson map data such as node positions, edge connections, edge types, max speeds,
# and edge names are extracted.
# The geojson network is pruned to remove redundant network nodes.
#
# INPUT:
#   geojson MapZen extract
#
# OUTPUT:
#   networkx object file (.gexf)

# Load geoJSON strings from file
with open('road data.geojson', 'r') as myfile:
    geoJSONstring = myfile.read().replace('\n', '')
with open('roadspeeds.geojson', 'r') as myfile:
    geoJSONstring2 = myfile.read().replace('\n', '')

# Load geoJSON strings into geojson objects
roaddata = geojson.loads(geoJSONstring)
speeddata = geojson.loads(geoJSONstring2)

# Extract speeds
roadspeeds = {}
for feature in speeddata.features:
    if "maxspeed" in feature.properties and "name" in feature.properties:
        speedstr = feature.properties["maxspeed"]
        speednum = [int(s) for s in speedstr.split() if s.isdigit()]
        roadspeeds[feature.properties["name"]] = speednum[0]

# Extract nodes and edges
meterconv = 1609.344  # miles to meters
FullGraph = nx.DiGraph()
for feature in roaddata.features:
    # get road edge properties
    if "name" in feature.properties and feature.properties["name"] is not None:
        edgename = feature.properties["name"]
    else:
        edgename = 'unknown'
    if edgename in roadspeeds:
        speed = roadspeeds[edgename]
    elif feature.properties["type"] == 'residential':
        speed = 20
    else:
        speed = 30
    if "type" in feature.properties:
        edgetype = feature.properties["type"]
    else:
        edgetype = 'unknown'

    # check for oneway vs. twoway streets (twoway streets need two directed edges)
    if feature.properties["oneway"] == 0:
        # add reverse-direction edges and associated nodes for the feature element
        for latlon in feature.geometry.coordinates:
            FullGraph.add_node(str(latlon), lon=latlon[0], lat=latlon[1])
        for counter in range(len(feature.geometry.coordinates) - 1, 0, -1):
            # find distance between node pair
            roaddistance = meterconv * latlondist(feature.geometry.coordinates[counter],
                                                  feature.geometry.coordinates[counter - 1]).miles
            basetime = roaddistance / meterconv / speed * 3600
            # add edge with edge properties
            edgedict = {'weight': 0, 'type': edgetype, 'distance': roaddistance,
                        'basetime': basetime, 'name': edgename}
            FullGraph.add_edge(str(feature.geometry.coordinates[counter]),
                               str(feature.geometry.coordinates[counter - 1]), **edgedict)

    # add forward-direction edges and associated nodes for the feature element
    # (opposite direction of the edges within the if statement above)
    for latlon in feature.geometry.coordinates:
        FullGraph.add_node(str(latlon), lon=latlon[0], lat=latlon[1])
    for counter in range(0, len(feature.geometry.coordinates) - 1):
        # find distance between node pair; the original stored a stale
        # 'roaddistance' from the reverse loop here, so recompute it properly
        roaddistance = meterconv * latlondist(feature.geometry.coordinates[counter],
                                              feature.geometry.coordinates[counter + 1]).miles
        basetime = roaddistance / meterconv / speed * 3600
        # add edge with distance weight
        edgedict = {'weight': 0, 'type': edgetype, 'distance': roaddistance,
                    'basetime': basetime, 'name': edgename}
        FullGraph.add_edge(str(feature.geometry.coordinates[counter]),
                           str(feature.geometry.coordinates[counter + 1]), **edgedict)

# Save FullGraph
nx.write_gexf(FullGraph, 'FullGraph.gexf')

# Remove unnecessary nodes (exactly two neighbors, or one neighbor and two edges)
Intersections = FullGraph
count = 0
print('Number of Graph Nodes', len(FullGraph.nodes()))
# iterate over a copy of the node list, since nodes are removed while iterating
for node in list(Intersections.nodes()):
    neighbors = list(Intersections.neighbors(node))
    if len(neighbors) == 2:
        A = neighbors[0]
        B = neighbors[1]
        Aneighbors = list(Intersections.neighbors(A))
        Bneighbors = list(Intersections.neighbors(B))
        if (node in Aneighbors) and (node in Bneighbors):
            weightA_node = Intersections[A][node]['weight']
            weightnode_A = Intersections[node][A]['weight']
            weightB_node = Intersections[B][node]['weight']
            weightnode_B = Intersections[node][B]['weight']
            edgetype = Intersections[A][node]['type']
            Intersections.add_edge(A, B, weight=weightA_node + weightnode_B, type=edgetype)
            Intersections.add_edge(B, A, weight=weightB_node + weightnode_A, type=edgetype)
            Intersections.remove_node(node)
        else:
            count += 1
    elif len(neighbors) == 1 and Intersections.degree(node) == 2:
        end = neighbors[0]
        start = list(Intersections.in_edges(node))[0][0]
        weightstart = Intersections[start][node]['weight']
        weightend = Intersections[node][end]['weight']
        edgetype = Intersections[start][node]['type']
        Intersections.add_edge(start, end, weight=weightstart + weightend, type=edgetype)
        Intersections.remove_node(node)

rescount = 0
for edge in Intersections.edges():
    edgedict = Intersections.get_edge_data(edge[0], edge[1])
    if edgedict['type'] == 'residential':
        rescount += 1
print('Number of Sections', len(Intersections.edges()))
print('Number of Residential Sections', rescount)

# Save Intersections
nx.write_gexf(Intersections, 'Intersections.gexf')

# Export Intersections nodes (geoJSON format)
Features = []
lons = nx.get_node_attributes(Intersections, 'lon')
lats = nx.get_node_attributes(Intersections, 'lat')
for point in Intersections.nodes():
    Features.append(geojson.Feature(geometry=geojson.Point((lons[point], lats[point]))))
Collection = geojson.FeatureCollection(Features)
dump = geojson.dumps(Collection)
with open("Intersections.txt", "w") as text_file:
    text_file.write(dump)
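# A short follow-up sketch: load the full graph back and route over it.
# read_gexf and shortest_path are standard networkx calls; SOME_NODE and
# OTHER_NODE are placeholders for node ids present in the graph:
#
# G = nx.read_gexf('FullGraph.gexf')
# route = nx.shortest_path(G, source=SOME_NODE, target=OTHER_NODE, weight='basetime')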
# Generated by Django 2.2.6 on 2019-10-19 11:01

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('checkout', '0006_shippingaddress_current_address'),
        ('carts', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='order',
            name='billing_address',
            field=models.ForeignKey(blank=True, null=True,
                                    on_delete=django.db.models.deletion.SET_NULL,
                                    to='checkout.ShippingAddress'),
        ),
    ]
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

import sys
import types
import unittest
import uuid
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Any

from pkg_resources import (
    Distribution,
    EmptyProvider,
    VersionConflict,
    WorkingSet,
    working_set,
    yield_lines,
)

from pants.base.exceptions import BuildConfigurationError
from pants.build_graph.build_configuration import BuildConfiguration
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.engine.rules import rule
from pants.engine.target import COMMON_TARGET_FIELDS, Target
from pants.init.extension_loader import (
    PluginLoadOrderError,
    PluginNotFound,
    load_backend,
    load_backends_and_plugins,
    load_plugins,
)
from pants.option.subsystem import Subsystem
from pants.util.frozendict import FrozenDict
from pants.util.ordered_set import FrozenOrderedSet


class MockMetadata(EmptyProvider):
    def __init__(self, metadata):
        self.metadata = metadata

    def has_metadata(self, name):
        return name in self.metadata

    def get_metadata(self, name):
        return self.metadata[name]

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))


class DummySubsystem(Subsystem):
    options_scope = "dummy-subsystem"


class DummyTarget(Target):
    alias = "dummy_tgt"
    core_fields = COMMON_TARGET_FIELDS


class DummyTarget2(Target):
    alias = "dummy_tgt2"
    core_fields = ()


class DummyObject1:
    pass


class DummyObject2:
    pass


@dataclass(frozen=True)
class RootType:
    value: Any


@dataclass(frozen=True)
class WrapperType:
    value: Any


@rule
def example_rule(root_type: RootType) -> WrapperType:
    return WrapperType(root_type.value)


class PluginProduct:
    pass


@rule
def example_plugin_rule(root_type: RootType) -> PluginProduct:
    return PluginProduct()


class LoaderTest(unittest.TestCase):
    def setUp(self):
        self.bc_builder = BuildConfiguration.Builder()
        self.working_set = WorkingSet()
        for entry in working_set.entries:
            self.working_set.add_entry(entry)

    @contextmanager
    def create_register(
        self,
        build_file_aliases=None,
        rules=None,
        target_types=None,
        module_name="register",
    ):
        package_name = f"__test_package_{uuid.uuid4().hex}"
        self.assertFalse(package_name in sys.modules)
        package_module = types.ModuleType(package_name)
        sys.modules[package_name] = package_module
        try:
            register_module_fqn = f"{package_name}.{module_name}"
            register_module = types.ModuleType(register_module_fqn)
            setattr(package_module, module_name, register_module)
            sys.modules[register_module_fqn] = register_module

            def register_entrypoint(function_name, function):
                if function:
                    setattr(register_module, function_name, function)

            register_entrypoint("build_file_aliases", build_file_aliases)
            register_entrypoint("rules", rules)
            register_entrypoint("target_types", target_types)

            yield package_name
        finally:
            del sys.modules[package_name]

    def assert_empty(self):
        build_configuration = self.bc_builder.create()
        registered_aliases = build_configuration.registered_aliases
        self.assertEqual(0, len(registered_aliases.objects))
        self.assertEqual(0, len(registered_aliases.context_aware_object_factories))
        self.assertEqual(build_configuration.subsystem_to_providers, FrozenDict())
        self.assertEqual(0, len(build_configuration.rules))
        self.assertEqual(0, len(build_configuration.target_types))

    def test_load_valid_empty(self):
        with self.create_register() as backend_package:
            load_backend(self.bc_builder, backend_package)
        self.assert_empty()

    def test_load_valid_partial_aliases(self):
        aliases = BuildFileAliases(objects={"obj1": DummyObject1, "obj2": DummyObject2})
        with self.create_register(build_file_aliases=lambda: aliases) as backend_package:
            load_backend(self.bc_builder, backend_package)
            build_configuration = self.bc_builder.create()
            registered_aliases = build_configuration.registered_aliases
            self.assertEqual(DummyObject1, registered_aliases.objects["obj1"])
            self.assertEqual(DummyObject2, registered_aliases.objects["obj2"])

    def test_load_invalid_entrypoint(self):
        def build_file_aliases(bad_arg):
            return BuildFileAliases()

        with self.create_register(build_file_aliases=build_file_aliases) as backend_package:
            with self.assertRaises(BuildConfigurationError):
                load_backend(self.bc_builder, backend_package)

    def test_load_invalid_module(self):
        with self.create_register(module_name="register2") as backend_package:
            with self.assertRaises(BuildConfigurationError):
                load_backend(self.bc_builder, backend_package)

    def test_load_missing_plugin(self):
        with self.assertRaises(PluginNotFound):
            self.load_plugins(["Foobar"])

    @staticmethod
    def get_mock_plugin(
        name, version, reg=None, alias=None, after=None, rules=None, target_types=None
    ):
        """Make a fake Distribution (optionally with entry points).

        Note the entry points do not actually point to code in the returned distribution --
        the distribution does not even have a location and does not contain any code, just
        metadata.

        A module is synthesized on the fly and installed into sys.modules under a random
        name. If optional entry point callables are provided, those are added as methods
        to the module and their name (foo/bar/baz in the fake module) is added as the
        requested entry point to the mocked metadata added to the returned dist.

        :param string name: project_name for distribution (see pkg_resources)
        :param string version: version for distribution (see pkg_resources)
        :param callable reg: Optional callable for goal registration entry point
        :param callable alias: Optional callable for build_file_aliases entry point
        :param callable after: Optional callable for load_after list entry point
        :param callable rules: Optional callable for rules entry point
        :param callable target_types: Optional callable for target_types entry point
        """
        plugin_pkg = f"demoplugin{uuid.uuid4().hex}"
        pkg = types.ModuleType(plugin_pkg)
        sys.modules[plugin_pkg] = pkg
        module_name = f"{plugin_pkg}.demo"
        plugin = types.ModuleType(module_name)
        setattr(pkg, "demo", plugin)
        sys.modules[module_name] = plugin

        metadata = {}
        entry_lines = []
        if reg is not None:
            setattr(plugin, "foo", reg)
            entry_lines.append(f"register_goals = {module_name}:foo\n")
        if alias is not None:
            setattr(plugin, "bar", alias)
            entry_lines.append(f"build_file_aliases = {module_name}:bar\n")
        if after is not None:
            setattr(plugin, "baz", after)
            entry_lines.append(f"load_after = {module_name}:baz\n")
        if rules is not None:
            setattr(plugin, "qux", rules)
            entry_lines.append(f"rules = {module_name}:qux\n")
        if target_types is not None:
            setattr(plugin, "tofu", target_types)
            entry_lines.append(f"target_types = {module_name}:tofu\n")
        if entry_lines:
            entry_data = "[pantsbuild.plugin]\n{}\n".format("\n".join(entry_lines))
            metadata = {"entry_points.txt": entry_data}
        return Distribution(project_name=name, version=version, metadata=MockMetadata(metadata))

    def load_plugins(self, plugins):
        load_plugins(self.bc_builder, plugins, self.working_set)

    def test_plugin_load_and_order(self):
        d1 = self.get_mock_plugin("demo1", "0.0.1", after=lambda: ["demo2"])
        d2 = self.get_mock_plugin("demo2", "0.0.3")
        self.working_set.add(d1)

        # Attempting to load 'demo1' then 'demo2' should fail as 'demo1' requires
        # 'after'=['demo2'].
        with self.assertRaises(PluginLoadOrderError):
            self.load_plugins(["demo1", "demo2"])

        # Attempting to load 'demo2' first should fail as it is not (yet) installed.
        with self.assertRaises(PluginNotFound):
            self.load_plugins(["demo2", "demo1"])

        # Installing demo2 and then loading in correct order should work though.
        self.working_set.add(d2)
        self.load_plugins(["demo2>=0.0.2", "demo1"])

        # But asking for a bad (not installed) version fails.
        with self.assertRaises(VersionConflict):
            self.load_plugins(["demo2>=0.0.5"])

    def test_plugin_installs_alias(self):
        def reg_alias():
            return BuildFileAliases(
                objects={"FROMPLUGIN1": DummyObject1, "FROMPLUGIN2": DummyObject2},
            )

        self.working_set.add(self.get_mock_plugin("aliasdemo", "0.0.1", alias=reg_alias))

        # Start with no aliases.
        self.assert_empty()

        # Now load the plugin which defines aliases.
        self.load_plugins(["aliasdemo"])

        # Aliases now exist.
        build_configuration = self.bc_builder.create()
        registered_aliases = build_configuration.registered_aliases
        self.assertEqual(DummyObject1, registered_aliases.objects["FROMPLUGIN1"])
        self.assertEqual(DummyObject2, registered_aliases.objects["FROMPLUGIN2"])

    def test_rules(self):
        def backend_rules():
            return [example_rule]

        with self.create_register(rules=backend_rules) as backend_package:
            load_backend(self.bc_builder, backend_package)
            self.assertEqual(self.bc_builder.create().rules, FrozenOrderedSet([example_rule.rule]))

        def plugin_rules():
            return [example_plugin_rule]

        self.working_set.add(self.get_mock_plugin("this-plugin-rules", "0.0.1", rules=plugin_rules))
        self.load_plugins(["this-plugin-rules"])
        self.assertEqual(
            self.bc_builder.create().rules,
            FrozenOrderedSet([example_rule.rule, example_plugin_rule.rule]),
        )

    def test_target_types(self):
        def target_types():
            return [DummyTarget, DummyTarget2]

        with self.create_register(target_types=target_types) as backend_package:
            load_backend(self.bc_builder, backend_package)
            assert self.bc_builder.create().target_types == (DummyTarget, DummyTarget2)

        class PluginTarget(Target):
            alias = "plugin_tgt"
            core_fields = ()

        def plugin_targets():
            return [PluginTarget]

        self.working_set.add(
            self.get_mock_plugin("new-targets", "0.0.1", target_types=plugin_targets)
        )
        self.load_plugins(["new-targets"])
        assert self.bc_builder.create().target_types == (DummyTarget, DummyTarget2, PluginTarget)

    def test_backend_plugin_ordering(self):
        def reg_alias():
            return BuildFileAliases(objects={"override-alias": DummyObject2})

        self.working_set.add(self.get_mock_plugin("pluginalias", "0.0.1", alias=reg_alias))
        plugins = ["pluginalias==0.0.1"]
        aliases = BuildFileAliases(objects={"override-alias": DummyObject1})
        with self.create_register(build_file_aliases=lambda: aliases) as backend_module:
            backends = [backend_module]
            build_configuration = load_backends_and_plugins(
                plugins, self.working_set, backends, bc_builder=self.bc_builder
            )
        # The backend should load first, then the plugins, therefore the alias registered
        # in the plugin will override the alias registered by the backend.
        registered_aliases = build_configuration.registered_aliases
        self.assertEqual(DummyObject2, registered_aliases.objects["override-alias"])
import cv2

citra = cv2.imread('PVHotSpot.jpg')
citra1 = cv2.imread('PVHotSpot.jpg', cv2.IMREAD_GRAYSCALE)

if citra is not None:
    # The original window titles were swapped: show the color image under the
    # original-image title and the grayscale copy under the grayscale title.
    cv2.imshow('Seek Thermal image', citra)
    cv2.imshow('Grayscale conversion', citra1)
    cv2.waitKey(0)
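# Optional hedged follow-up using standard OpenCV calls: persist the grayscale
# copy and close the windows explicitly (output file name is a placeholder).
#
# cv2.imwrite('PVHotSpot_gray.jpg', citra1)
# cv2.destroyAllWindows()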
from mongoengine import *

from login import User


class Post(Document):
    """A post is the last object in the big list of referenced fields.

    It works like this:
        one Board has many Categories,
        one Category has many Topics (threads),
        one Thread has many Posts,
        one Post has one User.
    """
    author = ReferenceField(User, dbref=False)
    content = StringField()
    date = StringField()
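# Hedged sketch of the other documents the docstring references. Board,
# Category, and Thread are not defined in this file; the field choices below
# are illustrative assumptions only:
#
# class Board(Document):
#     name = StringField()
#
# class Category(Document):
#     board = ReferenceField(Board, dbref=False)
#     name = StringField()
#
# class Thread(Document):
#     category = ReferenceField(Category, dbref=False)
#     title = StringField()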