text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from .oauth2 import Oauth2
class Qq(Oauth2):
    """OAuth2 client for Tencent QQ Connect."""

    NAME = 'QQ'
    AUTHORIZATION_URL = 'https://graph.qq.com/oauth2.0/authorize'
    ACCESS_TOKEN_URL = 'https://graph.qq.com/oauth2.0/token'
    GET_USERINFO_URL = 'https://graph.qq.com/oauth2.0/me'

    def __init__(self):
        super(Qq, self).__init__()

    def parse_token_response(self, response):
        """Extract token fields from the query-string style token response."""
        fields = self._query_to_dict(response)
        self._access_token = fields.get('access_token', '')
        self._expires_in = fields.get('expires_in', '')
        self._refresh_token = fields.get('refresh_token', '')

    def parse_user_info(self, response):
        """Parse the JSONP openid response, then fetch the user's profile."""
        # The /me endpoint answers with JSONP: callback( {...} );
        # strip everything outside the outermost parentheses first.
        start = response.index('(') + 1
        end = response.rindex(')')
        payload = json.loads(response[start:end])
        uid = payload.get('openid', '')
        detail = self.access_resource(
            'GET',
            'https://graph.qq.com/user/get_user_info',
            params={
                'oauth_consumer_key': self._config['client_id'],
                'openid': uid,
            })
        return {
            'uid': uid,
            'name': detail.get('nickname'),
            'avatar': detail.get('figureurl_qq_1'),
            'raw': detail,
        }
|
"""
ghub - Package of the modules.
"""
|
#if 'LocalInputFileList' in locals():
# print "LocalInputFileList is already set"
#else:
# LocalInputFileList="Z.list"
##LocalInputFileList="local_valid_r9026.list"
##LocalInputFileList="r9311.list"
##LocalInputFileList="r9539_Zmumu.list"
##LocalInputFileList="r9573.list";
##LocalInputFileList="testData17.list";
##LocalInputFileList="r9193.list"
##LocalInputFileList="inEITrkConfFalsev3.list"
#print "LocalInputFileList is"
#print LocalInputFileList
#f = open( LocalInputFileList, 'r' )
#InputFileList = f.read().splitlines()
#print InputFileList
#-----------------------------------------------------------------------------
# Athena imports
#-----------------------------------------------------------------------------
from AthenaCommon.Constants import *
from AthenaCommon.AppMgr import theApp
from AthenaCommon.AppMgr import ServiceMgr
from AthenaCommon.AppMgr import ToolSvc
import AthenaPoolCnvSvc.ReadAthenaPool
from AthenaCommon.AlgSequence import AlgSequence
job = AlgSequence()
#-----------------------------------------------------------------------------
# Message Service
#-----------------------------------------------------------------------------
# Set output level threshold (2=DEBUG, 3=INFO, 4=WARNING, 5=ERROR, 6=FATAL )
ServiceMgr.MessageSvc.OutputLevel = ERROR
import AthenaServices
AthenaServices.AthenaServicesConf.AthenaEventLoopMgr.OutputLevel = ERROR
#-----------------------------------------------------------------------------
# Input Datasets
#-----------------------------------------------------------------------------
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags as acf
if not acf.EvtMax.is_locked():
acf.EvtMax=-1
#acf.FilesInput = InputFileList
acf.FilesInput += [
"/gpfs/home/yfukuhar/yazawa-root-file/data15_13TeV.00284484.physics_Main.deriv.DAOD_MUON0.r9264_p3083_p3180/DAOD_MUON0.11515458._000161.pool.root.1"
#"/gpfs/fs6001/yyazawa/data/valid1.424100.Pythia8B_A14_CTEQ6L1_Jpsimu4mu4.recon.AOD.e5112_s3091_r9122_tid10750758_00/AOD.10750758._000194.pool.root.1",
#"/gpfs/fs6001/yyazawa/data/valid1.424100.Pythia8B_A14_CTEQ6L1_Jpsimu4mu4.recon.AOD.e5112_s2887_r9026_tid10522817_00/AOD.10522817._000212.pool.root.1"
]
#-----------------------------------------------------------------------------
# Algorithms
#-----------------------------------------------------------------------------
# BUG FIX: the original set rec.doCBNT BEFORE importing `rec`, which raises
# NameError. Import the reconstruction flags first, then configure them.
from RecExConfig.RecFlags import rec
rec.doCBNT = False
rec.doTrigger = True
from RecExConfig.RecAlgsFlags import recAlgs
recAlgs.doTrigger=True
recAlgs.doAtlfast=False
recAlgs.doMonteCarloReact=False
from TriggerJobOpts.TriggerFlags import TriggerFlags
TriggerFlags.doTriggerConfigOnly=True
rec.doWriteAOD=False
rec.doWriteESD=False
rec.doWriteTAG=False
rec.doAOD=False
rec.doDPD=False
rec.doESD=False
doTAG=False
rec.doTruth=False
rec.doRecoTiming=False
rec.doDetStatus=False
rec.doShowSizeStatistics=False
rec.readTAG=False
rec.readRDO=False
rec.doHist=False
rec.doContainerRemapping=False
rec.doJiveXML=False
rec.doEdmMonitor=False
rec.doDumpPoolInputContent=False
rec.doHeavyIon=False
rec.doHIP=False
rec.doWriteBS=False
rec.doPhysValMonHists=False
rec.doVP1=False
rec.doJiveXML=False
rec.doMuon=False
rec.doCheckDictionary=False
rec.doFileMetaData=False
rec.doCalo=False
rec.doAODCaloCells=False
rec.doEgamma=False
#rec.doESD.set_Value_and_Lock(False) # uncomment if do not run ESD making algorithms
#rec.doWriteESD.set_Value_and_Lock(False) # uncomment if do not write ESD
#rec.doAOD.set_Value_and_Lock(False) # uncomment if do not run AOD making algorithms
#rec.doWriteAOD.set_Value_and_Lock(False) # uncomment if do not write AOD
#rec.doWriteTAG.set_Value_and_Lock(False) # uncomment if do not write TAG
#
#include("RecExCommon/RecExCommon_topOptions.py")
#
#ToolSvc.TrigDecisionTool.TrigDecisionKey='xTrigDecision'
#from TrigDecisionTool.TrigDecisionToolConf import Trig__TrigDecisionTool
#from TrigNavigation.TrigNavigationConf import HLT__Navigation
#ToolSvc += CfgMgr.Trig__TrigDecisionTool( "TrigDecisionTool" )
#ToolSvc += CfgMgr.HLT__Navigation( "Navigation" )
#ToolSvc.TrigDecisionTool.Navigation.ReadonlyHolders=True
#ServiceMgr.MessageSvc.setWarning += [ "", "HolderFactory" ]
#ServiceMgr.MessageSvc.infoLimit = 99999999
#ServiceMgr.MessageSvc.debugLimit = 99999999
# GRL
ToolSvc += CfgMgr.GoodRunsListSelectionTool("MyGRLTool",GoodRunsListVec=["current_grl.xml"])
include("RecExCommon/RecExCommon_topOptions.py")
from CalcEfficiency.CalcEfficiencyConf import *
job += CalcEffAlg( message = "2",
OutputLevel = ERROR,
OutputFile = "test_data15_1220_OnlyOneLine_01.root",
TapMethod = "JPZtap",
Extrapolate = True,
GRL = False,
DataType = "data15"
)
#GRL = True
#TapMethod = "NoTag",
#TapMethod = "Ztap",
#TapMethod = "JPZtap",
#OutputFile = "test_inEITrkConfFalsev3.root",
#Datatype = "data16"
include("TriggerTest/TriggerTestCommon.py")
print job
#-----------------------------------------------------------------------------
|
import sys
from numpy import *
from matplotlib import pyplot as plt
# string variable pointing to filesystem location of csv
fn="../data/data_provinces.csv"
# loading file with 3 columns
name=loadtxt(fn, unpack=True, delimiter=',', skiprows=1, dtype='a',
usecols=arange(1)) # array defined for the first column
region=loadtxt(fn, unpack=True, delimiter=',', skiprows=1, dtype='a',
usecols=arange(1)+1) # array defined for the second column
population,lifeExpectancy,incomePerCapita,expenditurePerCapita=loadtxt(fn,unpack=True, delimiter=',', skiprows=1,usecols=arange(4)+2) # array defined for the remaining columns
# processing for data visualization using matplotlib
region_list=unique(region)
color_list=array(['r', 'b', 'r', 'c', 'r','g','b','b','r','b','g','c','r','b','c','b','r'])
colors=zeros(len(name), dtype='a')
# list length and assign elements a color
for i in arange(len(name)):
colors[i]=color_list[region_list==region[i]][0]
plt.clf()
# plot elements
plt.scatter(incomePerCapita,lifeExpectancy, s=population/min(population)*20, c=colors, alpha=0.5)
# plt.xscale(2)
plt.show()
# Main
if __name__ == '__main__':
    # BUG FIX: the original called main(), but no main() is defined anywhere
    # in this file (all the work above already runs at module level), so the
    # call raised NameError. Nothing further to do here.
    pass
## experimental
# N=50
# colors = random.rand(N)
# area = pi * (15 * random.rand(50))**2 # 0 to 15 point radiuses
#
# plt.scatter(x, y, s=area, c=colors, alpha=0.5)
# plt.show()
|
from django.shortcuts import render,redirect
from django.contrib.auth import login
from django.views.generic import TemplateView,ListView, CreateView, UpdateView
from django.http import HttpResponse
from ..models import User,IndividualProfile,InstitutionProfile,City,State,EventModel,EventImage,Category,ApplyEventModel,SeatsEventModel
from django.urls import reverse_lazy
from ..forms import IndividualProfileForm,IndividualSignUpForm,ApplyEventForm,SeatsEventForm
class IndividualSignUpView(CreateView):
    """Sign-up page for individual (non-institution) users."""

    model = User
    form_class = IndividualSignUpForm
    template_name = 'individual/signup_form.html'

    def get_context_data(self, **kwargs):
        # Tell the shared signup template which account type it is rendering.
        kwargs['user_type'] = 'Individual'
        return super().get_context_data(**kwargs)

    def form_valid(self, form):
        # Create the user, log them in immediately, then send them on to
        # complete their individual profile.
        user = form.save()
        login(self.request, user)
        return redirect('/individual_profile')
def IndividualProfileView(request):
    """Render the individual-profile form; save it and go home on valid POST."""
    is_post = request.method == "POST"
    profile_form = IndividualProfileForm(request.POST) if is_post else IndividualProfileForm()
    if is_post and profile_form.is_valid():
        # Attach the profile to the logged-in user before persisting.
        profile = profile_form.save(commit=False)
        profile.user_id = request.user.id
        profile.save()
        return redirect('/')
    return render(request, "individual/profile.html", {"form": profile_form})
def load_cities(request):
    """AJAX endpoint: render the cities of the selected state, sorted A-Z."""
    selected_state = request.GET.get('state')
    city_queryset = City.objects.filter(state_id=selected_state).order_by('name')
    return render(request, 'individual/city_drop_list.html', {'cities': city_queryset})
# def IndividualDashboardView(request):
# return render(request,'individual/individual_home.html')
def ApplyEvent(request, event_id):
    """Show an event's detail/apply page and register the current user.

    GET renders the apply form with the event's details; POST saves an
    ApplyEventModel row for the user (once only) and decrements the event's
    available seat count.
    """
    print(event_id)
    uid = request.user.id
    ev_type = EventModel.objects.get(id=event_id)
    evimg = EventImage.objects.filter(event_id=event_id)
    evinst = InstitutionProfile.objects.get(user_id=ev_type.user_id)
    city = City.objects.get(id=ev_type.city_id)
    state = State.objects.get(id=ev_type.state_id)
    category = Category.objects.get(id=ev_type.category_id)
    # Existing applications by this user for this event (used to block repeats).
    check = ApplyEventModel.objects.filter(event_id=ev_type.id, user_id=uid)
    sev = SeatsEventModel.objects.get(event_id=event_id)
    upcoming = EventModel.objects.all().order_by('start_date')[:3]
    inst_pro = User.objects.get(id=uid)
    clen = len(check)
    print(upcoming)
    if request.method == "POST":
        form = ApplyEventForm(request.POST)
        if form.is_valid():
            application = form.save(commit=False)
            application.user_id = request.user.id
            application.event_id = event_id
            if ev_type.fee == 0:
                # BUG FIX: the original used `==` (a no-op comparison), so
                # free events were never auto-confirmed.
                application.is_confirm = True
            if not check:
                application.save()
                # NOTE(review): no lower bound is enforced here; the count
                # can go negative if available_seats is already 0 -- confirm.
                sev.available_seats = int(sev.available_seats) - 1
                sev.save()
            else:
                print("error")
            #return redirect('/techei/eventimage/')
        else:
            print(form.errors)
    else:
        form = ApplyEventForm()
    return render(request, "individual/applyevent.html", {"form": form, "event_id": event_id, "evimg": evimg, "evinst": evinst, "ev_type": ev_type, "city": city, "state": state, "category": category, "sev": sev, "clen": clen, "upcoming": upcoming, "inst_pro": inst_pro})
|
import os
import sys
sys.path.append(os.getenv('cf'))
import cartoforum_api
from cartoforum_api import config |
#
#3D Ising model on simple cubic lattice
#
#
import pyalps
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pyalps.plot
#%matplotlib inline
import numpy as np
from scipy import optimize
from scipy import interpolate
import pyalps.fit_wrapper as fw
numeratorfigs=1
#prepare the input parameters
# One parameter set per (lattice size, temperature) pair: six sizes,
# sixty temperatures spanning [0, 6].
parms = [
    {
        'LATTICE': "simple cubic lattice",
        'T': t,
        'J': 1,
        'THERMALIZATION': 20000,
        'SWEEPS': 100000,
        'UPDATE': "cluster",
        'MODEL': "Ising",
        'L': l,
    }
    for l in [2, 4, 6, 8, 10, 12]
    for t in np.linspace(0.0, 6.0, 60)
]
#write the input file and run the simulation
input_file = pyalps.writeInputFiles('parm7a',parms)
pyalps.runApplication('spinmc',input_file,Tmin=5)
# use the following instead if you have MPI
#pyalps.runApplication('spinmc',input_file,Tmin=5,MPI=2)
pyalps.evaluateSpinMC(pyalps.getResultFiles(prefix='parm7a'))
#load the susceptibility and collect it as function of temperature T
data = pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm7a'),['|Magnetization|', 'Connected Susceptibility', 'Specific Heat', 'Binder Cumulant', 'Binder Cumulant U2'])
magnetization_abs = pyalps.collectXY(data,x='T',y='|Magnetization|',foreach=['L'])
connected_susc = pyalps.collectXY(data,x='T',y='Connected Susceptibility',foreach=['L'])
spec_heat = pyalps.collectXY(data,x='T',y='Specific Heat',foreach=['L'])
binder_u4 = pyalps.collectXY(data,x='T',y='Binder Cumulant',foreach=['L'])
binder_u2 = pyalps.collectXY(data,x='T',y='Binder Cumulant U2',foreach=['L'])
#make plots
plt.figure()
pyalps.plot.plot(magnetization_abs)
plt.xlabel('Temperatura $T$')
plt.ylabel('Magnetizacija $|m|$')
plt.title('3D Izingov model')
plt.legend(loc='best')
plt.savefig("figure_SC%d.eps"%(numeratorfigs),dpi=300)
numeratorfigs+=1
plt.figure()
pyalps.plot.plot(connected_susc)
plt.xlabel('Temperatura $T$')
plt.ylabel('Susceptibilnost $\chi$')
plt.title('3D Izingov model')
plt.legend(loc='best')
plt.savefig("figure_SC%d.eps"%(numeratorfigs),dpi=300)
numeratorfigs+=1
plt.figure()
pyalps.plot.plot(spec_heat)
plt.xlabel('Temperatura $T$')
plt.ylabel('Specificna toplota $c_v$')
plt.title('3D Izingov model')
plt.legend(loc='best')
plt.savefig("figure_SC%d.eps"%(numeratorfigs),dpi=300)
numeratorfigs+=1
plt.figure()
pyalps.plot.plot(binder_u4)
plt.xlabel('Temperatura $T$')
plt.ylabel('Binderov kumulant U4 $g$')
plt.title('3D Izingov model')
plt.legend(loc='best')
plt.savefig("figure_SC%d.eps"%(numeratorfigs),dpi=300)
numeratorfigs+=1
plt.figure()
pyalps.plot.plot(binder_u2)
plt.xlabel('Temperatura $T$')
plt.ylabel('Binderov kumulant U2 $g$')
plt.title('3D Izingov model')
plt.legend(loc='best')
plt.savefig("figure_SC%d.eps"%(numeratorfigs),dpi=300)
numeratorfigs+=1
# OK OK OK OK OK OK
###################################################################
f = open('binderdata_3d_Ising_SC.txt','w')
f.write(pyalps.plot.convertToText(binder_u4))
f.close()
# OK OK OK OK OK OK
#############################################################
# ROUND
r=binder_u4
for d in r:
d.x = np.around(d.x,1)
fg = open('binderdata_rounded_t_3d_Ising_SC.txt','w')
fg.write(pyalps.plot.convertToText(r))
fg.close()
# OK OK OK OK OK OK
####################################################################
# REDOSLED
red=binder_u4
for d in red:
d.x=np.around(d.x,1)
fh=open('binderdata_rounded_t_3d_Ising_SC.txt','w')
fh.write(pyalps.plot.convertToText(red))
fh.close()
lvrednost=np.array([q.props['L'] for q in red])
print(lvrednost)
sel=np.argsort(lvrednost)
print(sel)
red=np.array(red)
red=red[sel]
s=open('binderdata_rounded_t_redosled_3d_Ising_SC.txt','w')
s.write(pyalps.plot.convertToText(red))
s.close()
# OK OK OK OK OK OK OK OK
###################################################################################################################################################################
# Tc estimate: rescale the x axis to reduced temperature t=(T-Tc)/Tc and
# plot the Binder cumulant for each candidate Tc.
for Tc in np.linspace(4.0,5.0,100):
    # NOTE(review): `listfortc = binder_u4` binds a name, it does not copy,
    # so the in-place x updates below accumulate across ALL 100 iterations
    # instead of restarting from the raw temperatures each time -- confirm
    # whether a deep copy of the data sets was intended here.
    listfortc=binder_u4
    for d in listfortc:
        d.x -= Tc
        d.x = d.x/Tc
    plt.figure()
    pyalps.plot.plot(binder_u4)
    plt.xlabel('$t=(T-T_c)/T_c, T_c=%.3f$'%(Tc))
    plt.ylabel('Binderov kumulant U4 $g$')
    plt.title('3D Izingov model')
    plt.legend(loc='best')
    plt.savefig('figure_SC%d.eps'%(numeratorfigs),dpi=300)
    numeratorfigs+=1
# OK OK OK OK OK OK OK
#################################################################################3
# a_nu procena
Tc=4.51
for a in np.linspace(1.0,4.0,40):
s=binder_u4
for d in s:
d.x -= Tc
d.x = d.x/Tc
l = d.props['L']
d.x = d.x * pow(float(l),a)
plt.figure()
pyalps.plot.plot(s)
plt.xlabel('$L^a(T-T_c)/T_c, a=%.2f$' % a)
plt.ylabel('Binderov kumulant U4 $g$')
plt.title('3D Izingov model, BCC')
plt.legend(loc='best')
plt.savefig("figure_SC%d.eps"%(numeratorfigs),dpi=300)
numeratorfigs+=1
# OK OK OK OK OK OK OK OK OK
#############################################################################33
def test_funk(x,a,b):
return a*x**b
#
# NUMERICAL DERIVATIVE
#
#
#
#
def derivative(f, a, method='central', h=0.1):
    """Numerically differentiate f at a with a finite-difference stencil.

    method: 'central' (second order), 'forward' or 'backward' (first order).
    h: step size of the stencil.
    Raises ValueError for an unknown method name.
    """
    if method == 'central':
        return (f(a + h) - f(a - h)) / (2 * h)
    if method == 'forward':
        return (f(a + h) - f(a)) / h
    if method == 'backward':
        return (f(a) - f(a - h)) / h
    raise ValueError("Method must be 'central', 'forward' or 'backward'.")
file=open('binderdata_rounded_t_redosled_3d_Ising_SC.txt')
file_data=np.loadtxt(file,usecols=(0,1))
x=file_data[:,0]
y=file_data[:,1]
llista = [2,4,6,8,10,12]
n=60
# Split the concatenated (x, y) columns into per-L series, build an
# interpolant for each lattice size, and evaluate its numerical derivative.
# The original synthesized variable names (x2, y2, funk2, ...) with exec();
# plain dictionaries keyed by L are equivalent and far safer.
x_by_L = {}
y_by_L = {}
funk_by_L = {}
xizv_by_L = {}
yizv_by_L = {}
for idx, lsize in enumerate(llista):
    x_by_L[lsize] = x[idx * n: idx * n + n]
    y_by_L[lsize] = y[idx * n: idx * n + n]
    funk_by_L[lsize] = interpolate.interp1d(x_by_L[lsize], y_by_L[lsize])
    xizv_by_L[lsize] = np.arange(0.1, 4, 0.1)
    yizv_by_L[lsize] = derivative(funk_by_L[lsize], xizv_by_L[lsize])
# Slope of U4 at the estimated critical temperature, one value per L
# (consumed by the curve_fit call below).
lista = []
tc = 4.511
for lsize in llista:
    lista.append(derivative(funk_by_L[lsize], tc))
plt.figure()
plt.plot(x_by_L[12], y_by_L[12], label='$U_{4}$', color='b')
plt.plot(xizv_by_L[12], yizv_by_L[12], label='$dU_{4}/dT$', color='r')
plt.legend(loc='best')
plt.savefig("figure_SC%d.eps" % (numeratorfigs), dpi=300)
numeratorfigs += 1
params,params_covariance=optimize.curve_fit(test_funk,llista,lista)
plt.figure()
plt.scatter(llista,lista,label='Podaci',color='b')
plt.plot(llista,test_funk(llista,params[0],params[1]),label='Fit',color='r')
plt.xlabel('$L$')
plt.ylabel(r'$dU_{4}/dT|T_{C}\approx L^{1/\nu}$')
plt.title(r'$1/\nu=$ %.13s,$\nu=$ %.13s' % (params[1],1/params[1]))
plt.legend(loc='upper left')
plt.savefig("figure_SC%d.eps"%(numeratorfigs),dpi=300)
numeratorfigs+=1
# OK OK OK OK OK OK OK OK OK
##############################################################################################
#
#
#
#
#
#
Tc=4.51
a=1.6
#make a data collapse of the connected susceptibility as a function of (T-Tc)/Tc:
suscol=connected_susc
for d in suscol:
d.x -= Tc
d.x = d.x/Tc
l = d.props['L']
d.x = d.x * pow(float(l),a)
g=suscol
for two_minus_eta in np.linspace(1.0,3.0,30):
suscol=g
for d in suscol:
l = d.props['L']
d.y = d.y/pow(float(l),two_minus_eta)
plt.figure()
pyalps.plot.plot(suscol)
plt.xlabel('$L^a(T-T_c)/T_c, Tc=%.3f, a=%.2f$'%(Tc,a))
plt.ylabel(r'$L^{\gamma/\nu}\chi_c,\gamma/\nu=$ %.4s' % two_minus_eta)
plt.title('3D Ising model')
plt.savefig("figure_SC%d.eps"%(numeratorfigs),dpi=300)
numeratorfigs+=1
# OK OK OK OK OK OK
#############################################################################################
#
#
#
#
#
#
Tc=4.51
a=1.6
#make a data collapse of the |magnetization| as a function of (T-Tc)/Tc
magcol=magnetization_abs
for d in magcol:
d.x -= Tc
d.x = d.x/Tc
l = d.props['L']
d.x = d.x * pow(float(l),a)
h=magcol
for beta_over_nu in np.linspace(0.5,0.6,100):
magcol=h
for d in magcol:
l = d.props['L']
d.y = d.y / pow(float(l),-beta_over_nu)
plt.figure()
pyalps.plot.plot(magnetization_abs)
plt.xlabel('$L^a(T-T_c)/T_c, Tc=%.3f, a=%.2f$'%(Tc,a))
plt.ylabel(r'Magnetizacija $|m|L^\beta/\nu, \beta/\nu=$ %.4s' % beta_over_nu)
plt.title('3D Izingov model')
plt.savefig("figure_SC%d.eps"%(numeratorfigs),dpi=300)
numeratorfigs+=1
# OK OK OK OK OK OK
#########################
|
# Generated by Django 3.0 on 2019-12-30 04:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace custm_user.createTime with an auto-updating lastUpdatedTime."""

    dependencies = [
        ('myapp', '0004_remove_custm_user_sxj'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='custm_user',
            name='createTime',
        ),
        migrations.AddField(
            model_name='custm_user',
            name='lastUpdatedTime',
            # auto_now=True: refreshed on every save(), not just on creation.
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy import API
from tweepy import Cursor
import twitter_credentials
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt, mpld3
import datetime as dt
import re
from textblob import TextBlob
import spacy
from collections import Counter
nlp = spacy.load('en')
from wtforms import Form, StringField, validators
class TwitterClient():
    """Thin wrapper around tweepy's API bound to one user's timeline."""

    def __init__(self, twitter_user=None):
        self.auth = TwitterAuthenticator().authenticate_twitter_app()
        self.twitter_client = API(self.auth)
        # None means "the authenticated account" in the Cursor calls below.
        self.twitter_user = twitter_user

    def get_twitter_client_api(self):
        """Return the raw tweepy API object."""
        return self.twitter_client

    def get_user_timeline_tweets(self, num_tweets):
        """Return up to num_tweets tweets from the user's timeline."""
        tweets = []
        for tweet in Cursor(self.twitter_client.user_timeline, id=self.twitter_user).items(num_tweets):
            tweets.append(tweet)
        return tweets

    def get_friend_list(self, num_friends):
        """Return up to num_friends items for the user.

        BUG FIX: the original appended to (and returned) the integer
        argument num_friends, which raised AttributeError on first use;
        results now accumulate in friend_list.
        NOTE(review): this still cursors user_timeline, not the friends
        endpoint -- confirm which listing was actually intended.
        """
        friend_list = []
        for friend in Cursor(self.twitter_client.user_timeline, id=self.twitter_user).items(num_friends):
            friend_list.append(friend)
        return friend_list

    def get_home_timeline_tweets(self, num_tweets):
        """Return up to num_tweets tweets from the home timeline."""
        home_timeline_tweets = []
        for tweet in Cursor(self.twitter_client.home_timeline).items(num_tweets):
            home_timeline_tweets.append(tweet)
        return home_timeline_tweets
class TwitterAuthenticator():
    """Builds an OAuth handler from the credentials module."""

    def authenticate_twitter_app(self):
        """Return a tweepy OAuthHandler loaded with app and access tokens."""
        handler = OAuthHandler(twitter_credentials.CONSUMER_KEY,
                               twitter_credentials.CONSUMER_SECRET)
        handler.set_access_token(twitter_credentials.ACCESS_TOKEN,
                                 twitter_credentials.ACCESS_TOKEN_SECRET)
        return handler
class Streamer():
    """Streams live tweets matching a hashtag list into a file."""

    def __init__(self):
        self.twitter_authenticator = TwitterAuthenticator()

    def stream_tweets(self, fetched_tweets_filename, hash_tag_list):
        """Open a filtered stream; matching tweets are appended to the file."""
        auth = self.twitter_authenticator.authenticate_twitter_app()
        listener = TwitterListener(fetched_tweets_filename)
        live_stream = Stream(auth, listener)
        live_stream.filter(track=hash_tag_list)
class TwitterListener(StreamListener):
    """StreamListener that appends each raw tweet payload to a file."""

    def __init__(self, fetched_tweets_filename):
        # Path of the file that on_data appends raw JSON payloads to.
        self.fetched_tweets_filename = fetched_tweets_filename

    def on_data(self, data):
        """Persist one raw payload; always return True to keep streaming."""
        try:
            print(data)
            with open(self.fetched_tweets_filename, 'a') as tf:
                tf.write(data)
            return True
        except BaseException as e:
            # Swallow errors so one bad payload doesn't kill the stream.
            print("Error on data: %s" % str(e))
            return True

    def on_error(self, status):
        """Disconnect on HTTP 420 (rate limited); log any other status."""
        if status == 420:
            # Returning False disconnects the stream to avoid growing lockouts.
            return False
        print(status)
class TweetAnalyzer():
    """Cleans, scores and aggregates tweets into pandas structures."""

    def clean_tweet(self, tweet):
        """Strip mentions, URLs and punctuation; collapse whitespace."""
        stripped = re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet)
        return ' '.join(stripped.split())

    def analyze_sentiment(self, tweet):
        """Return 1 / 0 / -1 for positive / neutral / negative polarity."""
        analysis = TextBlob(self.clean_tweet(tweet))
        if analysis.sentiment.polarity > 0:
            return 1
        elif analysis.sentiment.polarity == 0:
            return 0
        else:
            return -1

    def find_most_common_words(self, tweet_string):
        """Return the 15 most frequent content words of tweet_string.

        BUG FIX: the original compared `token.pos_ == ('NOUN' or 'VERB' or
        'PROPN')`; that `or` chain evaluates to just 'NOUN', so verbs and
        proper nouns were silently dropped. Use a membership test instead.
        """
        parsed_tweet_string = nlp(tweet_string)
        cleaned = [token.text for token in parsed_tweet_string
                   if not token.is_stop and not token.is_punct
                   and token.pos_ in ('NOUN', 'VERB', 'PROPN')]
        word_count = Counter(cleaned)
        return word_count.most_common(15)

    def tweets_to_data_frame(self, tweets):
        """Build a DataFrame with text, date, likes, retweets and sentiment."""
        df = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['tweets'])
        # df['id'] = np.array([tweet.id for tweet in tweets])
        # df['len'] = np.array([len(tweet.text) for tweet in tweets])
        df['date'] = np.array([tweet.created_at for tweet in tweets])
        # df['source'] = np.array([tweet.source for tweet in tweets])
        df['likes'] = np.array([tweet.favorite_count for tweet in tweets])
        df['retweets'] = np.array([tweet.retweet_count for tweet in tweets])
        df['sentiment'] = np.array([self.analyze_sentiment(tweet) for tweet in df['tweets']])
        return df

    def get_daily_total(self, df):
        """Return the number of tweets per calendar day as a Series."""
        count_df = df['date'].groupby(df['date'].dt.floor('d')).size().reset_index(name='count')
        daily_totals = count_df['count']
        return daily_totals
class Tweeter():
    """High-level report helpers built on TwitterClient + TweetAnalyzer."""

    def __init__(self):
        self.twitter_client = TwitterClient()
        self.tweet_analyzer = TweetAnalyzer()
        self.api = TwitterClient().get_twitter_client_api()

    ## staying DRY
    def get_tweets(self, user, count):
        """Fetch `count` tweets from `user`'s timeline."""
        tweets = self.api.user_timeline(screen_name=user, count=count)
        return tweets

    def latest_tweets(self, user, count):
        """Return text + date of the 5 most recent tweets as a DataFrame."""
        tweets = self.get_tweets(user, count)
        df = self.tweet_analyzer.tweets_to_data_frame(tweets)
        latest = df[['tweets', 'date']].head(5)
        return latest

    def most_popular(self, user, count):
        """Return the 5 most-liked tweets of the fetched batch."""
        tweets = self.get_tweets(user, count)
        df = self.tweet_analyzer.tweets_to_data_frame(tweets)
        most_popular = df.sort_values(by=['likes'], ascending=False)
        popular_selection = most_popular.head(5)
        return popular_selection

    def positivity_rating(self, user, count):
        """Mean sentiment score (-1..1) over the fetched tweets."""
        tweets = self.get_tweets(user, count)
        df = self.tweet_analyzer.tweets_to_data_frame(tweets)
        positivity = df['sentiment']
        overall_positivity = np.mean(positivity)
        return overall_positivity

    def posting_frequency(self, user, count):
        """Plot daily tweet counts; return the chart as embeddable HTML."""
        tweets = self.get_tweets(user, count)
        df = self.tweet_analyzer.tweets_to_data_frame(tweets)
        daily_totals = self.tweet_analyzer.get_daily_total(df)
        fig = plt.figure()
        plt.plot(daily_totals)
        plt.xlabel('Number of daily tweets')
        htmlfig = mpld3.fig_to_html(fig)
        return htmlfig

    def most_common_words(self, user, count):
        """Top-15 frequent words across the fetched tweets."""
        tweets = self.get_tweets(user, count)
        tweet_array = np.array([ tweet.text for tweet in tweets ])
        # Flatten every tweet into one string for the frequency count.
        tweet_bag = np.array2string(tweet_array)
        most_common_words = self.tweet_analyzer.find_most_common_words(tweet_bag)
        return most_common_words
|
import random

# Ask for a comma-separated list of names and pick one at random to pay.
raw_names = input("enter the names separated by a comma ")
people = raw_names.split(", ")
chosen_index = random.randint(0, len(people) - 1)
print(f"{people[chosen_index]} should be paying the bill")
from . import db,login_manager
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from datetime import datetime
import os
|
import pyperclip
import pyautogui

# Absolute path of the script whose lines are pushed through the clipboard.
scriptPath = r'C:\Users\Ramstein\PycharmProjects\Keras\Recognizing Handwritten Digits.py'
with open(scriptPath) as f:
    lines = f.readlines()
for line in lines:
    # Copy one line, then synthesize Ctrl+V into whatever window has focus.
    pyperclip.copy(line)
    # NOTE(review): pyautogui.hotkey() returns None, so fromClipboard is
    # always None here -- confirm whether pyperclip.paste() was intended.
    fromClipboard = pyautogui.hotkey('ctrl', 'v')
    print(fromClipboard, '\b')
# Stray XPath string literal: a bare expression with no effect at runtime.
'/html/body/div/span[2]/pre/a'
|
# -*- coding:utf-8 -*-
class Solution:
    """Hand-rolled regex matcher supporting '.' (any single char) and '*'
    (zero or more of the preceding char), matched against the FULL string.

    NOTE(review): this is an ad-hoc iterative implementation with many
    special cases; preserved verbatim, with the original Chinese comments
    translated to English.
    """
    # s and pattern are both plain strings
    def match(self, s, pattern):
        # write code here
        if not s and not pattern:
            return True
        if not pattern:
            return False
        if not s:  # s is empty: pairs like a*, b** can all be eliminated
            for i in range(len(pattern) - 1):
                if pattern[i] != "*" and pattern[i + 1] != "*":
                    return False
            if pattern[len(pattern) - 1] != "*":  # a trailing literal char cannot be eliminated
                return False
            return True
        i = 0
        j = 0
        # Walk s and pattern in lockstep, resolving '*' by local lookarounds.
        while i < len(s) and j < len(pattern):
            if s[i] == pattern[j]:
                i += 1
                j += 1
            else:
                if pattern[j] == "*":  # current pattern char is '*'
                    if j == 0:  # pattern may not start with '*'
                        return False
                    elif j > 0 and pattern[j - 1] != s[i] and pattern[j - 1] != ".":  # char before '*' cannot cover s[i]
                        if j + 1 < len(pattern) - 1 and i - 1 >= 0 and pattern[j + 1] == s[i - 1]:
                            # eliminate the preceding char, e.g. s='cb' vs pattern='c*cb'
                            if j + 2 < len(pattern) - 1 and pattern[j + 2] == s[i]:
                                # can be eliminated
                                j += 3
                            else:
                                # cannot be eliminated
                                return False
                        elif j + 1 < len(pattern) - 1 and pattern[j + 1] == s[i]:
                            # use the '*' zero times and step past it
                            j += 2
                        else:
                            return False
                    elif j > 0 and (pattern[j - 1] == s[i] or pattern[j - 1] == "."):
                        # the char before '*' does cover s[i]: count repeats on both sides
                        times_i = 1
                        times_j = 0
                        while i + 1 < len(s) and s[i + 1] == s[i]:
                            # consume the run of chars equal to s[i], e.g. 'aaaa'
                            times_i += 1
                            i += 1
                        while j + 1 < len(pattern) and pattern[j + 1] == s[i]:
                            # consume matching pattern chars, e.g. 'a*aa'
                            times_j += 1
                            j += 1
                        if j + 1 < len(pattern) and pattern[j + 1] == "*":
                            # pattern continues like 'a*aa*'
                            j += 1
                            times_j -= 1
                            while j + 1 < len(pattern) and pattern[j + 1] == s[i]:
                                # pattern continues like 'a*aa*a'
                                times_j += 1
                                j += 1
                            if j + 2 < len(pattern) and pattern[j + 2] == '*':
                                # pattern continues like 'a*aac*'
                                j += 2
                                while j + 1 < len(pattern) and pattern[j + 1] == s[i]:
                                    # pattern continues like 'a*aac*aa'
                                    times_j += 1
                                    j += 1
                        if times_j <= times_i + 1:
                            j += 1
                            i += 1
                        else:
                            return False
                elif pattern[j] == ".":
                    i += 1
                    j += 1
                elif j + 1 < len(pattern) and pattern[j + 1] == "*":
                    # next pattern char is '*': skip the unmatched pair
                    j += 2
                else:
                    return False
        # End of scan: both exhausted means match; any leftover pattern must
        # be removable as x* pairs.
        if i == len(s) and j == len(pattern):
            return True
        else:
            if j != len(pattern):
                for k in range(j, len(pattern) - 1):
                    if pattern[k] != "*" and pattern[k + 1] != "*":
                        return False
                if pattern[len(pattern) - 1] != "*":
                    return False
                else:
                    return True
            else:
                return False
s = Solution()
print(s.match("bbbba", ".*a*a")) |
import tkinter as tk

print("Start Program")
window = tk.Tk()  # create the root window
window.mainloop()  # block here until the window is closed
print("END PROGRAM")
from node import *
class Searcher(object):
    """Searcher that manipulates the searching process (breadth-first)."""

    def __init__(self, start, goal):
        self.start = start
        self.goal = goal

    def print_path(self, state):
        """Print the chain of predecessor states from start to `state`."""
        path = []
        while state:
            path.append(state)
            state = state.prev
        path.reverse()
        print("\n-->\n".join([str(state) for state in path]))

    def generalSearch(self, step):
        """Run a general (breadth-first) search from start towards goal.

        `step` is the starting step counter; the total is printed at the end.
        """
        # Open list: insert(0, ...) + pop() gives FIFO order, i.e. BFS.
        queue = [self.start]
        # Closed set of already-expanded states.
        visited = set()
        # BUG FIX: the original read the module-global `start` here instead
        # of self.start, and its `found` flag missed the start == goal case.
        state = self.start
        while queue and state != self.goal:  # FIX: `and`, not bitwise `&`
            step += 1  # count the steps
            state = queue.pop()
            if state in visited:
                continue
            visited.add(state)
            for s in state.next():
                queue.insert(0, s)
        if state == self.goal:
            self.print_path(state)
            print("Find solution")
            print("the steps taken to find the solution: %s" % step)
        else:
            print("No solution found")
#the main function
if __name__ == "__main__":
    # Unit test: solve a small 8-puzzle instance and time the search.
    import time
    print("Search for solution\n")
    # initial state from class Node
    start = Node([1,0,2,5,4,3,8,7,6])
    # goal state from class Node
    goal = Node([1,2,3,4,5,6,7,8,0])
    # use a general search algorithm
    search = Searcher(start, goal)
    step = 0
    # BUG FIX: `elapsed` was printed but never computed (NameError);
    # measure the wall-clock duration of the search explicitly.
    t0 = time.time()
    search.generalSearch(step)
    elapsed = time.time() - t0
    print("Elabse Time untill find a solution: %s" % elapsed)
|
#-*- coding: utf-8 -*-
import zipfile
from xmind.tests import logging_configuration as lc
from xmind.tests import base
from xmlscomparator.xml_diff import create_xml_diff_from_strings
from xmlscomparator.comparators.type_comparator import TypeComparator
from xmlscomparator.comparators.text_comparator import TextComparator
from xmlscomparator.comparators.attr_comparator_policy import AttrComparatorPolicy
from xmlscomparator.comparators.attr_comparator import AttrComparator
from xmind.tests.create_xmind_file_from_json import CreateXmindFileFromJson
class TestE2ECreate(base.Base):
    """End-to-end test: build an xmind file from JSON and diff its XML
    against a known-good reference archive."""

    def getLogger(self):
        # Lazily create a single logger per test-class instance.
        if not getattr(self, '_logger', None):
            self._logger = lc.get_logger('TestE2EOpen')
        return self._logger

    def test_e2e_create(self):
        """Generate test.xmind and compare its content with the reference."""
        obj = CreateXmindFileFromJson('test.xmind', 'xmind/tests/test_file.json')
        obj.create_xmind_file()
        # First archive member of the generated file is the content to compare.
        unarchived = zipfile.ZipFile('test.xmind', 'r')
        test_file_to_compare = unarchived.read(unarchived.namelist()[0])
        # NOTE(review): the reference content is read from namelist()[1] --
        # the member order of the reference archive evidently differs; confirm.
        unarchived = zipfile.ZipFile('xmind/tests/test_file.xmind', 'r')
        test_file = unarchived.read(unarchived.namelist()[1])
        # Comparator chain: node type -> text -> attributes.
        _type_comparator = TypeComparator(lc.get_logger('type_comparator'))
        _text_comparator = TextComparator(lc.get_logger('text_comparator'))
        _attr_comparator = AttrComparator(lc.get_logger('attr_comparator'))
        _attr_policy = AttrComparatorPolicy(lc.get_logger('attr_comparator_policy'))
        # Ignore layout-dependent width; require marker-id and type to match.
        _attr_policy.add_attribute_name_to_skip_compare('svg:width')
        _attr_policy.add_attribute_name_to_compare('marker-id')
        _attr_policy.add_attribute_name_to_compare('type')
        _attr_comparator.set_attr_comparator_policy(_attr_policy)
        _attr_comparator.set_check_values(False)
        _text_comparator.set_next_comparator(_attr_comparator)
        _type_comparator.set_next_comparator(_text_comparator)
        _comparator = create_xml_diff_from_strings(test_file, test_file_to_compare, lc.get_logger('create_xml_diff_from_strings'))
        _comparator.set_comparator(_type_comparator)
        # Sections whose content is allowed to differ between the two files.
        _comparator.add_types_to_skip('extensions')
        _comparator.add_types_to_skip('notes')
        _comparator.add_types_to_skip('control-points')
        self.assertTrue(_comparator.compare())
|
import logging
import os
import threading
import time
from typing import *
import matplotlib.pyplot as plt
import schedule
from dotmap import DotMap
import charts
import log
import processor
from access import accessControl
from db import db, Const
from lib import app
accessdenied = 'accessdenied'
_macd_data = DotMap()
def check_loggers():
    """Dump the handlers attached to the app logger and the root logger."""
    for handler in log.handlers:
        log.info(handler)
    root_logger = logging.getLogger()
    for handler in root_logger.handlers:
        log.info(handler)
def wrapper(fn):
    """Decorator for bot command handlers.

    Runs fn(task), routes its returned message to the appropriate sink
    (Telegram chat and/or terminal log), logs exceptions instead of
    propagating them, and records the handler's wall-clock duration.
    """
    def wrapped(task):
        start = time.time_ns()
        try:
            msg = fn(task)
            if msg and len(msg) > 0 and task.message.chat.id:
                log.info("chat id %s",task.message.chat.id)
                # Guard against a misconfigured (non-numeric) chat id.
                if not isinstance(task.message.chat.id, int) \
                        or ( isinstance(task.message.chat.id, str) and not task.message.chat.id.isnumeric()):
                    log.warn("Please set the adminChatID");
                else:
                    app.send_msg(msg, task.message.chat.id)
                # Commands issued from a terminal also echo to the log.
                if task.message.source == 'terminal':
                    log.info("\n%s", msg)
            elif task.message.chat.id:
                pass
                # app.notify_action(task.message.chat.id)
        except Exception as e:
            # Never let a single command crash the dispatcher; just log it.
            log.exception(e)
        end = time.time_ns()
        log.debug("time taken for %s : %s ms", fn.__name__, app.floor((end - start) / 1000 / 1000, 2))
    return wrapped
def identity(x):
    """Return x unchanged (default post-conversion function for overlap)."""
    return x


def overlap(source, target, types, functions=None):
    """Overlay converted values from source onto the front of target.

    Each source[i] is converted with types[i] and then passed through
    functions[i] (when provided and truthy); the result replaces target[i].
    Positions of target beyond len(source) keep their defaults.
    Returns target (mutated in place).
    """
    for idx, raw in enumerate(source):
        convert = identity
        if functions and len(functions) > idx and functions[idx]:
            convert = functions[idx]
        target[idx] = convert(types[idx](raw))
    return target
@wrapper
def cm_echo(task):
    """Health-check command: always replies 'Alive!'."""
    return 'Alive!'
def util_balance_hash(balances: Dict[str, float]):
    """Build a stable fingerprint of all non-dust balances.

    Keys are visited in sorted order so the hash is order-independent;
    positions at or below 0.005 are ignored as dust.
    """
    parts = []
    for symbol in sorted(balances.keys()):
        amount = balances[symbol]
        if amount > 0.005:
            parts.append(symbol + str(app.floor(amount, 3)))
    return "".join(parts)
@wrapper
def cm_bal(task):
    """Report account balances, with percent change since the last snapshot.

    Percent changes are only shown when the balance composition (hash)
    matches the stored one; otherwise a fresh snapshot is persisted.
    """
    amounts, balances = app.account_total()
    previous_amounts = db.config(Const.BALANCE, {})
    previous_bal_hash = db.config(Const.BALANCE_HASH, "")
    total = app.floor(sum(x[1] for x in amounts), 2)
    # 'bal short' reports only the grand total.
    if task.params and len(task.params) > 0 and task.params[0] == 'short':
        return f"Balance: {total}"
    account_bal_hash = util_balance_hash(balances)
    is_hash_matching = account_bal_hash == previous_bal_hash
    msg_lines = []
    for x in amounts:
        # Percent difference vs the stored snapshot; 'NA' for new assets.
        prc_diff = app.floor((x[1] - previous_amounts[x[0]]) * 100 / previous_amounts[x[0]]) \
            if x[0] in previous_amounts else 'NA'
        bal_str = f'{x[0]} -> {x[1]} ({prc_diff})' if is_hash_matching else f'{x[0]} -> {x[1]}'
        # Hide dust positions worth 10 units or less.
        if x[1] > 10:
            msg_lines.append(bal_str)
    msg_lines.append("." * 15)
    msg_lines.append(f'Total: {total}')
    msg = "\n".join(msg_lines)
    if not is_hash_matching:
        # Composition changed: persist a new snapshot for next time.
        db.set_config(Const.BALANCE, {x[0]: x[1] for x in amounts})
        db.set_config(Const.BALANCE_HASH, account_bal_hash)
        log.info("saved new balances!")
    return msg
@wrapper
def cm_bye(task):
    """Terminate the bot process immediately."""
    print("Exiting.. Bye!")
    os._exit(0)
@wrapper
def cm_revise_sl(task):
    """Revise stop-loss orders; params: [symbol='ALL', stoploss_prc=4]."""
    symbol, stoploss_prc = overlap(task.params, ['ALL', 4], [str, float], [str.upper])
    resp = app.revise_sl(symbol, stoploss_prc)
    return f'new sl orders:{len(resp)}'
@wrapper
def cm_create_sl(task):
    """Create stop-loss orders for one symbol or all free non-USDT assets."""
    symbol, stoploss_prc = overlap(task.params, ['ALL', 4], [str, float], [str.upper])
    if symbol == 'ALL':
        symbols = [balance[0].upper() for balance in app.get_free_balances_non_usdt()]
    else:
        symbols = [app.symbol_with_currency(symbol)]
    resp = app.create_stop_loss_orders(symbols=symbols, test=False, stoploss_prc=stoploss_prc)
    if len(resp) > 0:
        return f'new sl orders:{len(resp)}'
    return None
@wrapper
def cm_price_alerts(task):
    """List notable price moves; params: [timeframe='5m', candles=6, threshold=0]."""
    params = overlap(task.params, ['5m', 6, 0], [str, int, float])
    messages = []
    tickers = app.tickers()
    for x in db.config(Const.SYMBOLS, []):
        # Quote against USDT when the pair exists, otherwise fall back to BTC.
        base_currency = 'USDT' if f'{x}USDT' in tickers else 'BTC'
        symbol = x + base_currency
        msg = app.price_alert(symbol=symbol, timeframe=params[0], count=int(params[1]), threshold=float(params[2]))
        if msg:
            messages.append(msg)
    if len(messages) > 0:
        # Alerts starting with the up-arrow are gainers; everything else is a loser.
        up = list(filter(lambda y: y[0] == '⬆', messages))
        down = list(filter(lambda y: y[0] != '⬆', messages))
        result = ['- ' * 10,f'Coins Activity: {params[0]} x {params[1]}']
        result.append('- ' * 10)
        result.extend(up)
        result.append('- ' * 10)
        result.extend(down)
        return "\n".join(result)
@wrapper
def cm_current_prices(task):
    """List the current USDT price of every tracked symbol."""
    tickers = app.tickers()
    lines = []
    for name in db.config(Const.SYMBOLS, []):
        symbol = app.symbol_with_currency(name)
        price = tickers[symbol]
        # BTC-quoted pairs are converted to USDT via the BTCUSDT ticker.
        usdt_price = price if 'USDT' in symbol else price * tickers['BTCUSDT']
        lines.append(f"{symbol}: {app.floor(usdt_price, 4)}, \n")
    return "".join(lines)
@wrapper
def cm_add_symbols(task):
    """Add uppercased symbols from the task params to the tracked-symbol set."""
    symbols = set(db.config(Const.SYMBOLS, ['ADA']))
    for name in list(task.params):
        log.debug("adding symbol %s", name)
        symbols.add(name.upper())
    db.set_config(Const.SYMBOLS, list(symbols))
@wrapper
def cm_rm_symbols(task):
    """Remove uppercased symbols given in the task params from the tracked set."""
    symbols = set(db.config(Const.SYMBOLS, []))
    for name in list(task.params):
        log.debug("removing symbol %s", name)
        symbols.discard(name.upper())
    db.set_config(Const.SYMBOLS, list(symbols))
@wrapper
def cm_stop_loss_info(task):
    """Summarize open stop-loss orders and their total value."""
    stats = app.stop_loss_orders_percentage()
    lines = []
    total = 0
    for row in stats:
        value = app.floor(row[2] * row[4], 2)
        total += value
        lines.append(f'{row[1]} -> {value} , {app.floor(row[5])}%')
    if not lines:
        return "No Stoploss Orders Found!"
    lines.append('-' * 15)
    lines.append(f'Stoploss Total -> {app.floor(total, 2)}')
    return "\n".join(lines)
@wrapper
def cm_cancel_sl(task):
    """Cancel stop-loss orders for a given symbol (default: ALL)."""
    symbol = 'ALL'
    if task.params and len(task.params) > 0:
        symbol = task.params[0].upper()
    log.debug("Cancelling Stop Losses for Symbol: %s ", symbol)
    stats = app.cancel_all_sl_orders(symbol=symbol)
    return f'cancelled {len(stats)} Stop Loss Orders'
@wrapper
def cm_order_sell_cancel(task):
    """Cancel open SELL orders, optionally restricted to one symbol."""
    symbol = task.params[0].upper() if task.params and len(task.params) > 0 else 'ALL'
    cancelled = []
    for order in app.get_open_orders(side='SELL'):
        if symbol != 'ALL' and symbol not in order['symbol']:
            continue
        log.info("Cancelling Sell order for : %s", order['symbol'])
        app.client.cancel_order(symbol=order['symbol'], orderId=str(order['orderId']))
        cancelled.append(order)
    return f"{len(cancelled)} Sell Orders Cancelled"
@wrapper
def cm_order_buy_cancel(task):
    """Cancel open BUY orders, optionally restricted to one symbol."""
    symbol = task.params[0].upper() if task.params and len(task.params) > 0 else 'ALL'
    cancelled = []
    for order in app.get_open_orders(side='BUY'):
        if symbol != 'ALL' and symbol not in order['symbol']:
            continue
        log.info("Cancelling Buy order for : %s", order['symbol'])
        app.client.cancel_order(symbol=order['symbol'], orderId=str(order['orderId']))
        cancelled.append(order)
    return f"{len(cancelled)} Buy Orders Cancelled"
@wrapper
def cm_sell_x_prc(task):
    """Place real sell orders; params: [symbol='ALL', quantity, price_over_market=0]."""
    symbol, quantity, over_market = overlap(task.params, ['ALL', None, 0], [str, float, float], [str.upper])
    return sell_x_prc_internal(symbol, quantity, over_market, test=False)
@wrapper
def cm_buy_x_prc_test(task):
    """Simulate a buy order; params: [symbol, quantity, price_over_market=0]."""
    symbol, quantity, over_market = overlap(task.params, [None, None, 0], [str, float, float], [str.upper])
    order = app.buy_x_prc(symbol, quantity, over_market, test=True)
    total = app.floor_new(float(order['quantity']) * float(order['price']), 2)
    return f"Test Buy Order Placed: {order['symbol']}, price:{order['price']}, total: {total}"
@wrapper
def cm_buy_x_prc(task):
    """Place a real buy order; params: [symbol, quantity, price_over_market=0]."""
    symbol, quantity, over_market = overlap(task.params, [None, None, 0], [str, float, float], [str.upper])
    order = app.buy_x_prc(symbol, quantity, over_market, test=False)
    total = app.floor_new(float(order['quantity']) * float(order['price']), 2)
    return f"Buy Order Placed: {order['symbol']}, price:{order['price']}, total: {total}"
@wrapper
def cm_sell_x_prc_test(task):
    """Simulate sell orders; params: [symbol='ALL', quantity, price_over_market=0]."""
    symbol, quantity, over_market = overlap(task.params, ['ALL', None, 0], [str, float, float], [str.upper])
    return sell_x_prc_internal(symbol, quantity, over_market, test=True)
def sell_x_prc_internal(symbol, quantity, priceOverMarket, test=True):
    """Place (or simulate) percentage sell orders and format a summary message."""
    log.debug(f'attempting for {quantity}, {priceOverMarket}')
    response = app.sell_x_percent(symbol, quantity, priceOverMarket, test)
    log.debug(response)
    lines = []
    total = 0.0
    for order in response:
        amount = float(order["quantity"]) * float(order["price"])
        total += amount
        lines.append(f'{order["symbol"]} -> {app.floor(amount, 2)}')
    lines.append('.' * 15)
    lines.append(f'Sell Total : {app.floor(total, 2)}')
    test_str = "Test " if test else ""
    return '\n'.join([f' {test_str}Sell Orders Placed'] + lines)
@wrapper
def cm_save_snapshot(task):
    """Persist the current account snapshot and echo it back."""
    snapshot = app.get_snapshot()
    db.set_config(Const.SNAPSHOT, snapshot)
    lines = ['Snapshot'] + [f'{key} -> {snapshot[key]}' for key in snapshot]
    return "\n".join(lines)
@wrapper
def cm_snapshot_total(task):
    """Return the total value of the stored snapshot."""
    return app.snapshot_total()
@wrapper
def cm_ta(task):
    """Draw TA chart(s); params: [asset, timeframe='1h', candles=1000]."""
    task.params = overlap(task.params, [None, '1h', 1000], [str, str, int], [app.symbol_with_currency])
    return create_chart_internal(task, chart='ta', draw=True)
@wrapper
def cm_ta_hints(task):
    """Return TA signals without drawing; params: [asset, timeframe='1h', candles=1000]."""
    task.params = overlap(task.params, [None, '1h', 1000], [str, str, int], [app.symbol_with_currency])
    return create_chart_internal(task, chart='ta', draw=False)
@wrapper
def cm_candles(task):
    """Draw candle chart(s); params: [asset, timeframe='15m', candles=50]."""
    task.params = overlap(task.params, [None, '15m', 50], [str, str, int], [app.symbol_with_currency])
    return create_chart_internal(task, chart='candles', draw=True)
def create_chart_internal(task, chart='ta', draw=True):
    """Render TA or candle charts, send them to the task's chat and return signals.

    chart: 'ta' (indicator panels, optionally for all of 1h/4h/1d) or
    'candles' (per symbol; 'ALLBTC'/'ALLUSDT' charts every tracked symbol).
    When draw is False only the textual signals are produced.
    """
    params = task.params
    log.debug("charting: %s", params)
    if not params[0]:
        raise Exception("The Asset name is needed")
    asset = params[0]
    files = []
    signals = []
    if chart == 'ta':
        msg1 = msg2 = msg3 = None
        if params[1] == 'all':
            fig, ax = plt.subplots(3, 4) if draw else (
                None, [[None] * 4] * 3)
            msg1 = charts.ta(asset, '1h', params[2], fig, ax[0])
            msg2 = charts.ta(asset, '4h', params[2], fig, ax[1])
            msg3 = charts.ta(asset, '1d', params[2], fig, ax[2])
        else:
            fig, ax = plt.subplots(4) if draw else (None, [None] * 4)
            msg1 = charts.ta(asset, params[1], params[2], fig, ax)
        signals.extend(m for m in (msg1, msg2, msg3) if m)
        if draw:
            filename = 'charts/' + asset + "_" + str(int(round(time.time() * 1000))) + '.png'
            fig.savefig(filename, dpi=300)
            files.append((filename, asset))
    elif chart == 'candles':
        if params[0] == 'ALLBTC' or params[0] == 'ALLUSDT':
            for symbol in db.config(Const.SYMBOLS):
                filename, msg = charts.candles(app.symbol_with_currency(symbol), params[1], params[2])
                files.append((filename, app.symbol_with_currency(symbol)))
                # BUG FIX: previously only the LAST symbol's signal survived
                # (and `msg` was unbound when SYMBOLS was empty); collect each
                # symbol's signal as it is produced.
                if msg:
                    signals.append(msg)
        else:
            filename, msg = charts.candles(asset, params[1], params[2])
            files.append((filename, asset))
            if msg:
                signals.append(msg)
    for entry in files:
        app.send_photo(entry[0], caption=f'Chart: {entry[1]}, {params[1]} x {params[2]}',
                       chat_id=task.message.chat.id)
        os.remove(entry[0])
    return "\n".join(signals)
@wrapper
def cm_balance_pie(task):
    """Send a pie chart of the current balance distribution."""
    filename, total = charts.balance_pie()
    app.send_photo(filename, caption=f'Balance: {total}', chat_id=task.message.chat.id)
    os.remove(filename)
@wrapper
def cm_balance_chart(task):
    """Send a line chart of the stored balance checkpoints."""
    filename = charts.balance_chart(db.config('balcheckpoints', []))
    app.send_photo(filename, caption='Balance Chart', chat_id=task.message.chat.id)
    os.remove(filename)
@wrapper
def cm_balance_checkpoint(task):
    """Append the current account total to the rolling balance history (max 500)."""
    amounts, _balances = app.account_total()
    total = sum(x[1] for x in amounts)
    checkpoints = db.config('balcheckpoints', [])
    # Drop the oldest entries so the history stays bounded.
    while len(checkpoints) > 500:
        checkpoints.pop(0)
    checkpoints.append(total)
    db.set_config('balcheckpoints', checkpoints)
@wrapper
def cm_my_commands(task):
    """List the commands the requesting user is allowed to run.

    The admin sees everything; other users see the commands whose access
    list contains their (lowercased) username.
    """
    author = task.message.chat.username.lower()
    allowed = [
        name for name in accessManagement.keys()
        if author == accessControl.adminUserId
        or (accessManagement[name] and author in accessManagement[name])
    ]
    # BUG FIX: unconditional list.remove() raised ValueError for users whose
    # filtered list did not contain these internal pseudo-commands.
    for hidden in ('accessdenied', 'mine'):
        if hidden in allowed:
            allowed.remove(hidden)
    return "\n".join(allowed)
@wrapper
def cm_schd(task):
    """Schedule a command to run every N seconds; params: [seconds, command, *args]."""
    seconds = int(task.params[0])
    command_str = "/" + " ".join(task.params[1:])
    sub_command = task.params[1]
    new_message = DotMap(task.message.toDict())
    new_message.text = command_str
    # Unique tag so this particular schedule can later be cleared on its own.
    new_message.scheduletag = time.time()
    # Avoid cyclic scheduled commands and the accessdenied ones
    log.info("trying to schedule: %s", command_str)
    if accessdenied not in command_str and 'schd' not in command_str and sub_command in commands:
        scheduled_obj = schedule \
            .every(seconds).seconds \
            .do(processor.process_message, new_message).tag(sub_command, 'all', new_message.scheduletag)
        log.info(scheduled_obj)
def cm_price_reach_condition(task, condition):
    """Run the tail command once the symbol's price crosses the target.

    params: [symbol, price, command...]; `condition` is 'above' or 'below'.
    Intended to run from a schedule: once it fires it clears its own
    schedule tag so the command triggers only once.
    """
    symbol = app.symbol_with_currency(task.params[0])
    price = float(task.params[1])
    tickers = app.tickers()
    log.debug(f"Checking if Price is {condition} : {symbol} {price}")
    curr_price = tickers[symbol]
    test = curr_price > price if condition == 'above' else curr_price < price
    if test:
        log.info(f"target price reached: {symbol} {condition} {price}")
        # Re-dispatch the remaining params as a fresh command message.
        command = task.params[2:]
        command_str = "/" + " ".join(command)
        new_message = DotMap(task.message.toDict())
        new_message.text = command_str
        processor.process_message(new_message)
        if task.message.scheduletag:
            log.info("clearing the schedule of Price Condition")
            schedule.clear(task.message.scheduletag)
@wrapper
def cm_is_price_above(task):
    """Trigger the tail command once the price rises above the target."""
    return cm_price_reach_condition(task, 'above')
@wrapper
def cm_is_price_below(task):
    """Trigger the tail command once the price falls below the target."""
    return cm_price_reach_condition(task, 'below')
@wrapper
def cm_clear_schedule(task):
    """Clear scheduled jobs by tag; params: [command name | 'all']."""
    if len(task.params) == 0:
        return "missing tags for clearing schedules:\ncommand name | all"
    schedule.clear(task.params[0])
@wrapper
def cm_print_schd(task):
    """Describe every scheduled job, or report that none exist."""
    descriptions = [
        "Every " + str(job.interval) + " " + str(job.unit) + " " + job.job_func.args[0].text
        for job in schedule.jobs
    ]
    msg = "\n".join(descriptions)
    if msg and len(msg) > 0:
        return msg
    return "No Scheduled Tasks found!"
@wrapper
def cm_access_denied(task):
    """Reply that the requested command is not permitted for this user."""
    return "access denied : " + " ".join(task.params)
@wrapper
def cm_macd_show(task):
    """Show the last recorded MACD signal per symbol/timeframe key."""
    lines = [f"{key} -> {_macd_data[key][0]} at {_macd_data[key][1]}" for key in _macd_data]
    return "\n".join(lines)
@wrapper
def cm_macd(task):
    """Report new MACD crossovers; params: [symbols|'all', timeframe='1h', candles=1000]."""
    params = overlap(task.params, [None, '1h', 1000], [str, str, int])
    log.debug(params)
    if(params[0].lower() == "all"):
        params[0] = ",".join(db.config(Const.SYMBOLS, []))
    messages = []
    for symbol in params[0].split(","):
        symbol_with_currency = app.symbol_with_currency(symbol)
        df = app.dataframe(app.klines(symbol_with_currency, params[1], params[2]))
        key = f'{symbol_with_currency}{params[1]}'
        resp = charts.macd_x_over(df)
        signal, signal_time = resp['signal'], resp['time']
        old = _macd_data[key]
        # Only announce a signal that is genuinely new: the direction changed
        # and its timestamp is later than the cached one.
        if not old or ( old[0] != signal and old[1] < signal_time):
            _macd_data[key] = signal, signal_time
            messages.append(f"{symbol}: {signal} at {signal_time}")
    if len(messages) > 0:
        return "\n".join([f"{params[1]} -> "] + messages)
# Chat command name -> handler. Keys are what users type after '/'.
commands = {
    'echo': cm_echo,
    'bal': cm_bal,
    'bye': cm_bye,
    'sellcancel': cm_order_sell_cancel,
    'buycancel': cm_order_buy_cancel,
    'slrevise': cm_revise_sl,
    'slcreate': cm_create_sl,
    'slcancel': cm_cancel_sl,
    'slinfo': cm_stop_loss_info,
    'hot': cm_price_alerts,
    'now': cm_current_prices,
    'symboladd': cm_add_symbols,
    'symbolrm': cm_rm_symbols,
    'sellx': cm_sell_x_prc,
    'buyx': cm_buy_x_prc,
    'buyxtest': cm_buy_x_prc_test,
    'sellxtest': cm_sell_x_prc_test,
    'snapshot': cm_save_snapshot,
    'snapshotbal': cm_snapshot_total,
    'ta': cm_ta,
    'hints': cm_ta_hints,
    'candles': cm_candles,
    'schd': cm_schd,
    'schdcancel': cm_clear_schedule,
    'schdinfo': cm_print_schd,
    'balpie': cm_balance_pie,
    'balchart': cm_balance_chart,
    'balcheckpoint': cm_balance_checkpoint,
    'mine': cm_my_commands,
    'ifabove': cm_is_price_above,
    'ifbelow': cm_is_price_below,
    'accessdenied': cm_access_denied,
    'macd': cm_macd,
    'macdshow': cm_macd_show
}
# Example arguments shown to users for selected commands.
examples = {
    'sellxtest': "10 .01",
    'hot': "1h 200 1",
    'symboladd': "ADA",
    'symbolrm': "ADA",
    'ta': "ADA 1h 300",
    'candles': "ADA 1h 300",
    'schd': "10 echo",
    # BUG FIX: 'bal' appeared twice; a dict literal keeps only the last
    # duplicate, so the "short" entry was silently discarded. Keep the
    # value that was actually in effect.
    'bal': "save",
    "now": ""
}
# Commands not listed in accessManagement below are accessible only to the
# Admin (`adminUserId`).
publicMembers = accessControl.groups.public
privateMembers = accessControl.groups.private
# Command name -> list of usernames allowed to run it.
accessManagement: Dict[str, List] = {
    'echo': publicMembers,
    'bal': privateMembers,
    'hot': publicMembers,
    'now': publicMembers,
    'slinfo': privateMembers,
    'sellxtest': privateMembers,
    'buyxtest': privateMembers,
    'snapshotbal': privateMembers,
    'ta': publicMembers,
    'hints': publicMembers,
    'candles': publicMembers,
    'mine': publicMembers,
    'accessdenied': publicMembers,
    'schd': privateMembers,
    'schdinfo': privateMembers
}
def run_command(command: str, params: List):
    """Run a named command as the admin from the terminal and return its result."""
    task = DotMap()
    task.message.chat.id = accessControl.adminChatId
    task.message.source = 'terminal'
    task.params = params
    return commands[command](task)
|
from pytriqs.gf.local import *
from pytriqs.archive import *
import numpy as np
import matplotlib.pyplot as plt
class TightBinding:
    """
    With this class one can construct and study a single- or multi-band
    Hubbard model, either on a square or a cubic lattice.
    """
    def __init__(self, lattice, numk, archive=None, hopping=None):
        # BUG FIX: the original assigned the undefined global name `hopping`,
        # raising NameError on every instantiation. It is now an optional
        # constructor argument (also settable later via set_hopping()).
        self.lattice = lattice
        self.numk = numk
        self.archive = archive
        self.hopping = hopping

    def dispersion_relation(self, kx, ky):
        """Return the band dispersion at (kx, ky). Not implemented yet."""
        pass

    def set_atoms(self, atom):
        """
        Define the unit-cell of the model

        Arguments:
        ----------
        atom: list of tuples, where the tuples denote the position of the
            orbital in the unit cell (real space coordinates)
        """

    def set_primitive_vectors(self, vectors):
        """
        Define the primitive vectors in real space

        Arguments:
        ---------
        vectors: list of (name, coord) tuples, where coord represents the
            coordinates of the primitive vector
        """
        # BUG FIX: the original line was truncated mid-statement
        # (`setattr(self, "k"+name, ` with no value) and did not parse.
        # Only the real-space vectors are stored here; the reciprocal ("k")
        # vectors need a proper definition — TODO confirm intended formula.
        for name, coord in vectors:
            setattr(self, "a" + name, coord)

    def set_hopping(self, hopping):
        """Store the hopping amplitudes used by the model."""
        self.hopping = hopping
# Example model definition.
# BUG FIX: the original `atom` literal was missing its closing brace and
# did not parse.
atom = {"A": (0, 0), "B": (1, 0)}
unit_cell = [(0, 0), (0, 0), (0, 0)]
real_space_vectors = {"a1": (1, 0), "a2": (0, 1)}
|
# Definition for a binary tree node.
class TreeNode:
    """A binary-tree node holding a value and optional left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
    def findMode(self, root):
        """
        Return all values that occur most frequently in the tree.

        :type root: TreeNode
        :rtype: List[int]

        Uses an iterative in-order walk: the original recursive version
        overflowed Python's call stack on deep (degenerate) trees, compared
        with `== None` instead of `is None`, and leaked its counts via an
        instance attribute.
        """
        if root is None:
            return []
        counts = {}
        stack = []
        node = root
        while stack or node:
            while node:
                stack.append(node)
                node = node.left
            node = stack.pop()
            counts[node.val] = counts.get(node.val, 0) + 1
            node = node.right
        maximum = max(counts.values())
        return [val for val, cnt in counts.items() if cnt == maximum]
import numpy as np
import matplotlib.pyplot as plt
#from scipy.optimize import fsolve
from scipy.optimize import newton
def euler(f, y0, a, h):
    """Solve the IVP y' = f(t, y), y(a[0]) = y0 on a[0] <= t <= a[1]
    with the explicit (forward) Euler method.

    If h <= 1 it is interpreted as the step size, otherwise as the number
    of nodes. Returns the node array t and the solution array y.
    """
    if h <= 1:
        t = np.arange(a[0], a[1] + h, h)
    else:
        t = np.linspace(a[0], a[1], h)
    h = t[1] - t[0]
    y = np.zeros_like(t)
    y[0] = y0
    for i in range(1, len(t)):
        y[i] = y[i - 1] + h * f(t[i - 1], y[i - 1])
    return t, y
def beuler(f, y0, a, h):
    """Solve the IVP y' = f(t, y), y(a[0]) = y0 on a[0] <= t <= a[1] with
    the implicit Backward Euler method.

    If h <= 1 it is interpreted as the step size, otherwise as the number
    of nodes. Each step solves y[i] = y[i-1] + h*f(t[i], y[i]) for y[i]
    with Newton's method, seeded at the previous value.
    """
    if h <= 1:
        t = np.arange(a[0], a[1] + h, h)
    else:
        t = np.linspace(a[0], a[1], h)
    h = t[1] - t[0]
    y = np.zeros_like(t)
    y[0] = y0
    for i in range(1, len(t)):
        residual = lambda x, i=i: y[i - 1] + h * f(t[i], x) - x
        y[i] = newton(residual, y[i - 1])
    return t, y
from onegov.election_day import _
from onegov.election_day.formats.common import FileImportError
def unsupported_year_error(year):
    """Build a FileImportError for an election year that cannot be imported yet."""
    message = _(
        "The year ${year} is not yet supported", mapping={'year': year}
    )
    return FileImportError(message)
def set_locale(request):
    """ Sets the locale of the request by the Accept-Language header,
    falling back to 'en' when the header is missing or unsupported. """
    requested = request.headers.get('Accept-Language') or 'en'
    if requested not in request.app.locales:
        requested = 'en'
    request.locale = requested
def translate_errors(errors, request):
    """ Translates and interpolates the given error messages in place.

    Accepts either a list (of strings or FileImportError-like objects) or a
    mapping of key -> list of such values; each value is replaced by a dict
    with 'message' and, when available, 'filename' and 'line'.
    """

    def as_result(value):
        # A value is either a plain (translatable) string or an object
        # carrying .error plus optional .filename / .line attributes.
        result = {'message': request.translate(getattr(value, 'error', value))}
        if hasattr(value, 'filename'):
            result['filename'] = value.filename
        if hasattr(value, 'line'):
            result['line'] = value.line
        return result

    if isinstance(errors, list):
        for ix, value in enumerate(errors):
            errors[ix] = as_result(value)
        return

    for key, values in errors.items():
        errors[key] = [as_result(value) for value in values]
|
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import numpy
# Seed 35 so the clustering results are reproducible.
numpy.random.seed(35)
# Elbow plot.
def elbowPlot(data, maxKClusters):
    """Draw the elbow plot (inertia vs. k) for KMeans with k = 1..maxKClusters."""
    inertias = []
    for k in range(1, maxKClusters + 1):
        model = KMeans(n_clusters=k)
        model.fit(data)
        inertias.append(model.inertia_)
    plt.figure()
    ks = list(range(1, maxKClusters + 1))
    plt.plot(ks, inertias, 'ro-', markersize=8, lw=2)
    plt.grid(True)
    plt.xlabel('Number of Clusters')
    plt.ylabel('Inertia')
    plt.show()
######---------------------MAIN-------------------------------------
df = pd.read_csv("hf.csv")
df.head()
df.describe()
# Group the records by year.
print(df.groupby('year').size())
# Keep only 2016, the most recent year.
datos2016=df[df['year']==2016]
# Drop rows containing missing values.
datos2016=datos2016.dropna()
# Drop the first three columns (year, country and region).
datos2016_2=datos2016.drop(['year','countries','region'],axis=1)
# Use 3 clusters (chosen from the elbow plot below).
elbowPlot(datos2016_2,10)
cluster=KMeans(n_clusters=3)
cluster.fit(datos2016_2)
# Inspect the cluster centers and label them with the column names.
centros=cluster.cluster_centers_
centros=pd.DataFrame(centros)
centros.columns=list(datos2016_2)
# Export the centers.
centros.to_csv("centros.csv")
datos2016['cluster']=cluster.labels_
datos2016.plot.scatter(x='pf_score',y='ef_score',c='cluster',colormap='viridis')
# datos2016 now holds the cluster labels; datos2016_2 holds the columns used for clustering.
datos2016[datos2016['countries']=='Colombia']
# Look at other countries (e.g. Mexico) that fall in Colombia's cluster.
datos2016[datos2016['countries']=='Mexico']
# Filter the countries of each cluster.
datos2016[datos2016['cluster']==1]
datos2016[datos2016['cluster']==2]
|
###### ITC 106 - Jarryd Keir - Student Number 11516086
import os.path
import traceback
import sys
#### Variable Section - ensure that variables are clear before starting ####
# -1 sentinels mark "not yet entered" for the input-validation loops below.
inputMarkAss1 = -1
inputMarkAss2 = -1
inputMarkExam = -1
outputMarkAss1 = 0
outputMarkAss2 = 0
outputMarkExam = 0
# Assessment weightings (percent of the final mark).
AssWeight1 = 20
AssWeight2 = 30
ExamWeight = 50
# Bonus-mark tiers: a percentage applied to marks above each band boundary,
# plus a flat base amount for the upper tiers.
BonusMark1Percent = 10
BonusMark2Percent = 15
BonusMark2Base = 2
BonusMark3Percent = 5
BonusMark3Base = 20
TotalWeightAssMark = 0
WeightedTotalMark = 0
BonusPlusWeight = 0
BonusMarkOutput = 0
StudentID = -1
StudentName = ""
RetryInputLoop = True
RetryInput = "Y"
StudentTextHeader ="Student ID Student Name A1 A2 Final Exam Weighted Total Weighted Total with Bonus\n---------------------------------------------------------------------------------------------------------"
# TODO:
# 1. Read existing records from IUA.txt and display the per-student fields
#    (ID, name, weighted marks, totals, bonus).
# 2. Prompt "Do you want to enter marks for another student (Y/N)?" — 'Y'
#    loops, 'N' quits, anything else re-asks.
# 3. Thank-you messages between entries.
# 4. Append each record to IUA.txt in the working directory.
# Assignment tasks: NS diagram of the logic, test data, trim comments.
#### Main Code ####
# Create the output file with its header next to this script on first run.
try:
    # BUG FIX: os.getcwd was referenced without calling it, so WorkDir held
    # the function object instead of the current directory path.
    WorkDir = os.getcwd()
    dir, filename = os.path.split(os.path.abspath(__file__))
    WorkDir = WorkDir + "/IUA.txt"
    if not os.path.isfile(dir + "/IUA.txt"):
        file = open(dir + "/IUA.txt", "w+")
        file.write(StudentTextHeader)
        file.close()
except ValueError:
    print("error")
print("-----------------------------------------------------------------------------------------\nThe Innovation University of Australia (IUA) Grade System\n-----------------------------------------------------------------------------------------\n")
print("Please enter all marks out of 100.")

def read_mark(prompt):
    """Prompt until a whole-number mark in [0, 100] is entered; return it."""
    while True:
        raw = input(prompt)
        try:
            mark = int(raw)
        except ValueError:
            print("Please enter all marks out of 100.")
            continue
        if mark < 0 or mark > 100:
            print("Please enter a value between 0 and 100!")
            continue
        return mark

while RetryInputLoop is True:
    # BUG FIX: re-arm the sentinels each iteration. Previously StudentID and
    # the mark variables kept their values from the prior student, so every
    # prompt loop was skipped from the second student onwards and the same
    # record was appended again.
    StudentID = -1
    StudentName = ""
    while isinstance(StudentID, str) or StudentID < 0:
        StudentID = input("Please enter student ID: ")
        try:
            StudentID = int(StudentID)
            print(StudentID)
        except ValueError:
            print("Student ID is not a number, please try again.")
            StudentID = -1
    # input() always returns str, so no cast/try is needed here.
    # BUG FIX: the original error path assigned the undefined name `null`.
    while not StudentName:
        StudentName = input("Please enter student Name: ")
    inputMarkAss1 = read_mark("Please enter the marks for Assignment 1: ")
    inputMarkAss2 = read_mark("Please enter the marks for Assignment 2: ")
    inputMarkExam = read_mark("Please enter the marks for the Final Exam: ")
    print("\nThank you!\n")
    # Weighted marks: Assignment 1 = 20%, Assignment 2 = 30%, Exam = 50%.
    outputMarkAss1 = inputMarkAss1 * (AssWeight1 / 100)
    outputMarkAss2 = inputMarkAss2 * (AssWeight2 / 100)
    outputMarkExam = inputMarkExam * (ExamWeight / 100)
    TotalWeightAssMark = outputMarkAss1 + outputMarkAss2
    WeightedTotalMark = outputMarkAss1 + outputMarkAss2 + outputMarkExam
    BonusMarkOutput = 0
    # Bonus tiers (per the code): 50-70 -> 10% of marks over 50;
    # 70-90 -> 15% of marks over 70 plus 2; 90-100 -> 5% of marks over 90
    # plus 20.
    if int(WeightedTotalMark) > 50 and WeightedTotalMark <= 70:
        BonusMarkOutput = (WeightedTotalMark - 50) * (BonusMark1Percent / 100)
    if WeightedTotalMark > 70 and WeightedTotalMark <= 90:
        BonusMarkOutput = (WeightedTotalMark - 70) * (BonusMark2Percent / 100) + BonusMark2Base
    if WeightedTotalMark > 90 and WeightedTotalMark <= 100:
        BonusMarkOutput = (WeightedTotalMark - 90) * (BonusMark3Percent / 100) + BonusMark3Base
    BonusPlusWeight = float(BonusMarkOutput + int(WeightedTotalMark))
    if BonusPlusWeight > 100:  # cap the final mark at the absolute maximum
        BonusPlusWeight = 100
    print("Weighted mark for Assignment 1: ", int(outputMarkAss1))
    print("Weighted mark for Assignment 2: ", int(outputMarkAss2))
    print("Total weighted mark of the assignments: ", int(TotalWeightAssMark), "\n")
    print("Weighted mark for the Final Exam is: ", int(outputMarkExam))
    print("Total weighted mark for the subject: ", int(WeightedTotalMark), "\n")
    print("Bonus Mark: ", round(BonusMarkOutput, 2))
    print("Total mark with bonus: ", round(BonusPlusWeight, 2), "\n")
    # Append the record to IUA.txt next to this script.
    # BUG FIX: os.getcwd was again referenced without calling it; the
    # resulting WorkDir string was unused in any case, so it is dropped.
    dir, filename = os.path.split(os.path.abspath(__file__))
    if os.path.isfile(dir + "/IUA.txt"):
        file = open(dir + "/IUA.txt", "a")
        file.write("\n" + str(StudentID) + " " + str(StudentName) + " " + str(int(outputMarkAss1)) + " " + str(int(outputMarkAss2)) + " " + str(int(outputMarkExam)) + " " + str(int(WeightedTotalMark)) + " " + str(round(BonusPlusWeight, 2)))
        file.close()
    # BUG FIX: `is "Y"` compared identity, not equality; and per the spec in
    # the TODO notes, any answer other than Y/N must re-ask the question
    # rather than restart data entry.
    while True:
        RetryInput = input("Do you want to enter marks for another student (Y/N)? ")
        if RetryInput == "Y" or RetryInput == "N":
            break
    if RetryInput == "N":
        RetryInputLoop = False
        print("Goodbye.")
|
# Generated by Django 2.2.1 on 2019-06-27 09:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add an optional `od` FK to LoadShift and make its `leave` FK optional."""

    dependencies = [
        ('faculty', '0012_auto_20190623_1030'),
    ]
    operations = [
        # New nullable link from a load shift to its OD record.
        migrations.AddField(
            model_name='loadshift',
            name='od',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='faculty.OD'),
        ),
        # `leave` becomes nullable/blank so a shift need not reference a leave.
        migrations.AlterField(
            model_name='loadshift',
            name='leave',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='faculty.Leave'),
        ),
    ]
|
# Swap the largest and smallest elements of the list in place.
numbers = [10, 100, 30, -1, 8, -13]
# BUG FIX: the original initialized max_num/min_num to 0, which selects the
# wrong element whenever every value is negative (for the max) or every value
# is positive (for the min). Scanning indices with max()/min() has no such
# blind spot and also removes the manual bookkeeping.
max_index = max(range(len(numbers)), key=numbers.__getitem__)
min_index = min(range(len(numbers)), key=numbers.__getitem__)
numbers[max_index], numbers[min_index] = numbers[min_index], numbers[max_index]
print(numbers)
|
""" Provides commands used to initialize gazette websites. """
import click
import transaction
from dateutil import parser
from onegov.core.cli import command_group
from onegov.core.cli import pass_group_context
from onegov.core.crypto import random_password
from onegov.core.csv import convert_excel_to_csv
from onegov.core.csv import CSVFile
from onegov.gazette import _
from onegov.gazette.collections import CategoryCollection
from onegov.gazette.collections import IssueCollection
from onegov.gazette.collections import OrganizationCollection
from onegov.gazette.models import GazetteNotice
from onegov.gazette.models import IssueName
from onegov.gazette.utils import SogcImporter
from onegov.user import User
from onegov.user import UserCollection
from onegov.user import UserGroupCollection
from sedate import standardize_date
# Root click command group; sub-commands attach via @cli.command().
cli = command_group()
@cli.command(context_settings={'creates_path': True})
@pass_group_context
def add(group_context):
    """ Adds a gazette instance to the database. For example:
    onegov-gazette --select '/onegov_gazette/zug' add
    """
    # The CLI framework executes the returned callable per selected instance.
    def add_instance(request, app):
        # Warn (but continue) when the instance has no principal.yml config.
        if not app.principal:
            click.secho("principal.yml not found", fg='yellow')
        click.echo("Instance was created successfully")
    return add_instance
@cli.command(name='import-editors')
@click.argument('file', type=click.File('rb'))
@click.option('--clear/--no-clear', default=False)
@click.option('--dry-run/--no-dry-run', default=False)
@click.option('--locale', default='de_CH')
@pass_group_context
def import_editors(ctx, file, clear, dry_run, locale):
    """ Imports editors and groups. For example:
    onegov-gazette --select '/onegov_gazette/zug' import-editors data.xlsx
    """
    def import_editors_and_groups(request, app):
        # The locale determines how the translated column headers and sheet
        # name are resolved in the uploaded workbook.
        request.locale = locale
        headers = {
            'group': request.translate(_("Group")),
            'name': request.translate(_("Name")),
            'email': request.translate(_("E-Mail"))
        }
        session = app.session()
        users = UserCollection(session)
        groups = UserGroupCollection(session)
        # --clear wipes all member-role users and all groups first.
        if clear:
            click.secho("Deleting all editors", fg='yellow')
            for user in users.query().filter(User.role == 'member'):
                session.delete(user)
            click.secho("Deleting all groups", fg='yellow')
            for group in groups.query():
                session.delete(group)
        csvfile = convert_excel_to_csv(
            file, sheet_name=request.translate(_("Editors"))
        )
        csv = CSVFile(csvfile, expected_headers=headers.values())
        lines = list(csv.lines)
        # Map our logical keys to the CSV attribute identifiers derived from
        # the translated headers.
        columns = {
            key: csv.as_valid_identifier(value)
            for key, value in headers.items()
        }
        added_groups = {}
        # NOTE(review): `line.gruppe` hard-codes the German header identifier
        # while `columns['group']` is resolved per locale — presumably only
        # de_CH is used in practice; confirm before importing with another
        # locale.
        for group in set([line.gruppe for line in lines]):
            added_groups[group] = groups.add(name=group)
        count = len(added_groups)
        click.secho(f"{count} group(s) imported", fg='green')
        count = 0
        for line in lines:
            count += 1
            email = getattr(line, columns['email'])
            realname = getattr(line, columns['name'])
            group = getattr(line, columns['group'])
            group = added_groups[group] if group else None
            users.add(
                username=email,
                realname=realname,
                group=group,
                password=random_password(),
                role='member',
            )
        click.secho(f"{count} editor(s) imported", fg='green')
        # --dry-run rolls everything back; useful for validating the file.
        if dry_run:
            transaction.abort()
            click.secho("Aborting transaction", fg='yellow')
    return import_editors_and_groups
@cli.command(name='import-organizations')
@click.argument('file', type=click.File('rb'))
@click.option('--clear/--no-clear', default=False)
@click.option('--dry-run/--no-dry-run', default=False)
@click.option('--locale', default='de_CH')
@pass_group_context
def import_organizations(ctx, file, clear, dry_run, locale):
    """ Imports Organizations. For example:

    onegov-gazette --select '/onegov_gazette/zug' \
        import-organizations data.xlsx

    """

    def _import_organizations(request, app):
        # Translate the expected spreadsheet headers for the given locale.
        request.locale = locale
        headers = {
            'id': request.translate(_("ID")),
            'name': request.translate(_("Name")),
            'title': request.translate(_("Title")),
            'active': request.translate(_("Active")),
            'external_name': request.translate(_("External ID")),
            'parent': request.translate(_("Parent Organization"))
        }

        session = app.session()
        organizations = OrganizationCollection(session)

        if clear:
            click.secho("Deleting organizations", fg='yellow')
            for organization in organizations.query():
                session.delete(organization)

        csvfile = convert_excel_to_csv(
            file, sheet_name=request.translate(_("Organizations"))
        )
        csv = CSVFile(csvfile, expected_headers=headers.values())
        lines = list(csv.lines)
        # Map logical keys to the attribute names generated from the headers.
        columns = {
            key: csv.as_valid_identifier(value)
            for key, value in headers.items()
        }

        count = 0
        for line in lines:
            count += 1
            id_ = int(getattr(line, columns['id']))
            name = getattr(line, columns['name'])
            parent = getattr(line, columns['parent'])
            parent = int(parent) if parent else None
            title = getattr(line, columns['title'])
            active = bool(int(getattr(line, columns['active'])))
            external_name = getattr(line, columns['external_name'])
            # Added as a root first; the parent link is set afterwards so a
            # row may reference a parent defined anywhere in the file.
            organization = organizations.add_root(
                id=id_,
                name=name,
                title=title,
                active=active,
                external_name=external_name,
                order=count
            )
            organization.parent_id = parent
        click.secho(f"{count} organization(s) imported", fg='green')

        if dry_run:
            transaction.abort()
            click.secho("Aborting transaction", fg='yellow')

    return _import_organizations
@cli.command(name='import-categories')
@click.argument('file', type=click.File('rb'))
@click.option('--clear/--no-clear', default=False)
@click.option('--dry-run/--no-dry-run', default=False)
@click.option('--locale', default='de_CH')
@pass_group_context
def import_categories(ctx, file, clear, dry_run, locale):
    """ Imports categories. For example:

    onegov-gazette --select '/onegov_gazette/zug' \
        import-categories data.xlsx

    """

    def _import_categories(request, app):
        # Translate the expected spreadsheet headers for the given locale.
        request.locale = locale
        headers = {
            'id': request.translate(_("ID")),
            'name': request.translate(_("Name")),
            'title': request.translate(_("Title")),
            'active': request.translate(_("Active"))
        }

        session = app.session()
        categories = CategoryCollection(session)

        if clear:
            click.secho("Deleting categories", fg='yellow')
            for category in categories.query():
                session.delete(category)

        csvfile = convert_excel_to_csv(
            file, sheet_name=request.translate(_("Categories"))
        )
        csv = CSVFile(csvfile, expected_headers=headers.values())
        lines = list(csv.lines)
        # Map logical keys to the attribute names generated from the headers.
        columns = {
            key: csv.as_valid_identifier(value)
            for key, value in headers.items()
        }

        count = 0
        for line in lines:
            count += 1
            id_ = int(getattr(line, columns['id']))
            name = getattr(line, columns['name'])
            title = getattr(line, columns['title'])
            active = bool(int(getattr(line, columns['active'])))
            # All categories are added as roots; `order` preserves file order.
            categories.add_root(
                id=id_,
                name=name,
                title=title,
                active=active,
                order=count
            )
        # NOTE(review): "categorie(s)" is misspelled in this user-facing
        # message ("category(ies)").
        click.secho(f"{count} categorie(s) imported", fg='green')

        if dry_run:
            transaction.abort()
            click.secho("Aborting transaction", fg='yellow')

    return _import_categories
@cli.command(name='import-issues')
@click.argument('file', type=click.File('rb'))
@click.option('--clear/--no-clear', default=False)
@click.option('--dry-run/--no-dry-run', default=False)
@click.option('--locale', default='de_CH')
@click.option('--timezone', default=None)
@pass_group_context
def import_issues(ctx, file, clear, dry_run, locale, timezone):
    """ Imports issues. For example:

    onegov-gazette --select '/onegov_gazette/zug' import-issues data.xlsx

    """

    def _import_issues(request, app):
        # The principal is needed for its default time zone.
        if not app.principal:
            return

        # Translate the expected spreadsheet headers for the given locale.
        request.locale = locale
        headers = {
            'number': request.translate(_("Number")),
            'date': request.translate(_("Date")),
            'deadline': request.translate(_("Deadline"))
        }

        session = app.session()
        issues = IssueCollection(session)

        if clear:
            click.secho("Deleting issues", fg='yellow')
            for issue in issues.query():
                session.delete(issue)

        csvfile = convert_excel_to_csv(
            file, sheet_name=request.translate(_("Issues"))
        )
        csv = CSVFile(csvfile, expected_headers=headers.values())
        lines = list(csv.lines)
        columns = {
            key: csv.as_valid_identifier(value)
            for key, value in headers.items()
        }

        count = 0
        for line in lines:
            count += 1
            number = int(getattr(line, columns['number']))
            date_ = parser.parse(getattr(line, columns['date'])).date()
            # The sheet contains naive datetimes; localize the deadline to
            # the given timezone or the principal's default.
            deadline = standardize_date(
                parser.parse(getattr(line, columns['deadline'])),
                timezone or request.app.principal.time_zone
            )
            name = str(IssueName(date_.year, number))
            issues.add(
                name=name,
                number=number,
                date=date_,
                deadline=deadline
            )
        # BUG FIX: the success message said "categorie(s)" -- a copy-paste
        # leftover from the category importer; this imports issues.
        click.secho(f"{count} issue(s) imported", fg='green')

        if dry_run:
            transaction.abort()
            click.secho("Aborting transaction", fg='yellow')

    return _import_issues
@cli.command(name='import-sogc')
@click.option('--clear/--no-clear', default=False)
@click.option('--dry-run/--no-dry-run', default=False)
@pass_group_context
def import_sogc(ctx, clear, dry_run):
    """ Imports from the SOGC. For example:

    onegov-gazette --select '/onegov_gazette/zug' import-sogc

    """

    def _import_sogc(request, app):
        # Both a principal and a configured SOGC import are required.
        if not app.principal:
            return
        if not getattr(request.app.principal, 'sogc_import', None):
            return

        session = request.session
        if clear:
            click.secho("Deleting imported notices", fg='yellow')
            # Imported notices are identified by a non-null source.
            existing = session.query(GazetteNotice)
            existing = existing.filter(GazetteNotice.source.isnot(None))
            existing.delete()

        # The importer instance is called directly; it returns the count.
        count = SogcImporter(session, request.app.principal.sogc_import)()
        click.secho(f"{count} notice(s) imported", fg='green')

        if dry_run:
            transaction.abort()
            click.secho("Aborting transaction", fg='yellow')

    return _import_sogc
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
GoogLeNet v1 Benchmark
https://github.com/soumith/convnet-benchmarks
./googlenet_v1.py
./googlenet_v1.py -d f16
Derived from full model found here:
https://github.com/NervanaSystems/ModelZoo/tree/master/ImageClassification/ILSVRC2012/Googlenet
"""
from neon import NervanaObject
from neon.util.argparser import NeonArgparser
from neon.initializers import Xavier
from neon.layers import Conv, Pooling, GeneralizedCost, Affine, MergeBroadcast
from neon.layers import Tree
from neon.optimizers import GradientDescentMomentum, MultiOptimizer, Schedule
from neon.transforms import Rectlin, CrossEntropyMulti
from neon.transforms import Rectlin, Logistic, Softmax
from neon.models import Model
from neon.data import ArrayIterator
import numpy as np
import deepstacks
from deepstacks.macros import *
from deepstacks.neon import curr_layer,curr_stacks,curr_flags,curr_model
parser = NeonArgparser(__doc__)
args = parser.parse_args()

# Enable Winograd fast convolution kernels (tile size 4) on the backend.
NervanaObject.be.enable_winograd = 4

# Setup data provider: one batch of random ImageNet-sized inputs/labels,
# sufficient for benchmarking (no real data needed).
X_train = np.random.uniform(-1, 1, (128, 3 * 224 * 224))
y_train = np.random.randint(0, 999, (128, 1000))
train = ArrayIterator(X_train, y_train, nclass=1000, lshape=(3, 224, 224))

# Weight initialisers and shared layer settings.
init1 = Xavier(local=False)
initx = Xavier(local=True)
relu = Rectlin()
common = dict(activation=relu, init=initx)
commonp1 = dict(activation=relu, init=initx, padding=1)
commonp2 = dict(activation=relu, init=initx, padding=2)
pool3s1p1 = dict(fshape=3, padding=1, strides=1)
pool3s2p1 = dict(fshape=3, padding=1, strides=2, op='max')

# BUG FIX: this previously read `train_set.shape`, but no `train_set` exists
# -- the data iterator defined above is named `train` (NameError at runtime).
l_in = deepstacks.neon.InputLayer((None,) + train.shape, 'image')

# NOTE: neon's orig layers dose not add bias, so it's much faster than us.
# `inception` is expected to be provided by the `deepstacks.macros` star
# import -- TODO confirm.
network, stacks, paramlayers, errors, watchpoints = deepstacks.neon.build_network(l_in, (
    (0, 64, 7, 2, 0, 0, {}),
    (0, 0, 3, 2, 0, 0, {'maxpool'}),
    (0, 64, 1, 1, 0, 0, {}),
    (0, 192, 3, 1, 0, 0, {}),
    (0, 0, 3, 2, 0, 0, {'maxpool'}),
    (inception, (32, 64, 96, 128, 16, 32,)),
    (inception, (64, 128, 128, 192, 32, 96,)),
    (0, 0, 3, 2, 0, 0, {'maxpool'}),
    (inception, (64, 192, 96, 208, 16, 48, )),
    (inception, (64, 160, 112, 224, 24, 64, )),
    (inception, (64, 128, 128, 256, 24, 64, )),
    (inception, (64, 112, 144, 288, 32, 64, )),
    (inception, (128, 256, 160, 320, 32, 128,)),
    (0, 0, 3, 2, 0, 0, {'maxpool'}),
    (inception, (128, 256, 160, 320, 32, 128, )),
    (inception, (128, 384, 192, 384, 48, 128, )),
    (0, 0, 7, 1, 0, 0, {'meanpool': True, 'pad': 0}),
    (0, 1000, 0, 0, 0, 0, {'dense': True, 'linear': True}),
))

model = Model(layers=network)

# Learning rate schedule: multiply by (1/250)^(1/3) at epochs 22, 44, 65.
weight_sched = Schedule([22, 44, 65], (1 / 250.)**(1 / 3.))
opt_gdm = GradientDescentMomentum(0.01, 0.0, wdecay=0.0005, schedule=weight_sched)
opt = MultiOptimizer({'default': opt_gdm})
cost = GeneralizedCost(costfunc=CrossEntropyMulti())

# Benchmark: 10 timed iterations, skipping the first (warm-up).
model.benchmark(train, cost=cost, optimizer=opt, niterations=10, nskip=1)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'zhj'

import re

s1 = '<div class="address"><div class="houseInfo"><span class="houseIcon"></span><a href="https://wh.lianjia.com/xiaoqu/3711062825014/" target="_blank" data-log_index="1" data-el="region">保利城 </a> | 4室2厅 | 174平米 | 南 北 | 毛坯</div></div>'
s = """<div class="priceInfo"><div class="totalPrice"><span>360</span>万</div><div class="unitPrice" data-hid="104102476933" data-rid="3711062825014" data-price="20690"><span>单价20690元/平米</span></div></div>
"""

# Extract the estate/region name from the address markup.
region_pattern = re.compile("""<div class="address"><div .*? data-el="region">(.*?) </a>""", re.S)
st = region_pattern.findall(s1)

# Extract the total price (in 万) from the price markup.
price_pattern = re.compile('<div class="priceInfo"><div class="totalPrice"><span>(.*?)</span>万</div>')
st1 = price_pattern.findall(s)

print(st + st1)
from dqn_agent import DQNAgent
from tetris import Tetris
from datetime import datetime
from statistics import mean, median
import random
from logs import CustomTensorBoard
from tqdm import tqdm
# Run dqn with Tetris
def dqn():
    """Train a DQN agent to play Tetris.

    The agent scores every reachable next state and plays the action leading
    to the best one; experiences are replayed for training, and statistics
    are periodically written to TensorBoard.
    """
    env = Tetris()

    # Hyper-parameters.
    episodes = 2000                  # total training episodes
    max_steps = None                 # per-episode step cap (None = unlimited)
    epsilon_stop_episode = 1500      # episode at which exploration bottoms out
    mem_size = 20000                 # replay buffer capacity
    discount = 0.95                  # reward discount factor
    batch_size = 512                 # replay batch size
    epochs = 1                       # fit epochs per training call
    render_every = 50                # render one episode out of this many
    log_every = 50                   # TensorBoard logging period (episodes)
    replay_start_size = 2000         # min experiences before training starts
    train_every = 1                  # train after every N episodes
    n_neurons = [32, 32]             # hidden layer sizes
    render_delay = None              # optional delay between rendered frames
    activations = ['relu', 'relu', 'linear']

    agent = DQNAgent(env.get_state_size(),
                     n_neurons=n_neurons, activations=activations,
                     epsilon_stop_episode=epsilon_stop_episode, mem_size=mem_size,
                     discount=discount, replay_start_size=replay_start_size)

    log_dir = f'logs/tetris-nn={str(n_neurons)}-mem={mem_size}-bs={batch_size}-e={epochs}-{datetime.now().strftime("%Y%m%d-%H%M%S")}'
    log = CustomTensorBoard(log_dir=log_dir)

    scores = []

    for episode in tqdm(range(episodes)):
        current_state = env.reset()
        done = False
        steps = 0

        if render_every and episode % render_every == 0:
            render = True
        else:
            render = False

        # Game
        while not done and (not max_steps or steps < max_steps):
            # Score every reachable next state and pick the best one.
            next_states = env.get_next_states()
            best_state = agent.best_state(next_states.values())

            # Map the chosen state back to its action (assumes states are
            # comparable by equality -- the first match wins).
            best_action = None
            for action, state in next_states.items():
                if state == best_state:
                    best_action = action
                    break

            reward, done = env.play(best_action[0], best_action[1], render=render,
                                    render_delay=render_delay)

            agent.add_to_memory(current_state, next_states[best_action], reward, done)
            current_state = next_states[best_action]
            steps += 1

        scores.append(env.get_game_score())

        # Train
        if episode % train_every == 0:
            agent.train(batch_size=batch_size, epochs=epochs)

        # Logs
        if log_every and episode and episode % log_every == 0:
            avg_score = mean(scores[-log_every:])
            min_score = min(scores[-log_every:])
            max_score = max(scores[-log_every:])

            log.log(episode, avg_score=avg_score, min_score=min_score,
                    max_score=max_score)
# Script entry point: start DQN training when run directly.
if __name__ == "__main__":
    dqn()
|
def dfsCall(graph, start):
    """Iterative depth-first traversal that prints each visited vertex.

    graph: mapping vertex -> iterable of neighbour vertices.
    start: vertex to begin from.

    Vertices are marked as seen when pushed, so each is printed exactly once.
    """
    seen = {start}
    stack = [start]
    while stack:
        vertex = stack.pop()
        for neighbour in graph[vertex]:
            if neighbour not in seen:
                seen.add(neighbour)
                stack.append(neighbour)
        print(vertex)
|
from onegov.core.orm.abstract import AdjacencyList
from onegov.core.orm.mixins import ContentMixin
from onegov.core.orm.mixins import TimestampMixin
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import or_
from sqlalchemy_utils import observes
from sqlalchemy.orm import object_session
class Category(AdjacencyList, ContentMixin, TimestampMixin):
    """ Defines a category for official notices.

    Although the categories are defined as an adjacency list, we currently
    use it only as a simple alphabetically ordered key-value list
    (name-title).

    """

    __tablename__ = 'gazette_categories'

    #: True, if this category is still in use.
    active = Column(Boolean, nullable=True)

    def notices(self):
        """ Returns a query to get all notices related to this category. """
        from onegov.gazette.models.notice import GazetteNotice  # circular
        # Notices store their category names in the `_categories` HSTORE;
        # match on this category's name key.
        notices = object_session(self).query(GazetteNotice)
        notices = notices.filter(
            GazetteNotice._categories.has_key(self.name)  # noqa
        )
        return notices

    @property
    def in_use(self):
        """ True, if the category is used by any notice. """
        if self.notices().first():
            return True
        return False

    @observes('title')
    def title_observer(self, title):
        """ Changes the category title of the notices when updating the title
        of the category.

        """
        from onegov.gazette.models.notice import GazetteNotice  # circular
        # Only touch notices whose denormalised category text is missing
        # or stale.
        notices = self.notices()
        notices = notices.filter(
            or_(
                GazetteNotice.category.is_(None),
                GazetteNotice.category != title
            )
        )
        for notice in notices:
            notice.category = title
|
#!/usr/bin/python2.7
from __future__ import unicode_literals
import os, errno
import sys
import time
import httplib2
import urllib2
from subprocess import Popen, PIPE, check_output
import webbrowser
# USE THIS: sudo pip install oauth2client==3.0.0
# Give full permissions to both .json credetial files
# DO NOT USE (this will update to version 4.0 which will creates a File Cache Error): sudo pip install --upgrade google-api-python-client
from oauth2client import client
from oauth2client.file import Storage
from threading import Thread
import re
import logging
import ConfigParser
import json
import filecmp
import multiprocessing
# ******
# Consts
# ======
logging.basicConfig(filename='output.log',level=logging.DEBUG)
config_file_name = "sound_drive_local.cfg"
def save_global_config(config_obj):
    """Persist *config_obj* (a ConfigParser) to the module-level config path."""
    with open(config_file_name, 'wb') as fh:
        config_obj.write(fh)
def internet_on():
    """Return True when http://www.google.com answers within 5 seconds."""
    try:
        urllib2.urlopen('http://www.google.com', timeout=5)
        return True
    except urllib2.URLError:
        return False
def getNumOfCores():
    """Return the number of CPU cores reported by the OS."""
    return multiprocessing.cpu_count()
# **********
# youtube_dl
# ==========
# sudo pip install youtube-dl
# Important to update! use 'sudo pip install -U youtube-dl' for that
import youtube_dl
def youtubeDlHook(d):
    """youtube-dl progress hook: log per-file progress and completion.

    d: youtube-dl status dict with at least 'status' and 'filename'.
    """
    status = d['status']
    if status == 'finished':
        file_tuple = os.path.split(os.path.abspath(d['filename']))
        logging.debug("Done downloading [" + file_tuple[1] + "]")
    if status == 'downloading':
        logging.debug("[" + d['filename'] + "] [" + d['_percent_str'] + "] [" + d['_eta_str'] + "]")
# Download the video
# Requires 'brew install ffmpeg' (not libav!)
# also may require 'brew update && brew upgrade ffmpeg'
#
# MAC: Requires 'brew install ffmpeg' (not libav!)
# Linux: Requires 'sudo apt-get install libav-tools'
# Global abort guard (debug leftover): once non-zero, downloadSong becomes a
# no-op. The increment below is commented out, so it currently stays 0 and
# never triggers -- NOTE(review): consider removing.
c = 0
def downloadSong(yt_song_structure):
    """Download one song as mp3 (with thumbnail and metadata) via youtube-dl.

    yt_song_structure: dict with 'playlist_path', 'title' and 'url' keys.
    Errors are logged and swallowed so one failed song does not abort the
    whole batch download (this runs inside a thread pool).
    """
    global c
    if c > 0:
        return
    #c += 1
    options = {
        'writethumbnail': True,
        # Output template: <playlist dir>/<sanitised title>.<ext>
        'outtmpl': yt_song_structure['playlist_path'] + '/' + yt_song_structure['title'] + '.%(ext)s',
        'extractaudio': True,
        'noplaylist': True,
        'max_downloads': 1,
        'progress_hooks': [youtubeDlHook],
        'format': 'bestaudio/best',
        #'ignoreerrors': True,
        # Post-processing: extract mp3 at 192kbps, then embed tags/thumbnail.
        'postprocessors': [
            {
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
                'preferredquality': '192',
            },
            {'key': 'FFmpegMetadata'},
            {'key': 'EmbedThumbnail'},
        ]
    }
    try:
        with youtube_dl.YoutubeDL(options) as ydl:
            ydl.download([str(yt_song_structure['url'])])
    except Exception, e:  # Python 2 except syntax (file targets python2.7)
        logging.debug("EXCEPTION: Skipping [" + yt_song_structure['url'] + "]...")
        logging.debug("Exception message: " + str(e))
        logging.debug(e.__doc__)
        logging.debug(e.message)
def isVideoAvailable(youtube, video_id):
    """Return True when the video exists and has finished processing.

    youtube : authenticated YouTube API client.
    video_id: id of the video to check.
    """
    status_response = youtube.videos().list(
        part="status",
        id=video_id
    ).execute()
    items = status_response["items"]
    if not items:
        return False
    return items[0]['status']['uploadStatus'] == 'processed'
# (Not used)
# Resolve a playlist id to its human-readable title.
def get_playlist_name(youtube, playlist_id):
    """Return the title of the playlist with the given id."""
    snippet_response = youtube.playlists().list(
        part="snippet",
        id=playlist_id
    ).execute()
    return snippet_response["items"][0]["snippet"]["title"]
# Get the playlist id from its name
def get_playlist_id(youtube, playlist_name):
    """Return the id of the authenticated user's playlist with this exact
    title, paging through the playlist listing 10 at a time; None when no
    playlist matches.
    """
    logging.debug("Retirieving playlist id for playlist [" + playlist_name + "] ")
    nextPageToken = ""
    while True:
        response = youtube.playlists().list(
            part="snippet",
            maxResults=10,
            pageToken=nextPageToken,
            mine=True
        ).execute()
        for playlist_item in response["items"]:
            playlist_title = playlist_item["snippet"]["title"]
            #playlist_title = unicode(playlist_title).encode('utf8')
            if playlist_title == playlist_name:
                playlist_id = playlist_item["id"]
                logging.debug("Found playlist [" + playlist_title + "] with id [" + playlist_id + "]")
                return playlist_id
        # Keep paging until the API stops returning a nextPageToken.
        if "nextPageToken" in response:
            nextPageToken = response["nextPageToken"]
        else:
            break;
    return None
def start_sync_playlist_thread():
    """Kick off a playlist sync on a background thread (BT remote command)."""
    # Sync playlist
    logging.debug("BT command: SYNC PLAYLIST")
    # BUG FIX: sync_playlist(playlist_name_list) requires one argument;
    # Thread(target=sync_playlist) with no args raised TypeError when the
    # thread started. Pass the module-level playlist name list.
    sync_playlist_thread = Thread(target=sync_playlist, args=(playlist_name_list,))
    #sync_playlist_thread.daemon = True
    sync_playlist_thread.start()
def sync_playlist(playlist_name_list):
    """Synchronise local mp3 folders with the user's YouTube playlists.

    Steps: pause playback, authenticate against the YouTube Data API, resolve
    the playlist ids, list every playlist item, then download missing songs
    and delete local files no longer on any playlist. Progress is logged and
    the resulting playlist/song state is persisted to the config file.
    """
    global songs
    global keep_current_song_index
    global is_song_playing
    global curr_song_play_pipe
    global playlist_root_path
    if not internet_on():
        # Sound taken from http://soundbible.com/1540-Computer-Error-Alert.html
        logging.debug("No internet connection, aborting playlist sync process.")
        return
    # Pause the currently playing song (best effort).
    try:
        curr_song_play_pipe.stdin.write('p')
    except:
        logging.warning("No song to stop playing for the sync process")
    # TODO: this is for debug only (remove when done!)
    #try:
    # ipaddr = check_output("ifconfig | grep -A 1 wlan0 | grep 'inet addr:' | cut -d':' -f2 | cut -d' ' -f1", shell=True)
    # sendNotification(ipaddr)
    #except:
    # logging.warning("Could not send message to phone...")
    # Sound taken from https://appraw.com/ringtone/input-xxk4r
    # OAuth2: reuse stored credentials or run the one-time browser auth flow.
    storage = Storage("%s-oauth2.json" % sys.argv[0])
    credentials = storage.get()
    if credentials is None or credentials.invalid:
        print("Authentication needed")
        flow = client.flow_from_clientsecrets(
            'client_secrets.json',
            scope='https://www.googleapis.com/auth/youtube',
            redirect_uri='urn:ietf:wg:oauth:2.0:oob')
        auth_uri = flow.step1_get_authorize_url()
        print(auth_uri)
        webbrowser.open_new(auth_uri)
        auth_code = raw_input('Enter the auth code: ')
        credentials = flow.step2_exchange(auth_code)
        storage.put(credentials)
    http_auth = credentials.authorize(httplib2.Http())
    from apiclient.discovery import build
    youtube = build('youtube', 'v3', http = http_auth)
    # Resolve each playlist name to its YouTube playlist id; abort on failure.
    playlist_id_list = []
    for playlist_name in playlist_name_list:
        logging.debug("Playlist id must be retrieved before continuing")
        if playlist_name == None or len(playlist_name) == 0:
            logging.debug("ERROR: Playlist name must be set before continuing")
            exit(1)
        # Retrieve the playlist id
        playlist_id = get_playlist_id(youtube, playlist_name)
        if playlist_id == None or len(playlist_id) == 0:
            logging.debug("ERROR: Could not get playlist ID for playlist [" + playlist_name + "]")
            exit(1)
        playlist_id_list.append(playlist_id)
    # Write the playlist id to the config file
    config.set('youtube', 'playlist_id_list', playlist_id_list)
    save_global_config(config)
    logging.debug("playlist_id_list: " + str(playlist_id_list))
    # Get videos list from the playlist response
    songs = []
    for i, playlist_id in enumerate(playlist_id_list):
        nextPageToken = ""
        playlist = {}
        playlist = {"playlist_name": playlist_name_list[i], "songs": []}
        while True:
            response = youtube.playlistItems().list(
                part="snippet",
                maxResults=10,
                pageToken=nextPageToken,
                playlistId=playlist_id
            ).execute()
            for playlist_item in response["items"]:
                # Sanitise the video title so it is safe as a filename.
                song_title = playlist_item["snippet"]["title"]
                song_title = re.sub('[^A-Za-z0-9 \(\)\[\]_-]+', '', song_title)
                song_title = unicode(song_title).encode('utf8')
                song_title = song_title.replace(":", " -")
                song_title = song_title.replace("^", "")
                song_title = song_title.replace(" ", " ")
                song_title = song_title.replace(" ", " ")
                song_title = song_title.replace(" ", " ")
                song_title = song_title.replace(" ", " ")
                song_title = song_title.replace(" ", " ")
                song_title = re.sub('^ - ', '', song_title)
                song_title = song_title.replace("[", "(").replace("]", ")")
                logging.debug("song title ==> " + song_title)
                video_id = playlist_item["snippet"]["resourceId"]["videoId"]
                video_id = unicode(video_id).encode('utf8')
                if (song_title == ""):
                    logging.warning("Song name is empty. Video ID [" + video_id + "]")
                playlist['songs'].append({"title": song_title, "video_id": video_id});
            # Keep paging until the API stops returning a nextPageToken.
            if "nextPageToken" in response:
                nextPageToken = response["nextPageToken"]
            else:
                break;
        songs.append(playlist)
    logging.debug("songs structure: " + str(songs))
    yt_vids_to_download = []
    yt_vids_to_delete = []
    # List all needed songs
    needed_song_files = {}
    for playlist in songs:
        needed_song_files[playlist['playlist_name']] = [song['title'] + ".mp3" for song in playlist['songs']];
    logging.debug("needed_song_files " + str(needed_song_files))
    stats = []
    for playlist in songs:
        playlist_name = playlist['playlist_name']
        playlist_stats = {"playlist_name": playlist_name, "current": 0, "remove": 0, "download": 0, "final": 0}
        playlist_path = playlist_root_path + playlist_name
        # Get local folder playlist
        files = [f for f in os.listdir(playlist_path) if os.path.isfile(os.path.join(playlist_path, f)) and os.path.getsize(os.path.join(playlist_path, f))]
        files = sorted(files)
        logging.debug("Files in folder [" + playlist_path + "] are " + str(files))
        playlist_stats['current'] = len(files)
        playlist_stats['final'] += playlist_stats['current']
        youtube_video_url_prefix = "http://www.youtube.com/watch?v="
        # Check which songs should be downloaded
        # NOTE(review): the pool is re-created for every playlist but only
        # used after this loop ends, so the last pool runs all downloads.
        from multiprocessing.dummy import Pool as ThreadPool
        pool = ThreadPool(int(getNumOfCores()))
        for song in playlist['songs']:
            song_title = song['title']
            song_file = song['title'] + ".mp3"
            song_photo_file = song['title'] + ".jpg"
            # NOTE(review): a song counts as "already exists" only when its
            # mp3 is present AND its thumbnail jpg is absent -- confirm this
            # condition is intended.
            if song_file in files and not song_photo_file in files:
                logging.debug("File [" + song_file + "] already exists.")
            else:
                if isVideoAvailable(youtube, song['video_id']):
                    playlist_stats['download'] += 1
                    playlist_stats['final'] += 1
                    logging.debug("[TO_DOWNLOAD] tag applied to [" + song['title'] + "] with ID [" + song['video_id'] + "]")
                    yt_vids_to_download.append({"playlist_path": playlist_path, "title": song_title, "url": youtube_video_url_prefix + song['video_id']})
                else:
                    logging.warning("Video [" + song['title'] + "] with id [" + song['video_id'] + "] is not available")
        # Local files that are no longer on the playlist get removed.
        for song_file in files:
            if song_file not in needed_song_files[playlist_name]:
                yt_vids_to_delete.append(os.path.join(playlist_path, song_file))
                logging.debug("[TO_REMOVE] tag applied to [" + song_file + "]")
                playlist_stats['remove'] += 1
                playlist_stats['final'] -= 1
        stats.append(playlist_stats)
    logging.debug("stats: " + str(stats))
    if len(yt_vids_to_download) == 0 and len(yt_vids_to_delete) == 0:
        logging.debug("SoundDrive is synched with YouTube, continuing to play where the song stopped")
        # Resume playback (best effort).
        try:
            curr_song_play_pipe.stdin.write('p')
        except:
            logging.warning("Pause received but no song is playing at the moment")
    else:
        for song_file in yt_vids_to_delete:
            logging.debug("Removing file [" + song_file + "]...")
            os.remove(song_file)
        logging.debug("Starts downloading songs...")
        startDLTimestamp = int(time.time())
        # Download all missing songs in parallel (one worker per CPU core).
        pool.map(downloadSong, yt_vids_to_download)
        pool.close()
        pool.join()
        logging.debug("YouTube download is DONE in [" + str(int(time.time()) - startDLTimestamp) + "] seconds")
    # Write the songs array to the config file
    config.set('youtube', 'songs', songs)
    save_global_config(config)
    # Start playing the playlist from the beginning
    config.set('playback', 'last_played_song_index', '0')
    save_global_config(config)
# Main script: load (or create) the config, ensure the playlist folders
# exist, run a full sync and log a summary of the synced songs.
config = ConfigParser.RawConfigParser()
# Config file is created for the first time
if not os.path.isfile(config_file_name) or os.stat(config_file_name).st_size == 0 or config.read(config_file_name) == []:
    logging.debug("Could not find config");
    config.add_section('playback')
    config.set('playback', 'last_played_song_index', '0')
    config.add_section('youtube')
    config.set('youtube', 'playlist_name_list', '["music", "music archive"]') # TODO: Need to find a way for the user to config this
    config.set('youtube', 'playlist_id_list', '[]') # Dynamically set using the youtube API and the name of the playlist
    config.set('youtube', 'songs', '[]') # Depicts the playlist songs order and info # TODO: this config should be in a config file inside the corresponding playlist folder
    config.add_section('bluetooth')
    config.set('bluetooth', 'device_names', '[\"SKODA\", \"AP5037\"]') # TODO: Need to find a way for the user to config this
    save_global_config(config)
# Get important config fields
# NOTE(review): eval() on config values executes arbitrary code from the
# config file -- consider json.loads or ast.literal_eval instead.
playlist_name_list = eval(config.get('youtube', 'playlist_name_list'))
playlist_id_list = eval(config.get('youtube', 'playlist_id_list'))
songs = eval(config.get('youtube', 'songs'))
playlist_root_path = "playlists/local/"
# Create playlist folder if needed
for playlist_name in playlist_name_list:
    playlist_path = playlist_root_path + playlist_name
    logging.debug("Creating playlist_path [" + playlist_path + "]")
    if not os.path.isdir(playlist_path):
        os.makedirs(playlist_path)
logging.debug("Sync starting...")
sync_playlist(playlist_name_list)
logging.debug("DONE.")
# Print songs summary
for playlist in songs:
    logging.debug("Playlist [" + playlist['playlist_name'] + "]")
    for song in playlist['songs']:
        logging.debug(" Title [" + song['title'] + "] video_id [" + song['video_id'] + "]")
|
# Generated by Django 3.1.7 on 2021-03-13 23:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: switch reserva date fields to auto_now_add.

    NOTE(review): auto_now_add stamps both fields with the row's creation
    time and makes them non-editable -- confirm this is intended for a
    booking's fecha_de_egreso (check-out date).
    """

    dependencies = [
        ('finalCristianGarcia', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='reserva',
            name='fecha_de_egreso',
            field=models.DateField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='reserva',
            name='fecha_de_ingreso',
            field=models.DateField(auto_now_add=True),
        ),
    ]
|
"""
dataset create
Author: Zhengwei Li
Date : 2018/12/24
"""
import cv2
import os
import random as r
import numpy as np
from PIL import Image, ImageEnhance
import torch
import torch.utils.data as data
def read_files(data_dir, file_name=None):
    """Load an image and its trimap from *data_dir*.

    data_dir : dataset root containing 'image/' and 'trimap/' subfolders.
    file_name: dict with 'image' and 'trimap' filename entries.

    Returns (image, trimap) as arrays from cv2.imread (cv2 returns None for
    unreadable paths rather than raising -- callers should be aware).

    BUG FIX: the default was a shared mutable dict (``file_name={}``), a
    classic Python pitfall; use None and create a fresh dict per call.
    """
    if file_name is None:
        file_name = {}
    image_name = os.path.join(data_dir, 'image', file_name['image'])
    trimap_name = os.path.join(data_dir, 'trimap', file_name['trimap'])
    image = cv2.imread(image_name)
    trimap = cv2.imread(trimap_name)
    return image, trimap
def random_scale_and_creat_patch(image, trimap, patch_size):
    """Randomly up-scale, then crop or resize image+trimap to patch_size.

    The image is resampled with cubic interpolation; the trimap uses
    nearest-neighbour so its label values are preserved. Returns
    (image, trimap), both patch_size x patch_size.
    """
    # random scale
    if r.random() < 0.5:
        h, w, c = image.shape
        # Scale factor in [1.0, 1.5). NOTE(review): the resize target is
        # relative to patch_size, not the original (h, w), which are unused
        # here -- confirm this is intended.
        scale = 1 + 0.5*r.random()
        image = cv2.resize(image, (int(patch_size*scale), int(patch_size*scale)),
                           interpolation=cv2.INTER_CUBIC)
        trimap = cv2.resize(trimap, (int(patch_size*scale), int(patch_size*scale)),
                            interpolation=cv2.INTER_NEAREST)
    # creat patch
    if r.random() < 0.5:
        h, w, c = image.shape
        if h > patch_size and w > patch_size:
            # Take a random patch_size x patch_size crop.
            x = r.randrange(0, w - patch_size)
            y = r.randrange(0, h - patch_size)
            image = image[y:y + patch_size, x:x+patch_size, :]
            trimap = trimap[y:y + patch_size, x:x+patch_size, :]
        else:
            # Too small to crop: fall back to a plain resize.
            image = cv2.resize(image, (patch_size, patch_size),
                               interpolation=cv2.INTER_CUBIC)
            trimap = cv2.resize(trimap, (patch_size, patch_size),
                                interpolation=cv2.INTER_NEAREST)
    else:
        image = cv2.resize(image, (patch_size, patch_size),
                           interpolation=cv2.INTER_CUBIC)
        trimap = cv2.resize(trimap, (patch_size, patch_size),
                            interpolation=cv2.INTER_NEAREST)

    return image, trimap
def random_flip(image, trimap):
    """Randomly mirror image and trimap together.

    Applies a vertical flip then a horizontal flip, each with independent
    probability 0.5, always keeping the pair in sync.
    """
    for axis in (0, 1):
        if r.random() < 0.5:
            image = cv2.flip(image, axis)
            trimap = cv2.flip(trimap, axis)
    return image, trimap
def np2Tensor(array):
    """Convert an HxWxC numpy image into a CxHxW float torch tensor."""
    chw = array.transpose(2, 0, 1).astype(float)
    return torch.FloatTensor(chw)
class human_matting_data(data.Dataset):
    """
    human_matting

    Dataset of (image, trimap) pairs listed in an index file. Each sample is
    randomly scaled/cropped and flipped, normalised, and returned as tensors.
    """

    def __init__(self, root_dir, imglist, patch_size):
        # root_dir   : dataset root containing 'image/' and 'trimap/' folders.
        # imglist    : text file with one image filename per line.
        # patch_size : output patch side length in pixels.
        super().__init__()
        self.data_root = root_dir
        self.patch_size = patch_size
        with open(imglist) as f:
            self.imgID = f.readlines()
        self.num = len(self.imgID)
        print("Dataset : file number %d" % self.num)

    def __getitem__(self, index):
        # read files
        # The trimap shares the image's basename but with a .png extension.
        image, trimap = read_files(self.data_root,
                                   file_name={'image': self.imgID[index].strip(),
                                              'trimap': self.imgID[index].strip()[:-4] + '.png'})
        # augmentation
        image, trimap = random_scale_and_creat_patch(
            image, trimap, self.patch_size)
        image, trimap = random_flip(image, trimap)

        # normalize
        # Per-channel mean subtraction (values presumably BGR means of the
        # training set -- TODO confirm), then scale to roughly [-0.5, 0.5].
        image = (image.astype(np.float32) - (114., 121., 134.,)) / 255.0
        trimap = trimap.astype(np.float32) / 255.0

        # to tensor
        image = np2Tensor(image)
        trimap = np2Tensor(trimap)
        # Keep a single trimap channel, shape (1, H, W).
        trimap = trimap[0, :, :].unsqueeze_(0)

        sample = {'image': image, 'trimap': trimap}
        return sample

    def __len__(self):
        return self.num
|
from dataclasses import dataclass
from typing import List, Optional
from apiclient import APIClient, endpoint, paginated, retry_request
from jsonmarshal import json_field
from apiclient_jsonmarshal import unmarshal_response
def by_query_params_callable(response, prev_params):
    """Pagination hook: return the next page's query params, or None to stop.

    Returns ``{"page": <nextPage>}`` when the response carries a truthy
    ``nextPage`` value; falls through (implicit None) otherwise, which ends
    pagination.
    """
    next_page = response.get("nextPage")
    if next_page:
        return {"page": next_page}
@endpoint(base_url="http://testserver")
class Urls:
    """URL paths; the apiclient @endpoint decorator prefixes base_url."""
    users = "users"        # collection endpoint
    user = "users/{id}"    # single resource; format with id=<user_id>
    accounts = "accounts"
class Client(APIClient):
    """Thin CRUD client over the test server's user and account endpoints."""

    def get_request_timeout(self):
        # Short timeout keeps test runs fast.
        return 0.1

    def list_users(self):
        return self.get(Urls.users)

    @retry_request
    def get_user(self, user_id: int):
        return self.get(Urls.user.format(id=user_id))

    def create_user(self, first_name, last_name):
        payload = {"firstName": first_name, "lastName": last_name}
        return self.post(Urls.users, data=payload)

    def overwrite_user(self, user_id, first_name, last_name):
        payload = {"firstName": first_name, "lastName": last_name}
        return self.put(Urls.user.format(id=user_id), data=payload)

    def update_user(self, user_id, first_name=None, last_name=None):
        # Only send fields that were actually provided (partial update).
        payload = {
            key: value
            for key, value in (("firstName", first_name),
                               ("lastName", last_name))
            if value
        }
        return self.patch(Urls.user.format(id=user_id), data=payload)

    def delete_user(self, user_id):
        return self.delete(Urls.user.format(id=user_id))

    @paginated(by_query_params=by_query_params_callable)
    def list_user_accounts_paginated(self, user_id):
        return self.get(Urls.accounts, params={"userId": user_id})
@dataclass
class User:
    """User record unmarshalled from the API's camelCase JSON."""
    user_id: int = json_field(json="userId")
    first_name: str = json_field(json="firstName")
    last_name: str = json_field(json="lastName")
@dataclass
class Account:
    """Single account entry within a paginated accounts response."""
    account_name: str = json_field(json="accountName")
    number: str = json_field(json="number")
@dataclass
class AccountPage:
    """One page of accounts; next_page is None on the final page."""
    results: List[Account] = json_field(json="results")
    page: int = json_field(json="page")
    next_page: Optional[int] = json_field(json="nextPage")
class ClientWithJson(Client):
    """Client variant that unmarshals JSON responses into dataclasses."""

    @unmarshal_response(List[User])
    def list_users(self):
        return super().list_users()

    @unmarshal_response(User)
    def get_user(self, user_id: int):
        return super().get_user(user_id)

    @unmarshal_response(User)
    def create_user(self, first_name, last_name):
        return super().create_user(first_name, last_name)

    @unmarshal_response(User)
    def overwrite_user(self, user_id, first_name, last_name):
        return super().overwrite_user(user_id, first_name, last_name)

    @unmarshal_response(User)
    def update_user(self, user_id, first_name=None, last_name=None):
        return super().update_user(user_id,
                                   first_name=first_name,
                                   last_name=last_name)

    def delete_user(self, user_id):
        # No unmarshalling: the delete response carries no body to decode.
        return super().delete_user(user_id)

    @unmarshal_response(List[AccountPage])
    @paginated(by_query_params=by_query_params_callable)
    def list_user_accounts_paginated(self, user_id):
        # Re-declared (not super()) so @paginated wraps the raw GET here.
        return self.get(Urls.accounts, params={"userId": user_id})
|
# Runtime: 36 ms, faster than 82.24% of Python3 online submissions for Number of Lines To Write String.
# Memory Usage: 13.8 MB, less than 6.25% of Python3 online submissions for Number of Lines To Write String.
class Solution:
    def numberOfLines(self, widths: List[int], S: str) -> List[int]:
        """Return [lines used, width of the last line] when writing S with at
        most 100 units per line.

        widths[i] is the pixel width of chr(ord('a') + i); a character that
        would overflow the current line starts a new one.
        """
        chars = "abcdefghijklmnopqrstuvwxyz"
        # Letter -> width lookup (renamed from `dict`, which shadowed the builtin).
        width_of = {ch: w for ch, w in zip(chars, widths)}
        lines = 1
        line_width = 0
        for ch in S:
            w = width_of[ch]
            if line_width + w > 100:
                # Character doesn't fit: it becomes the first on a new line.
                lines += 1
                line_width = w
            else:
                line_width += w
        return [lines, line_width]
|
import os
# Root of the pymcts checkout from the environment.
# NOTE(review): PARENT_PATH is assigned but never used in this file --
# presumably consumed elsewhere or dead; confirm.
PARENT_PATH = os.getenv('PYMCTS_ROOT')
import tester
T = tester.Tester()
# Run UCT vs heuristic in both seat orders (depth/width/games = 0, 3, 20).
# NOTE(review): both runs write to the same output path, so the second run
# overwrites the first -- the second was presumably meant to be
# "out/heuristic_vs_normal"; confirm before changing.
T.test(T.UCT, T.HEURISTIC, 0, 3, 20, "out/normal_vs_heuristic")
T.test(T.HEURISTIC, T.UCT, 0, 3, 20, "out/normal_vs_heuristic")
|
"""
Unit testing for freedson_adult_1998
@authors Dominic Létourneau
@date 24/04/2018
"""
import unittest
import libopenimu.algorithms.freedson_adult_1998 as freedson1998
class Freedson1998Test(unittest.TestCase):
    """Checks the Freedson (1998) adult cut-point category boundaries."""

    def setUp(self):
        pass

    def thread_finished_callback(self):
        pass

    def test_cutpoints(self):
        """Each count sample must classify into its expected category."""
        cp = freedson1998.CutPoints
        # (expected category, representative counts including boundaries)
        cases = [
            (cp.SEDENTARY, (0, 10, 99)),
            (cp.LIGHT, (100, 500, 1951)),
            (cp.MODERATE, (1952, 3000, 5724)),
            (cp.VIGOROUS, (5726, 8000, 9498)),
            (cp.VERY_VIGOROUS, (9499, 100000, 100000000000)),
        ]
        for category, counts in cases:
            for count in counts:
                self.assertEqual(cp.classify(count), category)
import rospy
from pid import PID
import math
from yaw_controller import YawController
#GAS_DENSITY = 2.858
#ONE_MPH = 0.44704
MAX_V_MPS = 44.7 # Maximum speed in meters_per_second
# NOTE(review): not referenced in control(), which scales brake by a literal
# 100 instead -- confirm which torque scale is intended.
BRAKE_TORQUE_SCALE = 100000
# NOTE(review): unused in the visible code; the steering-ratio scaling that
# would use it is commented out in control().
YAW_SCALE = 8.2
class Controller(object):
    """Drive-by-wire controller turning twist commands into actuator values.

    Combines a longitudinal velocity PID (joint throttle/brake axis), a
    YawController for the kinematic steering target, and a second PID that
    tracks the reported steering feedback.
    """
    def __init__(self, **kwargs):
        # TODO: Implement
        # Vehicle/actuator limits supplied by the caller.
        max_lat_acc = kwargs['max_lat_acc']
        max_steer_angle = kwargs['max_steer_angle']
        steer_ratio = kwargs['steer_ratio']
        wheel_base = kwargs['wheel_base']
        self.accel_limit = kwargs['accel_limit']
        self.brake_deadband = kwargs['brake_deadband']
        self.decel_limit = kwargs['decel_limit']
        # Create Variable for the last update time
        self.last_update_time = None
        # Longitudinal PID; output clamped to the acceleration limits.
        self.pid_c = PID(11.2, 0.05, 0.3, -self.accel_limit, self.accel_limit)
        # Steering-tracking PID; output clamped to half the max steer angle.
        self.steer_pid = PID(0.8, 0.05, 0.2, -max_steer_angle/2, max_steer_angle/2)
        # Create a steering controller
        self.steer_c = YawController(wheel_base=wheel_base, steer_ratio=steer_ratio, min_speed = 0.0, max_lat_accel = max_lat_acc, max_steer_angle = max_steer_angle)
        # scale of max acceleration to deceleration
        # NOTE(review): brake_scale is computed but control() recomputes the
        # same ratio inline -- confirm this attribute is needed.
        self.brake_scale = self.decel_limit/(-self.accel_limit)
        pass
    def control(self, **kwargs):
        """Return (throttle, brake, steering) for the latest command.

        Expects kwargs: 'twist_command' and 'current_velocity' (objects with
        .twist.linear / .twist.angular) and 'steer_feedback'. Returns a zero
        tuple on the first call while the timestep is still unknown.
        """
        # TODO: Change the arg, kwarg list to suit your needs
        # Return throttle, brake, steer
        # Get the variables from the arguments
        twist_linear = kwargs['twist_command'].twist.linear
        twist_angular = kwargs['twist_command'].twist.angular
        # Deadband tiny commanded values to zero to avoid actuator jitter.
        if math.fabs(twist_linear.x) < 0.1:
            twist_linear.x = 0.
        if math.fabs(twist_angular.z) < 0.001:
            twist_angular.z = 0.
        cv_linear = kwargs['current_velocity'].twist.linear
        cv_angular = kwargs['current_velocity'].twist.angular
        steer_feedback = kwargs['steer_feedback']
        # Set the Error
        vel_err = twist_linear.x - cv_linear.x
        # Convert to fraction of v_max so pid output is directly
        # usable as input to throttle_cmd
        vel_err = vel_err/MAX_V_MPS
        # Set the present and target values
        target_v = twist_linear.x
        present_v = cv_linear.x
        target_w = twist_angular.z
        if self.last_update_time is not None:
            # Get current time
            time = rospy.get_time()
            # Compute timestep between updates and save
            # for next iteration
            dt = time - self.last_update_time
            self.last_update_time = time
            # if vehicle is trying to stop we want to reset the integral component
            # of the PID controllers so as to not oscillate around zero
            if present_v < 1.0:
                self.pid_c.reset()
                self.steer_pid.reset()
            # PID class returns output for throttle and
            # brake axes as a joint forward_backward axis
            forward_axis = self.pid_c.step(vel_err, dt)
            reverse_axis = -forward_axis*(self.decel_limit/(-self.accel_limit))
            # if forward axis is positive only then give any throttle
            throttle = min(max(0.0, forward_axis), 0.33)
            # Only apply brakes if the reverse axis value is large enough to supersede the deadband
            #TODO: Figure out how this tuning will work for the real vehicle
            brake = max(0.0, reverse_axis - self.brake_deadband)
            # Convert brake to Torque value since that is what the publisher expects
            # NOTE(review): literal 100 here, not BRAKE_TORQUE_SCALE (100000)
            # defined above -- confirm the intended scale.
            brake *= 100
            # get the steering value from the yaw controller
            steering = self.steer_c.get_steering(target_v, target_w, present_v)
            # update using steering pid loop
            steering = self.steer_pid.step(steering - steer_feedback, dt)
            # steering *= steer_ratios
            return throttle, brake, steering
        else:
            # Update the last_update time and send Zeroes tuple
            self.last_update_time = rospy.get_time()
            return 0., 0., 0.
|
# -*- coding: utf-8 -*-
import os
from flask import g
from flask import current_app
from werkzeug.utils import secure_filename
from app.api import api
from ..parsers.uploadparser import upload_parser
from sql import db
from sql.importdata import ImportData
from app.utils.response_utils import response_error
from app.utils.response_utils import response_success
from app.utils.upload_utils import allowed_file
from config.paths import API_UPLOAD
@api.route(API_UPLOAD, methods=['POST'])
def upload():
    """Handle a POST upload of a picture plus its info record.

    Saves the picture under UPLOAD_FOLDER and persists an ImportData row
    linking the info payload, the saved picture path and the current user.
    Returns an error response when the picture is missing or has a
    disallowed extension.
    """
    args = upload_parser.parse_args()
    picture_data = args.get("picture")
    info_data = args.get("info")
    # NOTE(review): assumes an auth layer set g.current_user upstream -- confirm.
    user_id = g.current_user.id
    if picture_data and allowed_file(picture_data.filename):
        # secure_filename strips path components / unsafe characters from
        # the client-supplied name before it touches the filesystem.
        picturename = secure_filename(picture_data.filename)
        picture_path = os.path.join(current_app.config['UPLOAD_FOLDER'], picturename)
        picture_data.save(picture_path)
        data = ImportData(info_data, picture_path, user_id)
        db.session.add(data)
        db.session.commit()
        return response_success()
    return response_error({"error":"Fail to upload data."})
|
class Solution:
    def isPalindrome(self, x):
        """
        :type x: int
        :rtype: bool
        """
        # Negative numbers are never palindromes (leading '-').
        if x < 0:
            return False
        # Build the digit-reversed value, then compare against the input.
        reversed_digits = 0
        remaining = int(x)
        while remaining > 0:
            reversed_digits = reversed_digits * 10 + remaining % 10
            remaining //= 10
        return reversed_digits == x
print(Solution().isPalindrome(-5555))
from django import forms
class ProductForm(forms.Form):
    """Form for creating/editing a product listing with an image upload."""
    name = forms.CharField(max_length=45)
    desc = forms.CharField(widget=forms.Textarea(attrs={'rows': 4}),
                           label="Description")
    category = forms.CharField(max_length=45)
    # Up to 9,999,999.99
    price = forms.DecimalField(max_digits=9, decimal_places=2)
    file = forms.ImageField()
|
#/usr/bin/env python
import numpy
import math
import matplotlib.pyplot as pyplot
n = 1024
deltat = 1.0
t = numpy.arange(n) * deltat
whitenoise = numpy.random.normal(0.0, 1.0, n/2+1) + 1j*numpy.random.normal(0.0, 1.0, n/2+1)
whitenoise[0] = 0
freq = numpy.arange(n/2 + 1) / deltat / n
tau = 5.0
rcfilter = 1.0 / (1.0 + 1j*2.0*math.pi*freq*tau)
whitenoise_filtered = whitenoise * rcfilter
print "Integral of PS: %f" % numpy.sum(numpy.abs(whitenoise_filtered)**2/n/n)
whitenoise_t = numpy.fft.irfft(whitenoise_filtered)
autocorr2 = numpy.correlate(whitenoise_t, whitenoise_t, 'full')[n-1:]
print "Integral of autocorrelation = %f" % numpy.sum(autocorr2/n)
pyplot.figure(1, figsize=(10.0, 12.0))
pyplot.subplot(411)
pyplot.plot(freq, numpy.abs(whitenoise)**2/n/n)
pyplot.xlabel("Frequency [Hz]")
pyplot.ylabel("Power spectrum [arb^2]")
#pyplot.subplots_adjust(hspace = 0.5)
pyplot.subplot(412)
pyplot.plot(freq, numpy.abs(whitenoise_filtered)**2/n/n)
pyplot.xlabel("Frequency [Hz]")
pyplot.ylabel("Filtered Power spectrum [arb^2]")
pyplot.subplot(413)
pyplot.plot(t, whitenoise_t)
pyplot.xlabel("Time [s]")
pyplot.ylabel("Band-limited time stream [arb]")
pyplot.subplot(414)
pyplot.plot(t, autocorr2/n)
pyplot.xlabel("Lag Time [s]")
pyplot.ylabel("Autocorrelation [arb^2]")
pyplot.savefig("q2.png")
|
import SocketServer
import time
from kafkaClass import Kafka_producer
class Server(SocketServer.BaseRequestHandler):
    """Per-connection handler: reassembles length-prefixed records from the
    TCP stream and forwards each complete record to Kafka.

    Wire format per record: "<length>\\t<payload...>" where <length> is the
    decimal byte count of the payload that follows the tab.
    """
    def handle(self):
        conn=self.request
        print(conn)
        # One producer per connection, publishing to topic 'HelloKafka'.
        kafkaproducer=Kafka_producer('master:9092,slave1:9092,slave2:9092','HelloKafka')
        error_flag=0
        data_r=''  # leftover bytes from the previous recv (partial record)
        while True:
            # NOTE(review): if the peer closes mid-record, recv returns ''
            # but data stays non-empty, so this loop never exits -- confirm.
            data=data_r+conn.recv(1500)
            if not data:
                print("no data!")
                break
            while True:
                # Split off the length field before the first tab.
                list1=data.split('\t',1)
                if not len(list1)==2:
                    # No tab yet: stash bytes and wait for more input.
                    data_r=data
                    break
                try:
                    row_l=int(list1[0])
                except:
                    # Malformed length field: log and abort the connection.
                    print("list1",list1)
                    print("list1[0]",list1[0])
                    error_flag=1
                    print("row_length transform error!")
                    break
                if len(list1[1])<row_l:
                    # Payload incomplete: reassemble and wait for more bytes.
                    data_r='\t'.join(list1)
                    break
                print(list1[1][0:row_l])
                kafkaproducer.senddata(list1[1][0:row_l])
                # Continue parsing any further records in the same buffer.
                data=list1[1][row_l:]
            if error_flag:
                break
        conn.close()
if __name__=='__main__':
    # Threaded TCP server: one Server.handle call per client connection.
    ip,port='192.168.138.201',8888
    server=SocketServer.ThreadingTCPServer((ip,port),Server)
    server.serve_forever()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
import sys
# Resolve package paths relative to this setup.py.
base_dir = os.path.dirname(__file__)
src_dir = os.path.join(base_dir, "src")
# Make src/ importable so package metadata can be read without installing.
sys.path.insert(0, src_dir)
about = {}
# Execute __about__.py to populate `about` with title/version/author fields.
with open(os.path.join(src_dir, "sktutor", "__about__.py")) as f:
    exec(f.read(), about)
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read()
# Runtime dependencies (minimum supported versions).
requirements = [
    'scipy>=1.1.0',
    'pandas>=1.1.5',
    'scikit-learn>=0.24.2',
    'patsy>=0.5.1'
]
test_requirements = [
    'pytest'
]
setup(
    name=about['__title__'],
    version=about['__version__'],
    description=about['__summary__'],
    # PyPI long description: README followed by the changelog.
    long_description=readme + '\n\n' + history,
    author=about['__author__'],
    author_email=about['__email__'],
    url=about['__uri__'],
    license=about['__license__'],
    packages=find_packages(where="src"),
    package_dir={"": "src"},
    include_package_data=True,
    install_requires=requirements,
    zip_safe=False,
    keywords='sktutor',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
    test_suite='tests',
    tests_require=test_requirements
)
|
# encoding: utf-8
import copy
class GreedyPlayer(object):
    """Reversi/Othello player choosing moves by alpha-beta minimax with a
    piece-count difference heuristic (Python 2)."""
    def __init__(self, name, color, board, rulebook):
        self.name = name
        self.color = color
        self.board = board
        self.rulebook = rulebook
    def totalPieces(self, board, color):
        """Heuristic: (my pieces) - (opponent pieces) on the given grid.

        Opponent cells are encoded as the negated color value.
        """
        total_my_color = 0
        total_opponent_color = 0
        for row in board:
            for column in row:
                if(column == color):
                    total_my_color += 1
                elif(column == -color):
                    total_opponent_color += 1
        return total_my_color - total_opponent_color
    def minimax(self, current_board, depth, color, maximizingPlayer, x, y, alpha, beta):
        """Alpha-beta minimax; returns (value, i, j) of the best move found.

        (x, y) carry the move that produced current_board so leaf nodes can
        report the move they evaluate.
        """
        # Leaf: depth exhausted or the board is full.
        if(depth == 0 or current_board.isBoardFull()):
            heuristic = self.totalPieces(current_board.board, color)
            return heuristic, x, y
        if(maximizingPlayer):
            bestValue = float("-inf")
            best_i = 0
            best_j = 0
            available_tiles = []
            valid_moves = self.rulebook.getValidMoves(color, current_board)
            # No legal move: score the current position instead.
            if not valid_moves:
                heuristic = self.totalPieces(current_board.board, color)
                return heuristic, x, y
            # Deduplicate target tiles (a tile may appear with several flip
            # directions).
            for move in valid_moves:
                if move[0] not in available_tiles:
                    available_tiles.append(move[0])
            # For each child of the node
            for tile in available_tiles:
                flip_directions = []
                # Collect every flip direction that lands on this tile
                for moves in valid_moves:
                    if (moves[0] == tile):
                        flip_directions.append(moves[1])
                # Child node created
                node = self.copyBoard(current_board)
                i, j = tile[0], tile[1]
                node.placePieceInPosition(color, i, j)
                node.flipPieces(color, tile, flip_directions)
                value, child_i, child_j = self.minimax(node, depth-1, color, False, i, j, alpha, beta)
                if(value > bestValue):
                    best_i = i
                    best_j = j
                    bestValue = value
                alpha = max(alpha, bestValue)
                # Alpha-beta cutoff.
                if(beta <= alpha):
                    break
            return bestValue, best_i, best_j
        else:
            bestValue = float("inf")
            best_i = 0
            best_j = 0
            available_tiles = []
            # Minimizing side plays the opponent color (-color).
            valid_moves = self.rulebook.getValidMoves(-color, current_board)
            if not valid_moves:
                heuristic = self.totalPieces(current_board.board, color)
                return heuristic, x, y
            for move in valid_moves:
                if move[0] not in available_tiles:
                    available_tiles.append(move[0])
            for tile in available_tiles:
                flip_directions = []
                # Collect every flip direction that lands on this tile
                for moves in valid_moves:
                    if (moves[0] == tile):
                        flip_directions.append(moves[1])
                # Child node created
                node = self.copyBoard(current_board)
                i, j = tile[0], tile[1]
                node.placePieceInPosition(-color, i, j)
                node.flipPieces(-color, tile, flip_directions)
                value, child_i, child_j = self.minimax(node, depth-1, color, True, i, j, alpha, beta)
                if(value < bestValue):
                    best_i = i
                    best_j = j
                    bestValue = value
                beta = min(beta, bestValue)
                # Alpha-beta cutoff.
                if(beta <= alpha):
                    break
            return bestValue, best_i, best_j
    def copyBoard(self, board):
        """Deep copy so minimax can mutate child boards safely."""
        return copy.deepcopy(board)
    def play(self):
        """Make one move on the real board (depth-6 search); returns [i, j]."""
        if(self.board.isBoardFull() or self.board.noMoreMoves() or self.rulebook.pass_turn == 2):
            self.rulebook.end_game = True
        else:
            valid_moves = self.rulebook.getValidMoves(self.color, self.board)
            if not valid_moves:
                print "No moves available, player must pass"
                self.rulebook.pass_turn = 0
            else:
                board_copy = self.copyBoard(self.board)
                bestValue, best_i, best_j = self.minimax(board_copy, 6, self.color, True, 0, 0, float("-inf"), float("inf"))
                greedy_move = [best_i, best_j]
                flip_directions = []
                for move in valid_moves:
                    if (greedy_move == move[0]):
                        flip_directions.append(move[1])
                self.board.placePieceInPosition(self.color, best_i, best_j)
                self.rulebook.pass_turn = 0
                self.board.flipPieces(self.color, greedy_move, flip_directions)
                return greedy_move
|
def returnTwo():
    """Return two values as a tuple (multiple-return demo)."""
    return 20,30
x,y = returnTwo()
print x,y
def mul(x,y):
    """Return the product of x and y."""
    return x*y
# Factorial
# reduce() folds mul over 1..10, i.e. computes 10!.
print reduce(mul, range(1,11))
def cubeFunc(x):
    '''
    :param x:
    :return:the cube of the value passed in
    '''
    return x * x * x
# map() applies cubeFunc to 1..10 (Python 2: returns a list).
print map(cubeFunc, range(1,11))
def myAdd(var1, var2 = 10):
    """Add two numbers; var2 defaults to 10."""
    return var1 + var2
print(myAdd(7))
print(myAdd(8,5))
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import pkgutil
import pytest
import sys
from twitter.common.java.class_file import ClassFile
import unittest2 as unittest
# Known golden file, com.google.protobuf.ByteString => ByteString.class
# named example_class because of science .gitignore
#
# resources don't properly work in python_tests yet, so this test is marked as expected fail.
_EXAMPLE_RESOURCE = 'resources/example_class'
@pytest.mark.skipif('sys.version_info >= (3,0)')
class ClassFileParserTest(unittest.TestCase):
    """Parses the golden class file (com.google.protobuf.ByteString) bundled
    as a resource and checks the header fields ClassFile extracts."""
    @classmethod
    def setUpClass(cls):
        # Load the class bytes once for the whole test case.
        cls._class_data = pkgutil.get_data('twitter.common.java', _EXAMPLE_RESOURCE)
        assert cls._class_data is not None
        cls._class_file = ClassFile(cls._class_data)
    def test_parsed(self):
        assert self._class_file is not None
    def test_parsed_maj_min(self):
        # Fix: renamed locals from maj/min -- `min` shadowed the builtin.
        major, minor = self._class_file.version()
        # Class-file major version 50 corresponds to Java SE 6.
        assert major == 50
        assert minor == 0
    def test_parsed_this_class(self):
        assert self._class_file.this_class() == 'com/google/protobuf/ByteString'
    def test_parsed_super_class(self):
        assert self._class_file.super_class() == 'java/lang/Object'
    def test_parsed_access(self):
        access_flags = self._class_file.access_flags()
        assert access_flags.public()
        assert access_flags.final()
        assert access_flags.super_()
        assert not access_flags.interface()
        assert not access_flags.abstract()
|
from lib.saga_service.job_submission_service import JobSubmissionService
import unittest
from mockito import Mock, verify, when, any
from mock import Mock as mock_Mock, patch, mock_open
class JobSubmissionServiceTest(unittest.TestCase):
    """Tests JobSubmissionService against mocked saga/job/filesystem objects.

    Covers job-service creation, job description wiring, remote-vs-local
    input/output file handling, and output printing.
    """
    def setUp(self):
        # Frozen time so generated tmp-file names are deterministic.
        self.current_time = 1123455.123
        self.connection_string = "ssh://localhost"
        self.setup_saga()
        self.setup_filesystem()
        self.setup_mocks()
        self.command = "grep"
        self.args = "root /etc/password"
        self.service = JobSubmissionService(self.saga, self.saga_job,
                                            self.filesystem)
    def test_creates_job_service(self):
        self.submit_job()
        verify(self.saga_job).Service(self.connection_string,
                                      session=self.session)
    def test_sets_job_command(self):
        self.submit_job()
        self.assertEqual(self.description.executable, "grep")
    def test_sets_job_arguments(self):
        self.submit_job()
        self.assertEqual(self.description.arguments, [self.args])
    def test_creates_job(self):
        self.submit_job()
        verify(self.saga_job_service).create_job(self.description)
    def test_runs_job(self):
        self.submit_job()
        verify(self.job).run()
        verify(self.job).wait()
    def test_sets_job_output(self):
        self.submit_job()
        self.assertEqual(self.description.output,
                         self.get_local_output_file())
    def test_sets_local_input_file(self):
        self.input_file = "/local/file/name"
        self.submit_job()
        self.assertEqual(self.description.input, self.input_file)
    def test_copies_remote_input_file(self):
        # Remote inputs are first copied to a local tmp path.
        self.given_input_file_is_remote()
        self.submit_job()
        self.assertEqual(self.description.input, self.tmp_input_file)
        verify(self.filesystem).copy_and_overwrite([self.input_file],
                                                   self.expected_dst)
    def test_copies_remote_output_file(self):
        self.given_output_file_is_remote()
        self.submit_job()
        src = self.connection_string + self.get_local_output_file()
        verify(self.filesystem).copy_and_overwrite([src], self.output_file)
    def test_sets_local_output_file(self):
        self.output_file = "/tmp/some-file"
        self.wont_copy_file()
        self.submit_job()
        self.assertEqual(self.description.output, self.output_file)
    def test_prints_output_if_no_output_file(self):
        self.submit_job()
        verify(self.filesystem).cat([self.connection_string + self.get_local_output_file()])
    def setup_saga(self):
        # Mockito stubs for the saga session / job service / job chain.
        self.job = Mock()
        self.description = mock_Mock()
        self.saga_job = Mock()
        self.saga_job_service = Mock()
        self.session = Mock()
        self.context = Mock()
        self.saga = Mock()
        when(self.saga_job).Description().thenReturn(self.description)
        when(self.saga).Context("ssh").thenReturn(self.context)
        when(self.saga).Session().thenReturn(self.session)
        when_service = when(self.saga_job).Service(self.connection_string,
                                                   session=self.session)
        when_service.thenReturn(self.saga_job_service)
        when_job = when(self.saga_job_service).create_job(self.description)
        when_job.thenReturn(self.job)
    def setup_filesystem(self):
        self.input_file = None
        self.output_file = None
        self.filesystem = Mock()
    def setup_mocks(self):
        self.mocked_time = mock_Mock(return_value=self.current_time)
    def submit_job(self):
        # Patch time.time so tmp-file suffixes match self.current_time.
        with patch('time.time', self.mocked_time):
            return self.service.submit_job(self.command, self.args,
                                           self.input_file,
                                           self.output_file,
                                           self.connection_string)
    def given_input_file_is_remote(self):
        self.file_name = "file"
        self.input_file = "ssh://some-host/path-to/" + self.file_name
        self.tmp_input_file = "/tmp/s210664-saga-tmp-input-file-%s" % self.current_time
        self.expected_dst = self.connection_string + self.tmp_input_file
    def wont_copy_file(self):
        # Guard: fail loudly if the service tries to copy anything.
        when_copy = when(self.filesystem).copy_and_overwrite(any(), any())
        when_copy.thenRaise(Exception("Shouldn't copy file"))
    def given_output_file_is_remote(self):
        file_name = "output-file"
        self.output_file = "ssh://some-host/path/to/" + file_name
    def get_local_output_file(self):
        return "/tmp/s210664-saga-tmp-output-file-%s" % self.current_time
|
'''
https://yandex.ru/tutor/subject/tag/problems/?ege_number_id=365&tag_id=19
'''
# Read n numbers; find the pair with the largest sum divisible by m,
# printing the larger element first.
n = int(input())
m = 120  # required divisor of the pair sum
a = []
b = 0  # larger element of the best pair found so far
c = 0  # smaller element of the best pair found so far
for i in range(n):
    a.append(int(input()))
# Scan all pairs (i < k) for the maximum sum divisible by m.
for i in range(n - 1):
    for k in range(i + 1, n):
        # NOTE(review): the `a[i] > a[k]` guard also skips pairs of EQUAL
        # values (e.g. 60 + 60 = 120) -- confirm against the task statement.
        if (a[i] + a[k]) % m == 0 and (a[i] + a[k]) > b + c and a[i] > a[k]:
            c = a[k]
            b = a[i]
print(b, c)
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import main
from json import loads
from qiita_core.qiita_settings import r_client
from qiita_pet.test.tornado_test_base import TestHandlerBase
class OAuth2BaseHandlerTests(TestHandlerBase):
    """Exercises bearer-token authentication on OAuth2-protected handlers
    using short-lived tokens seeded directly into redis."""
    def setUp(self):
        # Create client test authentication token
        self.client_token = 'SOMEAUTHTESTINGTOKENHERE2122'
        token_info = {
            'timestamp': '12/12/12 12:12:00',
            'client_id': 'test123123123',
            'grant_type': 'client'
        }
        # NOTE(review): hmset is deprecated in newer redis clients
        # (hset(..., mapping=...) is the replacement) -- confirm client version.
        r_client.hmset(self.client_token, token_info)
        # 5-second TTL keeps test tokens from leaking between runs.
        r_client.expire(self.client_token, 5)
        # Create username test authentication token
        self.user_token = 'SOMEAUTHTESTINGTOKENHEREUSERNAME'
        token_info = {
            'timestamp': '12/12/12 12:12:00',
            'client_id': 'testuser',
            'grant_type': 'password',
            'user': 'test@foo.bar'
        }
        r_client.hmset(self.user_token, token_info)
        r_client.expire(self.user_token, 5)
        # Create test access limit token
        self.user_rate_key = 'testuser_test@foo.bar_daily_limit'
        r_client.setex(self.user_rate_key, 5, 2)
        super(OAuth2BaseHandlerTests, self).setUp()
    def test_authenticate_header_client(self):
        """Client-grant token in the Authorization header is accepted."""
        obs = self.get('/qiita_db/artifacts/1/', headers={
            'Authorization': 'Bearer ' + self.client_token})
        self.assertEqual(obs.code, 200)
    def test_authenticate_header_username(self):
        """Password-grant token works and decrements the daily rate limit."""
        obs = self.get('/qiita_db/artifacts/1/', headers={
            'Authorization': 'Bearer ' + self.user_token})
        self.assertEqual(obs.code, 200)
        # Check rate limiting works
        self.assertEqual(int(r_client.get(self.user_rate_key)), 1)
        # Exhaust the limit and expect the daily-limit error.
        r_client.setex('testuser_test@foo.bar_daily_limit', 1, 0)
        obs = self.get('/qiita_db/artifacts/100/', headers={
            'Authorization': 'Bearer ' + self.user_token})
        exp = {'error': 'invalid_grant',
               'error_description': 'Oauth2 error: daily request limit reached'
               }
        self.assertEqual(loads(obs.body), exp)
    def test_authenticate_header_missing(self):
        """Requests without an Authorization header are rejected."""
        obs = self.get('/qiita_db/artifacts/100/')
        self.assertEqual(obs.code, 400)
        self.assertEqual(loads(obs.body), {
            'error': 'invalid_request',
            'error_description': 'Oauth2 error: invalid access token'})
    def test_authenticate_header_bad_token(self):
        """Unknown tokens are treated as timed out."""
        obs = self.get('/qiita_db/artifacts/100/', headers={
            'Authorization': 'Bearer BADTOKEN'})
        self.assertEqual(obs.code, 400)
        exp = {'error': 'invalid_grant',
               'error_description': 'Oauth2 error: token has timed out'}
        self.assertEqual(loads(obs.body), exp)
    def test_authenticate_header_bad_header_type(self):
        """Non-Bearer authorization schemes are rejected."""
        obs = self.get('/qiita_db/artifacts/100/', headers={
            'Authorization': 'WRONG ' + self.client_token})
        self.assertEqual(obs.code, 400)
        exp = {'error': 'invalid_grant',
               'error_description': 'Oauth2 error: invalid access token'}
        self.assertEqual(loads(obs.body), exp)
class OAuth2HandlerTests(TestHandlerBase):
    """Exercises the token-issuing endpoint for both grant types.

    Covers client-credential auth via Basic header and via POST fields,
    password-grant auth, and the error responses for bad/missing data.
    """
    def test_authenticate_client_header(self):
        # Authenticate using header
        obs = self.post(
            '/qiita_db/authenticate/', {'grant_type': 'client'}, {
                'Authorization': 'Basic MTluZGtPM29NS3NvQ2hqVlZXbHVGN1FreEhSZl'
                                 'loVEtTRmJBVnQ4SWhLN2daZ0RhTzQ6SjdGZlE3Q1FkT3'
                                 'h1S2hRQWYxZW9HZ0JBRTgxTnM4R3UzRUthV0ZtM0lPMk'
                                 'pLaEFtbUNXWnVhYmUwTzVNcDI4czE='})
        self.assertEqual(obs.code, 200)
        obs_body = loads(obs.body)
        exp = {'access_token': obs_body['access_token'],
               'token_type': 'Bearer',
               'expires_in': 3600}
        self.assertDictEqual(obs_body, exp)
        # Make sure token in system with proper ttl
        token = r_client.hgetall(obs_body['access_token'])
        exp = {
            b'timestamp': token[b'timestamp'],
            b'client_id': (b'19ndkO3oMKsoChjVVWluF7QkxHRfYhTKSFbAV'
                           b't8IhK7gZgDaO4'),
            b'grant_type': b'client'
        }
        self.assertDictEqual(token, exp)
        self.assertEqual(r_client.ttl(obs_body['access_token']), 3600)
    def test_authenticate_client_post(self):
        # Authenticate using post only
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'client',
                'client_id': '19ndkO3oMKsoChjVVWluF7QkxHRfYhTKSFbAVt8IhK7gZgDa'
                             'O4',
                'client_secret': 'J7FfQ7CQdOxuKhQAf1eoGgBAE81Ns8Gu3EKaWFm3IO2J'
                                 'KhAmmCWZuabe0O5Mp28s1'})
        self.assertEqual(obs.code, 200)
        obs_body = loads(obs.body)
        exp = {'access_token': obs_body['access_token'],
               'token_type': 'Bearer',
               'expires_in': 3600}
        self.assertDictEqual(obs_body, exp)
        # Make sure token in system with proper ttl
        token = r_client.hgetall(obs_body['access_token'])
        exp = {
            b'timestamp': token[b'timestamp'],
            b'client_id': (b'19ndkO3oMKsoChjVVWluF7QkxHRfYhTKSFbAVt8'
                           b'IhK7gZgDaO4'),
            b'grant_type': b'client'
        }
        self.assertDictEqual(token, exp)
        self.assertEqual(r_client.ttl(obs_body['access_token']), 3600)
    def test_authenticate_client_bad_base64_hash(self):
        # Authenticate using bad header (hash does not match a known client)
        obs = self.post(
            '/qiita_db/authenticate/', {'grant_type': 'client'}, {
                'Authorization': 'Basic MTluZGtPM29NS3NvQ2hqVlZXbHVGN1FreEhSZl'
                                 'loVEtTRmJBVnQ4SBADN2daZ0RhTzQ6SjdGZlE3Q1FkT3'
                                 'h1S2hRQWYxZW9HZ0JBRTgxTnM4R3UzRUthV0ZtM0lPMk'
                                 'pLaEFtbUNXWnVhYmUwTzVNcDI4czE='})
        self.assertEqual(obs.code, 400)
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_client',
               'error_description': 'Oauth2 error: invalid client information'}
        self.assertEqual(obs_body, exp)
    def test_authenticate_client_bad_header_base64_hash(self):
        # Non-Basic scheme on the Authorization header is rejected.
        obs = self.post(
            '/qiita_db/authenticate/', {'grant_type': 'client'}, {
                'Authorization': 'WRONG MTluZGtPM29NS3NvQ2hqVlZXbHVGN1FreEhSZl'
                                 'loVEtTRmJBVnQ4SWhLN2daZ0RhTzQ6SjdGZlE3Q1FkT3'
                                 'h1S2hRQWYxZW9HZ0JBRTgxTnM4R3UzRUthV0ZtM0lPMk'
                                 'pLaEFtbUNXWnVhYmUwTzVNcDI4czE='})
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_request',
               'error_description': 'Oauth2 error: invalid token type'}
        self.assertEqual(obs_body, exp)
    def test_authenticate_client_bad_client_id(self):
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'client',
                'client_id': 'BADdkO3oMKsoChjVVWluF7QkxHRfYhTKSFbAVt8IhK7gZgDa'
                             'O4',
                'client_secret': 'J7FfQ7CQdOxuKhQAf1eoGgBAE81Ns8Gu3EKaWFm3IO2J'
                                 'KhAmmCWZuabe0O5Mp28s1'})
        self.assertEqual(obs.code, 400)
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_client',
               'error_description': 'Oauth2 error: invalid client information'}
        self.assertEqual(obs_body, exp)
    def test_authenticate_client_bad_client_secret(self):
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'client',
                'client_id': '19ndkO3oMKsoChjVVWluF7QkxHRfYhTKSFbAVt8IhK7gZgDa'
                             'O4',
                'client_secret': 'BADfQ7CQdOxuKhQAf1eoGgBAE81Ns8Gu3EKaWFm3IO2J'
                                 'KhAmmCWZuabe0O5Mp28s1'})
        self.assertEqual(obs.code, 400)
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_client',
               'error_description': 'Oauth2 error: invalid client information'}
        self.assertEqual(obs_body, exp)
    def test_authenticate_client_missing_info(self):
        # client_secret omitted entirely.
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'client',
                'client_id': '19ndkO3oMKsoChjVVWluF7QkxHRfYhTKSFbAVt8IhK7gZgDa'
                             'O4'})
        self.assertEqual(obs.code, 400)
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_request',
               'error_description': 'Oauth2 error: missing client information'}
        self.assertEqual(obs_body, exp)
    def test_authenticate_password(self):
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'password',
                'client_id': 'DWelYzEYJYcZ4wlqUp0bHGXojrvZVz0CNBJvOqUKcrPQ5p4U'
                             'qE',
                'username': 'test@foo.bar',
                'password': 'password'})
        self.assertEqual(obs.code, 200)
        obs_body = loads(obs.body)
        exp = {'access_token': obs_body['access_token'],
               'token_type': 'Bearer',
               'expires_in': 3600}
        self.assertDictEqual(obs_body, exp)
        # Make sure token in system with proper ttl
        token = r_client.hgetall(obs_body['access_token'])
        exp = {b'timestamp': token[b'timestamp'],
               b'user': b'test@foo.bar',
               b'client_id': token[b'client_id'],
               b'grant_type': b'password'}
        self.assertDictEqual(token, exp)
        self.assertEqual(r_client.ttl(obs_body['access_token']), 3600)
    def test_authenticate_password_non_user_client_id_header(self):
        # Using a client-grant client_id for a password grant fails.
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'password',
                'client_id': '19ndkO3oMKsoChjVVWluF7QkxHRfYhTKSFbAVt8IhK7gZgDa'
                             'O4',
                'username': 'test@foo.bar',
                'password': 'password'})
        self.assertEqual(obs.code, 400)
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_client',
               'error_description': 'Oauth2 error: invalid client information'}
        self.assertEqual(obs_body, exp)
    def test_authenticate_password_non_user_client_id(self):
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'password',
                'client_id': 'WAAAAAAAAAARG',
                'username': 'test@foo.bar',
                'password': 'password'})
        self.assertEqual(obs.code, 400)
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_client',
               'error_description': 'Oauth2 error: invalid client information'}
        self.assertEqual(obs_body, exp)
    def test_authenticate_password_bad_user_id(self):
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'password',
                'client_id': 'DWelYzEYJYcZ4wlqUp0bHGXojrvZVz0CNBJvOqUKcrPQ5p4U'
                             'qE',
                'username': 'BROKEN@FAKE.COM',
                'password': 'password'})
        self.assertEqual(obs.code, 400)
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_client',
               'error_description': 'Oauth2 error: invalid user information'}
        self.assertEqual(obs_body, exp)
    def test_authenticate_password_bad_password(self):
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'password',
                'client_id': 'DWelYzEYJYcZ4wlqUp0bHGXojrvZVz0CNBJvOqUKcrPQ5p4U'
                             'qE',
                'username': 'test@foo.bar',
                'password': 'NOTAReALPASSworD'})
        self.assertEqual(obs.code, 400)
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_client',
               'error_description': 'Oauth2 error: invalid user information'}
        self.assertEqual(obs_body, exp)
    def test_authenticate_password_missing_info(self):
        # password omitted entirely.
        obs = self.post(
            '/qiita_db/authenticate/', {
                'grant_type': 'password',
                'client_id': 'DWelYzEYJYcZ4wlqUp0bHGXojrvZVz0CNBJvOqUKcrPQ5p4U'
                             'qE',
                'username': 'test@foo.bar'})
        self.assertEqual(obs.code, 400)
        obs_body = loads(obs.body)
        exp = {'error': 'invalid_request',
               'error_description': 'Oauth2 error: missing user information'}
        self.assertEqual(obs_body, exp)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    main()
|
class Operators:
    """Demonstrate that Python does not support method overloading.

    The original class defined ``num`` twice; Python keeps only the last
    definition, so the two-argument call ``num(6, 4)`` raised TypeError.
    A single ``num`` with an optional second argument supports both call
    shapes while preserving the demonstrated behavior.
    """

    def num(self, n1, n2=None):
        """Print ``n1 + n2`` when two values are given, otherwise print ``n1``.

        :param n1: first operand (or the single value to print).
        :param n2: optional second operand; when omitted, ``n1`` is printed.
        """
        if n2 is None:
            # One-argument form (the surviving definition in the original).
            self.n3 = n1
            print(self.n3)
        else:
            # Two-argument form (the shadowed definition in the original).
            self.n1 = n1
            self.n2 = n2
            print(self.n1 + self.n2)
# Demo: Python keeps only the latest definition of ``num``, so method
# overloading does not happen; with the duplicated definitions above the
# two-argument call below raises TypeError.
np=Operators()
np.num(6,4)
# method overloading does not happen in python
# the latest method definition wins
# same method name, different number of parameters
from LCA import * # The code to test
import unittest # The test framework
class TestLCA(unittest.TestCase):
    """Structural tests for the BST built by LCA's insert_node.

    A shared tree is built once at class-definition time.  Inserting
    2, 3, 1 (in this order) yields root 2 with left child 1 and right
    child 3 — the insertion order determines the shape being asserted.
    """
    tree = BST()
    tree.insert_node(2)
    tree.insert_node(3)
    tree.insert_node(1)

    def test_search(self):
        # search must locate an existing key and return its node.
        self.assertEqual(self.tree.search(3).data,3)

    def test_root(self):
        self.assertEqual(self.tree.root.data, 2)

    def test_root_left(self):
        self.assertEqual(self.tree.root.left.data, 1)

    def test_root_right(self):
        self.assertEqual(self.tree.root.right.data, 3)
|
import tornado.web
from content import PAGES
def page_controller(handler_instance, path):
    """Serve the page registered for *path*, or a JSON 404 payload.

    :param handler_instance: tornado request handler used for output.
    :param path: lookup key into the PAGES registry.
    """
    if path not in PAGES:
        handler_instance.set_status(404)
        handler_instance.write({
            'message': 'A resource was not found for this path.'
        })
        return
    handler_instance.write(PAGES[path].serialize())
|
from collections import deque
def deal_sequence(n):
    """Return the final card order for the appendleft/rotate dealing trick.

    Cards n..1 are pushed onto the left of a deque and the deque is
    rotated by the card's value after each push — exactly the inline loop
    of the original script, extracted so it can be unit-tested.

    :param n: number of cards (non-negative int).
    :return: list of ints, the resulting deck order.
    """
    cards = deque()
    for card in range(n, 0, -1):
        cards.appendleft(card)
        cards.rotate(card)
    return list(cards)


if __name__ == "__main__":
    T = int(input())  # number of test cases
    for _ in range(T):
        n = int(input())
        print(" ".join(map(str, deal_sequence(n))))
|
from __future__ import print_function
__author__ = 'Roberto Estrada (a.k.a) paridin'
import urllib2
import sys, os
from subprocess import PIPE, Popen
class Pylocate:
    """Publish this machine's public IP by rewriting and uploading an HTML page.

    The public IP is fetched from a remote echo service; whenever it differs
    from the previously recorded one, ``source/index.html`` is patched and
    can be re-uploaded to an FTP server with curl.

    :param port: port on which the home server listens (default '80').
    """

    def __init__(self, port='80'):
        self.url = 'http://sicam.mx/reportes/php/ip.php'
        self.SOURCE_DIR = os.getcwd() + '/source'
        self.logFile = os.getcwd() + '/log'
        self.port = port
        self.log = True  # False disables writing the curl log file
        # Default so a failed lookup does not leave the attribute missing
        # (the original raised AttributeError later when urlopen failed).
        self.ip = ''
        try:
            self.ip = urllib2.urlopen(self.url).read()
        except Exception:
            print(sys.exc_info()[1])

    def get_last_ip(self):
        """Return the last recorded ip:port, or '' when none was saved yet."""
        # Default to '' so run() falls back to first-time setup when the
        # file is missing (the original crashed with AttributeError here).
        self.last_ip = ''
        try:
            with open('source/lastip.txt') as f:
                self.last_ip = f.read()
        except Exception:
            print(sys.exc_info()[1])
        return self.last_ip

    def replaceAll(self, file, search, replace):
        """Rewrite *file* in place, substituting *search* with *replace*.

        Every line is echoed back (fileinput's inplace mode redirects
        stdout into the file), with matching lines substituted first.
        """
        import fileinput
        for line in fileinput.input(file, inplace=1):
            if search in line:
                line = line.replace(search, replace)
            sys.stdout.write(line)

    def run(self):
        """Record the IP on the first run, otherwise refresh index.html."""
        if self.get_last_ip() == '':
            # First run: just remember the current public ip:port.
            try:
                self.write_file_ip()
            except Exception:
                print(sys.exc_info()[1])
        else:
            try:
                self.write_html()
            except Exception:
                print(sys.exc_info()[1])

    def uploadToServer(self, ftp, user, password, filename='index.html'):
        """Upload *filename* from the source dir to *ftp* via curl.

        :param ftp: ftp:// URL of the destination directory.
        :param user: FTP username.
        :param password: FTP password.
        :param filename: file inside SOURCE_DIR to upload.
        """
        cmd = "curl -T '%s/%s' %s --user %s:%s" % (
            self.SOURCE_DIR, filename, ftp, user, password)
        print("Intentando subir el archivo: %s al ftp." % filename)
        try:
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        except OSError as err:
            # curl could not be started at all; `p` does not exist, so the
            # original would have crashed below with NameError. Bail out.
            print(err)
            return
        stderr_output = p.communicate()[1]
        if self.log:
            with open(self.logFile + '/upload-file.log', 'w+') as f:
                f.write(stderr_output)
        if p.returncode == 0:
            print("Se subio el archivo: %s al ftp." % filename)
        else:
            print("No fue posible subir el archivo: %s al ftp." % filename)

    def write_file_ip(self):
        """Persist the current public ip:port to source/lastip.txt."""
        try:
            with open('%s/lastip.txt' % self.SOURCE_DIR, 'w') as f:
                f.write("%s:%s" % (self.ip, self.port))
        except Exception:
            print(sys.exc_info()[1])

    def write_html(self):
        """Patch index.html with the new ip:port when it changed.

        When the address is unchanged the template placeholder CHANGE_IP is
        substituted instead, acting as first-time setup of the page.
        """
        lip = self.last_ip
        nip = "%s:%s" % (self.ip, self.port)
        path = "%s/index.html" % self.SOURCE_DIR
        if lip != nip:
            try:
                self.replaceAll(path, "%s" % lip, nip)
                self.write_file_ip()
            except Exception:
                print(sys.exc_info()[1])
        else:
            try:
                self.replaceAll(path, "CHANGE_IP", nip)
            except Exception:
                print(sys.exc_info()[1])
if __name__ == '__main__':
    # Edit these values with your personal FTP credentials.  The original
    # last line carried a stray '|' extraction artifact that broke parsing;
    # it has been removed.
    ftp = 'ftp://your_ftp/dir_where_you_want_upload/'
    ftp_user = 'Your_ftp_user'
    ftp_password = 'Your_ftp_pass'
    # Specify the port on which your home server listens.
    p = Pylocate(port='8000')
    p.run()
    p.uploadToServer(ftp, ftp_user, ftp_password)
import requests
import os
import hashlib
import subprocess
import json
import pathlib
import urllib
import shutil
import time
import functools
from server_automation import utilities
from server_automation import logger
from server_automation import control
# Mojang's master list of all published Minecraft versions.
VERSION_MANIFEST_URL = "https://launchermeta.mojang.com/mc/game/version_manifest.json"
# The path to the backups directory
BACKUPS_DIRNAME = 'world_backups'
BACKUPS_DIR = utilities.PACKAGE_PARENT_DIR / BACKUPS_DIRNAME
# The path to the server's world directory
WORLD_DIRNAME = 'world'
WORLD_PATH = utilities.SERVER_ROOT / WORLD_DIRNAME
@functools.lru_cache()
def get_version_manifest():
    """Fetch Mojang's version manifest as a dict (cached after first call)."""
    return requests.get(url=VERSION_MANIFEST_URL).json()
@functools.lru_cache()
def get_latest_version(updateToSnapShot = False):
    """Return the id of the newest Minecraft version (cached).

    :param updateToSnapShot: when True, report the latest snapshot
        instead of the latest stable release.
    """
    logger.log('Identifying the latest version of server.jar available...', 'update')
    channel = 'snapshot' if updateToSnapShot else 'release'
    latest_version = get_version_manifest()['latest'][channel]
    logger.log('Latest version is ' + latest_version + '.', 'update')
    return latest_version
@functools.lru_cache()
def get_current_version():
    """Return the version name inside the installed server.jar, or None.

    server.jar is a zip archive; the embedded version.json names the build.
    """
    logger.log('Identifying the current version of server.jar...', 'update')
    if not os.path.exists(utilities.SERVER_JAR_PATH):
        # Nothing installed yet.
        logger.log('Cannot find server.jar.', 'update')
        return None
    raw = subprocess.check_output(
        ['unzip', '-p', utilities.SERVER_JAR_PATH, 'version.json'])
    current_version = json.loads(raw.decode('UTF-8'))['name']
    logger.log('Current version of server.jar is {}.'.format(current_version), 'update')
    return current_version
def download_server_jar(version_id = None):
    """Download the given (default: latest) version of server.jar.

    :param version_id: manifest version id; when None, the value from
        get_latest_version() is used.
    :raises ValueError: if version_id is not present in the manifest
        (the original raised a bare IndexError from ``[...][0]``).
    """
    if version_id is None:  # `is None`, not `== None` (PEP 8)
        version_id = get_latest_version()
    logger.log('Downloading version ' + version_id + ' of server.jar...', 'update')
    # Find the manifest entry for this version; it links to the jar metadata.
    matches = [version for version in get_version_manifest()['versions']
               if version['id'] == version_id]
    if not matches:
        raise ValueError('Unknown Minecraft version: {}'.format(version_id))
    jsonlink = matches[0]['url']
    jardata = requests.get(jsonlink).json()
    download_link = jardata['downloads']['server']['url']
    response = requests.get(download_link)
    with open(utilities.SERVER_JAR_PATH, 'wb') as f:
        f.write(response.content)
    logger.log('Downloaded server.jar.', 'update')
def save_world_backup():
    """Copy the server's world directory into a timestamped backup directory."""
    logger.log('Saving a backup of ' + WORLD_DIRNAME + ' to ' + BACKUPS_DIRNAME + '...', 'update')
    # Create the backups directory on first use.
    if not os.path.exists(BACKUPS_DIR):
        logger.log('Backups directory world_backups did not exist. Creating it...')
        os.makedirs(BACKUPS_DIR)
    # Name the backup after the world plus a version/time stamp.
    destination = BACKUPS_DIR / (WORLD_DIRNAME + '_' + utilities.version_and_time_stamp())
    shutil.copytree(WORLD_PATH, destination)
    logger.log('Backup saved.', 'update')
def update_check():
    """Update server.jar to the latest version when it is out of date."""
    logger.log('Update Check', 'update', format='header')
    if get_latest_version() == get_current_version():
        logger.log('Server is already up to date.', 'update')
        return
    logger.log('Server is not up to date! Performing update now...', 'update')
    # The server must be stopped before its jar can be replaced.
    control.stop_server()
    download_server_jar()
    logger.log('Server is now up to date.', 'update')
    # Restart now that server.jar has been replaced.
    # TODO: Prevent two server_automation calls interfering with eachother
    control.start_server()
# TODO: Replace these params with global funcs that store a save |
from django.test.client import Client
from django.http import HttpRequest
from django.test import TestCase
from django.core.urlresolvers import resolve, reverse
from locations.views import home, datacenter_create
class TestExamples(TestCase):
    """Sanity check that the test runner itself works."""

    def test_bad_maths(self):
        # Despite the name, the assertion is correct; it only proves
        # that the test machinery runs.
        self.assertEqual(3, 1+2)
class TestLocationsViews(TestCase):
    """URL-resolution and GET/POST smoke tests for the locations app."""

    def test_root_url_resolves(self):
        """ Check that url '/' resolves to home view """
        found = resolve('/')
        self.assertEqual(found.func, home)

    def test_admin_url(self):
        # '/admin/' should be served by the admin application.
        found = resolve('/admin/')
        self.assertEqual(found.app_name, 'admin')

    def test_all_views_resolve(self):
        #tuple of tuples... (View name, URL name to be tested against). A resolve on the reverse of the view name should result in the URL name.
        view_list = (('locations.views.datacenter_create', 'locations_datacenter_create'),
                     ('locations.views.cage_create', 'locations_cage_create'),
                     ('locations.views.cagerow_create', 'locations_cagerow_create'),
                     ('locations.views.rack_create', 'locations_rack_create'),)
        #todo: the views below require arguments... need to fix the function to send arguments to test these views
        # ('locations.views.datacenter_by_id', 'locations_datacenter_view_by_id'),
        # ('locations.views.cage_by_id', 'locations_cage_view_by_id'),
        # ('locations.views.cagerow_by_id', 'locations_cagerow_view_by_id'),
        # ('locations.views.rack_by_id', 'locations_rack_view_by_id'))
        for item in view_list:
            print "testing ", item
            url = reverse(item[0])
            response = resolve(url)
            self.assertEqual(response.url_name, item[1])
            self.assertEqual(response.view_name, item[1])

    def test_datacenter_view_get(self):
        # A plain GET of the datacenter-create view should render fine.
        url = reverse('locations.views.datacenter_create')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_all_views_get(self):
        # Every create view should answer a GET with 200.
        view_list = ('locations.views.datacenter_create', 'locations.views.cage_create', 'locations.views.cagerow_create',
                     'locations.views.rack_create')
        for item in view_list:
            print "testing ", item
            url = reverse(item)
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)

    def test_datacenter_view_post_create_datacenter(self):
        # POSTing a minimal form should create a datacenter.
        url = reverse('locations.views.datacenter_create')
        c = Client()
        response = c.post(url, dict(name='test datacenter', description=' a test datacenter'))
        #todo actually do an assertion here.....
        print response.status_code
|
def main(clientesOrdenadosHorarioTermino):
    """Greedy interval scheduling over clients sorted by finish time.

    :param clientesOrdenadosHorarioTermino: iterable of (id, info) pairs,
        sorted by finish time; info carries 'hInicioInt'/'hTerminoInt'
        (start/end times) plus display strings.
    :return: dict of the confirmed (non-overlapping) visits, keyed by id.
    """
    horariovisita = -1000  # finish time of the last accepted visit
    numVisita = 0
    confirmados = {}
    for cliente in clientesOrdenadosHorarioTermino:
        # Accept the client only if it starts after the previous visit ends.
        if(horariovisita <= int(cliente[1]['hInicioInt'])):
            # BUG FIX: cast to int here too. The original stored the raw
            # value, so a string 'hTerminoInt' made the next iteration's
            # comparison fail (str vs int).
            horariovisita = int(cliente[1]['hTerminoInt'])
            numVisita += 1
            confirmados[cliente[0]] = cliente[1]
            print("cliente ", numVisita)
            print(cliente[0])
            print("Horário de início da visita: " + cliente[1]['hInicioString'] + ":" + cliente[1]['mInicioString'])
    return confirmados
from django.urls import path, include
from hod import views
# Routes for the HOD (head of department) app; names are used by reverse().
urlpatterns = [
    path('', views.index, name="hod_index"),
    path('reservations',views.room_reservations,name="hod_room_reservations"),
    path('reservations/api',views.events,name="hod_events_api"),
    path('leaves',views.leave_history,name="leave_history"),
    path('approveleaves/', views.get_leaves, name="leaves"),
    path('approveods/',views.get_ods,name = "approve_ods")
]
|
# Define a function overlapping() that takes two lists and returns True if they have at least one member in
# common, False otherwise. You may use your is_member() function, or the in operator, but for the sake
# of the exercise, you should (also) write it using two nested for loops
def overlapping(list1=None, list2=None):
    """Return True if the two lists share at least one element, else False.

    The exercise asks for a function of two lists; optional parameters
    (defaulting to the original hard-coded demo values) keep the old
    zero-argument call working.  The original also fell through and
    returned None when nothing matched — it now returns False explicitly.
    """
    if list1 is None:
        list1 = [1, 2, 3, 4, 5, 6, 7, 8]
    if list2 is None:
        list2 = [2, 7, 5, 4, 3, 0]
    # Two nested for loops on purpose, per the exercise statement.
    for i in range(len(list1)):
        for j in range(len(list2)):
            if list1[i] == list2[j]:
                return True
    return False


print(overlapping())
def overlap():
    """Return True if the two demo lists share an element, else False.

    Fixes in the original: it fell through returning None on no match,
    and compared via ``a.__eq__(b)`` — which can return NotImplemented
    (truthy!) for mixed types; the ``==`` operator is the correct form.
    """
    list3 = [3, 4, 5, 6, 7, 8, 9]
    list4 = [12, 3, 2, 55, 66, 77, 88]
    for i in range(len(list3)):
        for j in range(len(list4)):
            if list3[i] == list4[j]:
                return True
    return False
def overlap2():
    """Membership-operator version of the overlap check.

    Returns True if the demo lists share an element, else False (the
    original fell through and returned None).
    """
    list5 = [1, 2, 3]
    list6 = [2, 5, 4]
    for i in list5:
        if i in list6:
            return True
    return False


print(overlap2())
# Prints True: the demo lists in overlap() share several elements.
print(overlap())
|
__author__ = 'mgarza'
# coding=UTF-8
import curses, logging, sys, subprocess, threading, time, math, random
import ui, JurassicParkTemplate, ending
class Gameplay(object):
"""
This class holds all the mechanics for playing the Jurassic Park Shooting Adventure
"""
    def __init__(self, screen, file='JurassicParkObjects.xml'):
        """Load the game world from *file* and set up display and player state.

        :param screen: curses screen handed to ui.Display.
        :param file: XML file describing rooms, player, and enemies.
        """
        logging.basicConfig(filename='engine.log', level=logging.DEBUG)
        self.display = ui.Display(screen)
        self._game = self._grab_game_objects(file)
        # Map room id -> index into self._game.room for O(1) room changes.
        self.rooms = {room.attrs['id']: index for (index, room) in enumerate(self._game.room)}
        self.current_room_id = self._game.player[0].attrs['position']
        self._current_room_index = int(self.rooms[self.current_room_id])
        self._current_room = self._game.room[self._current_room_index]
        self._treasures, self._doors, self._movement_calls, = {}, {}, {}
        self.player_health = self._set_health()
        self.inventory = [item for item in self._game.player[0].item]
        self.separate_line_y = False
        # The integers below are the cell values used when drawing the map —
        # presumably curses chtype codes (glyph plus attributes).
        # TODO(review): confirm against ui.Display.add_char.
        self.player_piece, self.replace_space = 2097728, 1049390
        self.treasure_symbol, self.door_symbol, self.interactive_item = 2097444, 2097477, 2097513
        self.enemy_V, self.enemy_D, self.enemy_C = 2098774, 2098756, 2098755
        self._potion_power = 25  # health restored per potion (see _heal)
        self.facing_direction = 'right'
        self.game_status = 'alive'  # 'alive', 'dead', or 'win<N>'
        self.weapon_name = ''
def change_game_status(self, status):
self.game_status = status
def _grab_game_objects(self, file, desc=JurassicParkTemplate):
"""Grabs all game objects from xml provided, returns game object"""
with open(file) as f:
xml_data = f.read()
successful, game = desc.obj_wrapper(xml_data)
if not successful:
raise game
return game
def _set_health(self):
"""Sets player's health to max for the player's level"""
for index, level in enumerate(self._game.player[0].level):
if level.attrs['level'] == str(self._game.player[0].attrs['level']):
return int(level.attrs['max_health'])
def _update_player_stats(self, health_diff=0, defense_diff=0, strength_diff=0, experience_diff=0):
'''Updates player stats and info on screen returns string if leveling occured'''
out_message = ''
self._game.player[0].attrs['experience'] = int(self._game.player[0].attrs['experience']) + int(experience_diff)
for index, level in enumerate(self._game.player[0].level):
if level.attrs['level'] == str(self._game.player[0].attrs['level']):
new_health = int(self.player_health) + int(health_diff)
self.player_health = new_health if new_health <= int(level.attrs['max_health']) else int(
level.attrs['max_health'])
level.attrs['defense'] = int(level.attrs['defense']) + int(defense_diff)
level.attrs['strength'] = int(level.attrs['strength']) + int(strength_diff)
exp_points = int(level.attrs['exp_to_next'])
if int(self._game.player[0].attrs['experience']) >= exp_points:
thread = threading.Thread(target=subprocess.call, args=[['afplay', 'Sound/level.wav']])
thread.deamon = True
thread.start()
self._game.player[0].attrs['level'] = int(self._game.player[0].attrs['level']) + 1
self.player_health = self._game.player[0].level[index + 1].attrs['max_health']
out_message = 'Huzzah you have leveled up to level ' + str(self._game.player[0].attrs['level'])
num_potions = len([item.attrs['name'] for item in self.inventory if item.attrs['type'] == 'potion'])
weapons = [[item.attrs['name'], item.attrs['strength']] for item in self.inventory if
item.attrs['type'] == 'weapon']
weapon_strength = 0
for weapon in weapons:
if int(weapon[1]) >= weapon_strength:
weapon_strength = int(weapon[1])
self.weapon_name = weapon[0]
player_stats = 'Health: ' + str(self.player_health) + ' Potions: ' + str(num_potions) + ' Level: ' + str(
self._game.player[0].attrs['level'])
player_stats_cont = 'Experience Points: ' + str(
self._game.player[0].attrs['experience']) + ' Weapon: ' + self.weapon_name
text = [player_stats, player_stats_cont]
self.display.text_to_dialog(text, 5)
if self.player_health <= 0:
self.game_status = 'dead'
return out_message
def _player_stats(self):
'''Returns player's health strength experience and defense'''
player_stats = []
player_level = str(self._game.player[0].attrs['level'])
weapon_strength = 0
weapons = [[item.attrs['name'], item.attrs['strength']] for item in self.inventory if
item.attrs['type'] == 'weapon']
for weapon in weapons:
if int(weapon[1]) >= weapon_strength:
weapon_strength = int(weapon[1])
for level in self._game.player[0].level:
if level.attrs['level'] == player_level:
max_health = level.attrs['max_health']
defense = level.attrs['defense']
strength = int(level.attrs['strength']) + weapon_strength
exp_to_next = level.attrs['exp_to_next']
player_stats = [max_health, defense, strength, exp_to_next]
return player_stats
    def _enemy_stats(self, enemy):
        '''Return [health, coord, strength, exp_points, speed, symbol] for *enemy*.

        Looks up the enemy's species entry in the game XML, then the stats
        row matching the enemy instance's level.
        '''
        enemy_stats = []
        enemy_y, enemy_x = enemy.attrs['coordinates'].split(',')
        enemy_coord = [int(enemy_y), int(enemy_x)]
        # Pick the drawing symbol for the species.
        if enemy.attrs['type'] == 'Compsognathus':
            enemy_symbol = self.enemy_C
        elif enemy.attrs['type'] == 'Dilophosaurus':
            enemy_symbol = self.enemy_D
        else:
            enemy_symbol = self.enemy_V
        for enemy_group in self._game.enemy:
            if enemy_group.attrs['name'] == enemy.attrs['type']:
                enemy_speed = enemy_group.attrs['movement_interval']
                for level in enemy_group.level:
                    if level.attrs['level'] == enemy.attrs['level']:
                        enemy_health = level.attrs['health']
                        enemy_strength = level.attrs['strength']
                        enemy_exp_points = level.attrs['exp_points']
                        enemy_stats = [enemy_health, enemy_coord, enemy_strength, enemy_exp_points, enemy_speed,
                                       enemy_symbol]
        return enemy_stats
def _check_inventory(self, object):
"""Checks inventory for items that unhide doors or treasures"""
if object.attrs['hidden'] == 'True':
requirement = object.attrs['unhide'].split(',')
inventory = [item.attrs['name'] for item in self.inventory]
if set(requirement).issubset(set(inventory)):
thread = threading.Thread(target=subprocess.call, args=[['afplay', 'Sound/door_unlock.wav']])
thread.deamon = True
thread.start()
object.attrs['hidden'] = 'False'
    def _setup_room(self, player_position):
        """Populate the freshly-drawn current room: treasures, doors, enemies.

        :param player_position: [y, x] of the player in the new room.
        """
        self._movement_calls = {}
        self._treasures = {treasure.attrs['id']: [treasure.attrs['coordinates'], indx] for indx, treasure in
                           enumerate(self._current_room.treasure) if treasure}
        self._doors = {door.attrs['id']: [door.attrs['coordinates'], indx] for indx, door in
                       enumerate(self._current_room.door) if door}
        self.shots = 0  # per-room bullet id counter (see _process_input)
        for treasure_id, treasure_info in self._treasures.items():  # Add treasures to room
            treasure_coord, indx = treasure_info
            y, x = treasure_coord.split(',')
            coord = [int(y), int(x)]
            self._check_inventory(self._current_room.treasure[indx])
            hidden = self._current_room.treasure[indx].attrs['hidden']
            id = self._current_room.treasure[indx].attrs['id']
            # Interactive props (bones/computer) get their own symbol; full
            # chests get the treasure symbol; anything else is blank floor.
            if 'bones' in id and hidden == 'False' or 'computer' in id and hidden == 'False':
                self._treasures[treasure_id] = [self.display.add_char(coord, self.interactive_item, modify=True), indx]
            elif self._current_room.treasure[indx].attrs['status'] == 'full' and hidden == 'False':
                self._treasures[treasure_id] = [self.display.add_char(coord, self.treasure_symbol, modify=True), indx]
            else:
                self._treasures[treasure_id] = [self.display.add_char(coord, self.replace_space, modify=True), indx]
        for door_id, door_info in self._doors.items():  # Add doors to room
            door_coord, indx = door_info
            y, x = door_coord.split(',')
            coord = [int(y), int(x)]
            self._check_inventory(self._current_room.door[indx])
            hidden = self._current_room.door[indx].attrs['hidden']
            if hidden == 'False':
                self._doors[door_id] = [self.display.add_char(coord, self.door_symbol, modify=True), indx]
            else:
                self._doors[door_id] = [self.display.add_char(coord, self.replace_space, modify=True), indx]
        update_time = time.time() + 0.5
        # Place each enemy and register its movement callback.
        for enemy in self._current_room.enemy:
            enemy_id = 'enemy.' + enemy.attrs['id']
            enemy_health, enemy_coord, enemy_strength, enemy_exp_points, enemy_speed, enemy_symbol = self._enemy_stats(
                enemy)
            new_coord = self.display.add_char(enemy_coord, enemy_symbol, modify=True)
            self._enemy_movement(enemy_id, new_coord, player_position, enemy_symbol, enemy_strength, enemy_health,
                                 enemy_exp_points, enemy_speed, update_time)
        self._update_player_stats()
def _process_input(self, command, player_position):
"""Takes input and reacts to command"""
new_position = player_position
neighbors = self.display.get_neighbors(player_position)
if command == curses.KEY_UP:
if self.facing_direction == 'up' and neighbors['up'][0] == self.replace_space:
self.display.add_char(player_position, self.replace_space)
new_position[0] -= 1
self.display.add_char(new_position, self.player_piece)
else:
self.facing_direction = 'up'
elif command == curses.KEY_DOWN:
if self.facing_direction == 'down' and neighbors['down'][0] == self.replace_space:
self.display.add_char(player_position, self.replace_space)
new_position[0] += 1
self.display.add_char(new_position, self.player_piece)
else:
self.facing_direction = 'down'
elif command == curses.KEY_LEFT:
if self.facing_direction == 'left' and neighbors['left'][0] == self.replace_space:
self.display.add_char(player_position, self.replace_space)
new_position[1] -= 1
self.display.add_char(new_position, self.player_piece)
else:
self.facing_direction = 'left'
elif command == curses.KEY_RIGHT:
if self.facing_direction == 'right' and neighbors['right'][0] == self.replace_space:
self.display.add_char(player_position, self.replace_space)
new_position[1] += 1
self.display.add_char(new_position, self.player_piece)
else:
self.facing_direction = 'right'
elif command == ord('c'):
new_position = self._interact(player_position)
elif command == ord('x'):
cured = self._heal()
if cured:
thread = threading.Thread(target=subprocess.call, args=[['afplay', 'Sound/cure.wav']])
thread.deamon = True
thread.start()
elif command == ord(' '):
bullet_id = 'bullet' + str(self.shots)
soundfile = 'Sound/' + self.weapon_name + '.wav'
thread = threading.Thread(target=subprocess.call, args=[['afplay', soundfile]])
thread.deamon = True
thread.start()
player_stats = self._player_stats()
player_strength = int(player_stats[2])
self._shoot(bullet_id, player_position, self.facing_direction, player_strength)
self.shots += 1
elif command == ord('p'):
self._pause()
return new_position
    def _re_display(self):
        """re-displays treasure and door icons in room"""
        # Unlike _setup_room, the stored coordinates here are already
        # [y, x] pairs (values returned by display.add_char), not
        # 'y,x' strings.
        for treasure_id, treasure_info in self._treasures.items():  # Add treasures to room
            treasure_coord, indx = treasure_info
            y, x = treasure_coord
            treasure_coord = [int(y), int(x)]
            self._check_inventory(self._current_room.treasure[indx])
            hidden = self._current_room.treasure[indx].attrs['hidden']
            id = self._current_room.treasure[indx].attrs['id']
            if 'bones' in id and hidden == 'False' or 'computer' in id and hidden == 'False':
                self._treasures[treasure_id] = [self.display.add_char(treasure_coord, self.interactive_item), indx]
            elif self._current_room.treasure[indx].attrs['status'] == 'full' and hidden == 'False':
                self._treasures[treasure_id] = [self.display.add_char(treasure_coord, self.treasure_symbol), indx]
        for door_id, door_info in self._doors.items():  # Add doors to room
            door_coord, indx = door_info
            y, x = door_coord
            door_coord = [int(y), int(x)]
            self._check_inventory(self._current_room.door[indx])
            hidden = self._current_room.door[indx].attrs['hidden']
            id = self._current_room.door[indx].attrs['id']
            if hidden == 'False':
                self._doors[door_id] = [self.display.add_char(door_coord, self.door_symbol), indx]
def _change_room(self, coord):
"""Changes the player's current room"""
new_room = False
player_start = []
for door_id, door_info in self._doors.items():
door_coord, door_index = door_info
if door_coord == coord:
if self._current_room.door[door_index].attrs['condition'] == 'open' and \
self._current_room.door[door_index].attrs['hidden'] == 'False':
player_start = str(self._current_room.door[door_index].attrs['player_start']).split(',')
player_start = [int(player_start[0]), int(player_start[1])]
new_room = True
self.current_room_id = self._current_room.door[door_index].attrs['connect_to']
if self.current_room_id == 'Exit':
return new_room, player_start
self._current_room_index = int(self.rooms[self.current_room_id])
self._current_room = self._game.room[self._current_room_index]
thread = threading.Thread(target=subprocess.call, args=[['afplay', 'Sound/BOUNCE1.wav']])
thread.deamon = True
thread.start()
elif self._current_room.door[door_index].attrs['condition'] == 'locked' and \
self._current_room.door[door_index].attrs['hidden'] == 'False':
inventory_items = [item.attrs['name'] for item in self.inventory]
if self._current_room.door[door_index].attrs['requirements'] in inventory_items:
self._current_room.door[door_index].attrs['condition'] = 'open'
thread = threading.Thread(target=subprocess.call, args=[['afplay', 'Sound/door_unlock.wav']])
thread.deamon = True
thread.start()
result = ['You have unlocked the door with ' + str(
self._current_room.door[door_index].attrs['requirements'])]
self.display.text_to_dialog(result, self.display.separate_line_y + 4, clear_low=True)
else:
result = ['You cannot open this door you need the ' + str(
self._current_room.door[door_index].attrs['requirements'])]
self.display.text_to_dialog(result, self.display.separate_line_y + 4, clear_low=True)
return new_room, player_start
def _add_inventory(self, coord, treasure_index, type):
"""Adds items in treasure to player inventory returns response in a string"""
result = ''
thread = threading.Thread(target=subprocess.call, args=[['afplay', 'Sound/open.wav']])
thread.deamon = True
thread.start()
treasure_len = len(self._current_room.treasure[treasure_index].item)
self._current_room.treasure[treasure_index].attrs['status'] = 'empty'
result += 'You have received '
for item_index, item in enumerate(self._current_room.treasure[treasure_index].item):
self.inventory.append(item)
if item_index == treasure_len - 1:
result += item.attrs['name'] + '.'
elif item_index == treasure_len - 2:
result += item.attrs['name'] + ' and '
else:
result += item.attrs['name'] + ', '
if item.attrs['type'] == 'weapon':
result += ' It adds ' + item.attrs['strength'] + ' points to your strength!'
if type == 'treasure':
self.display.add_char(coord, self.replace_space)
self._update_player_stats()
return result
def _open_treasure(self, coord, type='treasure'):
"""Opens treasure chest and retrieves items"""
result = []
for treasure_id, treasure_info in self._treasures.items(): # Add treasures to room
treasure_coord, indx = treasure_info
if coord == treasure_coord:
condition = self._current_room.treasure[indx].attrs['condition']
status = self._current_room.treasure[indx].attrs['status']
id = str(self._current_room.treasure[indx].attrs['id'])
if 'computer' in id:
type = 'computer'
if condition == 'open' and status == 'full':
result.append(self._add_inventory(coord, indx, type))
elif condition == 'locked' and status == 'full':
inventory_items = [item.attrs['name'] for item in self.inventory]
requirement = self._current_room.treasure[indx].attrs['requirements']
if requirement in inventory_items:
thread = threading.Thread(target=subprocess.call, args=[['afplay', 'Sound/door_unlock.wav']])
thread.deamon = True
thread.start()
self._current_room.treasure[indx].attrs['condition'] = 'open'
if type == 'treasure':
result.append('You unlocked this treasure chest with ' + requirement)
elif type == 'computer':
result.append('You used ' + requirement + ' to log in as John Hammond')
elif type == 'bones':
result.append('You added the ' + requirement + ' to the display')
result.append(self._add_inventory(coord, indx, type))
else:
if type == 'treasure':
result.append('You cannot open this treasure chest you need the ' + requirement)
elif type in ('bones', 'computer'):
text = [text.value for text in self._current_room.treasure[indx].include if
text.attrs['id'] == condition][0]
result.append(text)
elif type in ('bones', 'computer'):
text = \
[text.value for text in self._current_room.treasure[indx].include if text.attrs['id'] == condition][
0]
result.append(text)
self._re_display()
self.display.text_to_dialog(result, self.display.separate_line_y + 4, clear_low=True)
    def _interact(self, player_position):
        """Player interaction with doors and items"""
        # Examine all four neighbouring cells and act on whatever the
        # player can interact with: treasure chest, prop, or door.
        new_position = player_position
        neighbors = self.display.get_neighbors(player_position)
        for key, cell in neighbors.items():
            # Translate the direction key into the neighbouring coordinate.
            if key == 'up':
                interact_cell = [player_position[0] - 1, player_position[1]]
            elif key == 'down':
                interact_cell = [player_position[0] + 1, player_position[1]]
            elif key == 'left':
                interact_cell = [player_position[0], player_position[1] - 1]
            else:
                interact_cell = [player_position[0], player_position[1] + 1]
            if cell[0] == self.treasure_symbol:
                self._open_treasure(interact_cell)
            elif cell[0] == self.interactive_item:
                self._open_treasure(interact_cell, type='bones')
            elif cell[0] == self.door_symbol:
                result, player_start = self._change_room(interact_cell)
                if result:
                    if self.current_room_id == 'Exit':
                        # Ending depends on how many embryo items are carried.
                        win_type = len(
                            [item.attrs['name'] for item in self.inventory if item.attrs['name'] == 'Dinosaur Embryos'])
                        self.game_status = 'win' + str(win_type)
                    else:
                        # Draw the new room and respawn the player at the
                        # door's designated start position.
                        new_position = self.display.display_room(self._current_room, player_start)
                        self._setup_room(new_position)
                        self.display.add_char(new_position, self.player_piece)
        return new_position
    def _shoot(self, bullet_id, start_coord, direction, strength, last_update=0):
        """Rifle always shoots right """
        # Advance a projectile one cell and schedule its next step by
        # registering itself in self._movement_calls.
        # Player bullets move faster; non-player projectiles are drawn
        # with an extra attribute offset (turnRed) — presumably a curses
        # color attribute; TODO(review): confirm against ui.Display.
        if 'bullet' in bullet_id:
            interval = 0.025
            turnRed = 0
        else:
            interval = 0.05
            turnRed = 1024
        current_space = self.display.get_char(start_coord)
        bullet = 2097709  # horizontal projectile glyph; 2097788 is vertical
        bullet_coord = [start_coord[0], start_coord[1] + 1]  # default right
        if direction == 'up':
            interval = interval * 2
            bullet = 2097788
            bullet_coord = [start_coord[0] - 1, start_coord[1]]
        elif direction == 'down':
            interval = interval * 2
            bullet = 2097788
            bullet_coord = [start_coord[0] + 1, start_coord[1]]
        elif direction == 'left':
            bullet = 2097709
            bullet_coord = [start_coord[0], start_coord[1] - 1]
        next_space = self.display.get_char(bullet_coord)
        # Erase the projectile's previous position (not the shooter).
        if current_space == bullet + turnRed:
            self.display.add_char(start_coord, self.replace_space)
        if next_space == self.replace_space:
            # Keep flying: draw and queue the next movement step.
            self.display.add_char(bullet_coord, bullet + turnRed)
            self._movement_calls[bullet_id] = [bullet_coord, interval, last_update, direction, self._shoot, '',
                                               strength, 0, 0]
        elif next_space in (self.enemy_V, self.enemy_D, self.enemy_C):
            self._player_attack(bullet_coord, strength)
            self._movement_calls.pop(bullet_id, None)
        elif next_space == self.player_piece:
            self._enemy_attack(strength)
            self._movement_calls.pop(bullet_id, None)
        else:
            # Hit a wall or other obstacle: the projectile disappears.
            self._movement_calls.pop(bullet_id, None)
def _heal(self):
'''Uses potion in inventory to increase player's health'''
num_potions = len([item.attrs['name'] for item in self.inventory if item.attrs['type'] == 'potion'])
player_stats = self._player_stats()
max_health = player_stats[0]
cured = False
if num_potions:
if self.player_health < int(max_health):
potion = [item for item in self.inventory if item.attrs['type'] == 'potion'][0]
self.inventory.remove(potion)
t = self._update_player_stats(health_diff=self._potion_power)
self.display.text_to_dialog(['You have used a potion'], self.display.separate_line_y + 4)
cured = True
else:
self.display.text_to_dialog(['Your health is already maxed'], self.display.separate_line_y + 4)
else:
self.display.text_to_dialog(['You do not have any potions'], self.display.separate_line_y + 4)
return cured
def _player_attack(self, coord, strength):
'''Handles attack for player to enemy'''
for moving_id, moving_info in self._movement_calls.items():
enemy_coord, interval, last_update, direction, function, enemy_symbol, enemy_strength, enemy_health, enemy_exp_points = moving_info
if enemy_symbol == self.enemy_C:
enemy_type = 'Compsognathus'
elif enemy_symbol == self.enemy_D:
enemy_type = 'Dilophosaurus'
else:
enemy_type = 'Velociraptor'
if 'enemy' in moving_id and enemy_coord == coord:
new_enemy_health = int(enemy_health) - int(strength)
leveled = []
if new_enemy_health <= 0:
thread = threading.Thread(target=subprocess.call, args=[['afplay', 'Sound/kill.wav']])
thread.deamon = True
thread.start()
self._movement_calls.pop(moving_id, None)
self.display.add_char(coord, self.replace_space)
leveled.append(
'You killed a ' + enemy_type + ' and received ' + enemy_exp_points + ' experience points ')
leveled.append(self._update_player_stats(experience_diff=enemy_exp_points))
enemies = len([obj for obj, move in self._movement_calls.items() if 'enemy' in obj])
# if no enemies check for hidden treasures or doors
if enemies == 0:
hidden = False
for treasure_id, treasure_info in self._treasures.items(): #Add treasures to room
coord, indx = treasure_info
if self._current_room.treasure[indx].attrs['hidden'] == 'True':
hidden = True
unhide = self._current_room.treasure[indx].attrs['unhide']
self._current_room.treasure[indx].attrs[
'hidden'] = 'False' if unhide == 'no enemies' else 'True'
for door_id, door_info in self._doors.items(): #Add treasures to room
coord, indx = door_info
if self._current_room.door[indx].attrs['hidden'] == 'True':
hidden = True
unhide = self._current_room.treasure[indx].attrs['unhide']
self._current_room.treasure[indx].attrs[
'hidden'] = 'False' if unhide == 'no enemies' else 'True'
if hidden:
thread = threading.Thread(target=subprocess.call,
args=[['afplay', 'Sound/door_unlock.wav']])
thread.deamon = True
thread.start()
self._re_display()
self.display.text_to_dialog(leveled, self.display.separate_line_y + 4)
else:
self._movement_calls[moving_id] = [coord, interval, last_update, direction, function, enemy_symbol,
enemy_strength, new_enemy_health, enemy_exp_points]
def _enemy_attack(self, enemy_strength):
"""Reduce players health by strength provided"""
thread = threading.Thread(target=subprocess.call, args=[['afplay', 'Sound/hit.wav']])
thread.deamon = True
thread.start()
self._update_player_stats(health_diff=-(int(enemy_strength)))
    def _enemy_movement(self, enemy_id, enemy_position, player_position, enemy_symbol, enemy_strength, enemy_health,
                        enemy_exp_points, enemy_speed, last_update=0):
        """Move one enemy a single step, attacking the player if adjacent.

        If the player occupies a neighboring cell the enemy attacks instead of
        moving. Dilophosaurus (enemy_D) may spit instead of moving. Enemies
        far from the player (distance > 30) idle. Otherwise the enemy steps
        toward the player, or wanders randomly when no pathing step exists.
        The enemy's schedule entry in self._movement_calls is always rewritten.
        """
        move = True
        choices = []
        neighbors = self.display.get_neighbors(enemy_position)
        speed = float(enemy_speed)
        for key in neighbors:
            if neighbors[key][0] == self.player_piece:
                # Player adjacent: attack and stay put this tick.
                self._enemy_attack(enemy_strength)
                move = False
            elif neighbors[key][0] == self.replace_space:
                # Collect empty neighboring cells as random-wander candidates.
                if key == 'up':
                    choices.append([enemy_position[0] - 1, enemy_position[1]])
                elif key == 'down':
                    choices.append([enemy_position[0] + 1, enemy_position[1]])
                elif key == 'left':
                    choices.append([enemy_position[0], enemy_position[1] - 1])
                else:
                    choices.append([enemy_position[0], enemy_position[1] + 1])
        if enemy_symbol == self.enemy_D and move:
            # Dilophosaurus: try a ranged spit attack; skip moving if it fired.
            spit = self._enemy_spit(enemy_position, enemy_strength)
            if spit:
                move = False
        distance = math.hypot(player_position[1] - enemy_position[1], player_position[0] - enemy_position[0])
        if distance > 30 and last_update:
            # Too far from the player: idle (but still reschedule below).
            move = False
        if move:  # move towards player if possible
            step = self.display.find_enemy_move(enemy_position, player_position, self.replace_space, enemy_symbol,
                                                self.player_piece)
            if step and self.display.get_char(step) == self.replace_space:
                self.display.add_char(enemy_position, self.replace_space)
                self.display.add_char(step, enemy_symbol)
                self._movement_calls[enemy_id] = [step, speed, last_update, None, self._enemy_movement, enemy_symbol,
                                                  enemy_strength, enemy_health,
                                                  enemy_exp_points]  #add to movement_calls
            else:
                # No valid pathing step: redraw in place and reschedule.
                self.display.add_char(enemy_position, enemy_symbol)
                self._movement_calls[enemy_id] = [enemy_position, speed, last_update, None, self._enemy_movement,
                                                  enemy_symbol, enemy_strength, enemy_health, enemy_exp_points]
        else:
            # Wander to a random free neighbor (or stay if none).
            choice = random.choice(choices) if choices else enemy_position
            self.display.add_char(enemy_position, self.replace_space)
            self.display.add_char(choice, enemy_symbol)
            self._movement_calls[enemy_id] = [choice, speed, last_update, None, self._enemy_movement, enemy_symbol,
                                              enemy_strength, enemy_health, enemy_exp_points]
    def _enemy_spit(self, coord, enemy_strength):
        '''Fire a spit projectile at the player if they are in line of sight.

        Walks the cells returned per direction by get_neighbors(coord, 0)
        (presumably runs of cells outward from `coord` -- TODO confirm); if
        the player is reachable through empty cells a 'spit' projectile is
        scheduled via _shoot. Returns the firing direction, or None.
        '''
        y, x = coord
        direction = None
        neighbors = self.display.get_neighbors(coord, 0)
        for key, cells in neighbors.items():
            for cell in cells:
                if cell == self.replace_space or cell == self.player_piece:
                    if cell == self.player_piece:
                        direction = key
                        spit_id = 'spit' + str(self.shots)
                        self._shoot(spit_id, [y, x], direction, enemy_strength)
                        # Monotonic counter keeps projectile ids unique.
                        self.shots += 1
                else:
                    # Line of sight blocked: stop scanning this direction.
                    break
        return direction
def _pause(self):
"""Throws game into while loop till user enters key"""
action = self.display.get_input()
while action != ord('p') or action != ord('q'):
text = ['The game is paused', 'p: continue game q: quit game']
self.display.text_to_dialog(text, self.display.separate_line_y + 4, clear_low=True)
action = self.display.get_input()
if action == ord('q'):
self.game_status = 'quit'
break
elif action == ord('p'):
self.display.text_to_dialog([''], self.display.separate_line_y + 4, clear_low=True)
break
elif action != ord('s'):
self._save()
    def _save(self):
        """Saves game state"""
        # TODO(review): not implemented yet -- intentionally a no-op stub
        # invoked from the pause menu.
        pass
def _intro(self):
"""Displays the intro to the game"""
image_file = str(self._game.intro[0].include[0].value.strip()) # add if type = image
jpLogo = self.display.read_text(image_file)
image_start = 2
spacing = 2
image_height = len(jpLogo)
start_text = image_start + image_height + spacing
thread = threading.Thread(target=subprocess.call, args=[['afplay', 'Sound/welcome.wav']])
thread.deamon = True
thread.start()
for text_index in range(len(self._game.intro[0].text)):
self.display.clear()
text = [text.strip() for text in self._game.intro[0].text[text_index].value.split('\n')]
self.display.ascii_to_screen(image_start, jpLogo, 4)
self.display.ascii_to_screen(start_text, text)
action = self.display.get_input()
def _game_Over(self):
"""Runs game over screen"""
action = True
thread = threading.Thread(target=subprocess.call, args=[['afplay', 'Sound/cleaver.wav']])
thread.deamon = True
thread.start()
top = self.display.read_text('ASCII/gameover.txt', strip=False)
buffer = 12
spacing = 4
self.display.ascii_to_screen(buffer, top, 4, 57)
message = ['Press Enter to go back to the main menu']
self.display.text_to_dialog(message, buffer + spacing + len(top))
while action:
action = self.display.get_input()
if action == ord('\n'):
action = False
def _ending(self, type='bad'):
"""Runs screen ending"""
if type == 'bad':
end_idx = 0
width = 47
else:
end_idx = 1
width = 60
buffer = 5
spacing = 2
endings = self._grab_game_objects('endingcontent.xml', desc=ending)
top = self.display.read_text('ASCII/escaped.txt', strip=False)
action = -1
thread = threading.Thread(target=subprocess.call, args=[['afplay', 'Sound/trex.wav']])
thread.deamon = True
thread.start()
while action != ord('\n'):
for image in endings.ending[end_idx].image:
if action == ord('\n'):
break
self.display.clear()
self.display.ascii_to_screen(buffer, top, 4, 78)
i = image.value.split('\n')
self.display.ascii_to_screen(buffer + len(top) + spacing, i, 1, width)
time.sleep(.10)
action = self.display.get_input()
def play(self, intro=True):
"""runs game"""
if intro:
self._intro()
else:
self.current_room_id = self._game.player[0].attrs['position']
self._current_room_index = int(self.rooms[self.current_room_id])
self._current_room = self._game.room[self._current_room_index]
self.player_health = self._set_health()
self.display.set_timeout(0)
player_position = self.display.display_room(self._current_room)
self._setup_room(player_position)
self.display.add_char(player_position, self.player_piece)
self._update_player_stats()
while self.game_status == 'alive':
action = self.display.get_input()
if action != -1:
player_position = self._process_input(action, player_position)
if self._movement_calls:
for object, moves in self._movement_calls.items():
ntime = time.time()
coord, interval, last_update, direction, function, enemy_symbol, strength, enemy_health, enemy_exp_points = moves
if ntime > last_update:
if 'bullet' in object or 'spit' in object:
last_update = time.time() + interval
function(object, coord, direction, strength, last_update)
else:
last_update = time.time() + interval
function(object, coord, player_position, enemy_symbol, strength, enemy_health,
enemy_exp_points, interval, last_update)
if self.game_status == 'dead':
self.display.set_timeout(-1)
self.display.clear()
self._game_Over()
elif self.game_status == 'win0':
self.display.set_timeout(0)
self.display.clear()
self._ending()
elif self.game_status == 'win1':
self.display.set_timeout(0)
self.display.clear()
self._ending('good')
|
# 打印100以内的质数
def judge(x):
    """Return True if x is prime, False otherwise.

    Fixed: the original returned True for x < 2 (0, 1, negatives) because
    range(2, x) was empty. Also only trial-divides up to sqrt(x).
    """
    if x < 2:
        return False
    for i in range(2, int(x ** 0.5) + 1):
        if x % i == 0:
            return False
    return True
# Print every prime in [2, 100], one per line.
for candidate in filter(judge, range(2, 101)):
    print(candidate)
#数学定理:假若p为质数,a为任意正整数,那么a^p-a可被p整除
import random
def feima():
    """Empirically check Fermat's little theorem: for a prime p and any
    positive integer a, a**p - a is divisible by p.

    Picks a random a in [1, 10000] and a random prime p below 1000, then
    prints whether the theorem held (for a genuine prime it always does).
    """
    a = random.randint(1, 10000)
    # Sieve of Eratosthenes for primes below 1000. The original filtered a
    # list in O(n^2) with remove-while-iterating and a pointless str/int
    # round-trip just to pick one element.
    limit = 1000
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(limit ** 0.5) + 1):
        if is_prime[i]:
            for j in range(i * i, limit, i):
                is_prime[j] = False
    primes = [i for i in range(2, limit) if is_prime[i]]
    p = random.choice(primes)
    if (a ** p - a) % p == 0:
        print("费马小定理得到验证!")
    else:
        print("费马小定理骗人的!")
feima()
|
import sqlite3
import logging
class DataStore:
    '''Performs database operations for meter readings.

    Each method opens a short-lived sqlite3 connection against
    ``self.database`` and closes it before returning, even on error.
    '''

    def __init__(self, database, flow_file):
        # Path to the sqlite database file.
        self.database = database
        # Identifier of the flow file the readings originate from.
        self.flow_file = flow_file

    def add_reading(self, reading):
        '''Store one reading dict, creating its register record if needed.

        Expects keys: NMI, MeterSerialNumber, RegisterID,
        CurrentRegisterRead, CurrentRegisterReadDateTime, Quantity, UOM.
        Raises RuntimeError on database errors.
        '''
        try:
            register = self.get_register(
                nmi=reading['NMI'],
                meter_serial_number=reading['MeterSerialNumber'],
                register_id=reading['RegisterID'])
            if register:
                logging.debug('Register already exists')
            else:
                register = self.add_new_register(
                    nmi=reading['NMI'],
                    meter_serial_number=reading['MeterSerialNumber'],
                    register_id=reading['RegisterID'])
            self.add_new_reading(
                register_id=register,
                reading=reading['CurrentRegisterRead'],
                read_date_time=reading['CurrentRegisterReadDateTime'],
                usage=reading['Quantity'],
                uom=reading['UOM'],
                flow_file=self.flow_file)
        except sqlite3.OperationalError as e:
            logging.fatal('Database error: {}'.format(e))
            raise RuntimeError('Database error: {}'.format(e))

    def get_register(self, nmi, meter_serial_number, register_id):
        '''Return the internal register record ID, or None if not found.'''
        register_record = None
        connection = sqlite3.connect(self.database)
        try:
            cursor = connection.cursor()
            sql = '''
                SELECT id FROM registers
                WHERE
                    nmi = ? AND
                    meter_serial_number = ? AND
                    register_id = ?'''
            cursor.execute(sql, (nmi, meter_serial_number, register_id))
            row = cursor.fetchone()
            # Fixed: the original indexed fetchone()[0] and used the resulting
            # TypeError as the not-found signal, skipping close() and leaking
            # the connection on every miss.
            if row is None:
                logging.info('Register not found: {}'.format(nmi))
            else:
                register_record = row[0]
        finally:
            connection.close()
        return register_record

    def add_new_register(self, nmi, meter_serial_number, register_id):
        '''Add a new register record and return its ID.'''
        connection = sqlite3.connect(self.database)
        try:
            cursor = connection.cursor()
            sql = '''
                INSERT INTO registers(nmi, meter_serial_number, register_id)
                VALUES (?, ?, ?);'''
            cursor.execute(sql, (nmi, meter_serial_number, register_id))
            register_record = cursor.lastrowid
            connection.commit()
        finally:
            # Close even if the INSERT raises (e.g. OperationalError).
            connection.close()
        logging.info('Register added for NMI: {}'.format(nmi))
        return register_record

    def add_new_reading(self, register_id, reading, read_date_time, usage, uom,
                        flow_file):
        '''Add a new reading row linked to an existing register record.'''
        connection = sqlite3.connect(self.database)
        try:
            cursor = connection.cursor()
            sql = '''
                INSERT INTO readings(
                    register_id, reading, read_date_time, usage, uom, flow_file)
                VALUES (?, ?, ?, ?, ?, ?)'''
            cursor.execute(sql, (
                register_id, reading, read_date_time, usage, uom, flow_file))
            connection.commit()
        finally:
            connection.close()
        logging.info('Reading added for register_id: {}'.format(register_id))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (c) 2013 Qin Xuye <qin@qinxuye.me>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on 2013-6-8
@author: Chine
'''
import urllib
import base64
import binascii
import re
import json
from cola.core.errors import DependencyNotInstalledError,\
LoginFailure
try:
import rsa
except ImportError:
raise DependencyNotInstalledError("rsa")
class WeiboLoginFailure(LoginFailure):
    """Raised when the Sina Weibo SSO login flow cannot be completed."""
    pass
class WeiboLogin(object):
    """Sina Weibo SSO login helper.

    NOTE(review): Python 2 only -- `urllib.quote` and `base64.encodestring`
    no longer exist on Python 3. Drives the ssologin.js v1.4.5 flow:
    prelogin() fetches the RSA public key and nonce, login() posts the
    RSA-encrypted credentials.
    """
    def __init__(self, opener, username, passwd):
        # `opener` is a cola opener exposing open(url[, postdata]).
        self.opener = opener
        self.username = username
        self.passwd = passwd

    def get_user(self, username):
        """Return the username URL-quoted then base64-encoded (the `su` field)."""
        username = urllib.quote(username)
        # [:-1] strips the trailing newline encodestring appends.
        return base64.encodestring(username)[:-1]

    def get_passwd(self, passwd, pubkey, servertime, nonce):
        """RSA-encrypt servertime, nonce and password per the Weibo protocol
        and return the ciphertext hex-encoded (the `sp` form field)."""
        key = rsa.PublicKey(int(pubkey, 16), int('10001', 16))
        message = str(servertime) + '\t' + str(nonce) + '\n' + str(passwd)
        passwd = rsa.encrypt(message, key)
        return binascii.b2a_hex(passwd)

    def prelogin(self):
        """Fetch (servertime, nonce, pubkey, rsakv) from the prelogin endpoint.

        Raises WeiboLoginFailure if the JSONP response cannot be parsed.
        """
        username = self.get_user(self.username)
        prelogin_url = 'http://login.sina.com.cn/sso/prelogin.php?entry=sso&callback=sinaSSOController.preloginCallBack&su=%s&rsakt=mod&client=ssologin.js(v1.4.5)' % username
        data = self.opener.open(prelogin_url)
        # Extract the JSON payload from the JSONP wrapper callback(...).
        regex = re.compile('\((.*)\)')
        try:
            json_data = regex.search(data).group(1)
            data = json.loads(json_data)
            return str(data['servertime']), data['nonce'], \
                data['pubkey'], data['rsakv']
        except:
            # NOTE(review): bare except maps every failure (KeyError,
            # AttributeError, ...) to WeiboLoginFailure, hiding the cause.
            raise WeiboLoginFailure

    def login(self):
        """POST the login form; return True iff Weibo reports success."""
        login_url = 'http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.5)'
        try:
            servertime, nonce, pubkey, rsakv = self.prelogin()
            postdata = {
                'entry': 'weibo',
                'gateway': '1',
                'from': '',
                'savestate': '7',
                'userticket': '1',
                'ssosimplelogin': '1',
                'vsnf': '1',
                'vsnval': '',
                'su': self.get_user(self.username),
                'service': 'miniblog',
                'servertime': servertime,
                'nonce': nonce,
                'pwencode': 'rsa2',
                'sp': self.get_passwd(self.passwd, pubkey, servertime, nonce),
                'encoding': 'UTF-8',
                'prelt': '115',
                'rsakv' : rsakv,
                'url': 'http://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack',
                'returntype': 'META'
            }
            postdata = urllib.urlencode(postdata)
            text = self.opener.open(login_url, postdata)
            # The response embeds a JSONP-style result; parse and check it.
            regex = re.compile('\((.*)\)')
            json_data = json.loads(regex.search(text).group(1))
            return json_data['result'] == True
        except WeiboLoginFailure:
            return False
import requests
import re
from unicodedata import normalize
from bs4 import BeautifulSoup
from parser.course import Course
from parser.coursecode import CourseCode
from parser.unitrange import UnitRange
from parser.term import Term
class CourseParser:
    """Scrapes the Cal Poly CSC course catalog page into Course objects.

    Parsing is tightly coupled to the catalog's HTML structure
    (div.courseblock blocks) -- brittle if the site layout changes.
    """
    def __init__(self):
        self.COURSES_SOURCE = "http://catalog.calpoly.edu/coursesaz/csc/"
        self.TERM_OFFERING_PREFIX = 'Term Typically Offered: '
        self.PREREQ_PREFIX = 'Prerequisite: '
        self.CR_NC_MARKER = 'CR/NC'
        self.CROSSLIST_REGEX = r'Crosslisted as (\w+/\w+)'

    def get_courses(self):
        """Fetch and parse the catalog page; return a list of Course objects."""
        request = requests.get(self.COURSES_SOURCE)
        soup = BeautifulSoup(request.text, "html.parser")
        courses = []
        course_blocks = soup.find_all('div', attrs={'class': 'courseblock'})
        for course_block in course_blocks:
            # Header <strong> holds e.g. "CSC 101. Fundamentals..." + units.
            full_name, unit_string = tuple(s.strip() for s in course_block.p.strong.strings)
            full_code, name = tuple(s.strip() for s in full_name.split('.')[:2])
            dept, code = tuple(full_code.split())
            code = int(code)
            units = self.parse_unit_range(unit_string)
            subheader = course_block.find('div', attrs={'class': 'noindent courseextendedwrap'})
            # Locate the "Term Typically Offered" line and split it into terms.
            terms_string = [p.string for p in subheader.find_all('p', attrs={'class': 'noindent'})
                            if p.string.startswith(self.TERM_OFFERING_PREFIX)][0]
            terms = [Term.from_str(t) for t in terms_string[len(self.TERM_OFFERING_PREFIX):].split(', ')]
            subheaders = list(subheader.strings)
            is_CRNC = self.CR_NC_MARKER in subheaders
            prereq_idx = [idx for idx, string in enumerate(subheaders) if string.startswith(self.PREREQ_PREFIX)]
            if len(prereq_idx) == 0:
                prereqs = None
            else:
                # NFKD normalization collapses non-breaking spaces etc.
                prereqs = normalize('NFKD', ''.join(subheaders[prereq_idx[0]:])[len(self.PREREQ_PREFIX):])
            desc = normalize('NFKD', ''.join(course_block.find('div', attrs={'class': 'courseblockdesc'}).p.strings))
            # Cross-listed courses take their department from the description.
            crosslist = re.search(self.CROSSLIST_REGEX, desc)
            if crosslist:
                dept = crosslist.group(1)
            course = Course(CourseCode(dept, code), name, units,
                            terms, is_CRNC, prereqs, desc)
            courses.append(course)
        return courses

    @staticmethod
    def parse_unit_range(string):
        """Parse '4 units' or '1-4 units' into a UnitRange."""
        bounds = [int(s) for s in string.replace('-', ' ').split() if s.isdigit()]
        if len(bounds) == 1:
            return UnitRange.always(bounds[0])
        else:
            return UnitRange(bounds[0], bounds[1])
|
import torch
import torchvision
import torchvision.transforms as transforms
torch.manual_seed(17)  # eliminate random: fixed seed keeps runs reproducible
batch_size = 100
# MNIST dataset
# Downloaded into ../../data on first run (train split only; the test split
# is expected to already be present).
train_dataset = torchvision.datasets.MNIST(root='../../data',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)
test_dataset = torchvision.datasets.MNIST(root='../../data',
                                          train=False,
                                          transform=transforms.ToTensor())
# Data loader
# Shuffle only the training data; test order is kept stable.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
# Model
class Model(torch.nn.Module):
    """Single-layer linear classifier: 784 flattened pixels -> 10 class logits."""

    def __init__(self):
        super().__init__()
        # One fully connected layer; no hidden layers, no activation.
        self.linear = torch.nn.Linear(784, 10)

    def forward(self, x):
        # Returns raw logits (no softmax applied here).
        logits = self.linear(x)
        return logits
# initialize
model = Model()
# Inspect tensor shapes for the first batch only; exit() stops the script
# before any training happens.
for images, labels in train_loader:
    print(images.shape)  # torch.Size([100, 1, 28, 28])
    # Flatten 28x28 images to 784-dim rows for the linear layer.
    images = images.view(-1, 784)
    print(images.shape)  # torch.Size([100, 784])
    # forward
    predict = model(images)
    print(predict.shape)
    exit()
'''
torch.Size([100, 1, 28, 28])
torch.Size([100, 784])
torch.Size([100, 10])
'''
from djangorestframework.resources import ModelResource
from shopback.base.serializer import ChartSerializer
class SearchResource(ModelResource):
    """ModelResource for search results: serializes `charts` with
    ChartSerializer and `item_dict` with the default serializer."""
    fields = (('charts','ChartSerializer'),('item_dict',None))
    #exclude = ('url',)
class RankResource(ModelResource):
    """ModelResource for rank results; serializes all fields except `url`."""
    exclude = ('url',)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import re
import sys
import io
from bs4 import BeautifulSoup
from urllib import request
import requests
import urllib.parse
import urllib.request
import http.cookiejar
# step one : login
def login_return_Cookie(authenticity_token):
    """Log in to the internal GitLab with the given CSRF token, then fetch the
    projects dashboard using the session cookie obtained from the login.

    NOTE(review): username and password are hard-coded below -- these
    credentials should be moved out of source control.
    """
    # login_url = 'http://xtcgit.eebbk.com:9999/users/sign_in#login-pane'
    login_url = 'http://xtcgit.eebbk.com:9999/users/sign_in'
    data = {'utf8': '✓',
            'authenticity_token': authenticity_token,
            'user[login]': 'liyang@oaserver.dw.gdbbk.com',
            'user[password]': 'ly0904010214',
            'user[remember_me]': '0'}
    post_data = urllib.parse.urlencode(data).encode('utf-8')
    # post_data = b'utf8=%E2%9C%93&authenticity_token=iijSEnofQ3ZbGjbRwCjIGO9TaE6nHlJtVDpwI1V5YUG5/XRNc3/144KZDT8U7Az0b97/UHhIGO7FkjjfQ62nNw==&user%5Blogin%5D=liyang%40oaserver.dw.gdbbk.com&user%5Bpassword%5D=ly0904010214&user%5Bremember_me%5D=0'
    print('post_data', post_data)
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9,ja;q=0.8',
        'Cache-Control': 'max-age=0',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Cookie': 'diff_view=parallel; user_callout_dismissed=true; _gitlab_session=dc0e4c573c00f42a9d1326b929f5863b',
        'Host': 'xtcgit.eebbk.com:9999',
        'Origin': 'http://xtcgit.eebbk.com:9999',
        'Proxy-Connection': 'keep-alive',
        'Referer': 'http://xtcgit.eebbk.com:9999/users/sign_in',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}
    # Earlier urllib-based attempt, kept for reference:
    # req = urllib.request.Request(login_url, headers=headers, data=post_data)
    # # Build a cookie jar
    # cookie = http.cookiejar.CookieJar()
    # # Build an opener that carries the cookie jar
    # opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie))
    # # Send the login request; the opener then carries the session cookie
    # print('get_method = ', req.get_method())
    # print('data = ', req.data)
    # resp = opener.open(req)
    # # resp = request.urlopen(req)
    # print('response = ', resp.read().decode('utf-8'))
    # NOTE(review): this second dict has pre-percent-encoded keys/values,
    # which requests will encode again -- confirm which form the server needs.
    data = {'utf8': '%E2%9C%93',
            'authenticity_token': authenticity_token,
            'user%5Blogin%5D': 'liyang@oaserver.dw.gdbbk.com',
            'user%5Bpassword%5D': 'ly0904010214',
            'user%5Bremember_me%5D': '0'}
    # Build a requests Session
    session = requests.Session()
    # Posting the login form through the session stores the cookie in it;
    # inspect with print(session.cookies.get_dict())
    resp = session.post(login_url, data)
    print(resp.status_code)
    # print(resp.content.decode('utf-8'))
    # Page that requires a logged-in session
    url = 'http://xtcgit.eebbk.com:9999/dashboard/projects'
    # Send the authenticated request
    resp = session.get(url)
    # print(resp.content.decode('utf-8'))
    # Equivalent urllib variant, kept for reference:
    # url = 'http://xtcgit.eebbk.com:9999/dashboard/projects'
    # # Build the request
    # req = urllib.request.Request(url, headers=headers)
    # resp = opener.open(req)
    # print(resp.read().decode('utf-8'))
def get_authenticity_token():
    """Fetch the GitLab sign-in page and extract its CSRF token."""
    sign_in_url = 'http://xtcgit.eebbk.com:9999/users/sign_in'
    agent_headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'}
    http = requests.Session()
    reply = http.get(sign_in_url, headers=agent_headers)
    print("authenticity_token", reply.content)
    # The token lives in the page's csrf-token meta tag.
    token_re = re.compile(r'<meta name="csrf-token" content="(.*)" />')
    print("pattern", token_re.findall(reply.content.decode('utf8')))
    token = token_re.findall(reply.content.decode('utf8'))[0]
    print("authenticity_token", token)
    return token
# step two : get list
def get_urls_by_Cookie():
    """Fetch the projects dashboard reusing a hard-coded session cookie."""
    dashboard_url = 'http://xtcgit.eebbk.com:9999/dashboard/projects'
    dashboard_req = request.Request(dashboard_url)
    # Authenticate with an existing _gitlab_session cookie instead of logging in.
    dashboard_req.add_header('cookie', r'diff_view=parallel; user_callout_dismissed=true; _gitlab_session=f1f22c337a9d965d689e6e0b1c835fd9')
    # Browser-like UA so the server does not reject the request.
    dashboard_req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36')
    reply = request.urlopen(dashboard_req)
    print(reply.read().decode('utf-8'))
# def get_urls():
# # With语句是什么?
# # 有一些任务,可能事先需要设置,事后做清理工作。对于这种场景,Python的with语句提供了一种非常方便的处理方式。
# # 一个很好的例子是文件处理,你需要获取一个文件句柄,从文件中读取数据,然后关闭文件句柄。
#
# with request.urlopen() as f:
# print('Status:', f.status, f.reason)
# print('Status:', f., f.reason)
# for k, v in f.getheaders():
# print('%s: %s' % (k, v))
# step three : download
# step four : zip files
# step five : zip zips
if __name__ == '__main__':
    # print('argv len : %d', len(sys.argv))
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')  # switch stdout's default encoding to UTF-8
    # login_return_Cookie(get_authenticity_token())
    get_urls_by_Cookie()
    # get_urls()
    # exit(1)
    # f_input = sys.argv[1]
    # f_output = sys.argv[2]
|
import json
import operator
from collections import OrderedDict
from takwimu import settings
from takwimu.utils.medium import Medium
from takwimu.models.dashboard import ProfilePage, ProfileSectionPage
from takwimu.models.dashboard import TopicPage
def takwimu_countries(request):
    """Context processor listing TAKWIMU countries and their publication status."""
    # (full name, short name, published) for every supported country.
    country_rows = [
        ('Burkina Faso', 'Burkina Faso', False),
        ('Democratic Republic of Congo', 'DR Congo', False),
        ('Ethiopia', 'Ethiopia', True),
        ('Kenya', 'Kenya', True),
        ('Nigeria', 'Nigeria', True),
        ('Senegal', 'Senegal', True),
        ('South Africa', 'South Africa', True),
        ('Tanzania', 'Tanzania', True),
        ('Uganda', 'Uganda', False),
        ('Zambia', 'Zambia', False),
    ]
    countries = [
        {'name': full, 'name_short': short, 'published': published}
        for full, short, published in country_rows
    ]
    return {'countries': countries}
def takwimu_stories(request):
    """Context processor providing latest and trending Medium stories.

    Reads a local fixture file unless running against the production site,
    in which case the Medium API is used. On any failure both lists are empty.
    """
    stories_latest = []
    stories_trending = []
    try:
        if settings.HURUMAP.get('url') != 'https://takwimu.africa':
            with open('data/articles.json') as f:
                stories = json.load(f)
        else:
            client = Medium()
            stories = client.get_publication_posts('takwimu-africa',
                                                   count=20)
        stories_latest = stories[0:3]
        stories_trending = sorted(
            stories, key=operator.itemgetter('clap_count'), reverse=True)
    except Exception as e:
        # Fixed: `e.message` is Python-2-only (and deprecated there); on
        # Python 3 it raises AttributeError inside the handler. print(e)
        # is portable and shows the same text.
        print(e)
    return {
        'stories_latest': stories_latest,
        'stories_trending': stories_trending[0:3]
    }
def takwimu_topics(request):
    """
    Sections, topics with indicators and key issues
    """
    sections = []
    try:
        profile_topics = _traverse_profile_sections(
            ProfilePage.objects.live(), request)
        profile_section_topics = _traverse_profile_sections(
            ProfileSectionPage.objects.live(), request, len(profile_topics))
        sections = profile_topics + profile_section_topics
    except Exception as e:
        # Fixed: `e.message` is Python-2-only; on Python 3 it raises
        # AttributeError and masks the original error. print(e) is portable.
        print(e)
    return {
        'sections': sections,
    }
def _traverse_profile_sections(profile_sections, request, start_section_num=0):
    """Build navigation metadata (sections -> topics -> indicators) from the
    given profile pages.

    Sections/topics/indicators with the same (lower-cased) title are merged
    across pages; topics with no indicators become a section's "key issues".
    Returns the values of an OrderedDict keyed by section title.
    NOTE(review): returns dict .values() -- a list on Py2 but a view on Py3;
    the caller concatenates two results with `+`, which needs lists.
    """
    sections_by_title = OrderedDict()
    section_topics_by_title = OrderedDict()
    section_topic_indicators_by_title = OrderedDict()
    for section_num, profile_section in enumerate(profile_sections, start=start_section_num + 1):
        # A ProfileSectionPage hangs off a country page; a bare ProfilePage is
        # the country overview itself.
        (country, profile_title) = (str(profile_section.get_parent()), profile_section.title) \
            if isinstance(profile_section, ProfileSectionPage) \
            else (str(profile_section), 'Country Overview')
        default_section = {
            'id': 'section-{}'.format(section_num),
            'title': profile_title,
            'href': 'section-{}-topics'.format(section_num),
            'key_issues': [],
        }
        # setdefault merges same-titled sections from different pages.
        section = sections_by_title.setdefault(
            profile_title.lower(), default_section)
        topics_by_title = section_topics_by_title.setdefault(
            profile_title.lower(), OrderedDict())
        topic_indicators_by_title = section_topic_indicators_by_title.setdefault(
            profile_title.lower(), OrderedDict())
        # Continue numbering after topics merged from earlier pages.
        start_topic_num = len(topics_by_title.keys())
        for topic_num, section_topic in enumerate(profile_section.body, start=start_topic_num + 1):
            # Topics that have no indicators (key issues) should be
            # displayed separately.
            topic_title = section_topic.value['title']
            if not section_topic.value['indicators']:
                section['key_issues'].append({
                    'id': '{}-key_issue-{}'.format(section['id'], topic_num),
                    'title': topic_title,
                    'country': country,
                    'href': profile_section.get_url(request),
                })
            else:
                default_topic = {
                    'id': '{}-topic-{}'.format(section['id'], topic_num),
                    'title': topic_title,
                    'href': '{}-topic-{}-indicators'.format(section['id'], topic_num),
                }
                topic = topics_by_title.setdefault(
                    topic_title.lower(), default_topic)
                indicators_by_title = topic_indicators_by_title.setdefault(
                    topic_title.lower(), OrderedDict())
                start_indicator_num = len(indicators_by_title.keys())
                for indicator_num, topic_indicator in enumerate(section_topic.value['indicators'], start=start_indicator_num + 1):
                    indicator_title = topic_indicator.value['title']
                    default_indicator = {
                        'id': '{}-indicator-{}'.format(topic['id'], indicator_num),
                        'title': indicator_title,
                        'href': '{}-indicator-{}-country-selections'.format(topic['id'], indicator_num),
                        'countries': [],
                    }
                    indicator = indicators_by_title.setdefault(
                        indicator_title.lower(), default_indicator)
                    # Each country where this indicator appears gets a link.
                    indicator['countries'].append({
                        'title': country,
                        'href': profile_section.get_url(request),
                    })
                topic['indicators'] = indicators_by_title.values()
        section['topics'] = topics_by_title.values()
    return sections_by_title.values()
|
from PySide2.QtWidgets import *
import random
class Window(QWidget):
    """Demo window: a label plus a button that shows a random cycle name."""
    def __init__(self):
        QWidget.__init__(self)
        self.setMinimumSize(500,300)
        # Candidate cycle names the button picks from.
        self.liste = ["CSI","CIR","BIOST","CENT",'BIAST',"EST"]
        self.layout = QVBoxLayout()
        self.label = QLabel()
        self.button = QPushButton("Changer de Cycle")
        self.button.clicked.connect(self.listeHasard)
        self.layout.addWidget(self.label)
        self.layout.addWidget(self.button)
        self.setLayout(self.layout)

    def listeHasard(self):
        # Pick a random cycle name and display it in the label.
        txt = random.choice(self.liste)
        self.label.setText(txt)
if __name__ == "__main__" :
    # Standard Qt bootstrap: create the app, show the window, run the event loop.
    app = QApplication([])
    win = Window()
    win.show()
    app.exec_()
|
#!/usr/bin/env python
# coding:utf-8
# vi:tabstop=4:shiftwidth=4:expandtab:sts=4
#from pympler import tracker
#tr = tracker.SummaryTracker()
import deepstacks
from deepstacks.macros import *
from .macros import *
import pickle
#from memory_profiler import memory_usage
from StringIO import StringIO
#using_nolearn=False
from .. import utils
from ..lasagne.utils import ordered_errors as get_ordered_errors
import sys
import os
import time
import numpy as np
import math
import random
import gc
import fcntl
import copy
import string
import logging
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
stream=sys.stdout)
#logging.info('Timeline trace written to %s', tl_fn)
from collections import OrderedDict
print os.environ.get('THEANO_FLAGS')
logging.info('start theano ...')
import theano
import theano.tensor as T
from ..utils import easyshared
from ..utils import lr_policy
import lasagne
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano.compile.nanguardmode import NanGuardMode
import thread
import cv2
#import matplotlib.pyplot as plt
import argparse
sys.setrecursionlimit(50000)
#import nolearn
#import nolearn.lasagne
#import nolearn.lasagne.visualize
floatX=theano.config.floatX
from ..utils.momentum import adamax
from ..utils.curry import *
from ..utils.multinpy import readnpy,writenpy
def sorted_values(m):
    """Return m's values: insertion order for an OrderedDict, otherwise a
    list of values in sorted-key order."""
    if isinstance(m, OrderedDict):
        return m.values()
    keys = sorted(m)
    return [m[key] for key in keys]
#
#def enlarge(a,n):#{{{
# if n<0:
# return a[::-n,::-n,:]
# elif n>0:
# return np.repeat(np.repeat(a,n,0),n,1)#}}}
#def tile(network,width,height):#{{{
# network = lasagne.layers.ConcatLayer((network,)*width,axis=2)
# network = lasagne.layers.ConcatLayer((network,)*height,axis=3)
# return network#}}}
#
#def imnorm(x):#{{{
# M=np.max(x)
# m=np.min(x)
# l=M-m
# if l==0:
# l=1.0
# res=((x-m)*1.0/l*255.0).astype('uint8')
# return res#}}}
#def im256(x):#{{{
# M=1.0
# m=0.0
# l=M-m
# if l==0:
# l=1.0
# res=((x-m)*1.0/l*255.0).astype('uint8')
# return res#}}}
#
#def smooth_abs(x):#{{{
# return (x*x+utils.floatX(1e-8))**utils.floatX(0.5);#}}}
#
def mylossfunc(a, b):
    """Element-wise squared error between a and b."""
    diff = a - b
    return diff ** 2.0
class ZeroLayer(lasagne.layers.InputLayer):
    # InputLayer subclass whose input_var is a constant tensor (zeros/ones);
    # constructed by the inputlayer_* helpers below. The distinct type
    # presumably lets callers recognize these synthetic inputs via
    # isinstance -- confirm at call sites.
    pass
def inputlayer_zeroslike(layer):#{{{
    # Constant all-zeros ZeroLayer with the same output shape as `layer`.
    shape=lasagne.layers.get_output_shape(layer)
    res=ZeroLayer(shape,input_var=T.zeros(shape,dtype=floatX))
    return res#}}}
def inputlayer_zeros(shape):#{{{
    # Constant all-zeros ZeroLayer of the given shape.
    return ZeroLayer(shape,input_var=T.zeros(shape,dtype=floatX))#}}}
def inputlayer_oneslike(layer,scale=1.0):#{{{
    # Constant ZeroLayer matching `layer`'s output shape, filled with `scale`.
    shape=lasagne.layers.get_output_shape(layer)
    res=ZeroLayer(shape,input_var=T.ones(shape,dtype=floatX)*utils.floatX(scale))
    return res#}}}
def inputlayer_ones(shape,scale=1.0):#{{{
    # Constant ZeroLayer of the given shape, filled with `scale`.
    return ZeroLayer(shape,input_var=T.ones(shape,dtype=floatX)*utils.floatX(scale))#}}}
def touch(fname, times=None):
    """Unix-style touch: ensure fname exists, then update its timestamps.

    `times` is passed straight to os.utime ((atime, mtime) or None for now).
    """
    handle = open(fname, 'a')
    try:
        os.utime(fname, times)
    finally:
        handle.close()
def set_param_value(params,values,ignore_mismatch=False,resetparams=[]):#{{{
    """Load the numpy arrays in *values* into the shared variables *params*.

    params          -- list of theano shared variables.
    values          -- list of numpy arrays, same length as params.
    ignore_mismatch -- when True, a shape mismatch is logged instead of
                       raising: the value is zero-padded up to the parameter
                       shape when every axis fits, otherwise the assignment
                       is skipped.
    resetparams     -- parameters to leave untouched (their saved values are
                       ignored).  (The mutable default is never mutated.)

    Returns the list of parameters that were mismatched or padded so callers
    can re-initialize them.  Raises ValueError on any mismatch when
    ignore_mismatch is False.
    """
    res=[]
    if len(params) != len(values):
        raise ValueError("mismatch: got %d values to set %d parameters" %
                        (len(values), len(params)))
    for p, v in zip(params, values):
        if p in resetparams:
            continue
        pshape=p.get_value().shape
        vshape=v.shape
        if len(pshape) != len(vshape):
            # Different rank: padding cannot help; warn or fail.
            if ignore_mismatch:
                logging.warning("mismatch: parameter has shape %r but value to "
                        "set has shape %r" % (pshape, vshape))
                res+=[p]
            else:
                raise ValueError("mismatch: parameter has shape %r but value to "
                                 "set has shape %r" %
                                 (pshape, vshape))
        else:
            needpad=False
            setvalue=True
            if ignore_mismatch:
                # Pad only if the parameter is at least as large as the value
                # on every axis; otherwise warn and skip the assignment.
                padflag=True
                padlist=()
                for i in range(len(pshape)):
                    if pshape[i]<vshape[i]:
                        padflag=False
                if padflag:
                    for i in range(len(pshape)):
                        if pshape[i]>vshape[i]:
                            padlist+=((0,pshape[i]-vshape[i]),)
                            needpad=True
                        else:
                            padlist+=((0,0),)
                else:
                    for i in range(len(pshape)):
                        if pshape[i]<vshape[i]:
                            logging.warning("mismatch: parameter has shape %r but value to set has shape %r" % (pshape, vshape))
                            res+=[p]
                            setvalue=False
                            break
            else:
                for i in range(len(pshape)):
                    if pshape[i]!=vshape[i]:
                        raise ValueError("mismatch: parameter has shape %r but value to "
                                         "set has shape %r" %
                                         (pshape, vshape))
            if needpad:
                # Grow the value with trailing zeros to fit the parameter.
                logging.warning('pad parameter value from %r to %r' % (vshape,pshape))
                #print pshape,v.shape,padlist
                v=np.pad(v,padlist,'constant')
                res+=[p]
            if setvalue:
                p.set_value(v)
    return res#}}}
def save_params(epoch,layers,global_params,prefix='',deletelayers=[]):#{{{
    """Snapshot all network parameters plus *global_params* to .npz files.

    epoch         -- epoch number stored as the first entry of the global
                     file so training can resume from it.
    layers        -- list of output layers; index i goes to
                     '<prefix>model-layers-<i>.npz'.
    deletelayers  -- layers whose own parameters are excluded from the dump.

    An empty parameter list is marked by touching a matching '.skip' file so
    load_params() knows the npz is intentionally empty.
    """
    # Single-element zip: keeps the historical structure that supported
    # several (layers, name) groups; only ("layers",) remains.
    for layers,name in zip((layers,),("layers",)):
        for i in range(len(layers)):
            params = lasagne.layers.get_all_params(layers[i])
            for layer in deletelayers:
                # Drop every parameter owned by an excluded layer.
                newparams = []
                for t in params:
                    #if t != layer.W and t!=layer.b:
                    #    newparams+=[t]
                    if not t in layer.get_params():
                        newparams+=[t]
                params=newparams
            #params=[]
            #for j in range(len(layers[i])):
            #    params=params+layers[i][j].get_params()
            values=[p.get_value() for p in params]
            np.savez(prefix+'model-'+name+'-'+str(i)+'.npz', *values)
            if len(values)==0:
                touch(prefix+'model-'+name+'-'+str(i)+'.skip')
    params=global_params
    # Layout of the global file: [epoch, param0, param1, ...].
    values=[epoch]+[p.get_value() for p in params]
    np.savez(prefix+'model-global-'+str(0)+'.npz', *values)
    # NOTE(review): values always contains epoch, so this skip-marker
    # branch looks unreachable -- confirm before relying on it.
    if len(values)==0:
        touch(prefix+'model-global-'+str(0)+'.skip')#}}}
def load_params(layers,global_params,prefix='',partial=False,ignore_mismatch=False,newlayers=[],resetlayers=[]):#{{{
    """Restore parameters previously written by save_params().

    partial         -- allow the saved file to hold more arrays than the
                       current parameter list; extras are dropped.
    ignore_mismatch -- forwarded to set_param_value(): pad/skip on shape
                       mismatch instead of raising.
    newlayers       -- layers added since the snapshot; their parameters are
                       not present in the file and are excluded here.
    resetlayers     -- layers whose saved values should be ignored.

    Returns (epoch, mismatch): the saved epoch number (0 when nothing was
    loaded) and the list of parameters whose saved values did not fit.
    """
    epoch = 0
    mismatch = []
    for layers,name in zip((layers,),("layers",)):
        for i in range(len(layers)):
            # Stop at the first missing file; a '.skip' marker means the
            # snapshot for this index is intentionally empty.
            if not os.path.exists(prefix+'model-'+name+'-'+str(i)+'.npz'):
                break;
            if not os.path.exists(prefix+'model-'+name+'-'+str(i)+'.skip'):
                with np.load(prefix+'model-'+name+'-'+str(i)+'.npz') as f:
                    values = [f['arr_%d' % n] for n in range(len(f.files))]
                    params = lasagne.layers.get_all_params(layers[i])
                    for layer in newlayers:
                        # Exclude parameters that did not exist at save time.
                        newparams = []
                        for t in params:
                            #if t != layer.W and t!=layer.b:
                            #    newparams+=[t]
                            if not t in layer.get_params():
                                newparams+=[t]
                        params=newparams
                    resetparams = []
                    for layer in resetlayers:
                        for t in layer.get_params():
                            resetparams += [t]
                    #params=[]
                    #for j in range(len(layers[i])):
                        #a=[x for x in layers[i][j].get_params() and x not in params]
                        #params+=a
                    if partial:
                        values=values[:len(params)]
                    mismatch+=set_param_value(params,values,ignore_mismatch=ignore_mismatch,resetparams=resetparams)
    if os.path.exists(prefix+'model-global-'+str(0)+'.npz'):
        if not os.path.exists(prefix+'model-global-'+str(0)+'.skip'):
            with np.load(prefix+'model-global-'+str(0)+'.npz') as f:
                # Global file layout: [epoch, param0, param1, ...].
                values = [f['arr_%d' % n] for n in range(len(f.files))]
                epoch = values[0]
                values = values[1:]
                params=global_params
                if partial:
                    values=values[:len(params)]
                mismatch+=set_param_value(params,values,ignore_mismatch=ignore_mismatch,resetparams=[])
    return epoch,mismatch#}}}
#from join import join_layer as JoinLayer
#from join import copy_batch_norm as CopyBatchMorm
#def get_errors(groups,m=None,prefix=''):#
# res=[]
# #print 'DEBUG'
# for t in groups['errors']:
# if m is None:
# res+=[[prefix+t,map(lasagne.layers.get_output,groups['errors'][t])]]
# else:
# tmp=map(lambda x:JoinLayer(x,m),groups['errors'][t])
# res+=[[prefix+t,map(lasagne.layers.get_output,tmp)]]
# #print [[t,map(lasagne.layers.get_output_shape,groups['errors'][t])]]
# return sorted(res,key=lambda x:x[0])#
#def get_watchpoints(groups,m=None,prefix=''):#
# res=[]
# #print 'DEBUG'
# for t in groups['watchpoints']:
# if m is None:
# res+=[[prefix+t,map(lasagne.layers.get_output,groups['watchpoints'][t])]]
# else:
# tmp=map(lambda x:JoinLayer(x,m),groups['watchpoints'][t])
# res+=[[prefix+t,map(lasagne.layers.get_output,tmp)]]
# #print [[t,map(lasagne.layers.get_output_shape,groups['watchpoints'][t])]]
# return sorted(res,key=lambda x:x[0])#
#class Seq:#
# def __init__(self,key,start=0):
# self.key=key
# self.p=start
# def next(self):
# p=self.p
# self.p+=1
# return self.key+str(p)#
#def sharegroup_replace(m,l):#
# res=()
# for a in l:
# if a[-2]==0:
# res+=(a,)
# else:
# res+=(a[:-2]+(m[a[-2]].next(),a[-1],),)
# return res#
def create_layers_dict(conv_layers):#{{{
    """Map each layer to the key 'layer_<index>', preserving order."""
    return OrderedDict(
        ('layer_' + str(idx), layer)
        for idx, layer in enumerate(conv_layers))#}}}
#def handle_finish(conv_groups,m):
# conv_groups['predict']=[JoinLayer(conv_groups['output'][0],{
# conv_groups['freedim'][0]:conv_groups['best_freedim'][0],
# })]
#
#def build_network(inputs):
#
# source_image_network=inputs['source_image']
# action_network=inputs['action']
# target_image_network=inputs['target_image']
#
# F=64
#
# sq1=Seq('conv')
# sq2=Seq('conv')
# sq3=Seq('deconv')
# sq4=Seq('deconv')
# sq5=Seq('deconv')
#
# network,conv_groups,conv_layers = build_convdeconv_network(source_image_network,sharegroup_replace({1:sq1,2:sq2,3:sq3,4:sq4,5:sq5},(
# ## source
# ('source_image',0 ,0, 0,0,1,{'noise':1.0/256}),
# (0,8 ,5, 1,0,1,{}),#{{{
# (0,8 ,3, 1,0,1,{}),
# (0,F ,4, 4,0,1,{}),
# (0,F ,3, 1,0,1,{}),
# (0,F ,3, 1,0,1,{}),
# (0,F ,4, 4,0,1,{}),
# (0,F ,3, 1,0,1,{}),
# (0,F ,3, 1,0,1,{}),
# (0,F *9,4, 4,0,1,{}),#}}}
# (0,(16,6,6),0, 0,0,1,{}),#{{{
# (0,16 ,1, 1,0,1,{}),
# (0,16 ,1, 1,0,1,{}),
# (0,F *9,6, 6,0,1,{}),#}}}
# (0,(16,6,6),0, 0,0,1,{}),#{{{
# (0,16 ,1, 1,0,1,{}),
# (0,16 ,1, 1,0,1,{}),
# (0,F *9,6, 6,0,1,{}),#}}}
# (0,(16,6,6),0, 0,0,1,{}),#{{{
# (0,16 ,1, 1,0,1,{}),
# (0,16 ,1, 1,0,1,{}),
# (0,F *9,6, 6,0,1,{}),#}}}
# (0,0 ,0, 0,'source',1,{'noise':1.0/256}),
# (0,(16,6,6),0, 0,0,3,{}),#{{{
# (0,16 ,1, 1,0,3,{}),
# (0,16 ,1, 1,0,3,{}),
# (0,F *9,6, 6,0,3,{}),#}}}
# (0,(16,6,6),0, 0,0,3,{}),#{{{
# (0,16 ,1, 1,0,3,{}),
# (0,16 ,1, 1,0,3,{}),
# (0,F *9,6, 6,0,3,{}),#}}}
# (0,(16,6,6),0, 0,0,3,{}),#{{{
# (0,16 ,1, 1,0,3,{}),
# (0,16 ,1, 1,0,3,{}),
# (0,F *9,6, 6,0,3,{}),#}}}
# (0,(F,3,3),0,0,0,3,{}),
# (0,F ,3,-4,0,3,{'nopad'}), #{{{
# (0,F ,3, 1,0,3,{}),
# (0,F ,3, 1,0,3,{}),
# (0,F ,3,-4,0,3,{}),
# (0,F ,3, 1,0,3,{}),
# (0,F ,3, 1,0,3,{}),
# (0,8 ,3,-4,0,3,{}),
# (0,8 ,3, 1,0,3,{}),
# (0,3 ,5, 1,'source_image_recon',3,{'equal':['source_image','source_image_recon',mylossfunc]}), #}}}
#
# ## target
# ('target_image',0 ,0, 0,0,2,{'noise':1.0/256}),#{{{
# (0,8 ,5, 1,0,2,{}),
# (0,8 ,3, 1,0,2,{}),
# (0,F ,4, 4,0,2,{}),
# (0,F ,3, 1,0,2,{}),
# (0,F ,3, 1,0,2,{}),
# (0,F ,4, 4,0,2,{}),
# (0,F ,3, 1,0,2,{}),
# (0,F ,3, 1,0,2,{}),
# (0,F *9,4, 4,0,2,{}),#}}}
# (0,(16,6,6),0, 0,0,2,{}),#{{{
# (0,16 ,1, 1,0,2,{}),
# (0,16 ,1, 1,0,2,{}),
# (0,F *9,6, 6,0,2,{}),#}}}
# (0,(16,6,6),0, 0,0,2,{}),#{{{
# (0,16 ,1, 1,0,2,{}),
# (0,16 ,1, 1,0,2,{}),
# (0,F *9,6, 6,0,2,{}),#}}}
# (0,(16,6,6),0, 0,0,2,{}),#{{{
# (0,16 ,1, 1,0,2,{}),
# (0,16 ,1, 1,0,2,{}),
# (0,F *9,6, 6,0,2,{}),#}}}
# (0,0 ,0, 0,'target',2,{'noise':1.0/256}),
# (0,(16,6,6),0, 0,0,4,{}),#{{{
# (0,16 ,1, 1,0,4,{}),
# (0,16 ,1, 1,0,4,{}),
# (0,F *9,6, 6,0,4,{}),#}}}
# (0,(16,6,6),0, 0,0,4,{}),#{{{
# (0,16 ,1, 1,0,4,{}),
# (0,16 ,1, 1,0,4,{}),
# (0,F *9,6, 6,0,4,{}),#}}}
# (0,(16,6,6),0, 0,0,4,{}),#{{{
# (0,16 ,1, 1,0,4,{}),
# (0,16 ,1, 1,0,4,{}),
# (0,F *9,6, 6,0,4,{}),#}}}
# (0,(F,3,3),0,0,0,4,{}),
# (0,F ,3,-4,0,4,{'nopad'}),#{{{
# (0,F ,3, 1,0,4,{}),
# (0,F ,3, 1,0,4,{}),
# (0,F ,3,-4,0,4,{}),
# (0,F ,3, 1,0,4,{}),
# (0,F ,3, 1,0,4,{}),
# (0,8 ,3,-4,0,4,{}),
# (0,8 ,3, 1,0,4,{}),
# (0,3 ,5, 1,'target_image_recon',4,{'equal':['target_image','target_image_recon',mylossfunc]} if one_direction else {}), #}}}
#
# ## freedim
# (('target','source'),0,0,0,0,0,{'sub'}),
# (0,0, 0, 0,'freedim',0,{'relu':lambda x:T.eq(abs(x),T.max(abs(x),axis=(1,),keepdims=True))}),
# ((0,'source'),F *9,1, 1,0,0,{}),
# (0,(16,6,6),0, 0,0,0,{}),#{{{
# (0,16 ,1, 1,0,0,{}),
# (0,16 ,1, 1,0,0,{}),
# (0,F *9,6, 6,0,0,{}),#}}}
# (0,(16,6,6),0, 0,0,0,{}),#{{{
# (0,16 ,1, 1,0,0,{}),
# (0,16 ,1, 1,0,0,{}),
# (0,F *9,6, 6,0,0,{}),#}}}
# (0,(16,6,6),0, 0,0,0,{}), #'linear'#{{{
# (0,16 ,1, 1,0,0,{}),
# (0,16 ,1, 1,0,0,{}),
# (0,F *9,6, 6,0,0,{'linear'}),#}}}
# (('source',0),
# 0, 0,0,0,{'add':True,'equal':['target','freedim_enumerate',mylossfunc]}),
#
# (('source','action'),F*9,1, 1,0,0,{}),
# (0,(16,6,6),0, 0,0,0,{}),#{{{
# (0,16 ,1, 1,0,0,{}),
# (0,16 ,1, 1,0,0,{}),
# (0,F *9,6, 6,0,0,{}),#}}}
# (0,(16,6,6),0, 0,0,0,{}),#{{{
# (0,16 ,1, 1,0,0,{}),
# (0,16 ,1, 1,0,0,{}),
# (0,F *9,6, 6,0,0,{}),#}}}
# (0,(16,6,6),0, 0,0,0,{}), #'freedim_predict'#{{{
# (0,16 ,1, 1,0,0,{}),
# (0,16 ,1, 1,0,0,{}),
# (0,F *9,6, 6,'freedim_predict',0,{'equal':['freedim','freedim_predict',mylossfunc]}),#}}}
# ('freedim_predict',0,0,0,'best_freedim',0,{'relu':lambda x:T.eq(abs(x),T.max(abs(x),axis=(1,),keepdims=True))}),
#
# (('source','action','freedim'),F*9,1, 1,0,0,{}), # 预测的时候把 freedim 换成 best_freedim
# (0,(16,6,6),0, 0,0,0,{}),#{{{
# (0,16 ,1, 1,0,0,{}),
# (0,16 ,1, 1,0,0,{}),
# (0,F *9,6, 6,0,0,{}),#}}}
# (0,(16,6,6),0, 0,0,0,{}),#{{{
# (0,16 ,1, 1,0,0,{}),
# (0,16 ,1, 1,0,0,{}),
# (0,F *9,6, 6,0,0,{}),#}}}
# (0,(16,6,6),0, 0,0,0,{}), #'target_predict'#{{{
# (0,16 ,1, 1,0,0,{}),
# (0,16 ,1, 1,0,0,{}),
# (0,F *9,6, 6,'target_predict',0,{'equal':['target','target_predict',mylossfunc]}),#}}}
# (0,(16,6,6),0, 0,0,5,{}),#{{{
# (0,16 ,1, 1,0,5,{}),
# (0,16 ,1, 1,0,5,{}),
# (0,F *9,6, 6,0,5,{}),#}}}
# (0,(16,6,6),0, 0,0,5,{}),#{{{
# (0,16 ,1, 1,0,5,{}),
# (0,16 ,1, 1,0,5,{}),
# (0,F *9,6, 6,0,5,{}),#}}}
# (0,(16,6,6),0, 0,0,5,{}),#{{{
# (0,16 ,1, 1,0,5,{}),
# (0,16 ,1, 1,0,5,{}),
# (0,F *9,6, 6,0,5,{}),#}}}
# (0,(F,3,3),0,0,0,5,{}),
# (0,F ,3,-4,0,5,{'nopad'}), #{{{
# (0,F ,3, 1,0,5,{}),
# (0,F ,3, 1,0,5,{}),
# (0,F ,3,-4,0,5,{}),
# (0,F ,3, 1,0,5,{}),
# (0,F ,3, 1,0,5,{}),
# (0,8 ,3,-4,0,5,{}),
# (0,8 ,3, 1,0,5,{}),
# (0,3 ,5, 1,0,5,{'watch':['target_image','train:predict_recon',mylossfunc]}), #}}}
# )),{
# 'action':action_network,
# 'source_image':source_image_network,
# 'target_image':target_image_network,
# },relu=lasagne.nonlinearities.leaky_rectify,init=lasagne.init.HeUniform,autoscale=False,finish=handle_finish)
#
# assert sq1.next()==sq2.next()
# assert len(set([sq3.next(),sq4.next(),sq5.next()]))==1
#
# res=create_layers_dict(conv_layers)
# res['action']=action_network
# res['source_image']=source_image_network
# res['target_image']=target_image_network
# errors = get_errors(conv_groups)+[
# ['example_errors',[]],
# ]
# val_watch_errors = get_watchpoints(conv_groups)+[
# ]
# return [res],errors,val_watch_errors,conv_groups
network_builder=None
def register_network_builder(build_network):
    """Install the module-level network builder; only the first call wins."""
    global network_builder
    if network_builder is not None:
        # A builder is already registered -- later registrations are no-ops.
        logging.info('Ignore register_network_builder.')
        return
    network_builder = build_network
inference_handler=None
def register_inference_handler(h):
    """Install the inference handler; the most recent registration wins."""
    global inference_handler
    inference_handler = h
model_handlers=[]
def register_model_handler(h):
    """Append *h* to the model-handler list (mutated in place)."""
    model_handlers.append(h)
params_handlers=[]
def register_params_handler(h):
    """Append *h* to the params-handler list (mutated in place)."""
    params_handlers.append(h)
#quit_flag=False
#frames=270
#rl_dummy=16
#def load_200():
# if frames==90:
# npyfile=('200x64x'+str(frames)+'.npy',)
# else:
# npyfile=('200x64x'+str(frames)+'-0.npy',
# '200x64x'+str(frames)+'-1.npy')
# if not os.path.exists(npyfile[0]):
# #aa=[] #(frames, 3, 256, 256)*16
# base=0
# for dir in ['turnleft','turnright','up','down']:
# for i in range(200):
# a=np.load('200x64x'+str(frames)+'/'+dir+'/'+dir+'-'+str(i)+'.npz')['arr_0']
# writenpy(npyfile,(3,64,64),np.arange(len(a))+base,a,fast=True)
# base+=len(a)
# return curry(readnpy,npyfile,(3,64,64))
#minibatch_handlers=[]
#def register_minibatch_handler(h):
# global minibatch_handlers
# minibatch_handlers+=[h]
#centralize=False
#one_direction=False
#
#def iterate_minibatches_200(aa,stage,batchsize,iteratesize=400, shuffle=False, idx=False):
# rangeframes=frames/90*10
# unitframes=frames/90*10
# i=0
# last=None
# batchsize0=batchsize
# while i<iteratesize:
# if stage=='train':
# if last is not None:
# batchsize=batchsize0#/2
# else:
# batchsize=batchsize0
# else:
# batchsize=batchsize0
# if stage=='train':
# actions1=np.zeros((batchsize,handledata.num_curr_actions,1,1),dtype=floatX)
# #actions2=np.zeros((batchsize,handledata.num_curr_actions,1,1),dtype=floatX)
# k=(np.random.rand(batchsize)*800).astype('int')
# beginpos=rangeframes+(np.random.rand(batchsize)*(frames-rangeframes*2)).astype('int')
# if centralize:
# k[::2]=k[0]
# beginpos[::2]=beginpos[0]
# action1=(np.random.rand(batchsize)*rangeframes).astype('int')
# #action2=(np.random.rand(batchsize)*rangeframes).astype('int')
# #endpos=beginpos+action2
# beforpos=beginpos-action1
# faction1=action1.astype(floatX)/unitframes
# #faction2=action2.astype(floatX)/unitframes
# isleft=(k<200)
# isright=(k>=200)*(k<400)
# isup=(k>=400)*(k<600)
# isdown=(k>=600)
# actions1[:,0,0,0]=isleft*faction1
# #actions2[:,0,0,0]=isleft*faction2
# actions1[:,1,0,0]=isright*faction1
# #actions2[:,1,0,0]=isright*faction2
# actions1[:,2,0,0]=isup*faction1
# #actions2[:,2,0,0]=isup*faction2
# actions1[:,3,0,0]=isdown*faction1
# #actions2[:,3,0,0]=isdown*faction2
# assert idx==False
# actions1=np.concatenate((actions1[:,0:4],
# np.zeros((batchsize,6,1,1),dtype=floatX)
# ),axis=1)
# #actions2=np.concatenate((actions2[:,0:4],
# # np.zeros((batchsize,6,1,1),dtype=floatX)
# # ),axis=1)
# else:
# #actions1=np.zeros((batchsize,handledata.num_curr_actions,1,1),dtype=floatX)
# actions2=np.zeros((batchsize,handledata.num_curr_actions,1,1),dtype=floatX)
# k=(np.random.rand(batchsize)*800).astype('int')
# #action1=(np.random.rand(batchsize)*rangeframes).astype('int')
# action2=(np.random.rand(batchsize)*rangeframes).astype('int')
# #beginpos=rangeframes+(np.random.rand(batchsize)*(frames-rangeframes*2)).astype('int')
# endpos=frames-1-(np.random.rand(batchsize)*rangeframes).astype('int')
# beginpos=endpos-action2
# #endpos=beginpos+action2
# #beforpos=beginpos-action1
# #faction1=action1.astype(floatX)/unitframes
# faction2=action2.astype(floatX)/unitframes
# isleft=(k<200)
# isright=(k>=200)*(k<400)
# isup=(k>=400)*(k<600)
# isdown=(k>=600)
# #actions1[:,0,0,0]=isleft*faction1
# actions2[:,0,0,0]=isleft*faction2
# #actions1[:,1,0,0]=isright*faction1
# actions2[:,1,0,0]=isright*faction2
# #actions1[:,2,0,0]=isup*faction1
# actions2[:,2,0,0]=isup*faction2
# #actions1[:,3,0,0]=isdown*faction1
# actions2[:,3,0,0]=isdown*faction2
# assert idx==False
# #actions1=np.concatenate((actions1[:,0:4],
# # np.zeros((batchsize,6,1,1),dtype=floatX)
# # ),axis=1)
# actions2=np.concatenate((actions2[:,0:4],
# np.zeros((batchsize,6,1,1),dtype=floatX)
# ),axis=1)
#
# if stage=='train':
# if is_autoencoder:
# batch = ((aa(k*frames+beforpos)/256.0).astype(floatX),None,actions1,"(aa(k*frames+beginpos)/256.0).astype(floatX)",None,"actions2","(aa(k*frames+endpos)/256.0).astype(floatX)",None,None,None,None)
# images1, ig1, actions1, images2, ig2, actions2, images3, ig3, rewards, targets, flags = batch
# images2=images1
# actions1=np.zeros_like(actions1)
# else:
# batch = ((aa(k*frames+beforpos)/256.0).astype(floatX),None,actions1,(aa(k*frames+beginpos)/256.0).astype(floatX),None,"actions2","(aa(k*frames+endpos)/256.0).astype(floatX)",None,None,None,None)
# images1, ig1, actions1, images2, ig2, actions2, images3, ig3, rewards, targets, flags = batch
# idx1 = create_fingerprint(k*frames+beforpos,32)
# idx2 = create_fingerprint(k*frames+beginpos,32)
# if images1.shape[2]==256:
# images1=images1[:,:,::4,::4]
# images2=images2[:,:,::4,::4]
# #images3=images3[:,:,::4,::4]
#
# action_samples=build_action_sample(rl_dummy,4,zero=True)
#
# #if sigma_const is not None:
# # sigma=np.ones_like(sigma)*sigma_const
#
# inputs=images1
# outputs=images2
# actions=actions1
#
# #actions[::8]=np.zeros_like(actions[::8])
#
# drdscxcy=np.zeros((batchsize,4,1,1),dtype=floatX)
# dxdy=np.zeros((batchsize,2,1,1),dtype=floatX)
#
# actions=np.concatenate((actions[:,0:4],
# drdscxcy[:,2:4].reshape(batchsize,2,1,1),
# dxdy.reshape(batchsize,2,1,1),
# drdscxcy[:,0:2].reshape(batchsize,2,1,1),
# ),axis=1)
# samples=np.concatenate((action_samples,np.zeros((rl_dummy,num_extra_actions,1,1),dtype=floatX)),axis=1)
#
# if last is None:
# for j in range(batchsize):
# if not one_direction:
# if random.random()*2.0<=1.0:
# actions[j]=-actions[j]
# tmp=inputs[j]
# inputs[j]=outputs[j]
# outputs[j]=tmp
#
# X={
# 'source_image':inputs,
# 'target_image':outputs,
# 'action':actions,
# }
# if using_fingerprint:
# X['source_fingerprint']=idx1
# X['target_fingerprint']=idx2
# for h in minibatch_handlers:
# h(X)
# else:
# inputs=np.concatenate((last['source_image'][batchsize:],inputs),axis=0)
# outputs=np.concatenate((last['target_image'][batchsize:],outputs),axis=0)
# actions=np.concatenate((last['action'][batchsize:],actions),axis=0)
#
# for j in range(batchsize):
# if not one_direction:
# if random.random()*2.0<=1.0:
# actions[j]=-actions[j]
# tmp=inputs[j]
# inputs[j]=outputs[j]
# outputs[j]=tmp
#
# X={
# 'source_image':inputs,
# 'target_image':outputs,
# 'action':actions,
# }
# if using_fingerprint:
# X['source_fingerprint']=idx1
# X['target_fingerprint']=idx2
# for h in minibatch_handlers:
# h(X)
# last=X
#
# yield X,X['target_image']
# else:
# X={
# 'source_image':(aa(k*frames+beginpos)/256.0).astype(floatX),
# 'target_image':(aa(k*frames+endpos)/256.0).astype(floatX),
# 'action':actions2,
# }
# idx1 = create_fingerprint(k*frames+beginpos,32)
# idx2 = create_fingerprint(k*frames+endpos,32)
# if using_fingerprint:
# X['source_fingerprint']=idx1
# X['target_fingerprint']=idx2
# for h in minibatch_handlers:
# h(X)
# yield X,X['target_image']
# i+=1
def random_shift(n,*all_inputs):
    """Randomly translate each sample of every array in *all_inputs* by the
    same per-sample integer offset drawn from [-n, n) along the last two
    (spatial) axes, filling rolled-in pixels with zeros.

    Each element of all_inputs is a 4-D array (batch, channels, H, W) with a
    common batch size -- assumed from the indexing pattern; TODO confirm.

    Returns the shifted arrays followed by the sampled offsets as a
    (batch, 2) int8 array.
    """
    batchsize = len(all_inputs[0])
    # One (dy, dx) offset pair per sample, shared across all input arrays.
    actions = (np.random.rand(batchsize, 2, 1, 1)).astype(floatX)
    actions2 = (actions * n * 2).astype('int8') - n
    all_outputs = []
    for inputs in all_inputs:
        outputs = np.zeros(inputs.shape, dtype=floatX)
        for i in range(len(inputs)):
            # Zero-pad before rolling so wrapped-around pixels become black
            # instead of re-entering from the opposite edge.
            tmp = np.pad(inputs[i:i+1], ((0,0),(0,0),(n,n),(n,n)),
                         mode='constant', constant_values=0)
            tmp = np.roll(tmp, actions2[i,0,0,0], 2)
            tmp = np.roll(tmp, actions2[i,1,0,0], 3)
            if n > 0:
                outputs[i:i+1] = tmp[:,:,n:-n,n:-n]
            else:
                outputs[i:i+1] = tmp
        all_outputs += [outputs]
    # Fix: previously this used `len(inputs)`, the loop variable, which is
    # undefined when all_inputs is empty; use the batch size computed above.
    return all_outputs + [actions2.reshape(batchsize, 2)]
#def random_rotate(w,h,angle,scale,*all_inputs):
# cx=(np.random.rand(len(all_inputs[0])).astype(floatX))*w
# cy=(np.random.rand(len(all_inputs[0])).astype(floatX))*h
# actions=(np.random.rand(len(all_inputs[0]),4,1,1)).astype(floatX)
# actions2=np.zeros_like(actions)
# actions2[:,0]=(actions[:,0]*angle*2-angle).astype(floatX)
# actions2[:,1]=(actions[:,1]*scale*2-scale).astype(floatX)
# actions2[:,2,0,0]=cx
# actions2[:,3,0,0]=cy
# all_outputs=[]
# for inputs in all_inputs:
# outputs=np.zeros(inputs.shape,dtype=floatX)
# for i in range(len(inputs)):
# mat = cv2.getRotationMatrix2D((cx[i],cy[i]),actions2[i,0,0,0],1.0+actions2[i,1,0,0])
# tmp = cv2.warpAffine(inputs[i].transpose(1,2,0),mat,inputs[i].shape[1:]).transpose(2,0,1)
# #tmp=np.pad(inputs[i:i+1],((0,0),(0,0),(n,n),(n,n)),mode='constant',constant_values=0)
# #tmp=np.roll(tmp,actions2[i,0,0,0],2)
# #tmp=np.roll(tmp,actions2[i,1,0,0],3)
# outputs[i]=tmp
# all_outputs+=[outputs]
# return all_outputs+[actions2.reshape(len(inputs),4)]
def random_rotate(w,h,angle,scale,*all_inputs):
    """Apply a random rotation/scaling about a random center to every array
    in *all_inputs*, using the same per-sample transform for all arrays.

    w, h   -- range for the random rotation center (cx in [0,w), cy in [0,h)).
    angle  -- either a (lo, hi) degree range, or a number a meaning (-a, a).
    scale  -- either a (lo, hi) factor range, or a number s meaning (1-s, 1+s).
    Inputs are 4-D (batch, channels, H, W) arrays; channels-last conversion
    happens only for the cv2 call.
    NOTE(review): cv2.warpAffine's dsize is (width, height) but receives
    shape[1:] == (H, W); harmless for square images -- confirm for others.

    Returns the transformed arrays followed by the per-sample parameters
    (angle, scale, cx, cy) as a (batch, 4) array.
    """
    # Accept ints as well as floats for the scalar shorthand; previously
    # `type(angle)==float` let an int fall through and crash on angle[1].
    if isinstance(angle, (int, float)):
        angle = (-angle, angle)
    if isinstance(scale, (int, float)):
        scale = (1 - scale, 1 + scale)
    batchsize = len(all_inputs[0])
    cx = (np.random.rand(batchsize).astype(floatX)) * w
    cy = (np.random.rand(batchsize).astype(floatX)) * h
    actions = (np.random.rand(batchsize, 4, 1, 1)).astype(floatX)
    actions2 = np.zeros_like(actions)
    # Map uniform [0,1) samples into the requested ranges.
    actions2[:,0] = (actions[:,0] * (angle[1] - angle[0]) + angle[0]).astype(floatX)
    actions2[:,1] = (actions[:,1] * (scale[1] - scale[0]) + scale[0]).astype(floatX)
    actions2[:,2,0,0] = cx
    actions2[:,3,0,0] = cy
    all_outputs = []
    for inputs in all_inputs:
        outputs = np.zeros(inputs.shape, dtype=floatX)
        for i in range(len(inputs)):
            mat = cv2.getRotationMatrix2D((cx[i], cy[i]), actions2[i,0,0,0], actions2[i,1,0,0])
            tmp = cv2.warpAffine(inputs[i].transpose(1,2,0), mat, inputs[i].shape[1:]).transpose(2,0,1)
            outputs[i] = tmp
        all_outputs += [outputs]
    # Fix: previously used the loop variable `inputs` after the loop.
    return all_outputs + [actions2.reshape(batchsize, 4)]
def list_transpose(a):
    """Transpose a list of lists so that result[i][j] == a[j][i].

    The column count comes from the first row; rows shorter than the first
    raise IndexError (same contract as the original loop form).
    """
    width = len(a[0])
    return [[row[col] for row in a] for col in range(width)]
#num_extra_actions=4
#
#def enumerate_actions(d):
# for i in range(d):
# for j in range(0,30):
# value=np.zeros((handledata.num_curr_actions+num_extra_actions,1,1),dtype=floatX)
# value[i,0,0]=j*1.0/30
# yield value
#def build_action_sample(n,d,zero=False):
# value=(np.random.rand(n,handledata.num_curr_actions,1,1)*1.0).astype(theano.config.floatX)
# for j in range(n):
# k=int(random.random()*d)
# for i in range(0,handledata.num_curr_actions):
# if i!=k:
# value[j,i,0,0]=0
# if zero:
# value[0,:,:,:]=np.zeros_like(value[0,:,:,:])
# return value
#
#def build_freedim_sample(shape):
# value=np.random.rand(*shape)
# minval=value.min(axis=1,keepdims=True)
# value=(value==minval).astype(floatX)
# return value
#
#def build_freedim_sample_tensor(shape):
# srng = RandomStreams(lasagne.random.get_rng().randint(1, 2147462579))
# value=np.random.rand(*shape)
# value=srng.uniform(size=shape)
# minval=value.min(axis=1,keepdims=True)
# value=T.eq(value,minval)
# value=T.set_subtensor(value[0],T.zeros_like(value[0]))
# return value
#def show(src,norms,predictsloop,predictsloop2,predictsloop3,num_batchsize,t,bottom=None,right=None):
# t=t%len(predictsloop)
# w=64
# h=64
# xscreenbase=0
# yscreenbase=0
# for i in range(num_batchsize):
# for j in range(len(src)):
# #j=0
# imshow64x64("sample-"+str(i)+"-"+str(j),
# norms[j](src[j][i,0:3,:,:].transpose(1,2,0)))
# cv2.moveWindow("sample-"+str(i)+"-"+str(j),xscreenbase+j*w,yscreenbase+i*h)
#
# #j=1
# #cv2.imshow("sample-"+str(i)+"-"+str(j),
# # imnorm(srchide0[i,0:3].transpose(1,2,0)))
# #cv2.moveWindow("sample-"+str(i)+"-"+str(j),xscreenbase+j*w,yscreenbase+i*h)
# #j=1
# #cv2.imshow("sample-"+str(i)+"-"+str(j),
# # imnorm(recon[i,0:3].transpose(2,1,0)))
# #cv2.moveWindow("sample-"+str(i)+"-"+str(j),xscreenbase+j*w,yscreenbase+i*h)
#
# n=j+1
#
# #for p in range(1):
# # j=p+n
# # base=srchide1.shape[1]-1
# # cv2.imshow("sample-"+str(i)+"-"+str(j),
# # imnorm(enlarge(srchide1[i,base+p:base+p+1].transpose(1,2,0),4)))
# # cv2.moveWindow("sample-"+str(i)+"-"+str(j),xscreenbase+j*w,yscreenbase+i*h)
# #n+=1
#
#
# for p in range(4):
# j=p+n
# imshow64x64("sample-"+str(i)+"-"+str(j),
# imnorm(predictsloop[t][p][i,0:3].transpose(2,1,0)))
# cv2.moveWindow("sample-"+str(i)+"-"+str(j),xscreenbase+j*w,yscreenbase+i*h)
# n+=4
# for p in range(4): #num_actions+4
# j=p+n
# imshow64x64("sample-"+str(i)+"-"+str(j),
# imnorm(predictsloop2[t][p][i,0:3].transpose(2,1,0)))
# cv2.moveWindow("sample-"+str(i)+"-"+str(j),xscreenbase+j*w,yscreenbase+i*h)
# n+=4 #num_actions+4
# if i>=7:
# break
# if bottom is not None:
# cv2.imshow('bottom',bottom)
# cv2.moveWindow("bottom",0,64*8)
# if right is not None:
# cv2.imshow('right',right)
# cv2.moveWindow("right",64*10,0)
#ignore_output=False
#def set_ignore_output(val):
# global ignore_output
# ignore_output=val
#
#objective_loss_function=mylossfunc
#def set_loss_function(f):
# global objective_loss_function
# objective_loss_function=f
#is_classify=False
#def set_classify(val):
# global is_classify
# is_classify=val
#def objective(layers,myloss=None,deterministic=False,*args,**kwargs):
# if not is_autoencoder and not ignore_output:
# loss = nolearn.lasagne.objective(layers,*args,**kwargs)
# loss += myloss
# else:
# loss = myloss
# return loss
def myscore(key,val,X,y):
    """Score callback for nolearn: echo the precomputed *val*.

    key, X and y exist only to satisfy the (name, X, y) callback signature.
    """
    return val
def make_nolearn_scores(losslist,tagslice):
res=[]
for tag,sli in tagslice:
if len(losslist[sli])>0:
i=0
for t in losslist[sli]:
if len(losslist[sli])>1:
res+=[[tag+'-'+str(i),curry(myscore,tag,t)]]
else:
res+=[[tag,curry(myscore,tag,t)]]
i+=1
return res
#src=None
#norms=None
#predictsloop=[]
#predictsloop2=[]
#predictsloop3=[]
#bottom=None
#right=None
#sn=0
#def mybatchok(num_batchsize,sigma_base,sigma_var,net,history):
# #global sn
#
# sigma=utils.floatX(random.random()*sigma_base.get_value())
# sigma_var.set_value(sigma)
#
# sys.stdout.write(".")
# #show(src,norms,predictsloop,predictsloop2,predictsloop3,num_batchsize,sn,bottom=bottom,right=right)
# #cv2.waitKey(100)
# #sn+=1
#def plot_loss(net,pltskip):
# skip=int(pltskip.get_value()+0.5)
# train_loss = [row['train_loss'] for row in net.train_history_]
# valid_loss = [row['valid_loss'] for row in net.train_history_]
# for i in range(skip):
# if i<len(train_loss):
# train_loss[i]=None
# if i<len(valid_loss):
# valid_loss[i]=None
# plt.plot(train_loss, label='train loss')
# plt.plot(valid_loss, label='valid loss')
# plt.xlabel('epoch')
# plt.ylabel('loss')
# plt.legend(loc='best')
# return plt
#def myepochok():
# #global bottom,right
#
# #curr_epoch=epoch_begin+len(history)
#
# save_params(curr_epoch,[
# sorted_values(networks) for networks in all_networks
# ],[],'hideconv-',deletelayers=[])
# #print ''
#
# #tr.print_diff()
#
## easyshared.update()
##
## fig_size = plt.rcParams["figure.figsize"]
## fig_size[0] = 10
## fig_size[1] = 4
## plt.rcParams["figure.figsize"] = fig_size
## plt.clf()
## loss_plt=plot_loss(net,pltskip)
## loss_plt.savefig('loss.png',dpi=64)
## #print net.layers_['source']
## #feature_plt=nolearn.lasagne.visualize.plot_conv_weights(net.layers_['source'],figsize=(6, 6))
## #feature_plt.savefig('feature.png',dpi=64)
##
## bottom=cv2.imread('loss.png')
## #right=cv2.imread('feature.png')
##
## #myupdate(epoch_begin,num_batchsize,all_networks,predict_fns,walker_fn,net,history)
##
## if len(history) % 5 == 0:
## while gc.collect() > 0:
## pass
#visualize_validation_set=False
#
#def myupdate(epoch_begin,num_batchsize,all_networks,predict_fns,walker_fn,net,history):
# global src,norms,predictsloop,predictsloop2,predictsloop3
# predict_fn,visual_fn=predict_fns
# if visualize_validation_set:
# iterate_fn=net.batch_iterator_val
# else:
# iterate_fn=net.batch_iterator_train
# src=None
# norms=None
# predictsloop=[]
# predictsloop2=[]
# predictsloop3=[]
# it=iterate_fn(None,None)
# for batch in it:
# #inputs, inputs2, actions, outputs, outputs2, rewards, targets, flags = batch
#
# inputs = batch[0]['source_image']
# actions = batch[0]['action']
# outputs = batch[0]['target_image']
#
# #if inputs.shape[2]==256:
# # inputs=inputs[:,:,::4,::4]
# # outputs=outputs[:,:,::4,::4]
#
# vis_args = [batch[0][key] for key in visual_varnames]
# vis = visual_fn(inputs,actions,*vis_args)
# #srchide0=hides[0]
# #srchide1=hides[1]
# #srchide2=hides[2]
# #srchide0=srchide0.transpose(0,1,3,2)
# #srchide1=srchide1.transpose(0,1,3,2)
# #srchide2=srchide2.transpose(0,1,3,2)
#
# #recon=recon_fn(inputs,
# # np.concatenate((actions,np.zeros((num_batchsize,2,1,1),dtype=floatX)),axis=1),
# # outputs)
#
# batchsize,_,ig1,ig2=actions.shape
# num_actions=handledata.num_curr_actions
#
# p=[inputs]*(num_actions+4)
# for t in range(5):
# #sources=[]
# predicts=[]
# predicts2=[]
# for i in range(num_actions+4):
#
# if t>0:
# actions1=np.concatenate((np.eye(num_actions+4,dtype=floatX)[i:i+1],)*batchsize,axis=0).reshape(batchsize,num_actions+4,1,1)*(0.1)
# else:
# actions1=np.concatenate((np.eye(num_actions+4,dtype=floatX)[i:i+1],)*batchsize,axis=0).reshape(batchsize,num_actions+4,1,1)*(0.0)
# #print 'predict actions',actions1
# if not using_fingerprint:
# predict=predict_fn(
# p[i],
# actions1,
# )
# else:
# predict=predict_fn(
# p[i],
# batch[0]['source_fingerprint'], #XXX
# outputs,#XXX
# batch[0]['target_fingerprint'], #XXX
# actions1,
# )
# predicts+=[predict]
#
# actions2=np.concatenate((np.eye(num_actions+4,dtype=floatX)[i:i+1],)*batchsize,axis=0).reshape(batchsize,num_actions+4,1,1)*(0.1*t)
# if not using_fingerprint:
# predict=predict_fn(
# inputs,
# actions2,
# )
# else:
# predict=predict_fn(
# inputs,
# batch[0]['source_fingerprint'],
# outputs,
# batch[0]['target_fingerprint'],
# actions2,
# )
# predicts2+=[predict]
# p=predicts
# predictsloop+=[predicts]
# #predictsloop+=[map(deconv3_fn,predicts)]
# predictsloop2+=[predicts2]
#
# #tmp=list_transpose(sources) # (batchsize,actions,[features,width,height])
# #for j in range(8):
# # print [(x**2).sum()**0.5 for x in tmp[j][0:4]]
# #print np. asarray(bn_std)
# #src=[inputs.transpose(0,1,3,2),np.roll(inputs.transpose(0,1,3,2),1,axis=0)]
# src=[inputs.transpose(0,1,3,2),outputs.transpose(0,1,3,2)]+[t.transpose(0,1,3,2) for t in vis]
# norms=[imnorm]*len(src)
#
# #print action
# for t in range(len(predictsloop)):
# show(src,norms,predictsloop,predictsloop2,predictsloop3,num_batchsize,t)
# if 0xFF & cv2.waitKey(100) == 27:
# break_flag = True
# break
#
# it.close()
# break
#class PrintLayerInfo:
# def __init__(self):
# pass
#
# def __call__(self, nn, train_history=None):
# verbose=nn.verbose
# nn.verbose=2
# nolearn.lasagne.PrintLayerInfo()(nn,train_history)
# nn.verbose=verbose
#
#class PrintLog(nolearn.lasagne.PrintLog):
# def __call__(self, nn, train_history):
# self.first_iteration=True
# train_history = [x for x in train_history]
# train_history[-1]=train_history[-1].copy()
# info = train_history[-1]
# for name, func in nn.scores_train:
# if info[name]<0.001:
# info[name] = '[{:0.6e}]'.format(info[name])
# print self.table(nn, train_history)
# sys.stdout.flush()
#
#class Net(nolearn.lasagne.NeuralNet):
# def initialize_layers(self, layers=None):
#
# from nolearn.lasagne.base import Layers
# from lasagne.layers import get_all_layers
# from lasagne.layers import get_output
# from lasagne.layers import InputLayer
# from lasagne.layers import Layer
# from lasagne.utils import floatX
# from lasagne.utils import unique
#
# if layers is not None:
# self.layers = layers
# self.layers_ = Layers()
#
# assert isinstance(self.layers[0], Layer)
#
# if isinstance(self.layers[0], Layer):
# j = 0
# for out_layer in self.layers:
# for i, layer in enumerate(get_all_layers(out_layer)):
# if layer not in self.layers_.values():
# name = layer.name or self._layer_name(layer.__class__, j)
# j+=1
# if name in self.layers_:
# print 'WARNING: ',name,'exists.'
# self.layers_[name] = layer
# if self._get_params_for(name) != {}:
# raise ValueError(
# "You can't use keyword params when passing a Lasagne "
# "instance object as the 'layers' parameter of "
# "'NeuralNet'."
# )
# return self.layers[-1]
#visual_keys=[]
#visual_vars=[]
#visual_varnames=[]
#def register_visual(key):
# global visual_keys
# visual_keys+=[key]
#def register_visual_var(name,var):
# global visual_vars
# global visual_varnames
# visual_vars+=[var]
# visual_varnames+=[name]
batch_iterator_train=None
batch_iterator_val=None
batch_iterator_test=None
batch_iterator_inference=None
def register_batch_iterator(train,val,test=None,inference=None):
    """Install the minibatch iterators used for each phase of a run."""
    global batch_iterator_train,batch_iterator_val,batch_iterator_test,batch_iterator_inference
    batch_iterator_train = train
    batch_iterator_val = val
    batch_iterator_test = test
    batch_iterator_inference = inference
global_batches=0
def register_batches(batches):
    """Record the number of minibatches per epoch for later reference."""
    global global_batches
    global_batches = batches
def layers(l):
    # Thin alias: expand a layer-spec list through the project's macro
    # preprocessor (`macros` is defined elsewhere in this package).
    return macros(l)
def paramlayers(layertype,l,h=None):
    """Tag every layer spec in *l* with m['saveparamlayer']=layertype.

    l is macro-expanded first; each entry is a tuple whose last element is
    an options dict, an options set (converted to a {flag: True} dict), or
    something else (then a fresh dict is appended).  h, when given, is a
    per-entry validator called before tagging.
    """
    res=()
    l=macros(l)
    i=0
    for a in l:
        if h is not None:
            h(a)
        #m=dict(a[-1].copy())
        if type(a[-1]) == dict:
            m = a[-1].copy()
        elif type(a[-1]) == set:
            # Convert a flag-set like {'nopad'} into {'nopad': True}.
            m = {}
            for t in a[-1]:
                m[t] = True
        else:
            m = {}
        # NOTE(review): `a = a+(m, )` runs for every branch, so for entries
        # that already end in a dict/set the result keeps the original
        # options element AND appends m (tuple grows by one) -- confirm
        # whether replacing the last element was intended.
        a = a+(m, )
        assert 'saveparamlayer' not in m
        m['saveparamlayer']=layertype
        res+=(a[:-1]+(m,),)
    return res
def newlayers_checker(a):
    """Validate a new-layer spec: its next-to-last slot must be 0.

    Prints the offending spec before the assertion fires so failures
    are diagnosable.
    """
    if a[-2] != 0:
        print(a)
    assert a[-2] == 0
def newlayers(l):
    """Mark layer specs as newly added ('newlayer'), validating each one."""
    return paramlayers('newlayer', l, h=newlayers_checker)
def deletelayers(l):
    """Mark layer specs for deletion ('deletelayer')."""
    result = paramlayers('deletelayer', l)
    return result
def resetlayers(l):
    """Mark layer specs whose parameters should be re-initialized ('resetlayer')."""
    result = paramlayers('resetlayer', l)
    return result
on_batch_finished = []
on_epoch_finished = []
on_training_started = []
on_training_finished = []


def register_training_callbacks(bf, ef, ts, tf):
    """Append callbacks fired on batch end / epoch end / train start / train end.

    Each argument is a list of callables taking the caller's locals() dict.
    """
    on_batch_finished.extend(bf)
    on_epoch_finished.extend(ef)
    on_training_started.extend(ts)
    on_training_finished.extend(tf)
on_validation_started = []
on_validation_finished = []


def register_validation_callbacks(vs, vf):
    """Append callbacks fired at validation start / validation end."""
    on_validation_started.extend(vs)
    on_validation_finished.extend(vf)
training_data_shaker = None
validation_data_shaker = None


def register_data_shaker(train, val=None):
    """Install per-batch 'shaker' (augmentation) hooks for train and val."""
    global training_data_shaker, validation_data_shaker
    training_data_shaker = train
    validation_data_shaker = val
def save_weight_visualization(layernames, w, b, a):
    """Dump per-layer weights, biases and activations to <args.save>/vis.h5.

    layernames, w, b, a are parallel lists (one entry per layer); None or
    zero-dim entries are skipped.  Relies on the module-level `args` and
    `logging`; exits the process if h5py is unavailable.
    """
    try:
        import h5py
    except ImportError:
        logging.error("Attempt to create HDF5 Loader but h5py is not installed.")
        exit(-1)
    fn = os.path.join(args.save, 'vis.h5')
    # 'w' mode truncates: each call overwrites any previous visualization.
    vis_db = h5py.File(fn, 'w')
    db_layers = vis_db.create_group("layers")
    logging.info('Saving visualization to %s', fn)
    for i in range(0, len(w)):
        dset = db_layers.create_group(str(i))
        dset.attrs['name'] = layernames[i]
        # Skip missing entries and scalars (empty .shape is falsy).
        if w[i] is not None and w[i].shape:
            dset.create_dataset('weights', data=w[i])
        if b[i] is not None and b[i].shape:
            dset.create_dataset('bias', data=b[i])
        if a[i] is not None and a[i].shape:
            dset.create_dataset('activations', data=a[i])
    vis_db.close()
# Compiled Theano functions / layer lists, populated by run().
train_fn = None
val_fn = None
inference_fn = None
inference_layers = None
inference_layers_fn = None
def inference(num_batchsize,inference_db,mean_data=None,visualize_inf=False):
    """Run the compiled inference_fn over every batch of inference_db.

    Feeds each batch (plus optional mean image) to inference_fn, hands
    results and their ids to the module-level inference_handler, and
    optionally dumps weights/activations via save_weight_visualization.
    Returns the first output of the LAST batch only.
    Requires run() to have compiled inference_fn (and, when
    visualize_inf, inference_layers/inference_layers_fn) beforehand.
    """
    count=0
    for batch in batch_iterator_inference(num_batchsize,inference_db):
        # Iterators may yield (batch, ids) or a bare batch; in the latter
        # case synthesize sequential ids.
        if type(batch)==tuple:
            batch,ids=batch
        else:
            ids=[]
            for t in range(num_batchsize):
                ids=ids+[count]
                count+=1
        if mean_data is not None:
            assert 'mean' not in batch
            batch['mean']=mean_data
        out = inference_fn(*sorted_values(batch))
        if inference_handler is not None:
            inference_handler(out[0],ids)
        if visualize_inf:
            layernames=[]
            w=[]
            b=[]
            a=inference_layers_fn(*sorted_values(batch))
            for layer_id,layer in enumerate(inference_layers):
                layername=layer.name or layer.__class__.__name__+str(layer_id)
                weights=None
                bias=None
                # Lasagne convention: 'W' and 'b' parameter names.
                for p in layer.get_params():
                    if p.name=='W':
                        weights=p.get_value()
                    if p.name=='b':
                        bias=p.get_value()
                layernames+=[layername]
                w+=[weights]
                b+=[bias]
            # NOTE(review): vis.h5 is rewritten for every batch, so only
            # the last batch's visualization survives — confirm intended.
            save_weight_visualization(layernames, w, b, a)
    return out[0]
loss_handler = None


def register_loss_handler(h):
    """Install a hook that may transform the training loss.

    The hook receives (loss, list_of_output_layers) and returns the new
    loss expression (e.g. with regularization added).
    """
    global loss_handler
    loss_handler = h
# Define load_mean_file() with Caffe .binaryproto support when caffe is
# importable; otherwise fall back to a .npy-only loader.
try:
    import caffe
    from caffe.proto import caffe_pb2
    def load_mean_file(mean_file):
        """Load a mean image from a .npy file or a Caffe BlobProto file.

        Returns an ndarray of shape (channels, height, width) for
        binaryproto input (the leading num dimension is dropped).
        Raises ValueError if the blob carries neither a 'shape' field
        nor the legacy num/channels/height/width fields.
        """
        if mean_file.endswith('.npy'):
            return np.load(mean_file)
        with open(mean_file, 'rb') as infile:
            blob = caffe_pb2.BlobProto()
            blob.MergeFromString(infile.read())
        if blob.HasField('shape'):
            blob_dims = blob.shape
            assert len(blob_dims) == 4, 'Shape should have 4 dimensions - shape is "%s"' % blob.shape
        elif blob.HasField('num') and blob.HasField('channels') and \
                blob.HasField('height') and blob.HasField('width'):
            # Legacy blobs store dimensions as four scalar fields.
            blob_dims = (blob.num, blob.channels, blob.height, blob.width)
        else:
            raise ValueError('blob does not provide shape or 4d dimensions')
        #pixel = np.reshape(blob.data, blob_dims[1:]).mean(1).mean(1)
        #print pixel.shape
        #t.set_mean('data', pixel)
        mean=np.reshape(blob.data, blob_dims[1:])
        #mean=mean[:,(256-224)//2:(256-224)//2+224,(256-224)//2:(256-224)//2+224]
        return mean
except ImportError:
    def load_mean_file(mean_file):
        """Load a mean image; without caffe only .npy files are supported."""
        assert mean_file.endswith('.npy')
        return np.load(mean_file)
class DefaultNetworkBuilder(object):
    """Build a network from a model file.

    The model file is executed as Python and must define either
    `network` (a deepstacks layer-spec consumed by
    deepstacks.lasagne.build_network) or `build_network` (a callable
    taking the inputs dict and returning the built network tuple).
    """
    def __init__(self,modelfile):
        # SECURITY NOTE: exec of an arbitrary file — the model file is
        # trusted code, never untrusted input.
        self.modelfile=modelfile
        m={}
        exec(open(modelfile).read(), m)
        if 'network' in m:
            self.network=m['network']
            self.build_network=None
        elif 'build_network' in m:
            self.network=None
            self.build_network=m['build_network']
    def __call__(self,inputs):
        """Build and return the network from a dict of InputLayers.

        When a 'mean' input is present it is subtracted from 'image'
        (elementwise) before building, and removed from the inputs dict.
        Returns None if the model file defined neither entry point.
        """
        if self.network is not None:
            #if 'image' not in inputs:
            #    print inputs
            network=inputs['image']
            if 'mean' in inputs:
                # image - mean, as a symbolic merge layer.
                network=lasagne.layers.ElemwiseMergeLayer((network,inputs['mean']),T.sub)
                inputs=inputs.copy()
                inputs.pop('mean')
            return deepstacks.lasagne.build_network(network, self.network, inputs)
        elif self.build_network is not None:
            return self.build_network(inputs)
# Populated inside run(); read by callbacks via locals()/globals().
lrpolicy = None
layers = None
def run(args):
    """Main driver: build the model, then train, validate or infer.

    The mode is chosen from which db argument is non-empty
    (train_db / validation_db / inference_db; default training).
    Relies on many module-level registrations (batch iterators,
    callbacks, handlers) having been made before the call.
    """
    global lrpolicy
    global train_fn,val_fn,inference_fn
    global inference_layers,inference_layers_fn
    global network_builder
    global layers
    # Decide run mode from whichever data source was supplied.
    if args.train_db != '':
        mode='training'
    elif args.validation_db != '':
        mode='validation'
    elif args.inference_db != '':
        mode='inference'
    else:
        mode='training'
    logging.info(mode)
    if args.verbose:
        deepstacks.set_verbose(args.verbose)
    if args.seed:
        random.random.seed(args.seed)
        np.random.seed(args.seed)
    if args.network!='':
        network_builder=DefaultNetworkBuilder(os.path.join(os.path.dirname(os.path.realpath(__file__)),args.networkDirectory,args.network))
    num_batches=global_batches
    num_epochs=args.epoch
    num_batchsize=args.batch_size
    learning_rate=args.lr_base_rate
    momentum=args.momentum
    grads_clip=args.grads_clip
    grads_noise=args.grads_noise
    accumulation=args.accumulation
    total_training_steps = num_epochs
    lrpolicy = lr_policy.LRPolicy(args.lr_policy,
                                  args.lr_base_rate,
                                  args.lr_gamma,
                                  args.lr_power,
                                  total_training_steps,
                                  args.lr_stepvalues)
    # if batch_iterator_train is None:
    #     loader=load_200
    #     iterate_minibatches=iterate_minibatches_200
    #     aa = loader()
    #     register_batch_iterator(AsyncIterate(curry(iterate_minibatches,aa,'train',batchsize=num_batchsize,iteratesize=400,shuffle=True)),AsyncIterate(curry(iterate_minibatches,aa,'val',batchsize=num_batchsize,iteratesize=1,shuffle=True)))
    # Probe a single batch to learn each named input's shape and dtype,
    # then close the iterator.
    m={}
    dtypes={}
    #print batch_iterator_train
    #print args
    #print mode
    if mode == 'training':
        it=batch_iterator_train(num_batchsize,args.train_db)
    elif mode == 'validation':
        it=batch_iterator_val(num_batchsize,args.validation_db)
    elif mode == 'inference':
        #print 'args.inference_db',args.inference_db
        it=batch_iterator_inference(num_batchsize,args.inference_db)
    #print 'mode',mode
    #print it
    batch0 = None
    for X in it:
        #print X
        if type(X)==tuple:
            X,ids=X
        batch0 = X
        for t in X:
            m[t]=X[t].shape
            dtypes[t]=X[t].dtype
        it.close()
        break
    # Live-tunable scalars backed by small text files on disk.
    lr=easyshared.add('lr.txt',learning_rate)
    sigma_base=easyshared.add('sigma.txt',1.0)
    pltskip=easyshared.add('pltskip.txt',0.0)
    decay=easyshared.add('decay.txt',1e-8)
    subtractMean = args.subtractMean #'image', 'pixel' or 'none'
    mean_data = load_mean_file(args.mean).astype(floatX) if args.mean!='' else None
    if subtractMean == 'pixel' and mean_data is not None:
        # Collapse the mean image to a single per-channel pixel value.
        pixel = mean_data.mean(axis=(1,2),keepdims=True,dtype=floatX)
        mean_data = np.tile(pixel,(1,mean_data.shape[1],mean_data.shape[2]))
    if mean_data is not None:
        # Broadcast the mean over the batch dimension.
        mean_data=np.tile(mean_data[np.newaxis,...],(num_batchsize,1,1,1))
    easyshared.update()
    sigma_var=theano.shared(utils.floatX(1.0))
    # Build one symbolic InputLayer per named input discovered above.
    inputs={}
    if mean_data is not None:
        m['mean']=mean_data.shape
        dtypes['mean']=mean_data.dtype
    for k in m:
        out=StringIO()
        print >>out,k,m[k],dtypes[k]
        logging.info(string.strip(out.getvalue()))
        name=k
        # Batch axis non-broadcastable; size-1 trailing axes broadcastable.
        input_var_type = T.TensorType(dtypes[k],
                [False,]+[s == 1 for s in m[k][1:]])
        # copy lasagne code would cause a TypeError: integer vector required for argument: true_one_of_n(got type: TensorType(int64, (True,)) instead of: TensorType(int64, vector))
        #input_var_type = T.TensorType(dtypes[k],
        #    [s == 1 for s in m[k][:]])
        var_name = ("%s.input" % name) if name is not None else "input"
        input_var = input_var_type(var_name)
        inputs[k]=lasagne.layers.InputLayer(name=name,input_var=input_var,shape=m[k])
        #print lasagne.layers.get_output_shape(inputs[k])
    # source_image_network=lasagne.layers.InputLayer(name='source_image',shape=m['source_image'],input_var=source_image_var)
    # target_image_network=lasagne.layers.InputLayer(name='target_image',shape=m['target_image'],input_var=target_image_var)
    # action_network=lasagne.layers.InputLayer(name='action',shape=m['action'],input_var=action_var)
    #print "Building model and compiling functions..."
    # Assemble the model and collect errors/watchpoints.
    delta_errors,state_layers,hidestate_layers,delta_layers,delta_predict_networks = [],[],[],[],[]
    zeroarch_networks,zeroarch_bnlayer,watcher_network,updater = None,None,None,None
    network,stacks,layers,raw_errors,raw_watchpoints = network_builder(inputs)
    for h in model_handlers:
        h(inputs,network,stacks,layers,raw_errors,raw_watchpoints)
    #all_networks,ordered_errors,ordered_watch_errors,conv_groups = network_builder(inputs)
    all_networks=[create_layers_dict(layers)]
    ordered_errors = get_ordered_errors(raw_errors)
    #print ordered_errors
    ordered_val_errors = get_ordered_errors(raw_errors,deterministic=True)
    #print ordered_val_errors
    ordered_watch_errors = get_ordered_errors(raw_watchpoints)
    ordered_val_watch_errors = get_ordered_errors(raw_watchpoints,deterministic=True)
    conv_groups = stacks
    # Flatten the (tag, errors) pairs into flat lists plus tag->slice
    # maps so per-tag numbers can be reported later.
    errors = []
    val_errors = []
    val_watch_errors = []
    train_watch_errors = []
    tagslice = []
    count = 0
    valtagslice = []
    valcount = 0
    for tag,errs in ordered_errors:
        if not tag.startswith('val:'):
            errors += errs
            tagslice += [[tag,slice(count,count+len(errs))]]
            count += len(errs)
    for tag,errs in ordered_val_errors:
        if not tag.startswith('train:'):
            val_errors += errs
            valtagslice += [[tag,slice(valcount,valcount+len(errs))]]
            valcount += len(errs)
    assert len(val_errors)==len(errors)
    i=0
    # Watchpoints are monitored but not optimized; a 'train:'/'val:'
    # prefix restricts them to one phase.
    for tag,errs in ordered_watch_errors:
        valtag,valerrs=ordered_val_watch_errors[i]
        assert tag==valtag
        assert len(errs)==len(valerrs)
        if tag.startswith('train:'):
            train_watch_errors += errs
            tagslice += [[tag[len('train:'):],slice(count,count+len(errs))]]
            count += len(errs)
        elif tag.startswith('val:'):
            val_watch_errors += valerrs
            valtagslice += [[tag[len('val:'):],slice(valcount,valcount+len(errs))]]
            valcount += len(errs)
        else:
            val_watch_errors += errs
            valtagslice += [[tag,slice(valcount,valcount+len(errs))]]
            valcount += len(errs)
        i+=1
    errors = [errors]
    val_errors = [val_errors]
    val_watch_errors = [val_watch_errors]
    train_watch_errors = [train_watch_errors]
    # Weight loading / layer add-delete-reset bookkeeping.
    has_loading_networks=False
    loading_networks_list=[]
    for networks in all_networks:
        if 'loading_networks' in networks:
            if networks['loading_networks'] is not None:
                has_loading_networks=True
                loading_networks=networks['loading_networks']
            networks.pop('loading_networks')
        else:
            loading_networks=networks
        if loading_networks is not None:
            loading_networks_list+=[loading_networks]
    newlayers = conv_groups['newlayer'] if 'newlayer' in conv_groups else []
    resetlayers = conv_groups['resetlayer'] if 'resetlayer' in conv_groups else []
    if args.weights != '':
        weights=os.path.join(args.save,args.weights+'-')
    elif args.snapshotPrefix !='':
        weights=os.path.join(args.save,args.snapshotPrefix+'-')
    else:
        weights=os.path.join(args.save,'')
    if args.snapshotPrefix !='':
        prefix=os.path.join(args.save,args.snapshotPrefix+'-')
    else:
        prefix=os.path.join(args.save,'')
    epoch_begin,mismatch=load_params([
        sorted_values(loading_networks) for loading_networks in loading_networks_list
    ],[],weights,ignore_mismatch=True,newlayers=newlayers,resetlayers=resetlayers)
    if args.weights != '':
        assert epoch_begin!=0
    logging.info('epoch_begin=%d'%epoch_begin)
    for h in params_handlers:
        h(inputs,network,stacks,layers,raw_errors,raw_watchpoints)
    # One-shot maintenance modes: delete layers / resave, then exit.
    if 'deletelayer' in conv_groups:
        deletelayers = conv_groups['deletelayer']
        save_params(epoch_begin,[
            sorted_values(networks) for networks in all_networks
        ],[],prefix,deletelayers=deletelayers)
        logging.info('layer(s) deleted.')
        exit(1)
    if has_loading_networks:
        save_params(epoch_begin,[
            sorted_values(networks) for networks in all_networks
        ],[],prefix)
        logging.info('save.')
        exit(0)
    if updater is not None:
        updater()
    # Total loss = sum of per-error means; watchpoints are tracked in
    # losslist/vallosslist but never added to the optimized loss.
    params = lasagne.layers.get_all_params(sum([networks.values() for networks in all_networks],[]), trainable=True)
    loss = 0.0
    valloss = 0.0
    losslist = []
    vallosslist = []
    tmp = 0.0
    for ee in errors:
        for err in ee:
            if err!=None:
                tmp = err.mean(dtype=floatX)
                losslist = losslist+[tmp]
                loss = loss+tmp
    for ee in val_errors:
        for err in ee:
            if err!=None:
                tmp = err.mean(dtype=floatX)
                vallosslist = vallosslist+[tmp]
                valloss = valloss+tmp
    for ee in val_watch_errors:
        for err in ee:
            if err!=None:
                tmp = err.mean(dtype=floatX)
                vallosslist = vallosslist+[tmp]
    for ee in train_watch_errors:
        for err in ee:
            if err!=None:
                tmp = err.mean(dtype=floatX)
                losslist = losslist+[tmp]
    logging.info('count_params: %d'%sum([lasagne.layers.count_params(networks.values(),trainable=True) for networks in all_networks],0))
    loss0 = loss
    if loss_handler is not None:
        loss = loss_handler(loss,sum([networks.values() for networks in all_networks],[]))
    extra_loss = loss - loss0
    # Choosing the l2_penalty weight: average first, then spread it over
    # the ideal number of samples.  Each sample supplies 64*64*3
    # equations, so ~700 "ideal" samples suffice to determine all the
    # parameters; spread the penalty over those ~700 samples.
    #l2_penalty = lasagne.regularization.regularize_network_params(networks.values(),lasagne.regularization.l1)/utils.floatX(2.0)
    #loss = loss+l2_penalty/(lasagne.layers.count_params(networks.values(),regularizable=True))/(lasagne.layers.count_params(networks.values(),trainable=True)/(64*64*3))
    # walker_fn = None
    #
    # if 'predict' in conv_groups:
    #     key='predict'
    # else:
    #     key='output'
    # if not using_fingerprint:
    #     predict_fn = theano.function([inputs['source_image'].input_var,inputs['action'].input_var],
    #             lasagne.layers.get_output(conv_groups[key][0],deterministic=True),
    #             on_unused_input='warn', allow_input_downcast=True)
    # else:
    #     predict_fn = theano.function([
    #         inputs['source_image'].input_var,
    #         inputs['source_fingerprint'].input_var,
    #         inputs['target_image'].input_var, #XXX
    #         inputs['target_fingerprint'].input_var, #XXX
    #         inputs['action'].input_var],
    #             lasagne.layers.get_output(conv_groups[key][0],deterministic=True),
    #             on_unused_input='warn', allow_input_downcast=True)
    # visual_fn = theano.function([inputs['source_image'].input_var,inputs['action'].input_var]+visual_vars,
    #         [lasagne.layers.get_output(conv_groups[key][0],deterministic=True) for key in visual_keys],
    #         on_unused_input='warn', allow_input_downcast=True)
    #predict2_fn = theano.function([source_image_var,target_image_var,action_var],
    #        lasagne.layers.get_output(conv_groups['output'][0],deterministic=True),
    #        on_unused_input='warn', allow_input_downcast=True)
    gc.collect()
    if updater is not None:
        updater()
    #layers=list(set(sum([networks.values() for networks in all_networks],[]))-{conv_groups['output'][0]})+[conv_groups['output'][0]]
    conv_groups=None
    # if using_nolearn:
    #     net = Net(
    #         layers=layers,
    #         update=crossbatch_momentum.adamax,
    #         update_learning_rate=learning_rate,
    #         update_average=accumulation,
    #         update_grads_clip=grads_clip,
    #         update_noise=False,
    #         objective=objective,
    #         objective_loss_function=objective_loss_function,
    #         objective_myloss=loss,
    #         scores_train=make_nolearn_scores(losslist,tagslice),
    #         scores_valid=make_nolearn_scores(vallosslist,valtagslice),
    #         y_tensor_type=y_tensor_type,
    #         train_split=lambda X,y,net:(X,X,y,y),
    #         verbose=0,
    #         regression=not is_classify,
    #         batch_iterator_train=curry(wrap_batch_iterator_train,num_batchsize),
    #         batch_iterator_val=curry(wrap_batch_iterator_val,num_batchsize),
    #         check_input=False,
    #         on_batch_finished=[
    #             #curry(mybatchok,num_batchsize,sigma_base,sigma_var)
    #             ]+on_batch_finished,
    #         on_epoch_finished=[
    #             curry(myepochok,epoch_begin,num_batchsize,all_networks,easyshared,pltskip),
    #             #curry(myupdate,epoch_begin,num_batchsize,all_networks,[predict_fn,visual_fn],walker_fn)
    #             PrintLog(),
    #             ]+on_epoch_finished,
    #         on_training_started=[
    #             #PrintLayerInfo(), #bugy
    #             #curry(myupdate,epoch_begin,num_batchsize,all_networks,[predict_fn,visual_fn],walker_fn)
    #             ]+on_training_started,
    #         on_training_finished=[
    #             ]+on_training_finished,
    #         max_epochs=num_epochs,
    #
    #         )
    #
    #     net.initialize_layers()
    #
    #     assert net.layers_[-1]==net.layers[-1] # nolearn bug, two layer with same name would cause this
    #
    #     X0={}
    #     for X,y in batch_iterator_train(num_batchsize):
    #         for t in X:
    #             X0[t]=None
    #         break
    #
    #     net.fit(X0,np.zeros((num_batchsize,),dtype=floatX))
    # else:
    # Sanity check: symbolic output shapes must match shapes computed on
    # the concrete probe batch.
    for layer in lasagne.layers.get_all_layers(stacks['_all_']): #lasagne.layers.get_all_layers(layers+stacks['output']):
        fn = theano.function(
                map(lambda x:x.input_var,sorted_values(inputs)),
                T.as_tensor_variable(lasagne.layers.get_output(layer,deterministic=True)).shape,
                on_unused_input='warn',
                allow_input_downcast=True,
                )
        shape1=lasagne.layers.get_output_shape(layer)
        shape2=tuple(fn(*sorted_values(batch0)))
        print '---',layer,'---'
        print shape1,shape2
        assert shape1==shape2
    print 'output shape ok'
    if mode=='inference':
        # Compile the inference functions (prefer a 'predict' stack).
        if 'predict' in stacks:
            key='predict'
        else:
            key='output'
        if inference_fn is None:
            inference_fn = theano.function(
                map(lambda x:x.input_var,sorted_values(inputs)),
                map(lambda x:lasagne.layers.get_output(x,deterministic=True),stacks[key]),
                on_unused_input='warn',
                allow_input_downcast=True,
                )
        if inference_layers_fn is None:
            inference_layers = lasagne.layers.get_all_layers(layers)
            for layer in inference_layers:
                print layer
                print type(lasagne.layers.get_output(layer,deterministic=True))
            inference_layers_fn = theano.function(
                map(lambda x:x.input_var,sorted_values(inputs)),
                map(lambda x:lasagne.layers.get_output(x,deterministic=True),inference_layers),
                on_unused_input='warn',
                allow_input_downcast=True,
                )
        logging.info('num_batchsize=%d'%num_batchsize)
        inference(num_batchsize,args.inference_db,mean_data=mean_data,visualize_inf=args.visualize_inf)
        # for batch in batch_iterator_inference(num_batchsize,args.inference_db):
        #     out = inference_fn(*sorted_values(batch))
        #     if inference_handler is not None:
        #         inference_handler(out[0])
        # return out[0]
    elif mode=='training' or mode=='validation':
        # Compile the training update function (training mode only) and
        # the validation function.
        if mode=='training':
            if args.optimization=='sgd':
                updates = lasagne.updates.sgd(loss, params, learning_rate=lr)
            elif args.optimization=='momentum':
                updates = lasagne.updates.momentum(loss, params, learning_rate=lr)
            elif args.optimization=='nesterov_momentum':
                updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=lr)
            elif args.optimization=='adagrad':
                updates = lasagne.updates.adagrad(loss, params, learning_rate=lr)
            elif args.optimization=='rmsprop':
                updates = lasagne.updates.rmsprop(loss, params, learning_rate=lr)
            elif args.optimization=='adadelta':
                updates = lasagne.updates.adadelta(loss, params, learning_rate=lr)
            elif args.optimization=='adam':
                updates = lasagne.updates.adam(loss, params, learning_rate=lr)
            elif args.optimization=='adamax':
                updates = lasagne.updates.adamax(loss, params, learning_rate=lr)
            elif args.optimization=='adamax2':
                # NOTE(review): noise is passed grads_clip here, not
                # grads_noise — looks like a typo; confirm.
                updates = adamax(loss, params, learning_rate=lr, grads_clip=grads_clip, noise=grads_clip,average=accumulation)
            if train_fn is None:
                train_fn = theano.function(
                    map(lambda x:x.input_var,sorted_values(inputs)),
                    [loss,extra_loss]+losslist,
                    updates=updates,
                    on_unused_input='warn',
                    allow_input_downcast=True,
                    mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True) if args.nan_guard else None
                    )
        if val_fn is None:
            val_fn = theano.function(
                map(lambda x:x.input_var,sorted_values(inputs)),
                [valloss]+vallosslist,
                on_unused_input='warn',
                allow_input_downcast=True,
                )
        if mode=='training':
            for h in on_training_started:
                h(locals())
        min_loss=float('inf')
        min_valloss=float('inf')
        # We iterate over epochs:
        for epoch in range(epoch_begin,epoch_begin+num_epochs):
            saveepoch=epoch+1
            if args.snapshotFromZero:
                saveepoch-=epoch_begin
            if lrpolicy is not None:
                lrval=lrpolicy.get_learning_rate(epoch-epoch_begin)
                lr.set_value(lrval)
                logging.info('set lr to %f'%lrval)
            easyshared.update()
            break_flag = False
            if mode=='training':
                # In each epoch, we do a full pass over the training data:
                train_err = 0
                train_penalty = 0
                train_errlist = None
                train_batches = 0
                start_time = time.time()
                count = 0
                loopcount = 0
                train_it=batch_iterator_train(num_batchsize,args.train_db)
                #num_batches=global_batches
                # Report progress roughly every 1/80th of the epoch.
                train_len = 80
                if num_batches > train_len:
                    n=num_batches//80
                    train_len=(num_batches+n-1)//n
                err=None
                while True:
                    stop=False
                    nan_exit=False
                    for i in range(train_len):
                        try:
                            batch = next(train_it)
                        except StopIteration:
                            stop=True
                            break
                        if type(batch)==tuple:
                            batch,ids=batch
                        if mean_data is not None:
                            assert 'mean' not in batch
                            batch['mean']=mean_data
                        if training_data_shaker is not None:
                            batch = training_data_shaker(batch)
                        #batch['image']=batch['image']/256.0
                        #batch['image']=1-batch['image']
                        #print batch['image'][0]
                        #print batch['image'].shape
                        #print batch['image'].dtype
                        #print batch['target'][0]
                        #print batch['target'].shape
                        #print batch['target'].dtype
                        res=train_fn(*sorted_values(batch))
                        err=res[0]
                        if np.isnan(err):
                            print 'got nan.'
                            nan_exit=True
                            break
                        penalty=res[1]
                        errlist=res[2:]
                        train_err += err
                        train_penalty += penalty
                        if train_errlist is None:
                            train_errlist=errlist
                        else:
                            for j in range(len(errlist)):
                                train_errlist[j]=train_errlist[j]+errlist[j]
                        train_batches += 1
                        count = count+1
                        for h in on_batch_finished:
                            h(locals())
                        sys.stdout.write(".")
                        #show(src,norms,predictsloop,predictsloop2,predictsloop3,num_batchsize,num_actions,i)
                        # ESC in an OpenCV window aborts training.
                        if 0xFF & cv2.waitKey(100) == 27:
                            break_flag = True
                            break
                    print ''
                    # Then we print the results for this epoch:
                    avg_train_err = train_err / train_batches
                    avg_penalty = train_penalty / train_batches
                    if not args.digits:
                        logging.info("Epoch {}:{} of {} took {:.3f}s".format(
                            epoch + 1, loopcount+1, epoch_begin+num_epochs, time.time() - start_time))
                        #print "  training loss:\t\t{:.6f}".format(avg_train_err)
                        out=StringIO()
                        print >>out,'  ','training loss',':',avg_train_err
                        logging.info(string.strip(out.getvalue()))
                        out=StringIO()
                        print >>out,'  ','training penalty',':',avg_penalty
                        logging.info(string.strip(out.getvalue()))
                        tmp = map(lambda x:x/train_batches,train_errlist)
                        for tag,sli in tagslice:
                            if len(tmp[sli])>0:
                                if tag.startswith('train:'):
                                    tag=tag[len('train:'):]
                                out=StringIO()
                                print >>out,'  ',tag,':',tmp[sli]
                                logging.info(string.strip(out.getvalue()))
                    elif num_batches>0 and i>0 or num_batches==0 and stop:
                        # DIGITS-parseable one-line progress output.
                        #logging.info("Training {} of {} took {:.3f}s".format(
                        #    epoch + 1, epoch_begin+num_epochs, time.time() - start_time))
                        out=StringIO()
                        print >>out,'loss','=',avg_train_err,','
                        print >>out,'penalty','=',avg_penalty,','
                        print >>out,'lr','=',lr.get_value(),','
                        tmp = map(lambda x:x/train_batches,train_errlist)
                        for tag,sli in tagslice:
                            if len(tmp[sli])>0:
                                if tag.startswith('train:'):
                                    tag=tag[len('train:'):]
                                if len(tmp[sli])==1:
                                    print >>out,tag,'=',tmp[sli][0],','
                                else:
                                    for i,val in enumerate(tmp[sli]):
                                        print >>out,tag+'_'+str(i),'=',val,','
                        if num_batches>0:
                            logging.info("Training (epoch " + str(saveepoch-1+1.0*train_batches/num_batches) + "): " + string.replace(out.getvalue(),'\n',' '))
                        else:
                            logging.info("Training (epoch " + str(saveepoch) + "): " + string.replace(out.getvalue(),'\n',' '))
                    if nan_exit:
                        exit(1)
                    if stop:
                        # First epoch discovers the true batch count.
                        if num_batches==0:
                            num_batches=train_batches
#                        if lrpolicy is None:
#                            total_training_steps = num_epochs
#                            lrpolicy = lr_policy.LRPolicy(args.lr_policy,
#                                                          args.lr_base_rate,
#                                                          args.lr_gamma,
#                                                          args.lr_power,
#                                                          total_training_steps,
#                                                          args.lr_stepvalues)
                    #vals = []
                    #for t in lasagne.layers.get_all_params(networks1.values(),regularizable=True):
                    #    val = abs(t.get_value()).max()
                    #    vals += [val]
                    #print 'max |w|:',max(vals)
                    if stop:
                        break
                    loopcount+=1
            if (saveepoch)%args.validation_interval!=0:
                continue
            # And a full pass over the validation data:
            val_result=[]
            for batch_iterator,db,stage in (
                (batch_iterator_val,args.validation_db,'val'),
                (batch_iterator_test,args.test_db,'test')):
                val_err = 0
                val_errlist = None
                val_batches = 0
                start_time = time.time()
                count = 0
                loopcount = 0
                if batch_iterator is not None and db != '':
                    pass
                else:
                    # No iterator/db for this stage: record zeros and skip.
                    val_result+=[[ stage, val_err, val_errlist, val_batches, start_time, count, loopcount]]
                    continue
                val_it=batch_iterator(num_batchsize,db)
                for h in on_validation_started:
                    h(locals())
                val_len = 80
                err=None
                while True:
                    stop=False
                    for i in range(val_len):
                        try:
                            batch = next(val_it)
                        except StopIteration:
                            stop=True
                            break
                        if type(batch)==tuple:
                            batch,ids=batch
                        if mean_data is not None:
                            assert 'mean' not in batch
                            batch['mean']=mean_data
                        if stage=='val':
                            if validation_data_shaker is not None:
                                batch = validation_data_shaker(batch)
                        #for t in batch:
                        #    print t,batch[t].shape
                        res=val_fn(*sorted_values(batch))
                        err=res[0]
                        errlist=res[1:]
                        val_err += err
                        if val_errlist is None:
                            val_errlist=errlist
                        else:
                            for j in range(len(errlist)):
                                val_errlist[j]=val_errlist[j]+errlist[j]
                        val_batches += 1
                        count = count+1
                        sys.stdout.write("o")
                        #show(src,norms,predictsloop,predictsloop2,predictsloop3,num_batchsize,num_actions,i)
                        if 0xFF & cv2.waitKey(100) == 27:
                            break_flag = True
                            break
                    print ''
                    #vals = []
                    #for t in lasagne.layers.get_all_params(networks1.values(),regularizable=True):
                    #    val = abs(t.get_value()).max()
                    #    vals += [val]
                    #print 'max |w|:',max(vals)
                    if stop:
                        break
                    loopcount+=1
                val_result+=[[ stage, val_err, val_errlist, val_batches, start_time, count, loopcount]]
            # Then we print the results for this epoch:
            if not args.digits:
                for stage, val_err, val_errlist, val_batches, start_time, count, loopcount in val_result:
                    if count>0:
                        avg_val_err = val_err / val_batches
                        logging.info("Epoch {}:{} of {} took {:.3f}s".format(
                            epoch + 1, loopcount+1, epoch_begin+num_epochs, time.time() - start_time))
                        #print "  validation loss:\t\t{:.6f}".format(avg_val_err)
                        out=StringIO()
                        print >>out,'  ',stage+' loss',':',avg_val_err
                        logging.info(string.strip(out.getvalue()))
                        tmp = map(lambda x:x/val_batches,val_errlist)
                        for tag,sli in valtagslice:
                            if len(tmp[sli])>0:
                                if tag.startswith('val:'):
                                    tag=tag[len('val:'):]
                                out=StringIO()
                                print >>out, '  ',tag,':',tmp[sli]
                                logging.info(string.strip(out.getvalue()))
            else:
                # DIGITS-parseable one-line validation output.
                out=StringIO()
                for stage, val_err, val_errlist, val_batches, start_time, count, loopcount in val_result:
                    if count>0:
                        avg_val_err = val_err / val_batches
                        print >>out,stage+'_loss','=',avg_val_err,','
                        tmp = map(lambda x:x/val_batches,val_errlist)
                        for tag,sli in valtagslice:
                            if len(tmp[sli])>0:
                                if tag.startswith('val:'):
                                    tag=tag[len('val:'):]
                                if len(tmp[sli])==1:
                                    print >>out,stage+'_'+tag,'=',tmp[sli][0],','
                                else:
                                    for i,val in enumerate(tmp[sli]):
                                        print >>out,stage+'_'+tag+'_'+str(i),'=',val,','
                logging.info("Validation (epoch " + str(saveepoch) + "): " + string.replace(out.getvalue(),'\n',' '))
            for h in on_validation_finished:
                h(locals())
            # Always save current weights; additionally keep a per-epoch
            # snapshot at the configured interval.
            save_params(epoch+1,[
                sorted_values(networks) for networks in all_networks
            ],[],prefix,deletelayers=[])
            if mode=='training':
                if train_err / train_batches < min_loss:
                    min_loss = train_err / train_batches
                    logging.info('New low training loss : %f'%min_loss)
            stage, val_err, val_errlist, val_batches, start_time, count, loopcount = val_result[0]
            if val_batches > 0 and val_err / val_batches < min_valloss:
                min_valloss = val_err / val_batches
                logging.info('New low validation loss : %f'%min_valloss)
            if mode=='training':
                if args.snapshotInterval>0:
                    if (saveepoch)%max(1,int(args.snapshotInterval))==0:
                        logging.info('Snapshotting to %s'%(prefix+'epoch'+str(saveepoch)))
                        save_params(epoch+1,[
                            sorted_values(networks) for networks in all_networks
                        ],[],prefix+'epoch'+str(saveepoch)+'-',deletelayers=[])
                        logging.info('Snapshot saved')
            if mode=='training':
                for h in on_epoch_finished:
                    h(locals())
            while gc.collect() > 0:
                pass
                #print 'gc'
            #tmp=memory_usage((train_fn, (batch[0]['source_image'],batch[0]['action'],batch[0]['target_image'],)))
            #print np.mean(tmp)
            if break_flag or 0xFF & cv2.waitKey(100) == 27:
                break
    if mode=='training':
        for h in on_training_finished:
            h(locals())
class ArgumentParser(argparse.ArgumentParser):
    """Command-line options for training/validation/inference runs.

    DIGITS-compatible flag set: every option is registered in __init__,
    so callers simply do ``ArgumentParser().parse_args()``.
    """

    @staticmethod
    def _str2bool(value):
        """Parse a boolean command-line value.

        argparse's ``type=bool`` is broken: any non-empty string —
        including 'False' — is truthy, so ``--flag False`` used to yield
        True.  Accept the usual spellings explicitly and reject anything
        else with a proper argparse error.
        """
        if isinstance(value, bool):
            # Defaults are passed through unchanged.
            return value
        lowered = str(value).strip().lower()
        if lowered in ('1', 'true', 't', 'yes', 'y'):
            return True
        if lowered in ('0', 'false', 'f', 'no', 'n', ''):
            return False
        raise argparse.ArgumentTypeError('invalid boolean value: %r' % (value,))

    def __init__(self):
        super(ArgumentParser, self).__init__(description='Deepstacks.')
        parser = self

        # Small helpers mirroring TensorFlow's flag-definition API.
        def define_integer(key, default, desc):
            parser.add_argument('--' + key, type=int, default=default, help=desc)

        def define_string(key, default, desc):
            parser.add_argument('--' + key, type=str, default=default, help=desc)

        def define_float(key, default, desc):
            parser.add_argument('--' + key, type=float, default=float(default), help=desc)

        def define_boolean(key, default, desc):
            # BUG FIX: was type=bool, which made '--key False' parse as True.
            parser.add_argument('--' + key, type=ArgumentParser._str2bool,
                                default=default, help=desc)

        define_integer('accumulation', 1, """Accumulate gradients over multiple batches.""")
        # Basic model parameters. #float, integer, boolean, string
        define_integer('batch_size', 16, """Number of images to process in a batch""")
        #define_integer(
        #    'croplen', 0, """Crop (x and y). A zero value means no cropping will be applied""")
        define_integer('epoch', 1, """Number of epochs to train, -1 for unbounded""")
        define_string('inference_db', '', """Directory with inference file source""")
        define_integer(
            'validation_interval', 1, """Number of train epochs to complete, to perform one validation""")
        define_string('labels_list', '', """Text file listing label definitions""")
        define_string('mean', '', """Mean image file""")
        define_float('momentum', '0.9', """Momentum""")  # Not used by DIGITS front-end
        define_float('grads_clip', '0.0', """Gradients clip""")  # Not used by DIGITS front-end
        define_float('grads_noise', '0.0', """Gradients noise""")  # Not used by DIGITS front-end
        define_string('network', '', """File containing network (model)""")
        define_string('networkDirectory', '', """Directory in which network exists""")
        define_string('optimization', 'sgd', """Optimization method""")
        define_string('save', 'results', """Save directory""")
        define_integer('seed', 0, """Fixed input seed for repeatable experiments""")
        define_boolean('shuffle', False, """Shuffle records before training""")  # ignored
        define_boolean('verbose', False, """Print more""")
        define_float(
            'snapshotInterval', 1.0,
            """Specifies the training epochs to be completed before taking a snapshot""")
        define_string('snapshotPrefix', '', """Prefix of the weights/snapshots""")
        define_boolean('snapshotFromZero', False, """snapshoting from epoch zero""")
        define_boolean('digits', False, """format output for digits""")
        define_string(
            'subtractMean', 'none',
            """Select mean subtraction method. Possible values are 'image', 'pixel' or 'none'""")
        define_string('train_db', '', """Directory with training file source""")
        #define_string(
        #    'train_labels', '',
        #    """Directory with an optional and seperate labels file source for training""")
        define_string('validation_db', '', """Directory with validation file source""")
        define_string('test_db', '', """Directory with test file source""")
        #define_string(
        #    'validation_labels', '',
        #    """Directory with an optional and seperate labels file source for validation""")
        #define_string(
        #    'visualizeModelPath', '', """Constructs the current model for visualization""")
        define_boolean(
            'visualize_inf', False, """Will output weights and activations for an inference job.""")
        define_string(
            'weights', '', """Filename for weights of a model to use for fine-tuning""")
        # @TODO(tzaman): is the bitdepth in line with the DIGITS team?
        #define_integer('bitdepth', 8, """Specifies an image's bitdepth""")
        # @TODO(tzaman); remove torch mentions below
        define_float('lr_base_rate', '0.01', """Learning rate""")
        define_string(
            'lr_policy', 'fixed',
            """Learning rate policy. (fixed, step, exp, inv, multistep, poly, sigmoid)""")
        define_float(
            'lr_gamma', -1,
            """Required to calculate learning rate. Applies to: (step, exp, inv, multistep, sigmoid)""")
        define_float(
            'lr_power', float('Inf'),
            """Required to calculate learning rate. Applies to: (inv, poly)""")
        define_string(
            'lr_stepvalues', '',
            """Required to calculate stepsize of the learning rate. Applies to: (step, multistep, sigmoid).
            For the 'multistep' lr_policy you can input multiple values seperated by commas""")
        define_boolean(
            'nan_guard', False, """Enable NanGuardMode of theano.""")
        define_boolean(
            'testMany', False, """Test many.""")
        define_boolean(
            'allPredictions', False, """All predictions should be grabbed and formatted.""")
        ## Tensorflow-unique arguments for DIGITS
        #define_string(
        #    'save_vars', 'all',
        #    """Sets the collection of variables to be saved: 'all' or only 'trainable'.""")
        #define_string('summaries_dir', '', """Directory of Tensorboard Summaries (logdir)""")
        #define_boolean(
        #    'serving_export', False, """Flag for exporting an Tensorflow Serving model""")
        #define_boolean('log_device_placement', False, """Whether to log device placement.""")
        #define_integer(
        #    'log_runtime_stats_per_step', 0,
        #    """Logs runtime statistics for Tensorboard every x steps, defaults to 0 (off).""")
        ## Augmentation
        #define_string(
        #    'augFlip', 'none',
        #    """The flip options {none, fliplr, flipud, fliplrud} as randompre-processing augmentation""")
        #define_float(
        #    'augNoise', 0., """The stddev of Noise in AWGN as pre-processing augmentation""")
        #define_float(
        #    'augContrast', 0., """The contrast factor's bounds as sampled from a random-uniform distribution
        #         as pre-processing augmentation""")
        #define_boolean(
        #    'augWhitening', False, """Performs per-image whitening by subtracting off its own mean and
        #        dividing by its own standard deviation.""")
        #define_float(
        #    'augHSVh', 0., """The stddev of HSV's Hue shift as pre-processing augmentation""")
        #define_float(
        #    'augHSVs', 0., """The stddev of HSV's Saturation shift as pre-processing augmentation""")
        #define_float(
        #    'augHSVv', 0., """The stddev of HSV's Value shift as pre-processing augmentation""")
args = None
def main():
    """Entry point: parse the command-line flags into the module-level
    ``args`` and hand them to ``run``."""
    global args
    args = ArgumentParser().parse_args()
    run(args)
|
"""
Project version queries
"""
from typing import Generator, List, Optional, Union
import warnings
from typeguard import typechecked
from ...helpers import Compatible, format_result, fragment_builder
from .queries import gql_project_version, GQL_PROJECT_VERSION_COUNT
from ...types import ProjectVersion as ProjectVersionType
from ...utils import row_generator_from_paginated_calls
class QueriesProjectVersion:
    """
    Set of ProjectVersion queries
    """
    # pylint: disable=too-many-arguments,too-many-locals

    # Fields returned by default when the caller does not request specific ones.
    _DEFAULT_FIELDS = (
        'createdAt',
        'id',
        'content',
        'name',
        'project',
        'projectId')

    def __init__(self, auth):
        """
        Initializes the subclass

        Parameters
        ----------
        auth : KiliAuth object
        """
        self.auth = auth

    @Compatible(['v2'])
    @typechecked
    def project_version(
            self,
            first: Optional[int] = 100,
            skip: Optional[int] = 0,
            fields: Optional[List[str]] = None,
            project_id: Optional[str] = None,
            disable_tqdm: bool = False,
            as_generator: bool = False) -> Union[List[dict], Generator[dict, None, None]]:
        # pylint: disable=line-too-long
        """
        Gets a generator or a list of project versions respecting a set of criteria

        Parameters
        ----------
        fields :
            All the fields to request among the possible fields for the project versions
            See [the documentation](https://cloud.kili-technology.com/docs/python-graphql-api/graphql-api/#projectVersions) for all possible fields.
            Defaults to ['createdAt', 'id', 'content', 'name', 'project', 'projectId'].
        first :
            Number of project versions to query
        project_id :
            Filter on Id of project
        skip :
            Number of project versions to skip (they are ordered by their date
            of creation, first to last).
        disable_tqdm :
            If True, the progress bar will be disabled
        as_generator:
            If True, a generator on the project versions is returned.

        Returns
        -------
        result:
            a result object which contains the query if it was successful, or an error message else.
        """
        if fields is None:
            # Build the default per call: a shared mutable default argument
            # would be aliased across every invocation of this method.
            fields = list(self._DEFAULT_FIELDS)
        if as_generator is False:
            warnings.warn("From 2022-05-18, the default return type will be a generator. Currently, the default return type is a list. \n"
                          "If you want to force the query return to be a list, you can already call this method with the argument as_generator=False",
                          DeprecationWarning)
        count_args = {"project_id": project_id}
        # A generator result implies no total count is needed up front, so tqdm is moot.
        disable_tqdm = disable_tqdm or as_generator
        payload_query = {
            'where': {
                'projectId': project_id,
            },
        }
        project_versions_generator = row_generator_from_paginated_calls(
            skip,
            first,
            self.count_project_versions,
            count_args,
            self._query_project_versions,
            payload_query,
            fields,
            disable_tqdm
        )
        if as_generator:
            return project_versions_generator
        return list(project_versions_generator)

    def _query_project_versions(self,
                                skip: int,
                                first: int,
                                payload: dict,
                                fields: List[str]):
        """Execute one paginated projectVersions GraphQL call and unwrap the result."""
        payload.update({'skip': skip, 'first': first})
        _gql_project_version = gql_project_version(
            fragment_builder(fields, ProjectVersionType))
        result = self.auth.client.execute(_gql_project_version, payload)
        return format_result('data', result)

    @Compatible(['v2'])
    @typechecked
    def count_project_versions(self, project_id: str) -> int:
        """
        Count the number of project versions

        Parameters
        ----------
        project_id :
            Filter on ID of project

        Returns
        -------
        result:
            the number of project versions with the parameters provided
        """
        variables = {
            'where': {'projectId': project_id},
        }
        result = self.auth.client.execute(GQL_PROJECT_VERSION_COUNT, variables)
        count = format_result('data', result)
        return count
|
from django.urls import path
from apps.Game import views
# Game-app routes: each endpoint captures an integer primary key and
# dispatches to the view that serializes the matching object.
urlpatterns = [
    path('player/<int:p_id>', views.send_player_data),  # one player's data
    path('game/<int:g_id>', views.send_game_data),      # one game's data
    path('team/<int:t_id>', views.send_team_data),      # one team's data
]
# -*- coding: utf-8 -*-
from rest_framework import permissions, viewsets
from authentication.models import Account
from authentication.permissions import IsAccountOwner
from authentication.serializers import AccountSerializer
import json
from django.contrib.auth import authenticate, login, logout
from rest_framework import status, views, permissions
from rest_framework.response import Response
class AccountViewSet(viewsets.ModelViewSet):
    """ViewSet for Account objects.

    lookup_field: accounts are addressed by ``username`` instead of the
        numeric primary key.
    queryset: all accounts.
    serializer_class: serializer applied to every account instance.
    """

    lookup_field = 'username'
    queryset = Account.objects.all()
    serializer_class = AccountSerializer

    def get_permissions(self):
        """Allow anonymous reads and sign-ups; everything else requires the
        authenticated owner of the account."""
        method = self.request.method
        if method in permissions.SAFE_METHODS or method == 'POST':
            return (permissions.AllowAny(),)
        return (permissions.IsAuthenticated(), IsAccountOwner(),)

    def create(self, request):
        """Create an account with a properly hashed password.

        Replaces the default ``.create()`` with ``Account.objects.create_user``
        so the password is encoded rather than stored verbatim.
        """
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            return Response({
                'status': 'Bad request',
                'message': 'Account could not be created with received data.'
            }, status=status.HTTP_400_BAD_REQUEST)
        Account.objects.create_user(**serializer.validated_data)
        return Response(serializer.validated_data, status=status.HTTP_201_CREATED)
class LoginView(views.APIView):
    """Authenticate a user from an email/password JSON payload and open a session."""

    def post(self, request, format=None):
        payload = json.loads(request.body)
        account = authenticate(email=payload.get('email', None),
                               password=payload.get('password', None))
        # Guard clauses: bad credentials first, then disabled accounts.
        if account is None:
            return Response({
                'status': 'Unauthorized',
                'message': 'Username/password combination invalid.'
            }, status=status.HTTP_401_UNAUTHORIZED)
        if not account.is_active:
            return Response({
                'status': 'Unauthorized',
                'message': 'This account has been disabled.'
            }, status=status.HTTP_401_UNAUTHORIZED)
        # Open a server-side session and return the serialized account as JSON.
        login(request, account)
        return Response(AccountSerializer(account).data)
class LogoutView(views.APIView):
    """Close the authenticated user's session."""

    # Anonymous callers are rejected with a 403 before post() runs.
    permission_classes = (permissions.IsAuthenticated,)

    def post(self, request, format=None):
        logout(request)
        # Nothing meaningful to return after logging out, so reply with an
        # empty body and a no-content status.
        return Response({}, status=status.HTTP_204_NO_CONTENT)
from .models import *
from rest_framework import serializers
class ActiveCampaigns(serializers.ModelSerializer):
    """Serializer exposing every column of an active_campaigns row."""

    class Meta:
        model = active_campaigns
        fields = ("CAMPAIGN_ID", "ZIP", "ADDRESS", "CITY", "STATE", "COUNTRY",
                  "LONGITUDE", "LATITUDE", "CHAIN_ID", "DATE", "SLOTS")

    def create(self, validated_data):
        """Insert a new campaign row built from the validated payload."""
        return active_campaigns.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """Copy every provided field onto *instance*, keeping the current
        value for anything absent, then persist the row."""
        for field_name in self.Meta.fields:
            setattr(instance, field_name,
                    validated_data.get(field_name, getattr(instance, field_name)))
        instance.save()
        return instance
class TokenSerializer(serializers.Serializer):
    """Serializer carrying a single opaque authentication token string."""
    token = serializers.CharField(max_length=255)
class UserSerializer(serializers.Serializer):
    """Serializer for a donor profile (users_profile)."""

    USEREMAIL = serializers.CharField(max_length=100)
    USERNAME = serializers.CharField(max_length=100)
    CONTACT = serializers.CharField(max_length=100)
    BLOOD_GP = serializers.CharField(max_length=100)
    GENDER = serializers.CharField(max_length=100)
    AGE = serializers.IntegerField()
    ZIP = serializers.CharField(max_length=100)
    ADDRESS = serializers.CharField(max_length=100)
    CITY = serializers.CharField(max_length=100)
    STATE = serializers.CharField(max_length=100)
    COUNTRY = serializers.CharField(max_length=100)
    # DONATE_Bf, LONGITUDE and LATITUDE are intentionally not serialized.

    def create(self, validated_data):
        """Insert a new users_profile row from the validated payload."""
        return users_profile.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """Copy every provided field onto *instance*, keeping the current
        value for anything absent, then persist the row."""
        for field_name in ('USEREMAIL', 'USERNAME', 'CONTACT', 'BLOOD_GP',
                           'GENDER', 'AGE', 'ZIP', 'ADDRESS', 'CITY',
                           'STATE', 'COUNTRY'):
            setattr(instance, field_name,
                    validated_data.get(field_name, getattr(instance, field_name)))
        instance.save()
        return instance
|
import uuid
from flask import request, jsonify, send_from_directory, Blueprint
from config import Config
import os
file = Blueprint("upload", __name__)
@file.route('/get/<filename>')
def get_img(filename):
    """Serve a previously uploaded file from the configured upload folder."""
    return send_from_directory(Config.UPLOAD_FOLDER,filename)
@file.route('/upload/',methods=['POST'])
def upload():
    """Store an uploaded file under a random name in Config.UPLOAD_FOLDER.

    Accepts a multipart ``file`` field restricted to .jpg/.png/.mp4 and
    returns the generated filename on success; returns a 409 JSON error for
    an unsupported extension, and a plain message when no file was sent.
    """
    image = request.files.get('file')
    if not image:
        return "nothing upload"
    if not image.filename.endswith(('.jpg', '.png', '.mp4')):
        return jsonify({'message': 'error file format'}), 409
    # Random 32-char hex name (uuid4 without dashes), keeping the original
    # extension. (Removed leftover debug print that generated and printed a
    # second, unrelated UUID.)
    filename = uuid.uuid4().hex + '.' + image.filename.split('.')[-1]
    if not os.path.isdir(Config.UPLOAD_FOLDER):
        os.makedirs(Config.UPLOAD_FOLDER)
    image.save(os.path.join(Config.UPLOAD_FOLDER, filename))
    return filename
|
"""Some utility functions for patterns common in Firecrown.
"""
def upper_triangle_indices(n: int):
    """Yield (row, col) pairs covering the upper triangle of an (n x n) matrix.

    Equivalent to the nested loops::

        for i in range(n):
            for j in range(i, n):
                ...
    """
    yield from ((row, col) for row in range(n) for col in range(row, n))
|
#coding=utf-8
__author__ = 'love_huan'
from scipy import stats
import numpy as np
import pylab
# Sample data for a simple linear-regression demo.
x = np.array([1, 2, 5, 7, 10, 15])
y = np.array([2, 6, 7, 9, 14, 19])
# Least-squares fit y ≈ intercept + slope * x; also yields r, p-value and
# the standard error of the slope estimate.
slope, intercept, r_value, p_value, slope_std_error = stats.linregress(x, y)
predict_y = intercept + slope * x
pred_error = y - predict_y
# Two parameters estimated (slope, intercept) -> n - 2 degrees of freedom.
degrees_of_freedom = len(x) - 2
residual_std_error = np.sqrt(np.sum(pred_error**2) / degrees_of_freedom)
# Plot the raw points and the fitted line, then block on the window.
pylab.plot(x, y, 'o')
pylab.plot(x, predict_y, 'k-')
pylab.show()
|
#!/usr/bin/env python
from itertools import islice
import aiohttp
import asyncio
from async_timeout import timeout
import cachetools.func
from collections import defaultdict
from decimal import Decimal
from enum import Enum
import json
import logging
import pandas as pd
import requests
import time
from typing import (
Any,
AsyncIterable,
Dict,
List,
Optional,
DefaultDict,
Set,
Tuple,
)
import websockets
from websockets.client import Connect as WSConnectionContext
from hummingbot.core.data_type.order_book_tracker_data_source import OrderBookTrackerDataSource
from hummingbot.core.data_type.order_book_message import OrderBookMessage
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.logger import HummingbotLogger
from hummingbot.connector.exchange.kucoin.kucoin_order_book import KucoinOrderBook
from hummingbot.connector.exchange.kucoin.kucoin_active_order_tracker import KucoinActiveOrderTracker
from hummingbot.core.utils.async_utils import safe_ensure_future
# REST endpoint for full level-2 order book snapshots.
SNAPSHOT_REST_URL = "https://api.kucoin.com/api/v2/market/orderbook/level2"
# Unused placeholder: diffs arrive over the websocket, not a REST stream.
DIFF_STREAM_URL = ""
# REST endpoint for last-trade / bid / ask prices of all tickers.
TICKER_PRICE_CHANGE_URL = "https://api.kucoin.com/api/v1/market/allTickers"
# REST endpoint listing all symbols and whether they are enabled for trading.
EXCHANGE_INFO_URL = "https://api.kucoin.com/api/v1/symbols"
def secs_until_next_oclock():
    """Return the number of seconds from now until the next full UTC hour."""
    now: float = time.time()
    top_of_hour: pd.Timestamp = pd.Timestamp.utcnow().replace(minute=0, second=0, microsecond=0)
    return (top_of_hour + pd.Timedelta(hours=1)).timestamp() - now
class StreamType(Enum):
    """Kind of public Kucoin market-data stream a subscription targets."""
    Depth = "depth"  # level-2 order book diff messages
    Trade = "trade"  # trade (match) messages
class KucoinWSConnectionIterator:
    """
    A message iterator that automatically manages the auto-ping requirement from Kucoin, and returns all JSON-decoded
    messages from a Kucoin websocket connection
    Instances of this class are intended to be used with an `async for msg in <iterator>: ...` block. The iterator does
    the following:
    1. At the beginning of the loop, connect to Kucoin's public websocket data stream, and subscribe to topics matching
    its constructor arguments.
    2. Start an automatic ping background task, to keep the websocket connection alive.
    3. Yield any messages received from Kucoin, after JSON decode. Note that this means all messages, include ACK and
    PONG messages, are returned.
    4. Raises `asyncio.TimeoutError` if no message have been heard from Kucoin for more than
    `PING_TIMEOUT + PING_INTERVAL`.
    5. If the iterator exits for any reason, including any failures or timeout - stop and clean up the automatic ping
    task.
    The trading pairs subscription can be updated dynamically by assigning into the `trading_pairs` property.
    Note that this iterator does NOT come with any error handling logic or built-in resilience by itself. It is expected
    that the caller of the iterator should handle all errors from the iterator.
    """
    # Extra seconds a recv() may take beyond the ping interval before timing out.
    PING_TIMEOUT = 10.0
    # Seconds between automatic ping messages.
    PING_INTERVAL = 5

    # Class-level logger cache, created lazily by logger().
    _kwsci_logger: Optional[logging.Logger] = None

    @classmethod
    def logger(cls) -> logging.Logger:
        """Return the lazily-created class-level logger."""
        if cls._kwsci_logger is None:
            cls._kwsci_logger = logging.getLogger(__name__)
        return cls._kwsci_logger

    def __init__(self, stream_type: StreamType, trading_pairs: Set[str]):
        """
        :param stream_type: which public stream (depth diffs or trades) to subscribe to
        :param trading_pairs: initial set of trading pairs to subscribe to
        """
        self._ping_task: Optional[asyncio.Task] = None
        self._stream_type: StreamType = stream_type
        self._trading_pairs: Set[str] = trading_pairs
        # Millisecond timestamp used to mint strictly-increasing message ids.
        self._last_nonce: int = int(time.time() * 1e3)
        self._websocket: Optional[websockets.WebSocketClientProtocol] = None

    @staticmethod
    async def get_ws_connection_context() -> WSConnectionContext:
        """Obtain a websocket endpoint and token from Kucoin's bullet-public
        API and wrap them in a connection context."""
        async with aiohttp.ClientSession() as session:
            async with session.post('https://api.kucoin.com/api/v1/bullet-public', data=b'') as resp:
                response: aiohttp.ClientResponse = resp
                if response.status != 200:
                    raise IOError(f"Error fetching Kucoin websocket connection data."
                                  f"HTTP status is {response.status}.")
                data: Dict[str, Any] = await response.json()
                endpoint: str = data["data"]["instanceServers"][0]["endpoint"]
                token: str = data["data"]["token"]
                ws_url: str = f"{endpoint}?token={token}&acceptUserMessage=true"
                return WSConnectionContext(ws_url)

    @staticmethod
    async def update_subscription(ws: websockets.WebSocketClientProtocol,
                                  stream_type: StreamType,
                                  trading_pairs: Set[str],
                                  subscribe: bool):
        """Send (un)subscribe requests for *trading_pairs* over *ws*, chunked
        into batches of 100 symbols to respect Kucoin's rate limit."""
        # Kucoin has a limit of 100 subscription per 10 seconds
        it = iter(trading_pairs)
        # Split the pairs into tuples of at most 100 symbols each.
        trading_pair_chunks: List[Tuple[str]] = list(iter(lambda: tuple(islice(it, 100)), ()))
        subscribe_requests: List[Dict[str, Any]] = []
        if stream_type == StreamType.Depth:
            for trading_pair_chunk in trading_pair_chunks:
                market_str: str = ",".join(sorted(trading_pair_chunk))
                subscribe_requests.append({
                    "id": int(time.time()),
                    "type": "subscribe" if subscribe else "unsubscribe",
                    "topic": f"/market/level2:{market_str}",
                    "response": True
                })
        else:
            for trading_pair_chunk in trading_pair_chunks:
                market_str: str = ",".join(sorted(trading_pair_chunk))
                subscribe_requests.append({
                    "id": int(time.time()),
                    "type": "subscribe" if subscribe else "unsubscribe",
                    "topic": f"/market/match:{market_str}",
                    "privateChannel": False,
                    "response": True
                })
        for i, subscribe_request in enumerate(subscribe_requests):
            await ws.send(json.dumps(subscribe_request))
            if i != len(subscribe_requests) - 1:  # only sleep between requests
                await asyncio.sleep(10)
        await asyncio.sleep(0.2)  # watch out for the rate limit

    async def subscribe(self, stream_type: StreamType, trading_pairs: Set[str]):
        """Subscribe the current websocket to *trading_pairs* on *stream_type*."""
        await KucoinWSConnectionIterator.update_subscription(self.websocket, stream_type, trading_pairs, True)

    async def unsubscribe(self, stream_type: StreamType, trading_pairs: Set[str]):
        """Unsubscribe the current websocket from *trading_pairs* on *stream_type*."""
        await KucoinWSConnectionIterator.update_subscription(self.websocket, stream_type, trading_pairs, False)

    @property
    def stream_type(self) -> StreamType:
        return self._stream_type

    @property
    def trading_pairs(self) -> Set[str]:
        # Return a copy so callers cannot mutate internal state.
        return self._trading_pairs.copy()

    @trading_pairs.setter
    def trading_pairs(self, trading_pairs: Set[str]):
        # Record the new set, then asynchronously reconcile the live websocket
        # subscriptions with the delta between the old and new sets.
        prev_trading_pairs = self._trading_pairs
        self._trading_pairs = trading_pairs.copy()
        if prev_trading_pairs != trading_pairs and self._websocket is not None:
            async def update_subscriptions_func():
                unsubscribe_set: Set[str] = prev_trading_pairs - trading_pairs
                subscribe_set: Set[str] = trading_pairs - prev_trading_pairs
                if len(unsubscribe_set) > 0:
                    await self.unsubscribe(self.stream_type, unsubscribe_set)
                if len(subscribe_set) > 0:
                    await self.subscribe(self.stream_type, subscribe_set)
            safe_ensure_future(update_subscriptions_func())

    @property
    def websocket(self) -> Optional[websockets.WebSocketClientProtocol]:
        return self._websocket

    @property
    def ping_task(self) -> Optional[asyncio.Task]:
        return self._ping_task

    def get_nonce(self) -> int:
        """Return a strictly-increasing millisecond nonce for message ids."""
        now_ms: int = int(time.time() * 1e3)
        if now_ms <= self._last_nonce:
            # Guarantee monotonicity even within the same millisecond.
            now_ms = self._last_nonce + 1
        self._last_nonce = now_ms
        return now_ms

    async def _ping_loop(self, interval_secs: float):
        """Background task: send a ping message every *interval_secs* seconds
        while the connection remains open."""
        ws: websockets.WebSocketClientProtocol = self.websocket
        while True:
            try:
                if not ws.closed:
                    await ws.ensure_open()
                    ping_msg: Dict[str, Any] = {
                        "id": self.get_nonce(),
                        "type": "ping"
                    }
                    await ws.send(json.dumps(ping_msg))
            except websockets.exceptions.ConnectionClosedError:
                # Connection dropped: keep looping; the outer iterator decides when to exit.
                pass
            except asyncio.CancelledError:
                raise
            except Exception:
                raise
            await asyncio.sleep(interval_secs)

    async def _inner_messages(self, ws: websockets.WebSocketClientProtocol) -> AsyncIterable[str]:
        # Terminate the recv() loop as soon as the next message timed out, so the outer loop can disconnect.
        try:
            while True:
                async with timeout(self.PING_TIMEOUT + self.PING_INTERVAL):
                    yield await ws.recv()
        except asyncio.TimeoutError:
            self.logger().warning(f"Message recv() timed out. "
                                  f"Stream type = {self.stream_type},"
                                  f"Trading pairs = {self.trading_pairs}.")
            raise

    async def __aiter__(self) -> AsyncIterable[Dict[str, any]]:
        """Connect, subscribe, start the ping task, then yield every decoded message."""
        if self._websocket is not None:
            raise EnvironmentError("Iterator already in use.")
        # Get connection info and connect to Kucoin websocket.
        ping_task: Optional[asyncio.Task] = None
        try:
            async with (await self.get_ws_connection_context()) as ws:
                self._websocket = ws
                # Subscribe to the initial topic.
                await self.subscribe(self.stream_type, self.trading_pairs)
                # Start the ping task
                ping_task = safe_ensure_future(self._ping_loop(self.PING_INTERVAL))
                # Get messages
                async for raw_msg in self._inner_messages(ws):
                    msg: Dict[str, any] = json.loads(raw_msg)
                    yield msg
        finally:
            # Clean up.
            if ping_task is not None:
                ping_task.cancel()
class KucoinAPIOrderBookDataSource(OrderBookTrackerDataSource):
    """Order book tracker data source for Kucoin.

    Supplies order book snapshots over REST, and diff/trade messages over a
    pool of websocket connections (one connection per SYMBOLS_PER_CONNECTION
    trading pairs), refreshing subscriptions once per hour.
    """
    MESSAGE_TIMEOUT = 30.0
    PING_TIMEOUT = 10.0
    PING_INTERVAL = 15
    # Maximum number of symbols subscribed on a single websocket connection.
    SYMBOLS_PER_CONNECTION = 100
    # Pause between successive REST snapshot requests (rate-limit friendly).
    SLEEP_BETWEEN_SNAPSHOT_REQUEST = 5.0

    _kaobds_logger: Optional[HummingbotLogger] = None

    class TaskEntry:
        """Bookkeeping for one websocket listener task and its trading pairs."""
        __slots__ = ("__weakref__", "_trading_pairs", "_task", "_message_iterator")

        def __init__(self, trading_pairs: Set[str], task: asyncio.Task):
            self._trading_pairs: Set[str] = trading_pairs.copy()
            self._task: asyncio.Task = task
            # Set by the listener loop once its websocket iterator exists.
            self._message_iterator: Optional[KucoinWSConnectionIterator] = None

        @property
        def trading_pairs(self) -> Set[str]:
            # Copy so callers cannot mutate internal state.
            return self._trading_pairs.copy()

        @property
        def task(self) -> asyncio.Task:
            return self._task

        @property
        def message_iterator(self) -> Optional[KucoinWSConnectionIterator]:
            return self._message_iterator

        @message_iterator.setter
        def message_iterator(self, msg_iter: KucoinWSConnectionIterator):
            self._message_iterator = msg_iter

        def update_trading_pairs(self, trading_pairs: Set[str]):
            """Replace the tracked pair set and push it to the live iterator."""
            self._trading_pairs = trading_pairs.copy()
            if self._message_iterator is not None:
                self._message_iterator.trading_pairs = self._trading_pairs

    @classmethod
    def logger(cls) -> HummingbotLogger:
        """Return the lazily-created class-level logger."""
        if cls._kaobds_logger is None:
            cls._kaobds_logger = logging.getLogger(__name__)
        return cls._kaobds_logger

    def __init__(self, trading_pairs: List[str]):
        super().__init__(trading_pairs)
        self._order_book_create_function = lambda: OrderBook()
        # stream type -> {connection index -> TaskEntry}
        self._tasks: DefaultDict[StreamType, Dict[int, KucoinAPIOrderBookDataSource.TaskEntry]] = defaultdict(dict)

    @classmethod
    async def get_last_traded_prices(cls, trading_pairs: List[str]) -> Dict[str, float]:
        """Return the last traded price of each requested pair from the REST ticker."""
        results = dict()
        async with aiohttp.ClientSession() as client:
            resp = await client.get(TICKER_PRICE_CHANGE_URL)
            resp_json = await resp.json()
            for trading_pair in trading_pairs:
                # NOTE(review): raises IndexError if a requested pair is absent
                # from the ticker response.
                resp_record = [o for o in resp_json["data"]["ticker"] if o["symbolName"] == trading_pair][0]
                results[trading_pair] = float(resp_record["last"])
        return results

    async def get_trading_pairs(self) -> List[str]:
        """Return the tracked trading pairs, fetching them from the exchange on
        first use; falls back to an empty list on network errors."""
        if not self._trading_pairs:
            try:
                self._trading_pairs = await self.fetch_trading_pairs()
            except Exception:
                self._trading_pairs = []
                self.logger().network(
                    "Error getting active exchange information.",
                    exc_info=True,
                    app_warning_msg="Error getting active exchange information. Check network connection."
                )
        return self._trading_pairs

    @staticmethod
    @cachetools.func.ttl_cache(ttl=10)
    def get_mid_price(trading_pair: str) -> Optional[Decimal]:
        """Return (buy + sell) / 2 for *trading_pair*, cached for 10 seconds;
        None when the pair is missing or has no quotes."""
        resp = requests.get(url=TICKER_PRICE_CHANGE_URL)
        records = resp.json()
        result = None
        for record in records["data"]["ticker"]:
            if trading_pair == record["symbolName"] and record["buy"] is not None and record["sell"] is not None:
                result = (Decimal(record["buy"]) + Decimal(record["sell"])) / Decimal("2")
                break
        return result

    @staticmethod
    async def fetch_trading_pairs() -> List[str]:
        """Fetch the symbols currently enabled for trading on Kucoin."""
        async with aiohttp.ClientSession() as client:
            async with client.get(EXCHANGE_INFO_URL, timeout=5) as response:
                if response.status == 200:
                    try:
                        data: Dict[str, Any] = await response.json()
                        all_trading_pairs = data.get("data", [])
                        return [item["symbol"] for item in all_trading_pairs if item["enableTrading"] is True]
                    except Exception:
                        pass
                # Do nothing if the request fails -- there will be no autocomplete for kucoin trading pairs
                return []

    @staticmethod
    async def get_snapshot(client: aiohttp.ClientSession, trading_pair: str) -> Dict[str, Any]:
        """Fetch a full level-2 order book snapshot for *trading_pair* over REST."""
        params: Dict = {"symbol": trading_pair}
        async with client.get(SNAPSHOT_REST_URL, params=params) as response:
            response: aiohttp.ClientResponse = response
            if response.status != 200:
                raise IOError(f"Error fetching Kucoin market snapshot for {trading_pair}. "
                              f"HTTP status is {response.status}.")
            data: Dict[str, Any] = await response.json()
            return data

    async def get_new_order_book(self, trading_pair: str) -> OrderBook:
        """Create a fresh OrderBook primed with the latest REST snapshot."""
        async with aiohttp.ClientSession() as client:
            snapshot: Dict[str, Any] = await self.get_snapshot(client, trading_pair)
            snapshot_timestamp: float = time.time()
            snapshot_msg: OrderBookMessage = KucoinOrderBook.snapshot_message_from_exchange(
                snapshot,
                snapshot_timestamp,
                metadata={"symbol": trading_pair}
            )
            order_book: OrderBook = self.order_book_create_function()
            active_order_tracker: KucoinActiveOrderTracker = KucoinActiveOrderTracker()
            bids, asks = active_order_tracker.convert_snapshot_message_to_order_book_row(snapshot_msg)
            order_book.apply_snapshot(bids, asks, snapshot_msg.update_id)
            return order_book

    async def get_markets_per_ws_connection(self) -> List[str]:
        """Partition all trading pairs into comma-joined subsets, one subset
        per websocket connection."""
        # Fetch the markets and split per connection
        all_symbols: List[str] = await self.get_trading_pairs()
        market_subsets: List[str] = []
        for i in range(0, len(all_symbols), self.SYMBOLS_PER_CONNECTION):
            symbols_section: List[str] = all_symbols[i: i + self.SYMBOLS_PER_CONNECTION]
            symbol: str = ','.join(symbols_section)
            market_subsets.append(symbol)
        return market_subsets

    async def _start_update_tasks(self, stream_type: StreamType, output: asyncio.Queue):
        """(Re)start one listener task per market subset for *stream_type*."""
        self._stop_update_tasks(stream_type)
        market_assignments: List[str] = await self.get_markets_per_ws_connection()
        for task_index, market_subset in enumerate(market_assignments):
            await self._start_single_update_task(stream_type,
                                                 output,
                                                 task_index,
                                                 market_subset)

    async def _start_single_update_task(self,
                                        stream_type: StreamType,
                                        output: asyncio.Queue,
                                        task_index: int,
                                        market_subset: str):
        """Spawn one listener task for the comma-separated *market_subset*."""
        self._tasks[stream_type][task_index] = self.TaskEntry(
            set(market_subset.split(',')),
            safe_ensure_future(self._collect_and_decode_messages_loop(stream_type, task_index, output))
        )

    async def _refresh_subscriptions(self, stream_type: StreamType, output: asyncio.Queue):
        """
        modifies the subscription list (market pairs) for each connection to track changes in active markets
        :param stream_type: whether diffs or trades
        :param output: the output queue
        """
        all_symbols: List[str] = await self.get_trading_pairs()
        all_symbols_set: Set[str] = set(all_symbols)
        # Accumulate the new pair set per (stream, connection) and apply at the end.
        pending_trading_pair_updates: Dict[Tuple[StreamType, int], Set[str]] = {}
        # removals
        # remove any markets in current connections that are not present in the new master set
        for task_index in self._tasks[stream_type]:
            update_key: Tuple[StreamType, int] = (stream_type, task_index)
            if update_key not in pending_trading_pair_updates:
                pending_trading_pair_updates[update_key] = self._tasks[stream_type][task_index].trading_pairs
            pending_trading_pair_updates[update_key] &= all_symbols_set
        # additions
        # from the new set of trading pairs, delete any items that are in the connections already
        for task_index in self._tasks[stream_type]:
            all_symbols_set -= self._tasks[stream_type][task_index].trading_pairs
        # now all_symbols_set contains just the additions, add each of those to the shortest connection list
        for market in all_symbols_set:
            smallest_index: int = 0
            smallest_set_size: int = self.SYMBOLS_PER_CONNECTION + 1
            for task_index in self._tasks[stream_type]:
                if len(self._tasks[stream_type][task_index].trading_pairs) < smallest_set_size:
                    smallest_index = task_index
                    smallest_set_size = len(self._tasks[stream_type][task_index].trading_pairs)
            if smallest_set_size < self.SYMBOLS_PER_CONNECTION:
                update_key: Tuple[StreamType, int] = (stream_type, smallest_index)
                if update_key not in pending_trading_pair_updates:
                    pending_trading_pair_updates[update_key] = self._tasks[stream_type][smallest_index].trading_pairs
                pending_trading_pair_updates[update_key].add(market)
            else:
                # Every existing connection is full: open a new one for this market.
                new_index: int = len(self._tasks[stream_type])
                await self._start_single_update_task(stream_type=stream_type,
                                                     output=output,
                                                     task_index=new_index,
                                                     market_subset=market)
        # update the trading pairs set for all task entries that have pending updates.
        for (stream_type, task_index), trading_pairs in pending_trading_pair_updates.items():
            self._tasks[stream_type][task_index].update_trading_pairs(trading_pairs)

    def _stop_update_tasks(self, stream_type: StreamType):
        """Cancel and forget every listener task for *stream_type*."""
        if stream_type in self._tasks:
            for task_index in self._tasks[stream_type]:
                if not self._tasks[stream_type][task_index].task.done():
                    self._tasks[stream_type][task_index].task.cancel()
            del self._tasks[stream_type]

    async def _collect_and_decode_messages_loop(self, stream_type: StreamType, task_index: int, output: asyncio.Queue):
        """Listener loop for one websocket connection: decode Kucoin messages
        into OrderBookMessage objects and push them onto *output*; reconnects
        after 5 seconds on any failure."""
        while True:
            try:
                kucoin_msg_iterator: KucoinWSConnectionIterator = KucoinWSConnectionIterator(
                    stream_type, self._tasks[stream_type][task_index].trading_pairs
                )
                self._tasks[stream_type][task_index].message_iterator = kucoin_msg_iterator
                async for raw_msg in kucoin_msg_iterator:
                    msg_type: str = raw_msg.get("type", "")
                    if msg_type in {"ack", "welcome", "pong"}:
                        # Protocol housekeeping messages carry no market data.
                        pass
                    elif msg_type == "message":
                        if stream_type == StreamType.Depth:
                            order_book_message: OrderBookMessage = KucoinOrderBook.diff_message_from_exchange(raw_msg)
                        else:
                            trading_pair: str = raw_msg["data"]["symbol"]
                            data = raw_msg["data"]
                            order_book_message: OrderBookMessage = \
                                KucoinOrderBook.trade_message_from_exchange(
                                    data,
                                    metadata={"trading_pair": trading_pair}
                                )
                        output.put_nowait(order_book_message)
                    elif msg_type == "error":
                        self.logger().error(f"WS error message from Kucoin: {raw_msg}")
                    else:
                        self.logger().warning(f"Unrecognized message type from Kucoin: {msg_type}. "
                                              f"Message = {raw_msg}.")
            except asyncio.CancelledError:
                raise
            except asyncio.TimeoutError:
                self.logger().error("Timeout error with WebSocket connection. Retrying after 5 seconds...",
                                    exc_info=True)
                await asyncio.sleep(5.0)
            except Exception:
                self.logger().error("Unexpected exception with WebSocket connection. Retrying after 5 seconds...",
                                    exc_info=True)
                await asyncio.sleep(5.0)
            finally:
                self._tasks[stream_type][task_index].message_iterator = None

    async def listen_for_trades(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
        """Stream trade messages into *output*, refreshing subscriptions hourly."""
        while True:
            try:
                await self._start_update_tasks(StreamType.Trade, output)
                while True:
                    await asyncio.sleep(secs_until_next_oclock())
                    await self._refresh_subscriptions(StreamType.Trade, output)
            except asyncio.CancelledError:
                raise
            except Exception as e:
                self.logger().error(f"Unexpected error. {e}", exc_info=True)
                await asyncio.sleep(5.0)
            finally:
                self._stop_update_tasks(StreamType.Trade)

    async def listen_for_order_book_diffs(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
        """Stream depth-diff messages into *output*, refreshing subscriptions hourly."""
        while True:
            try:
                await self._start_update_tasks(StreamType.Depth, output)
                while True:
                    await asyncio.sleep(secs_until_next_oclock())
                    await self._refresh_subscriptions(StreamType.Depth, output)
            except asyncio.CancelledError:
                raise
            except Exception as e:
                self.logger().error(f"Unexpected error. {e}", exc_info=True)
                await asyncio.sleep(5.0)
            finally:
                self._stop_update_tasks(StreamType.Depth)

    async def listen_for_order_book_snapshots(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
        """Poll REST snapshots for every trading pair once per hour and push
        them into *output*, pacing requests to respect rate limits."""
        while True:
            try:
                trading_pairs: List[str] = await self.get_trading_pairs()
                async with aiohttp.ClientSession() as client:
                    for trading_pair in trading_pairs:
                        try:
                            snapshot: Dict[str, Any] = await self.get_snapshot(client, trading_pair)
                            snapshot_timestamp: float = time.time()
                            snapshot_msg: OrderBookMessage = KucoinOrderBook.snapshot_message_from_exchange(
                                snapshot,
                                snapshot_timestamp,
                                metadata={"symbol": trading_pair}
                            )
                            output.put_nowait(snapshot_msg)
                            self.logger().debug(f"Saved order book snapshot for {trading_pair}")
                            await asyncio.sleep(self.SLEEP_BETWEEN_SNAPSHOT_REQUEST)
                        except asyncio.CancelledError:
                            raise
                        except Exception:
                            self.logger().error("Unexpected error.", exc_info=True)
                            await asyncio.sleep(5.0)
                    await asyncio.sleep(secs_until_next_oclock())
            except asyncio.CancelledError:
                raise
            except Exception:
                self.logger().error("Unexpected error.", exc_info=True)
                await asyncio.sleep(5.0)
|
# python 2.7.3
import sys
import math
# Python 2 script: reads n, then n numbers, and prints how many complete
# groups of four equal numbers can be formed.
n = input()  # py2 input() evaluates the line; expects an integer count
m = {}       # number -> occurrence count
for i in range(n):
    num = input()
    if num in m:
        m[num] += 1
    else:
        m[num] = 1
cnt = 0
for k, v in m.iteritems():  # py2 dict iteration
    if v >= 4:
        cnt += v / 4  # py2 integer division: whole groups of four
print cnt
|
from django.shortcuts import render
from django.views.generic import View
from django.http import JsonResponse
# Create your views here.
class PostView(View):
    """Minimal posts endpoint: GET renders the index page, POST returns a stub id."""

    def get(self, request):
        # Serve the posts index template.
        return render(request, 'posts/index.html')

    def post(self, request, *args, **kwargs):
        # Stub creation response with a fixed identifier.
        payload = {'id': 3}
        return JsonResponse(payload)
|
# TEST :
# curl -v -X PUT -T <filepath to upload> http://127.0.0.1:8080/<optional filepath>
# Example : curl -v -X PUT -T test_wsgi1.txt http://127.0.0.1:8080/wsgi.txt
import pprint
import os
def sample_app(environ, start_response):
    """Toy WSGI app (Python 2): PUT stores the request body to disk, GET copies a
    server-side file into FileDownload/downloaded.txt.

    PUT: body is written under 'uploaded<path>' (or a default path), directories
    are created as needed.
    GET: reads 'FileToDownload<path>' and duplicates it locally.
    NOTE(review): file handles in the GET branch are never closed explicitly for
    the read side, and the "Can't find the file" check does not stop the open().
    """
    #pprint.pprint(environ)
    # print
    # The environment variable CONTENT_LENGTH may be empty or missing
    try:
        request_body_size = int(environ.get('CONTENT_LENGTH', 0))
    except (ValueError):
        request_body_size = 0
    # Get file path from PATH env
    if environ['REQUEST_METHOD'] == 'PUT':
        path = environ['PATH_INFO']
        file_path = 'downloaded/downloaded.txt'
        if path:
            file_path = 'uploaded' + path
        # Create directory for download if not exist.
        if not os.path.exists(os.path.dirname(file_path)):
            os.makedirs(os.path.dirname(file_path))
        # When the method is POST the query string will be sent
        # in the HTTP request body which is passed by the WSGI server
        # in the file like wsgi.input environment variable.
        request_body = environ['wsgi.input'].read(request_body_size)
        print request_body
        with open(file_path, 'w') as f:
            f.write(request_body)
        status = "200 OK"
        body =['successfully uploaded\n']
        res_headers = [('Content-Type', 'text/plain')]
        start_response(status, res_headers)
    elif environ['REQUEST_METHOD'] == 'GET':
        print "Hello GET"
        path = environ['PATH_INFO']
        print path
        if path:
            file_path = "FileToDownload"+path
        # NOTE(review): this only prints a warning; open() below still runs and
        # will raise IOError if the file really is missing.
        if not os.path.exists(os.path.dirname(file_path)):
            print "Can't find the file"
        f= open(file_path,'r')
        data = f.read()
        print data
        if not os.path.exists(os.path.dirname("FileDownload/downloaded.txt")):
            os.makedirs(os.path.dirname("FileDownload/downloaded.txt"))
        fi = open("FileDownload/downloaded.txt",'w')
        fi.write(data)
        fi.close()
        status = '200 OK'
        body =["successfully Downloaded\n"]
        res_headers = [('Content-Type','text/plain')]
        start_response(status, res_headers)
    # Response body for the WSGI server to send back.
    return body
# Run a simple development HTTP server when executed directly.
if __name__ == "__main__":
    from paste import httpserver
    httpserver.serve(sample_app, host='127.0.0.1', port='8080')
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 21 22:11:38 2016
@author: amino
"""
import pytest
from ctip import GenParser
def test_single_var_single_val():
    """A genfile with one variable and one value yields a single, dep-free domain."""
    result = GenParser.parseFile("tests/resources/genfile1_single_var_single_arg.gen")
    assert result["name"] == "bows"
    assert len(result["schema"]) == 1
    domain = result["schema"][0]
    assert domain["var"] == "type"
    assert len(domain["values"]) == 1
    assert domain["values"][0] == "long"
    assert "deps" not in domain
def test_no_name():
    """A bare assignment parses to a schema with no top-level name."""
    result = GenParser.parseString("type = long")
    assert "name" not in result
    assert "schema" in result
def test_single_var_multiple_vals():
    """One variable with a comma-separated value list keeps the values in order."""
    result = GenParser.parseFile("tests/resources/genfile2_single_var_multiple_args.gen")
    assert result["name"] == "bows"
    assert len(result["schema"]) == 1
    domain = result["schema"][0]
    assert domain["var"] == "type"
    assert len(domain["values"]) == 2
    assert domain["values"][0] == "long"
    assert domain["values"][1] == "recurve"
    assert "deps" not in domain
def test_special_chars():
    """Variable names may contain digits, underscores, dots, and hyphens."""
    result = GenParser.parseString("name7_with.special-chars = val1, val2")
    assert result["schema"][0]["var"] == "name7_with.special-chars"
def test_quoted_values():
    """Quoted values preserve '#', embedded commas, and escaped quote characters."""
    result = GenParser.parseString("variable = 'val1', \"#alwaystraining\", 'comma,string' # comment")
    domain = result["schema"][0]
    assert len(domain["values"]) == 3
    assert domain["values"].asList() == ["val1", "#alwaystraining", "comma,string"]
    result = GenParser.parseString("var = '{}',\"{}\"".format("\\'", '\\"'))
    assert result["schema"][0]["values"].asList() == ["'", '"']
def test_multiple_vars():
    """Two top-level variables become two independent schema domains."""
    result = GenParser.parseFile("tests/resources/genfile3_multiple_vars.gen")
    assert result["name"] == "bows"
    assert len(result["schema"]) == 2
    domain = result["schema"][0]
    assert domain["var"] == "type"
    assert len(domain["values"]) == 2
    assert domain["values"][0] == "long"
    assert domain["values"][1] == "recurve"
    assert "deps" not in domain
    domain = result["schema"][1]
    assert domain["var"] == "wood"
    assert len(domain["values"]) == 4
    assert domain["values"].asList() == ["osage orange", "yew", "oak", "hickory"]
    assert "deps" not in domain
def test_simple_nested_preconstructed_args():
    """Nested blocks attach dependent domains ("deps") to each parent value."""
    result = GenParser.parseFile("tests/resources/genfile4_simple_nested_preconstructed_args.gen")
    assert result["name"] == "bows"
    assert len(result["schema"]) == 3
    domain = result["schema"][0]
    assert domain["var"] == "type"
    assert len(domain["values"]) == 2
    assert domain["values"][0] == "long"
    assert domain["values"][1] == "recurve"
    assert "deps" not in domain
    domain = result["schema"][1]
    assert domain["var"] == "type"
    assert len(domain["values"]) == 1
    assert domain["values"][0] == "long"
    assert len(domain["deps"]) == 1
    dep = domain["deps"][0]
    assert dep["var"] == "length"
    assert dep["values"].asList() == [66, 72]
    assert "deps" not in dep
    domain = result["schema"][2]
    assert domain["var"] == "type"
    assert len(domain["values"]) == 1
    assert domain["values"][0] == "recurve"
    assert len(domain["deps"]) == 1
    dep = domain["deps"][0]
    assert dep["var"] == "length"
    assert dep["values"].asList() == [42, 46]
    assert "deps" not in dep
def test_multiple_vars_in_nest():
    """A nested block may define several dependent variables for one parent value."""
    result = GenParser.parseFile("tests/resources/genfile6_multiple_vars_in_nest.gen")
    assert result["name"] == "bows"
    assert len(result["schema"]) == 2
    domain = result["schema"][0]
    assert domain["var"] == "type"
    assert domain["values"].asList() == ["long"]
    deps = domain["deps"]
    assert len(deps) == 2
    assert deps[0]["var"] == "length"
    assert deps[0]["values"].asList() == [42, 46]
    assert "deps" not in deps[0]
    assert deps[1]["var"] == "wood"
    assert deps[1]["values"].asList() == ['osage orange','yew']
    assert "deps" not in deps[1]
    domain = result["schema"][1]
    assert domain["var"] == "type"
    assert domain["values"].asList() == ["recurve"]
    deps = domain["deps"]
    assert len(deps) == 2
    assert deps[0]["var"] == "length"
    assert deps[0]["values"].asList() == [66, 72]
    assert "deps" not in deps[0]
    assert deps[1]["var"] == "wood"
    assert deps[1]["values"].asList() == ['hickory']
    assert "deps" not in deps[1]
def test_multi_nested():
    """Dependencies nest recursively: decoder -> gates -> complexity -> length."""
    result = GenParser.parseFile("tests/resources/genfile9_multi_nested.gen")
    assert result["name"] == "p3"
    assert len(result["schema"]) == 1
    domain = result["schema"][0]
    assert domain["var"] == "decoder"
    assert domain["values"].asList() == ["Hypercube"]
    deps = domain["deps"]
    assert len(deps) == 2
    assert deps[0]["var"] == "gates"
    assert deps[0]["values"].asList() == [12]
    deps2 = deps[0]["deps"]
    assert len(deps2) == 2
    assert deps2[0]["var"] == "complexity"
    assert deps2[0]["values"].asList() == [2]
    deps3 = deps2[0]["deps"]
    assert len(deps3) == 1
    assert deps3[0]["var"] == "length"
    assert deps3[0]["values"].asList() == [80]
    assert "deps" not in deps3[0]
    assert deps2[1]["var"] == "complexity"
    assert deps2[1]["values"].asList() == [3]
    deps3 = deps2[1]["deps"]
    assert len(deps3) == 1
    assert deps3[0]["var"] == "length"
    assert deps3[0]["values"].asList() == [110]
    assert "deps" not in deps3[0]
    assert deps[1]["var"] == "gates"
    assert deps[1]["values"].asList() == [15]
    deps2 = deps[1]["deps"]
    assert len(deps2) == 2
    assert deps2[0]["var"] == "complexity"
    assert deps2[0]["values"].asList() == [2]
    deps3 = deps2[0]["deps"]
    assert len(deps3) == 1
    assert deps3[0]["var"] == "length"
    assert deps3[0]["values"].asList() == [116]
    assert "deps" not in deps3[0]
    assert deps2[1]["var"] == "complexity"
    assert deps2[1]["values"].asList() == [3]
    deps3 = deps2[1]["deps"]
    assert len(deps3) == 1
    assert deps3[0]["var"] == "length"
    assert deps3[0]["values"].asList() == [140, 158]
    assert "deps" not in deps3[0]
def test_multiple_vars_own_nest():
    """Several sibling dependent variables can share a single parent nest."""
    result = GenParser.parseFile("tests/resources/genfile10_multiple_vars_own_nest.gen")
    assert result["name"] == "locations"
    assert len(result["schema"]) == 1
    domain = result["schema"][0]
    assert domain["var"] == "city"
    assert domain["values"].asList() == ["East Lansing", "Lansing", "Okemos"]
    deps = domain["deps"]
    assert len(deps) == 3
    assert deps[0]["var"] == "county"
    assert deps[0]["values"].asList() == ["Ingham"]
    assert "deps" not in deps[0]
    assert deps[1]["var"] == "state"
    assert deps[1]["values"].asList() == ["Michigan"]
    assert "deps" not in deps[1]
    assert deps[2]["var"] == "country"
    assert deps[2]["values"].asList() == ["United States"]
    assert "deps" not in deps[2]
def test_integer_range():
    """Integer ranges parse to (start, stop) or (start, stop, step) tuples, negatives included."""
    result = GenParser.parseString("var = 0:4")
    assert len(result["schema"]) == 1
    assert result["schema"][0]["var"] == "var"
    assert result["schema"][0]["values"].asList() == [(0, 4)]
    result = GenParser.parseString("var = -2:3")
    assert len(result["schema"]) == 1
    assert result["schema"][0]["var"] == "var"
    assert result["schema"][0]["values"].asList() == [(-2, 3)]
    result = GenParser.parseString("var = 1:-1:1")
    assert len(result["schema"]) == 1
    assert result["schema"][0]["var"] == "var"
    assert result["schema"][0]["values"].asList() == [(1, -1, 1)]
    result = GenParser.parseString("var = 3:0:-2")
    assert len(result["schema"]) == 1
    assert result["schema"][0]["var"] == "var"
    assert result["schema"][0]["values"].asList() == [(3, 0, -2)]
def test_floating_point_range():
    """Float ranges support bare leading dots, negatives, and mixed int/float bounds."""
    result = GenParser.parseString("var = 0.0:1.0")
    assert len(result["schema"]) == 1
    assert result["schema"][0]["var"] == "var"
    assert result["schema"][0]["values"].asList() == [(0.0, 1.0)]
    result = GenParser.parseString("var = .2:0.9:.05")
    assert len(result["schema"]) == 1
    assert result["schema"][0]["var"] == "var"
    assert result["schema"][0]["values"].asList() == [(0.2, 0.9, 0.05)]
    result = GenParser.parseString("var = -7.7:6.5")
    assert len(result["schema"]) == 1
    assert result["schema"][0]["var"] == "var"
    assert result["schema"][0]["values"].asList() == [(-7.7, 6.5)]
    result = GenParser.parseString("var = 2.3:-1.1:2")
    assert len(result["schema"]) == 1
    assert result["schema"][0]["var"] == "var"
    assert result["schema"][0]["values"].asList() == [(2.3, -1.1, 2)]
    result = GenParser.parseString("var = 23.2:17:-0.2")
    assert len(result["schema"]) == 1
    assert result["schema"][0]["var"] == "var"
    assert result["schema"][0]["values"].asList() == [(23.2, 17, -0.2)]
def test_commented():
    """Comments in a genfile are ignored; the remaining schema parses normally."""
    result = GenParser.parseFile("tests/resources/genfile11_commented.gen")
    assert result["name"] == "p3"
    assert len(result["schema"]) == 3
    domain = result["schema"][0]
    assert domain["var"] == "decoder"
    assert domain["values"].asList() == ["Hypercube", "Unstructured", "FixedLogic", "FixedInputs"]
    assert "deps" not in domain
    domain = result["schema"][1]
    assert domain["var"] == "decoder"
    assert domain["values"].asList() == ["Hypercube"]
    deps = domain["deps"]
    assert len(deps) == 2
    assert deps[0]["var"] == "complexity"
    assert deps[0]["values"].asList() == [2]
    deps2 = deps[0]["deps"]
    assert len(deps2) == 1
    assert deps2[0]["var"] == "gates"
    assert deps2[0]["values"].asList() == [12, 15]
    assert "deps" not in deps2[0]
    assert deps[1]["var"] == "complexity"
    assert deps[1]["values"].asList() == [3]
    deps2 = deps[1]["deps"]
    assert len(deps2) == 1
    assert deps2[0]["var"] == "gates"
    assert deps2[0]["values"].asList() == [8, 11]
    assert "deps" not in deps2[0]
    domain = result["schema"][2]
    assert domain["var"] == "decoder"
    assert domain["values"].asList() == ["Unstructured"]
    deps = domain["deps"]
    assert len(deps) == 1
    assert deps[0]["var"] == "complexity"
    assert deps[0]["values"].asList() == [2, 3]
    assert "deps" not in deps[0]
|
# Encoding: utf-8
import copy
class ProPlayer(object):
    """Othello/Reversi AI player using minimax with alpha-beta pruning.

    The evaluation combines coin parity, mobility, corner ownership, and a
    positional-weight ("stability") score.  `board` and `rulebook` are
    project objects providing the game state and the move rules.
    Bug fix: stability() now accumulates the weights of ALL pieces; the
    original overwrote the running total on each piece, so only the last
    piece seen counted.
    """
    def __init__(self, name, color, board, rulebook):
        self.name = name
        self.color = color          # piece color (+1 / -1); opponent is -color
        self.board = board          # live game board object
        self.rulebook = rulebook    # supplies getValidMoves / pass bookkeeping
        self.weight_board = self.weightBoard()  # static positional weights
        self.depth = 5              # minimax search depth
    def weightBoard(self):
        """Return the 8x8 positional weight matrix (corners valuable, X/C squares risky)."""
        board = [[4, -3, 2, 2, 2, 2, -3, 4],
                 [-3, -4, -1, -1, -1, -1, -4, -3],
                 [2, -1, 1, 0, 0, 1, -1, 2],
                 [2, -1, 0, 1, 1, 0, -1, 2],
                 [2, -1, 0, 1, 1, 0, -1, 2],
                 [2, -1, 1, 0, 0, 1, -1, 2],
                 [-3, -4, -1, -1, -1, -1, -4, -3],
                 [4, -3, 2, 2, 2, 2, -3, 4]]
        return board
    def coinParity(self, board, color):
        """Relative piece-count advantage for `color`, in [-100, 100].

        `board` is the raw 2-D cell array.  Returns 0 for an empty board
        (guard added; the original divided by zero in that case).
        """
        total_my_color = 0
        total_opponent_color = 0
        for row in board:
            for column in row:
                if(column == color):
                    total_my_color += 1
                elif(column == -color):
                    total_opponent_color += 1
        total = total_my_color + total_opponent_color
        if(total != 0):
            return 100.0*(float(total_my_color - total_opponent_color)/float(total))
        else:
            return 0
    def mobility(self, board, color):
        """Relative number of legal moves for `color`, in [-100, 100]; 0 if neither side can move."""
        my_valid_moves = self.rulebook.getValidMoves(color, board)
        opponent_valid_moves = self.rulebook.getValidMoves(-color, board)
        my_moves = len(my_valid_moves)
        opponent_moves = len(opponent_valid_moves)
        if(my_moves + opponent_moves != 0):
            return 100.0*(float(my_moves - opponent_moves)/float(my_moves + opponent_moves))
        else:
            return 0
    def corners(self, board, color):
        """Relative corner ownership for `color`, in [-100, 100]; 0 when no corner is taken."""
        my_corners = self.getCorners(board, color)
        opponent_corners = self.getCorners(board, -color)
        if(my_corners + opponent_corners != 0):
            return 100.0*(float(my_corners - opponent_corners)/float(my_corners + opponent_corners))
        else:
            return 0
    def getCorners(self, board, color):
        """Count how many of the four corners are held by `color`."""
        total_corners = 0
        for i, j in ((0, 0), (0, 7), (7, 0), (7, 7)):
            if(board[i][j] == color):
                total_corners += 1
        return total_corners
    def stability(self, board, color):
        """Positional-weight heuristic for `color`, in [-100, 100].

        Sums weight_board entries over each side's pieces.  Bug fix: uses
        `+=` so every piece contributes; previously each piece overwrote
        the total and only the last one counted.
        """
        my_stability = 0
        opponent_stability = 0
        for i, row in enumerate(board):
            for j, column in enumerate(row):
                if(column == color):
                    my_stability += self.weight_board[i][j]
                elif(column == -color):
                    opponent_stability += self.weight_board[i][j]
        if(my_stability + opponent_stability != 0):
            return 100.0*(float(my_stability - opponent_stability)/float(my_stability + opponent_stability))
        else:
            return 0
    def stateValue(self, board, color):
        """Weighted combination of all heuristics for a board object (corners dominate)."""
        coin = self.coinParity(board.board, color)
        mobility = self.mobility(board, color)
        corner = self.corners(board.board, color)
        stab = self.stability(board.board, color)
        return 30*corner + 5*mobility + 10*stab + 2*coin
    def cornerImmediatly(self, board, color):
        # Placeholder kept for interface compatibility; never implemented.
        pass
    def copyBoard(self, board):
        """Deep-copy the board object so search can mutate it freely."""
        return copy.deepcopy(board)
    def minimax(self, current_board, depth, color, maximizingPlayer, x, y, alpha, beta):
        """Alpha-beta minimax search.

        Returns (value, i, j): the best heuristic value found and the move
        chosen at this level.  `x`, `y` carry the move that produced
        `current_board` so leaves can report it back.  A side with no legal
        moves is evaluated statically (treated as a leaf).
        """
        if(depth == 0 or current_board.isBoardFull()):
            heuristic = self.stateValue(current_board, color)
            return heuristic, x, y
        if(maximizingPlayer):
            bestValue = float("-inf")
            best_i = 0
            best_j = 0
            available_tiles = []
            valid_moves = self.rulebook.getValidMoves(color, current_board)
            if not valid_moves:
                heuristic = self.stateValue(current_board, color)
                return heuristic, x, y
            # Deduplicate target tiles (one move may flip in several directions).
            for move in valid_moves:
                if move[0] not in available_tiles:
                    available_tiles.append(move[0])
            for tile in available_tiles:
                # Collect every flip direction for this tile.
                flip_directions = []
                for moves in valid_moves:
                    if (moves[0] == tile):
                        flip_directions.append(moves[1])
                # Build the child position.
                node = self.copyBoard(current_board)
                i, j = tile[0], tile[1]
                node.placePieceInPosition(color, i, j)
                node.flipPieces(color, tile, flip_directions)
                value, child_i, child_j = self.minimax(node, depth-1, color, False, i, j, alpha, beta)
                if(value > bestValue):
                    best_i = i
                    best_j = j
                    bestValue = value
                alpha = max(alpha, bestValue)
                if(beta <= alpha):
                    break  # beta cut-off
            return bestValue, best_i, best_j
        else:
            bestValue = float("inf")
            best_i = 0
            best_j = 0
            available_tiles = []
            valid_moves = self.rulebook.getValidMoves(-color, current_board)
            if not valid_moves:
                heuristic = self.stateValue(current_board, color)
                return heuristic, x, y
            for move in valid_moves:
                if move[0] not in available_tiles:
                    available_tiles.append(move[0])
            for tile in available_tiles:
                flip_directions = []
                for moves in valid_moves:
                    if (moves[0] == tile):
                        flip_directions.append(moves[1])
                # Opponent's child position.
                node = self.copyBoard(current_board)
                i, j = tile[0], tile[1]
                node.placePieceInPosition(-color, i, j)
                node.flipPieces(-color, tile, flip_directions)
                value, child_i, child_j = self.minimax(node, depth-1, color, True, i, j, alpha, beta)
                if(value < bestValue):
                    best_i = i
                    best_j = j
                    bestValue = value
                beta = min(beta, bestValue)
                if(beta <= alpha):
                    break  # alpha cut-off
            return bestValue, best_i, best_j
    def play(self):
        """Pick the best move via minimax and apply it to the live board.

        Returns the chosen [i, j] move, or None when the game is over or
        the player must pass.
        """
        if(self.board.isBoardFull() or self.board.noMoreMoves() or self.rulebook.pass_turn == 2):
            self.rulebook.end_game = True
        else:
            valid_moves = self.rulebook.getValidMoves(self.color, self.board)
            if not valid_moves:
                # print(...) is parenthesized for Py2/Py3 compatibility; output unchanged.
                print("No moves available, player must pass")
                self.rulebook.pass_turn += 1
            else:
                board_copy = self.copyBoard(self.board)
                bestValue, best_i, best_j = self.minimax(board_copy, self.depth, self.color, True, 0, 0, float("-inf"), float("inf"))
                pro_move = [best_i, best_j]
                flip_directions = []
                for move in valid_moves:
                    if (pro_move == move[0]):
                        flip_directions.append(move[1])
                self.board.placePieceInPosition(self.color, best_i, best_j)
                self.rulebook.pass_turn = 0
                self.board.flipPieces(self.color, pro_move, flip_directions)
                return pro_move
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
import re
import pandas as pd
from textblob import TextBlob
# Load the pre-trained SVM classifier once at import time.
# NOTE(review): the file handle passed to pickle.load is never closed;
# a `with open(...)` block would be safer -- confirm before changing.
model = pickle.load(open('Pickle_SVM_sameday_stock3.pkl', 'rb'))
#!pip install joblib
#from sklearn.externals import joblib
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
app = Flask(__name__)
@app.route('/')
def home():
    """Render the landing page containing the prediction input form."""
    return render_template('HOME.html')
@app.route('/predict',methods=['POST'])
def predict():
    """Predict market-volatility direction from Open/High/Low prices and a tweet/headline.

    Cleans the text, scores its sentiment polarity with TextBlob, normalizes the
    four features, and runs the pickled SVM.  Renders HOME.html with the result.
    NOTE(review): the labelled CSV is re-read and re-split on every request, and
    the request row is normalized independently of the training data -- confirm
    this matches how the model was trained.
    """
    if request.method == 'POST':
        #Accepting the user inputs tweets/headline,Open,High,low.
        #Do normalization on all data
        Open=request.form['Open']
        Low=request.form['Low']
        High=request.form['High']
        Open=float(Open)
        High=float(High)
        Low=float(Low)
        #processing of twitter data
        Tweets=request.form['Tweet/Headline']
        df_copy=Tweets
        #Cleaning and preprocessing
        df_copy=df_copy.lower()
        df_copy=re.sub(r'@[A-Z0-9a-z_:]+','',df_copy)#replace username-tags
        df_copy=re.sub(r'^[RT]+','',df_copy)#replace RT-tags
        df_copy = re.sub('https?://[A-Za-z0-9./]+','',df_copy)#replace URLs
        df_copy=re.sub("[^a-zA-Z]", " ",df_copy)#replace hashtags
        # Create textblob objects of the tweet
        sentiment_objects = TextBlob(df_copy)
        tweet_polarity=sentiment_objects.sentiment.polarity
        result=[tweet_polarity]
        # Assemble the single-row feature frame for the model.
        dict={'Open':[Open],'High':[High],'Low':[Low],'Polarity':[result[0]]}
        df_test= pd.DataFrame(dict)
        data = pd.read_csv('labelled_dataset_full.csv')
        data_1=data[['Low','Open','High','lab_sameday','Polarity']]
        data_1.dropna(inplace=True)
        X=data_1[['Open','High','Low','Polarity']]
        y=data_1['lab_sameday'].values
        X_train,X_test,y_train,y_test= train_test_split(X,y,test_size=0.2,random_state=0)
        # normalize the data attributes
        X_train= preprocessing.normalize(X_train)
        X_test= preprocessing.normalize(X_test)
        df_test = preprocessing.normalize(df_test)
        prediction = model.predict(df_test)
        # Class 1 -> higher volatility expected; anything else -> lower.
        if prediction[0]==1:
            return render_template('HOME.html', prediction_text='This Headline/Tweet may increase market volatility' )
        else:
            return render_template('HOME.html', prediction_text='This Headline/Tweet may decrease market volatility' )
if __name__ == "__main__":
    # Development server only; use a WSGI server in production.
    app.run(port=9000,debug=False)
# In[ ]:
|
from pathlib import Path
import numpy as np
from scipy.io import wavfile
from tempfile import TemporaryDirectory
from subprocess import run, DEVNULL
import shutil
import torch
import torchaudio
import torch.nn.functional as F
# Restrict torch to a single intra-op thread for this process.
torch.set_num_threads(1)
class Psycho:
    """Psychoacoustic filtering of 16 kHz audio.

    STFT components whose level falls below a (phi-shifted) hearing
    threshold are zeroed out, then the signal is reconstructed via iSTFT.
    Thresholds come from an external MATLAB tool (see calc_thresholds).
    """
    def __init__(self, phi):
        self.phi = phi                  # threshold offset in dB; None disables filtering
        self.sampling_rate = 16000      # expected input sample rate (Hz)
        self.win_length = 512           # STFT window length (samples)
        self.hop_length = 256           # STFT hop length (samples)
    @staticmethod
    def calc_thresholds(in_file, out_file=None):
        """Compute hearing thresholds for a wav file via the external MATLAB tool.

        Writes a CSV of per-frame/per-bin thresholds (default: alongside the
        input with a .csv suffix).  Requires the MATLAB runtime paths baked
        into the command below.
        """
        in_file = Path(in_file)
        if not out_file: out_file = in_file.with_suffix(".csv")
        with TemporaryDirectory() as tmp_dir:
            # copy wav in tmp dir
            tmp_wav_file = Path(tmp_dir).joinpath(in_file.name)
            shutil.copyfile(in_file, tmp_wav_file)
            # create wav.scp (Kaldi-style "utt-id path" list expected by the tool)
            tmp_wav_scp = Path(tmp_dir).joinpath('wav.scp')
            tmp_wav_scp.write_text(f'data {tmp_wav_file}\n')
            # run the MATLAB hearing-threshold tool (output silenced)
            run(f"/root/hearing_thresholds/run_calc_threshold.sh /usr/local/MATLAB/MATLAB_Runtime/v96 {tmp_wav_scp} 512 256 {tmp_dir}/",
                stdout=DEVNULL, stderr=DEVNULL, shell=True)
            shutil.copyfile(Path(tmp_dir).joinpath('data_dB.csv'), out_file)
    def get_psycho_mask(self, complex_spectrum, threshs_file):
        """Build a 0/1 mask over the STFT: bins below the scaled hearing threshold get 0.

        The DC bin (row 0) is always kept.  Returns a tensor shaped like
        `complex_spectrum` (last dim 2 = real/imag, same mask for both).
        """
        tmp_complex_spectrum = complex_spectrum.detach().clone()
        # Step 1: remove offset
        offset = tmp_complex_spectrum[0,:,:]
        features = tmp_complex_spectrum[1:,:,:]
        # Step 2: represent as phase and magnitude
        a_re = features[:,:,0]; a_re[torch.where(a_re == 0)] = 1e-20  # avoid div-by-zero in atan
        b_im = features[:,:,1]
        # phase
        phase = torch.atan( b_im / a_re )
        phase[torch.where(a_re < 0)] += np.pi
        # magnitude
        magnitude = torch.sqrt( torch.square(a_re) + torch.square(b_im) )
        # Step 3: get thresholds
        assert self.phi is not None
        # import thresholds
        assert threshs_file.is_file()
        # read in hearing thresholds
        thresholds = Path(threshs_file).read_text()
        thresholds = [row.split(',') for row in thresholds.split('\n')]
        # remove padded frames (copies frames at end and beginning)
        thresholds = np.array(thresholds[4:-4], dtype=float)[:,:256]
        thresholds = torch.tensor(thresholds, dtype=torch.float32)
        thresholds = thresholds.permute((1,0))  # -> (freq bins, frames) to match the STFT layout
        # Step 4: calc mask
        m_max = magnitude.max()
        S = 20*torch.log10(magnitude / m_max) # magnitude in dB
        H = thresholds - 95  # presumably maps the CSV's dB scale to dBFS -- confirm with the tool
        # scale with phi
        H_scaled = H + self.phi
        # mask: keep only components above the (shifted) hearing threshold
        mask = torch.ones(S.shape)
        mask[torch.where(S <= H_scaled)] = 0
        mask_offset = torch.ones((1, mask.shape[1]))  # always keep the DC row
        mask = torch.cat((mask_offset, mask), dim=0)
        mask = torch.stack((mask, mask), dim=2)
        return mask
    def forward(self, signal, threshs_file):
        """STFT -> threshold mask -> iSTFT; returns `signal` unchanged when phi is None."""
        if self.phi is None:
            return signal
        # fft
        complex_spectrum = torch.stft(signal,
                                      n_fft=self.win_length,
                                      hop_length=self.hop_length,
                                      win_length=self.win_length,
                                      window=torch.hamming_window(self.win_length),
                                      pad_mode='constant',
                                      onesided=True)
        # mask signal with psychoacoustic thresholds
        mask = self.get_psycho_mask(complex_spectrum, threshs_file)
        complex_spectrum_masked = complex_spectrum * mask
        # ifft
        signal_out = torch.istft(complex_spectrum_masked,
                                 n_fft=self.win_length,
                                 hop_length=self.hop_length,
                                 win_length=self.win_length,
                                 window=torch.hamming_window(self.win_length),
                                 onesided=True)
        return signal_out
    def convert_wav(self, in_file, threshs_file, out_file, device='cpu'):
        """Load a wav, filter it through forward(), and write 16-bit PCM to out_file."""
        torch_signal, torch_sampling_rate = torchaudio.load(in_file)
        # Scale from [-1, 1] floats to int16 range before processing.
        torch_signal = (torch.round(torch_signal*32767)).squeeze().to(device)
        signal_out = self.forward(torch_signal, threshs_file)
        signal_out = torch.round(signal_out).cpu().detach().numpy().astype('int16')
        wavfile.write(out_file, self.sampling_rate, signal_out)
|
# -*- coding:utf-8 -*-
"""输入一个链表,按链表值从尾到头的顺序返回一个ArrayList。"""
class ListNode:
    """Singly linked list node."""
    def __init__(self, x):
        self.val = x       # node payload
        self.next = None   # next node, or None at the tail
class Solution:
    # 返回从尾部到头部的列表值序列,例如[1,2,3]
    def printListFromTailToHead(self, listNode):
        """Return the node values of a singly linked list in tail-to-head order.

        Iterative version: collect values head-to-tail, then reverse in place.
        An empty list (None head) yields [].
        """
        values = []
        node = listNode
        while node:
            values.append(node.val)
            node = node.next
        values.reverse()
        return values
# Demo: build the list 67 -> 0 -> 24 -> 58 and print it tail-to-head.
s = Solution()
listNode = ListNode(67)
listNode.next = ListNode(0)
listNode.next.next = ListNode(24)
listNode.next.next.next = ListNode(58)
print(s.printListFromTailToHead(listNode))
# import module
import sqlite3

# Connect to the local SQLite database file.
SQLiteConn = sqlite3.connect('/path/to/localhost/database/file.db')
# create a cursor
SQLiteCursor = SQLiteConn.cursor()

# Create a demo table and populate it.
SQLiteCursor.execute('''CREATE TABLE example
             (date text, trans text, symbol text, qty real, price real)''')
SQLiteCursor.execute('''INSERT INTO example
            VALUES ('2006-01-05','BUY','RHAT',100,35.14)''')
SQLiteCursor.execute("SELECT * FROM example")

# Query with a dynamic table name.
# Bug fix: `table_name` was previously undefined and raised NameError here.
table_name = 'example'
# SQL parameters (?) can bind only values, never identifiers such as table
# names, so the name must be interpolated -- validate it first to prevent
# SQL injection if it ever comes from user input.
if not table_name.isidentifier():
    raise ValueError('invalid table name: %r' % (table_name,))
SQLiteCursor.execute("SELECT * FROM %s" % (table_name,))

# commit changes
SQLiteConn.commit()
# close the connection
SQLiteConn.close()
|
"""
Django settings for course_rater project.
Generated by 'django-admin startproject' using Django 2.1.12.
"""
import os
if os.environ['ENVIRONMENT'] == 'circleci':
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.environ['ENVIRONMENT'] in ['development', 'circleci']:
SECRET_KEY = 'notasecretkey'
DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
CSRF_COOKIE_SECURE = False
SESSION_COOKIE_SECURE = False
else:
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = False
ALLOWED_HOSTS = ['rate-my-waseda-api.qv84dmu98v.ap-northeast-1.elasticbeanstalk.com',
'api.ratemywaseda.com',]
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'course_rater_app.apps.CourseRaterAppConfig',
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'course_rater.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'course_rater.wsgi.application'
# Database
if os.environ['ENVIRONMENT'] == 'development':
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'HOST': 'db',
'PORT': 5432,
}
}
elif os.environ['ENVIRONMENT'] == 'circleci':
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'PORT': 5432,
}
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ['RDS_DB_NAME'],
'USER': os.environ['RDS_USERNAME'],
'PASSWORD': os.environ['RDS_PASSWORD'],
'HOST': os.environ['RDS_HOSTNAME'],
'PORT': os.environ['RDS_PORT'],
}
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
# Custom User
AUTH_USER_MODEL = 'course_rater_app.User'
# Rest Framework
if os.environ['ENVIRONMENT'] in ['development', 'circleci']:
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
],
}
else:
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'DEFAULT_RENDERER_CLASSES': [
'rest_framework.renderers.JSONRenderer',
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
],
}
# CORS configuration
CORS_ORIGIN_ALLOW_ALL = True
|
def classify_alphabet(char):
    """Classify a single character as uppercase, lowercase, or not a letter.

    Bug fix: the original's bare else labeled every non-uppercase character
    (digits, punctuation, whitespace) as lowercase; non-letters now get
    their own message.
    """
    if (char >= 'A') and (char <= 'Z'):
        return "the alphabets is uppercase"
    elif (char >= 'a') and (char <= 'z'):
        return "the alphabets is lowercase"
    else:
        return "the character is not an alphabet"

if __name__ == "__main__":
    # Guarded so importing this module does not block on input().
    char = input("Enter an alphabet:")
    print(classify_alphabet(char))
|
import sqlite3
from PyQt4. QtCore import *
from PyQt4. QtGui import *
from qgis. core import *
from qgis. gui import *
from qgis. networkanalysis import *
# Load point coordinates from a SpatiaLite database and show them as an
# in-memory QGIS point layer.  (Python 2 / QGIS 2.x API.)
# NOTE(review): the Windows path below relies on Py2 treating unknown
# escapes like \U literally; it would break as-is on Python 3.
conn = sqlite3.connect('C:\Users\Shubham\Desktop\DATABASE_SPATIALITE\DBSPatiaLite.sqlite')
c = conn.cursor()
# Enable and load the SpatiaLite extension so ST_X/ST_Y are available.
conn.enable_load_extension(True)
c.execute("select load_extension('mod_spatialite')")
c.execute('select ST_X(geom),ST_Y(geom) from Points')
points = []
for row in c.fetchall():
    points.append(QgsPoint(row[0],row[1]))
print points
# Build an in-memory point layer and add one feature per fetched point.
layer = QgsVectorLayer('Point', 'PointQuery' , "memory")
dr = layer.dataProvider()
for point in points:
    pt=QgsFeature()
    pt.setGeometry(QgsGeometry.fromPoint(point))
    dr.addFeatures([pt])
layer.updateExtents()
QgsMapLayerRegistry.instance().addMapLayers([layer])
from tkinter import *
import Config
import Chart_plotter
class Gui:
    """Tkinter front-end for the currency-prediction app.

    Collects the currency, train/test split, ARIMA (p, d, q) and RNN
    hyper-parameters, writes them into the shared Config module, and hands
    off to Chart_plotter.  Note: __init__ blocks in mainloop() until the
    window is closed.
    """
    def __init__(self, ):
        self.root = Tk()
        self.root.title("Currency prediction")
        # Header and currency selector.
        self.info = Label(self.root, text="Exchange rates from 2020", padx=10, pady=10, font='Helvetica 14 bold')
        self.currency_label = Label(self.root, text="Currency", padx=30, pady=10, font='Helvetica 14 bold')
        self.currency = StringVar()
        self.currency.set("USD")
        self.drop_menu = OptionMenu(self.root, self.currency, 'USD', 'EUR', 'GBP')
        # Train/test split percentage.
        self.label_training_percent = Label(self.root, text="Percentage of data for training", padx=30, pady=10, font='Helvetica 14 bold')
        self.input_training_percent = Entry(self.root, width=6, borderwidth=5)
        self.input_training_percent.insert(0, "70")
        # ARIMA hyper-parameters (p, d, q).
        self.label_arima = Label(self.root, text="ARIMA Config(p,d,q)", padx=30, pady=10, font='Helvetica 14 bold')
        self.input_p = Entry(self.root, width=6, borderwidth=5)
        self.input_p.insert(0, "5")
        self.input_d = Entry(self.root, width=6, borderwidth=5)
        self.input_d.insert(0, "1")
        self.input_q = Entry(self.root, width=6, borderwidth=5)
        self.input_q.insert(0, "0")
        # RNN hyper-parameters (lookback window, epochs, batch size).
        self.label_rnn = Label(self.root, text="RNN Config(Previous points, epochs, batch_size)", padx=30, pady=10, font='Helvetica 14 bold')
        self.input_previous_data_points = Entry(self.root, width=6, borderwidth=5)
        self.input_previous_data_points.insert(0, "3")
        self.input_epochs = Entry(self.root, width=6, borderwidth=5)
        self.input_epochs.insert(0, "50")
        self.input_batch_size = Entry(self.root, width=6, borderwidth=5)
        self.input_batch_size.insert(0, "2")
        self.start = Button(self.root, text="Start", padx=350, pady=20, command=self.start_counting, font='Helvetica 18 bold')
        # Grid layout: labels in column 0, inputs in column 1.
        self.info.grid(row=0, column=0, columnspan=2)
        self.currency_label.grid(row=1, column=0)
        self.drop_menu.grid(row=1, column=1)
        self.label_training_percent.grid(row=2, column=0)
        self.input_training_percent.grid(row=2, column=1)
        self.label_arima.grid(row=3, column=0)
        self.input_p.grid(row=4, column=0)
        self.input_d.grid(row=5, column=0)
        self.input_q.grid(row=6, column=0)
        self.label_rnn.grid(row=3, column=1)
        self.input_previous_data_points.grid(row=4, column=1)
        self.input_epochs.grid(row=5, column=1)
        self.input_batch_size.grid(row=6, column=1)
        self.start.grid(row=7, columnspan=2)
        self.root.mainloop()
    def start_counting(self):
        """Copy the form values into Config, run the plotter, and close the window.

        Raises ValueError (uncaught) if any numeric field is not an integer.
        """
        Config.currency = self.currency.get()
        Config.training_percent = int(self.input_training_percent.get())/100
        Config.p = int(self.input_p.get())
        Config.d = int(self.input_d.get())
        Config.q = int(self.input_q.get())
        Config.previous_data_points = int(self.input_previous_data_points.get())
        Config.batch_size = int(self.input_batch_size.get())
        Config.epochs = int(self.input_epochs.get())
        Chart_plotter.start()
        self.root.destroy()
|
# -*- coding: utf-8 -*-
from common import *
sier = {"init": "A", "replace": {"A": "B-A-B", "B": "A+B+A"}, "delta": pi / 3}
dragon = {"init": "FX", "replace": {"X": "X+YF", "Y": "FX-Y"}, "delta": pi / 2}
koch = {"init": "F", "replace": {"F": "F+F-F-F+F"}, "delta": pi / 2}
def fractal(n, ruleset):
    """Expand an L-system string by applying the ruleset's rewrite rules n times.

    `ruleset` needs keys "init" (axiom string) and "replace" (dict mapping a
    symbol to its expansion); symbols without a rule are copied through.
    Improvements over the original: no shadowing of the builtins `dir` and
    `next`, linear-time str.join instead of quadratic `+=`, and `range`
    instead of Py2-only `xrange` (identical behavior on Python 2).
    """
    state = ruleset["init"]
    rules = ruleset["replace"]
    for _ in range(n):
        state = "".join(rules.get(ch, ch) for ch in state)
    return state
def draw(n, ruleset, color='b', width=0.1):
    """Render the n-th expansion of *ruleset* as a turtle-style line plot.

    '+'/'-' turn by ruleset["delta"]; any other symbol moves one unit forward
    and draws a segment.  Uses pylab state functions (clf/gcf/xlim/...) pulled
    in by `from common import *`.  NOTE: Python 2 file (`print sz` statement).
    """
    delta = ruleset["delta"]
    # Turtle state: position (x, y), bounding box, and heading (starts at 45 deg).
    y, x, minx, miny, maxx, maxy, ang = 0, 0, 0, 0, 0, 0, pi / 4
    clf()
    ax = gcf().add_subplot(111,frameon=False,xticks=[],yticks=[])
    for ch in fractal(n, ruleset):
        if ch == "+":
            ang += delta
        elif ch == "-":
            ang -= delta
        else:
            # Forward step: draw a unit segment and track the bounding box.
            dy, dx = sin(ang), cos(ang)
            ax.plot([x,x+dx], [y,y+dy], color=color, lw=width)
            y += dy
            x += dx
            minx = min(minx, x)
            miny = min(miny, y)
            maxx = max(maxx, x)
            maxy = max(maxy, y)
    # Center the view on the drawing with a 10% margin, keeping a square aspect.
    h, w = maxy - miny, maxx - minx
    sz = max(h,w)
    midy = (maxy + miny) * 0.5
    midx = (maxx + minx) * 0.5
    ylim(midy-sz*0.55,midy+sz*0.55)
    xlim(midx-sz*0.55,midx+sz*0.55)
    print sz
    # hideticks() presumably comes from common -- TODO confirm.
    hideticks()
    subplots_adjust(0,0,1,1)
    gca().set_aspect("equal")
    show()
|
# Retrieves data from Alberta Environment's website.
# Assumptions:
# 1. The station lists are up to date.
# 2. The url pattern for retrieving .csv data is up to date.
import os
import urllib
import datetime
# Declaration of each data category, url, and list of sites for streamflow,
# precipitation, snowpack, and reservoir levels.
# Data categories to fetch: (output folder, base URL, [(site name, station id), ...]).
# NOTE(review): `input` shadows the builtin; kept because the download loop
# below refers to the list by this name.
# BUG FIX: the snow list previously contained ('Akamina Pass','05AD803') twice,
# causing the same CSV to be downloaded and overwritten twice per run.
input = [
    ('flow', 'http://www.environment.alberta.ca/apps/Basins/data/text/river/',
     [('Bearspaw Diversion','05BH911'),
      ('Bow River at Banff','05BB001'),
      ('Bow River at Calgary','05BH004'),
      ('Bow River at Lake Louise','05BA001'),
      ('Bow River below Bassano Dam','05BM004'),
      ('Bow River below Carseland Dam','05BM002'),
      ('Bow River near the Mouth','05BN012'),
      ('Crowfoot Creek near Cluny','05BM008'),
      ('Elbow River at Bragg Creek','05BJ004'),
      ('Elbow River at Sarcee Bridge','05BJ010'),
      ('Elbow River below Glenmore Dam','05BJ001'),
      ('Ghost River above Waiparous Creek','05BG010'),
      ('Glenmore Diversion','05BJ917'),
      ('Highwood Diversion Canal near Headgates','05BL025'),
      ('Highwood River near the Mouth','05BL024'),
      ('Little Bow Canal at High River','05BL015'),
      ('Nose Creek above Airdrie','05BH014'),
      ('Pipestone River near Lake Louise','05BA002'),
      ('Sheep River at Okotoks','05BL012'),
      ('Spray River at Banff','05BC001'),
      ('Stimson Creek near Pekisko','05BL007'),
      ('Threepoint Creek near Millarville','05BL013'),
      ('Twelve Mile Creek near Cecil','05BN002'),
      ('Waiparous Creek near the Mouth','05BG006'),
      ('Western Irrigation District Canal near Headgates','05BM015')]),
    ('meteor', 'http://www.environment.alberta.ca/apps/Basins/data/text/meteor/',
     [('Azur - AARD','05AC802'),
      ('Banff - MSC','MSC-001'),
      ('Bassano - AARD','05CJ802'),
      ('Black Diamond - AARD','05BL815'),
      ('Bow Summit','05BA813'),
      ('Bow Valley Provincial Park - MSC','MSC-005'),
      ('Brook CDA Met Site - MSC','MSC-006'),
      ('Burns Creek','05BL813'),
      ('Canmore Meteorological Site - TAU','TAU-010'),
      ('Cascade Reservoir - Tau','TAU-002'),
      ('Compression Ridge','05BJ806'),
      ('Cop Upper - MSC','MSC-013'),
      ('Cox Hill','05BH803'),
      ('Cuthead Lake','05BD801'),
      ('Elbow Ranger Station','05BJ804'),
      ('Evan Thomas Creek','05BF825'),
      ('Forget-me-not Mountain','05BL809'),
      ('Ghost Diversion','05BG802'),
      ('Ghost Lake near Cochrane','05BE005'),
      ('Ghost RS','05BG801'),
      ('Gleichen - AARD','05BM801'),
      ('Jumpingpound Ranger Station','05BH802'),
      ('Lake Louise - MSC','MSC-028'),
      ('Lathom - AGCM','05CJ806'),
      ('Little Elbow Summit','05BJ805'),
      ('Lost Creek South','05BL811'),
      ('Lower Kananaskis Lake','05BF009'),
      ('Mossleigh - AARD','05BM802'),
      ('Mount Odlum','05BL812'),
      ('Neir - AARD','05BH804'),
      ('Pekisko','05BL814'),
      ('Pika Run','05BA815'),
      ('Priddis Observatory - AARD','05BF827'),
      ('Queenstown - AARD','05AC801'),
      ('Rolling Hills - AARD','05BN801'),
      ('Rosemary - AARD','05CJ804'),
      ('Sheep River II','05BL810'),
      ('South Ghost Headwaters','05BG803'),
      ('Spray Reservoir at Three Sisters Dam','05BC006'),
      ('Strathmore - AARD','05CE808'),
      ('Sullivan Creek','05BL807'),
      ('Sunshine Village','05BB803'),
      ('Three Isle Lake','05BF824')]),
    ('snow', 'http://www.environment.alberta.ca/apps/Basins/data/text/snow/',
     [('Akamina Pass','05AD803'),
      ('Flattop Mountain - Snotel','13A19S'),
      ('Gardiner Creek','05AA809'),
      ('Limestone Ridge','05DB802'),
      ('Little Elbow Summit','05BJ805'),
      ('Lost Creek South','05BL811'),
      ('Many Glacier - Snotel','13A27S'),
      ('Mount Odlum','05BL812'),
      ('Skoki Lodge','05CA805'),
      ('South Racehorse Creek','05AA817'),
      ('Sunshine Village','05BB803'),
      ('Three Isle Lake','05BF824')]),
    ('lake_level', 'http://www.environment.alberta.ca/apps/Basins/data/text/lake/',
     [('Barrier Lake near Seebe','05BF024'),
      ('Bassano Forebay','05BM907'),
      ('Bearspaw Reservoir near Calgary','05BH010'),
      ('Cascade Reservoir - Tau','TAU-002'),
      ('Chestermere Lake at South Outlet','05BM904'),
      ('Ghost Lake near Cochrane','05BE005'),
      ('Glenmore Reservoir at Calgary','05BJ008'),
      ('Horseshoe Forebay - Tau','TAU-004'),
      ('Lake Newell','05BN901'),
      ('Lower Kananaskis Lake','05BF009'),
      ('Spray Reservoir at Three Sisters Dam','05BC006'),
      ('Upper Kananaskis Lake','05BF005')])]
# Date stamp used both in output filenames and in the failure log.
today = datetime.date.today().isoformat()
log = 'The following data could not be retrieved:\n'
# Process each data category.  NOTE: Python 2 script (print statement,
# urllib.urlopen/urlretrieve live directly on the urllib module).
for category, url, sites in input:
    # Ensure the output directory exists.
    if not os.path.exists(category):
        os.makedirs(category)
    # Download the data in .csv format for each site.
    for name, id in sites:
        site_url = str.format('{0}{1}.csv', url, id)
        csv_path = str.format('{0}/{1} ; {2} ; {3}.csv',
                              category, today, id, name)
        # A 404 means the station id/url pattern is stale; log it instead of saving.
        if urllib.urlopen(site_url).getcode() == 404:
            log += str.format('{0} ({1})\n', site_url, name)
        else:
            print csv_path
            urllib.urlretrieve(site_url, csv_path)
# Ensure the log's output directory exists.
if not os.path.exists('logs'):
    os.makedirs('logs')
# Write the log of failed downloads for this run.
with open(str.format('logs/{0} ; scrape.txt', today), 'w') as logF:
    logF.write(log)
#!/usr/bin/env python3
#
# Development Order #8:
#
# This file is called when perfSonar goes to print the result, which
# has been returned from the tool.
#
# To test this file, a result is needed. A sample one has been provided
# in this directory. Use the following syntax:
# cat example-result.json | ./result-format text/plain
# cat example-result.json | ./result-format text/html
#
import jsontemplate
import pscheduler
import sys
from validate import result_is_valid
# Requested MIME format is the first CLI argument; default to plain text.
# (`format` and `input` shadow builtins -- kept for minimal diff in this script.)
try:
    format = sys.argv[1]
except IndexError:
    format = 'text/plain'
# Read the result JSON from stdin and validate it before printing anything.
input = pscheduler.json_load(exit_on_error=True)
valid, message = result_is_valid(input["result"])
if not valid:
    pscheduler.fail(message)
result = input["result"]
# NOTE: For more-complex restult structures, the 'jsontemplate' module
# used in the spec-format method might be a better choice than
# print().
if format == 'text/plain':
    # Print results of the test here, in plaintext
    print('Time: %s\n' % result['time'])
    # Print ssid_list
    print('BSSIDs:')
    for entry in result['ssid_list']:
        print("")
        print(entry["ssid"] + ":")
        print(f' Signal: {entry["signal"]}')
        print(f' Address: {entry["address"]}')
        print(f' Frequency: {entry["frequency"]}')
        print(f' Quality: {entry["quality"]}')
        print(f' Bitrates: {entry["bitrates"]}')
        print(f' Encrypted: {entry["encrypted"]}')
        print(f' Channel: {entry["channel"]}')
        print(f' Mode: {entry["mode"]}')
    print("")
elif format == 'text/html':
    # Print results of the test here, in html
    print('<table>')
    print(' <tr>')
    print(' <td><b>Time</b></td>')
    print(' <td>%s</td>' % result['time'])
    print(' </tr>')
    print(' <tr>')
    print(' <td colspan="2"><b>BSSIDs</b></td>')
    print(' </tr>')
    for entry in result['ssid_list']:
        print(f' <tr><td><b>SSID</b></td> <td>{entry["ssid"]}</td></tr>')
        print(f' <tr><td><b>Signal</b></td> <td>{entry["signal"]}</td></tr>')
        print(f' <tr><td><b>Address</b></td> <td>{entry["address"]}</td></tr>')
        print(f' <tr><td><b>Frequency</b></td> <td>{entry["frequency"]}</td></tr>')
        print(f' <tr><td><b>Quality</b></td> <td>{entry["quality"]}</td></tr>')
        print(f' <tr><td><b>Bitrates</b></td> <td>{entry["bitrates"]}</td></tr>')
        # BUG FIX: the Encrypted row previously printed entry["signal"].
        print(f' <tr><td><b>Encrypted</b></td> <td>{entry["encrypted"]}</td></tr>')
        print(f' <tr><td><b>Channel</b></td> <td>{entry["channel"]}</td></tr>')
        print(f' <tr><td><b>Mode</b></td> <td>{entry["mode"]}</td></tr>')
    print('</table>')
else:
    pscheduler.fail("Unsupported format '%s'" % format)
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # pin all work to GPU 0; must run before any CUDA init
from common import *
# TGS Salt images are 101x101; they are either resized to RESIZE x RESIZE or
# center-padded by PAD pixels on each side before entering the network.
SIZE = 101
RESIZE = 128
PAD = 14
# Slice bounds that recover the original SIZE x SIZE region from a padded image.
Y0, Y1, X0, X1 = PAD,PAD+SIZE,PAD,PAD+SIZE,
## preload resnet34
def load_old_pretrain_file(net, pretrain_file, skip=[]):
    """Copy weights from an old-style torchvision resnet34 checkpoint into *net*.

    Keys containing any substring in *skip* are left untouched; keys belonging
    to the UNet encoder stages are looked up in the checkpoint under the
    corresponding resnet layer names.  Returns *net* after loading.
    (*skip* is only read, never mutated, so the shared default list is safe.)
    """
    pretrain_state_dict = torch.load(pretrain_file)
    state_dict = net.state_dict()
    # (match substring, prefix in net key, prefix in checkpoint key) for the
    # plain prefix-swap stages; replaces the original copy-pasted if-blocks.
    prefix_map = [
        ('encoder2.', 'encoder2.1.', 'layer1.'),
        ('encoder3.', 'encoder3.', 'layer2.'),
        ('encoder4.', 'encoder4.', 'layer3.'),
        ('encoder5.', 'encoder5.', 'layer4.'),
    ]
    for key in list(state_dict.keys()):
        if any(s in key for s in skip):
            continue
        if 'encoder1.' in key:
            # encoder1 wraps conv1/bn1 in a Sequential: index 0 -> conv1, 1 -> bn1.
            key0 = key.replace('encoder1.0', 'conv1').replace('encoder1.1', 'bn1')
            state_dict[key] = pretrain_state_dict[key0]
            continue
        for marker, old, new in prefix_map:
            if marker in key:
                state_dict[key] = pretrain_state_dict[key.replace(old, new)]
                break
    net.load_state_dict(state_dict)
    return net
def valid_augment(image, mask, index, scale='pad'):
    """Deterministic validation-time preprocessing: resize or center-pad only."""
    # Keep untouched copies so callers can recover the originals later.
    cache = Struct(image=image.copy(), mask=mask.copy())
    if scale == 'resize':
        image, mask = do_resize2(image, mask, RESIZE, RESIZE)
    elif scale == 'pad':
        image, mask = do_center_pad2(image, mask, PAD)
        # Padding yields an even size; trim one row and column to fit the net.
        image = image[:-1, :-1]
        mask = mask[:-1, :-1]
    return image, mask, index, cache
def train_augment(image, mask, index, scale='pad'):
    """Random training-time augmentation followed by resize/pad.

    Keeps pristine copies in *cache*.  Each augmentation family fires with
    probability 0.5, then one variant is drawn uniformly.  The order of the
    np.random calls is part of the behavior (RNG stream) -- do not reorder.
    """
    cache = Struct(image = image.copy(), mask = mask.copy())
    # 50%: horizontal flip (applied to both image and mask).
    if np.random.rand() < 0.5:
        image, mask = do_horizontal_flip2(image, mask)
        pass
    # 50%: one geometric transform out of four.
    if np.random.rand() < 0.5:
        c = np.random.choice(4)
        if c==0:
            image, mask = do_random_shift_scale_crop_pad2(image, mask, 0.2) #0.125
        if c==1:
            image, mask = do_horizontal_shear2( image, mask, dx=np.random.uniform(-0.07,0.07) )
            pass
        if c==2:
            image, mask = do_shift_scale_rotate2( image, mask, dx=0, dy=0, scale=1, angle=np.random.uniform(0,15)) #10
        if c==3:
            image, mask = do_elastic_transform2(image, mask, grid=10, distort=np.random.uniform(0,0.15))#0.10
            pass
    # 50%: one photometric transform out of three (image only, mask untouched).
    if np.random.rand() < 0.5:
        c = np.random.choice(3)
        if c==0:
            image = do_brightness_shift(image,np.random.uniform(-0.1,+0.1))
        if c==1:
            image = do_brightness_multiply(image,np.random.uniform(1-0.08,1+0.08))
        if c==2:
            image = do_gamma(image,np.random.uniform(1-0.08,1+0.08))
        # if c==1:
        #     image = do_invert_intensity(image)
    # Final geometric normalization mirrors valid_augment.
    if scale == 'resize':
        image, mask = do_resize2(image, mask, RESIZE, RESIZE)
    if scale == 'pad':
        image, mask = do_center_pad2(image, mask, PAD)
        image, mask = image[:-1, :-1], mask[:-1, :-1]
    return image,mask,index,cache
### training ##############################################################
def do_valid( net, valid_loader, scale='pad'):
    """Run the net over the validation loader and compute loss/precision stats.

    Returns a 12-element np.array laid out as documented at the bottom:
    [seg_loss, dice, precision,  image_loss, precision_empty, empty_tp, empty_fp,
     pixel_loss, precision_non_empty, non_empty_tp, non_empty_fp, non_empty_fn].
    """
    valid_num = 0
    losses = np.zeros(4,np.float32)  # accumulators: seg, pixel, image losses + dice
    predicts = []
    truths = []
    corrects = []
    for input, truth, index, cache in valid_loader:
        input = input.cuda()
        truth = truth.cuda()
        with torch.no_grad():
            logit, logit_pixel, logit_image = data_parallel(net,input) #net(input)
            prob = F.sigmoid(logit)
            loss_seg, loss_pixel, loss_image = net.criterion(logit, logit_pixel, logit_image, truth)
            # loss = loss_seg + 0.1 * loss_pixel + 0.05 * loss_image
            dice = net.metric(logit, truth)
        # Weight the running sums by actual batch size (last batch may be short).
        batch_size = len(index)
        losses += batch_size*np.array(( loss_seg.item(), loss_pixel.item(), loss_image.item(), dice.item()))
        valid_num += batch_size
        if scale == 'resize':
            # Resize predictions back to the native SIZE x SIZE; truths come
            # from the cached, un-resized masks.
            prob_shape = prob.data.cpu().numpy().shape
            prob = prob.data.cpu().numpy()
            prob = np.array([do_resize(prob[i, 0, :, :], SIZE, SIZE) for i in range(batch_size)])
            prob = np.array(prob.reshape(batch_size, 1, SIZE, SIZE))
            predicts.append(prob)
            corrects.append(logit_image.data.cpu().numpy())
            for c in cache:
                truths.append(c.mask)
        elif scale == 'pad':
            # Crop both prediction and truth back to the original image window.
            prob = prob [:,:,Y0:Y1, X0:X1]
            predicts.append(prob.data.cpu().numpy())
            truth = truth[:,:,Y0:Y1, X0:X1]
            truths.append(truth.data.cpu().numpy())
            corrects.append(logit_image.data.cpu().numpy())
    assert(valid_num == len(valid_loader.sampler))
    losses = losses / valid_num
    #--------------------------------------------------------
    predicts = np.concatenate(predicts).squeeze()
    corrects = np.concatenate(corrects)
    if scale == 'resize':
        truths = np.array(truths)
    elif scale == 'pad':
        truths = np.concatenate(truths).squeeze()
    # c: per-image "predicted empty" flag from the image-level head.
    # NOTE(review): thresholding logit_image at 0.5 treats it as a probability;
    # confirm the head outputs probabilities, not raw logits.
    c = corrects < 0.5
    # An image is empty when its ground-truth mask has no positive pixel.
    empty_index = truths.reshape(valid_num, -1).sum(1) < 1
    non_empty_index = ~ empty_index
    empty_num = empty_index.sum()
    non_empty_num = non_empty_index.sum()
    # p: per-image "predicted empty" flag from the segmentation output.
    p = (predicts.reshape(valid_num, -1) > 0.5).sum(1) < 1
    t = empty_index
    empty_tp = ((p == 1) * ( t == 1)).sum() / valid_num
    # NOTE(review): empty_fp counts images that are truly empty but predicted
    # non-empty -- the tp/fp naming here is unconventional; verify downstream use.
    empty_fp = ((p == 0) * ( t == 1)).sum() / valid_num
    non_empty_tp = ((p == 0) * ( t == 0) * ( c == 0)).sum() / valid_num
    non_empty_fp = ((p == 0) * ( t == 0) * ( c == 1)).sum() / valid_num
    non_empty_fn = ((p == 1) * ( t == 0)).sum() / valid_num
    # Kaggle metric computed separately on empty and non-empty subsets, then
    # recombined as a weighted mean.
    precision, result, threshold = do_kaggle_metric(predicts[empty_index], truths[empty_index])
    precision_empty = precision.mean()
    correct_empty = corrects[empty_index].mean()
    precision, result, threshold = do_kaggle_metric(predicts[non_empty_index], truths[non_empty_index])
    precision_non_empty = precision.mean()
    correct_non_empty = corrects[non_empty_index].mean()
    precision = (empty_num * precision_empty + non_empty_num * precision_non_empty) / valid_num
    valid_loss = np.array([
        losses[0], losses[3], precision, # all images
        losses[2], precision_empty, empty_tp, empty_fp, # empty
        losses[1], precision_non_empty, non_empty_tp, non_empty_fp, non_empty_fn, # non-empty
    ])
    return valid_loss
### training ##############################################################
def run_train(fold_num, out_dir, initial_checkpoint=None, target_loss=0.845, target_save_loss=None, scale='pad', model_version='resnet34', loss_type='focal', depth_type=0,
              num_epoch=25, batch_size=16, schduler=None, sgd_lr=0.05, sgd_mmt=0.9, sgd_wd=1e-4, save_training_epoch_batch=5, restart=False):
    """Train one fold of the salt-segmentation UNet and return the record DataFrame.

    Checkpoints scoring >= target_loss on the local Kaggle metric are always
    saved; periodic iterations also save when the score >= target_save_loss.
    NOTE(review): 'schduler' is a pre-existing misspelling of 'scheduler',
    kept because it is part of the public signature.
    """
    if target_save_loss is None:
        # Default: periodic snapshots may score slightly below the "good" bar.
        target_save_loss = target_loss - 0.01
    ########-------------------------------------------############
    ## import model
    if model_version == 'resnet34':
        from model.model_resnet_dss import UNetResNet34 as Net
    elif model_version == 'se_resnext50':
        from model.model_senet_dss import UNetSEResNext50 as Net
    elif model_version == 'senet154':
        from model.model_senet_dss import UNetSENext154 as Net
    ## pre-train model: only used when starting from scratch.
    if initial_checkpoint is None:
        if model_version.startswith('resnet34'):
            pretrain_file = DATA_DIR + '/model/resnet34-333f7ec4.pth'
        elif model_version.startswith('se_resnext50'):
            pretrain_file = DATA_DIR + '/model/se_resnext50_32x4d-a260b3a4.pth'
        elif model_version.startswith('senet154'):
            pretrain_file = DATA_DIR + '/model/senet154-c7b49a05.pth'
    else:
        pretrain_file = None
        # Guard against resuming a checkpoint trained on a different fold.
        assert(('fold%s' % fold_num) in initial_checkpoint or ('fold-%s' % fold_num) in initial_checkpoint)
    ## record training details: number the xlsx record after existing ones.
    training_record_folder = out_dir + os.sep + 'train'
    # gb() presumably glob.glob from common -- TODO confirm.
    training_record_paths = sorted(gb(training_record_folder + '/*.xlsx'))
    training_counter = len(training_record_paths) + 1
    training_record = out_dir + '/train/training_record_%s.xlsx' % (str(100 + training_counter)[1:])
    ## setup -----------------
    os.makedirs(out_dir +'/checkpoint', exist_ok=True)
    os.makedirs(out_dir +'/train', exist_ok=True)
    os.makedirs(out_dir +'/backup', exist_ok=True)
    backup_project_as_zip(PROJECT_DIR, out_dir +'/backup/code.train.%s.zip'%IDENTIFIER)
    log = Logger()
    log.open(out_dir+'/log.train.txt',mode='a')
    log.write('\n--- [START %s] %s\n\n' % (IDENTIFIER, '-' * 64))
    # log.write('\tSEED = %u\n' % SEED)
    log.write('\tPROJECT_PATH = %s\n' % PROJECT_DIR)
    log.write('\tDATA_PATH = %s\n' % DATA_DIR)
    log.write('\t__file__ = %s\n' % __file__)
    log.write('\tout_dir = %s\n' % out_dir)
    log.write('\tfold_num = %s\n' % fold_num)
    log.write('\ttarget_loss = %s\n' % target_loss)
    log.write('\tt_save_loss = %s\n' % target_save_loss)
    log.write('\tloss_type = %s\n' % loss_type)
    log.write('\tdepth_type = %s\n' % depth_type)
    log.write('\tnum_epoch = %s\n' % num_epoch)
    log.write('\tmomentum = %s\n' % sgd_mmt)
    log.write('\tweight_decay = %s\n' % sgd_wd)
    log.write('\ttrain_record = %s\n' % training_record)
    log.write('\n')
    log.write('\t<additional comments>\n')
    log.write('\t ... \n')
    log.write('\n')
    ## dataset ----------------------------------------
    log.write('** dataset setting **\n')
    batch_size = batch_size
    train_dataset = TsgDataset('list_train%d_3600' % (fold_num), train_augment, 'train', scale=scale)
    train_loader = DataLoader(
        train_dataset,
        sampler = RandomSampler(train_dataset),
        #sampler = ConstantSampler(train_dataset,[31]*batch_size*100),
        batch_size = batch_size,
        drop_last = True,
        num_workers = 8,
        pin_memory = True,
        collate_fn = null_collate)
    valid_dataset = TsgDataset('list_valid%d_400' % (fold_num), valid_augment, 'train', scale=scale)
    valid_loader = DataLoader(
        valid_dataset,
        sampler = RandomSampler(valid_dataset),
        batch_size = batch_size,
        drop_last = False,
        num_workers = 8,
        pin_memory = True,
        collate_fn = null_collate)
    assert(len(train_dataset)>=batch_size)
    log.write('batch_size = %d\n'%(batch_size))
    log.write('train_dataset.split = %s\n'%(train_dataset.split))
    log.write('valid_dataset.split = %s\n'%(valid_dataset.split))
    log.write('\n')
    ## net ----------------------------------------
    log.write('** net setting **\n')
    net = Net(loss_type=loss_type, depth_type=depth_type).cuda()
    if initial_checkpoint is not None:
        log.write('\tinitial_checkpoint = %s\n' % initial_checkpoint)
        net.load_state_dict(torch.load(initial_checkpoint, map_location=lambda storage, loc: storage))
    if pretrain_file is not None:
        log.write('\tpretrain_file = %s\n' % pretrain_file)
        if model_version.startswith('resnet34'):
            net = load_old_pretrain_file(net, pretrain_file, skip=['num_batches_tracked', 'scale'])
        elif model_version.startswith('se_resnext50') or model_version.startswith('senet154'):
            net.load_pretrain(pretrain_file)
    ## optimizer ----------------------------------
    optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
                          lr=sgd_lr, momentum=sgd_mmt, weight_decay=sgd_wd)
    # A list-form schduler ['ReduceLROnPlateau', factor, patience] is expanded
    # into the actual torch scheduler object here.
    if isinstance(schduler, list) and schduler[0] == 'ReduceLROnPlateau':
        lr_factor = schduler[1]
        lr_patience = schduler[2]
        schduler = ReduceLROnPlateau(optimizer, mode='max', factor=lr_factor, patience=lr_patience, verbose=False, min_lr=1e-5)
        log.write('\nUsing ReduceLROnPlateau: factor = %.2f, patience = %d ...' % (lr_factor, lr_patience))
    log.write('\nnetwork: %s\n'%(type(net)))
    log.write('schduler: %s\n'%(type(schduler)))
    log.write('\n')
    ## record training ----------------------------------
    # Parallel lists, one entry per validation step, later dumped to xlsx.
    rate_list, iter_list, epoch_list = [], [], []
    valid_loss_list, valid_acc_list, valid_lb_list = [], [], []
    train_loss_list, trian_acc_list, batch_loss_list, batch_acc_list = [], [], [], []
    current_time, running_time, model_name_list, lovasz_list, fold_list = [], [], [], [], []
    start_iter = 0
    start_epoch= 0
    if initial_checkpoint is not None:
        # Resume iteration/epoch counters from the paired optimizer checkpoint.
        checkpoint = torch.load(initial_checkpoint.replace('_model','_optimizer'))
        if not restart:
            start_iter = checkpoint['iter' ]
            start_epoch = checkpoint['epoch']
        rate = get_learning_rate(optimizer) #load all except learning rate
        #optimizer.load_state_dict(checkpoint['optimizer'])
        adjust_learning_rate(optimizer, rate)
        pass
    len_train_dataset = len(train_dataset)
    num_iter_per_epoch = int(len_train_dataset * 1.0 / batch_size)
    # Hard cap of 300k iterations regardless of num_epoch.
    num_iters = min(int(start_iter + num_epoch * num_iter_per_epoch), 300 *1000)
    iter_smooth = 50   # window for the smoothed training loss
    iter_valid = 100   # run validation every this many iterations
    iter_save = [0, num_iters-1]\
        + list(range(0, 300 *1000, 500))
    ## start training here! ##############################################
    log.write('** start training here! **\n')
    log.write(' samples_per_epoch = %d\n\n'%len(train_dataset))
    log.write(' | ------------------- VILID ---------------------------------------------------------------- | ---- TRAIN ----| --- BATCH --- | \n')
    log.write(' | ---------- all -------| ------------ empty -----------| --------------- non-empty ----------- | \n')
    log.write(' rate iter epoch | loss acc lb | loss lb tp fp | loss lb tp fp fn | train_loss | batch_loss | time \n')
    log.write('----------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n')
    train_loss = np.zeros(6,np.float32)
    valid_loss = np.zeros(6,np.float32)
    batch_loss = np.zeros(6,np.float32)
    rate = 0
    iter = 0
    i = 0
    start = timer()
    while iter<num_iters: # loop over the dataset multiple times
        sum_train_loss = np.zeros(6,np.float32)
        sum = 0
        optimizer.zero_grad()
        for input, truth, index, cache in train_loader:
            len_train_dataset = len(train_dataset)
            batch_size = len(index)
            iter = i + start_iter
            epoch = (iter-start_iter)*batch_size/len_train_dataset + start_epoch
            num_samples = epoch*len_train_dataset
            # ---- periodic validation / checkpointing / record keeping ----
            if iter % iter_valid==0:
                net.set_mode('valid')
                valid_loss = do_valid(net, valid_loader, scale=scale)
                local_lb = valid_loss[2]
                # valid_loss = np.array([
                #     losses[0], losses[3], precision, # all images
                #     losses[2], precision_empty, empty_tp, empty_fp, # empty
                #     losses[1], precision_non_empty, non_empty_tp, non_empty_fp, non_empty_fn, # non-empty
                # ])
                net.set_mode('train')
                model_name = 'not_saved'
                ## save for good ones
                if local_lb >= target_loss:
                    log.write('\n')
                    log.write('\n save good model at iter: %5.1fk, local lb: %.6f\n\n' % (iter/1000, local_lb))
                    torch.save(net.state_dict(),out_dir +'/checkpoint/%08d_model_%.6f.pth'%(iter, local_lb))
                    torch.save({
                        'optimizer': optimizer.state_dict(),
                        'iter' : iter,
                        'epoch' : epoch,
                    }, out_dir +'/checkpoint/%08d_optimizer_%.6f.pth'%(iter, local_lb))
                    model_name = out_dir +'/checkpoint/%08d_model_%.6f.pth'%(iter, local_lb)
                    pass
                elif iter in iter_save and local_lb >= target_save_loss:
                    # Periodic snapshot: no score embedded in the filename.
                    torch.save(net.state_dict(),out_dir +'/checkpoint/%08d_model.pth'%(iter))
                    torch.save({
                        'optimizer': optimizer.state_dict(),
                        'iter' : iter,
                        'epoch' : epoch,
                    }, out_dir +'/checkpoint/%08d_optimizer.pth'%(iter))
                    model_name = out_dir +'/checkpoint/%08d_model.pth'%(iter)
                    pass
                print('\r',end='',flush=True)
                log.write('%0.5f %5.1f %6.1f | %0.3f %0.3f (%0.3f) | %0.3f (%0.3f) %0.3f %0.3f | %0.3f (%0.3f) %0.3f %0.3f %0.3f | %0.3f %0.3f | %0.3f %0.3f | %s \n' % (\
                    rate, iter/1000, epoch,
                    valid_loss[0], valid_loss[1], local_lb,
                    valid_loss[3], valid_loss[4], valid_loss[5], valid_loss[6],
                    valid_loss[7], valid_loss[8], valid_loss[9], valid_loss[10], valid_loss[11],
                    train_loss[0], train_loss[1],
                    batch_loss[0], batch_loss[1],
                    time_to_str((timer() - start))))
                time.sleep(0.01)
                # Append one row to the in-memory training record.
                rate_list.append(rate)
                iter_list.append(iter)
                epoch_list.append(epoch)
                valid_loss_list.append(valid_loss[0])
                valid_acc_list.append(valid_loss[1])
                valid_lb_list.append(local_lb)
                train_loss_list.append(train_loss[0])
                trian_acc_list.append(train_loss[1])
                batch_loss_list.append(batch_loss[0])
                batch_acc_list.append(batch_loss[1])
                current_time.append(strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
                running_time.append(time_to_str((timer() - start)))
                model_name_list.append(model_name)
                lovasz_list.append(loss_type)
                fold_list.append(fold_num)
                # Periodically flush the record to xlsx so a crash loses little.
                if int(epoch - start_epoch + 1) % save_training_epoch_batch == 0:
                    temp_df = pd.DataFrame({
                        'rate': rate_list, 'iter': iter_list, 'epoch': epoch_list,
                        'valid_loss': valid_loss_list, 'valid_acc': valid_acc_list, 'valid_lb': valid_lb_list,
                        'train_loss': train_loss_list, 'train_acc': trian_acc_list,
                        # NOTE(review): 'batch_acc' is fed batch_loss_list, not
                        # batch_acc_list -- looks like a copy/paste bug; confirm.
                        'batch_loss': batch_loss_list, 'batch_acc': batch_loss_list,
                        'run_time': running_time, 'current_time': current_time,
                        'lovasz': lovasz_list, 'fold': fold_list, 'model': model_name_list
                    })
                    temp_df = temp_df[['rate', 'iter', 'epoch', 'valid_loss', 'valid_acc', 'valid_lb',
                                       'train_loss', 'train_acc', 'batch_loss', 'batch_acc',
                                       'run_time', 'current_time', 'lovasz', 'fold', 'model']]
                    temp_df.to_excel(training_record, index=False)
            #learning rate schduler -------------
            if schduler is not None:
                if str(schduler).startswith('Snapshot'):
                    lr = schduler.get_rate(epoch - start_epoch)
                    if lr<0 : break
                    adjust_learning_rate(optimizer, lr)
                elif 'ReduceLROnPlateau' in str(schduler):
                    # Step once per (approximate) epoch on the validation lb.
                    if iter % int(len_train_dataset / batch_size) == 0:
                        schduler.step(valid_loss[2])
                else:
                    lr = schduler.get_rate(iter)
                    if lr<0 : break
                    adjust_learning_rate(optimizer, lr)
            rate = get_learning_rate(optimizer)
            # one iteration update -------------
            net.set_mode('train')
            input = input.cuda()
            truth = truth.cuda()
            logit, logit_pixel, logit_image = data_parallel(net,input) #net(input)
            loss_seg, loss_pixel, loss_image = net.criterion(logit, logit_pixel, logit_image, truth)
            # Deep-supervision weighting of the three heads.
            loss = loss_seg + 0.1 * loss_pixel + 0.05 * loss_image
            dice = net.metric(logit, truth)
            ## original SGD
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            # print statistics flush ------------
            batch_loss = np.array((
                loss.item(),
                dice.item(),
                0, 0, 0, 0,
            ))
            sum_train_loss += batch_loss
            sum += 1
            if iter%iter_smooth == 0:
                # Reset the smoothing window.
                train_loss = sum_train_loss/sum
                sum_train_loss = np.zeros(6,np.float32)
                sum = 0
            print('\r%0.5f %5.1f %6.1f | %0.3f %0.3f (%0.3f) |------------------------------------------------------------------->>> | %0.3f %0.3f | %0.3f %0.3f | %s ' % (\
                rate, iter/1000, epoch,
                valid_loss[0], valid_loss[1], valid_loss[2],
                train_loss[0], train_loss[1],
                batch_loss[0], batch_loss[1],
                time_to_str((timer() - start))), end='',flush=True)
            i=i+1
            pass #-- end of one data loader --
        # Dump the full record after every pass over the loader.
        df = pd.DataFrame({
            'rate': rate_list, 'iter': iter_list, 'epoch': epoch_list,
            'valid_loss': valid_loss_list, 'valid_acc': valid_acc_list, 'valid_lb': valid_lb_list,
            'train_loss': train_loss_list, 'train_acc': trian_acc_list,
            # NOTE(review): same 'batch_acc' <- batch_loss_list issue as above.
            'batch_loss': batch_loss_list, 'batch_acc': batch_loss_list,
            'run_time': running_time, 'current_time': current_time,
            'lovasz': lovasz_list, 'fold': fold_list, 'model': model_name_list
        })
        df = df[['rate', 'iter', 'epoch', 'valid_loss', 'valid_acc', 'valid_lb',
                 'train_loss', 'train_acc', 'batch_loss', 'batch_acc',
                 'run_time', 'current_time', 'lovasz', 'fold', 'model']]
        df.to_excel(training_record, index=False)
        pass #-- end of all iterations --
    if 0: #save last
        torch.save(net.state_dict(),out_dir +'/checkpoint/%d_model.pth'%(i))
        torch.save({
            'optimizer': optimizer.state_dict(),
            'iter' : i,
            'epoch' : epoch,
        }, out_dir +'/checkpoint/%d_optimizer.pth'%(i))
    log.write('\n')
    return df
# main #################################################################
if __name__ == '__main__':
    print( '%s: calling main function ... ' % os.path.basename(__file__))
    # NOTE(review): run_train() is called with no arguments, but fold_num and
    # out_dir have no defaults, so this raises TypeError as written -- confirm
    # the intended CLI entry point / supply arguments.  ('sucess' below is a
    # typo in a runtime string, left unchanged here.)
    run_train()
    print('\nsucess!')
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2014- ZwodahS
# github.com/ZwodahS/
# zwodahs.me
# @ZwodahS
"""
Every function that can be called is implemented as a class.
The class MUST have a few properties in order for it to be registered:
    allowed_context : a list containing the contexts it can run on
    args : allowed numbers of arguments; a JSTQLRuntimeException is raised if the argument count doesn't fit.
    name : the name of the function, e.g. "len"
Use a namespace for function names that are not part of the default package.
For example, if you are implementing a new sort, don't override sort; call it foo.sort instead.
A copy of the data will be passed to the function.
"""
from jpio.jstql import JSTQLRuntimeException
from operator import itemgetter, attrgetter
def _sort_func(context, reverse=False, key=None, keyType=None):
    """Sort context.mdata in place and return it.

    keyType selects how *key* is applied: "item" (default) uses itemgetter,
    "attr" uses attrgetter.  Raises JSTQLRuntimeException for an unknown
    keyType.
    """
    if not key:
        context.mdata.sort(reverse=reverse)
        return context.mdata
    keyType = keyType or "item"
    if keyType == "attr":
        context.mdata.sort(key=attrgetter(key), reverse=reverse)
    elif keyType == "item":
        context.mdata.sort(key=itemgetter(key), reverse=reverse)
    else:
        raise JSTQLRuntimeException(current_state=context.data, message="Unknown sort type {0}".format(keyType))
    return context.mdata


class LenFunction(object):
    """len() -- length of a list or dictionary."""
    name = "len"
    allowed_context = [list, dict]
    args = [0]
    description = "Get the length of a list or dictionary"
    usages = [ "len() : get the length" ]
    is_modifier = False

    @classmethod
    def run(cls, context, *args):
        return len(context.mdata)


class KeysFunction(object):
    """keys() -- all keys of a dictionary."""
    name = "keys"
    allowed_context = [dict]
    args = [0]
    description = "Return all keys of a dictionary"
    usages = [ "keys() : return all the keys" ]
    is_modifier = False

    @classmethod
    def run(cls, context, *args):
        # NOTE(review): reads context.data while the other functions read
        # context.mdata -- confirm this asymmetry is intentional.
        return list(context.data.keys())


class SortFunction(object):
    """sort() -- ascending sort by value or by key."""
    name = "sort"
    allowed_context = [list]
    args = [0, 1, 2]
    description = "Sort a list using the values or a key."
    usages = [ "sort() : sort by value",
               "sort(key) : sort by a key" ]
    is_modifier = True

    @classmethod
    def run(cls, context, *args):
        if len(args) == 0:
            return _sort_func(context)
        elif len(args) == 1:
            return _sort_func(context, key=args[0])
        else:
            # BUG FIX: the keyword was misspelled 'ketType', which raised
            # TypeError whenever sort(keyType, key) was called with two args.
            return _sort_func(context, key=args[1], keyType=args[0])


class RSortFunction(object):
    """rsort() -- descending sort by value or by key."""
    name = "rsort"
    allowed_context = [list]
    args = [0, 1, 2]
    description = "Reverse sort a list using the values or a key"
    usages = [ "rsort() : sort by value",
               "rsort(key) : sort by a key" ]
    is_modifier = True

    @classmethod
    def run(cls, context, *args):
        if len(args) == 0:
            return _sort_func(context, reverse=True)
        elif len(args) == 1:
            return _sort_func(context, key=args[0], reverse=True)
        else:
            # BUG FIX: same 'ketType' misspelling as SortFunction.
            return _sort_func(context, key=args[1], keyType=args[0], reverse=True)
class StringUpperFunction(object):
    """upper() -- convert the current string context to upper case."""
    name = "upper"
    allowed_context = [str]
    args = [0]
    description = "Change a string to upper case"
    usages = [ "upper()" ]
    is_modifier = True

    @classmethod
    def run(cls, context, *args):
        text = context.mdata
        return text.upper()
class StringLowerFunction(object):
    """lower() -- convert the current string context to lower case."""
    name = "lower"
    allowed_context = [str]
    args = [0]
    description = "Change a string to lower case"
    usages = [ "lower()" ]
    is_modifier = True

    @classmethod
    def run(cls, context, *args):
        text = context.mdata
        return text.lower()
# Registry consumed by the interpreter: every class listed here is exposed as
# a callable function by its `name` attribute.
functions = [SortFunction, RSortFunction, StringUpperFunction, StringLowerFunction, LenFunction, KeysFunction]
|
#!/usr/bin/env python
#
# Very basic example of using Python 3 and IMAP to iterate over emails in an 1un1 folder/label. Extracts data regarding
# reservations from booking.com and puts them in a dict for further processing.
#
#
import copy
import json
import sys
import imaplib
import email
import email.header
import datetime
from bs4 import BeautifulSoup
import account_data
from sql_server_processing import connect_to_sql_server
from utils import correct_entry_type
# Credentials are kept out of this source file in account_data.py.
EMAIL_ACCOUNT = account_data.EMAIL_ACCOUNT
PASSWORD = account_data.PASSWORD
# Use 'INBOX' to read inbox. Note that whatever folder is specified,
# after successfully running this script all emails in that folder
# will be marked as read.
EMAIL_FOLDER = "INBOX"
# Template describing one reservation e-mail; deep-copied per message before
# being filled in by process_html_part().
RESERVATION_DETAILS = {
    # RESERVATION DETAILS
    'Booked On': '', 'Modified On': '', 'Cancelled On': '', 'Booking Confirmation Id': '', 'Reservation Remarks': '',
    'Special Requests': '', 'Cancellation Fee': '', 'Guarantee Information': ' ', 'Total Price': ' ',
    # BUG FIX: 'Guest Count' previously had no value, so Python's implicit
    # string-literal concatenation fused it with the next key into the bogus
    # key 'Guest CountROOM' (and 'ROOM' / 'Guest Count' did not exist at all).
    'Commission Payable': ' ', 'Total After Tax': '', 'Guest Details': '', 'Guest Count': '',
    # ROOM
    'ROOM': '',
    'Check In Date': '', 'Check Out Date': '', 'Guest': '', 'Number of Nights': ' ', 'Number of Guests': ' ',
    'Smoking': '', 'Meal Plan': '',
    'Daily Room Rate Breakdown': {'Date': [], 'Rate Id': [], 'Name': [], 'Price': []},
    # PAYMENT DETAILS
    'Total Booking Cost': '', 'Credit Card Type': '', 'Name': '', 'Credit Card Number': '', 'Expiry': '', 'CVC': '',
    'Billing Address': '',
    'Add-Ons': {'Type': [], 'Nights': [], 'Guest Count': [], 'Price Mode': [], 'Price': []},
    # BOOKER CONTACT DETAILS
    'Booker Name': '', 'Booker Phone': '', 'Booker Email': '', 'Booker Address': ''
}
# Alternative per-day rate schema substituted in for Expedia bookings.
DAILY_ROOM_RATE_BREAKDOWN = {'Date': [], 'Rate': [], 'Extra Person Fee': [], 'Hotel Service Fee': [], 'Promo Name': []}
def process_html_part(soup, booked_via='Booking.com'):
    """
    Extracts the data to fill the RESERVATION_DETAILS dictionary.

    :param soup: BeautifulSoup object of the e-mail's HTML part
    :param booked_via: booking service the mail came from
                       ('Booking.com' or 'Expedia')
    :return: copied reservation dictionary filled with the extracted values
    """
    res_dict = copy.deepcopy(RESERVATION_DETAILS)
    # adapt the res_dict according to the booking service
    if booked_via == 'Expedia':
        # BUG FIX: deep-copy the template. Assigning the module-level dict
        # directly made every Expedia reservation append into the same
        # shared lists across calls.
        res_dict['Daily Room Rate Breakdown'] = copy.deepcopy(DAILY_ROOM_RATE_BREAKDOWN)
    room = soup.select('h4.section-header')[1].get_text()
    res_dict['ROOM'] = room
    for p in soup.select('table.section-content p.bold'):
        k = p.get_text().split(':')[0]
        print(k)
        if k in res_dict.keys():
            if k == 'Daily Room Rate Breakdown' or k == 'Add-Ons':
                # Tabular sections: the <th> elements are the column headers
                # and the <p> elements hold the cell values in row-major order.
                ths = p.find_next_sibling('table').select('th')
                ps = p.find_next_sibling('table').select('p')
                for idx, th in enumerate(ths):
                    # take every len(ths)-th cell, i.e. this header's column
                    res_dict[k][th.get_text()].append([correct_entry_type(cell.get_text()) for cell in ps[idx::len(ths)]])
            else:
                next_ps = p.find_next_siblings('p')
                if len(next_ps) > 1:
                    res_dict[k] = ','.join([correct_entry_type(p2.get_text()) for p2 in next_ps])
                elif next_ps:
                    res_dict[k] = correct_entry_type(next_ps[0].get_text())
                # else: label without a value paragraph -- keep the default
                # (the original crashed with IndexError here)
        else:
            print(k)
    return res_dict
def process_mailbox(M):
    """
    Reads the emails in the mailbox folder and processes them.

    Every Booking.com / Expedia reservation mail is parsed; each parsed dict
    is appended as one JSON line to 'res_dict.txt'.

    :param M: selected imaplib mailbox object
    :return: filled reservation dictionary of the last processed mail, or
             None if no matching mail (or no HTML part) was found
    """
    def _myconverter(o):
        # json.dumps fallback for values json cannot serialize natively.
        if isinstance(o, datetime.datetime):
            return o.__str__()

    res_dict = None
    rv, data = M.search(None, "ALL")
    if rv != 'OK':
        print("No messages found!")
        return
    for num in reversed(data[0].split()):
        # Use a distinct name for the fetch payload so it does not shadow the
        # search result `data` (the original reused `data`, which was
        # confusing even though the loop iterator was already evaluated).
        rv, msg_data = M.fetch(num, '(RFC822)')  # raw RFC822 bytes of the email
        if rv != 'OK':
            print("ERROR getting message", num)
            return
        msg = email.message_from_bytes(msg_data[0][1])
        hdr = email.header.make_header(email.header.decode_header(msg['Subject']))
        subject = str(hdr)
        # only reservation-related mails are of interest
        if 'Booking.com Booking #' not in subject and \
           'Booking.com Modification for #' not in subject and \
           'Booking.com Cancellation for #' not in subject and \
           'Expedia Booking #' not in subject:
            continue
        # depending on the booking service the reservation dict has to be adapted
        booked_via = 'Expedia' if 'Expedia' in subject else 'Booking.com'
        print('Message %s: %s' % (num, subject))
        print('Raw Date:', msg['Date'])
        # Now convert to local date-time
        date_tuple = email.utils.parsedate_tz(msg['Date'])
        if date_tuple:
            local_date = datetime.datetime.fromtimestamp(
                email.utils.mktime_tz(date_tuple))
            print("Local Date:", local_date.strftime("%a, %d %b %Y %H:%M:%S"))
        for part in msg.walk():
            # each part is either non-multipart, or another multipart message
            # that contains further parts... Message is organized like a tree
            if part.get_content_type() == 'text/plain':
                text = part.get_payload(decode=True).strip()
                print(text)  # prints the raw text
            if part.get_content_type() == 'text/html':
                html = part.get_payload(decode=True).strip()
                soup = BeautifulSoup(html, 'html.parser')
                res_dict = process_html_part(soup, booked_via=booked_via)
                print(res_dict)
                # append one JSON line per reservation for later inspection
                with open('res_dict.txt', 'a') as the_file:
                    the_file.write(json.dumps(res_dict, default=_myconverter) + '\n')
    return res_dict
def main():
    """Log in to the IMAP server, process EMAIL_FOLDER and push the result to SQL."""
    M = imaplib.IMAP4_SSL('imap.1und1.de', 993)  # implicit-SSL IMAP port
    try:
        rv, data = M.login(EMAIL_ACCOUNT, PASSWORD)
    except imaplib.IMAP4.error:
        print("LOGIN FAILED!!! ")
        sys.exit(1)
    print(rv, data)
    rv, mailboxes = M.list()
    if rv == 'OK':
        print("Mailboxes:")
        print(mailboxes)
    # BUG FIX: initialize res_dict so the SQL call below cannot hit an
    # unbound name when the folder cannot be selected (the original raised
    # NameError in that case).
    res_dict = None
    rv, data = M.select(EMAIL_FOLDER)
    if rv == 'OK':
        print("Processing mailbox...\n")
        res_dict = process_mailbox(M)
        M.close()
    else:
        print("ERROR: Unable to open mailbox ", rv)
    M.logout()
    connect_to_sql_server(res_dict=res_dict, test=True)


if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.