text stringlengths 8 6.05M |
|---|
import sys
import time
from threading import Thread, Lock
from spinners import Spinners
# ANSI escape sequence resetting the foreground color to the terminal default.
color_end = '\033[0;39m'
# Colored unicode symbols printed in front of the final message when the
# spinner is stopped with a status: blue info, green check, yellow warning, red cross.
status_symbols = {
    'info': '{0}ℹ{1}'.format('\033[34m', color_end),
    'success': '{0}✔{1}'.format('\033[32m', color_end),
    'warning': '{0}⚠{1}'.format('\033[33m', color_end),
    'error': '{0}✖{1}'.format('\033[31m', color_end)
}
# Module-level lock held while a spinner runs so overlapping spinners do not
# interleave their terminal output.
lock = Lock()
class LogSpinners(object):
    """Terminal spinner animated on stdout by a background thread; finishes
    with a colored status symbol (see ``status_symbols``)."""

    def __init__(self, options):
        super(LogSpinners, self).__init__()
        # Accept a bare string as shorthand for {'text': options}.
        if type(options) is str:
            options = {
                'text': options
            }
        # Merge caller options over the defaults (caller keys win).
        self.options = {**{
            'text': '',
            'color': 'cyan',
            'spinner': Spinners.dots
        }, **options}
        self.text = self.options['text']
        self.spinner = self.options['spinner']
        self.frames = self.spinner.value['frames']      # animation frames
        self.interval = self.spinner.value['interval']  # frame delay, in ms
        self.frame_index = 0
        self.enabled = False  # cleared by stop() to end the animation loop
        self.status = None    # status-symbol key printed once stopped

    def output_log(self):
        """Animation loop run on the worker thread.

        Redraws the spinner line until ``stop()`` clears ``enabled``, prints
        the final status line (if a known status was given), then releases
        the module lock acquired in ``start()``.
        """
        while self.enabled:
            frame = self.frames[self.frame_index]
            # '\r' rewrites the current terminal line in place.
            output = "\r{0} {1}".format(frame, self.text)
            sys.stdout.write(output)
            sys.stdout.flush()
            self.frame_index = (self.frame_index + 1) % len(self.frames)
            # interval is in milliseconds
            time.sleep(0.001 * self.interval)
        if self.status in status_symbols:
            symbol = status_symbols[self.status]
            output = "\r{0} {1}\n".format(symbol, self.text)
            sys.stdout.write(output)
            # sys.stdout.flush()
        lock.release()

    def start(self):
        """Acquire the shared lock and launch the animation thread."""
        self.enabled = True
        # Held until output_log() finishes, so concurrent spinners queue up.
        lock.acquire()
        t = Thread(target=self.output_log)
        t.start()

    def stop(self, status=None):
        """Ask the animation thread to finish.

        :param status: optional symbol key: 'info', 'success', 'warning', 'error'.
        """
        self.enabled = False
        self.status = status

    def update(self, text):
        """Change the message shown next to the spinner."""
        self.text = text
|
'''
Created on Mar 30, 2011
@author: jason
'''
import Users
import simplejson
import datetime
import bson
import tornado.web
from Auth.AuthHandler import ajax_login_authentication
from Map.BrowseTripHandler import BaseHandler
class MessageHandler(BaseHandler):
    """Helper handler that delivers a message notification from one user to another."""

    def Send(self, source_id, dest_id, message, type):
        """Build a notification from the sender's profile and push it onto the
        recipient's user document; non-system messages are also recorded in the
        sender's outbox."""
        sender = self.syncdb.users.find_one({"user_id": bson.ObjectId(source_id)})
        notification = Users.Notification.MessageNotificationGenerator(
            type, sender['username'], sender['slug'], sender['picture'],
            datetime.datetime.utcnow(), sender['user_id'], message)
        dest_query = {'user_id': bson.ObjectId(dest_id)}
        # The recipient gets the notification in both the "unseen" list and the
        # permanent history, plus the raw message itself.
        for field, payload in (('new_notifications', notification.notification),
                               ('notifications', notification.notification),
                               ('message_request_receive', message)):
            self.syncdb.users.update(dest_query, {'$addToSet': {field: payload}})
        # System messages are not recorded in the sender's outbox.
        if type != 'system_message':
            self.syncdb.users.update({'user_id': bson.ObjectId(source_id)},
                                     {'$addToSet': {'message_send': message}})
class PostMessageHandler(BaseHandler):
    """Accepts a POSTed message and fans it out to a list of users by slug."""

    @ajax_login_authentication
    def post(self):
        message = self.get_argument('message')
        slugs = simplejson.loads(self.get_argument('slugs'))
        notification = Users.Notification.MessageNotificationGenerator(
            'message_request', self.current_user['username'], self.current_user['slug'],
            self.current_user['picture'], datetime.datetime.utcnow(),
            self.current_user['user_id'], message)
        # Each recipient gets the notification (unseen + history) and the message.
        for user_slug in slugs['slugs']:
            recipient = {'slug': user_slug}
            for field, payload in (('new_notifications', notification.notification),
                                   ('notifications', notification.notification),
                                   ('message_request_receive', message)):
                self.syncdb.users.update(recipient, {'$addToSet': {field: payload}})
        # Record the message once in the sender's outbox.
        self.syncdb.users.update(
            {'user_id': bson.ObjectId(self.current_user['user_id'])},
            {'$addToSet': {'message_send': message}})
|
from dwave_qbsolv import QBSolv
import matplotlib.pyplot as plt
import numpy as np
import minorminer
import networkx as nx
from dwave.system.composites import FixedEmbeddingComposite
from dwave.system.composites import EmbeddingComposite
from dwave.system.samplers import DWaveSampler
# Functions
def mapseq(F):
    """Convert a one-hot assignment matrix to a sequence of vertex indices.

    :param F: array of shape (r, m*n); row i is the one-hot indicator of the
        cell visited at step i (exactly one entry equal to 1 per row).
    :return: 1-D float array S with S[i] = column index of the 1 in row i.
    """
    S = np.zeros(F.shape[0])
    for i in range(F.shape[0]):
        # BUG FIX: np.asscalar was deprecated and removed in NumPy 1.23;
        # ndarray.item() is the supported equivalent (still raises if the
        # row does not contain exactly one matching index).
        S[i] = np.where(F[i, :] == 1)[0].item()
    return S
# Input
# r: number of steps in the cycle (length of the vertex sequence)
r=16
# Puzzle grid: 1 = white circle, -1 = black circle, 0 = empty cell
In=x=np.array([[ 0, 0, 0, 0],
               [ 1, 0, 0, 1],
               [ 0, 0,-1, 0],
               [ 0, 0, 1, 0]])
# Extract dimensions
(m,n)=In.shape
# Declare Coefficients
# QUBO coefficient matrix over r*m*n binary variables; variable i*m*n+j means
# "step i of the path visits cell j".
Coefficients=np.zeros((r*m*n,r*m*n),dtype='float32')
# Valid Map (sequence of vertices) Condition
# Penalty of form (sum_j x_ij - 1)^2: each step occupies exactly one cell.
for i in range(r):
    for j in range(m*n):
        Coefficients[i*m*n+j,i*m*n+j]-=2
        for k in range(m*n):
            Coefficients[i*m*n+j,i*m*n+k]+=1
# Injective Map (Vertex appears only once) Condition
# Penalizes the same cell being used by more than one step.
for j in range(m*n):
    for i in range(r):
        Coefficients[i*m*n+j,i*m*n+j]-=1
        for k in range(r):
            Coefficients[i*m*n+j,k*m*n+j]+=1
# Forming a cycle Condition
# Ind collects every ordered cell pair that is NOT grid-adjacent; consecutive
# steps landing on such a pair are penalized, forcing a connected path.
Ind=set({})
for j1 in range(m*n):
    for j2 in range(m*n):
        Ind.update({(j1,j2)})
# Remove right-, down-, left- and up-neighbor pairs from the penalty set.
for i in range(m):
    for j in range(n-1):
        Ind-={(j+i*n,j+i*n+1)}
for i in range(m-1):
    for j in range(n):
        Ind-={(j+i*n,j+i*n+n)}
for i in range(m):
    for j in range(1,n):
        Ind-={(j+i*n,j+i*n-1)}
for i in range(1,m):
    for j in range(n):
        Ind-={(j+i*n,j+i*n-n)}
for i in range(r-1):
    for (j1,j2) in Ind:
        Coefficients[i*m*n+j1,(i+1)*m*n+j2]+=1
# Close the loop: the last step must also be adjacent to the first.
for (j1,j2) in Ind:
    Coefficients[(r-1)*m*n+j1,(0)*m*n+j2]+=1
# White circles Condition
# For a cell marked 1, straight-through step patterns (previous and next step
# on opposite sides of the cell) get a negative (favourable) QUBO weight.
for i in range(1,r-1):
    for j in range(m*n):
        if (In[j//n,j%n]==1)&(0<j%n<n-1):
            Coefficients[(i-1)*m*n+j-1,(i+1)*m*n+j+1]-=0.5
            Coefficients[(i-1)*m*n+j+1,(i+1)*m*n+j-1]-=0.5
        if (In[j//n,j%n]==1)&(0<j//n<m-1):
            Coefficients[(i-1)*m*n+j-n,(i+1)*m*n+j+n]-=0.5
            Coefficients[(i-1)*m*n+j+n,(i+1)*m*n+j-n]-=0.5
# Same constraint across the cycle seam: step triple (r-2, r-1, 0).
for j in range(m*n):
    if (In[j//n,j%n]==1)&(0<j%n<n-1):
        Coefficients[(r-2)*m*n+j-1,(0)*m*n+j+1]-=0.5
        Coefficients[(r-2)*m*n+j+1,(0)*m*n+j-1]-=0.5
    if (In[j//n,j%n]==1)&(0<j//n<m-1):
        Coefficients[(r-2)*m*n+j-n,(0)*m*n+j+n]-=0.5
        Coefficients[(r-2)*m*n+j+n,(0)*m*n+j-n]-=0.5
# And the seam triple (r-1, 0, 1).
for j in range(m*n):
    if (In[j//n,j%n]==1)&(0<j%n<n-1):
        Coefficients[(r-1)*m*n+j-1,(1)*m*n+j+1]-=0.5
        Coefficients[(r-1)*m*n+j+1,(1)*m*n+j-1]-=0.5
    if (In[j//n,j%n]==1)&(0<j//n<m-1):
        Coefficients[(r-1)*m*n+j-n,(1)*m*n+j+n]-=0.5
        Coefficients[(r-1)*m*n+j+n,(1)*m*n+j-n]-=0.5
# Black circles Condition
# For a cell marked -1, corner (turning) step patterns through the cell get a
# negative (favourable) QUBO weight; one branch per corner orientation.
for i in range(1,r-1):
    for j in range(m*n):
        if (In[j//n,j%n]==-1)&(0<j%n)&(0<j//n):
            Coefficients[(i-1)*m*n+j-1,(i+1)*m*n+j-n]-=0.5
            Coefficients[(i-1)*m*n+j-n,(i+1)*m*n+j-1]-=0.5
        if (In[j//n,j%n]==-1)&(j%n<n-1)&(0<j//n):
            Coefficients[(i-1)*m*n+j-n,(i+1)*m*n+j+1]-=0.5
            Coefficients[(i-1)*m*n+j+1,(i+1)*m*n+j-n]-=0.5
        if (In[j//n,j%n]==-1)&(j%n<n-1)&(j//n<m-1):
            Coefficients[(i-1)*m*n+j+n,(i+1)*m*n+j+1]-=0.5
            Coefficients[(i-1)*m*n+j+1,(i+1)*m*n+j+n]-=0.5
        if (In[j//n,j%n]==-1)&(0<j%n)&(j//n<m-1):
            Coefficients[(i-1)*m*n+j+n,(i+1)*m*n+j-1]-=0.5
            Coefficients[(i-1)*m*n+j-1,(i+1)*m*n+j+n]-=0.5
# Seam case: step triple (r-2, r-1, 0).
for j in range(m*n):
    if (In[j//n,j%n]==-1)&(0<j%n)&(0<j//n):
        Coefficients[(r-2)*m*n+j-1,(0)*m*n+j-n]-=0.5
        Coefficients[(r-2)*m*n+j-n,(0)*m*n+j-1]-=0.5
    if (In[j//n,j%n]==-1)&(j%n<n-1)&(0<j//n):
        Coefficients[(r-2)*m*n+j-n,(0)*m*n+j+1]-=0.5
        Coefficients[(r-2)*m*n+j+1,(0)*m*n+j-n]-=0.5
    if (In[j//n,j%n]==-1)&(j%n<n-1)&(j//n<m-1):
        Coefficients[(r-2)*m*n+j+n,(0)*m*n+j+1]-=0.5
        Coefficients[(r-2)*m*n+j+1,(0)*m*n+j+n]-=0.5
    if (In[j//n,j%n]==-1)&(0<j%n)&(j//n<m-1):
        Coefficients[(r-2)*m*n+j+n,(0)*m*n+j-1]-=0.5
        Coefficients[(r-2)*m*n+j-1,(0)*m*n+j+n]-=0.5
# Seam case: step triple (r-1, 0, 1).
for j in range(m*n):
    if (In[j//n,j%n]==-1)&(0<j%n)&(0<j//n):
        Coefficients[(r-1)*m*n+j-1,(1)*m*n+j-n]-=0.5
        Coefficients[(r-1)*m*n+j-n,(1)*m*n+j-1]-=0.5
    if (In[j//n,j%n]==-1)&(j%n<n-1)&(0<j//n):
        Coefficients[(r-1)*m*n+j-n,(1)*m*n+j+1]-=0.5
        Coefficients[(r-1)*m*n+j+1,(1)*m*n+j-n]-=0.5
    if (In[j//n,j%n]==-1)&(j%n<n-1)&(j//n<m-1):
        Coefficients[(r-1)*m*n+j+n,(1)*m*n+j+1]-=0.5
        Coefficients[(r-1)*m*n+j+1,(1)*m*n+j+n]-=0.5
    if (In[j//n,j%n]==-1)&(0<j%n)&(j//n<m-1):
        Coefficients[(r-1)*m*n+j+n,(1)*m*n+j-1]-=0.5
        Coefficients[(r-1)*m*n+j-1,(1)*m*n+j+n]-=0.5
# Every black circle must actually be visited by some step
# (same exactly-once penalty shape as the injectivity condition above).
for j in range(m*n):
    if In[j//n,j%n]==-1:
        for i in range(r):
            Coefficients[i*m*n+j,i*m*n+j]-=2
            for k in range(r):
                Coefficients[i*m*n+j,k*m*n+j]+=1
# Filling The Coefficients
# Fold the dense matrix into the lower-triangular dict form QBSolv expects,
# summing the symmetric off-diagonal entries.
D=dict()
for i in range(r*m*n):
    for j in range(i+1):
        if i!=j:
            D[(i,j)]= Coefficients[i,j]+Coefficients[j,i]
        elif i==j:
            D[(i,j)]= Coefficients[i,j]
# Solve the Puzzle
use_qpu=True
if use_qpu:
    solver_limit = 256
    G = nx.complete_graph(solver_limit)
    system = DWaveSampler(token='DEV-***') #Replace 'DEV-***' by the API token
    embedding = minorminer.find_embedding(D.keys(), system.edgelist)
    print(embedding)
    # SECURITY NOTE(review): a live-looking API token is hard-coded below; it
    # should be revoked and read from an environment variable instead.
    res = QBSolv().sample_qubo(D, solver=FixedEmbeddingComposite(system, embedding), solver_limit=solver_limit,token='DEV-6189564036d19f88b3a555b4175a353d6d2c0218', num_reads=20)
    #Emb = EmbeddingComposite(DWaveSampler(token='DEV-6189564036d19f88b3a555b4175a353d6d2c0218'))
    #res = Emb.sample_qubo(D, num_reads=10000)
else:
    res = QBSolv().sample_qubo(D,num_repeats=20)
samples = list(res.samples())
energy = list(res.data_vectors['energy'])
print(samples)
print(energy)
# Represent the Results
for i in range(len(samples)):
    result = samples[i]
    output = []
    # ignore ancillary variables, which are all negative, only get positive bits
    for x in range(r*m*n):
        output.append(result[x])
    output = np.array(output)#DisplayOut(In,np.array(output),r)
    # Reshape to (step, cell) one-hot rows and decode into a vertex sequence.
    F=output.reshape(r,m*n)
    output=mapseq(F)
    print("energy: {}_____________________________".format(energy[i]))
    print(output)
|
#!/usr/bin/env python
#
# (c) Grant Rotskoff, 2013
# maintainer: gmr1887@gmail.com
# license: GPL-3
usage="./UmbrellaIntegrate.py metadata.txt path.dat mean-force.dat pmf.dat"
### parse the args ###
# NOTE: this is a Python 2 script (print statements below).
from sys import argv
try:
    import numpy as np
except:
    print "You must have numpy to use this script! Try \">$ module load python\" on clusters"
    exit(0)
# Expect exactly four positional arguments, per `usage`.
if len(argv)==5:
    metadata=argv[1]
    pathfile=argv[2]
    mfrcfile=argv[3]
    freefile=argv[4]
else:
    print usage
    exit(0)
#####################
### functions ###
def diff(a,b):
    """Return the componentwise difference ``b - a`` of two equal-length vectors."""
    return [b[k] - av for k, av in enumerate(a)]
def dist(a,b):
    """Euclidean distance between points ``a`` and ``b``."""
    # The diff() helper is inlined here to keep this function self-contained.
    delta = [b[k] - av for k, av in enumerate(a)]
    return np.sqrt(np.inner(delta, delta))
def zip(a,b):
    """Pair elements of ``a`` and ``b`` into two-element lists.

    NOTE(review): this shadows the builtin ``zip``; kept under the same name
    because later code in this script calls it.
    """
    return [[a[k], b[k]] for k in range(len(a))]
def proj(a,b):
    """Project vector ``b`` onto vector ``a``; returns the projection as a list."""
    scale = np.dot(a,b)/np.inner(a,a)
    return [scale * component for component in a]
#################
### get the data ###
# Read the metadata once (sample-file name in column 0, spring constant k in column 2).
with open(metadata, 'r') as meta_f:
    meta_cols = [l.split() for l in meta_f.readlines()]
files = [cols[0] for cols in meta_cols]
ks = [float(cols[2]) for cols in meta_cols]
# BUG FIX: the path file given on the command line (argv[2]) was ignored in
# favor of a hard-coded 'path.dat'.
with open(pathfile, 'r') as path_f:
    path = [[float(x) for x in l.split()] for l in path_f.readlines()]
####################
### compute the displacement from the window center ###
### (projected onto the path) ###
avgs = []
for i, fname in enumerate(files):
    # Skip the 3-line header of each sample file.
    with open(fname, 'r') as f:
        dat = [[float(x) for x in l.split()] for l in f.readlines()[3:]]
    if i > 0:
        bvec = diff(path[i], path[i-1])     # backward tangent
    if i < len(files) - 1:
        fvec = diff(path[i+1], path[i])     # forward tangent
    # for i=0, i=len(path) default to the forward / backward projections
    disps = []
    # BUG FIX: the interior-window test was `i < len(files)`, which is true for
    # every i > 0, so the last window reused a stale `fvec` and its dedicated
    # backward-only branch below was unreachable.
    if 0 < i < len(files) - 1:
        for pt in dat:
            disp = diff(pt, path[i])
            bprj = -dist(proj(bvec, disp), bvec)
            fprj = dist(proj(fvec, disp), [0]*len(fvec))
            # Keep whichever projection is smaller in magnitude.
            if abs(bprj) > fprj:
                disp_prj = fprj
            else:
                disp_prj = bprj
            disps.append(disp_prj)
    elif i == 0:
        for pt in dat:
            disp = diff(pt, path[i])
            disps.append(dist(proj(fvec, disp), [0]*len(fvec)))
    elif i == len(files) - 1:
        for pt in dat:
            disp = diff(pt, path[i])
            disps.append(-dist(proj(bvec, disp), bvec))
    # Mean restoring force for this window: -k * <displacement>.
    avgs.append(-np.mean(disps)*ks[i])
    # print() with a single argument behaves the same under Python 2 and 3.
    print("Average Force on pathpoint %d is %8.3f" % (i, avgs[i]))
######################################################
### compute the pmf ###
# project the path onto a line (cumulative arc length along the path)
# NOTE: renamed from `proj` to stop shadowing the proj() helper defined above.
arc = [0]
pathd = 0
for i in range(len(path)-1):
    pathd += dist(path[i], path[i+1])
    arc.append(pathd)
# write a data file with the interpolated average force
xpts = [x*pathd/10000 for x in range(10000)]
mfrc = np.interp(xpts, arc, avgs)
mfrcplot = zip(xpts, mfrc)
# BUG FIX: both output files were opened and never closed; `with` guarantees
# the buffers are flushed to disk.
with open(mfrcfile, 'w') as forcefile:
    for pt in mfrcplot:
        forcefile.write("%8.3f %8.3f\n" % tuple(pt))
# integrate to get the PMF
with open(freefile, 'w') as pmffile:
    for i in range(10000):
        pmffile.write("%8.3f %8.3f\n" % (xpts[i], np.trapz(mfrc[:i], xpts[:i])))
#######################
#avgf=open('string-force.dat','w')
#avgf.write('x\ty\t\z\tf\n')
#for i in range(len(avgs)):
#    avgf.write('%8.3f %8.3f %8.3f'%tuple(path[i]))
#    avgf.write(' %8.3f\n'%avgs[i])
|
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
#from django.contrib import admin
#admin.autodiscover()
# URL routing table (legacy Django style: `patterns` with dotted-string views;
# `django.conf.urls.defaults` was removed in later Django releases).
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'urlshot.views.home', name='home'),
    # url(r'^urlshot/', include('urlshot.foo.urls')),
    url(r'^$', 'urlshot.urlshot_app.views.get', name='home'),
    # JSON API: the captured group is the URL to shorten.
    url(r'^api/([a-zA-Z0-9#@=/_$.:-]+)$', 'urlshot.urlshot_app.views.getJSON'),
    # Short-code lookup and redirect.
    url(r'^[a-zA-Z0-9]+/?$', 'urlshot.urlshot_app.views.redirect_url'),
    #url(r'^.+$', 'urlshot.urlshot_app.views.get', name='home'),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    #url(r'^admin/', include(admin.site.urls)),
)
|
"""
CCT 建模优化代码
求三角形面积,展示P2类的使用方法
作者:赵润晓
日期:2021年5月3日
"""
# 因为要使用父目录的 cctpy 所以加入
from os import error, path
import sys
sys.path.append(path.dirname(path.abspath(path.dirname(__file__))))
from cctpy import *
# 方法一:海伦公式
# 定义三角形三个顶点
A = P2(21, 8)
B = P2(33, 28)
C = P2(39, 6)
# 求三条边的长度
AB_length = (A-B).length()
AC_length = (A-C).length()
BC_length = (B-C).length()
# 求 p
p = (AB_length+AC_length+BC_length)/2
# 海伦公式
S = math.sqrt(p*(p-AB_length)*(p-AC_length)*(p-BC_length))
# 输出
print(S) # 191.99999999999994
# 方法一:正弦面机公式 S = (1/2)ab*sin(c)
# 定义三角形三个顶点
A = P2(21, 8)
B = P2(33, 28)
C = P2(39, 6)
# 求 AB 和 BC 矢量,用于求其长度和夹角
AB_vector = B-A
AC_vector = C-A
# 求矢量 AB 和 BC 夹角 ∠BAC。a.angle_to(b)求矢量 a 到 b 的角度,
angle_BAC = AC_vector.angle_to(AB_vector)
# 求 矢量 AB 和 BC 的长度
AB_length = AB_vector.length()
AC_length = AC_vector.length()
# 求面积
S = 0.5*AB_length*AC_length*math.sin(angle_BAC)
# 输出
print(S)
#
a=P2()
b=P2()
included_angle = a.angle_to(b)
if included_angle > math.pi:
included_angle = included_angle - math.pi |
#!/usr/bin/python
import sys
from ROOT import gROOT
from ROOT import TFile
from ROOT import TKey
def GetKeyNames( self ):
    """Return the names of all keys stored in this TFile.

    Bound onto TFile below, so any file object can list its keys.
    BUG FIX: the original iterated the module-global ``MyFile`` instead of
    ``self``, so it always listed the keys of one hard-coded file regardless
    of which TFile the method was called on.
    """
    return [key.GetName() for key in self.GetListOfKeys()]
# Attach the helper as a TFile method (this is a Python 2 script: `print` statements).
TFile.GetKeyNames = GetKeyNames
MyFile=TFile("DeepSingle+DelphMET_NoPU_DiBoson_his.root")
keyList = MyFile.GetKeyNames()
print "\nKeys in file:", keyList
# scenarios and samples
# NOTE(review): 'TreeSR' looks like a typo for 'TreeSR4', and this list is
# never actually consulted below.
conditions=['TreeSR0','TreeSR1','TreeSR2','TreeSR3','TreeSR','TreeSR5','TreeSR6','TreeSR7','TreeSR8','TreeSR9','TreeSR10','TreeB0','TreeB1','TreeB2','TreeB3']
gROOT.ProcessLine(".L denemeStack.C")
for samp in keyList:
    print "Key :", samp
    # NOTE(review): `not` binds only to the first comparison, so this reads
    # `(not samp=='TreeSR0') or samp=='TreeB0' or ...`, which is true for
    # every key except 'TreeSR0'; `samp in conditions` was probably intended.
    if not samp=='TreeSR0' or samp=='TreeB0' or samp=='TreeSR1' or samp=='TreeSR2' or samp=='TreeSR3' or samp=='TreeSR4' or samp=='TreeSR5' or samp=='TreeSR6' or samp=='TreeSR7' or samp=='TreeSR8' or samp=='TreeSR9' or samp=='TreeSR10' or samp=='TreeB1' or samp=='TreeB2' or samp=='TreeB3':
        from ROOT import denemeStack
        denemeStack(samp)
    else:
        break
print "THE END"
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource
import requests
import os
class ImagePivot(Resource):
    """Girder REST resource exposing Solr facet-pivot counts for images."""
    def __init__(self):
        # NOTE(review): Resource.__init__ is never invoked here — confirm the
        # installed Girder version tolerates skipping it.
        self.resourceName = 'imagepivot'
        self.route('GET', (), self.getImagePivot)
    @access.public
    def getImagePivot(self, params):
        # Default pivot: image height x width; default query matches everything.
        pivot = params['pivot'] if 'pivot' in params else 'tiff_imagelength,tiff_imagewidth'
        query = params['query'] if 'query' in params else '*'
        # Solr endpoint base comes from the environment; JSON output requested.
        base = os.environ['IMAGE_SPACE_SOLR'] + '/select?wt=json&indent=true'
        # NOTE(review): `query` and `pivot` are concatenated without
        # URL-encoding, and TLS certificate verification is disabled
        # (verify=False) — both worth revisiting.
        url = base + '&q=' + query + '&rows=1&facet=on&facet.pivot=' + pivot
        result = requests.get(url, verify=False).json()
        return result['facet_counts']['facet_pivot'][pivot]
    getImagePivot.description = Description('Performs pivot aggregation on image database')
|
import chainer
import chainer.functions as F
import chainer.links as L
class Generator(chainer.Chain):
    """DCGAN-style generator: 100-d noise vector -> 1x28x28 image in [0, 1]."""
    def __init__(self):
        super(Generator, self).__init__(
            l1=L.Linear(100,50*5*5),
            dcv1=L.Deconvolution2D(in_channels=50,out_channels=10,ksize=3,stride=3),
            dcv2=L.Deconvolution2D(in_channels=10,out_channels=1,ksize=2,stride=2,pad=1),
            bc1=L.BatchNormalization(size=50),
            bc2=L.BatchNormalization(size=10))
        self.in_size=100        # dimensionality of the input noise vector
        self.imshape=(1,28,28)  # shape of a generated image
    def __call__(self, x, train=True):
        # NOTE(review): the `train` flag is accepted but unused here.
        # Project the noise to a (50, 5, 5) feature map, then upsample twice.
        h1 = F.leaky_relu(self.bc1(F.reshape(self.l1(x),(x.data.shape[0],50,5,5))))
        h2 = F.leaky_relu(self.bc2(self.dcv1(h1)))
        # Sigmoid keeps output pixel values in [0, 1].
        return F.sigmoid(self.dcv2(h2))
class Discriminator(chainer.Chain):
    """DCGAN-style discriminator: 1x28x28 image -> scalar realness score in [0, 1]."""
    def __init__(self):
        super(Discriminator, self).__init__(
            conv1=L.Convolution2D(in_channels=1,out_channels=10,ksize=5,stride=2,pad=2),
            conv2=L.Convolution2D(in_channels=10,out_channels=50,ksize=3,stride=1,pad=0),
            bc1=L.BatchNormalization(size=10),
            bc2=L.BatchNormalization(size=50),
            l1=L.Linear(4*4*50, 1))
        self.in_size = (1,28,28)  # expected input image shape
        self.out_size = 1         # single realness logit per image
        self.imshape=(1,28,28)
    def __call__(self, x, train=True):
        h1 = F.max_pooling_2d(self.bc1(self.conv1(x)),4,stride=2)
        h2 = F.relu(self.bc2(self.conv2(h1)))
        # Dropout is only applied during training.
        if train:
            h2 = F.dropout(h2)
        return F.sigmoid(self.l1(h2))
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pytest
import os
from pyspark import HiveContext
from pyspark import SparkConf
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
def quiet_py4j():
    """Silence py4j's chatty INFO-level logging during the test session."""
    logging.getLogger('py4j').setLevel(logging.WARN)
@pytest.fixture(scope="session")
def spark_context(request):
conf = (SparkConf().setMaster("local[2]").setAppName("pytest-pyspark-local-testing"))
request.addfinalizer(lambda: sc.stop())
sc = SparkContext(conf=conf)
quiet_py4j()
return sc
@pytest.fixture(scope="session")
def hive_context(spark_context):
return HiveContext(spark_context)
@pytest.fixture(scope="session")
def streaming_context(spark_context):
return StreamingContext(spark_context, 1)
@pytest.fixture(scope="session")
def setup_pyspark_env():
os.environ['PYSPARK_DRIVER_PYTHON'] = '/usr/local/anaconda3/envs/nexus-messages3/bin/python'
os.environ['PYSPARK_PYTHON'] = '/usr/local/anaconda3/envs/nexus-messages3/bin/python' |
from flask import Flask, request, jsonify
import requests
from flask_cors import CORS
from os import environ
import json
import pika
import datetime
import json
from datetime import date
app = Flask(__name__)
CORS(app)
def send_alert_message(info):
    """POST an 'alert'-template email through the Mailgun sandbox domain.

    :param info: JSON string of template variables (name/date/time/clinic).
    :return: the ``requests.Response`` from the Mailgun API.
    """
    return requests.post(
        "https://api.mailgun.net/v3/sandbox2ba83bb738204aa1ace26f86872c85d7.mailgun.org/messages",
        # BUG FIX: the '<APIKEY>' placeholder was a syntax error; read the
        # secret from the environment instead of hard-coding it.
        auth=("api", environ.get("MAILGUN_API_KEY")),
        data={"from": "Alert admin <postmaster@sandbox2ba83bb738204aa1ace26f86872c85d7.mailgun.org>",
              "to": "Jon <jonathanlee.2018@smu.edu.sg>",
              "subject": "Appointment confirmed",
              "template": "alert",
              "h:X-Mailgun-Variables": info})
def send_billing_message(info):
    """POST a 'billing'-template email through the Mailgun sandbox domain.

    :param info: JSON string of template variables (aid/date/bill/claim/medication).
    :return: the ``requests.Response`` from the Mailgun API.
    """
    return requests.post(
        "https://api.mailgun.net/v3/sandbox2ba83bb738204aa1ace26f86872c85d7.mailgun.org/messages",
        # BUG FIX: the '<APIKEY>' placeholder was a syntax error; read the
        # secret from the environment instead of hard-coding it.
        auth=("api", environ.get("MAILGUN_API_KEY")),
        data={"from": "Mailgun Sandbox <postmaster@sandbox2ba83bb738204aa1ace26f86872c85d7.mailgun.org>",
              "to": "jiamin <jonathanlee.2018@smu.edu.sg>",
              "subject": "Billing - for your immediate attention",
              "template": "billing",
              "h:X-Mailgun-Variables": info})
@app.route("/appointmentemail", methods=['POST'])
def send_appt_email():
status = 201
email_input = request.json
customerID = email_input['customerID']
clinicID = email_input['clinicID']
doctorID = email_input['doctorID']
appointmentDate = email_input['appointmentDate']
appointmentTime = email_input['appointmentTime']
data = {"name": customerID, "date": appointmentDate, "time": appointmentTime, "clinic": clinicID}
info = json.dumps(data)
send_alert_message(info)
return 'OK'
@app.route("/paymentemail", methods=["POST"])
def send_pay_email():
status = 201
pay_email_input = request.json
AID = str(pay_email_input['AID'])
Medication = pay_email_input['Medication']
BillAmount = str(pay_email_input['BillAmount'])
ClaimAmount = str(pay_email_input['ClaimAmount'])
today = date.today()
today = today.strftime("%d/%m/%Y")
data = {"aid": AID, "date": today, "bill": BillAmount, "claim": ClaimAmount, "medication": Medication}
info = json.dumps(data)
send_billing_message(info)
return 'OK'
# Run the Flask development server, reachable from any interface.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=True)
"""Module for converting numbers to various numeral systems."""
from typing import Union
def convert(number: Union[int, str], base_init: int, base_final: int) -> str:
    """Convert a number to another numeral system.

    :param number: the initial number
    :param base_init: a base of the initial number
    :param base_final: a base of the final number
    :return: converted number, always as ``str``
    :raises ValueError: if a base is invalid or the number does not fit it
    """
    _bases_validate(base_init, base_final)
    if base_init == base_final:
        # BUG FIX: the input used to be returned unchanged here, so an ``int``
        # argument violated the declared ``-> str`` return type.
        return str(number)
    if base_init == 10:
        return _convert_dec_to_other(number, base_final)
    if base_final == 10:
        return str(_convert_to_dec(number, base_init))
    # General case: go through decimal as an intermediate representation.
    return _convert_dec_to_other(
        _convert_to_dec(number, base_init), base_final
    )


def _bases_validate(init: int, final: int) -> None:
    """Raise ``ValueError`` unless both bases are integers in [2, 36]."""
    if not isinstance(init, int) or not isinstance(final, int):
        raise ValueError("Base of the number must be integer.")
    if not (1 < init < 37) or not (1 < final < 37):
        raise ValueError(
            "Base of the number must be at least 2 and not more than 36."
        )


def _convert_to_dec(number: str, base: int) -> int:
    """Convert a number to the decimal numeral system.

    :param number: the initial number
    :param base: a base of the initial number
    :return: converted number
    :raises ValueError: if ``number`` is not a valid numeral in ``base``
    """
    try:
        return int(str(number), base=base)
    except ValueError:
        raise ValueError("Invalid number base.") from None


def _convert_dec_to_other(number: Union[int, str], base: int) -> str:
    """Convert the decimal number to another numeral system.

    :param number: the initial number (decimal)
    :param base: a base of the final number
    :return: converted number, uppercase for digit values above 9
    :raises ValueError: if ``number`` is not a decimal integer
    """
    try:
        number = int(number)
    except ValueError:
        raise ValueError(
            "The decimal number can only consist of digits."
        ) from None
    # Single-digit fast path (also covers 0, which the loop below would miss).
    if number < 10 and number < base:
        return str(number)
    # Built-in formatters for the common bases.
    if base == 2:
        return bin(number)[2:]
    if base == 8:
        return oct(number)[2:]
    if base == 16:
        return hex(number)[2:].upper()
    result = ""
    while number:
        mod = number % base
        # Digit values above 9 become letters: chr(55 + d) maps 10 -> 'A'.
        result = f"""{(chr(mod + (48 if mod < 10 else 55))
                       if base > 10 else str(mod))}{result}"""
        number //= base
    return result.upper()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script daemonize runinng process
"""
import os
import sys
def daemonize(stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
    """Detach the running process from its controlling terminal (classic UNIX
    double-fork) and rebind the standard streams to the given files.

    :param stdin:  path opened read-only as the daemon's stdin
    :param stdout: path opened for append as the daemon's stdout
    :param stderr: path opened for append as the daemon's stderr
    """
    # Run first fork
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0) # First parent process closed
    except OSError as e:
        sys.stderr.write('Fork #1 failed: (%d) %s\n' % (e.errno, e.strerror))
        sys.exit(1)
    # Unset from parent process: drop cwd, umask and controlling session so
    # the daemon holds no references to the invoking environment.
    os.chdir('/')
    os.umask(0)
    os.setsid()
    # Run second fork: the session leader exits, so the daemon can never
    # reacquire a controlling terminal.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0) # Second parent process closed
    except OSError as e:
        sys.stderr.write('Fork #2 failed: (%d) %s\n' % (e.errno, e.strerror))
        sys.exit(1)
    # Now process is daemonized
    # Flush anything buffered before swapping the file descriptors.
    for f in sys.stdout, sys.stderr:
        f.flush()
    si = open(stdin, 'r')
    so = open(stdout, 'a+')
    se = open(stderr, 'a+')
    # Redirect the standard file descriptors at the OS level.
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
|
# Сериализаторы
from rest_framework import filters
from rest_framework import viewsets
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from django_filters.rest_framework import DjangoFilterBackend
# Права доступа
from workprogramsapp.permissions import IsRpdDeveloperOrReadOnly
from .models import AdditionalMaterial, StructuralUnit, UserStructuralUnit
# Сериализаторы
from .serializers import AdditionalMaterialSerializer, CreateAdditionalMaterialSerializer, \
StructuralUnitSerializer, CreateStructuralUnitSerializer, \
CreateUserStructuralUnitSerializer, UserStructuralUnitSerializer, ShortStructuralUnitSerializer
from ..models import WorkProgram, DisciplineSection, PrerequisitesOfWorkProgram, OutcomesOfWorkProgram, \
WorkProgramInFieldOfStudy, СertificationEvaluationTool, EvaluationTool, Topic, Competence
from ..serializers import WorkProgramSerializer, WorkProgramShortForExperiseSerializer, CompetenceSerializer
from .serializers import CompetenceFullSerializer
class AdditionalMaterialSet(viewsets.ModelViewSet):
    """CRUD endpoints for AdditionalMaterial objects."""
    queryset = AdditionalMaterial.objects.all()
    serializer_class = AdditionalMaterialSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter)
    permission_classes = [IsRpdDeveloperOrReadOnly]

    # Write actions use the create-serializer; everything else the read one.
    _action_serializers = {
        'create': CreateAdditionalMaterialSerializer,
        'update': CreateAdditionalMaterialSerializer,
        'partial_update': CreateAdditionalMaterialSerializer,
    }

    def get_serializer_class(self):
        """Pick the serializer matching the current viewset action."""
        return self._action_serializers.get(self.action, AdditionalMaterialSerializer)
class StructuralUnitSet(viewsets.ModelViewSet):
    """CRUD endpoints for StructuralUnit objects, searchable by title."""
    queryset = StructuralUnit.objects.all()
    serializer_class = StructuralUnitSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter)
    search_fields = ['title']
    permission_classes = [IsRpdDeveloperOrReadOnly]

    # list uses the compact serializer; write actions use the create-serializer.
    _action_serializers = {
        'list': ShortStructuralUnitSerializer,
        'create': CreateStructuralUnitSerializer,
        'update': CreateStructuralUnitSerializer,
        'partial_update': CreateStructuralUnitSerializer,
    }

    def get_serializer_class(self):
        """Pick the serializer matching the current viewset action."""
        return self._action_serializers.get(self.action, StructuralUnitSerializer)
class UserStructuralUnitSet(viewsets.ModelViewSet):
    """CRUD endpoints for the user<->structural-unit membership objects."""
    queryset = UserStructuralUnit.objects.all()
    serializer_class = UserStructuralUnitSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter)
    permission_classes = [IsRpdDeveloperOrReadOnly]

    # Write actions use the create-serializer; everything else the read one.
    _action_serializers = {
        'create': CreateUserStructuralUnitSerializer,
        'update': CreateUserStructuralUnitSerializer,
        'partial_update': CreateUserStructuralUnitSerializer,
    }

    def get_serializer_class(self):
        """Pick the serializer matching the current viewset action."""
        return self._action_serializers.get(self.action, UserStructuralUnitSerializer)
@api_view(['POST'])
@permission_classes((IsRpdDeveloperOrReadOnly,))
def CopyContentOfWorkProgram(request):
    """
    API endpoint that copies the content of one work program (WP) into another.
    (The original Russian docstring also warned that the source WP is deleted,
    but the delete call below is commented out.)
    Parameters: from_copy_id, to_copy_id
    Returns: the updated WP object (the one the data was copied into)
    """
    try:
        from_copy = request.data.get('from_copy_id')
        to_copy = request.data.get('to_copy_id')
        old_wp = WorkProgram.objects.get(pk=from_copy)
        new_wp = WorkProgram.objects.get(pk=to_copy)
        new_wp.approval_date = old_wp.approval_date
        new_wp.authors = old_wp.authors
        # Copy prerequisites, skipping ones the target already has.
        new_items = PrerequisitesOfWorkProgram.objects.filter(workprogram=to_copy)
        new_out = OutcomesOfWorkProgram.objects.filter(workprogram=to_copy)
        for item in PrerequisitesOfWorkProgram.objects.filter(workprogram=from_copy):
            item_exists = False
            for new_item in new_items:
                if new_item.item == item.item and new_item.masterylevel == item.masterylevel:
                    print(item_exists)
                    item_exists = True
            if not item_exists:
                PrerequisitesOfWorkProgram.objects.create(item=item.item, workprogram=new_wp,
                                                          masterylevel=item.masterylevel)
        # Clone discipline sections and their topics, remembering old->clone ids.
        discipline = DisciplineSection.objects.filter(work_program_id=old_wp.id)
        disp_clone_list = []
        eva_clone_list = []
        for disp in discipline:
            clone_discipline = disp.make_clone(attrs={'work_program': new_wp})
            topic = Topic.objects.filter(discipline_section=disp)
            for top in topic:
                top.make_clone(attrs={'discipline_section': clone_discipline})
            clone_dict = {'id': disp.id, 'clone_id': clone_discipline.id}
            disp_clone_list.append(clone_dict)
        # Clone evaluation tools attached to the old WP's sections and re-attach
        # the clones to the corresponding cloned sections.
        for eva in EvaluationTool.objects.filter():
            evaluation_disciplines = eva.evaluation_tools.all().filter(work_program_id=old_wp.id)
            if (evaluation_disciplines):
                clone_eva = eva.make_clone()
                for disp in evaluation_disciplines:
                    for elem in disp_clone_list:
                        if (disp.id == elem['id']):
                            DisciplineSection.objects.get(pk=elem['clone_id']).evaluation_tools.add(clone_eva)
                clone_dict = {'id': eva.id, 'clone_id': clone_eva.id}
                eva_clone_list.append(clone_dict)
        # Clone outcomes and relink them to the cloned evaluation tools.
        for out in OutcomesOfWorkProgram.objects.filter(workprogram=old_wp):
            clone_outcomes = out.make_clone(attrs={'workprogram': new_wp})
            for eva in out.evaluation_tool.all():
                for elem in eva_clone_list:
                    if (eva.id == elem['id']):
                        clone_outcomes.evaluation_tool.add(EvaluationTool.objects.get(pk=elem['clone_id']))
        # Clone certification tools, then drop placeholder ones on the target.
        for cerf in СertificationEvaluationTool.objects.filter(work_program=old_wp):
            cerf.make_clone(attrs={'work_program': new_wp})
        for cerf in СertificationEvaluationTool.objects.filter(work_program=new_wp):
            if cerf.name == "No name":
                cerf.delete()
        # Copy many-to-many relations and scalar fields wholesale.
        new_wp.editors.add(*old_wp.editors.all())
        new_wp.bibliographic_reference.add(*old_wp.bibliographic_reference.all())
        new_wp.hoursFirstSemester = old_wp.hoursFirstSemester
        new_wp.hoursSecondSemester = old_wp.hoursSecondSemester
        new_wp.description = old_wp.description
        new_wp.video = old_wp.video
        new_wp.credit_units = old_wp.credit_units
        new_wp.semester_hour = old_wp.semester_hour
        new_wp.owner = old_wp.owner
        new_wp.work_status = old_wp.work_status
        new_wp.hours = old_wp.hours
        new_wp.extra_points = old_wp.extra_points
        # old_wp.delete()
        new_wp.save()
        serializer = WorkProgramSerializer(new_wp, many=False)
        return Response(serializer.data)
    except:
        # NOTE(review): a bare except hides every failure (including
        # programming errors) behind a generic 400.
        return Response(status=400)
@api_view(['POST'])
@permission_classes((IsRpdDeveloperOrReadOnly,))
def ReconnectWorkProgram(request):
    """Look up both work programs and return the serialized target WP.

    Parameters: from_copy_id, to_copy_id.
    NOTE(review): ``old_wp`` is fetched but otherwise unused beyond confirming
    the source WP exists — the reconnection logic appears unfinished.
    """
    try:
        from_copy = request.data.get('from_copy_id')
        to_copy = request.data.get('to_copy_id')
        old_wp = WorkProgram.objects.get(pk=from_copy)
        new_wp = WorkProgram.objects.get(pk=to_copy)
        serializer = WorkProgramSerializer(new_wp, many=False)
        return Response(serializer.data)
    except Exception:
        # BUG FIX: narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        return Response(status=400)
class CompetencesSet(viewsets.ModelViewSet):
    """CRUD endpoints for Competence objects; retrieve returns the full view."""
    queryset = Competence.objects.all()
    serializer_class = CompetenceSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter, DjangoFilterBackend)
    filterset_fields = []

    def get_serializer_class(self):
        """Use the full serializer only for single-object retrieval."""
        if self.action == 'retrieve':
            return CompetenceFullSerializer
        # list/create/update and every other action share the plain serializer.
        return CompetenceSerializer
@api_view(['POST'])
@permission_classes((IsAdminUser,))
def ChangeSemesterInEvaluationsCorrect(request):
    """Normalize evaluation-tool semesters of accepted WPs so they start at 1."""
    needed_wp = WorkProgram.objects.filter(expertise_with_rpd__expertise_status__contains='AC').distinct()
    for wp in needed_wp:
        evaluation_tools = EvaluationTool.objects.filter(evaluation_tools__in=DisciplineSection.objects.filter(
            work_program__id=wp.id))
        # Find the smallest assigned semester; the scan stops at the first
        # tool without one.
        min_sem = 12
        for eva in evaluation_tools:
            if eva.semester == None:
                break
            if eva.semester < min_sem:
                min_sem = eva.semester
        # NOTE(review): `eva` here is the leftover loop variable from the scan
        # above, so the shift is skipped whenever the *last visited* tool has
        # no semester — looks accidental. Also raises NameError if the
        # queryset is empty. TODO confirm intended behavior.
        if min_sem != 1 and eva.semester != None:
            for eva in evaluation_tools:
                eva.semester = eva.semester - min_sem + 1
                eva.save()
        # Repeat the same normalization for certification (final) tools.
        final_tool = СertificationEvaluationTool.objects.filter(work_program=wp)
        min_sem = 12
        for eva in final_tool:
            if eva.semester == None:
                break
            if eva.semester < min_sem:
                min_sem = eva.semester
        if min_sem != 1 and eva.semester != None:
            for eva in final_tool:
                eva.semester = eva.semester - min_sem + 1
                eva.save()
    serializer = WorkProgramSerializer(needed_wp, many=True)
    return Response(serializer.data)
|
def solution(board, moves):
    """Claw-machine simulation (Programmers 'crane game').

    `board` is an n x n grid (0 = empty, otherwise a doll id) and `moves`
    is a list of 1-based column picks. Dolls land in a basket; whenever two
    identical dolls become adjacent on top, both vanish. Returns the total
    number of dolls removed this way (each pair counts as 2).
    """
    removed = 0
    size = len(board)
    # top[c] = row index of the topmost doll in column c; `size` marks an
    # empty column (also the initial sentinel while scanning).
    top = [size] * size
    for r in range(size):
        for c in range(len(board[r])):
            if board[r][c] != 0 and top[c] == size:
                top[c] = r
    basket = []
    for move in moves:
        col = move - 1
        if top[col] >= size:
            continue  # column already emptied — the claw grabs nothing
        doll = board[top[col]][col]
        if basket and basket[-1] == doll:
            basket.pop()
            removed += 2
        else:
            basket.append(doll)
        top[col] += 1
    return removed
# Wrong solution (kept deliberately for reference).
# Using 0 as the default in `row` made a doll-less column indistinguishable
# from "topmost doll in row 0"; the fixed version above uses len(board) as
# the default instead. This version also never checks whether the column
# still has dolls before grabbing.
def solution(board, moves):
    answer = 0
    row = [0 for i in range(len(board))]
    # Find the row position of the topmost doll in each column.
    for i in range(len(board)):
        for j in range(len(board[i])):
            if board[i][j] != 0 and row[j] == 0:
                row[j] = i
    print(row)
    stack = []
    for i in moves:
        if len(stack) >= 1:
            if stack[-1] == board[row[i - 1]][i - 1]:
                stack.pop()
                answer += 2
            else:
                stack.append(board[row[i - 1]][i - 1])
        else:
            stack.append(board[row[i - 1]][i - 1])
        row[i - 1] += 1
    return answer
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
from logging import Logger
from abc import ABCMeta, abstractmethod
from tonga.models.structs.store_record_type import StoreRecordType
__all__ = [
'BasePersistency'
]
class BasePersistency(metaclass=ABCMeta):
    """Abstract base for asynchronous key/value persistency back-ends.

    Subclasses implement the async CRUD primitives plus `_build_operations`,
    which replays journal records while the store is being (re)built.
    """
    # Flipped to True via _set_initialize() once the store is ready.
    _initialize: bool = False
    # Logger supplied by the concrete implementation.
    _logger: Logger
    def is_initialize(self) -> bool:
        """ Return true if persistency is initialized, false otherwise

        Returns:
            bool
        """
        return self._initialize
    def _set_initialize(self) -> None:
        """ Set persistency initialize flag to true

        Returns:
            None
        """
        self._initialize = True
    @abstractmethod
    async def get(self, key: str) -> bytes:
        """ Get value by key in local store

        Args:
            key (str): Key entry as string

        Returns:
            bytes: return value as bytes
        """
        raise NotImplementedError
    @abstractmethod
    async def set(self, key: str, value: bytes) -> None:
        """ Set value & key in global store

        NOTE(review): `get` says "local store" while `set`/`delete` say
        "global store" — presumably the same backing store; confirm wording.

        Args:
            key (str): Key entry as string
            value (bytes): Value as bytes

        Returns:
            None
        """
        raise NotImplementedError
    @abstractmethod
    async def delete(self, key: str) -> None:
        """ Delete value by key in global store

        Args:
            key (str): Key entry as string

        Returns:
            None
        """
        raise NotImplementedError
    @abstractmethod
    async def _build_operations(self, key: str, value: bytes, operation_type: StoreRecordType) -> None:
        """ This function is used for build DB when store is not initialize

        Args:
            key (str): Key entry as string
            value (bytes): Value as bytes
            operation_type (StoreRecordType): Operation type (SET or DEL)

        Returns:
            None
        """
        raise NotImplementedError
|
import json
from mock import patch
from grant.proposal.models import Proposal
from grant.utils.enums import ProposalStatus
from ..config import BaseProposalCreatorConfig
from ..test_data import test_proposal, mock_blockchain_api_requests
from ..mocks import mock_request
class TestProposalContributionAPI(BaseProposalCreatorConfig):
    """API tests for creating and fetching proposal contributions."""

    def _post_contribution(self, amount="1.2345"):
        # Shared helper: POST a contribution to the default proposal.
        # (The three tests below previously duplicated this request verbatim.)
        return self.app.post(
            "/api/v1/proposals/{}/contributions".format(self.proposal.id),
            data=json.dumps({"amount": amount}),
            content_type='application/json'
        )

    @patch('requests.get', side_effect=mock_blockchain_api_requests)
    def test_create_proposal_contribution(self, mock_blockchain_get):
        # A logged-in user can create a contribution (201 Created).
        self.login_default_user()
        post_res = self._post_contribution()
        self.assertStatus(post_res, 201)

    @patch('requests.get', side_effect=mock_blockchain_api_requests)
    def test_create_duplicate_contribution(self, mock_blockchain_get):
        # Posting the same contribution twice returns the existing record
        # (200 with the same id) instead of creating a new one.
        self.login_default_user()
        post_res = self._post_contribution()
        self.assertStatus(post_res, 201)
        dupe_res = self._post_contribution()
        self.assert200(dupe_res)
        self.assertEqual(dupe_res.json['id'], post_res.json['id'])

    @patch('requests.get', side_effect=mock_blockchain_api_requests)
    def test_get_proposal_contribution(self, mock_blockchain_get):
        # A created contribution can be fetched by id and starts PENDING.
        self.login_default_user()
        post_res = self._post_contribution()
        contribution_id = post_res.json['id']
        contribution_res = self.app.get(
            f'/api/v1/proposals/{self.proposal.id}/contributions/{contribution_id}'
        )
        contribution = contribution_res.json
        self.assertEqual(contribution['id'], contribution_id)
        self.assertEqual(contribution['status'], ProposalStatus.PENDING)
|
import numpy as np
import cv2
class YCBCR:
    """Shadow detection in the YCbCr color space.

    Pixels whose luma (Y) falls more than one third of a standard deviation
    below the mean luma are marked as shadow.
    """

    def __init__(self):
        pass

    def getShadow(self, image):
        """Return a binary shadow mask (uint8, 0/255) at the input's size.

        Args:
            image: BGR image as an (h, w, 3) uint8 array.
        """
        h, w, c = image.shape
        # Work at a fixed small resolution for speed.
        image = cv2.resize(image, (320, 240))
        y_cb_cr_img = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
        y_channel = y_cb_cr_img[:, :, 0]
        y_mean = np.mean(y_channel)
        y_std = np.std(y_channel)
        # Vectorized threshold replaces the original per-pixel Python loop
        # (identical result, one C-level pass instead of 320*240 iterations).
        binary_mask = np.where(
            y_channel < y_mean - (y_std / 3), 255, 0
        ).astype(np.uint8)
        # Clean up: erode, re-binarize with Otsu, then median-filter.
        kernel = np.ones((3, 3), np.uint8)
        erosion = cv2.erode(binary_mask, kernel, iterations=1)
        ret, th = cv2.threshold(erosion, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        median = cv2.medianBlur(th, 15)
        # Restore the mask to the caller's original resolution. (The original
        # also resized `image` back, but that result was never used.)
        median = cv2.resize(median, (w, h))
        return median
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, Dict
import gym
from gym.core import Env
from gym.envs.registration import register
from mtenv.envs.hipbmdp.wrappers import framestack, sticky_observation
def _build_env(
    domain_name: str,
    task_name: str,
    seed: int = 1,
    xml_file_id: str = "none",
    visualize_reward: bool = True,
    from_pixels: bool = False,
    height: int = 84,
    width: int = 84,
    camera_id: int = 0,
    frame_skip: int = 1,
    environment_kwargs: Any = None,
    episode_length: int = 1000,
) -> Env:
    """Register (once) and instantiate a gym-wrapped DMC environment.

    Builds a unique gym id from domain/task/xml_file_id/seed, registers the
    DMCWrapper entry point for it if not already registered, and returns
    ``gym.make(env_id)``.

    NOTE(review): the default for `xml_file_id` is the *string* "none", but
    the branch below tests `is None` — with the default value the first
    branch is never taken. Confirm whether callers pass None or "none".
    """
    if xml_file_id is None:
        env_id = "dmc_%s_%s_%s-v1" % (domain_name, task_name, seed)
    else:
        env_id = "dmc_%s_%s_%s_%s-v1" % (domain_name, task_name, xml_file_id, seed)
    if from_pixels:
        assert (
            not visualize_reward
        ), "cannot use visualize reward when learning from pixels"
    # shorten episode length
    # Account for frame skipping: each env step consumes `frame_skip` frames.
    max_episode_steps = (episode_length + frame_skip - 1) // frame_skip
    # Register at most once; gym raises on duplicate ids.
    if env_id not in gym.envs.registry.env_specs:
        register(
            id=env_id,
            entry_point="mtenv.envs.hipbmdp.wrappers.dmc_wrapper:DMCWrapper",
            kwargs={
                "domain_name": domain_name,
                "task_name": task_name,
                "task_kwargs": {"random": seed, "xml_file_id": xml_file_id},
                "environment_kwargs": environment_kwargs,
                "visualize_reward": visualize_reward,
                "from_pixels": from_pixels,
                "height": height,
                "width": width,
                "camera_id": camera_id,
                "frame_skip": frame_skip,
            },
            max_episode_steps=max_episode_steps,
        )
    return gym.make(env_id)
def build_dmc_env(
    domain_name: str,
    task_name: str,
    seed: int,
    xml_file_id: str,
    visualize_reward: bool,
    from_pixels: bool,
    height: int,
    width: int,
    frame_skip: int,
    frame_stack: int,
    sticky_observation_cfg: Dict[str, Any],
) -> Env:
    """Build a single DMC environment as described in
    :cite:`tassa2020dmcontrol`, optionally wrapped with frame stacking and
    sticky observations.

    Args:
        domain_name (str): name of the domain.
        task_name (str): name of the task.
        seed (int): environment seed (for reproducibility).
        xml_file_id (str): id of the xml file to use.
        visualize_reward (bool): should visualize reward ?
        from_pixels (bool): return pixel observations?
        height (int): height of pixel frames.
        width (int): width of pixel frames.
        frame_skip (int): should skip frames?
        frame_stack (int): should stack frames together?
        sticky_observation_cfg (Dict[str, Any]): Configuration for using
            sticky observations. It should be a dictionary with three
            keys, `should_use` which specifies if the config should be
            used, `sticky_probability` which specifies the probability of
            choosing a previous task and `last_k` which specifies the
            number of previous frames to choose from.

    Returns:
        Env:
    """
    wrapped = _build_env(
        domain_name=domain_name,
        task_name=task_name,
        seed=seed,
        visualize_reward=visualize_reward,
        from_pixels=from_pixels,
        height=height,
        width=width,
        frame_skip=frame_skip,
        xml_file_id=xml_file_id,
    )
    # Pixel observations get stacked into a rolling window of k frames.
    if from_pixels:
        wrapped = framestack.FrameStack(wrapped, k=frame_stack)
    # Sticky observations are opt-in via the (possibly empty) config dict.
    if sticky_observation_cfg and sticky_observation_cfg["should_use"]:
        wrapped = sticky_observation.StickyObservation(  # type: ignore[attr-defined]
            env=wrapped,
            sticky_probability=sticky_observation_cfg["sticky_probability"],
            last_k=sticky_observation_cfg["last_k"],
        )
    return wrapped
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 10 13:15:55 2020
@author: shaun
"""
from numpy import loadtxt as lt
from matplotlib import pyplot as plt
#crates run average function to compute the running
#average given a list of 11 points
def runaverage(list5, r=5):
    """Return the centered running average of one window of samples.

    Args:
        list5: sequence of 2*r + 1 sample values (11 with the default r).
        r: window half-width; kept as a parameter so other window sizes can
           reuse this helper (default 5 preserves the original behavior).

    Returns:
        The arithmetic mean of the window as a float.
    """
    # sum(...) / (2r+1) replaces the manual accumulator loop and the
    # separate 1/(2r+1) factor of the original.
    return sum(list5) / (2 * r + 1)
#read in data
data=lt(r"C:\Users\shaun\Programs\Python\Computational Physics\cpresources\sunspots.txt",float)
ox=data[:,0]
y=data[:,1]
# make sure the inputed number of points is an integer
try:
    points=int(input("please enter the number of points you'd like to plot \n"))
except ValueError:
    print("you didn't enter a valid number try again next time we'll set the number of points to 1000")
    points=1000
#select the given number of points
newx=ox[0:points]
newy=y[0:points]
running=[]
#plot the data
fig2 = plt.figure()
ax2 = fig2.add_subplot(1,1,1)
ax2.scatter(newx, newy, s=10, c='b', marker="s", label='Original Data')
fig2.suptitle("Sunspots")
ax2.set_xlabel("Months from January 1749")
ax2.set_ylabel("Sunspot Number")
plt.legend(loc='upper left');
#deal with the different cases for running average
# Build an 11-point window around each sample (padding at the edges with the
# current value) and feed it to runaverage().
for x in range(0,newx.size):
    #deals with the case at the beginning of the list
    if(x<5 and newx.size-1>=11):
        newlist=[]
        for l in range(0, 5):
            newlist.append(newy[x])
        newlist.append(newy[x])
        for n in range(x+1,x+6):
            newlist.append(newy[n])
    #deals with the case in the middle of the list
    if(x>=5 and x<newx.size-6):
        newlist=[]
        for l in range(-5, 1):
            newlist.append(newy[x+l])
        for n in range(x+1,x+6):
            newlist.append(newy[n])
    #deals with the case at the end of the list
    # NOTE(review): x == newx.size-6 matches none of the three branches, so
    # that iteration silently reuses the previous iteration's `newlist` —
    # confirm whether the middle case should be `x <= newx.size-6`.
    if(x>newx.size-6):
        newlist=[]
        for l in range(-5, 1):
            newlist.append(newy[x+l])
        # NOTE(review): `l` below is the leftover value (0) from the loop
        # above, so newy[n+l] is just newy[n] — confirm that was intended.
        for n in range(x,newx.size-1):
            newlist.append(newy[n+l])
        while(len(newlist)<11):
            newlist.append(newy[x])
    running.append(runaverage(newlist))
#plot the data alongside it's running average
fig2 = plt.figure()
ax1 = fig2.add_subplot(1,1,1)
ax1.scatter(newx, newy, s=10, c='b', marker="s", label='Original Data')
ax1.scatter(newx,running, s=10, c='r', marker="o", label='running average')
fig2.suptitle("Sunspots")
ax1.set_xlabel("Months from January 1749")
ax1.set_ylabel("Sunspot Number")
ax1.legend(loc='upper left')
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the AuditRecord model: a generic, content-type-based change
    log that links an edited object to the user who changed it."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        # NOTE(review): the operations below reference
        # 'contenttypes.ContentType'; confirm the contenttypes migration
        # dependency is satisfied elsewhere in the migration graph.
    ]
    operations = [
        migrations.CreateModel(
            name='AuditRecord',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Generic FK pair: content_type + object_id identify the
                # audited object.
                ('object_id', models.PositiveIntegerField()),
                ('changed_fields', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
                ('user', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='audit_records', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Audit Record',
                'verbose_name_plural': 'Audit Records',
            },
        ),
    ]
|
import numpy as np
import random
import Hypothesis
COLORS=['red', 'blue', 'green']
SHAPES=['rectangle', 'circle', 'triangle']
class Grammar():
    """Probabilistic grammar that samples rule-based hypotheses over colored
    shapes. (The commented-out variant below shows the human-readable string
    form of the same clause encodings.)"""
    def __init__(self):
        pass
    def produce_hypothesis(self):
        """Sample a hypothesis: a disjunction of conjunctive terms.

        The number of disjunctive terms follows a geometric(0.5)
        distribution (possibly zero); each term comes from
        generate_conjunction().
        """
        hypothesis=Hypothesis.Hypothesis()
        n_disj_terms=0
        #set number of disjunctions
        while 1:
            if(np.random.binomial(1,0.5)):
                n_disj_terms+=1
            else:
                break
        #each term a conjunction
        for i in range(n_disj_terms):
            new_conjunction=self.generate_conjunction()
            hypothesis.add_term(new_conjunction)
        #hypothesis.display()
        return hypothesis
    def generate_conjunction(self):
        """Sample one conjunction: a random color clause plus a random shape
        clause, flattened into a single list of primitive clauses."""
        color_clause=self.instantiate_clause('color', random.choice([0,1,2,3,4]))
        shape_clause=self.instantiate_clause('shape', random.choice([0,1,2,3,4]))
        conjunction=[color_clause, shape_clause]
        return [item for sublist in conjunction for item in sublist] #flatten
    def instantiate_clause(self, clause_variable, clause_type):
        """Build a clause for 'color' or 'shape' in the tuple encoding.

        clause_type 0: constrains both objects (o and m); 1: object o only;
        2: object m only; 3: equality between o's and m's attribute;
        4: tautology.

        NOTE(review): for clause_type 4 the value ((1,)) is just the tuple
        (1,) — the outer parentheses do not nest; confirm the intended
        encoding. Also, a clause_variable other than 'color'/'shape' would
        leave variable_space/variable_value unbound (NameError).
        """
        if clause_variable=='color':
            variable_space=[0,1,2]
            variable_value=0
        elif clause_variable=='shape':
            variable_space=[0,1,2]
            variable_value=1
        if clause_type==0:
            var1=random.choice(variable_space)
            var2=random.choice(variable_space)
            return [((variable_value,0),(var1,)),((variable_value,1),(var2,))]
        elif clause_type==1:
            var=random.choice(variable_space)
            return [((variable_value, 0),(var,))]
        elif clause_type==2:
            var=random.choice(variable_space)
            return [((variable_value, 1),(var,))]
        elif clause_type==3:
            return [((variable_value,1),(variable_value,0))]
        elif clause_type==4:
            return [((1,))]
# def instantiate_clause(self, clause_variable, clause_type):
# if clause_variable=='color':
# variable_space=COLORS
# variable_name='Color'
# elif clause_variable=='shape':
# variable_space=SHAPES
# variable_name='Shape'
# if clause_type==0:
# var1=random.choice(variable_space)
# var2=random.choice(variable_space)
# return "{0}(o)={1} ^ {0}(m)={2}".format(variable_name, var1, var2)
# elif clause_type==1:
# var=random.choice(variable_space)
# return "{0}(o)={1}".format(variable_name, var)
# elif clause_type==2:
# var=random.choice(variable_space)
# return "{0}(m)={1}".format(variable_name, var)
# elif clause_type==3:
# return "{0}(o)={0}(m)".format(variable_name)
# elif clause_type==4:
# return ""
# def choose_production(self, symbol):
# if symbol=='S':
# return 'D'
# if symbol=='D':
# return random.choice(['False', True])
# if symbol=='C':
# return 'True'
# |
import uvicorn
from app.database.crud import create_admin, create_post, get_user_by_username
from app.database.redis import redis
from app.database.sqlite import db
from app.factory import create_app
from app.utils.auth import get_password_hash
main_app = create_app()
@main_app.on_event('startup')
async def startup_event() -> None:
    """Initialize databases and seed the admin user plus demo posts.

    Runs once at application startup: initializes SQLite, creates the
    'admin' user with a fixed set of sample posts if it does not exist
    yet, then initializes Redis.
    """
    # Initialize SQLite asynchronously
    await db.init()
    # Add Admin if it doesn't already exist
    async with db.create_session() as session:
        if not await get_user_by_username(session=session, username='admin'):
            user = await create_admin(
                session=session,
                username='admin',
                full_name='Some Name',
                hashed_password=get_password_hash('12345'),
            )
            # Seed the demo posts in a loop; the original repeated the same
            # create_post call five times with only the header differing.
            seed_headers = (
                'USA starts withdrawal of troops from Afghanistan',
                'Trump is the first American President being impeached twice',
                'Havertz double leaves Fulham in trouble',
                'Manchester City could clinch the Football Premier League',
                'La Liga: Real Madrid vs Osasuna - who will win the first prize?',
            )
            for header in seed_headers:
                await create_post(
                    session=session,
                    header=header,
                    photo=b'',
                    text='',
                    author=user,
                )
    # Initialize Redis asynchronously
    await redis.init()
@main_app.on_event('shutdown')
async def shutdown_event() -> None:
    """Release the Redis connection when the application stops."""
    await redis.close()
if __name__ == '__main__':
    # Only for debugging within the PyCharm IDE.
    # To run this app from terminal use `docker-compose up`
    uvicorn.run(main_app, host='0.0.0.0', port=8000)
|
import numpy as np
import setup
import gym
import time
import gym_airsim.envs
import gym_airsim
import argparse
def test():
    """Smoke-test the AirSim gym environment's game-config API.

    Exercises updateJson / setRangeGameConfig / getRangeGameConfig /
    sampleGameConfig / getItemCurGameConfig by setting values and ranges,
    asserting round-trips, and printing sampled values.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', choices=['train', 'test'], default='train')
    parser.add_argument('--env-name', type=str, default='AirSimEnv-v42')
    parser.add_argument('--weights', type=str, default=None)
    args, unknown = parser.parse_known_args()
    # Get the environment and extract the number of actions.
    env = gym.make(args.env_name)
    # Test for setting an item
    item_val = 10
    env.updateJson(("NumberOfObjects", item_val))
    assert (item_val == env.getItemCurGameConfig("NumberOfObjects")), "item val should be equal"
    # Test for passing the wrong number of inputs
    # Deliberate misuse: two positional args instead of one tuple (and a
    # misspelled key) — the call is expected to raise AssertionError.
    try:
        print(env.setRangeGameConfig("NumbeOfObjects", 1))
    except AssertionError:
        print("pass")
    # Test for setting a range
    range_val = list(range(10, 50))
    env.setRangeGameConfig(("NumberOfObjects", range_val))
    assert (range_val == env.getRangeGameConfig("NumberOfObjects")), "range_val should be equal"
    # Testing random sampling
    for i in range(0, 5):
        env.sampleGameConfig("NumberOfObjects")
        print(env.getItemCurGameConfig("NumberOfObjects"))
    # Test for setting a range
    range_val = [[20, 20, 3], [100, 100, 5]]
    env.setRangeGameConfig(("ArenaSize", range_val))
    assert (range_val == env.getRangeGameConfig("ArenaSize")), "range_val should be equal"
    for i in range(0, 5):
        env.sampleGameConfig("ArenaSize")
        print(env.getItemCurGameConfig("ArenaSize"))
    print("------------- Testing End now")
    for i in range(0, 10):
        env.sampleGameConfig("LevelDifficulty")
        print(env.getItemCurGameConfig("LevelDifficulty"))
|
# -*- coding: utf-8 -*-
"""
Main script to train and export NN Forward models for the Ogden Material
"""
import numpy as np
from random import seed
import torch
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from time import time
import PreProcess
import Metamodels
import PostProcess
# Optimizer settings
RUN_NAME = 'BloodClotNN_'
EPOCHS = 500
LEARNING_RATE = 0.001
HIDDEN_DIM = 50
DEPTH = 3
NUM_FEATURES = 2
NUM_OUTPUT = 100
PRINT_INT = 1 # Print gif every other PRINT_INT epoch
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Training and testing data (number of FEBio simulations)
num_train_list = [100, 250, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 5000, 6000, 7000, 8000]
valid_sets = list(range(10001,11001))
n_val = len(valid_sets)
star_set = 250 # The ID of one validation test to visualize stress-strain
# Reproduce
# Seed every RNG in play (NumPy, stdlib random, torch) for reproducibility.
np.random.seed(1234)
seed(1234)
torch.manual_seed(42)
# Initialize learning curve dictionary
lc_stats = {'num_train':[], 'train_time':[],'MAE_train': [], 'MAPE_train': [], 'R2_train': [],'MAE_val': [], 'MAPE_val': [], 'R2_val': [] }
# Load data
TestData = PreProcess.OgdenData()
StrainObservs = TestData.FEBio_strain # As is, use all available points
# Separate Validation data
X_val, Y_val = TestData.FE_in_out(valid_sets, strain_vals = StrainObservs )
# Loop Training sets
# For each training-set size: load and scale the data, then (below) train a
# fresh network and record its learning-curve statistics.
for kk in range(0, len(num_train_list)):
    # Separate Training set
    train_num = num_train_list[kk]
    print(f'TRAIN_n...{train_num+0:03}')
    BATCH_SIZE = min(train_num,100)
    train_sets = list(range(1, train_num + 1))
    X_train, Y_train = TestData.FE_in_out(train_sets, strain_vals = StrainObservs)
    # Scale Training Set
    scaler = StandardScaler()
    scaler.fit(X_train)
    # Prepare data to Torch compatible
    X_train_tensor, train_data, train_loader = Metamodels.scaled_to_tensor(DEVICE, scaler, X_train, Y_train, BATCH_SIZE)
    X_val_tensor, val_data, val_loader = Metamodels.scaled_to_tensor(DEVICE, scaler, X_val, Y_val, n_val)
    # Set up neural network metamodel
    # FIX: removed the stray duplicated comma that previously appeared after
    # `output_dim=NUM_OUTPUT` and made this line a SyntaxError.
    model = Metamodels.NN(feature_dim=NUM_FEATURES, hidden_dim=HIDDEN_DIM, output_dim=NUM_OUTPUT, depth=DEPTH)
    optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
    loss_func = torch.nn.L1Loss()
    train_step = Metamodels.make_train_step(model, loss_func, optimizer)
    # Initialize utils for post processing
    export = PostProcess.ExportData('Ogden', RUN_NAME + str(train_num))
    loss_stats = {'train': [], "val": [] }
    epochFitImg = []
    fig, (ax1, ax2) = plt.subplots(2,1,figsize=(5,8))
    # BEGIN TRAINNING
    start = time()
    for epoch in range(1,EPOCHS+1):
        # Initialize train and validation error
        train_epoch_loss = 0
        val_epoch_loss = 0
        # Batch training data
        for x_batch, y_batch in train_loader:
            # Send mini-batches to the device
            x_batch, y_batch = x_batch.to(DEVICE), y_batch.to(DEVICE)
            train_loss = train_step(x_batch, y_batch)
            train_epoch_loss += train_loss
        # Stop training and batch validation data
        with torch.no_grad():
            for x_val_batch, y_val_batch in val_loader:
                x_val_batch = x_val_batch.to(DEVICE)
                y_val_batch = y_val_batch.to(DEVICE)
                model.eval()
                yhat = model(x_val_batch)
                val_loss = loss_func(y_val_batch, yhat)
                val_epoch_loss += val_loss.item()
        print(f'Epoch {epoch+0:03}: | Train Loss: {train_epoch_loss/len(train_loader):.5f} | Val Loss: {val_epoch_loss/len(val_loader):.5f}')
        loss_stats['train'].append(train_epoch_loss/len(train_loader))
        loss_stats['val'].append(val_epoch_loss/len(val_loader))
        # Full-set predictions (train + validation) for plotting/metrics.
        with torch.no_grad():
            model.eval()
            fX_val_tensor = model(X_val_tensor)
            fX_train_tensor = model(X_train_tensor)
            # Convert to numpy arrays
            fX_val = fX_val_tensor.data.numpy()
            fX_train = fX_train_tensor.data.numpy()
        # Generate gif for Training of star set
        export.stress_strain(Y_val, fX_val, epoch, val_epoch_loss/len(val_loader), StrainObservs, star_set, ax1, ax2)
        fig.canvas.draw() # draw the canvas, cache the renderer
        image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
        image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
        if (epoch==1) or (epoch % PRINT_INT == 0):
            epochFitImg.append(image)
    # Calculate Run Time
    end = time()
    run_time = (end - start)/60
    # Export total errors after training
    # (epoch, val_epoch_loss, fX_val and fX_train below are intentionally
    # the values left over from the final training epoch.)
    lc_stats['num_train'].append(train_num)
    lc_stats['train_time'].append(run_time)
    lc_stats = export.compute_error(Y_train, fX_train, lc_stats, 'train')
    lc_stats = export.compute_error(Y_val, fX_val, lc_stats, 'val')
    export.dict_to_csv(lc_stats,'LC')
    export.dict_to_csv(loss_stats,'EpochLoss')
    export.stress_strain(Y_val, fX_val, epoch, val_epoch_loss/len(val_loader), StrainObservs, star_set, ax1, ax2, final = True)
    export.epoch_curve(loss_stats)
    # Plot stress scattered data
    R2 = 1-lc_stats['R2_val'][-1]
    export.stress_scatter(Y_val, fX_val, R2)
    # Save training gif
    export.save_gif(train_num, epochFitImg)
    # Export trained models
    export.trained_NNs(scaler, model, train_num, HIDDEN_DIM, NUM_FEATURES, NUM_OUTPUT)
    plt.close('all')
# Plot Learning curve
export.learning_curve(num_train_list,lc_stats['MAE_train'],lc_stats['MAE_val'],'MAE')
export.learning_curve(num_train_list,lc_stats['MAPE_train'],lc_stats['MAPE_val'],'MAPE')
export.learning_curve(num_train_list,lc_stats['R2_train'],lc_stats['R2_val'],'R2')
|
from .package_analyzer import PackageAnalyzer
from xml.etree.cElementTree import parse
from xml.etree.cElementTree import ParseError
import logging
class PackageXmlAnalyzer(PackageAnalyzer):
    """
    Analyzer plug-in for ROS' package.xml files (catkin).
    """

    def analyze_file(self, path: str, dependencies: dict) -> dict:
        """Parse one package.xml and record its declared dependencies.

        Args:
            path: path of the package.xml file.
            dependencies: dependency mapping, updated in place.

        Returns:
            The (possibly updated) dependencies mapping.
        """
        # Parse xml; `with` guarantees the file handle is closed (the
        # original leaked it on both the success and the failure path).
        try:
            with open(path, "r") as file:
                tree = parse(file)
        except ParseError:
            logging.warning("[PackageXmlAnalyzer]: Could not parse " + path + "; omitting file.")
            return dependencies
        root = tree.getroot()
        packagename = root.find('name').text
        for tag in self._settings["package_xml_dependency_tags"]:
            # FIX: the original reused the name `element` as the loop
            # variable, so every tag after the first was searched under the
            # previous tag's last child instead of the document root.
            for dep_element in root.findall(tag):
                self.add_dependency(packagename, dep_element.text, dependencies)
        # FIX: the original only returned the mapping from the error branch;
        # return it on success as well, matching the declared return type.
        return dependencies

    def _analyze(self, path: str) -> dict:
        """Find and analyze every package.xml under `path`.

        Returns:
            Mapping of package name to its recorded dependencies.
        """
        packages = dict()
        filellist = self.search_files(path, "package.xml")
        for filename in filellist:
            logging.info("[PackageXmlAnalyzer]: Analyzing " + filename)
            self.analyze_file(filename, packages)
        return packages
|
import pytest
from flask import url_for
"""So far: Testing if the course routes are working
create_course - is the course creation process working
"""
"Test if create course route by lecturer is working"
def test_create_course(flask_app_client):
    """The create-course page should be reachable (200 after redirects)."""
    response = flask_app_client.get('/courses/create-course', follow_redirects=True)
    assert response.status_code == 200
"Test if course session creation by lecturer route is working"
def test_tutorial_session_course(flask_app_client):
    """The create-session page should be reachable (200 after redirects)."""
    response = flask_app_client.get('/courses/create_session', follow_redirects=True)
    assert response.status_code == 200
"Test if the view session by totor route is working"
def test_view_course_session(flask_app_client):
    """The tutor's view-session page should be reachable (200 after redirects)."""
    response = flask_app_client.get('/courses/view_session_tutor', follow_redirects=True)
    assert response.status_code == 200
"test tutorial session creation by lecturer"
def test_session_creation(flask_app_client):
    # NOTE(review): `follow_redirects = True` is INSIDE the `data` dict, so
    # it is submitted as a form field rather than passed to client.post().
    # Moving it out would follow redirects and could flip the `!= 200`
    # assertion below — confirm intent before fixing.
    client = flask_app_client
    request = client.post('/courses/create_session', data = dict(
        course_code = 'Coms3001',
        name = 'SomethingComs',
        venue = 'MSL001',
        start_time = '14:55:00',
        end_time = '15:30:00',
        day = '12.10.2020',
        key = '300400555',
        number_of_tutors = '6',
        lecturer = 'steve',
        course_lecturer = 'steve', follow_redirects = True))
    # Expect a non-200 response (e.g. a redirect) for this POST.
    assert request.status_code != 200
""" Use this template for testing routes. routes are the easiest to test for
def test_create_course(flask_app_client):
client = flask_app_client
request = client.get('/courses/create-course',follow_redirects=True)
assert request.status_code == 200
""" |
import glob
import sys
import pyaudio
import wave
import numpy as np
import tensorflow as tf
import librosa
from socket import *
from header import *
# Command-line contract: main.py [nodeNum] [posX] [posY]
if len(sys.argv) < 4:
    print("Compile error : python main.py [nodeNum] [posX] [posY]")
    exit(1)
FORMAT = pyaudio.paInt16
NODE = sys.argv[1]
posX = sys.argv[2]
posY = sys.argv[3]
# connection
# Open a TCP connection to the aggregation server (ADDRESS/PORT come from
# the shared `header` module).
clientSocket = socket(AF_INET, SOCK_STREAM)
try:
    clientSocket.connect((ADDRESS,PORT))
except Exception as e:
    print('cannot connect to the server;', e)
    exit()
# open pyaudio
# Microphone input stream; CHANNELS/RATE/CHUNK come from `header`.
p = pyaudio.PyAudio()
stream = p.open(format = FORMAT,
                channels = CHANNELS,
                rate = RATE,
                input = True,frames_per_buffer = CHUNK,
                #input_device_index = 0,
                #output_device_index = 0)
                )
while True:
try:
# initailize values
printer("Start")
sess = tf.Session()
init = tf.global_variables_initializer()
tf.reset_default_graph()
sess.run(init)
frames = []
# recording
for i in range(0, int(RATE/CHUNK*RECORD_SECONDS)):
data = stream.read(CHUNK, exception_on_overflow=False)
frames.append(data)
printer("Record")
# record/laod wav files
file_saver(frames, wave, p)
files = glob.glob(path)
raw_data = load(files)
printer("I/O")
# pre-processing
mfcc_data, y = mfcc4(raw_data, 1)
printer("MFCC")
X = np.concatenate((mfcc_data), axis=0)
X_input = X.reshape(-1,N_MFCC,N_FRAME,CHANNELS)
y = np.hstack(y)
n_labels = y.shape[0]
y_encoded = np.zeros((n_labels, N_UNIQ_LABELS))
y_encoded[np.arange(n_labels),y] = 1
X = tf.placeholder(tf.float32, shape=[None, N_MFCC*N_FRAME*CHANNELS])
X = tf.reshape(X, [-1, N_MFCC, N_FRAME, CHANNELS])
Y = tf.placeholder(tf.float32, shape=[None, N_UNIQ_LABELS])
# CNN layer
logits = conv(X)
# cost optimizer needed??? -> time consuming
printer("layer")
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(cost)
printer("cost-optimizer")
# model saver
sess = tf.Session()
saver = tf.train.Saver()
saver.restore(sess, './model/CNN/cnn_model')
printer("Model saver")
# prediction
y_pred = sess.run(tf.argmax(logits,1), feed_dict={X:X_input})
#y_true = sess.run(tf.argmax(y_encoded,1))
from sklearn.metrics import accuracy_score
result = "%2.2f" %((accuracy_score(y, y_pred)*100)%100)
printer(result)
### send packet
message = NODE + ":" + str(result) + ":" + posX + ":" + posY
clientSocket.send(message.encode())
printer("TCP")
# exception handle
except KeyboardInterrupt:
print("wait seconds to terminate...")
stream.stop_stream()
stream.close()
p.terminate()
clientSocket.close()
break
|
import nltk
from collections import Counter
from nltk.stem import WordNetLemmatizer
import fileinput
from nltk.corpus import treebank
# Shared module state: one lemmatizer plus the accumulated results, filled
# in by verbage()/makeDict()/makeLists() and read back through the
# *give_variables accessors at the bottom of the file.
lemmatiser = WordNetLemmatizer()
verb_list = []
object_list = []
sentence_number = []
paragraph_number = []
with open('logapps_appendix.txt', "r") as test_words:
    filestring = str(test_words.read())
def verbage(input_string):
    """POS-tag each period-delimited sentence of *input_string* and populate
    the module-level verb/object/sentence/paragraph lists via makeDict().

    Returns the four module-level result lists.
    """
    tagged_sentences = []
    for sentence_text in input_string.split('.'):
        words = nltk.word_tokenize(sentence_text)
        tagged_sentences.append(nltk.pos_tag(words))
    makeDict(tagged_sentences)
    return verb_list, object_list, sentence_number, paragraph_number
def makeDict(tagList):
    """Walk POS-tagged sentences, find verbs, and delegate to makeLists()
    to collect (verb, object, sentence#, paragraph#) into the module lists.

    Paragraph numbering is hard-coded for this document: sentences 1-3 are
    paragraph 1, sentence 4 is paragraph 2, sentences 5+ are paragraph 3.
    """
    paragraph_num = 1
    sent_num = 1
    for sentence in tagList:
        if sent_num == 4:
            paragraph_num = 2
        elif sent_num >= 5:
            paragraph_num = 3
        index = 0
        while ( index < len(sentence)-1):
            # Skip prepositions by jumping two tokens ahead.
            # NOTE(review): this jump (and the copula skip below) can push
            # `index` past the end of short sentences before the next
            # subscript — confirm bounds are safe for the input corpus.
            if sentence[index][1] == 'IN':
                index += 2
            x = (lemmatiser.lemmatize(sentence[index][0] , pos="v"))
            # Skip copulas ("be"): not treated as interesting action verbs.
            if x == "be":
                index +=1
            # Any verb form (VB/VBD/VBG/VBN/VBP/VBZ) triggers extraction.
            if sentence[index][1] == 'VB' or sentence[index][1] =='VBD' or sentence[index][1] == 'VBG' or sentence[index][1] =='VBN' or sentence[index][1] == 'VBP' or sentence[index][1] =='VBZ':
                makeLists(sentence,index,sent_num,paragraph_num)
            index += 1
        sent_num +=1
def printSent(tagList):
    """Disabled debug helper; its body is intentionally commented out."""
    # for key in final_dict.keys():
    # print (key, final_dict[key], '\n')
    #
    # x = getKeys(final_dict)
    # print(x)
    pass
def getKeys(dictionary):
    """Return the keys view of *dictionary*."""
    return dictionary.keys()
def makeLists(sentence,verb_index,sent_num,paragraph_num):
    """Extract the object phrase following the verb at *verb_index* and
    append (lemmatized verb, object string, sentence#, paragraph#) to the
    module-level result lists. Records at most one entry, then returns.
    """
    string = ' '
    index = verb_index + 1
    while index < len(sentence) :
        #if there is a noun followed by an adverb
        if index != len(sentence)-1:
            # Noun (NN/NNS/NNP/NNPS) immediately followed by an adverb
            # (RB/RBR/RBS): record the noun and stop here.
            if (sentence[index][1] == 'NN' or sentence[index][1] =='NNS' or sentence[index][1] == 'NNP' or sentence[index][1] =='NNPS') and (sentence[index +1 ][1] == 'RB' or sentence[index +1 ][1] =='RBR' or sentence[index + 1][1] == 'RBS'):
                string += sentence[index][0] + " "
                x = (lemmatiser.lemmatize(sentence[verb_index][0] , pos="v"))
                verb_list.append(x)
                object_list.append(string)
                sentence_number.append(sent_num)
                paragraph_number.append(paragraph_num)
                return
        #if there is punctuation or adjectives skip over them
        # NOTE(review): this inner skip loop advances `index` without a
        # bounds check — a sentence ending in adjectives/punctuation would
        # raise IndexError. Confirm the corpus cannot trigger this.
        while (sentence[index][1] == 'JJ' or sentence[index][1] =='JJR' or sentence[index][1] == 'JJS' or sentence[index][1] == ',' or sentence[index][1] =='.' or sentence[index][1] == ':' or sentence[index][1] == 'CC' ):
            index += 1
        string += sentence[index][0] + " "
        index += 1
    # Fall-through: record whatever object string was accumulated.
    x = (lemmatiser.lemmatize(sentence[verb_index][0] , pos="v"))
    verb_list.append(x)
    object_list.append(string)
    sentence_number.append(sent_num)
    paragraph_number.append(paragraph_num)
    return
def give_variables():
    """Accessor for the collected lemmatized verbs."""
    return verb_list
def ygive_variables():
    """Accessor for the collected object phrases."""
    return object_list
def zgive_variables():
    """Accessor for the per-entry sentence numbers."""
    return sentence_number
def wgive_variables():
    """Accessor for the per-entry paragraph numbers."""
    return paragraph_number
# Run the extraction over the loaded document (fills the module-level lists).
verbage(filestring)
|
import requests
import csv
from dagster import solid, DagsterType, OutputDefinition, InputDefinition, TypeCheck
def is_list_of_dicts(_, value):
    """Type-check helper: True iff *value* is a list whose elements are all
    dicts (vacuously True for an empty list)."""
    if not isinstance(value, list):
        return False
    return all(isinstance(item, dict) for item in value)
def less_simple_data_frame_type_check(_, value):
    """Dagster type check for LessSimpleDataFrame.

    Validates that *value* is a list of dicts and that every row has exactly
    the same fields (in the same order) as the first row. On success returns
    a TypeCheck carrying summary metadata (row/column counts, column names).
    """
    if not isinstance(value, list):
        return TypeCheck(
            success=False,
            description=f"LessSimpleDataFrame should be a list of dicts, got {type(value)}",
        )
    # FIX: guard the empty frame — the original indexed value[0]
    # unconditionally and raised IndexError, even though the metadata block
    # below already handles len(value) == 0.
    fields = [field for field in value[0].keys()] if value else []
    for i in range(len(value)):
        row = value[i]
        idx = i + 1  # 1-based row number for human-readable messages
        if not isinstance(row, dict):
            return TypeCheck(
                success=False,
                description=(
                    f"LessSimpleDataFrame should be a list of dicts, got {type(row)} for row {idx}"
                ),
            )
        row_fields = [field for field in row.keys()]
        if fields != row_fields:
            return TypeCheck(
                success=False,
                description=(
                    f"Rows in LessSimpleDataFrame should have the same fields, got {row_fields} "
                    f"for row {idx}, expected {fields}"
                ),
            )
    return TypeCheck(
        success=True,
        description="LessSimpleDataFrame summary statistics",
        metadata={
            "n_rows": len(value),
            "n_cols": len(value[0].keys()) if len(value) > 0 else 0,
            "column_names": str(
                list(value[0].keys()) if len(value) > 0 else []
            ),
        },
    )
# Dagster type wrapping the validating check above; used to type the solid
# inputs/outputs below.
SimpleDataFrame = DagsterType(
    name="SimpleDataFrame",
    type_check_fn=less_simple_data_frame_type_check,
    description="A naive representation of a data frame, e.g., as returned by csv.DictReader.",
)
@solid(output_defs=[OutputDefinition(SimpleDataFrame)])
def download_type_check_csv(context):
    """Download the cereal CSV and parse it into a list of row dicts."""
    response = requests.get("https://docs.dagster.io/assets/cereal.csv")
    lines = response.text.split("\n")
    context.log.info("Read {n_lines} lines".format(n_lines=len(lines)))
    return list(csv.DictReader(lines))
@solid(input_defs=[InputDefinition("cereals", SimpleDataFrame)])
def sort_by_calories_type(context, cereals):
    """Log the name of the most caloric cereal.

    NOTE(review): the `calories` values come from csv.DictReader and are
    strings, so this sort is lexicographic — confirm whether an int cast
    is intended before relying on the ordering.
    """
    sorted_cereals = sorted(cereals, key=lambda cereal: cereal["calories"])
    # FIX: the `["name"]` subscript was outside the f-string braces, so the
    # log line printed the whole row dict followed by a literal '["name"]'.
    context.log.info(f'Most caloric cereal: {sorted_cereals[-1]["name"]}')
|
import numpy as np
class suffix:
    """Record pairing a suffix's starting index with its current rank pair."""
    def __init__(self):
        # index: starting position of this suffix in the original text.
        self.index = 0
        # rank[0]: current rank; rank[1]: rank of the suffix k chars ahead
        # (-1 when that position falls past the end of the text).
        self.rank = [0, 0]
# This is the main function that takes a
# string 'txt' of size n as an argument,
# builds and return the suffix array for
# the given string
def buildSuffixArray(txt, n):
    """Build the suffix array of txt[0:n] by prefix doubling.

    Initial ranks come from the first two characters of each suffix
    (ord(c) - ord('a'), so characters below 'a' such as '$' get negative
    ranks, which still sort correctly).  Each round doubles the compared
    prefix length (4, 8, ...) and re-sorts: O(n log^2 n) overall.
    Returns a list where element r is the start index of the r-th smallest
    suffix.
    """
    # A structure to store suffixes
    # and their indexes
    suffixes = [suffix() for _ in range(n)]
    # Store suffixes and their indexes in
    # an array of structures. The structure
    # is needed to sort the suffixes alphabetically
    # and maintain their old indexes while sorting
    for i in range(n):
        suffixes[i].index = i
        suffixes[i].rank[0] = (ord(txt[i]) -
                               ord("a"))
        suffixes[i].rank[1] = (ord(txt[i + 1]) -
                               ord("a")) if ((i + 1) < n) else -1
    # Sort the suffixes according to the rank
    # and next rank
    suffixes = sorted(
        suffixes, key=lambda x: (
            x.rank[0], x.rank[1]))
    # At this point, all suffixes are sorted
    # according to first 2 characters. Let
    # us sort suffixes according to first 4
    # characters, then first 8 and so on
    ind = [0] * n  # This array is needed to get the
    # index in suffixes[] from original
    # index. This mapping is needed to get
    # the rank of the suffix k/2 characters ahead.
    k = 4
    while (k < 2 * n):
        # Assigning rank and index
        # values to first suffix
        rank = 0
        prev_rank = suffixes[0].rank[0]
        suffixes[0].rank[0] = rank
        ind[suffixes[0].index] = 0
        # Assigning rank to suffixes
        for i in range(1, n):
            # If first rank and next ranks are
            # same as that of previous suffix in
            # array, assign the same new rank to
            # this suffix
            if (suffixes[i].rank[0] == prev_rank and
                    suffixes[i].rank[1] == suffixes[i - 1].rank[1]):
                prev_rank = suffixes[i].rank[0]
                suffixes[i].rank[0] = rank
            # Otherwise increment rank and assign
            else:
                prev_rank = suffixes[i].rank[0]
                rank += 1
                suffixes[i].rank[0] = rank
            ind[suffixes[i].index] = i
        # Assign next rank to every suffix
        for i in range(n):
            nextindex = suffixes[i].index + k // 2
            suffixes[i].rank[1] = suffixes[ind[nextindex]].rank[0] \
                if (nextindex < n) else -1
        # Sort the suffixes according to
        # first k characters
        suffixes = sorted(
            suffixes, key=lambda x: (
                x.rank[0], x.rank[1]))
        k *= 2
    # Store indexes of all sorted
    # suffixes in the suffix array
    suffixArr = [0] * n
    for i in range(n):
        suffixArr[i] = suffixes[i].index
    # Return the suffix array
    return suffixArr
# A utility function to print an array
# of given size
def printArr(arr, n):
    """Print the first n entries of arr, space-separated, then end the line."""
    # Index explicitly: callers also pass dicts keyed 0..n-1 (the LCP table).
    for pos in range(n):
        print(arr[pos], end=" ")
    print()
def kasai(txt, suffixArr):
    """Kasai's algorithm: LCP of each suffix with its successor in sorted order.

    Returns a dict mapping rank r (0..n-1) to the length of the longest common
    prefix between the r-th and (r+1)-th smallest suffixes of txt.  The last
    rank has no successor and is fixed at 0.  Runs in O(n).

    Fix: the original never created the entry for rank n-1 (the `continue`
    branch skipped it), so consumers indexing ranks 0..n-1 — e.g. the
    printArr(lcp, n) call in the demo below — raised KeyError.
    """
    n = len(suffixArr)
    # Pre-fill every rank so rank n-1 always exists.
    lcp = {i: 0 for i in range(n)}
    # invSuff[s] = rank of the suffix starting at text position s.
    invSuff = {}
    for i in range(n):
        invSuff[suffixArr[i]] = i
    k = 0
    for i in range(n):
        if invSuff[i] == n - 1:
            # The largest suffix has no successor; reset the carried overlap.
            k = 0
            continue
        # Start of the suffix ranked immediately after suffix i.
        j = suffixArr[invSuff[i] + 1]
        while i + k < n and j + k < n and txt[i + k] == txt[j + k]:
            k += 1
        lcp[invSuff[i]] = k
        if k > 0:
            # Kasai's invariant: the next position's LCP is at least k-1.
            k -= 1
    return lcp
if __name__ == "__main__":
    # Demo: build and print the suffix array and LCP table for a sample text.
    txt = "bddbbadcaccaccac$"
    # txt = 'banana'
    n = len(txt)
    print('Suffix array:')
    suffixArr = buildSuffixArray(txt, n)
    print(suffixArr)
    printArr(suffixArr, n)
    print('LCP Array:')
    # NOTE(review): kasai returns a dict; printArr(lcp, n) indexes ranks
    # 0..n-1, so every rank must be present in the dict — confirm.
    lcp = kasai(txt, suffixArr)
    printArr(lcp, n)
    # DESPITE ERRORS IT WORKS!
|
from splinter import Browser
from bs4 import BeautifulSoup as bs
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
def scrape():
    """Scrape Mars news, featured image, facts table, and hemisphere data.

    Returns a dict with keys: news_title, news_p, featured_image_url, table,
    hem_title / hem_img_url (legacy flat keys, last hemisphere only) and
    hemisphere_image_urls (list of {'title', 'img_url'} dicts).
    """
    executable_path = {'executable_path': ChromeDriverManager().install()}
    browser = Browser('chrome', **executable_path, headless=False)
    mars_data = {}

    # --- NASA Mars News ---
    url = 'https://redplanetscience.com/'
    browser.visit(url)
    soup = bs(browser.html, 'html.parser')
    mars_data['news_title'] = soup.find('div', class_='content_title').text
    mars_data['news_p'] = soup.find('div', class_='article_teaser_body').text

    # --- JPL Mars Space Images ---
    image_url = 'https://spaceimages-mars.com/'
    browser.visit(image_url)
    soup = bs(browser.html, 'html.parser')
    featured_image_url = soup.find('img', class_='headerimage fade-in')
    mars_data['featured_image_url'] = image_url + featured_image_url['src']

    # --- Mars Facts ---
    url = 'https://galaxyfacts-mars.com/'
    tables = pd.read_html(url)
    df = tables[0]
    new_header = df.iloc[0]
    df = df[1:]
    df.columns = new_header
    df.set_index('Mars - Earth Comparison', inplace=True)
    mars_data['table'] = df.to_html()

    # --- Mars Hemispheres ---
    url = 'https://marshemispheres.com/'
    browser.visit(url)
    hemisphere_image_urls = []
    # Fix: splinter's Browser has no find_element_by_css_selector (that is a
    # selenium WebDriver method, and it takes no class_ kwarg); use
    # find_by_css with a CSS selector instead.
    hem_links = browser.find_by_css('a.product-item h3')
    for item in range(len(hem_links)):
        hemisphere = {}
        # Fix: the original clicked the FIRST link on every iteration; index
        # the element list so each hemisphere page is visited once.
        browser.find_by_css('a.product-item h3')[item].click()
        hemisphere['title'] = browser.find_by_css('h2.title').text
        sample_element = browser.find_link_by_text('Sample').first
        hemisphere['img_url'] = sample_element['href']
        # Legacy flat keys kept for backward compatibility (last item wins).
        mars_data['hem_title'] = hemisphere['title']
        mars_data['hem_img_url'] = hemisphere['img_url']
        # Fix: the original appended an always-empty dict and never exposed
        # the collected list on the returned payload.
        hemisphere_image_urls.append(hemisphere)
        # Navigate backwards to the hemisphere index page.
        browser.back()
    mars_data['hemisphere_image_urls'] = hemisphere_image_urls

    # Quit the browser
    browser.quit()
    return mars_data
|
import threading
import time
class Job(threading.Thread):
    """A thread that prints a timestamp every second and supports
    pause/resume/stop via two events."""

    def __init__(self, *args, **kwargs):
        super(Job, self).__init__(*args, **kwargs)
        # __flag set -> run freely; cleared -> pause (wait() blocks).
        self.__flag = threading.Event()
        self.__flag.set()
        # __running set -> keep looping; cleared -> exit run().
        self.__running = threading.Event()
        self.__running.set()

    def run(self):
        # Fix: is_set() replaces the camelCase isSet(), which was deprecated
        # in Python 3.10 and removed in 3.12.
        while self.__running.is_set():
            # Returns immediately while set; blocks while paused.
            self.__flag.wait()
            print(time.time())
            time.sleep(1)

    def pause(self):
        self.__flag.clear()  # set False

    def resume(self):
        self.__flag.set()  # set True

    def stop(self):
        # Release a paused thread first so run() can observe the stop.
        self.__flag.set()
        self.__running.clear()
# Demo: run for ~3 s, pause 3 s, resume 3 s, pause 2 s, then stop.
a = Job()
a.start()
time.sleep(3)
a.pause()
time.sleep(3)
a.resume()
time.sleep(3)
a.pause()
time.sleep(2)
a.stop()
|
from source.world import World
from source.slam import SLAM2D
import argparse
# Command-line configuration for the 2-D SLAM demo.
parser = argparse.ArgumentParser()
parser.add_argument('--steps', type=int)
parser.add_argument('--num_landmarks', type=int)
parser.add_argument('--world_size', type=int, default=100)
# Fix: these are continuous quantities with float defaults; type=int rejected
# inputs such as "2.5" (int('2.5') raises ValueError at parse time).
parser.add_argument('--measurement_range', type=float, default=50.0)
parser.add_argument('--measurement_noise', type=float, default=2.0)
parser.add_argument('--motion_noise', type=float, default=2.0)
args = parser.parse_args()
if __name__ == '__main__':
    # Build the simulated world and the SLAM solver from the CLI arguments.
    world = World(args.world_size, args.num_landmarks)
    slam = SLAM2D(args.steps, args.world_size, args.num_landmarks, args.measurement_noise, args.motion_noise)
    # Make data and run SLAM
    world.make_data(args.steps, args.measurement_range, args.motion_noise, args.measurement_noise)
    slam.run_slam(world.data)
    # Get estimated robot and landmark positions
    poses, landmarks = slam.get_poses_landmarks()
    # Print results
    slam.show_results()
    world.display_world(poses, landmarks)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Flask
from flask.ext.uwsgi_websocket import GeventWebSocket
import subprocess
import os.path
from contextlib import closing
from selenium.webdriver import PhantomJS # pip install selenium
from selenium.webdriver.support.ui import WebDriverWait
import urllib2
import requests
import bs4
import MySQLdb
import time
import re
import random
from multiprocessing import Process , Queue , JoinableQueue
import logging
from subprocess import call
import sys
import urllib
import json
app = Flask(__name__)
websocket = GeventWebSocket(app)
@websocket.route('/echo')
def echo(ws):
    """WebSocket endpoint: receive a comma-separated keyword list and stream
    back aggregated Gmarket search results as JSON (Python 2 code)."""
    try:
        msg = ws.receive()
        if(len(msg)!=0):
            inputs = msg.split(",")
            print inputs
            """
            cmd = list()
            cmd.append('/Users/bbiiggppiigg/Sites/tt/search2.py')
            ws.send(msg)
            filename = '%s.out' % msg
            if os.path.isfile(filename):
                with open((msg+'.out')) as f:
                    ws.send(f.read())
                return
            with open(filename,"w") as f:
                proc = subprocess.Popen(cmd+inputs, stdout=(f))
                proc.wait()
            with open((msg+'.out')) as f:
                ws.send(f.read())
            """
            gen_results(ws,inputs)
    except Exception ,e :
        # Report any failure back over the socket instead of dropping it.
        print e
        ws.send(str(e))
if __name__ == '__main__':
    # NOTE(review): app.run() blocks here, yet gen_url/gen_dict/fetch_pages/
    # gen_results are defined BELOW this point.  Under uwsgi the module is
    # fully imported first, but running this file directly would serve
    # requests before those helpers exist — confirm the deployment mode.
    app.run(master=True,host='localhost',processes=8,threaded=True)
def gen_url(keyword):
    """Build the Gmarket list-view search URL for an already-urlencoded
    keyword query string, fixed to category 100000003 and 500 items/page."""
    base = "http://gsearch.gmarket.co.kr/Listview/Search?"
    suffix = "&GdlcCd=100000003&pageSize=500"
    return base + keyword + suffix
def gen_dict(soup, remainder_count=0):
    """Extract {item_url: (product_name, price, seller)} from one Gmarket
    listing page parsed with BeautifulSoup.

    remainder_count is accepted for interface compatibility; every <tr> in
    the <tbody> is parsed regardless.
    """
    listings = dict()
    table_body = soup.find('tbody')
    for row in table_body.find_all('tr'):
        name_tag = row.find('li', 'item_name')
        raw_price = row.find('li', 'discount_price').text
        title = name_tag.text.replace('\t', '').replace('\n', '')
        # The href embeds the real URL inside a javascript call; grab the
        # http://... portion up to the closing quote.
        url = re.search('http://.*(?=\')', name_tag.find('a')['href']).group(0)
        amount = int(raw_price.replace(u"₩", "").replace(",", ""))
        vendor = row.find('ul', 'seller').a.text
        listings[url] = (title, amount, vendor)
    return listings
def fetch_pages(q,keyword):
    """Worker: fetch every result page for *keyword* with PhantomJS and put
    the merged {url: (name, price, seller)} dict on queue *q*.

    Python 2 only (print statements, `except Exception, e`, integer
    division for the page count).
    """
    f = { 'keyword' : keyword}
    s = urllib.urlencode(f)
    url_base = gen_url(s)
    print url_base
    result = {}
    with closing (PhantomJS()) as browser:
        try:
            # First page also tells us the total item count.
            browser.get(url_base+"&page="+str(1))
            WebDriverWait(browser, timeout=30).until(lambda x: x.find_element_by_id('sItemCount'))
            page_source = browser.page_source
            soup = bs4.BeautifulSoup(page_source,"lxml")
            item_count = soup.find(id="sItemCount");
            print item_count.text
            try:
                item_count = int(item_count.text.replace(",",""))
            except Exception , e:
                # Non-numeric count means no results: report an empty dict.
                print "0 results"
                q.put(result)
                return
            fetch_item_count = item_count
            fetch_page_count = fetch_item_count / 500
            remainder_count = fetch_item_count % 500
            if(remainder_count!=0):
                fetch_page_count = fetch_page_count+1
            print(fetch_item_count,fetch_page_count,remainder_count)
        except Exception , e:
            logging.warning("Fetch Source Exception : " + str(e) )
            raise
        result.update( gen_dict(soup))
        time.sleep(1.5)  # be polite between page fetches
        for x in range(fetch_page_count-1):
            browser.get(url_base+"&page="+str(x+2))
            WebDriverWait(browser, timeout=20).until(lambda x: x.find_element_by_id('sItemCount'))
            page_source = browser.page_source
            soup = bs4.BeautifulSoup(page_source,"lxml")
            # NOTE(review): x ranges over 0..fetch_page_count-2, so the
            # x==fetch_page_count-1 branch below can never be taken — the
            # remainder_count handoff to gen_dict looks dead.  Confirm.
            if(x==fetch_page_count-1 and remainder_count!=0 ):
                result.update(gen_dict(soup,remainder_count))
            else:
                result.update(gen_dict(soup))
            time.sleep(1.5)
        q.put(result);
def gen_results(ws,argvs):
    """Fetch all keyword result pages in parallel worker processes,
    aggregate per-seller statistics, and send a JSON summary over the
    websocket *ws*.

    Python 2 only (print statements, dict.iteritems, integer division for
    avg_price, `except Exception, e`).
    """
    try:
        result = {}
        sellers ={}
        seller_list = list()
        q = JoinableQueue()
        # One worker process per keyword; each puts its url->info dict on q.
        for x in range(len(argvs)):
            p = Process(target=fetch_pages,args=(q,argvs[x]))
            p.start()
        for x in range(len(argvs)):
            result.update(q.get())
        print "HI"
        if(len(result)==0):
            ws.send("0 Results");
            return
        total_count = len(result)
        total_price = 0
        # Group products by seller and accumulate the total price.
        for url in result:
            (product_name,price,seller) = result[url]
            total_price = price+total_price
            if seller not in sellers:
                sellers[seller] = list()
            sellers[seller].append((product_name,price,url))
        for key, value in sellers.iteritems():
            temp = [key,value]
            seller_list.append(temp)
        # Sellers with the most matching products first.
        seller_list = sorted(seller_list,key=lambda x: -len(x[1]))
        avg_price = total_price / total_count
        # The HTML prints below go to the server log, not to the client.
        print "<table>"
        print "<tr><th>Number of Matching Product</th><td>"+str(total_count)+"</td></tr>"
        print "<tr><th>Average Price</th><td>"+str(avg_price)+"</td> </tr>"
        print "<tr><th>Number of Matching Seller</th><td>"+str(len(sellers))+"</td></tr>"
        print "</table>"
        #print sellers
        print "<table>"
        print "<tr><th>Seller ID</th><th>Number of Matching Products</th></tr>"
        for x in seller_list:
            print "<tr><td>"+x[0].encode('utf-8')+"</td><td>"+str(len(x[1]))+"</td></tr>"
        print "</table>"
        return_data = {};
        return_data['num_products']=total_count
        return_data['avg_price']=avg_price
        return_data['num_sellers']=len(sellers)
        return_data['seller_list']=seller_list;
        json_data = json.dumps(return_data)
        print json_data
        """
        ws.send("<table>")
        ws.send("<tr><th>Number of Matching Product</th><td>"+str(total_count)+"</td></tr>")
        ws.send( "<tr><th>Average Price</th><td>"+str(avg_price)+"</td> </tr>")
        ws.send("<tr><th>Number of Matching Seller</th><td>"+str(len(sellers))+"</td></tr>")
        ws.send("</table>")
        ws.send("<table>")
        ws.send("<tr><th>Seller ID</th><th>Number of Matching Products</th></tr>")
        for x in seller_list:
            ws.send("<tr><td>"+x[0].encode('utf-8')+"</td><td>"+str(len(x[1]))+"</td></tr>")
        ws.send("</table>")
        """
        ws.send(json_data)
    except Exception, e:
        print e
|
from setuptools import setup
def readme():
    """Return the contents of README.rst for use as the long description."""
    with open('README.rst') as handle:
        return handle.read()
# Package metadata for the Regression_theano distribution.  Note this is a
# Python 2.7-era package (py2-only classifier, old scikits.statsmodels dep).
setup(name='Regression_theano',
      version='0.1',
      description='linear and logistic regression in Theano',
      long_description=readme(),
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Intended Audience :: End Users/Desktop',
          'License :: OSI Approved :: GNU General Public License (GPL)',
          'Natural Language :: English',
          'Operating System :: POSIX :: Linux',
          'Programming Language :: Python :: 2.7',
          'Topic :: Software Development :: Libraries :: Python Modules',
      ],
      keywords='linear logistic regression theano',
      url='https://github.com/mlampros/Regression_Theano',
      author='Mouselimis Lampros',
      packages=['Regression_theano'],
      install_requires=[
          'numpy', 'sklearn', 'scikits.statsmodels', 'theano',
      ],
      include_package_data=True,
      zip_safe=False)
|
import sys
if __name__ == "__main__":
    # Read a 16x16 pairwise win-probability matrix from stdin:
    # rate[i][j] is the probability that team i beats team j.
    rate = []
    for i in range(16):
        nums = list(map(float, sys.stdin.readline().strip().split()))
        rate.append(nums)
    # Survival probabilities after each knockout round.
    finaleight = [0 for i in range(16)]
    finalfour = [0 for i in range(16)]
    finaltwo = [0 for i in range(16)]
    champion = [0 for i in range(16)]
    # NOTE(review): debug print left in by the author; it is part of stdout.
    print(rate[1][2])
    # NOTE(review): dead store — immediately overwritten by the loop below.
    finaleight[0] = 1
    # Round of 16: each team plays its bracket neighbour.
    for i in range(8):
        finaleight[2 * i] = rate[2 * i][2 * i + 1]
        finaleight[2 * i + 1] = rate[2 * i + 1][2 * i]
    # Quarter-finals: each survivor meets one of the two possible
    # opponents from the adjacent pair, weighted by their survival odds.
    for j in range(4):
        finalfour[4 * j] = finaleight[4 * j] * (rate[4 * j][4 * j + 2]*finaleight[4 * j + 2] + rate[4 * j][4 * j + 3]*finaleight[4 * j + 3])
        finalfour[4 * j + 1] = finaleight[4 * j + 1] * (rate[4 * j + 1][4 * j + 2]*finaleight[4 * j + 2] + rate[4 * j + 1][4 * j + 3]*finaleight[4 * j + 3])
        finalfour[4 * j + 2] = finaleight[4 * j + 2] * (rate[4 *j + 2][4 * j]*finaleight[4 * j] + rate[4 * j + 2][4 * j + 1]*finaleight[4 * j + 1])
        finalfour[4 * j + 3] = finaleight[4 * j + 3] * (rate[4 *j + 3][4 * j]*finaleight[4 * j] + rate[4 * j + 3][4 * j + 1]*finaleight[4 * j + 1])
    # Semi-finals: sum over the four possible opponents from the other
    # quarter of the same half-bracket.
    for k in range(2):
        for x in range(4):
            finaltwo[8 * k + x] = 0.0
            for y in range(4):
                finaltwo[8 * k + x] += finalfour[8 * k + x] * finalfour[8 * k + 4 + y] * rate[8 * k + x][8 * k + 4 + y]
        for z in range(4):
            finaltwo[8 * k + 4 + z] = 0.0
            for s in range(4):
                finaltwo[8 * k + 4 + z] += finalfour[8 * k + 4 + z] * finalfour[8 * k + s] * rate[8 * k + 4 + z][8 * k + s]
    # Championship: each finalist meets any of the eight teams from the
    # opposite half of the draw.
    for k in range(2):
        for x in range(8):
            champion[8 * k + x] = 0.0
            # Upper half of the draw (k == 0): opponents come from teams 8-15.
            if k == 0:
                for y in range(8):
                    m = finaltwo[8 * k + x]
                    n = finaltwo[8 * k + y + 8]
                    p = rate[8 * k + x][8 * k + 8 + y]
                    champion[8 * k + x] += m * n * p
            # Lower half of the draw (k == 1): opponents come from teams 0-7.
            if k == 1:
                for z in range(8):
                    m = finaltwo[8 * k + x]
                    n = finaltwo[8 * k + z - 8]
                    p = rate[8 * k + x][8 * k + z - 8]
                    champion[8 * k + x] += m * n * p
            champion[8 * k + x] = round(champion[8 * k + x], 10)
    print(str(champion).replace("[","").replace("]","").replace(",",""))
# A 16-bit field with the bit pattern 1010101010101010 marks an ACK packet.
# Fix: int(..., 2) parses the literal as binary (0xAAAA); the original used
# base 10 and stored the meaningless decimal 1010101010101010.
ACKmagicno = int('1010101010101010', 2)
# Read the information from the command line.
import sys
# Fix: the module is sys.argv, not sys.args; argv includes the script name,
# so three user arguments mean len == 4.  Also stop on bad usage instead of
# falling through to the (now invalid) indexing below.
if len(sys.argv) != 4:
    print("Wrong Input")
    sys.exit(1)
prob = float(sys.argv[-1])
file = sys.argv[-2]
port = int(sys.argv[-3])
seqno = 0
import socket
# https://wiki.python.org/moin/UdpCommunication
cl_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
cl_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Fix: bind() takes a single (host, port) tuple, not two positional args.
cl_socket.bind(('', port))
def checksum(message, l):
    """Compute the 16-bit ones'-complement (Internet-style) checksum.

    message -- bytes-like payload
    l       -- length of message in bytes (kept for interface compatibility)
    Returns the complemented 16-bit sum as an int in [0, 0xffff].

    Fixes vs. the original: the loop iterated over the undefined name `msg`
    and the return referenced the undefined name `s` (both NameErrors).
    """
    # Odd-length payloads are padded with an ASCII '0' byte (0x30), matching
    # the original padding choice, so the data splits into 16-bit words.
    if l % 2 != 0:
        message += "0".encode('utf-8')
    total = 0
    for i in range(0, len(message), 2):
        # Little-endian 16-bit word, then end-around carry fold.
        word = message[i] + (message[i + 1] << 8)
        total = ((total + word) & 0xffff) + ((total + word) >> 16)
    return ~total & 0xffff
def sequence_generator():
    """Advance the module-level 32-bit sequence number, wrapping to 0 after
    2**32 - 1.

    Fix: the original assigned seqno without a global declaration, which
    made it a local and raised UnboundLocalError on the first call.
    """
    global seqno
    if seqno == int((2 ** 32) - 1):
        seqno = 0
    else:
        seqno += 1
def prob_gen():
    """Return a uniform random float in [0.0, 1.0) for simulating packet loss.

    Fix: random.random() takes no arguments, so the original
    rnd.random(seed=5) raised TypeError.  (Seeding inside every call would
    also make each draw identical, defeating the purpose.)
    """
    import random as rnd
    return rnd.random()
|
# Unique Binary Search Trees
# Given n, how many structurally unique BST's (binary search trees) that store values 1 ... n ?
# Explanation: We could use dictionary to save some look up time since many of recursive calls
# hit the same n numbers
# Run Time: O(n log(n)) since we don't repeat anything that has been already explored but still
# go for log(n) depth to calculate answers up to n
def sol_dp(n):
    """Number of structurally unique BSTs storing 1..n (the Catalan number).

    Bottom-up DP: ds[i] holds the answer for i nodes.  Exploits left/right
    mirror symmetry so each i only iterates half of its possible roots.
    Fix: n == 0 now returns 1 (the empty tree), matching Catalan(0) and the
    recursive sol_rec below; the original returned 0.
    """
    if n == 0:
        return 1
    if n < 3:
        return n
    ds = [1] * (n + 1)
    for i in range(2, n + 1):
        depth = (i + 1) // 2
        sol = 0
        # Roots whose two subtree sizes differ come in mirror pairs.
        for j in range(0, depth - 1):
            sol += 2 * ds[j] * ds[i - j - 1]
        if i % 2:
            # Odd i: the central root splits into two equal subtrees.
            sol += ds[depth - 1] ** 2
        else:
            # Even i: the remaining root pair straddles the middle.
            sol += 2 * ds[depth] * ds[i - depth - 1]
        ds[i] = sol
    return ds[n]
# Explanation: The trick here is that we have to calculate the number of different numerical
# calculation we have to come up for getting the final answer.
# For example:
# <1> -> 1 = 1
# <2> -> 1 2 1 2
# \ / = \ / = 1 x 2 = 2
# 2 1 <1> <1>
# <3> -> 1 3 2
# \ / / \ = 2 x 2 + 1 x 1 = 5
# <2> <2> <1> <1>
# <4> -> 1 4 2 3
# \ / / \ / \ = 5 x 2 + 1 x 2 x 2 = 14
# <3> <3> <1> <2> <2> <1>
# ...
# There is a pattern: to get the total number of possible BST, we have to go
# int((n + 1) / 2) deeper from n to get the answer. For example, for n = 3,
# we have to check int((3 + 1) / 2) = 2 deeper from n, which 2 & 1. Therefore,
# as recursive solution, it should call sol_rec() function int((n + 1) / 2) times.
# Run Time: O(log(n)^n). For each n, to n-1, n-2, ..., 1 recursively to calculate one type of tress
# with the certain root. We have to do this for round_up(n / 2), so it's O(log(n)^n)
def sol_rec(n):
    """Recursively count structurally unique BSTs with n nodes (Catalan).

    Uses the mirror symmetry of root choices, so only about half of the
    possible roots are enumerated per level.
    """
    if n in (0, 1):
        return 1
    half = (n + 1) // 2
    # Roots with unequal subtree sizes come in mirror pairs.
    total = sum(2 * sol_rec(size) * sol_rec(n - size - 1)
                for size in range(half - 1))
    if n % 2:
        # Odd n: the central root splits into two equal halves.
        total += sol_rec(half - 1) ** 2
    else:
        total += 2 * sol_rec(half) * sol_rec(n - half - 1)
    return total
# Print both solvers' answers for 2..6 to eyeball that they agree.
for label, solver in (('sol_dp', sol_dp), ('sol_rec', sol_rec)):
    print(label)
    for size in range(2, 7):
        print(size, solver(size))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Order games newest-first (then by title) and make winning_players
    optional with an explicit reverse accessor name."""
    dependencies = [
        ('basketball', '0014_game_winning_players'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='game',
            options={'ordering': ['-date', 'title']},
        ),
        migrations.AlterField(
            model_name='game',
            name='winning_players',
            field=models.ManyToManyField(blank=True, related_name='winning_players_set', to='basketball.Player'),
        ),
    ]
|
import commands
import os
import sys
min_size = 10
def merge(file1, file2, main_file):
    """Merge two sorted-integer files (one int per line) into main_file.

    Each input line is compared numerically; the merged sequence is written
    to main_file with one integer per line and a trailing newline.

    Fixes vs. the original: files written by divide() end in '\n', so
    split('\n') produced a trailing empty string and int('') raised
    ValueError; file handles for file1/file2 were never closed; main_file
    was opened twice ('w' then 'a'); a stray debug print leaked the arrays.
    """
    with open(file1) as f1:
        left = [ln for ln in f1.read().split('\n') if ln.strip()]
    with open(file2) as f2:
        right = [ln for ln in f2.read().split('\n') if ln.strip()]
    with open(main_file, 'w') as out:
        i = j = 0
        while i < len(left) and j < len(right):
            if int(left[i]) <= int(right[j]):
                out.write(left[i] + "\n")
                i += 1
            else:
                out.write(right[j] + "\n")
                j += 1
        # Drain whichever side still has items.
        for ln in left[i:]:
            out.write(ln + "\n")
        for ln in right[j:]:
            out.write(ln + "\n")
def divide(filename):
    """Recursively merge-sort the integers in *filename* in place.

    Python 2 only: relies on the removed `commands` module, and on `/=`
    performing integer division.  Shells out for the file size, the line
    count, sorting small files, and splitting large ones.
    """
    status, size = commands.getstatusoutput("ls -l %s | awk '{ print $5 }'" % filename)
    status, lines = commands.getstatusoutput("wc -l %s | awk '{print $1}'" % filename)
    if int(size) < min_size:
        # Small enough: sort with the shell and rewrite the file in place.
        status, output = commands.getstatusoutput("sort -n %s" % filename)
        f = open(filename, 'w')
        f.close()
        f = open(filename, 'a')
        output = output + "\n"
        f.write(output)
        f.close()
    else:
        # Split into two halves (suffixes "0" and "1"), sort each
        # recursively, then merge them back into filename.
        line = int(lines)
        if line % 2 == 1:
            line += 1
        line /= 2  # NOTE(review): float under Python 3 — py2 semantics assumed
        status, output = commands.getstatusoutput("split --lines=%d --numeric-suffixes --suffix-length=1 %s %s" % (line, filename, filename))
        temp_file1 = str(filename + "0")
        temp_file2 = str(filename + "1")
        divide(temp_file1)
        divide(temp_file2)
        merge(temp_file1, temp_file2, filename)
# Entry point: sort the file named on the command line in place.
divide(sys.argv[1])
|
from random_numbers import *
class Base_Account():
    """Minimal bank account: a generated 16-digit number, a card list, and
    a balance."""

    def __init__(self):
        # ran_16_card_num comes from random_numbers (star-imported above).
        self.account_number = ran_16_card_num()
        self.account_cards = []
        self.account_balance = 0

    def load(self, num, cards, bal):
        """Overwrite this account's state with previously saved values."""
        self.account_number = num
        self.account_cards = cards
        self.account_balance = bal

    def get_all_info(self):
        """Return the (number, cards, balance) tuple."""
        return (self.account_number,
                self.account_cards,
                self.account_balance)
class Debit_Account(Base_Account):
    """A Base_Account that additionally tracks outstanding loans."""

    def __init__(self):
        super().__init__()
        self.account_loans = []

    # NOTE(review): load/get_all_info take and return one more field than the
    # base class versions, so the two are not interchangeable via the base
    # interface — existing behavior, kept as-is.
    def load(self, num, cards, bal, loans):
        """Overwrite account state, including the loan list."""
        self.account_number = num
        self.account_cards = cards
        self.account_balance = bal
        self.account_loans = loans

    def get_all_info(self):
        """Return the (number, cards, balance, loans) tuple."""
        return (self.account_number,
                self.account_cards,
                self.account_balance,
                self.account_loans)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-14 04:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the PlayList model."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='PlayList',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mal_or_kitsu_link', models.URLField()),
                # NOTE(review): 'Completed' and 'On hold' share the stored
                # value '&status=completed', so 'On hold' can never round-trip
                # — likely meant '&status=on_hold'.  Historical migration,
                # left exactly as generated.
                ('mode', models.CharField(choices=[('&status=completed', 'Completed'), ('&status=current', 'Currently watching'), ('&status=planned', 'Plan to watch'), ('&status=completed', 'On hold'), ('&status=dropped', 'droped')], default='&status=completed', max_length=8)),
                ('order', models.CharField(choices=[('?sort=-rating', 'Highest first'), ('?sort=rating', 'Lowest first'), ('?sort=watched', 'Oldest')], default='?sort=-rating', max_length=25)),
                ('tv_size', models.BooleanField()),
                ('youtube_link', models.TextField(blank=True)),
            ],
        ),
    ]
|
# Weekly revenue report: Romanian day-name abbreviations and income per day.
Zi = ["L", "Ma", "Mi", 'J', "Vi", "S", "D"]
v = [1500, 2500, 3500, 4000, 4500, 5000, 0]

total = sum(v)
print("venitul sapatamnal = ", total)
print("media venitului zilnic = ", total // 7)

# Locate the best and worst days; like list.index(), max/min with a key
# return the FIRST position on ties.
y = max(range(len(v)), key=v.__getitem__)
n = v[y]
print("Ziua in care s-a obtinut cel mai mare venit este", Zi[y])
a = min(range(len(v)), key=v.__getitem__)
b = v[a]
print("Ziua cu venitul cel mai mic este ", Zi[a])
from django.shortcuts import render, redirect, get_object_or_404
from .models import Book, Cart, BookOrder, Review
from django.urls import reverse
from django.utils import timezone
from django.http import JsonResponse
import paypalrestsdk
import stripe
from django.conf import settings
from random import randint, choice
from .forms import ReviewForm
from django.core.mail import EmailMultiAlternatives
from django.template import Context
from django.template.loader import render_to_string
import string
from . import signals
import logging
logger = logging.getLogger(__name__)
def index(request):
    """Redirect the project root to the store front page."""
    return redirect('store:index')
def store(request):
    """Render the storefront listing every book in the catalog."""
    # Fix: the original "docstring" was a dead triple-quoted block of
    # commented-out session-handling experiment code; replaced with real
    # documentation.
    books = Book.objects.all()
    context = {'books': books}
    return render(request, 'base.html', context=context)
def book_details(request, book_id):
    """Show one book with its reviews; handle review submission.

    POST (authenticated): create a Review from the submitted form and, while
    the user has fewer than six reviews, email them a 6-character discount
    code.  GET (authenticated): offer the review form only if the user has
    not yet reviewed this book.
    """
    # book = Book.objects.get(pk=book_id)
    book = get_object_or_404(Book, id=book_id)
    context = {'book': book}
    if request.user.is_authenticated:
        if request.method == 'POST':
            form = ReviewForm(request.POST)
            if form.is_valid():
                new_review = Review.objects.create(
                    user=request.user,
                    book=context['book'],
                    text=form.cleaned_data.get('text')
                )
                new_review.save()
                # Prepare and send message with discount
                if Review.objects.filter(user=request.user).count() < 6:
                    subject = 'Your MysteryBooks.com discount code is here!'
                    from_email = 'librarian@mysterybooks.com'
                    to_email = [request.user.email]
                    email_context = Context({
                        'username': request.user.username,
                        'code': ''.join(choice(string.ascii_uppercase + string.digits) for _ in range(6)),
                        'discount': 10
                    })
                    text_email = render_to_string('email/review_email.txt', email_context)
                    html_email = render_to_string('email/review_email.html', email_context)
                    msg = EmailMultiAlternatives(subject, text_email, from_email, to_email)
                    msg.attach_alternative(html_email, 'text/html')
                    # NOTE(review): forcing content_subtype to 'html' makes
                    # the plain-text body be declared as HTML even though an
                    # HTML alternative is already attached — confirm intended.
                    msg.content_subtype = 'html'
                    msg.send()
        else:
            if Review.objects.filter(user=request.user, book=context['book']).count() == 0:
                form = ReviewForm()
                context['form'] = form
    context['reviews'] = book.review_set.all()
    return render(request, 'store/detail.html', context=context)
def add_to_cart(request, book_id):
    """Add one copy of book_id to the user's active cart, creating the cart
    if needed; anonymous users are sent to the store front."""
    if request.user.is_authenticated:
        try:
            book = Book.objects.get(pk=book_id)
        except Book.DoesNotExist:
            # Unknown book id: silently fall through to the cart page.
            pass
        else:
            try:
                cart = Cart.objects.get(user=request.user, active=True)
            except Cart.DoesNotExist:
                cart = Cart.objects.create(user=request.user)
                cart.save()
            cart.add_to_cart(book_id)
        return redirect('store:cart')
    else:
        return redirect('store:index')
def remove_from_cart(request, book_id):
    """Remove book_id from the user's active cart; anonymous users are sent
    to the store front."""
    if request.user.is_authenticated:
        try:
            book = Book.objects.get(pk=book_id)
        except Book.DoesNotExist:
            pass
        else:
            # NOTE(review): unlike add_to_cart, a missing active cart here
            # raises Cart.DoesNotExist unhandled — confirm this cannot be
            # reached without a cart.
            cart = Cart.objects.get(user=request.user, active=True)
            cart.remove_from_cart(book_id)
        return redirect('store:cart')
    else:
        return redirect('store:index')
def cart(request):
    """Render the user's active cart with the price total and item count."""
    if request.user.is_authenticated:
        cart = Cart.objects.filter(user=request.user, active=True)
        if cart:
            orders = BookOrder.objects.filter(cart=cart[0])
        else:
            orders = []
        total, count = 0, 0
        for order in orders:
            total += (order.book.price * order.quantity)
            count += order.quantity
        context = {
            'cart': orders,
            'total': total,
            'count': count
        }
        return render(request, 'store/cart.html', context)
    else:
        # NOTE(review): other views use 'store:index'; this one redirects to
        # the project-level 'index' — confirm both URL names exist.
        return redirect('index')
def checkout(request, processor):
    """Dispatch checkout of the user's active cart to PayPal or Stripe."""
    if request.user.is_authenticated:
        cart = Cart.objects.filter(user=request.user, active=True)
        if cart:
            orders = BookOrder.objects.filter(cart=cart[0])
        else:
            orders = []
        if processor == 'paypal':
            redirect_url = checkout_paypal(request, cart, orders)
            return redirect(redirect_url)
        elif processor == 'stripe':
            token = request.POST['stripeToken']
            status = checkout_stripe(cart, orders, token)
            if status:
                return redirect(reverse('store:process_order', args=['stripe']))
            else:
                # NOTE(review): redirect() forwards extra kwargs to URL
                # reversing, so this context= kwarg likely raises
                # NoReverseMatch rather than passing template context —
                # confirm against the URLconf.
                return redirect('store:order_error', context={'message': 'There was a problem processing your payment.'})
    else:
        return redirect('store:index')
def checkout_paypal(request, cart, orders):
    """Create a PayPal payment for the cart and return the approval URL.

    Stores PayPal's payment id on the cart so process_order/complete_order
    can find it later.  Returns the buyer-approval redirect URL, or the
    order-error URL if payment creation failed.
    """
    if request.user.is_authenticated:
        items, total = [], 0
        for order in orders:
            total += (order.book.price * order.quantity)
            book = order.book
            item = {
                'name': book.title,
                'sku': book.id,
                'price': str(book.price),
                'currency': 'USD',
                'quantity': order.quantity
            }
            items.append(item)
        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        # Sandbox credentials are read from settings.PAYPAL.
        paypalrestsdk.configure({
            'mode': 'sandbox',
            'client_id': settings.PAYPAL['client_id'],
            'client_secret': settings.PAYPAL['client_secret']
        })
        # NOTE(review): the PayPal REST API documents payer.payment_method;
        # 'payer_method' here looks misspelled — confirm against the SDK.
        payment = paypalrestsdk.Payment({
            'intent': 'sale',
            'payer': {'payer_method': 'paypal'},
            'redirect_urls': {
                'return_url': settings.PAYPAL['return_url'],
                'cancel_url': settings.PAYPAL['cancel_url']
            },
            'transactions': [{
                'item_list': {'items': items},
                'amount': {'total': str(total), 'currency': 'USD'},
                'description': 'Mystery Book order.'}]
        })
        if payment.create():
            cart_instance = cart.get()
            cart_instance.payment_id = payment.id
            cart_instance.save()
            # Hand back the URL PayPal wants the buyer redirected to.
            for link in payment.links:
                if link.method == 'REDIRECT':
                    redirect_url = str(link.href)
                    return redirect_url
        else:
            return reverse('store:order_error')
        # dummy block instead above one
        # cart_instance = cart.get()
        # cart_instance.payment_id = randint(1000, 100000)
        # cart_instance.save()
        # return f'http://127.0.0.1:8000/store/process/paypal?paymentId={cart_instance.payment_id}&token=as-is&PayerID=2020'
    else:
        return redirect('store:index')
def checkout_stripe(cart, orders, token):
    """Charge the cart total against the Stripe token.

    Stores the charge id on the cart as payment_id.  Returns True on
    success, False when Stripe rejects the card.
    """
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # Insert here your api_key
    stripe.api_key = ""
    total = 0
    for order in orders:
        total += (order.book.price * order.quantity)
    status = True
    try:
        change = stripe.Charge.create(
            amount=int(total * 100),  # Stripe amounts are integer cents
            currency='USD',
            source=token,
            metadata={'order_id': cart.get().id}
        )
        cart_instance = cart.get()
        cart_instance.payment_id = change.id
        cart_instance.save()
    except stripe.error.CardError as e:
        status = False
    return status
def order_error(request):
    """Show the order-error page; anonymous users go to the store front."""
    if not request.user.is_authenticated:
        return redirect('store:index')
    return render(request, 'store/order_error.html')
def process_order(request, processor):
    """Show the order summary before final confirmation.

    PayPal: looks the cart up by the paymentId PayPal passes back in the
    query string and renders the order recap.  Stripe: hands the client the
    completion URL as JSON.
    """
    if request.user.is_authenticated:
        if processor == 'paypal':
            payment_id = request.GET.get('paymentId')
            cart = Cart.objects.filter(payment_id=payment_id)
            if cart:
                orders = BookOrder.objects.filter(cart=cart[0])
            else:
                orders = []
            total = 0
            for order in orders:
                total += (order.book.price * order.quantity)
            context = {
                'cart': orders,
                'total': total,
            }
            return render(request, 'store/process_order.html', context)
        elif processor == 'stripe':
            return JsonResponse({'redirect_url': reverse('store:complete_order', args=['stripe'])})
    else:
        return redirect('store:index')
def complete_order(request, processor):
    """Execute the approved payment and deactivate the cart.

    PayPal: executes the payment whose id was stored on the cart during
    checkout.  Stripe: the charge already happened in checkout_stripe, so
    only the cart bookkeeping remains.
    """
    if request.user.is_authenticated:
        cart = Cart.objects.get(user=request.user, active=True)
        if processor == 'paypal':
            # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
            payment = paypalrestsdk.Payment.find(cart.payment_id)
            if payment.execute({'payer_id': payment.payer_info.payer_id}):
                message = f'Success! Your order has been completed and is being processed. Payment ID: {payment.id}'
                cart.active = False
                cart.order_date = timezone.now()
                cart.payment_type = processor
                cart.save()
            else:
                message = f'There was a problem with the transaction. Error: {payment.error.message}'
            context = {'message': message}
            # dummy block instead above one
            # context = {'message': f'Success! Your order has been completed and is being processed. Payment ID: {cart.payment_id}'}
            # cart.active = False
            # cart.order_date = timezone.now()
            # cart.payment_type = processor
            # cart.save()
            return render(request, 'store/order_complete.html', context)
        elif processor == 'stripe':
            cart.active = False
            cart.order_date = timezone.now()
            cart.payment_type = processor
            cart.save()
            context = {'message': f'Success! Your order has been completed, and is being processed. Payment ID: {cart.payment_id}'}
            return render(request, 'store/order_complete.html', context)
    else:
        return redirect('store:index')
|
# Sanctioned amount per division; every division currently receives the same
# flat allocation of 100000.
div_amounts = {'BISHNUPUR': 100000,
               'CHANDEL': 100000,
               'CHURACHANDPUR': 100000,
               'IED-II': 100000,
               'IED-III': 100000,
               'IED-IV': 100000,
               'JIRIBAM': 100000,
               'KAKCHING': 100000,
               'KAMJONG': 100000,
               'KANGPOKPI': 100000,
               'NONEY': 100000,
               'PHERZAWL': 100000,
               'SENAPATI': 100000,
               'TAMENGLONG': 100000,
               'TENGNOUPAL': 100000,
               'THOUBAL': 100000,
               'UKHRUL': 100000}
# Fix: removed the original trailing `for divamount in div_amounts: pass`,
# a no-op loop with no body or side effects.
|
import glob
import sys
import nltk
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import re
import pdb
import ujson
import time
import math
# Create Co-occurrence tuple for wnd[-1] with all other wnd[i]
# Get the expansion term for a word.
def get_expan_terms(aword):
    """Return the TOP_N terms most frequently co-occurring with *aword*.

    Relies on module globals cfd (nltk.ConditionalFreqDist) and TOP_N.
    """
    return [rw for rw, fw in cfd[aword].most_common(TOP_N)]
def get_exapn_for_query(query_text):
    """Accumulate expansion-term scores over every token of *query_text*.

    Returns an nltk.FreqDist mapping expansion term -> summed co-occurrence
    score across all query tokens.  (Function name typo is kept so existing
    callers keep working.)
    """
    qt = nltk.word_tokenize(query_text)
    expans = nltk.FreqDist()
    for query_term in qt:
        expan_candidats = get_expan_terms(query_term)
        # Because different query terms may yield the same expansion term,
        # we sum that term's scores over all query terms.
        list_scores = {ex_term: cfd[query_term][ex_term] for ex_term in expan_candidats}
        # P(term_j | Q)
        # = lambda * P_ml(term_j | Query) +
        #   (1-lambda) * sum{ P(term_j | term_i) * P_ml(term_i | Query) }
        # = l * frequency_in_query(term_j)/length(Query) +
        #   (1-l) * sum_i{ score_term_term(term_i, term_j) * frequency_in_query(term_i)/length(Query) }
        # Here we only perform the sum part.
        expans = expans + nltk.FreqDist(list_scores)
    return expans
# Calculate P( term_j | term_i )
# For the cooccurrence,
# term_i and term_j is the two terms in the corpus
#
# #coocc(term_i, term_j)
# P( term_j | term_i ) = -----------------------------
# sum_k #coocc(term_i, term_k)
#
# P( term_i, term_j)
# PMI( term_i, term_j) = log10 ---------------------------
# P( term_i) P(term_j)
#
# #coocc(term_i, term_j) x number_of_all_the_cooccurences
# = log10 ---------------------------------------------------------------
# sum_k #coocc(term_i, term_k) x sum_k #coocc(term_j, term_k)
#
# #coocc(term_i, term_j) x N x TERM_DISTANCE x 2
# = log10 ---------------------------------------------------------------
# freq(term_i) x freq(term_j) x (TERM_DISTANCE x 2) ^2
#
# #coocc(term_i, term_j) x N
# = log10 ---------------------------------------------------------------
# freq(term_i) x freq(term_j) x (TERM_DISTANCE x 2)
def score_term_in_term(term_j, term_i, N):
    """Score term_j given term_i.

    When PMI_FLAG is set, returns
        log10( #coocc(i,j) * N / (freq(i) * freq(j) * TERM_DISTANCE * 2) )
    otherwise the conditional-probability estimate
        #coocc(i,j) / (freq(i) * TERM_DISTANCE * 2).

    Reads the module globals ``cfd``, ``list_freq``, ``PMI_FLAG`` and
    ``TERM_DISTANCE``.
    """
    global cfd
    cooc = cfd[term_i][term_j]
    if PMI_FLAG:
        return math.log10(cooc * N / (list_freq[term_i] * list_freq[term_j] * (TERM_DISTANCE * 2)))
    return cooc / (list_freq[term_i] * TERM_DISTANCE * 2)
# Indri va faire ça, on ne fait pas le calcul
# P(term_j | Q)
# = lambda * P_ml(term_j | Query) +
# (1-lambda)* sum{ P( term_j | term_i) * P_ml( term_i | Query) }
# = l * frequency_in_query(term_j)/length(Query) +
# (1-l)* sum_{i}{ score_term_term(term_i, term_j) * frequency_in_query(term_i)/length(Query) }
#
# def score_term_in_query(term_j, qt_list, l=0.5):
# fd = nltk.FreqDist(qt_list)
# # If term_j is not in the fd, fd[term_j] equals 0
# r = l * fd[term_j] / len(qt_list) + \
# (1-l) * sum([cfd[term_i][term_j] * fd[term_i]/len(qt_list) for term_i in qt_list])
# return r
def add_conditional_frequence_table(wnd):
    """Record symmetric co-occurrences between the newest window term and the rest.

    ``wnd`` is the sliding token window; its last element is the newly seen
    term. Updates the module-level ConditionalFreqDist ``cfd`` in place.
    """
    global cfd
    newest = wnd[-1]
    for neighbour in wnd[-WND_SIZE:-1]:
        cfd[neighbour][newest] += 1
        cfd[newest][neighbour] += 1
# Read the cfd.json file
def reload_cfd_json(fname):
    """Rebuild the global ConditionalFreqDist from a JSON dump.

    The JSON maps each word to a {related_word: score} mapping, as produced
    by extract_cooccurence().

    :param fname: path of the cfd JSON file
    :return: the reconstructed ConditionalFreqDist (also stored in global cfd)
    """
    global cfd
    # Fix: use a context manager so the file handle is closed
    # (the original leaked the file object returned by open()).
    with open(fname) as fobj:
        cfd_list = ujson.load(fobj)
    cfd = nltk.ConditionalFreqDist()
    for w in cfd_list:
        cfd[w] = nltk.FreqDist(cfd_list[w])
    return cfd
def extract_cooccurence():
    """Build, score, filter and dump the term co-occurrence table.

    Reads every file matched by the glob pattern in sys.argv[1], extracts the
    <TEXT> fields, tokenizes, counts unigram frequencies and windowed
    co-occurrences, scores pairs with score_term_in_term, keeps the
    DOUBLE_TOP_N best-scored expansions per term and dumps them to JSON.

    :return: the ConditionalFreqDist of top-N scored expansions
    :raises SystemExit: when no glob pattern is given on the command line
    """
    global cfd, list_freq
    if len(sys.argv) > 1:
        # Define the data path
        data_path = sys.argv[1]
    else:
        # Fix: fail with a clear message instead of a NameError on data_path below.
        raise SystemExit("usage: python script.py <glob-pattern-of-data-files>")
    start_time = time.time()
    list_of_file = sorted(glob.glob(data_path))
    cfd = nltk.ConditionalFreqDist()
    list_freq = nltk.FreqDist()
    stop = set(stopwords.words('english'))
    if not STOP_FLAG:
        stop = []
    ps = PorterStemmer()
    for index, fname in enumerate(list_of_file):
        print("No.{} File: {}".format(index, fname))
        with open(fname, encoding='latin') as file:
            raw = file.read()
        # Extract all the <TEXT> fields and concatenate them
        result = re.findall(r'<TEXT>(.*?)</TEXT>', raw, re.DOTALL)
        texts = ''.join(result)
        # Tokenize, keep alphabetic tokens lower-cased, drop stopwords
        tokens = word_tokenize(texts)
        tokens_norm = [t.lower() for t in tokens if t.isalpha() and (t.lower() not in stop)]
        # Count the frequency of each word
        list_freq += nltk.FreqDist(tokens_norm)
        # Sliding neighbour window over the token stream
        wnd = []
        for t in tokens_norm:
            wnd.append(t)
            wnd = wnd[-WND_SIZE:]
            add_conditional_frequence_table(wnd)
    print("Time1: {}".format(time.time() - start_time))
    cfd_filter = nltk.ConditionalFreqDist()
    # Filter by MIN_COOCC and calculate the score; N is the corpus size.
    total_N = list_freq.N()
    for term_i in cfd:
        # Fix: exclude the self pair inside the comprehension -- the original
        # popped term_i from `cfd` only AFTER cfd_filter was built, so a word
        # could still appear among its own expansions.
        cfd_filter[term_i] = nltk.FreqDist(
            {term_j: score_term_in_term(term_j, term_i, total_N)
             for term_j in cfd[term_i]
             if term_j != term_i and cfd[term_i][term_j] > MIN_COOCC})
    print("Time2: {}".format(time.time() - start_time))
    cfd_topn = nltk.ConditionalFreqDist()
    # Keep the DOUBLE_TOP_N best expansions per term
    for w in cfd_filter:
        cfd_topn[w] = nltk.FreqDist(dict(cfd_filter[w].most_common(DOUBLE_TOP_N)))
    print("Time3: {}".format(time.time() - start_time))
    print("Time4: {}".format(time.time() - start_time))
    file_tag = {
        'dist': '_dist'+str(TERM_DISTANCE),
        'min': '_min'+str(MIN_COOCC),
        'top': '_top'+str(TOP_N),
        'stop': '_stp' if STOP_FLAG else '',
        'pmi': '_pmi' if PMI_FLAG else ''
    }
    # Fix: close the output file deterministically with a context manager.
    out_name = "/Users/jason.wu/Downloads/ap_cfd{dist}{min}{top}{stop}{pmi}.json".format(**file_tag)
    with open(out_name, "w") as out_file:
        ujson.dump(cfd_topn, out_file, double_precision=3)
    print("Time5: {}".format(time.time() - start_time))
    # Fix: removed the leftover pdb.set_trace() debugging breakpoint.
    return cfd_topn
# CONSTANTS
TERM_DISTANCE = 5  # max token distance counted as a co-occurrence
WND_SIZE = TERM_DISTANCE + 1  # sliding-window length (new token + its neighbours)
MIN_COOCC = 10  # pairs seen this many times or fewer are dropped
TOP_N = 10  # expansion terms returned per query word
DOUBLE_TOP_N = TOP_N * 2  # expansions kept per term in the dumped table
PMI_FLAG = True  # True: score pairs by PMI; False: by conditional probability
STOP_FLAG = True  # True: remove English stopwords before counting
# GLOBALS
cfd = nltk.ConditionalFreqDist()  # word -> FreqDist of co-occurring words
if __name__ == "__main__":
    extract_cooccurence()
|
# The final arrangement amounts to giving every stone a +/- sign,
# so this reduces directly to a 0/1 knapsack problem;
# in practice only one side (capacity sum // 2) needs to be considered.
class Solution:
    # DP over one pile: pick a subset whose sum is as close to total // 2 as
    # possible; the answer is the imbalance left between the two piles.
    def lastStoneWeightII(self, stones: List[int]) -> int:
        """Smallest possible final stone weight after smashing pairs."""
        total = sum(stones)
        half = total // 2
        best = [0] * (half + 1)
        for weight in stones:
            for capacity in range(half, weight - 1, -1):
                candidate = best[capacity - weight] + weight
                if candidate > best[capacity]:
                    best[capacity] = candidate
        return abs(total - best[half] - best[half])
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
def hypothesisscatter():
    """Scatter-plot MMSE Total Score against an ordinal visit number.

    Loads the merged MCI/AD/CN ADNI csv, maps the categorical "Visit" labels
    onto a numeric ordering, draws the scatter plot and saves the figure.
    """
    adni = pd.read_csv("/Users/ariadnapuigventos/Documents/CURSOS/BRIDGE/DS_Ejercicios_Python/BootCamp_TheBridge/Alzheimers_Disease/Data/MCI_AD_CN_Final.csv", sep=",")
    adni['Visits_numbering'] = adni["Visit"].map({"ADNI1/GO Month 12":4, "ADNI1/GO Month 24":6,'ADNI2 Year 1 Visit':6.5,'ADNI2 Year 2 Visit':7,'ADNI2 Year 3 Visit':8,'ADNI2 Year 4 Visit':9})
    ax = adni.plot.scatter(x="Visits_numbering", y="MMSE Total Score", c="blue")
    # Fix: the original called `fig.write_html(...)` on an undefined `fig`
    # (NameError), and write_html is a Plotly API -- pandas .plot returns a
    # matplotlib Axes. Capture the Axes and save its matplotlib figure.
    ax.get_figure().savefig('/templates/hyphotesis.png')
import pickle
from configparser import ConfigParser
from pathlib import Path
import keras
from keras import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
from sklearn.metrics import f1_score
def train_dev_split(data, dev=.2):
    """Split *data* into a train part and a trailing dev part.

    :param data: any sliceable sequence (list, ndarray, ...)
    :param dev: fraction in [0, 1] of the data reserved for the dev split
    :return: (train, dev) tuple; dev is the last ``int(len(data) * dev)`` items
    :raises ValueError: if ``dev`` is outside [0, 1]
    """
    # Fix: `assert` is stripped under `python -O`; validate explicitly.
    if not 0 <= dev <= 1:
        raise ValueError("dev fraction must be within [0, 1], got {!r}".format(dev))
    data_size = len(data)
    dev_size = int(data_size * dev)
    split_at = data_size - dev_size
    # Fix: avoid rebinding the `dev` parameter to the dev *split* (shadowing).
    return data[:split_at], data[split_at:]
# --- Configuration and model hyper-parameters ------------------------------
config = ConfigParser()
config.read('config.INI')
# Pickled by the preprocessing step; presumably (rows, vector_dim, channels)
# since vector_dim is read from index 1 -- TODO confirm against that step.
input_shape = pickle.load(open('./experiments/pickle/input_shape.p', 'rb'))
filters = config.getint('NETWORK', 'filters')
n_grams = config.getint('NETWORK', 'n_gram')
vector_dim = input_shape[1]
dropout_1 = config.getfloat('NETWORK', 'dropout_1')
dense_neurons = config.getint('NETWORK', 'dense_neurons')
dropout_2 = config.getfloat('NETWORK', 'dropout_2')
# --- CNN for binary classification: conv over n-gram-sized windows ---------
model = Sequential()
model.add(Conv2D(filters, kernel_size=(n_grams, vector_dim),
                 activation='relu',
                 input_shape=input_shape,
                 name='conv2d'))
# Pool over the conv layer's full output height: one max per filter.
model.add(MaxPooling2D(pool_size=(model.get_layer('conv2d').output_shape[1], 1)))
model.add(Dropout(dropout_1))
model.add(Flatten())
model.add(Dense(dense_neurons, activation='relu'))
model.add(Dropout(dropout_2))
model.add(Dense(1, activation='sigmoid'))
# --- Checkpointing: keep only the weights of the best val_acc epoch --------
model_path = Path('./experiments/model')
model_path.mkdir(exist_ok=True)
pickle_path = Path('./experiments/pickle')
callback = keras.callbacks.ModelCheckpoint(str(model_path / 'trained_model.hdf5'), monitor='val_acc', verbose=0,
                                           save_best_only=True, save_weights_only=True, mode='auto', period=1)
model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])
model.summary()
# --- Load pickled datasets and carve a dev split off the training data -----
x_train = pickle.load(open(pickle_path / 'x_train.p', 'rb'))
y_train = pickle.load(open(pickle_path / 'y_train.p', 'rb'))
x_test = pickle.load(open(pickle_path / 'x_test.p', 'rb'))
y_test = pickle.load(open(pickle_path / 'y_test.p', 'rb'))
x_train, x_dev = train_dev_split(x_train, dev=config.getfloat('TRAINING', 'dev_split'))
y_train, y_dev = train_dev_split(y_train, dev=config.getfloat('TRAINING', 'dev_split'))
batch_size = config.getint('TRAINING', 'batch_size')
epochs = config.getint('TRAINING', 'epochs')
model.fit(x_train, y_train,
          batch_size=batch_size,
          callbacks=[callback],
          epochs=epochs,
          verbose=1,
          validation_data=(x_dev, y_dev))
# --- Evaluate on the held-out test set with a 0.5 decision threshold -------
pred_test = model.predict(x_test)
pred_test = pred_test >= 0.5
# simple F-score because of neglectable class imbalance
f1 = f1_score(y_test, pred_test)
print("Test F1: {}".format(f1))
|
"""Programs to calculate trajectory of projectiles or ratio of kinetic energy of projectiles at given angles of projection"""
import math
import matplotlib.pyplot as plt
"""Function to call for initial conditions depending on the program"""
def initial_conditions(program):
    """Prompt the user for the inputs required by the chosen program.

    :param program: 'TRAJECTORY' or 'RATIO'
    :return: (vx_0, vy_0, theta, beta, dt, g) for TRAJECTORY,
             (v_0, beta, dt, g) for RATIO, or implicitly None after the
             error message for any other value (main() then prints
             "Try again.").
    """
    if program == 'TRAJECTORY': #Asks for inputs for trajectory program
        v_0 = float(input("Enter the magnitude of the initial velocity in m/s: ")) # Prompting for initial velocity
        theta = float(input("Enter the angle of projection in degrees: ")) # Prompting for angle of projection
        beta = float(input("Enter the normalised drag coefficient : ")) # Prompting for normalised drag coefficient
        dt = float(input("Enter the step interval in seconds: ")) # Prompting for step interval
        g = 9.81 #Defining gravitational constant
        vx_0 = float(v_0*math.cos(math.radians(theta))) #Calculating initial velocity in horizontal direction
        vy_0 = float(v_0*math.sin(math.radians(theta))) #Calculating initial velocity in vertical direction
        return vx_0,vy_0,theta,beta,dt,g
    elif program == 'RATIO': #Asks for inputs for ratio program
        v_0 = float(input("Enter the initial velocity in m/s: ")) # Prompting for initial velocity
        beta = float(input("Enter the drag coefficient : ")) # Prompting for normalised drag coefficient
        dt = float(input("Enter the step interval in seconds: ")) # Prompting for step interval
        g = 9.81 #Defining gravitational constant
        return v_0,beta,dt,g
    else: #Typo prevention
        print("USER ERROR: Program name must be typed as asked for above. Type: TRAJECTORY or RATIO . ")
"""Function to calculate the trajectory"""
def trajectory(vx_0,vy_0,theta,beta,dt,g):
    """Integrate a drag-affected projectile with Euler steps and plot its path.

    Velocity update per step: dv = dt * (-beta * |v| * v), with gravity -g*dt
    on the vertical component. Integration stops when the projectile returns
    to ground level (y <= 0). The `theta` argument is accepted for interface
    compatibility but not used directly.
    """
    vel_x = [vx_0]
    vel_y = [vy_0]
    pos_x = [0.0]
    pos_y = [0.0]
    for step in range(0,100000):
        speed = float((vel_x[step]**2+vel_y[step]**2)**0.5)
        # Drag decelerates both components; gravity acts on y only.
        vel_x.append(vel_x[step]+dt*(-1*beta*speed*vel_x[step]))
        vel_y.append(vel_y[step]+dt*(-1*beta*speed*vel_y[step])-dt*g)
        pos_x.append(pos_x[step] + dt*vel_x[step])
        new_y = pos_y[step] + dt*vel_y[step]
        pos_y.append(new_y)
        if new_y <= 0:
            break
    # Plot vertical displacement against horizontal displacement.
    plt.plot(pos_x, pos_y)
    plt.title("Displacement Plot of Trajectory of a Particle")
    plt.xlabel("Horizontal Displacement (m)")
    plt.ylabel("Vertical Displacement (m)")
    plt.xlim(0.0, max(pos_x))
    plt.ylim(0.0, max(pos_y))
    plt.show()
    return
"""Function to calculate the ratio of final kinetic energy to initial kinetic energy against launch angle for given inputs"""
def ratio(v_0,beta,dt,g):
    """Plot final/initial kinetic-energy ratio against launch angle (0-90 deg).

    For each 0.001-degree strip the projectile is Euler-integrated until it
    falls below ground; the KE ratio equals (v_final / v_initial)^2.
    """
    theta_values = [0.0] #List of thetas between 0 and 90
    KE_ratios = [] #List of kinetic energy ratios
    velocity_x0 = [] #List of horizontal components of initial velocities for each theta
    velocity_y0 = [] #List of vertical components of initial velocities for each theta
    theta = 0.0 #Setting initial theta value
    while theta < 90:
        if theta<0:
            break
        else:
            theta = float(theta)+0.001 #Dividing theta into strips
            theta_values.append(theta)
    # NOTE(review): the inner integration loop reuses the name `i`; the outer
    # `for` keeps its own iterator so behaviour is unaffected, but a distinct
    # name would be clearer.
    for i in range(0,len(theta_values)):
        vx_0 = v_0*math.cos(math.radians(theta_values[i])) #Calculating initial horizontal velocity for each theta
        velocity_x0.append(vx_0)
        vy_0 = v_0*math.sin(math.radians(theta_values[i])) #Calculating initial vertical velocity for each theta
        velocity_y0.append(vy_0)
        velocity_x = [vx_0] #List of horizontal velocities for each theta as it projects
        velocity_y = [vy_0] #List of vertical velocities for each theta as it projects
        displacement_y= [0.0] #Vertical displacement list starting at y=0
        for i in range(0,10000):
            magnitude_velocity = float((velocity_x[i]**2+velocity_y[i]**2)**0.5) #Calculating initial magnitude of velocity
            vx = velocity_x[i]+dt*(-1*beta*magnitude_velocity*velocity_x[i]) #Calculating each X direction of velocity at each point for given acceleration
            velocity_x.append(vx)
            vy = velocity_y[i]+dt*(-1*beta*magnitude_velocity*velocity_y[i])-dt*g #Calculating each Y direction of velocity at each point
            velocity_y.append(vy)
            y = displacement_y[i] + dt*velocity_y[i] #Calculating each Y direction displacement for given time
            displacement_y.append(y)
            if y<0:
                break
        KE_ratio = float((velocity_x[-1]**2)+(velocity_y[-1]**2))/float((v_0)**2) #Calculating kinetic energy ratio - same as (Vf)^2/(Vs)^2
        KE_ratios.append(KE_ratio)
    """Plotting the ratio of final kinetic energy to initial kinetic energy against launch angle"""
    plt.plot(theta_values,KE_ratios)
    plt.title("Graph of ratio of kinetic energies of finish to start against angle")
    plt.xlabel("Launch angle (degrees)")
    plt.ylabel("Ratio of final kinetic energy to initial kinetic energy")
    plt.xlim(0.0, 90.0)
    plt.show()
    return
def main():
    """Prompt for a program name and dispatch to trajectory() or ratio()."""
    program = str(input("Type TRAJECTORY for trajectory program or type RATIO for ratio program: ")) #Asking which program to run
    start = initial_conditions(program) #Collect the inputs for the chosen program
    if program == 'TRAJECTORY':
        vx_0, vy_0, theta, beta, dt, g = start
        trajectory(vx_0, vy_0, theta, beta, dt, g)
    elif program == 'RATIO':
        v_0, beta, dt, g = start
        ratio(v_0, beta, dt, g)
    else:
        print("Try again.") #Typo prevention
main()
|
from common.run_method import RunMethod
import allure
@allure.step("备课评价/新增评价")
def courseWareEvaluateController_addEvaluate_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    Lesson-prep evaluation: create a new evaluation.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the JSON-parsed response when True (default)
    :param header: request headers
    :param host: target environment host (passed through kwargs)
    :return: JSON-parsed response by default; the raw response when return_json=False
    '''
    name = "备课评价/新增评价"
    url = f"/service-research/courseWareEvaluateController/addEvaluate"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("备课评价/根据课件查询评价")
def courseWareEvaluateController_queryEvaluateList_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Lesson-prep evaluation: query evaluations for a given courseware.

    :param params: query-string parameters appended to the URL
    :param return_json: return the JSON-parsed response when True (default)
    :param header: request headers
    :param host: target environment host (passed through kwargs)
    :return: JSON-parsed response by default; the raw response when return_json=False
    '''
    name = "备课评价/根据课件查询评价"
    url = f"/service-research/courseWareEvaluateController/queryEvaluateList"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("备课评价/评价列表")
def courseWareEvaluateController_queryEvaluateByParam_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    Lesson-prep evaluation: list evaluations filtered by the given criteria.

    :param params: query-string parameters appended to the URL
    :param body: request body (filter criteria)
    :param return_json: return the JSON-parsed response when True (default)
    :param header: request headers
    :param host: target environment host (passed through kwargs)
    :return: JSON-parsed response by default; the raw response when return_json=False
    '''
    name = "备课评价/评价列表"
    url = f"/service-research/courseWareEvaluateController/queryEvaluateByParam"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("备课评价/查询评价详情")
def courseWareEvaluateController_queryEvaluateById_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Lesson-prep evaluation: fetch the detail of one evaluation by id.

    :param params: query-string parameters appended to the URL
    :param return_json: return the JSON-parsed response when True (default)
    :param header: request headers
    :param host: target environment host (passed through kwargs)
    :return: JSON-parsed response by default; the raw response when return_json=False
    '''
    name = "备课评价/查询评价详情"
    url = f"/service-research/courseWareEvaluateController/queryEvaluateById"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("备课评价/删除评价")
def courseWareEvaluateController_deleteEvaluate_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Lesson-prep evaluation: delete an evaluation.

    :param params: query-string parameters appended to the URL
    :param return_json: return the JSON-parsed response when True (default)
    :param header: request headers
    :param host: target environment host (passed through kwargs)
    :return: JSON-parsed response by default; the raw response when return_json=False
    '''
    name = "备课评价/删除评价"
    url = f"/service-research/courseWareEvaluateController/deleteEvaluate"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("备课评价/查询评价列表")
def courseWareEvaluateController_queryEvaluateListById_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Lesson-prep evaluation: query the evaluation list by id.

    :param params: query-string parameters appended to the URL
    :param return_json: return the JSON-parsed response when True (default)
    :param header: request headers
    :param host: target environment host (passed through kwargs)
    :return: JSON-parsed response by default; the raw response when return_json=False
    '''
    name = "备课评价/查询评价列表"
    url = f"/service-research/courseWareEvaluateController/queryEvaluateListById"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
|
import base64
import os
import uuid
import flask
import flask_talisman
import redis
# Maximum accepted length of a base64-encoded plan payload (2 MiB).
MAX_PLAN_SIZE_BYTES = 2 * 1024 * 1024
# TTL applied to both the plan body and its status key in Redis.
ONE_HOUR_SECONDS = 60 * 60
app = flask.Flask(__name__, template_folder="templates")
# Use Talisman to redirect to HTTPS
flask_talisman.Talisman(app, content_security_policy=None)
# Keep a small connection pool; don't want to exceed free tier usage.
redis_connection_pool = redis.BlockingConnectionPool.from_url(
    os.environ["REDIS_URL"], max_connections=1
)
redis_client = redis.client.Redis(connection_pool=redis_connection_pool)
class PlanStatus:
    """String constants for a plan's lifecycle plus the set of valid values."""
    PENDING = "pending"
    APPROVED = "approved"
    REJECTED = "rejected"
    allowed_statuses = {PENDING, APPROVED, REJECTED}
@app.route("/")
def hello():
    """Serve the landing page."""
    page = flask.render_template("index.html")
    return page
# POST /plan stashes contents in Redis, returns a uuid.
@app.route("/plan", methods=["POST"])
def store_plan():
    """Validate and store a base64-encoded plan; respond with its new UUID.

    Expects JSON of the form {"plan_base64": "..."}. On success the plan and
    a PENDING status are stored under "<id>-plan" / "<id>-status" with a
    one-hour TTL, and {"id": "<uuid>"} is returned with HTTP 201.
    """
    plan_id = uuid.uuid4()
    try:
        plan_base64 = flask.request.json.get("plan_base64")
    except Exception as e:
        print("Request is not JSON", e)
        return (
            flask.jsonify(
                {"error": 'Expecting JSON in the format {"plan_base64": "..."}'}
            ),
            400,
        )
    if not plan_base64:
        print("Request missing `plan_base64` field")
        return flask.jsonify({"error": "`plan_base64` field expected"}), 400
    if len(plan_base64) > MAX_PLAN_SIZE_BYTES:
        print("`plan_base64` is too large")
        return (
            flask.jsonify(
                {
                    "error": f"`plan_base64` value must be less than {MAX_PLAN_SIZE_BYTES} bytes long (got {len(plan_base64)} bytes)"
                }
            ),
            400,
        )
    try:
        # Fix: validate=True makes b64decode reject characters outside the
        # base64 alphabet; the default silently discards them, so malformed
        # payloads could slip through this check.
        base64.b64decode(plan_base64, validate=True)
    except Exception as e:
        print("`plan_base64` does not appear to be base64-encoded", e)
        return (
            flask.jsonify(
                {"error": "`plan_base64` value does not appear to be base64-encoded"}
            ),
            400,
        )
    redis_client.setex(f"{plan_id}-plan", ONE_HOUR_SECONDS, plan_base64)
    redis_client.setex(f"{plan_id}-status", ONE_HOUR_SECONDS, PlanStatus.PENDING)
    print(f"Accepted plan with id {plan_id}")
    # Fix: serialize the UUID explicitly; not every Flask JSON provider
    # accepts a raw uuid.UUID object.
    return flask.jsonify({"id": str(plan_id)}), 201
# GET /plan/<uuid> serves an HTML page with an approval form.
@app.route("/plan/<string:plan_id>", methods=["GET"])
def display_plan(plan_id: str):
    """Render the approval page for a stored plan.

    Returns the not_found template with 404 for an unknown id, and with 500
    when the stored payload cannot be base64-decoded (corrupt data).
    """
    plan_base64 = redis_client.get(f"{plan_id}-plan")
    if not plan_base64:
        print("Cannot display form for unknown plan")
        return flask.render_template("not_found.html"), 404
    try:
        plan = base64.b64decode(plan_base64).decode("utf8")
    except Exception as e:
        # Stored data is corrupt; surface it as a server error.
        print("Stored plan could not be decoded as base64", e)
        return flask.render_template("not_found.html"), 500
    status = redis_client.get(f"{plan_id}-status")
    if status:
        status = status.decode("utf8")
    else:
        # The status key may expire independently of the plan key;
        # fall back to PENDING in that case.
        status = PlanStatus.PENDING
    return flask.render_template(
        "plan.html",
        **{
            "plan_id": plan_id,
            "plan": plan,
            "status": status,
            "pending": status == PlanStatus.PENDING,
        },
    )
# PUT /plan/<uuid>/status approves or rejects the plan.
@app.route("/plan/<string:plan_id>/status", methods=["PUT"])
def set_status(plan_id: str):
    """Update a plan's status from JSON {"status": "..."}; refreshes the TTL."""
    status = None
    try:
        status = flask.request.json.get("status")
    except Exception as exc:
        print("Request is not JSON", exc)
        error_body = flask.jsonify({"error": 'Expecting JSON with format {"status": "..."}'})
        return error_body, 400
    if redis_client.get(f"{plan_id}-status") is None:
        print("Could not set plan status: plan not found")
        return flask.jsonify({"error": f"Plan id {plan_id} not found"}), 404
    if status not in PlanStatus.allowed_statuses:
        print("Attempt to set invalid plan status")
        error_body = flask.jsonify(
            {"error": f"`status` must be one of {PlanStatus.allowed_statuses}"}
        )
        return error_body, 400
    # Writing the status also resets its one-hour expiry.
    redis_client.setex(f"{plan_id}-status", ONE_HOUR_SECONDS, status)
    print(f"Setting plan {plan_id} status to {status}")
    return flask.jsonify({}), 204
# GET /plan/<uuid>/status serves the status. This is the polling endpoint.
@app.route("/plan/<string:plan_id>/status", methods=["GET"])
def get_status(plan_id: str):
    """Return {"status": ...} for a plan; unknown stored values fall back to pending."""
    raw_status = redis_client.get(f"{plan_id}-status")
    if raw_status is None:
        print("Could not get plan status: plan not found")
        return flask.jsonify({"error": f"Plan id {plan_id} not found"}), 404
    status = raw_status.decode("utf8")
    if status not in PlanStatus.allowed_statuses:
        print(
            f"Falling back on {PlanStatus.PENDING} for plan with invalid status {status}"
        )
        status = PlanStatus.PENDING
    return flask.jsonify({"status": status})
|
import os.path
from install_utils import ProjectBuilder
def main():
    """Read the install configuration and build/install the project."""
    config_data = ProjectBuilder.readConfigData()
    ProjectBuilder.ProjectBuilder(config_data).installProject()
if __name__ == "__main__":
    main()
import numpy as np
import random
import tetris
import neuralnetwork as NN
import losses
def cross(A, B):
    """Return a crossover child of parent gene vectors A and B.

    len(B)//2 random positions (without repeats) are taken from B, the
    remaining genes from A.

    :param A: 1-D parent gene array (left unmodified)
    :param B: 1-D parent gene array of the same length
    :return: a new array mixing A's and B's genes
    """
    # Fix: copy A instead of aliasing it -- the original mutated the parent
    # (and therefore the population row it came from) in place.
    C = A.copy()
    ind = np.random.choice(B.shape[0], int(np.floor(len(B)/2)), replace=False)
    C[ind] = B[ind]
    ## take a random pick of dimension / 2 from A, the others from B
    return(C)
def mutate(C):
    """Return a mutated copy of gene vector C.

    Each gene independently has a b=5% chance of being selected; every
    selected gene is shifted by the same uniform random offset drawn from
    [-a, a] with amplitude a=1. C itself is not modified.
    """
    amplitude = 1
    rate = 0.05
    ## for every gene take a b% chance to change it with a random value of amplitude a
    selected = np.random.rand(len(C)) >= (1 - rate)
    shift = selected * amplitude * random.uniform(-1, 1)
    return(np.add(C, shift))
def cross_and_mutate(pop, pop_size):
    """Produce the next generation from a fitness-sorted population.

    Rows 0..N-1 (the elite quarter) are kept as-is; rows N..2N-1 cross with
    their neighbour, rows 2N..3N-1 cross with a random row, rows 3N..4N-1
    cross two random rows; each produced row is then mutated.

    :param pop: 2-D array (individuals x genes), best individuals first
    :param pop_size: (population_size, gene_dimension) tuple
    :return: the offspring array
    """
    ## pop describes the complete population:
    ## numbers of individuals is
    size_population = pop_size[0]
    N = int(size_population/4)
    dimension = pop_size[1]
    # NOTE(review): `offspring = pop` aliases the population (no copy), so the
    # crossover results overwrite `pop` in place -- confirm this is intended.
    offspring = pop
    k = N
    while k < size_population:
        if (k < 2 * N):
            offspring[k] = cross(pop[k], pop[k+1])
        if (k >= 2 * N) and (k < 3*N):
            offspring[k] = cross(pop[k], pop[random.randrange(0, 4*N)])
        if (k >= 3 * N):
            offspring[k] = cross(pop[random.randrange(0, 4*N)], pop[random.randrange(0, 4*N)])
        ## next mutate
        # NOTE(review): this copies the elite row into the second-to-last slot
        # on EVERY iteration -- it looks like it was meant to run once, after
        # the loop; confirm before changing.
        offspring[-2:-1] = pop[0]
        offspring[k] = mutate(offspring[k])
        k = k + 1
    return offspring
def run(N = 6, num_generations = 10000):
    """Evolve neural-network weight vectors that play Tetris via a GA.

    :param N: quarter of the population size (population = 4 * N individuals)
    :param num_generations: maximum number of generations to evolve
    :return: (fitness[0], best gene vector); see NOTE below about fitness[0]
    """
    em = tetris.TetrisApp(10, 10, 750, False, 40, 30*100)
    em.pcrun()
    net = NN.DQN(em.get_state_size(), 1, losses.MSE_loss)
    # One gene per weight/bias of the 3-layer network.
    dimension = net.L1.W.size + net.L1.B.size + net.L2.W.size + net.L2.B.size + net.L3.W.size + net.L3.B.size
    size_population = 4 * N
    pop_size = (size_population, dimension)
    new_population = np.random.rand(size_population, dimension)
    fitness = np.ndarray(size_population)
    generations = np.linspace(1, num_generations, num_generations)
    maxscore = np.zeros(num_generations)
    for generation in range(num_generations):
        ## compute the fitness of each individual
        for it, row in enumerate(new_population):
            # Unpack the flat gene vector back into the network's layers.
            index = 0
            net.L1.W = row[index:index + net.L1.W.size].reshape(net.L1.W.shape)
            index += net.L1.W.size
            net.L1.B = row[index:index + net.L1.B.size].reshape(net.L1.B.shape)
            index += net.L1.B.size
            net.L2.W = row[index:index + net.L2.W.size].reshape(net.L2.W.shape)
            index += net.L2.W.size
            net.L2.B = row[index:index + net.L2.B.size].reshape(net.L2.B.shape)
            index += net.L2.B.size
            net.L3.W = row[index:index + net.L3.W.size].reshape(net.L3.W.shape)
            index += net.L3.W.size
            net.L3.B = row[index:index + net.L3.B.size].reshape(net.L3.B.shape)
            em.reset()
            done = False
            # Play one full game greedily: always take the move with the
            # highest predicted Q value.
            while not done:
                next_state = em.get_next_states()
                predicted_qs = {}
                for i, (*data,) in enumerate(next_state):
                    predicted_qs[(data[0], data[1])] = net.f_pass(np.array([next_state[data[0], data[1]]]).T)[0,0]
                best_move = max(predicted_qs, key=predicted_qs.get)
                reward, done = em.pcplace(best_move[0], best_move[1])
                if em.get_game_score() > 20000:
                    break
            fitness[it] = em.get_game_score()
        ## sort this such that the best is on top, etc
        new_population = new_population[fitness.argsort()[::-1]]
        ## help: argsort
        maxscore[generation] = max(fitness)
        print(generation, max(fitness))
        if max(fitness) > 20000:
            break
        np.savetxt("evolution\\generation" + str(generation) + ".csv", new_population[0], delimiter=',')
        offspring_crossover = cross_and_mutate(new_population, pop_size)
        new_population = offspring_crossover
    np.savetxt("evolution\\scores.csv", np.array([generations, maxscore]).T, delimiter=',')
    # NOTE(review): `fitness` is not reordered alongside the population, so
    # fitness[0] is not necessarily the score of new_population[0] -- confirm.
    return(fitness[0], new_population[0])
# Entry point: evolve a population of 4*50 networks for up to 1000 generations,
# then persist the best gene vector and print its (reported) fitness.
fit, sol = run(50, 1000)
np.savetxt("evolution\\" + str(fit) + "gene.csv", sol, delimiter=',')
print(fit)
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired, ValidationError
from app.models import User, List, Item
class ListCreationForm(FlaskForm):
    """Form for creating a new list (Russian UI labels)."""
    listname = StringField('Новый список', validators=[DataRequired()])
    submit = SubmitField('Создать')
class ListEditionForm(FlaskForm):
    """Form for renaming an existing list (Russian UI labels)."""
    listname = StringField('Новое название', validators=[DataRequired()])
    submit = SubmitField('Изменить')
class ItemAdditionForm(FlaskForm):
    """Form for adding a product item to a list (Russian UI labels)."""
    itemname = StringField('Новый продукт', validators=[DataRequired()])
    submit = SubmitField('Добавить')
"""
class DeleteListForm(FlaskForm):
delete = SubmitField('Удалить список')
class DeleteItemForm(FlaskForm, item_id):
item = Item.query.filter_by(id=item_id)
delete = SubmitField('Удалить продукт')
""" |
#Made by Andre Augusto Leite de Almeida
import os
import random
import collections
from PIL import Image, ImageDraw
from numpy import array
from skimage import img_as_float
from skimage.measure import compare_ssim
#Fast, but supports only one image on template
class Basic_Meme:
    """Meme generator that pastes one random image into a template's single
    transparent rectangle.

    ``folder`` must contain the sub-folders "Images", "Templates" and
    "Result".
    """
    def __init__(self,folder):
        self.folder = folder
    #Choose the random images
    # Images, Templates, and Result folders need to exist
    def random_image(self):
        """Pick one random template path and one random image path."""
        #Choose a random image from the folder "Images"
        path = os.path.join(self.folder,"Images")
        random_filename = random.choice([
            x for x in os.listdir(path)
            if os.path.isfile(os.path.join(path, x))
        ])
        imagefile = os.path.join(path, random_filename)
        #Choose a random image from the folder "Templates"
        path2 = os.path.join(self.folder,"Templates")
        random_filename2 = random.choice([
            x for x in os.listdir(path2)
            if os.path.isfile(os.path.join(path2, x))
        ])
        templatefile = os.path.join(path2, random_filename2)
        return templatefile, imagefile
    #Calculates the alpha on the template, actually only works for 1 blank space
    def view_transparency(self):
        """Return the bounding box (xleft, ytop, xright, ybot) of the fully
        transparent (255, 255, 255, 0) pixels in self.templatefile."""
        img = Image.open(self.templatefile)
        img = img.convert("RGBA")
        pixels = img.load()
        alpha = (255,255,255,0)
        xlist = []
        ylist = []
        for y in range(img.size[1]):
            for x in range(img.size[0]):
                if pixels[x, y] == alpha:
                    xlist.append(x)
                    ylist.append(y)
        xleft = min(xlist)
        xright = max(xlist)
        ytop = min(ylist)
        ybot = max(ylist)
        return xleft, ytop, xright, ybot
    #Test the template alpha location, debug tool
    def test_templates(self):
        """Debug helper: outline the detected transparent box on a random
        template and display it."""
        files = self.random_image()
        self.templatefile = files[0]
        image = Image.open(self.templatefile)
        dr = ImageDraw.Draw(image)
        alpha = (self.view_transparency()) # (x1,y1, x2,y2)
        line = (alpha[0],alpha[1],alpha[0],alpha[3])
        dr.line(line, fill="orange", width=10)
        line = (alpha[0],alpha[1],alpha[2],alpha[1])
        dr.line(line, fill="orange", width=10)
        line = (alpha[0],alpha[3],alpha[2],alpha[3])
        dr.line(line, fill="orange", width=10)
        line = (alpha[2],alpha[1],alpha[2],alpha[3])
        dr.line(line, fill="orange", width=10)
        image.show()
    #Finally make the meme
    def make(self):
        """Build one meme: fill the template's transparent box with a random
        image, save it under Result/ with a random name and return its path."""
        #Get the random files and view transparency
        files = self.random_image()
        self.templatefile = files[0]
        self.imagefile = files[1]
        local = self.view_transparency()
        #Get alpha values
        xleft = local[0]
        xright = local[2]
        ytop = local[1]
        ybot = local[3]
        #Get the size from variables
        x = xright - xleft
        y = ybot - ytop
        size = (x,y)
        #Open the images and resize to fit template
        image = Image.open(self.imagefile)
        image = image.resize(size)
        #Open two templates, one for background, other for foreground
        template = Image.open(self.templatefile)
        template2 = Image.open(self.templatefile)
        #Convert to RGB Alpha
        image = image.convert("RGBA")
        template = template.convert("RGBA")
        #Finally paste the images: the image fills the hole, then the template
        #is pasted on top using its own alpha as mask so only the hole shows.
        template.paste(image, (local[0],local[1]))
        template.paste(template2, (0,0), template2)
        #Save out
        out = os.path.join(self.folder,"Result",str(random.randrange(100000,999999)) + ".jpg") #Random name
        print(out)
        template.save(out)
        #And return the location
        return out
#Slower, but can handle templates with any number of spaces
class Advanced_Meme:
    """Meme generator that detects every transparent region in a template and
    pastes a different random image into each one.

    ``folder`` must contain the sub-folders "Images", "Templates" and
    "Result".
    """
    def __init__(self,folder):
        self.folder = folder
    #Random image function
    def random_image(self):
        """Pick one random template path and one random image path."""
        path = os.path.join(self.folder,"Images")
        random_filename = random.choice([
            x for x in os.listdir(path)
            if os.path.isfile(os.path.join(path, x))
        ])
        imagefile = os.path.join(path, random_filename)
        path2 = os.path.join(self.folder,"Templates")
        random_filename2 = random.choice([
            x for x in os.listdir(path2)
            if os.path.isfile(os.path.join(path2, x))
        ])
        templatefile = os.path.join(path2, random_filename2)
        return templatefile, imagefile
    #Find the borders of template squares
    def follow_border(self,im, x, y, used):
        """Flood-fill from (x, y) across dark (non-255) pixels, returning the
        connected border points and marking them in *used*."""
        work = [(x, y)]
        border = []
        while work:
            x, y = work.pop()
            used.add((x, y))
            border.append((x, y))
            # 8-connected neighbourhood
            for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1),
                           (1, 1), (-1, -1), (1, -1), (-1, 1)):
                px, py = x + dx, y + dy
                try:
                    if im[px, py] == 255 or (px, py) in used:
                        continue
                    work.append((px, py))
                except:
                    # Off-image neighbours raise; skip them.
                    pass
        return border
    #Find template spaces
    def find_template_local(self):
        """Pick a random template, detect its transparent regions and return
        a list of [top_left, top_right, bottom_right, bottom_left] corner
        points for each region."""
        # Opening a random template
        self.template = self.random_image()[0]
        self.original = Image.open(self.template)
        orig = self.original
        # Binarise each channel so "transparent" areas stand out.
        orig_bin = orig.point(lambda x: 0 if x < 128 else 255)
        im = orig_bin.load()
        border = Image.new('1', orig.size, 'white')
        width, height = orig.size
        bim = border.load()
        # Keep only border points
        for x in range(width):
            for y in range(height):
                # Fully opaque pixel (alpha == 255): not part of a hole.
                if im[x, y][3] == 255:
                    continue
                try:
                    if im[x + 1, y] or im[x - 1, y] or im[x, y + 1] or im[x, y - 1]:
                        bim[x, y] = 0
                    else:
                        bim[x, y] = 255
                except:
                    # Pixels on the outer edge have out-of-range neighbours.
                    pass
        used = set()
        border = []
        for x in range(width):
            for y in range(height):
                if bim[x, y] == 255 or (x, y) in used:
                    continue
                b = self.follow_border(bim, x, y, used)
                border.append(b)
        # Find the corners of each space
        template_place = []
        for b in border:
            # NOTE(review): xmin/xmax/ymin/ymax and mean_x/mean_y are computed
            # but never used below -- looks like leftovers from an earlier
            # corner-detection approach.
            xmin, xmax, ymin, ymax = width, 0, height, 0
            mean_x, mean_y = 0, 0
            b = sorted(b)
            top_left, bottom_right = b[0], b[-1]
            for x, y in b:
                mean_x += x
                mean_y += y
            # Sort by y to find the remaining two corners.
            b = sorted(b, key=lambda x: x[1])
            curr = 0
            while b[curr][1] == b[curr + 1][1]:
                curr += 1
            top_right = b[curr]
            curr = len(b) - 1
            while b[curr][1] == b[curr - 1][1]:
                curr -= 1
            bottom_left = b[curr]
            template_place.append(
                [top_left, top_right, bottom_right, bottom_left])
        return template_place
    def make(self):
        """Paste a random image into every detected template space, save the
        composite under Result/ with a random name and return
        (output_path, list_of_used_image_paths)."""
        template_places = self.find_template_local()
        images = []
        for s in template_places:
            image = self.random_image()[1]
            foreground = Image.open(image)
            images.append(image)
            # Bounding box of this space from its four corner points.
            xleft = min(s[0][0],s[3][0])
            xright = max(s[1][0],s[2][0])
            ytop = min(s[0][1],s[1][1])
            ybot = max(s[2][1],s[3][1])
            x = xright - xleft
            y = ybot - ytop
            #resize_factor = foreground.size[0]/foreground.size[1]
            #x = y*resize_factor
            size = (int(x),y)
            original2 = Image.open(self.template)
            foreground = foreground.resize(size)
            # Fill the hole, then re-apply the template with its own alpha
            # mask so only the hole area shows the pasted image.
            self.original.paste(foreground, (xleft,ytop))
            self.original.paste(original2, (0, 0), original2)
        # Random name and save
        out = os.path.join(self.folder, "Result", str(random.randrange(100000, 999999)) + ".jpg")
        self.original.save(out)
        # And return location
        print(images)
        return out, images
#Slowest, but can multiple imagens, and calculate the best images from fit in template
class Conscious_Meme(Advanced_Meme):
#Calculate SSIM
    def ssim(self, image1, image2):
        """Structural-similarity score between two image files (1.0 = identical).

        image1 is shrunk to max width 300 (aspect preserved) and image2 is
        resized to the same dimensions before comparison.

        :param image1: path of the reference image
        :param image2: path of the image to compare against
        :return: SSIM value (float)
        """
        image1 = Image.open(image1).convert('RGB')
        if image1.size[0] > 300:
            new_size = (300, int(image1.size[1] / image1.size[0] * 300))
        else:
            new_size = image1.size
        print(image1.size, new_size)
        image1 = image1.resize(new_size)
        image2 = Image.open(image2).resize(new_size).convert('RGB')
        image1 = array(image1)
        image2 = array(image2)
        img1 = img_as_float(image1)
        img2 = img_as_float(image2)
        # NOTE(review): skimage.measure.compare_ssim was removed in newer
        # scikit-image releases (moved to skimage.metrics.structural_similarity)
        # -- confirm the pinned scikit-image version.
        return compare_ssim(img1, img2, win_size=None, gradient=False, multichannel=True)
# Convert RGB to Hue
def RGBtoHue(self,r, g, b):
R = r / 255;
G = g / 255;
B = b / 255
RGB = {'R': R, 'G': G, 'B': B}
Max = max(RGB, key=RGB.get)
Min = min(RGB, key=RGB.get)
try:
if Max is 'R':
Hue = (RGB['G'] - RGB['B']) / (RGB[Max] - RGB[Min])
elif Max is 'G':
Hue = 2.0 + (RGB['B'] - RGB['R']) / (RGB[Max] - RGB[Min])
elif Max is 'B':
Hue = 4.0 + (RGB['R'] - RGB['G']) / (RGB[Max] - RGB[Min])
except ZeroDivisionError:
Hue = 0.0
Hue = Hue * 60
if Hue < 0:
Hue = Hue + 360
return Hue
# Calculates the most common hues of a image
    def ImageCommonHue(self,image):
        """Return the three most common hue buckets (multiples of 10) of *image*.

        The image is shrunk to width 400 (aspect preserved) and quantised to a
        3-colour adaptive palette first; missing slots are filled with 0.

        :param image: path of the image file
        :return: list of exactly three integer hue buckets
        """
        image = Image.open(image).convert('RGB')
        ratio = image.size[1] / image.size[0]
        image = image.resize((400, int(400 * ratio)))
        image = image.convert('P', palette=Image.ADAPTIVE, colors=3)
        image = image.convert('RGB')
        pixels = image.load()
        hueList = []
        n = 0
        for y in range(image.size[1]):
            for x in range(image.size[0]):
                rgb = pixels[x, y]
                hue = self.RGBtoHue(rgb[0], rgb[1], rgb[2])
                hueList.append(hue)
                n += 1
        # Bucket hues into multiples of 10 and count them.
        hueList = [int(x / 10) * 10 for x in hueList]
        hueDict = collections.Counter(hueList)
        returning = [None] * 3
        n = 0
        while n < 3:
            try:
                for hue in range(len(hueDict)):
                    returning[n] = hueDict.most_common(3)[n][0]
                    n += 1
            except IndexError:
                # Fewer than three distinct buckets; stop filling.
                break
        # Replace unfilled slots with 0 so the result always has length 3.
        for hue in range(len(returning)):
            if returning[hue] is None:
                returning[hue] = 0
        return returning
#Calculate the difference of two images by hue
def ImageHueDiff(self,image1, image2, calculeImage1=True):
if calculeImage1 is True:
hue1 = self.ImageCommonHue(image1)
elif calculeImage1 is False:
hue1 = image1
hue2 = self.ImageCommonHue(image2)
diffHue = [None] * 3
n = 0
for hue in hue1:
diffHue[n] = hue - hue2[n]
n += 1
diffHue = [abs(x) for x in diffHue]
diff = sum(diffHue)
return diff
# Make a list of image semelhance
def compare_image(self, image1):
files = [f for f in os.listdir(os.path.join(self.folder, 'Images')) if
os.path.isfile(os.path.join(os.path.join(self.folder, 'Images'), f))]
choosen = []
results = {}
i = 0
while i < 100:
image = random.choice(files)
if image in choosen:
pass
else:
choosen.append(image)
i += 1
hueImage1 = self.ImageCommonHue(image1)
if max(hueImage1) is 0:
for image2 in choosen:
image2 = os.path.join(self.folder,'Images',image2)
results[image2] = self.ssim(image1, os.path.join(image2))
resultKeys = [x for x in results.keys()]
for x in resultKeys:
if results[x] is 1.0:
del results[x]
diffList = sorted(results.keys(), key=lambda k: results[k], reverse=True)
else:
n=0
for image2 in choosen:
image2 = os.path.join(self.folder,'Images',image2)
results[image2] = self.ImageHueDiff(hueImage1, image2, calculeImage1=False)
n += 1
print(str(n) + '=' + str(results[image2]))
resultKeys = [x for x in results.keys()]
for x in resultKeys:
if results[x] is 0:
del results[x]
diffList = sorted(results.keys(), key=lambda k: results[k])
return diffList
#Make the meme
def make(self):
template_places = self.find_template_local()
images = []
i = 0
image1 = self.random_image()[1]
if len(template_places) is 1:
images = [image1]
else:
images = [image1] + self.compare_image(image1)
for s in template_places:
image = images[i]
foreground = Image.open(image)
images.append(image)
xleft = min(s[0][0], s[3][0])
xright = max(s[1][0], s[2][0])
ytop = min(s[0][1], s[1][1])
ybot = max(s[2][1], s[3][1])
x = xright - xleft
y = ybot - ytop
size = (int(x), y)
original2 = Image.open(self.template)
foreground = foreground.resize(size)
self.original.paste(foreground, (xleft, ytop))
self.original.paste(original2, (0, 0), original2)
i += 1
# Random name and save
out = os.path.join(self.folder, "Result", str(random.randrange(100000, 999999)) + ".jpg")
self.original.save(out)
# And return location
return out, images
|
#!/usr/bin/env python
# Dask/SLURM benchmark: compute the time-mean SSH of the eNATL60 dataset.
# NOTE: the '%time' lines below are IPython magics — this file must be run
# under IPython (or a notebook), not plain CPython.
# # The imports
import xarray as xr
import time
import dask
# # The workers
# Requested cluster size: 8 single-core workers with 4 GB each.
ask_workers=8
ask_memory='4GB'
from dask_jobqueue import SLURMCluster
from dask.distributed import Client
# One process/core per SLURM job, pinned to Haswell nodes, InfiniBand (ib0).
cluster = SLURMCluster(cores=1,processes=1,name='pangeo',walltime='02:30:00',
                       job_extra=['--constraint=HSW24','--exclusive',
                                  '--nodes=1'],memory=ask_memory,
                       interface='ib0')
cluster.scale(ask_workers)
c= Client(cluster)
c
from dask.utils import ensure_dict, format_bytes
# Summarise the workers actually obtained from the scheduler.
wk = c.scheduler_info()["workers"]
text="Workers= " + str(len(wk))
memory = [w["memory_limit"] for w in wk.values()]
cores = sum(w["nthreads"] for w in wk.values())
text += ", Cores=" + str(cores)
if all(memory):
    text += ", Memory=" + format_bytes(sum(memory))
print(text)
#Workers= 8, Cores=8, Memory=32.00 GB
# # The data
%time ds=xr.open_zarr('/store/albert7a/eNATL60/zarr/eNATL60-BLBT02-SSH-1h')
#1.73 s
%time mean=ds.sossheig.mean(dim='time_counter')
#365 ms
%time mean.load()
#13min 23s
cluster.close()
|
from Classes.ServiceDAO import ServiceDAO
class Service:
    """Thin controller exposing service-registration use cases."""

    def __init__(self):
        pass

    def add_service(self):
        """Prompt for and persist a new service via the DAO layer."""
        # Parenthesised print works on both Python 2 and Python 3
        # (the original bare `print` statement was Python-2-only).
        print("Cadastrar novo servico")
        service = ServiceDAO()
        service.getData()
|
import numpy as np

# Twelve uniform [0, 1) samples viewed as a 3x2x2 array.
x = np.random.random(12).reshape((3, 2, 2))
print(x)
# Copyright 2023 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import numpy as np
import pytest
import qutip
import pulser
from pulser.devices import MockDevice
from pulser.waveforms import BlackmanWaveform
from pulser_simulation import SimConfig
from pulser_simulation.qutip_backend import QutipBackend
from pulser_simulation.qutip_result import QutipResult
from pulser_simulation.simresults import CoherentResults
@pytest.fixture
def sequence():
    """A single-qubit sequence applying a pi-pulse on the local Raman channel."""
    register = pulser.Register({"q0": (0, 0)})
    seq = pulser.Sequence(register, MockDevice)
    seq.declare_channel("raman_local", "raman_local", initial_target="q0")
    pi_pulse = pulser.Pulse.ConstantDetuning(BlackmanWaveform(1000, np.pi), 0, 0)
    seq.add(pi_pulse, "raman_local")
    return seq
def test_qutip_backend(sequence):
    """QutipBackend rejects a plain SimConfig and simulates a pi-pulse coherently."""
    bad_config = SimConfig()
    # Only EmulatorConfig instances are accepted by the backend.
    with pytest.raises(TypeError, match="must be of type 'EmulatorConfig'"):
        QutipBackend(sequence, bad_config)
    backend = QutipBackend(sequence)
    results = backend.run()
    assert isinstance(results, CoherentResults)
    # The simulation starts in |0>.
    assert results[0].get_state() == qutip.basis(2, 0)
    last_result = results[-1]
    assert isinstance(last_result, QutipResult)
    final_state = last_result.get_state()
    assert final_state == results.get_final_state()
    # A pi-pulse transfers the full population to |1>.
    np.testing.assert_allclose(final_state.full(), [[0], [1]], atol=1e-5)
|
import time
from selenium.webdriver.common.by import By
from Utilities.BasePage import BasePage
from Utilities.TestData import TestData
class MyGarage(BasePage):
    """Page object for the 'My Garage' watch-list carousel."""

    # Locators
    MY_GARAGE_LINK = (By.LINK_TEXT, 'My Garage')
    PAGE_NUMBER_BUTTONS = (By.XPATH, "//li[@role='presentation']")
    FIRST_PAGE_BUTTON = (By.XPATH, "//button[@id='slick-slide-control00' and @aria-selected='true']")
    CAR_DETAIL_TEXT =(By.CSS_SELECTOR, 'div.slick-current h5')
    REMOVE_BUTTON = (By.CSS_SELECTOR, 'div#slick-slide00 div p a')
    REMOVE_ALL_BUTTON = (By.LINK_TEXT, 'Remove from watchlist')
    NEXT_BUTTON = (By.CSS_SELECTOR, 'span.watch-next')

    def __init__(self, driver):
        super().__init__(driver)

    def compare_watch_list(self):
        """Collect the car details shown on every carousel page.

        Returns a list with one [detail, ...] list per watched car.
        """
        actual_watch_list = []
        page_count = len(self.get_elements(self.PAGE_NUMBER_BUTTONS))
        print('\n')
        for _ in range(page_count):
            print(self.get_element_text(self.CAR_DETAIL_TEXT))
            actual_watch_list.append(list(self.get_element_text(self.CAR_DETAIL_TEXT).split(", ")))
            # Advance the carousel and give it time to settle.
            self.js_click(self.NEXT_BUTTON)
            time.sleep(2)
        print(actual_watch_list)
        return actual_watch_list

    def delete_all_watch_list(self):
        """Remove every entry from the watch list, one click at a time."""
        self.do_click(self.MY_GARAGE_LINK)
        print('\n')
        while self.is_visible(self.REMOVE_ALL_BUTTON):
            self.js_click(self.REMOVE_ALL_BUTTON)
            print('Deleting watch list....')
            time.sleep(2)
|
# From functools: https://hg.python.org/cpython/file/3.5/Lib/functools.py
'''
>>> from functools import partial
>>> p = partial(print, end=' ')
>>> p.func
<built-in function print>
>>> p.args
()
>>> p.keywords
{'end': ' '}
>>> p1 = partial(p, sep='\t')
>>> p1
functools.partial(functools.partial(<built-in function print>, end=' '), sep='\t')
>>> p1.args
()
>>> p1.func
functools.partial(<built-in function print>, end=' ')
>>> p1.keywords
{'sep': '\t'}
>>> p2 = partial(print, end=' ', sep='\t')
>>> p2.args
()
>>> p2.keywords
{'end': ' ', 'sep': '\t'}
>>> p2.func
<built-in function print>
'''
# Purely functional, no descriptor behaviour
def partial(func, *args, **keywords):
    '''New function with partial application
    of the given arguments and keywords.'''
    # Flatten nested partials so only one wrapper call happens at call time.
    if hasattr(func, 'func'):
        merged_kw = func.keywords.copy()
        merged_kw.update(keywords)
        keywords = merged_kw
        args = func.args + args
        func = func.func

    def newfunc(*fargs, **fkeywords):
        call_kw = keywords.copy()
        call_kw.update(fkeywords)
        return func(*(args + fargs), **call_kw)

    # Expose the bound state, mirroring functools.partial's attributes.
    newfunc.func = func
    newfunc.args = args
    newfunc.keywords = keywords
    return newfunc
# Prefer the C implementation of partial when available; the pure-Python
# version above remains as a fallback (and as documentation).
try:
    from _functools import partial
except ImportError:
    pass
# Descriptor version
class partialmethod(object):
"""Method descriptor with partial application of the given arguments
and keywords.
Supports wrapping existing descriptors and handles non-descriptor
callables as instance methods.
"""
def __init__(self, func, *args, **keywords):
if not callable(func) and not hasattr(func, "__get__"):
raise TypeError("{!r} is not callable or a descriptor"
.format(func))
# func could be a descriptor like classmethod which isn't callable,
# so we can't inherit from partial (it verifies func is callable)
if isinstance(func, partialmethod):
# flattening is mandatory in order to place cls/self before all
# other arguments
# it's also more efficient since only one function will be called
self.func = func.func
self.args = func.args + args
self.keywords = func.keywords.copy()
self.keywords.update(keywords)
else:
self.func = func
self.args = args
self.keywords = keywords
def __repr__(self):
args = ", ".join(map(repr, self.args))
keywords = ", ".join("{}={!r}".format(k, v)
for k, v in self.keywords.items())
format_string = "{module}.{cls}({func}, {args}, {keywords})"
return format_string.format(module=self.__class__.__module__,
cls=self.__class__.__qualname__,
func=self.func,
args=args,
keywords=keywords)
def _make_unbound_method(self):
def _method(*args, **keywords):
call_keywords = self.keywords.copy()
call_keywords.update(keywords)
cls_or_self, *rest = args
call_args = (cls_or_self,) + self.args + tuple(rest)
return self.func(*call_args, **call_keywords)
_method.__isabstractmethod__ = self.__isabstractmethod__
_method._partialmethod = self
return _method
def __get__(self, obj, cls):
get = getattr(self.func, "__get__", None)
result = None
if get is not None:
new_func = get(obj, cls)
if new_func is not self.func:
# Assume __get__ returning something new indicates the
# creation of an appropriate callable
result = partial(new_func, *self.args, **self.keywords)
try:
result.__self__ = new_func.__self__
except AttributeError:
pass
if result is None:
# If the underlying descriptor didn't do anything, treat this
# like an instance method
result = self._make_unbound_method().__get__(obj, cls)
return result
@property
def __isabstractmethod__(self):
return getattr(self.func, "__isabstractmethod__", False)
|
# -*-coding:utf8-*-
import logging
import os
import re
import time
import jieba
import numpy as np
import tensorflow as tf
import tensorlayer as tl
jieba.load_userdict(r"D:\PythonWorkstation\job_project\information_extraction\lib\dict.txt")
def data_prepossessing():
    """Tokenise the corpus file and build its vocabulary.

    Returns:
        (tokens, vocabulary, vocabulary_size): the full token sequence, the
        list of distinct tokens, and that list's length.
    """
    tokens = []
    with open(r'D:\PythonWorkstation\job_project\information_extraction\data\時間.txt', 'r', encoding='utf-8') as f:
        for text in f.readlines():
            # Strip non-word characters, then segment with jieba.
            tokens += list(jieba.cut(re.sub('\W', '', text)))
    vocab = list(set(tokens))
    return tokens, vocab, len(vocab)
def save_checkpoint(ckpt_file_path):
    """Save the model's training state.

    Produces the following files:
        checkpoint
        model_name.ckpt.data-?????-of-?????
        model_name.ckpt.index
        model_name.ckpt.meta

    Args:
        ckpt_file_path: Path prefix (without extension) the state is saved to.
    """
    path = os.path.dirname(os.path.abspath(ckpt_file_path))
    # Create the checkpoint directory on first use
    # (idiom fix: was `if os.path.isdir(path) == False`).
    if not os.path.isdir(path):
        logging.warning('Path (%s) not exists, making directories...', path)
        os.makedirs(path)
    # NOTE: relies on the module-level default TensorFlow session `sess`.
    tf.train.Saver().save(sess, ckpt_file_path + '.ckpt')
def load_checkpoint(ckpt_file_path):
    """Restore training state into the default session, if a checkpoint exists.

    Restores from `ckpt_file_path`.ckpt when both its .index and .meta
    files are present; otherwise does nothing.

    Args:
        ckpt_file_path: Path prefix (without extension) the state was saved to.
    """
    ckpt = ckpt_file_path + '.ckpt'
    # Restore only when both parts of a complete checkpoint are on disk.
    if os.path.isfile(ckpt + ".index") and os.path.isfile(ckpt + ".meta"):
        tf.train.Saver().restore(sess, ckpt)
def save_embedding(data, count, dictionary, reverse_dictionary, emb_net, embedding_file_path):
    """Persist the vocabulary tables and the embedding network parameters.

    Writes `<embedding_file_path>.npy` holding the dataset/vocabulary tables
    and `<embedding_file_path>.npz` holding the network parameters.

    Args:
        data: Token-id sequence of the corpus.
        count: Token frequency table.
        dictionary: word -> id mapping, e.g. {'UNK': 0, '你': 1, ...}.
        reverse_dictionary: id -> word mapping.
        emb_net: The Word2vec embedding input layer whose params are saved.
        embedding_file_path: Output path prefix.
    """
    # BUGFIX(cleanup): the original also evaluated the normalized embeddings
    # into an unused word->vector dict here (an expensive no-op requiring a
    # full embedding_lookup eval); that dead code is removed.
    path = os.path.dirname(os.path.abspath(embedding_file_path))
    if not os.path.isdir(path):
        logging.warning('Path (%s) not exists, making directories...', path)
        os.makedirs(path)
    tl.files.save_any_to_npy(
        save_dict={
            'data': data,
            'count': count,
            'dictionary': dictionary,
            'reverse_dictionary': reverse_dictionary
        }, name=embedding_file_path + '.npy'
    )
    tl.files.save_npz_dict(emb_net.all_params, name=embedding_file_path + '.npz', sess=sess)
class Word2Vec:
    """Skip-gram word2vec trainer over the module-level corpus.

    NOTE(review): reads the module globals `data`, `vocabulary`,
    `vocabulary_size`, `sess`, `model_name` and `embedding_file_path`
    that are set in the __main__ block — confirm before reuse elsewhere.
    """

    def __init__(self):
        self.data = data
        self.vocabulary = vocabulary
        self.vocabulary_size = vocabulary_size
        self.batch_size = 30          # examples per training step
        self.embedding_size = 128     # dimensionality of the word vectors
        self.skip_window = 5          # context window radius
        self.num_skips = 10           # samples drawn per center word
        self.num_sampled = 8          # negative samples for the NCE loss
        self.learning_rate = 1e-3
        self.n_epoch = 50
        # Total steps so each token is visited roughly n_epoch times.
        self.num_steps = int((self.vocabulary_size / self.batch_size) * self.n_epoch)

    def training(self):
        """Train the embedding with NCE loss, checkpointing as it goes."""
        data, count, dictionary, reverse_dictionary = \
            tl.nlp.build_words_dataset(self.data, self.vocabulary_size)
        train_inputs = tf.placeholder(tf.int32, shape=[self.batch_size])
        train_labels = tf.placeholder(tf.int32, shape=[self.batch_size, 1])
        # Keep the (large) embedding matrix on CPU.
        with tf.device('/cpu:0'):
            emb_net = tl.layers.Word2vecEmbeddingInputlayer(
                inputs=train_inputs,
                train_labels=train_labels,
                vocabulary_size=self.vocabulary_size,
                embedding_size=self.embedding_size,
                num_sampled=self.num_sampled)
        loss = emb_net.nce_cost
        optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)
        sess.run(tf.global_variables_initializer())
        ckpt_file_path = "checkpoint/" + model_name
        # Resume from a previous run when a checkpoint exists.
        load_checkpoint(ckpt_file_path)
        step = data_index = 0
        loss_vals = []
        while step < self.num_steps:
            batch_inputs, batch_labels, data_index = tl.nlp.generate_skip_gram_batch(
                data=data, batch_size=self.batch_size, num_skips=self.num_skips,
                skip_window=self.skip_window, data_index=data_index)
            feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
            _, loss_val = sess.run([optimizer, loss], feed_dict=feed_dict)
            loss_vals.append(loss_val)
            if (step != 0) and (step % 10) == 0:
                # Log and reset the running-average loss window.
                logging.info("(%d/%d) latest average loss: %f.", step, self.num_steps, sum(loss_vals) / len(loss_vals))
                del loss_vals[:]
                # NOTE(review): checkpoint and embeddings are re-saved every
                # 10 steps, which is expensive — confirm this cadence.
                save_checkpoint(ckpt_file_path)
                save_embedding(data, count, dictionary, reverse_dictionary, emb_net, embedding_file_path)
            step += 1
class Seq2seq:
    """Word-level LSTM text generator built on the word2vec corpus.

    NOTE(review): reads the module globals `model_name`,
    `embedding_file_path`, `data` and `sess` set in the __main__ block.
    """

    def __init__(self):
        self.embedding_size = 128
        # Reload the tables persisted by save_embedding().
        self.all_var = tl.files.load_npy_to_any(name=embedding_file_path + '.npy')
        self.data = self.all_var['data']
        self.count = self.all_var['count']
        self.dictionary = self.all_var['dictionary']
        self.reverse_dictionary = self.all_var['reverse_dictionary']
        tl.nlp.save_vocab(self.count, name='vocab_' + model_name + '.txt')

    def main_lstm_generate_text(self):
        """Generate text by Synced sequence input and output."""
        # rnn model and update (describtion: see tutorial_ptb_lstm.py)
        init_scale = 0.1
        learning_rate = 1e-2
        max_grad_norm = 5
        sequence_length = 5
        hidden_size = 512
        max_epoch = 5           # epochs before learning-rate decay kicks in
        max_max_epoch = 100     # total training epochs
        lr_decay = 0.9
        batch_size = 10
        top_k_list = [20]       # top-k sampling cutoffs to try when generating
        print_length = 4        # number of tokens to generate per sample
        model_file_name = "model_generate_text.npz"
        # ===== Prepare Data
        # NOTE(review): uses the module-level `data`, not self.data — confirm.
        words = data
        vocab = tl.nlp.create_vocab([words], word_counts_output_file='deep.txt', min_word_count=1)
        vocab = tl.nlp.Vocabulary('deep.txt', unk_word="UNK")
        vocab_size = vocab.unk_id + 1
        train_data = [vocab.word_to_id(word) for word in words]
        # Set the seed to generate sentence.
        # seed = ['中小', '学校', '停课','半天','韶关','市区','7','月','2','日','至','3','日','凌晨','遭遇',
        #        '特大','暴雨','市区','范围','多地','发生','内涝']
        seed = ['8']
        # seed = basic_clean_str(seed).split()
        # ===== Define model
        input_data = tf.placeholder(tf.int32, [batch_size, sequence_length])
        targets = tf.placeholder(tf.int32, [batch_size, sequence_length])
        # Testing (Evaluation), for generate text
        input_data_test = tf.placeholder(tf.int32, [1, 1])

        def inference(x, is_train, sequence_length, reuse=None):
            """If reuse is True, the inferences use the existing parameters,
            then different inferences share the same parameters.
            """
            print("\nsequence_length: %d, is_train: %s, reuse: %s" % (sequence_length, is_train, reuse))
            rnn_init = tf.random_uniform_initializer(-init_scale, init_scale)
            with tf.variable_scope("model", reuse=reuse):
                network = tl.layers.EmbeddingInputlayer(x, vocab_size, hidden_size, rnn_init, name='embedding')
                network = tl.layers.RNNLayer(
                    network, cell_fn=tf.contrib.rnn.BasicLSTMCell, cell_init_args={
                        'forget_bias': 0.0,
                        'state_is_tuple': True
                    }, n_hidden=hidden_size, initializer=rnn_init, n_steps=sequence_length, return_last=False,
                    return_seq_2d=True, name='lstm1'
                )
                lstm1 = network
                network = tl.layers.DenseLayer(network, vocab_size, W_init=rnn_init, b_init=rnn_init, act=None,
                                               name='output')
            return network, lstm1

        # Inference for Training
        network, lstm1 = inference(input_data, is_train=True, sequence_length=sequence_length, reuse=None)
        # Inference for generate text, sequence_length=1
        network_test, lstm1_test = inference(input_data_test, is_train=False, sequence_length=1, reuse=True)
        y_linear = network_test.outputs
        y_soft = tf.nn.softmax(y_linear)
        # y_id = tf.argmax(tf.nn.softmax(y), 1)

        # ===== Define train ops
        def loss_fn(outputs, targets, batch_size, sequence_length):
            # Returns the cost function of Cross-entropy of two sequences, implement
            # softmax internally.
            # outputs : 2D tensor [n_examples, n_outputs]
            # targets : 2D tensor [n_examples, n_outputs]
            # n_examples = batch_size * sequence_length
            # so
            # cost is the averaged cost of each mini-batch (concurrent process).
            loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
                [outputs], [tf.reshape(targets, [-1])], [tf.ones([batch_size * sequence_length])]
            )
            cost = tf.reduce_sum(loss) / batch_size
            return cost

        # Cost for Training
        cost = loss_fn(network.outputs, targets, batch_size, sequence_length)
        # Truncated Backpropagation for training
        with tf.variable_scope('learning_rate'):
            lr = tf.Variable(0.0, trainable=False)
        # You can get all trainable parameters as follow.
        # tvars = tf.trainable_variables()
        # Alternatively, you can specify the parameters for training as follw.
        # tvars = network.all_params $ all parameters
        # tvars = network.all_params[1:] $ parameters except embedding matrix
        # Train the whole network.
        tvars = network.all_params
        grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), max_grad_norm)
        optimizer = tf.train.AdamOptimizer(lr)
        train_op = optimizer.apply_gradients(zip(grads, tvars))

        # ===== Training
        sess.run(tf.global_variables_initializer())
        print("\nStart learning a model to generate text")
        for i in range(max_max_epoch):
            # decrease the learning_rate after ``max_epoch``, by multipling lr_decay.
            new_lr_decay = lr_decay ** max(i - max_epoch, 0.0)
            sess.run(tf.assign(lr, learning_rate * new_lr_decay))
            print("Epoch: %d/%d Learning rate: %.8f" % (i + 1, max_max_epoch, sess.run(lr)))
            epoch_size = ((len(train_data) // batch_size) - 1) // sequence_length
            start_time = time.time()
            costs = 0.0
            iters = 0
            # reset all states at the begining of every epoch
            state1 = tl.layers.initialize_rnn_state(lstm1.initial_state)
            for step, (x, y) in enumerate(tl.iterate.ptb_iterator(train_data, batch_size, sequence_length)):
                _cost, state1, _ = sess.run(
                    [cost, lstm1.final_state, train_op], feed_dict={
                        input_data: x,
                        targets: y,
                        lstm1.initial_state: state1
                    }
                )
                costs += _cost
                iters += sequence_length
                # if step % (epoch_size // 10) == 1:
                #     print(
                #         "%.3f perplexity: %.3f speed: %.0f wps" %
                #         (
                #             step * 1.0 / epoch_size, np.exp(costs / iters), iters * batch_size / (time.time() - start_time))
                #     )
            train_perplexity = np.exp(costs / iters)
            # print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
            print("Epoch: %d/%d Train Perplexity: %.3f" % (i + 1, max_max_epoch, train_perplexity))
            # for diversity in diversity_list:
            # testing: sample from top k words
            for top_k in top_k_list:
                # Testing, generate some text from a given seed.
                state1 = tl.layers.initialize_rnn_state(lstm1_test.initial_state)
                # state2 = tl.layers.initialize_rnn_state(lstm2_test.initial_state)
                outs_id = [vocab.word_to_id(w) for w in seed]
                # feed the seed to initialize the state for generation.
                for ids in outs_id[:-1]:
                    a_id = np.asarray(ids).reshape(1, 1)
                    # NOTE(review): sess.run([...]) returns a one-element list
                    # here (unlike the unpacked call below) — confirm feeding
                    # it back as initial_state behaves as intended.
                    state1 = sess.run(
                        [lstm1_test.final_state], feed_dict={
                            input_data_test: a_id,
                            lstm1_test.initial_state: state1
                        }
                    )
                # feed the last word in seed, and start to generate sentence.
                a_id = outs_id[-1]
                for _ in range(print_length):
                    a_id = np.asarray(a_id).reshape(1, 1)
                    out, state1 = sess.run(
                        [y_soft, lstm1_test.final_state], feed_dict={
                            input_data_test: a_id,
                            lstm1_test.initial_state: state1
                        }
                    )
                    # Without sampling
                    # a_id = np.argmax(out[0])
                    # Sample from all words, if vocab_size is large,
                    # this may have numeric error.
                    # a_id = tl.nlp.sample(out[0], diversity)
                    # Sample from the top k words.
                    a_id = tl.nlp.sample_top(out[0], top_k=top_k)
                    outs_id.append(a_id)
                sentence = [vocab.id_to_word(w) for w in outs_id]
                sentence = "".join(sentence)
                with open('./output/time.txt', 'a', encoding='utf-8') as f:
                    f.write(sentence + '\n')
                # print(diversity, ':', sentence)
                print(top_k, ':', sentence)
            print("Save model")
            tl.files.save_npz(network_test.all_params, name=model_file_name)
if __name__ == '__main__':
    model_name = 'time_128'
    embedding_file_path = "output/" + model_name
    # Corpus and vocabulary become module globals consumed by Word2Vec/Seq2seq.
    data, vocabulary, vocabulary_size = data_prepossessing()
    w2v = Word2Vec()
    fmt = "%(asctime)s %(levelname)s %(message)s"
    logging.basicConfig(format=fmt, level=logging.INFO)
    # One shared interactive session for both training phases.
    sess = tf.InteractiveSession()
    w2v.training()
    seq = Seq2seq()
    seq.main_lstm_generate_text()
    sess.close()
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pathlib import Path
from textwrap import dedent
from pants.testutil.pants_integration_test import PantsResult, run_pants, setup_tmpdir
def typecheck_file(path: str, filename: str) -> PantsResult:
    """Run `pants check` (mypy) on a single file in the sandboxed project.

    Args:
        path: Temporary project directory.
        filename: File within `path` to typecheck.
    """
    return run_pants(
        [
            "--backend-packages=pants.backend.python",
            "--backend-packages=pants.backend.python.typecheck.mypy",
            "--python-interpreter-constraints=['==3.9.*']",
            "check",
            # BUGFIX: the target must use the `filename` argument; it was a
            # hard-coded placeholder, so every call checked the same
            # nonexistent file and the parameter was silently ignored.
            f"{path}/{filename}",
        ],
        config={
            "mypy": {"config": f"{path}/mypy.ini"},
        },
    )
def test_typechecking() -> None:
    """End-to-end check that mypy runs against the locally built pants wheels.

    Builds a temporary project depending on the pantsbuild.pants and
    pantsbuild.pants.testutil wheels in CWD, then asserts a well-typed file
    passes `check` and an ill-typed one fails.
    """
    # NB: We install pantsbuild.pants.testutil and pantsbuild.pants in the same test because
    # pantsbuild.pants.testutil depends on pantsbuild.pants, and we need to install that dependency
    # from the filesystem, rather than falling back to PyPI.
    pants_wheel = list(Path.cwd().glob("pantsbuild.pants-*.whl"))[0]
    testutil_wheel = list(Path.cwd().glob("pantsbuild.pants.testutil-*.whl"))[0]
    # In-memory project layout written to a tmpdir by setup_tmpdir below.
    project = {
        "BUILD": dedent(
            f"""\
            python_requirement(
                name="pants",
                requirements=["pantsbuild.pants @ file://{pants_wheel}"],
            )
            python_requirement(
                name="testutil",
                requirements=["pantsbuild.pants.testutil @ file://{testutil_wheel}"],
            )
            python_sources(name="lib", dependencies=[":pants", ":testutil"])
            """
        ),
        "ok.py": dedent(
            """\
            from pants.util.strutil import ensure_text
            from pants.testutil.rule_runner import RuleRunner
            assert ensure_text(b"hello world") == "hello world"
            RuleRunner(rules=[], target_types=[])
            """
        ),
        "err.py": dedent(
            """\
            from pants.util.strutil import ensure_text
            from pants.testutil.rule_runner import RuleRunner
            assert ensure_text(123) == "123"
            RuleRunner(bad_kwargs="foo")
            """
        ),
        "mypy.ini": dedent(
            """\
            [mypy]
            namespace_packages = true
            explicit_package_bases = true
            mypy_path = "src/python:tests/python:testprojects/src/python"
            """
        ),
    }
    with setup_tmpdir(project) as tmpdir:
        typecheck_file(tmpdir, "ok.py").assert_success()
        typecheck_file(tmpdir, "err.py").assert_failure()
|
# splits facebook messages from subjects given in args[1] separated by '|'
# splits into groups given by args[3]
# writes files to sampleSplit/args[2]
import sys
import os
from os.path import isfile, join
import re
import shutil
CONST_GROUPING_SIZE = int(sys.argv[3])

# Characters that already terminate a line; lines ending otherwise get a '.' appended.
line_endings = ['.', ',', '?', '!', ':', ';', '-', '+']
reu_path = "C:/Users/Daway Chou-Ren/Documents/REU/"
fromDir = "samples/facebookMessages/allMessages.txt"
toDir = "sampleSplit/" + sys.argv[2]
outputDir = reu_path + toDir
if not os.path.exists(outputDir):
    os.makedirs(outputDir)
# Clear any previous split output.
for folder in os.listdir(outputDir):
    shutil.rmtree(os.path.join(outputDir, folder))
labels = sys.argv[1].split("|")
# One subfolder per label, named by the label's first word.
for label in labels:
    subfolder = outputDir + label[0:label.index(' ')] + '/'
    if not os.path.exists(subfolder):
        os.makedirs(subfolder)
label_counters = [0] * len(labels)
samples_path = reu_path + fromDir
which_label = ""
label_index = 0
# BUGFIX: use context managers; the original never closed the input file and
# leaked one output handle per written line.
with open(samples_path, 'r') as inputfile:
    for line in inputfile:
        if which_label != "":
            # Previous line was a label header: write this message line into
            # the label's current group file.
            name = which_label[0:which_label.index(' ')]
            out_path = (outputDir + name + '/' + name + '_'
                        + str(label_counters[label_index] // CONST_GROUPING_SIZE + 1) + '.txt')
            with open(out_path, 'a') as output:
                if line[len(line) - 2:len(line) - 1] in line_endings:
                    output.write(line)
                else:
                    output.write(line[:-1] + '.\n')
            which_label = ""
        label_index = 0
        for label in labels:
            # NOTE(review): the original built `pattern = label + '[A-Z]'` but
            # matched on the bare label; that unused variable is dropped here
            # and the bare-label match preserved — confirm intent.
            if re.match(label, line):
                which_label = label
                label_counters[label_index] += 1
                break
            label_index += 1
# Terminal bell to signal completion.
os.system('\a')
"""
The core addon is responsible for verifying core settings that are not
checked by other addons.
"""
from mitmproxy import exceptions
from mitmproxy import platform
from mitmproxy.net import server_spec
from mitmproxy.utils import human
class CoreOptionValidation:
    """Validates core settings that are not checked by other addons."""

    def configure(self, opts, updated):
        """Validate updated options, raising OptionsError for bad combinations.

        Args:
            opts: The options object being configured.
            updated: The set of option names that changed.

        Raises:
            exceptions.OptionsError: On any invalid option or combination.
        """
        if opts.add_upstream_certs_to_client_chain and not opts.upstream_cert:
            raise exceptions.OptionsError(
                "The no-upstream-cert and add-upstream-certs-to-client-chain "
                "options are mutually exclusive. If no-upstream-cert is enabled "
                "then the upstream certificate is not retrieved before generating "
                "the client certificate chain."
            )
        if "body_size_limit" in updated and opts.body_size_limit:
            try:
                # Accept human-readable sizes such as "10m" / "1g".
                opts._processed["body_size_limit"] = human.parse_size(
                    opts.body_size_limit
                )
            except ValueError as e:
                # CONSISTENCY FIX: chain the cause (`from e`) as the mode
                # handler below already does; `e` was previously unused.
                raise exceptions.OptionsError(
                    "Invalid body size limit specification: %s" %
                    opts.body_size_limit
                ) from e
        if "mode" in updated:
            mode = opts.mode
            if mode.startswith("reverse:") or mode.startswith("upstream:"):
                try:
                    server_spec.parse_with_mode(mode)
                except ValueError as e:
                    raise exceptions.OptionsError(str(e)) from e
            elif mode == "transparent":
                if not platform.original_addr:
                    raise exceptions.OptionsError(
                        "Transparent mode not supported on this platform."
                    )
            elif mode not in ["regular", "socks5"]:
                raise exceptions.OptionsError(
                    "Invalid mode specification: %s" % mode
                )
|
from flask import url_for
from flask_login import current_user, logout_user
from werkzeug.utils import redirect
from util.logutils import loghelpers
@loghelpers.log_decorator()
def logout():
    """Log the current user out and redirect to the post index page."""
    logout_user()
    target = url_for(
        'get_all_posts',
    )
    return redirect(target)
import os
#
# call "encrypt()" or "decrypt()" in the command line to run
#
def encrypt(fileName=None):
    """Encrypt a text file in place with a +1 Caesar-style ASCII shift.

    Every character (including newlines) is replaced by the character one
    code point higher; decrypt() reverses the transformation.

    Args:
        fileName: Path of the file to encrypt. If None, the name is read
            interactively from stdin (the original behavior).
    """
    if fileName is None:
        fileName = input("Enter the name of your file: ")
    # Read the whole file once. The original copied it line-by-line through a
    # temp file and re-opened the output once per character, leaking handles.
    with open(fileName, "r") as file:
        contents = file.read()
    shifted = "".join(chr(ord(char) + 1) for char in contents)
    with open(fileName, "w") as file:
        file.write(shifted)
def decrypt(fileName=None):
    """Decrypt a file in place by reversing encrypt()'s +1 ASCII shift.

    Every character is replaced by the character one code point lower.

    Args:
        fileName: Path of the file to decrypt. If None, the name is read
            interactively from stdin (the original behavior).
    """
    if fileName is None:
        fileName = input("Enter the name of your file: ")
    # Read the whole file once. The original copied it line-by-line through a
    # temp file and re-opened the output once per character, leaking handles.
    with open(fileName, "r") as file:
        contents = file.read()
    shifted = "".join(chr(ord(char) - 1) for char in contents)
    with open(fileName, "w") as file:
        file.write(shifted)
|
import argparse
import os
from cyvcf2 import VCF
import random
import pdb
def getVCFlist(file_list, vcf_file, suffix):
    """Assemble [bam_suffix, vcf_path] pairs from the CLI arguments.

    Exactly one of `file_list` or `vcf_file` must be set ("-9" means unset);
    otherwise an error message is printed and an empty list returned.
    """
    vcf_list = []
    neither = file_list == "-9" and vcf_file == "-9"
    both = file_list != "-9" and vcf_file != "-9"
    if neither or both:
        print("Must provide either a vcf file (-v) and bam suffix (-s) or a file (-f) with paired entries of bam suffix and vcf file")
    elif file_list != "-9":
        # Tab/space-delimited file: one "<bam_suffix> <vcf_path>" pair per line.
        assert os.path.exists(file_list), "Could not find file: %r" % file_list
        with open(file_list, 'r') as vcf_file_list:
            for raw in vcf_file_list:
                entry = raw.strip().split()
                assert len(entry) == 2, "incorrect format on line: %r" % entry
                vcf_list.append(entry)
    else:
        # Single VCF: the bam suffix must accompany it.
        assert suffix != "-9", "Must specify the bam suffix to look for in vcf: %r" % vcf_file
        assert os.path.exists(vcf_file), "Could not find file: %r" % vcf_file
        vcf_list.append([suffix, vcf_file])
    return vcf_list
def wholeVCFcommands(vcf_list, sample_list, outdir, bam_dir, samplot_directory, bcftools_executable):
    """Print a samplot_vcf.sh command for every sample in every VCF.

    Args:
        vcf_list: [bam_suffix, vcf_path] pairs from getVCFlist().
        sample_list: Comma-separated sample names, or "-9" to use every
            sample in the VCF header.
        outdir: Root output directory.
        bam_dir: Directory holding the BAM files (named sample + suffix).
        samplot_directory: Directory containing the samplot scripts.
        bcftools_executable: Path to bcftools.
    """
    for i in vcf_list:
        if os.path.exists(i[1]):
            if i[1].endswith("vcf"):
                vcf = VCF(i[1])
                if sample_list == "-9": samps = vcf.samples
                else: samps = sample_list.split(",")
                for sample in samps:
                    # NOTE(review): str.strip('.vcf') strips *characters*, not
                    # the suffix (e.g. leading/trailing v/c/f are eaten too);
                    # preserved from the original — confirm intent.
                    vcf_dir = i[1].split('/')[-1].strip('.vcf')
                    # BUGFIX: use a fresh variable; the original reassigned
                    # `outdir` itself, nesting the path deeper on every sample.
                    sample_outdir = f"{outdir}/{vcf_dir}/{sample}"
                    bam_file = f"{bam_dir}/{sample}{i[0]}"
                    # BUGFIX: check the same path used in the command (the
                    # original consulted the global `args.b` instead of the
                    # `bam_dir` parameter).
                    if os.path.exists(bam_file):
                        cmd = f"{samplot_directory}/samplot_vcf.sh -o {sample_outdir} -B {bcftools_executable} -v {i[1]} -S {samplot_directory}/samplot.py {bam_file}"
                    else:
                        cmd = f"ERROR: Bam file does not exist, {bam_file}"
                    try:
                        os.makedirs(sample_outdir)
                    except FileExistsError:
                        cmd = 'WARNING: Output directory already exists;' + cmd
                    print(cmd)
            elif i[1].endswith("gz"):
                print("unzip vcf file: ", i[1])
        else:
            print(i[1], "does not exist")
    return()
def makeDuoPics(vcf_list, sample_list, outdir, bam_dir, samplot_directory, bcftools_executable, num_duos, length_threshold = 100000):
    """Print samplot commands for random ALT/REF sample pairs per variant.

    For each variant shorter than `length_threshold`, picks `num_duos`
    random (hom-alt, hom-ref) sample pairs and prints a samplot.py command
    line for each.

    Args:
        vcf_list: [bam_suffix, vcf_path] pairs from getVCFlist().
        sample_list: Comma-separated sample names, or "-9" for all samples.
        outdir: Root output directory.
        bam_dir: Directory holding the BAM files (named sample + suffix).
        samplot_directory: Directory containing the samplot scripts.
        bcftools_executable: Path to bcftools (unused here; kept for a
            signature parallel with wholeVCFcommands).
        num_duos: Number of ref/alt pairs to generate per variant.
        length_threshold: Skip variants at least this long.
    """
    for i in vcf_list:
        if os.path.exists(i[1]):
            if i[1].endswith("vcf"):
                vcf = VCF(i[1])
                vcf_dir = i[1].split("/")[-1].replace(".vcf","_duos")
                # pdb.set_trace()
                Outdir = f"{outdir}/{vcf_dir}"
                if not os.path.exists(Outdir): os.mkdir(Outdir)
                if sample_list == "-9": samps = vcf.samples
                else: samps = sample_list.split(",")
                for variant in vcf:
                    svtype = variant.INFO.get('SVTYPE')
                    if svtype == "CNV":
                        # genomeSTRiP-style CNVs carry their own category/length.
                        svtype = variant.INFO.get('GSCNCATEGORY')
                        svLen = variant.INFO.get('GSELENGTH')
                        # NOTE(review): compares against the string "None";
                        # INFO.get may return the None object instead — confirm.
                        if svtype == "None": print("Change Type to String for GSCNCATEGORY in VCF header")
                    else:
                        svLen = variant.INFO.get('SVLEN')
                    if svLen < length_threshold:
                        # gt_types: 3 = hom-alt carriers, 0 = hom-ref carriers.
                        alts = [j for j, x in enumerate(variant.gt_types) if x == 3]
                        refs = [j for j, x in enumerate(variant.gt_types) if x == 0]
                        if len(alts) > 2 and len(refs) > 2:
                            for k in range(0,num_duos):
                                alt = samps[random.choice(alts)]
                                ref = samps[random.choice(refs)]
                                png_file = f"{svtype}_{variant.CHROM}_{variant.start}_{variant.end}_{alt}_{ref}.png"
                                cmd = f"{samplot_directory}/samplot.py -n {alt},{ref} -b {bam_dir}/{alt}{i[0]},{bam_dir}/{ref}{i[0]} -o {Outdir}/{png_file} -s {variant.start} -e {variant.end} -c {variant.CHROM} -a -t {svtype}"
                                print(cmd)
            elif i[1].endswith("gz"):
                print("unzip vcf file: ", i[1])
        else:
            print(i[1], "does not exist")
    return()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Wrapper for samplot_vcf.sh')
parser.add_argument('-o', type=str, metavar='output_directory', required=True)
parser.add_argument('-B', type=str, metavar='bcftools_executable', default="/panfs/roc/msisoft/bcftools/1.6/bin/bcftools")
parser.add_argument('-S', type=str, metavar='samplot_directory', default="/home/hirschc1/pmonnaha/software/SV-plaudit/Samplot/src/")
parser.add_argument('-b', type=str, metavar='bam_dir', required=True)
parser.add_argument('-v', type=str, metavar='vcf_file', required=False, default="-9")
parser.add_argument('-s', type=str, metavar='bam_suffix', required=False, default="-9", help="sample name in vcf file + suffix = bamName")
parser.add_argument('-f', type=str, metavar='vcf_file_list', required=False, default="-9", help="Tab-delimited file with the bam suffix to look for followed by vcf file")
parser.add_argument('--samps', type=str, metavar='sample_names', required=False, default="-9", help="Sample names can be provided here as a comma-separated list (no spaces). Otherwise, samples will be retrieved from the header of the vcf")
parser.add_argument('-d', type=int, metavar='num_duos', required=False, default=-9, help="how many ref/alt sample pairs do you want to generate pictures for")
args = parser.parse_args()
if os.path.exists(args.o):
if args.o.endswith("/") is False: args.o += "/"
else:
print(args.o, "does not exist")
if os.path.exists(args.S):
if args.S.endswith("/") is False: args.S += "/"
else:
print(args.S, "does not exist")
vcf_list = getVCFlist(args.f, args.v, args.s)
if args.d != -9:
makeDuoPics(vcf_list, args.samps, args.o, args.b, args.S, args.B, args.d)
|
from sense_hat import SenseHat
import time,datetime,logging,subprocess
import ipdb
from influxdb import InfluxDBClient
'''initial var'''
# Calibration constants: the Sense HAT temperature reading is skewed by
# CPU heat, so it is corrected before storage.
factor=1.356          # divisor for the CPU-based correction (first approach, disabled)
difference=12         # fixed offset subtracted from the raw reading (active approach)
sleeptime=60          # seconds between measurements
now = datetime.datetime.now()  # NOTE(review): appears unused below
# InfluxDB connection settings (credentials are placeholders).
influxdb_user = 'pippo'
influxdb_password = 'pippopassword'
influxdb_db = 'TEMPERATURE'
influxdb_host = 'rpi2'
influxdb_port = 8086
influxdbclient = InfluxDBClient(influxdb_host, influxdb_port, influxdb_user, influxdb_password, influxdb_db)
'''logging config'''
logging.basicConfig(
    level=logging.WARNING,
    filename='temperature.log',
    format='[%(asctime)s] %(levelname)s:%(message)s',
    datefmt='%m/%d/%Y %I:%M:%S %p')
'''functions'''
def read_sense_temp(sense):
    """Return the raw temperature reported by the Sense HAT sensor."""
    return sense.get_temperature()
def read_cpu_temp(raw=None):
    """Return the CPU temperature in Celsius, rounded to two decimals.

    Parameters
    ----------
    raw : str, optional
        Output in the format produced by ``vcgencmd measure_temp``
        (e.g. ``"temp=48.3'C"``).  When omitted, the command is run via
        subprocess (Raspberry Pi only).  The parameter makes the parsing
        testable off-device and is backward compatible.
    """
    if raw is None:
        raw = subprocess.getoutput("vcgencmd measure_temp")
    # Expected format: temp=<value>'C  -> take the part between '=' and "'".
    value = raw.split("=")[1].split("'")[0]
    return float("{0:.2f}".format(float(value)))
def calculates_real_temp(temp, cpu_temp, factor, difference):
    """Estimate the ambient temperature from the raw sensor reading.

    Two correction strategies were tried:
    1. (disabled) scale by CPU temperature: temp - ((cpu_temp - temp) / factor)
    2. (active) subtract a fixed offset ``difference``.

    ``cpu_temp`` and ``factor`` are kept for interface compatibility with
    the disabled strategy.
    """
    corrected = temp - difference
    return corrected
def insert_influxdb_row(temperature):
    """Write one temperature measurement to InfluxDB.

    Builds a point for the ``temperatura`` measurement, tagged with the
    host name and timestamped with the current UTC time, then writes it
    through the module-level ``influxdbclient``.
    """
    utcnow = datetime.datetime.utcnow()
    '''json_body template'''
    json_body = [
        {
            "measurement": "temperatura",
            "tags": {
                "host": "rpi3"
            },
            "time": utcnow,
            "fields": {
                "value": temperature
            }
        }
    ]
    # Bug fix: the original passed `temperature` as a positional logging
    # arg to a message with no placeholder, which raises a formatting
    # error inside the logging machinery. Use a lazy %s placeholder.
    logging.debug('writing a value to influxdb with temperature %s', temperature)
    influxdbclient.write_points(json_body)
def main():
    """Poll the sensors forever: read, correct, store, sleep."""
    sense = SenseHat()
    while True:
        temp = read_sense_temp(sense)
        cpu_temp = read_cpu_temp()
        real_temp = calculates_real_temp(temp, cpu_temp, factor, difference)
        insert_influxdb_row(real_temp)
        time.sleep(sleeptime)
if __name__ == "__main__":
main()
|
import pyttsx3
import os
file = open("english.txt","r").read().replace("\n","")
engine = pyttsx3.init()
engine.say(file)
engine.save_to_file(file,"voice.mp3")
os.system("play voice.mp3")
|
#!/usr/bin/env python3
#coding:utf-8
import os
# Project root: parent directory of this file's directory, with trailing slash.
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))+'/'
"""
-------
属性设置
-------
"""
# (Above: "attribute settings".)
# Whether to run the installation step.
option_install = True
"""
awvs server
awvs apikey
"""
# Acunetix (AWVS) REST API endpoint and API key.
awvs_server = 'https://127.0.0.1:13443/api/v1'
awvs_apikey = ''
"""
nessus server
nessus apikey
"""
# Nessus server URL and its access/secret key pair.
nessus_server = ''
nessus_secretKey = ''
nessus_accessKey = ''
"""
google 插件账号登入
"""
# (Above: "Google plugin account login".)
username = ''
password = ''
"""
zoomeye username and password
"""
username_z = ''
password_z = ''
"""
系统平台
可选参数:
Linux:
kali
deepin
"""
# (Above: "system platform; valid options: kali, deepin".)
system_platform = 'kali'
"""
九世信息收集工具配置
"""
# (Above: configuration for the bundled "Jiushi" info-gathering tool.)
def jiushixxsj(url,domain,
               thread=100,xianc=20,
               directory=10000,subdomain=10000,
               domain_baopo=0,dire_path='dict/scan.txt'):
    """Write the config file for the bundled "Jiushi" info-gathering tool.

    Renders the tool's config.py with the scan target URL/domain and the
    concurrency settings, then overwrites the file under ``root``.
    The template's embedded comments are part of the generated file and
    are left untranslated.

    @param domain_baopo: 1 enables subdomain brute-forcing, 0 disables it.
    @return: always True.
    """
    content = """
# @author:九世
# @time:2019/7/2
# @file:mian.py
URL='{}' #输入要进行探测的url
DOMAIN='{}' #输入要进行探测的域名
THREAD={} #协程设置
XIANC={} #进程数设置
DIRECTORY={} #目录扫描的协程设置
SUBDOMAIN={} #子域名爆破协程设置
DOMAIN_BAOPO={} #0为不开启子域名爆破,1为开启
DIRE_PATH=r'{}' #引用dict目录下的指定字典
""".format(url,domain,thread,xianc,directory,subdomain,domain_baopo,dire_path)
    with open('{}信息收集工具/config/config.py'.format(root),'w') as w:
        w.write(content)
    return True
"""
是否添加秘钥
"""
key_config = False
"""
Install Chrome.
"""
Ichrome = True
"""
root 模式下 add user
"""
kali_user = True
|
# -*- encoding: utf-8 -*-
#
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2012 Vauxoo - http://www.vauxoo.com
# All Rights Reserved.
# info@vauxoo.com
#
# Coded by: julio (julio@vauxoo.com)
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp.osv import osv, fields
from openerp.addons.decimal_precision import decimal_precision as dp
from openerp.tools.translate import _
class MrpConsume(osv.TransientModel):
    """Wizard that consumes the raw-material moves of one manufacturing
    order: one wizard line per product, each grouping that product's
    pending (not done/cancelled) stock moves.

    NOTE: Python 2 / OpenERP 7 code (dict.iteritems, list-returning map).
    """
    _name = 'mrp.consume'

    def _get_moves_grouped_by_product(self, cr, uid, move_ids,
                                      context=None):
        """
        Return a dictionary with a list of the moves corresponding by
        product {product: [move_ids]}.
        @param move_ids: list of move ids.
        """
        context = context or {}
        moves_grouped = dict()
        move_obj = self.pool.get('stock.move')
        # Build one {product_id: move_id} dict per browsed move (Py2 map
        # returns a list).
        move_data_list = map(
            lambda move_brw: {move_brw.product_id.id: move_brw.id},
            move_obj.browse(cr, uid, move_ids, context=context))
        for move_data in move_data_list:
            for (product_id, move_id) in move_data.iteritems():
                if product_id in moves_grouped:
                    moves_grouped[product_id] += [move_id]
                else:
                    moves_grouped[product_id] = [move_id]
        return moves_grouped

    def _get_consume_lines_list(self, cr, uid, production_id, context=None):
        """
        Get the consume lines to create.
        @param production_id: manufacturing order id.
        @return: a list of dictionaries with the values for the consume
            lines to create
        """
        context = context or {}
        consume_line_ids = list()
        active_move_ids = self._get_active_move_ids(
            cr, uid, production_id, context=context)
        moves_dict = self._get_moves_grouped_by_product(
            cr, uid, active_move_ids, context=context)
        # One consume line per product, built from that product's moves.
        for move_ids in moves_dict.values():
            consume_line_ids += [self._get_consume_line_values(
                cr, uid, production_id, move_ids, context=context)]
        return consume_line_ids

    def _get_default_consume_line_ids(self, cr, uid, context=None):
        """
        Return the consume lines ids by default for the current work order lot
        """
        context = context or {}
        consume_line_ids = list()
        wol_obj = self.pool.get('mrp.workorder.lot')
        # getting the production_id
        production_ids = context.get('active_ids', [])
        active_model = context.get('active_model', False)
        if not production_ids or len(production_ids) != 1:
            raise osv.except_osv(
                _('Error!!'),
                _('You need to call method using the wizard, one by one per'
                  ' manufacturing order or by an active work order lot.'))
        if active_model not in ['mrp.production', 'mrp.workorder.lot']:
            raise osv.except_osv(
                _('Error!!'),
                _('You this wizard can be only called by the manufacturing'
                  ' order or by an active work order lot.'))
        # When launched from a work order lot, resolve its production order.
        production_id = active_model == 'mrp.production' \
            and production_ids[0] or wol_obj.browse(
                cr, uid, production_ids,
                context=context)[0].production_id.id
        consume_line_ids = self._get_consume_lines_list(
            cr, uid, production_id, context=context)
        return consume_line_ids

    _columns = {
        'consume_line_ids': fields.one2many('mrp.consume.line',
                                            'wizard_id', 'Consume')
    }
    _defaults = {
        'consume_line_ids': _get_default_consume_line_ids,
    }

    def action_consume(self, cr, uid, ids, context=None):
        """Consume each wizard line, spreading the requested quantity over
        the line's stock moves until the remaining quantity is exhausted."""
        context = context or {}
        uom_obj = self.pool.get('product.uom')
        for production in self.browse(cr, uid, ids, context=context):
            for consume_line in production.consume_line_ids:
                line_qty_left = consume_line.quantity
                for move_line in consume_line.consume_line_move_ids:
                    if line_qty_left >= 0.0:
                        context.update({
                            'product_uom': consume_line.product_uom.id,
                            'product_uom_move':
                            move_line.move_id.product_uom.id,
                            'quantity': line_qty_left})
                        # TODO: this 'quantity': line_qty_left could be change
                        # becuase wath happend when products to consume moves
                        # are in different uom (test with mrp_request_return)
                        move_line.move_id.action_consume(
                            line_qty_left, move_line.location_id.id,
                            context=context)
                        # Convert the move quantity into the line's UoM
                        # before decrementing the remainder.
                        move_apportionment_qty = uom_obj._compute_qty(
                            cr, uid, move_line.move_id.product_uom.id,
                            move_line.move_id.product_qty,
                            consume_line.product_uom.id)
                        line_qty_left -= move_apportionment_qty
        return {}

    #~ TODO: check this method, not used here but used in module
    #~ mrp_request_return
    def _partial_move_for(self, cr, uid, production_id, move_ids,
                          context=None):
        """
        @param move_ids: list of stock move id.
        @return: a dictionary of values for a consume/produce line.
        """
        context = context or {}
        product_id = self._get_consume_line_product_id(
            cr, uid, move_ids, context=context)
        product_uom = self._get_consume_line_uom_id(
            cr, uid, production_id, product_id, context=context)
        product_qty = self._get_consume_line_product_qty(
            cr, uid, move_ids, product_uom, context=context)
        consume_line_move_ids = self._get_consume_line_move_ids(
            cr, uid, move_ids, context=context)
        partial_move = {
            'product_id': product_id,
            'quantity': product_qty,
            'product_uom': product_uom,
            'consume_line_move_ids':
            map(lambda move_line: (0, 0, move_line), consume_line_move_ids),
        }
        return partial_move

    def _get_active_move_ids(self, cr, uid, production_id, context=None):
        """
        Get the valid moves to be consume for a manufacturing order. That
        are those stock move that are not in Done or Cancel state.
        @param production_id: manufactuirng order id.
        @return: list of stock move ids that can ve consumed
        """
        context = context or {}
        production_obj = self.pool.get('mrp.production')
        move_brws = production_obj.browse(
            cr, uid, production_id, context=context).move_lines
        active_move_ids = [move_brw.id
                           for move_brw in move_brws
                           if move_brw.state not in ('done', 'cancel')]
        return active_move_ids

    def _get_consume_line_values(self, cr, uid, production_id, move_ids,
                                 context=None):
        """
        @param production_id: the production id where the wizard was called.
        @param move_ids: list of stock move id.
        @return: a dictionary of values to create a consume line.
        """
        context = context or {}
        product_id = self._get_consume_line_product_id(
            cr, uid, move_ids, context=context)
        product_uom = self._get_consume_line_uom_id(
            cr, uid, production_id, product_id, context=context)
        product_qty = self._get_consume_line_product_qty(
            cr, uid, move_ids, product_uom, context=context)
        consume_line_move_ids = self._get_consume_line_move_ids(
            cr, uid, move_ids, context=context)
        consume_line_dict = {
            'product_id': product_id,
            'product_uom': product_uom,
            'quantity': product_qty,
            'consume_line_move_ids':
            map(lambda move_line: (0, 0, move_line), consume_line_move_ids),
        }
        return consume_line_dict

    def _get_consume_line_product_id(self, cr, uid, move_ids, context=None):
        """
        It gets a list of move ids and check that have the same product_id. If
        this condition is True return the product_id, else it raise an
        exception indicating that the moves correspond to different products
        and can be use to create one mrp.comsume.line.
        @param move_ids: stock move ids list to check.
        """
        context = context or {}
        move_obj = self.pool.get('stock.move')
        move_brws = move_obj.browse(cr, uid, move_ids, context=context)
        product_ids = [move_brw.product_id.id for move_brw in move_brws]
        if len(set(product_ids)) != 1:
            raise osv.except_osv(
                _('Error!'),
                _('You are trying to create a cosume line for two or more'
                  ' different products.'),
            )
        return product_ids[0]

    def _get_consume_line_uom_id(self, cr, uid, production_id, product_id,
                                 context=None):
        """
        Return the manufacturing order scheduled product uom defined for the
        given product.
        @param production_id: manufacturing order id.
        @param product_id: raw material product id.
        """
        context = context or {}
        production_brw = self.pool.get('mrp.production').browse(
            cr, uid, production_id, context=context)
        # NOTE(review): raises IndexError if the product is not among the
        # order's scheduled product_lines — TODO confirm that is intended.
        uom_id = [product_line.product_uom.id
                  for product_line in production_brw.product_lines
                  if product_line.product_id.id == product_id][0]
        return uom_id

    def _get_consume_line_product_qty(self, cr, uid, move_ids, product_uom_id,
                                      context=None):
        """
        Return the summatory of every move given in move_ids.
        @param move_ids: stock move ids list to check.
        """
        context = context or {}
        move_obj = self.pool.get('stock.move')
        uom_obj = self.pool.get('product.uom')
        move_brws = move_obj.browse(cr, uid, move_ids, context=context)
        # Sum of all move quantities converted to the requested UoM.
        qty = \
            sum([uom_obj._compute_qty(
                cr, uid, move_brw.product_uom.id, move_brw.product_qty,
                product_uom_id)
                for move_brw in move_brws])
        return qty

    def _get_consume_line_move_ids(self, cr, uid, move_ids, context=None):
        """
        Return a list of dictonary with consume line move to create for the
        moves given.
        @param move_ids: move ids list that will be convert into consume line
            moes.
        """
        context = context or {}
        move_obj = self.pool.get('stock.move')
        values = list()
        for move_brw in move_obj.browse(cr, uid, move_ids, context=context):
            values.append({
                'move_id': move_brw.id,
                'location_id': move_brw.location_id.id,
                'location_dest_id': move_brw.location_dest_id.id,
            })
        return values
class MrpProduce(osv.TransientModel):
    """Wizard to produce (close) the finished-goods moves of a
    manufacturing order: one produce line per pending created move."""
    _name = 'mrp.produce'

    def _get_produce_line_list(self, cr, uid, production_id, context=None):
        """
        @param production_id: manufacturing order id.
        @return: a list of dictionaries values with the produce lines to
            create.
        """
        context = context or {}
        produce_line_list = list()
        active_move_ids = self._get_active_move_ids(
            cr, uid, production_id, context=context)
        for move_id in active_move_ids:
            produce_line_list += [self._get_produce_line_values(
                cr, uid, move_id, context=context)]
        return produce_line_list

    def _get_default_produce_line_ids(self, cr, uid, context=None):
        """
        Search the active stock moves from products to produce and then
        generate the list of dictionary values to create the produce line ids
        """
        context = context or {}
        produce_line_list = list()
        production_ids = context.get('active_ids', [])
        active_model = context.get('active_model', False)
        if not production_ids or len(production_ids) != 1:
            raise osv.except_osv(
                _('Error!!'),
                _('You need to call method using the wizard from the'
                  ' manufacturing order one by one.'))
        if active_model not in ['mrp.production']:
            raise osv.except_osv(
                _('Error!!'),
                _('You this wizard can be only called by the manufacturing'
                  ' order.'))
        production_id = production_ids[0]
        # NOTE: an unused browse of the production record was removed here.
        produce_line_list = self._get_produce_line_list(
            cr, uid, production_id, context=context)
        return produce_line_list

    _columns = {
        'produce_line_ids': fields.one2many('mrp.produce.line',
                                            'produce_id', 'Consume')
    }
    _defaults = {
        'produce_line_ids': _get_default_produce_line_ids,
    }

    def _get_produce_line_values(self, cr, uid, move_id, context=None):
        """
        return the dictionary that fill the produce lines with the move values.
        @param move_id: move id.
        """
        context = context or {}
        move_obj = self.pool.get('stock.move')
        move_brw = move_obj.browse(cr, uid, move_id, context=context) or False
        if not move_id or not move_brw:
            raise osv.except_osv(
                _('Programming Error!'),
                _('You are not given a valid stock move id so this feature can'
                  ' be accomplished.'))
        values = {
            'product_id': move_brw.product_id.id,
            'quantity': move_brw.product_qty,
            'product_uom': move_brw.product_uom.id,
            'move_id': move_brw.id,
            'location_id': move_brw.location_id.id,
            'location_dest_id': move_brw.location_dest_id.id,
        }
        return values

    def action_produce(self, cr, uid, ids, context=None):
        """Consume each produce line's move for the given wizards.

        Bug fix: the signature used a mutable default (``context={}``)
        while the body mutates ``context`` via ``update()``, leaking
        state across calls; use the standard ``context=None`` idiom
        (consistent with ``MrpConsume.action_consume``).
        """
        context = context or {}
        for production in self.browse(cr, uid, ids, context=context):
            for raw_product in production.produce_line_ids:
                context.update({
                    'product_uom': raw_product.product_uom.id,
                    'product_uom_move': raw_product.move_id.product_uom.id,
                    'quantity': raw_product.quantity})
                raw_product.move_id.action_consume(
                    raw_product.quantity, raw_product.location_id.id,
                    context=context)
        return {}

    def _get_active_move_ids(self, cr, uid, production_id, context=None):
        """
        Get the valid moves to be produce for a manufacturing order. That
        are those stock move that are not in Done or Cancel state.
        @param production_id: manufactuirng order id.
        @return: list of stock move ids that can be produced
        """
        context = context or {}
        production_obj = self.pool.get('mrp.production')
        move_brws = production_obj.browse(
            cr, uid, production_id, context=context).move_created_ids
        active_move_ids = [move_brw.id
                           for move_brw in move_brws
                           if move_brw.state not in ('done', 'cancel')]
        return active_move_ids
class MrpConsumeLine(osv.TransientModel):
    """One line of the mrp.consume wizard: a product, the quantity to
    consume, its UoM, and the stock moves it groups."""
    _name = 'mrp.consume.line'
    _rec_name = 'product_id'
    _columns = {
        'product_id': fields.many2one('product.product', string="Product",
                                      required=True),
        'quantity': fields.float("Quantity",
                                 digits_compute=dp.get_precision(
                                     'Product UoM'), required=True),
        'product_uom': fields.many2one('product.uom', 'Unit of Measure',
                                       required=True,),
        'consume_line_move_ids': fields.one2many(
            'mrp.consume.line.move',
            'consume_line_id',
            'Moves',
            required=True,
            help='Moves corresponding to the product in the consume line'),
        'wizard_id': fields.many2one('mrp.consume', string="Wizard"),
    }
class MrpProduceLine(osv.TransientModel):
    """One line of the mrp.produce wizard: the product to produce, its
    quantity/UoM, and the underlying stock move with its locations."""
    _name = 'mrp.produce.line'
    _rec_name = 'product_id'
    _columns = {
        'product_id': fields.many2one(
            'product.product',
            'Product',
            required=True,
            help='Product to be Produce'
        ),
        'quantity': fields.float(
            'Quantity',
            digits_compute=dp.get_precision('Product UoM'),
            required=True,
            help='Quantity that will be produced'
        ),
        'product_uom': fields.many2one(
            'product.uom',
            'Unit of Measure',
            required=True,
            help='Units of measure corresponding to the quantity'
        ),
        'move_id': fields.many2one('stock.move', "Move"),
        'location_id': fields.many2one(
            'stock.location',
            'Location',
            required=True
        ),
        'location_dest_id': fields.many2one(
            'stock.location',
            'Dest. Location',
            required=True
        ),
        'produce_id': fields.many2one(
            'mrp.produce',
            'Produce Wizard'
        ),
    }
class MrpConsumeLineMove(osv.TransientModel):
    """
    This model refered to stock moves dummy data that is used in the
    mrp_consume_line model.
    """
    _name = 'mrp.consume.line.move'
    _description = 'MRP Consume Line Move'
    _columns = {
        'consume_line_id': fields.many2one(
            'mrp.consume.line',
            'Consume Line'),
        'move_id': fields.many2one(
            'stock.move',
            'Move'),
        'location_id': fields.many2one(
            'stock.location',
            'Location',
            required=True),
        'location_dest_id': fields.many2one(
            'stock.location',
            'Dest. Location',
            required=True),
    }
|
class Carro:
    """Simple car model: make, model, fabrication year and current speed.

    Fixes over the original:
    - ``setVelocidade`` was missing its parameter (``velocidade`` was an
      undefined name — NameError on every call).
    - ``freiar`` decremented a nonexistent attribute ``self.__freiar``
      (AttributeError); it now decrements the speed.
    - The commented-out accessors are restored (the usage code below the
      class calls them), with the syntax errors in the commented drafts
      corrected.
    """

    def __init__(self, marca, modelo, ano):
        self.__marca = marca
        self.__modelo = modelo
        self.__anoFabricacao = ano
        self.__velocidade = 0

    # Encapsulamento: accessors for the name-mangled private attributes.
    def getMarca(self):
        return self.__marca

    def setMarca(self, novaMarca):
        self.__marca = novaMarca

    def getModelo(self):
        return self.__modelo

    def setModelo(self, novoModelo):
        self.__modelo = novoModelo

    def getAnoFabricacao(self):
        return self.__anoFabricacao

    def setAnoFabricacao(self, novaFabricacao):
        self.__anoFabricacao = novaFabricacao

    def getVelocidade(self):
        return self.__velocidade

    def setVelocidade(self, velocidade):
        self.__velocidade = velocidade

    def acelerar(self):
        # Increase speed by a fixed step of 10.
        self.__velocidade += 10

    def freiar(self):
        # Decrease speed by a fixed step of 10.
        self.__velocidade -= 10

    def exibirStatus(self):
        status = f'modelo: {self.__modelo} velocidade: {self.__velocidade}'
        return status
# Demo usage. The original instantiated Carro() without the required
# constructor arguments (TypeError) and called setters that were
# commented out in the class. Pass the arguments to the constructor and
# use only methods the class reliably defines.
golzinho = Carro('Vol', 'G1', 19)
print(golzinho.exibirStatus())
golzinho.acelerar()
print(golzinho.exibirStatus())
|
#!/usr/bin/env python3
import progressbar
import logging
import logging.config
import os
import tensorflow as tf
import numpy as np
from model.resnet import ResNet
from dataset.voc_loader import VOCLoader
from dataset.instance_sampler import InstanceSampler
from utils.utils_tf import fill_and_crop
from configs.paths import CKPT_DIR, RAW_CONTEXT_DIR
from configs.config import (get_logging_config, args,
std_data_augmentation_config)
# import matplotlib
# matplotlib.rcParams['backend'] = "Qt4Agg"
slim = tf.contrib.slim
logging.config.dictConfig(get_logging_config(args.run_name))
log = logging.getLogger()
class InferenceModel(object):
    """Runs a trained ResNet (TF1 graph mode) to estimate context class
    probabilities for boxes cropped out of images.

    ``folder`` and ``context_estimation`` are accepted for interface
    compatibility but unused here.
    """

    def __init__(self, sess, net, sampler, img_size,
                 folder=None, context_estimation=False):
        self.sess = sess
        self.net = net
        self.sampler = sampler
        self.img_size = img_size
        self.build_context_estimator()

    def restore_from_ckpt(self, ckpt):
        """Restore weights from checkpoint number `ckpt` (in thousands of steps)."""
        ckpt_path = os.path.join(CKPT_DIR, args.run_name, 'model.ckpt-%i000' % ckpt)
        log.debug("Restoring checkpoint %s" % ckpt_path)
        self.sess.run(tf.local_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())
        saver.restore(self.sess, ckpt_path)

    def build_context_estimator(self):
        """Build input placeholders, the crop pipeline and the softmax head."""
        b = args.test_batch_size
        self.images_ph = tf.placeholder(shape=[b, None, None, 3],
                                        dtype=tf.float32, name='img_ph')
        self.bboxes_ph = tf.placeholder(shape=[b, 4],
                                        dtype=tf.float32, name='bboxes_ph')
        self.frames_ph = tf.placeholder(shape=[b, 4],
                                        dtype=tf.float32, name='frames_ph')
        self.ws = tf.placeholder(shape=[b],
                                 dtype=tf.float32, name='ws_ph')
        self.hs = tf.placeholder(shape=[b],
                                 dtype=tf.float32, name='hs_ph')

        def fn(x):
            # Crop/pad each (image, bbox, frame, w, h) tuple into a network input.
            return fill_and_crop(x[0], x[1], x[2], x[3], x[4],
                                 std_data_augmentation_config)

        imgs = tf.map_fn(fn, [self.images_ph, self.bboxes_ph,
                              self.frames_ph, self.ws, self.hs],
                         tf.float32, parallel_iterations=4, back_prop=False)
        self.logits = self.net.build_net(imgs, self.sampler.num_classes)
        self.output_probs = tf.nn.softmax(self.logits)

    def estimate_context(self, imgs, bboxes, frames, ws, hs):
        """Run inference over the batch; return (probs, pixel-space bboxes).

        Note: a trailing partial batch (fewer than test_batch_size items)
        is dropped by the floor division.
        """
        final_probs = []
        b = args.test_batch_size
        n_iters = int(np.floor(imgs.shape[0] / b))
        for i in range(n_iters):
            inds = np.arange(b*i, b*(i + 1))
            feed_dict = {self.images_ph: imgs[inds],
                         self.bboxes_ph: bboxes[inds],
                         self.frames_ph: frames[inds],
                         self.ws: ws[inds],
                         self.hs: hs[inds]}
            probs = self.sess.run(self.output_probs, feed_dict=feed_dict)
            final_probs.append(probs)
        final_probs = np.concatenate(final_probs, axis=0)
        # Convert normalized box coordinates back to pixel coordinates.
        final_bboxes = np.array(bboxes * np.vstack([ws, hs, ws, hs]).T,
                                dtype=int)
        # If sampling more than one context image per box,
        # average the scores over the neighborhoods.
        if args.n_neighborhoods > 1:
            # Bug fix: the option is `n_neighborhoods` everywhere else in
            # this script; `args.n_neighbors` would raise AttributeError.
            nn = args.n_neighborhoods
            final_bboxes = final_bboxes[::nn]
            # Bug fix: `final_probs` is a plain ndarray here, so indexing
            # it with the string 'probs' would fail; slice it directly.
            all_probs = [final_probs[i::nn] for i in range(nn)]
            final_probs = np.stack(all_probs, -1).mean(-1)
        return final_probs, final_bboxes
def sample2batch(s):
    """Expand one sample dict into per-box batch arrays.

    The image and its width/height are replicated once per bounding box so
    every returned array aligns row-for-row with ``s['bboxes']`` and
    ``s['frames']``.
    """
    n_boxes = s['bboxes'].shape[0]
    widths = np.array([s['w']] * n_boxes)
    heights = np.array([s['h']] * n_boxes)
    images = np.array([s['img']] * n_boxes)
    return images, s['bboxes'], s['frames'], widths, heights
def main(argv=None):  # pylint: disable=unused-argument
    """Precompute context probabilities for every image of the chosen VOC
    split and cache them as .npy files (resumable: existing files are
    skipped)."""
    assert args.ckpt > 0
    assert args.test_n % args.n_neighborhoods == 0, "test_n has to be a multiple of n_neighborhoods"
    net = ResNet
    net = net(training=False)
    # extracting cats to exclude
    excluded = [int(c) for c in args.excluded.split('_')] if args.excluded != "" else []
    dataset, split = args.dataset, args.split
    # Pick the VOC loader variant matching the requested dataset string.
    # NOTE(review): no else branch — an unrecognized dataset leaves
    # `loader` undefined and raises NameError below.
    if '0712' in dataset:
        loader = VOCLoader(['07', '12'], 'train', True, subsets=args.subsets,
                           cats_exclude=excluded, cut_bad_names=False)
    elif '12' in dataset:
        loader = VOCLoader('12', split, args.small_data,
                           cats_exclude=excluded,
                           cut_bad_names=False)
    elif '7' in dataset:
        loader = VOCLoader('07', split, cats_exclude=excluded,
                           cut_bad_names=False)
    sampler = InstanceSampler(loader=loader,
                              n_neighborhoods=args.n_neighborhoods)
    suff = '_small' if args.small_data else ''
    context_folder = os.path.join(RAW_CONTEXT_DIR, args.run_name + '-' + dataset
                                  + split + suff + '-%dneib' % args.n_neighborhoods)
    if not os.path.exists(context_folder):
        os.makedirs(context_folder)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        estimator = InferenceModel(sess, net, sampler, args.image_size)
        estimator.restore_from_ckpt(args.ckpt)
        bar = progressbar.ProgressBar()
        for name in bar(loader.filenames):
            save_file = os.path.join(context_folder, name)
            # Skip images whose context file already exists (resumable runs).
            if os.path.exists(save_file):
                continue
            sample = sampler.get_test_sample(name, args.test_n)
            imgs, bboxes, frames, ws, hs = sample2batch(sample)
            probs, bboxes_out = estimator.estimate_context(imgs, bboxes,
                                                           frames, ws, hs)
            context_dict = {'bboxes': bboxes_out, 'probs': probs}
            np.save(save_file, context_dict)
    print('DONE')
if __name__ == '__main__':
    # tf.app.run parses TF flags and invokes main(argv).
    tf.app.run()
|
import unittest
from katas.kyu_7.pauls_misery import paul
class PaulTestCase(unittest.TestCase):
    """Tests for katas.kyu_7.pauls_misery.paul.

    Each case feeds a list of activities and asserts the resulting mood
    string; kept as distinct methods so the runner reports them separately.
    """

    def test_equal_1(self):
        self.assertEqual(paul(['life', 'eating', 'life']), 'Super happy!')

    def test_equal_2(self):
        self.assertEqual(paul([
            'life', 'Petes kata', 'Petes kata', 'Petes kata', 'eating']),
            'Super happy!')

    def test_equal_3(self):
        self.assertEqual(paul([
            'Petes kata', 'Petes kata', 'eating', 'Petes kata', 'Petes kata',
            'eating']), 'Happy!')

    def test_equal_4(self):
        self.assertEqual(paul(['Petes kata'] * 7), 'Sad!')

    def test_equal_5(self):
        self.assertEqual(paul(['Petes kata'] * 10), 'Miserable!')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-07-26 13:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration for the `gram` app.

    Creates the Comment, Follows, Likes, Post and Saves models; the FK
    fields of Comment and Likes are added in a second pass (AddField)
    exactly as the generator emitted them.
    """

    dependencies = [
        ('gram', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Follows',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('followee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='followers', to='gram.Profile')),
                ('follower', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='following', to='gram.Profile')),
            ],
        ),
        migrations.CreateModel(
            name='Likes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='posts/')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to='gram.Profile')),
            ],
            options={
                'ordering': ['-pk'],
            },
        ),
        migrations.CreateModel(
            name='Saves',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('photo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gram.Post')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='saves', to='gram.Profile')),
            ],
            options={
                'ordering': ['-pk'],
            },
        ),
        migrations.AddField(
            model_name='likes',
            name='photo',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='photolikes', to='gram.Post'),
        ),
        migrations.AddField(
            model_name='likes',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mylikes', to='gram.Profile'),
        ),
        migrations.AddField(
            model_name='comment',
            name='photo',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='gram.Post'),
        ),
        migrations.AddField(
            model_name='comment',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='gram.Profile'),
        ),
    ]
|
import hashlib
def to_short(url):
    """Return a 7-character shortening token for *url* (md5 hex prefix).

    Accepts str or bytes. ``hashlib.md5`` requires bytes, so str input is
    UTF-8 encoded first — the original passed the str straight through,
    which raises TypeError on Python 3.
    """
    if isinstance(url, str):
        url = url.encode('utf-8')
    return hashlib.md5(url).hexdigest()[:7]
|
#!/usr/bin/env python
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
import fastjet as fj
import fjext
import fjcontrib
import fjtools
import pythia8
import pythiafjext
import pythiaext
from heppy.pythiautils import configuration as pyconf
# from tqdm.notebook import tqdm
from tqdm import tqdm
import argparse
import os
import sys
def get_args_from_settings(ssettings):
    """Parse pythia/fastjet CLI options from a settings string.

    Replaces sys.argv so argparse parses `ssettings` instead of the real
    command line. NOTE(review): this mutates the global sys.argv and does
    not restore it afterwards.
    """
    sys.argv=[' '] + ssettings.split()
    parser = argparse.ArgumentParser(description='pythia8 fastjet on the fly')
    pyconf.add_standard_pythia_args(parser)
    parser.add_argument('--output', default="test_ang_ue.root", type=str)
    parser.add_argument('--user-seed', help='pythia seed', default=1111, type=int)
    args = parser.parse_args()
    return args
def matched(j1, j2):
    """Match test for two jets based on shared constituent pt.

    Returns a 5-tuple (True, j1, j2, lambda(j1), lambda(j2)) when more
    than 50% of the pt is matched, otherwise False.
    NOTE(review): mixed return types (tuple vs bool) — callers must check
    the truthiness/first element, not unpack unconditionally.
    """
    # j2i = fjtools.matched_Ry(j1, j2)
    mpt = fjtools.matched_pt(j1, j2)
    if mpt > 0.5:
        return True, j1, j2, fjext.lambda_beta_kappa(j1, 1.0, 1.0, 1.0), fjext.lambda_beta_kappa(j2, 1.0, 1.0, 1.0)
    return False
def fill_matched(j1s, j2s, tj_no_pup, tj_pup, tj_delta, jet_R0):
    """Fill the three ntuples with jet kinematics and angularities.

    - tj_no_pup: one entry per hard-event jet in j1s (L^{kappa=1}_{beta=1,2,3}).
    - tj_delta: one entry per (hard, with-UE) jet pair with the matched-pt
      fraction. NOTE(review): fills every cross pair, not only matched
      ones — `mpt` records the overlap for offline selection; confirm
      against the analysis code.
    - tj_pup: one entry per jet from the UE-included collection j2s.
    """
    for j1 in j1s:
        tj_no_pup.Fill(j1.perp(), j1.eta(), j1.phi(),
                       fjext.lambda_beta_kappa(j1, 1.0, 1.0, jet_R0),
                       fjext.lambda_beta_kappa(j1, 2.0, 1.0, jet_R0),
                       fjext.lambda_beta_kappa(j1, 3.0, 1.0, jet_R0))
        for j2 in j2s:
            mpt = fjtools.matched_pt(j1, j2)
            tj_delta.Fill(j1.perp(), j1.eta(), j1.phi(),
                          fjext.lambda_beta_kappa(j1, 1.0, 1.0, jet_R0),
                          fjext.lambda_beta_kappa(j1, 2.0, 1.0, jet_R0),
                          fjext.lambda_beta_kappa(j1, 3.0, 1.0, jet_R0),
                          j2.perp(), j2.eta(), j2.phi(),
                          fjext.lambda_beta_kappa(j2, 1.0, 1.0, jet_R0),
                          fjext.lambda_beta_kappa(j2, 2.0, 1.0, jet_R0),
                          fjext.lambda_beta_kappa(j2, 3.0, 1.0, jet_R0),
                          mpt)
    # Note: j1 is reused here as the loop variable over the UE-included jets.
    for j1 in j2s:
        tj_pup.Fill(j1.perp(), j1.eta(), j1.phi(),
                    fjext.lambda_beta_kappa(j1, 1.0, 1.0, jet_R0),
                    fjext.lambda_beta_kappa(j1, 2.0, 1.0, jet_R0),
                    fjext.lambda_beta_kappa(j1, 3.0, 1.0, jet_R0))
def main():
    """Generate hard-scatter events, overlay min-bias pileup (UE), cluster
    anti-kt jets in both collections, and write angularity ntuples plus
    multiplicity/acceptance histograms to a ROOT file."""
    # Min-bias generator used as the pileup/underlying-event source.
    mycfg = []
    ssettings = "--py-ecm 5000 --py-minbias --user-seed=100000"
    args = get_args_from_settings(ssettings)
    pythia_mb = pyconf.create_and_init_pythia_from_args(args, mycfg)
    # Hard-scatter generator; note `args` is re-parsed and reused below.
    mycfg = []
    ssettings = "--py-ecm 5000 --user-seed=100000 --nev 100000"
    args = get_args_from_settings(ssettings)
    pythia_hard = pyconf.create_and_init_pythia_from_args(args, mycfg)
    max_eta_hadron=1
    parts_selector_h = fj.SelectorAbsEtaMax(max_eta_hadron)
    jet_R0 = 0.4
    # Keep jets fully contained inside the tracked acceptance.
    jet_selector = fj.SelectorPtMin(20.0) & fj.SelectorPtMax(200.0) & fj.SelectorAbsEtaMax(max_eta_hadron - 1.05 * jet_R0)
    n_pileup = 1 #5
    # print the banner first
    fj.ClusterSequence.print_banner()
    print()
    # set up our jet definition and a jet selector
    jet_R0 = 0.4
    jet_def = fj.JetDefinition(fj.antikt_algorithm, jet_R0)
    print(jet_def)
    # Output file and ntuples/histograms.
    fout = ROOT.TFile(args.output, 'recreate')
    fout.cd()
    tj_delta = ROOT.TNtuple("tj_delta", "tj_delta", "pt:eta:phi:L11:L21:L31:ptm:etam:phim:L11m:L21m:L31m:mpt")
    tj_no_pup = ROOT.TNtuple("tj_no_pup", "tj_no_pup", "pt:eta:phi:L11:L21:L31")
    tj_pup = ROOT.TNtuple("tj_pup", "tj_pup", "pt:eta:phi:L11:L21:L31")
    hmult_hard = ROOT.TH1F("hmult_hard", "hmult_hard", 300, 0, 300)
    hmult_pup = ROOT.TH1F("hmult_pup", "hmult_pup", 300, 0, 300)
    hpt_acc_hard = ROOT.TProfile2D("hpt_acc_hard", "hpt_acc_hard;#eta;#varphi", 50, -1, 1, 50, 0, ROOT.TMath.Pi() * 2.)
    hpt_acc_pup = ROOT.TProfile2D("hpt_acc_pup", "hpt_acc_pup;#eta;#varphi", 50, -1, 1, 50, 0, ROOT.TMath.Pi() * 2.)
    for n in tqdm(range(args.nev)):
        if not pythia_hard.next():
            continue
        # Final-state charged particles from the hard event.
        parts_pythia_h = pythiafjext.vectorize_select(pythia_hard, [pythiafjext.kFinal, pythiafjext.kCharged], 0, False)
        parts_pythia_h_selected = parts_selector_h(parts_pythia_h)
        # Overlay n_pileup min-bias events (user_index offset 10000 marks them).
        parts_pileup = None
        for ipile in range(n_pileup):
            while not pythia_mb.next():
                continue
            parts_pythia_h_ue = pythiafjext.vectorize_select(pythia_mb, [pythiafjext.kFinal, pythiafjext.kCharged], 10000, False)
            parts_pythia_h_selected_ue = parts_selector_h(parts_pythia_h_ue)
            if parts_pileup is None:
                parts_pileup = parts_pythia_h_selected_ue
            else:
                parts_pileup += parts_pythia_h_selected_ue
        mult_hard = len(parts_pythia_h_selected)
        mult_ue = len(parts_pileup)
        # Cluster jets with and without the pileup overlay.
        jets_h = fj.sorted_by_pt(jet_selector(jet_def(parts_pythia_h_selected)))
        jets_h_w_ue = fj.sorted_by_pt(jet_selector(jet_def(parts_pileup + parts_pythia_h_selected)))
        if len(jets_h) < 1:
            continue
        fill_matched(jets_h, jets_h_w_ue, tj_no_pup, tj_pup, tj_delta, jet_R0)
        hmult_hard.Fill(mult_hard)
        hmult_pup.Fill(mult_ue)
        _tmp = [hpt_acc_hard.Fill(p.eta(), p.phi(), p.perp()) for p in parts_pythia_h_selected]
        _tmp = [hpt_acc_pup.Fill(p.eta(), p.phi(), p.perp()) for p in parts_pileup]
    # Print generator statistics and flush the output file.
    pythia_hard.stat()
    pythia_mb.stat()
    fout.Write()
    fout.Close()
    print ('[i] written ', fout.GetName())
# Script entry point.
if __name__ == '__main__':
    main()
|
import io
import os
import sys
import csv
import numpy as np
import random
# Build randomised pseudo-samples for a count matrix: pick random replicate
# columns, shuffle their control-normalised fold changes per column, convert
# back to counts, then append the new columns to the count file and rep map.
if len(sys.argv) != 6:
    # print() call works on both Python 2 and 3 (the old print statement was Py2-only).
    print('Usage: create_random_lines.py input_file repmap_file:rep_hdr:sample_hdr num_rand rep_per_rand ctrl_sample')
else:
    infile = sys.argv[1]
    outfile = infile[:-4] + '_rand' + infile[-4:]
    repmap_file, rep_hdr, sample_hdr = sys.argv[2].split(':')
    out_repmap_file = repmap_file[:-4] + '_rand' + repmap_file[-4:]
    # int() replaces eval(): same result for well-formed input, no code execution.
    num_rand = int(sys.argv[3])
    rep_per_rand = int(sys.argv[4])
    ctrl_sample = sys.argv[5]
    # Load replicate map (replicate column header -> sample name)
    f = io.open(repmap_file)
    rep_delim = ',' if (repmap_file.split('.')[-1] == 'csv') else '\t'
    rep_map = {row[rep_hdr]: row[sample_hdr] for row in csv.DictReader(f, delimiter=rep_delim)}
    f.close()
    # Randomly select existing cell lines to randomise
    f = io.open(infile)
    delim = ',' if (infile.split('.')[-1] == 'csv') else '\t'
    rdr = csv.DictReader(f, delimiter=delim)
    ctrl_hdrs = [hdr for hdr in rep_map if rep_map[hdr] == ctrl_sample]
    non_ctrl_hdrs = [hdr for hdr in rep_map if rep_map[hdr] != ctrl_sample]
    rand_spec = {'RAND%d' % idx: random.sample(non_ctrl_hdrs, rep_per_rand) for idx in range(num_rand)}
    rand_hdrs = sum([rand_spec[x] for x in rand_spec], [])
    ctrl_data, rand_data = [], []
    for row in rdr:
        # float() replaces eval(): cell values are plain numbers, eval is unsafe.
        ctrl_data.append([float(row[hdr]) for hdr in ctrl_hdrs])
        rand_data.append([float(row[hdr]) for hdr in rand_hdrs])
    ctrl_data = np.log2(np.array(ctrl_data) + 32)  # +32 pseudocount before log
    rand_data = np.log2(np.array(rand_data) + 32)
    f.close()
    # Median normalise each column and compute the fold changes
    ctrl_meds, rand_meds = np.nanmedian(ctrl_data, axis=0), np.nanmedian(rand_data, axis=0)
    ctrl_means = np.nanmean(ctrl_data - ctrl_meds, axis=1)
    rand_fcs = ((rand_data - rand_meds).T - ctrl_means).T
    # Randomly shuffle the guides within each column of the random samples
    for i in range(rand_fcs.shape[1]):
        np.random.shuffle(rand_fcs[:, i])
    # Convert the fold changes back to read counts
    rand_counts = np.round(2.0 ** ((rand_fcs.T + ctrl_means).T + rand_meds) - 32)
    rand_counts *= (rand_counts >= 0.0)  # clamp negative counts to zero
    # Write the output count file (appending random samples at the end)
    f, fout = io.open(infile), io.open(outfile, 'w')
    rdr = csv.reader(f, delimiter=delim)
    hdrs = next(rdr)  # next() replaces Py2-only rdr.next()
    rand_hdrs = sum([[rand_id + ' ' + hdr for hdr in rand_spec[rand_id]] for rand_id in rand_spec], [])
    fout.write(u'%s\n' % delim.join(hdrs + rand_hdrs))
    for i, toks in enumerate(rdr):
        fout.write(u'%s\n' % delim.join(toks + ['%d' % x for x in rand_counts[i, :]]))
    f.close(), fout.close()
    # Write the new rep_map file (appending random samples)
    f, fout = io.open(repmap_file), io.open(out_repmap_file, 'w')
    fout.write(u'%s%s%s\n' % (rep_hdr, rep_delim, sample_hdr))
    for row in csv.DictReader(f, delimiter=rep_delim):
        fout.write(u'%s%s%s\n' % (row[rep_hdr], rep_delim, row[sample_hdr]))
    for rand_id in rand_spec:
        fout.write(u'%s\n' % '\n'.join(['%s%s%s' % (rand_id + ' ' + hdr, rep_delim, rand_id) for hdr in rand_spec[rand_id]]))
    f.close(), fout.close()
|
import socket,os,shutil,sys
from zipfile import ZipFile
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtCore import (QCoreApplication, QPropertyAnimation, QDate, QDateTime, QMetaObject, QObject, QPoint, QRect, QSize, QTime, QUrl, Qt, QEvent)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont, QFontDatabase, QIcon, QKeySequence, QLinearGradient, QPalette, QPainter, QPixmap, QRadialGradient)
from PySide2.QtWidgets import *
from ui_pydownloader import Ui_MainWindow
from hurry.filesize import size
class server(QMainWindow):
    """Frameless Qt window that serves one file or folder to a single TCP client."""
    def __init__(self):
        # Default bind address/port shown in the UI; the user may edit them
        # before pressing the start button.
        ip = socket.gethostbyname(socket.gethostname())
        port = 52000
        # set up the server and accept the client
        QMainWindow.__init__(self)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.setWindowFlag(QtCore.Qt.FramelessWindowHint)
        self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
        self.ui.ip_entry.setText(ip)
        self.ui.port_entry.setText(str(port))
        self.ui.start_server_button.clicked.connect(self.start_server)
        self.show()
    def start_server(self):
        # setting up the server
        # NOTE(review): accept() blocks the Qt event loop until a client connects.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((self.ui.ip_entry.text(), int(self.ui.port_entry.text())))
        s.listen(5)
        self.conn, addr = s.accept()
        # First message from the client is its display name.
        name = self.conn.recv(1024).decode()
        # shows message of who is connected
        QMessageBox.information(self, "Connected", name + " has succesfully connected.")
        self.delete_start_screen()
        directory = QtWidgets.QFileDialog.getExistingDirectory(self, 'Select Folder')
        # starts the download
        self.prepare_download(directory)
    def delete_start_screen(self):
        # deletes all the ui for setting up the server
        self.ui.ip_entry.deleteLater()
        self.ui.port_entry.deleteLater()
        self.ui.ip_label.deleteLater()
        self.ui.port_label.deleteLater()
        self.ui.start_server_button.deleteLater()
    def prepare_download(self, directory=""):
        # gets the list of files to send to the client
        if directory != "":
            files = os.listdir(directory)
            os.chdir(directory)
        else:
            files = os.listdir()
        # Comma-separated "name size: <human size>" entries for the client menu.
        x = ""
        for i in files:
            x += i + " size: " + size(os.path.getsize(i)) + ","
        self.conn.send(x.encode())
        # receives the name of the file and sends back the size of it
        file = self.conn.recv(1024).decode()
        # checks if is it folder or a file
        a = os.path.isdir(file)
        # Wire protocol flag: "0" = directory, "1" = regular file.
        self.is_dir = "0" if a == True else "1"
        # if its a folder it makes the folder a zip file to send it
        if self.is_dir == "0":
            shutil.make_archive(file, 'zip', file)
            file += ".zip"
        # sends is it a folder or a file to the client
        self.conn.send(self.is_dir.encode())
        # sends the size of the file or folder
        self.file_size = os.path.getsize(file)
        self.conn.send(str(self.file_size).encode())
        # Calls the download function
        # NOTE(review): this recv appears to wait for a client ack before streaming.
        self.conn.recv(1024)
        self.download(file)
    def download(self, file):
        # reads the file by 1024 and sends it to the client
        with open(file, "rb") as r:
            while True:
                # reads the data by 1024
                data = r.read(1024)
                if not data: break
                # sends the data
                self.conn.send(data)
        # removes the zip file after the download is over
        if self.is_dir == "0": os.remove(file)
        # NOTE(review): quit() ends the whole application after a single transfer.
        quit()
# Script entry point: start the Qt application and exit with its return code.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = server()
    sys.exit(app.exec_())
|
from pathlib import Path
from gammapy.datasets import (
Datasets,
FluxPointsDataset,
MapDataset,
SpectrumDatasetOnOff,
)
# Demo: load three dataset flavours and bundle them in a Datasets container.
# NOTE(review): assumes gammapy's readers expand the $GAMMAPY_DATA environment
# variable inside the path — pathlib itself does not; confirm.
path = Path("$GAMMAPY_DATA")
map_dataset = MapDataset.read(
    path / "cta-1dc-gc/cta-1dc-gc.fits.gz",
    name="map-dataset",
)
spectrum_dataset = SpectrumDatasetOnOff.read(
    path / "joint-crab/spectra/hess/pha_obs23523.fits",
    name="spectrum-datasets",
)
flux_points_dataset = FluxPointsDataset.read(
    path / "hawc_crab/HAWC19_flux_points.fits",
    name="flux-points-dataset",
)
datasets = Datasets([
    map_dataset,
    spectrum_dataset,
    flux_points_dataset
])
# Datasets supports lookup by the name given at read time.
print(datasets["map-dataset"])
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
import resource_rc
class Ui_Dialog(object):
    """pyuic5-generated UI for the 'Coin Auto' dialog.

    Do not hand-edit widget logic here: regenerating from ui.ui will
    overwrite it (see the header warning above).
    """
    def setupUi(self, Dialog):
        # Fixed-size dialog with absolute widget geometry (no layouts).
        Dialog.setObjectName("Dialog")
        Dialog.resize(436, 666)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/favicon/favicon.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Dialog.setWindowIcon(icon)
        # Logo banner
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(30, 30, 391, 91))
        self.label.setText("")
        self.label.setPixmap(QtGui.QPixmap(":/logo/210201-coinauto.png"))
        self.label.setObjectName("label")
        # API key inputs
        self.lineEdit = QtWidgets.QLineEdit(Dialog)
        self.lineEdit.setGeometry(QtCore.QRect(50, 150, 341, 20))
        self.lineEdit.setObjectName("lineEdit")
        self.label_2 = QtWidgets.QLabel(Dialog)
        self.label_2.setGeometry(QtCore.QRect(50, 130, 351, 16))
        self.label_2.setObjectName("label_2")
        self.label_3 = QtWidgets.QLabel(Dialog)
        self.label_3.setGeometry(QtCore.QRect(50, 190, 311, 16))
        self.label_3.setObjectName("label_3")
        self.lineEdit_2 = QtWidgets.QLineEdit(Dialog)
        self.lineEdit_2.setGeometry(QtCore.QRect(50, 210, 341, 20))
        self.lineEdit_2.setObjectName("lineEdit_2")
        # Coin selection
        self.label_4 = QtWidgets.QLabel(Dialog)
        self.label_4.setGeometry(QtCore.QRect(50, 250, 181, 16))
        self.label_4.setObjectName("label_4")
        self.comboBox = QtWidgets.QComboBox(Dialog)
        self.comboBox.setGeometry(QtCore.QRect(50, 270, 341, 22))
        self.comboBox.setObjectName("comboBox")
        # Buy / sell price inputs
        self.label_5 = QtWidgets.QLabel(Dialog)
        self.label_5.setGeometry(QtCore.QRect(50, 310, 181, 16))
        self.label_5.setObjectName("label_5")
        self.label_6 = QtWidgets.QLabel(Dialog)
        self.label_6.setGeometry(QtCore.QRect(250, 310, 181, 16))
        self.label_6.setObjectName("label_6")
        self.checkBox = QtWidgets.QCheckBox(Dialog)
        self.checkBox.setGeometry(QtCore.QRect(50, 370, 341, 21))
        self.checkBox.setObjectName("checkBox")
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(50, 410, 341, 23))
        self.pushButton.setObjectName("pushButton")
        # Log output area
        self.textBrowser = QtWidgets.QTextBrowser(Dialog)
        self.textBrowser.setGeometry(QtCore.QRect(50, 450, 341, 101))
        self.textBrowser.setObjectName("textBrowser")
        # Donation footer labels
        self.label_7 = QtWidgets.QLabel(Dialog)
        self.label_7.setGeometry(QtCore.QRect(50, 560, 341, 16))
        self.label_7.setObjectName("label_7")
        self.label_8 = QtWidgets.QLabel(Dialog)
        self.label_8.setGeometry(QtCore.QRect(50, 590, 341, 16))
        self.label_8.setObjectName("label_8")
        self.label_9 = QtWidgets.QLabel(Dialog)
        self.label_9.setGeometry(QtCore.QRect(50, 620, 341, 16))
        self.label_9.setObjectName("label_9")
        self.lineEdit_3 = QtWidgets.QLineEdit(Dialog)
        self.lineEdit_3.setGeometry(QtCore.QRect(50, 340, 151, 20))
        self.lineEdit_3.setObjectName("lineEdit_3")
        self.lineEdit_4 = QtWidgets.QLineEdit(Dialog)
        self.lineEdit_4.setGeometry(QtCore.QRect(240, 340, 151, 20))
        self.lineEdit_4.setObjectName("lineEdit_4")
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        # Install all user-visible strings (translatable via Qt's tr system).
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Coin Auto"))
        self.label_2.setText(_translate("Dialog", "Bithumb API Connect Key"))
        self.label_3.setText(_translate("Dialog", "Bithumb API Secret Key"))
        self.label_4.setText(_translate("Dialog", "Choose Coin"))
        self.label_5.setText(_translate("Dialog", "Buy Price (₩)"))
        self.label_6.setText(_translate("Dialog", "Sell Price (₩)"))
        self.checkBox.setText(_translate("Dialog", "All responsibilities are yours"))
        self.pushButton.setText(_translate("Dialog", "Start Auto Trading"))
        self.label_7.setText(_translate("Dialog", "Please Buy Me a Cup of Coffee."))
        self.label_8.setText(_translate("Dialog", "ETH\t0x20B65F0C4027345163f0A4Ee9f9d6Dd39ec957A6"))
        self.label_9.setText(_translate("Dialog", "BTC\t3JUoGmQLfhFBNfKBWNa92wGQ7w13CxjoBy"))
        self.lineEdit_3.setText(_translate("Dialog", "0"))
        self.lineEdit_4.setText(_translate("Dialog", "0"))
#encoding=utf-8
import sys
import time
from elbApi.elbClient import elbClient
# CLI driver: resolve (modid, cmdid) against the load balancer ten times,
# reporting each successful lookup back to the agent.
# All Py2-only print statements were converted to print() calls, which are
# valid on both Python 2 and Python 3.
if __name__ == '__main__':
    client = elbClient()
    modid = int(sys.argv[1])
    cmdid = int(sys.argv[2])
    ret = client.apiRegister(modid, cmdid)  # optional API: pre-register the route
    if ret == -9998:
        print('still no exist after register')
    for i in range(10):
        ret, hostOrEmsg = client.apiGetHost(modid, cmdid, 10)
        if ret == 0:
            # Success: hostOrEmsg is an (ip, port) pair.
            ip = hostOrEmsg[0]
            port = hostOrEmsg[1]
            print('%s:%d' % (ip, port))
            print('sleep %ds' % (i % 2 + 1))
            time.sleep(i % 2 + 1)
            # Report the call result (1) for this host back to the balancer.
            client.apiReportRes(modid, cmdid, ip, port, 1)
        elif ret == -9998:
            print('[%d,%d] not exist' % (modid, cmdid))
        else:
            # Any other failure: hostOrEmsg is an error message.
            print(hostOrEmsg)
|
import numpy as np
import matplotlib.pyplot as plt
import math
from scipy import stats
from scipy.stats import multivariate_normal
def opentxt(filename):
    """Read a text file and return its lines (trailing newlines kept)."""
    # 'with' guarantees the handle is closed even on error; the original
    # opened the file and never closed it.
    with open(filename, 'r') as fp:
        return fp.readlines()
def div(a):
    """Split tab-separated records into per-feature lists.

    Each line is 'height<TAB>weight<TAB>shoe_size'.  Returns
    [heights, shoe_sizes, weights] — note the last two columns are swapped
    relative to file order (this matches how callers index the result).
    """
    heights = []
    weights = []
    shoes = []
    for line in a:
        fields = line.split('\t')  # split once instead of three times per line
        heights.append(float(fields[0]))
        weights.append(float(fields[1]))
        shoes.append(float(fields[2]))
    return [heights, shoes, weights]
def mean(num):
    """Arithmetic mean of a non-empty sequence."""
    return sum(num) / len(num)
def standard(num):
    """Population standard deviation (ddof=0) of a non-empty sequence.

    Uses ``** 0.5`` instead of ``np.math.sqrt``: ``np.math`` was merely an
    alias for the stdlib ``math`` module and is deprecated/removed in recent
    NumPy releases.
    """
    avg = mean(num)
    variance = sum((x - avg) ** 2 for x in num) / len(num)
    return variance ** 0.5
def f1(x, num):
    """Normal pdf of x under the Gaussian fitted to each of the three features.

    Returns [pdf_feature0, pdf_feature1, pdf_feature2] using the per-feature
    sample mean and population standard deviation of ``num``.
    """
    return [stats.norm.pdf(x, mean(num[k]), standard(num[k])) for k in range(3)]
def ROC(prediction, test):
    """Return (sensitivity, specificity) for binary predictions vs labels.

    sensitivity = TP / (TP + FN); specificity = TN / (FP + TN).
    """
    true_pos = 0   # predicted 1, actually 1
    true_neg = 0   # predicted 0, actually 0
    false_neg = 0  # predicted 0, actually 1
    false_pos = 0  # predicted 1, actually 0
    for idx, actual in enumerate(test):
        predicted = prediction[idx]
        if predicted == 1 and actual == 1:
            true_pos += 1
        elif predicted == 0 and actual == 0:
            true_neg += 1
        elif predicted == 0 and actual == 1:
            false_neg += 1
        elif predicted == 1 and actual == 0:
            false_pos += 1
    return float(true_pos / (true_pos + false_neg)), float(true_neg / (false_pos + true_neg))
def canshu(a):
    """Fit a 2-D Gaussian to lines of 'height<TAB>weight...' text.

    Returns (mu, thu): the mean vector and the sample covariance matrix
    (np.cov default, ddof=1) of the first two tab-separated columns.
    The hand-rolled scatter-matrix computation that used to sit here as
    commented-out code was dead and has been removed.
    """
    points = np.array([[float(line.split('\t')[0]), float(line.split('\t')[1])]
                       for line in a])
    mu = np.mean(points, axis=0)
    thu = np.cov(points[:, 0], points[:, 1])
    return mu, thu
def f2(x, a):
    # Multivariate-normal pdf of point x under the 2-D Gaussian fitted to
    # the raw text samples `a` (see canshu).
    mu, thu = canshu(a)
    return multivariate_normal.pdf(x, mean=np.array(mu), cov=thu)
def main1():
    """Per-feature 1-D Gaussian classification of boy/girl data.

    Trains on boy.txt / girl.txt, plots the posterior P(class|x) curves for
    each of the three features, then sweeps a decision threshold over the
    held-out boy82.txt / girl42.txt sets and plots ROC curves.  Requires the
    data files in the working directory; plt.show() blocks between figures.
    """
    # Training data: [heights, shoe sizes, weights] per class.
    a = div(opentxt("boy.txt"))
    b = div(opentxt("girl.txt"))
    # Posterior curves per feature (equal priors 0.5/0.5).
    for item in [0, 1, 2]:
        p = list()
        q = list()
        for i in a[item]:
            j = f1(i, a)
            z = f1(i, b)
            p.append(j[item] * 0.5 / (j[item] * 0.5 + z[item] * 0.5))
        for i in b[item]:
            q.append(f1(i, b)[item] * 0.5 / (f1(i, a)[item] * 0.5 + f1(i, b)[item] * 0.5))
        # NOTE(review): set() deduplicates but loses the pairing with the
        # sorted x values below — the plotted (x, p) association looks fragile.
        p = list(set(p))
        q = list(set(q))
        x = list(set(a[item]))
        x.sort()
        plt.plot(x, p, 'r-o', label='$line$', linewidth=1)
        x = list(set(b[item]))
        x.sort()
        plt.plot(x, q, 'b-o', label='$line$', linewidth=1)
        plt.show()
    # Build the test set: boys labelled 1, girls labelled 0.
    t = div(opentxt("boy82.txt"))
    ans = list()
    j = len(t[0])
    for i in range(j):
        ans.append(1)
    for i in range(3):
        t[i] = t[i] + div(opentxt('girl42.txt'))[i]
    for i in range(len(t[0]) - j):
        ans.append(0)
    # ROC sweep for the first two features.
    for j in range(2):
        x = list()
        y = list()
        for item in np.arange(0, 1, 0.01):
            prediction = list()
            for i in t[j]:
                # BUGFIX: this previously read f1(i, z), where z was the stale
                # plotting-loop variable (a list of pdf values, not training
                # data) and raised a TypeError inside f1.  The boy-class
                # likelihood must come from the boy training data `a`, matching
                # the `q` line below and main2().
                p = f1(i, a)
                q = f1(i, b)
                if (p[j] * 0.5 / (p[j] * 0.5 + q[j] * 0.5) > item):
                    prediction.append(1)
                else:
                    prediction.append(0)
            r1, r2 = ROC(prediction, ans)
            x.append(r1)
            y.append(1 - r2)
        plt.xlim(0, 1)
        plt.ylim(0, 1)
        if (j == 0):
            plt.plot(y, x, 'r', label='$line$', linewidth=1)
        elif (j == 1):
            plt.plot(y, x, 'b', label='$line$', linewidth=1)
        elif (j == 2):
            # Unreachable: j only takes values 0 and 1; kept for parity with
            # the original code.
            plt.plot(y, x, 'g', label='$line$', linewidth=1)
    plt.show()
    return 0
def main2():
    # 2-D (height, weight) Gaussian classification: train on boy.txt/girl.txt,
    # then sweep a posterior threshold over the test files and plot the ROC.
    a = opentxt("boy.txt")
    b = opentxt("girl.txt")
    c = opentxt("boy82.txt")
    # Scatter plot of the training data
    x = div(a)
    y = div(b)
    plt.plot(x[0], x[1], 'o')
    plt.plot(y[0], y[1], 'o')
    plt.show()
    # Test points: boys (label 1) followed by girls (label 0).
    j = list()
    for i in c:
        j.append([float(i.split("\t")[0]), float(i.split('\t')[1])])
    ans = list()
    for i in range(len(j)):
        ans.append(1)
    d = opentxt("girl42.txt")
    x = list()
    y = list()
    for i in d:
        j.append([float(i.split("\t")[0]), float(i.split('\t')[1])])
    for i in range(len(j) - len(ans)):
        ans.append(0)
    # Posterior P(boy|point) for every test point (equal priors).
    o = list()
    print(j)
    for i in j:
        p = f2(i, a)
        q = f2(i, b)
        o.append(p * 0.5 / (q * 0.5 + p * 0.5))
    print(max(o), min(o))
    print(j)
    # Sweep the decision threshold across the observed posterior range.
    for item in np.arange(min(o), max(o), 0.01):
        prediction = list()
        for i in j:
            # NOTE(review): recomputes f2 (and the covariance fit) per point
            # per threshold — the `o` values above could be reused.
            p = f2(i, a)
            q = f2(i, b)
            if ((p * 0.5 / (q * 0.5 + p * 0.5)) > item):
                prediction.append(1)
            else:
                prediction.append(0)
        r1, r2 = ROC(prediction, ans)
        x.append(r1)
        y.append(1 - r2)
    plt.plot(y, x, 'r', label='$line$', linewidth=1)
    plt.show()
    return 0
main1() |
import json
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIRequestFactory
from api.views import LaunchSiteViewSet, OperationalStatusViewSet, OrbitalStatusViewSet, SourceViewSet, CatalogEntryViewSet, TLEViewSet, DataSourceViewSet, ComputeView
from api.tools import SatelliteComputation, format_inline_time
from catalog.models import CatalogEntry, TLE
def is_correct_json(string):
    """
    Check if the string is a well formed JSON object or array.

    Returns False for empty strings, strings not starting with '{' or '[',
    and strings that fail to parse.
    """
    if len(string) == 0:
        return False
    # Compare by value, not identity: `string[0] is not '{'` relied on CPython
    # small-string interning and raises SyntaxWarning on modern Pythons.
    if string[0] not in '{[':
        return False
    try:
        json.loads(string)
    except ValueError:
        return False
    return True
def crawl_json(obj):
    """
    Retrieve all the keys in a json object.

    Yields keys depth-first, children before their parent key.  The parameter
    was renamed from ``json``, which shadowed the imported ``json`` module
    inside this function.
    """
    for key in obj:
        if type(obj[key]) is dict:
            for k in crawl_json(obj[key]):
                yield k
        yield key
class ApiGetTestCase(TestCase):
    """GET-endpoint tests for the public API: list views, pagination,
    filtering, ordering and search."""
    fixtures = [
        'initial_data',
        'test_data',
    ]
    def setUp(self):
        self.factory = APIRequestFactory()
    def test_jsonIsCorrect(self):
        """
        Test if basic GET views are returning a correctly formed JSON
        """
        elements = [
            'LaunchSite',
            'OperationalStatus',
            'OrbitalStatus',
            'Source',
            'CatalogEntry',
            'TLE',
            'DataSource',
        ]
        for element in elements:
            # Dynamicly instanciate the view class
            request = self.factory.get('/api/v1/%s/?format=json' % element.lower())
            view_class = globals()['%sViewSet' % element]
            view = view_class.as_view({'get': 'list'})
            response = view(request).render()
            self.assertTrue(is_correct_json(response.content.decode('utf8')))
    def test_jsonHasPagination(self):
        """
        Test if some views has a pagination system
        """
        elements = [
            'TLE',
            'CatalogEntry'
        ]
        for element in elements:
            # Dynamicly instanciate the view class
            request = self.factory.get('/api/v1/%s/?format=json' % element.lower())
            view_class = globals()['%sViewSet' % element]
            view = view_class.as_view({'get': 'list'})
            response = view(request).render()
            json_data = response.content.decode('utf8')
            self.assertIn('"count":', json_data)
            self.assertIn('"next":', json_data)
            self.assertIn('"previous":', json_data)
            self.assertIn('"results":', json_data)
    def test_listingCatalogEntriesWithLimit(self):
        """
        Check if the limit parameter is working
        """
        # 'count' reports the total number of entries regardless of the limit.
        expected_results = {
            '': 2,
            '?limit=1': 2,
        }
        for search, expected in expected_results.items():
            response = self.client.get(
                '/api/v1/catalogentry/{}'.format(search)
            )
            content = response.content.decode('utf8')
            json_data = json.loads(content)
            result = json_data['count']
            self.assertEqual(result, expected)
    def test_listCatalogEntriesWithFilters(self):
        """
        Check if filters in urls are working
        """
        # Lists of (field, value) pairs: the original used dict literals with
        # duplicated keys, so all but the last value per field were silently
        # discarded and never tested.
        to_check_basic = [
            ('has_payload', True),
            ('has_payload', False),
        ]
        to_check_child = [
            ('owner', 'ISS'),
            ('owner', 'PRC'),
            ('launch_site', 'TYMSC'),
            ('launch_site', 'JSC'),
            ('operational_status', '+'),
        ]
        for field, value in to_check_basic:
            response = self.client.get(
                '/api/v1/catalogentry/?{}={}'.format(field, value)
            )
            content = response.content.decode('utf8')
            json_data = json.loads(content)
            for result in json_data['results']:
                # Check every returned entry (the original always re-checked
                # results[0] inside this loop).
                self.assertEqual(result[field], value)
        for field, value in to_check_child:
            response = self.client.get(
                '/api/v1/catalogentry/?{}={}'.format(field, value)
            )
            content = response.content.decode('utf8')
            json_data = json.loads(content)
            for result in json_data['results']:
                self.assertEqual(result[field]['code'], value)
    def test_listCatalogEntriesWithSortFilters(self):
        """
        Check if ordering filters in urls are working
        """
        expected_orders = {
            'launch_date': ['25544', '37820'],
            '-launch_date': ['37820', '25544'],
            'norad_catalog_number': ['25544', '37820'],
            '-norad_catalog_number': ['37820', '25544'],
        }
        for param, order in expected_orders.items():
            response = self.client.get(
                '/api/v1/catalogentry/?ordering={}'.format(param)
            )
            content = response.content.decode('utf8')
            json_data = json.loads(content)
            for i in range(len(order)):
                self.assertEqual(
                    json_data['results'][i]['norad_catalog_number'],
                    order[i]
                )
    def test_listCatalogEntriesWithComplexFilters(self):
        """
        Check if the more complex filters are working
        """
        expected_results = {
            '?norad_catalog_number__in=25544%2C25545': ['25544'],
            '?norad_catalog_number__startswith=255': ['25544'],
            '?owner__description__startswith=international': ['25544'],
            '?owner__operational_status__startswith=Operational&international_designator__startswith=2011-': ['37820'],
        }
        for search, expected in expected_results.items():
            response = self.client.get(
                '/api/v1/catalogentry/{}'.format(search)
            )
            content = response.content.decode('utf8')
            json_data = json.loads(content)['results']
            results = []
            for data in json_data:
                results.append(data['norad_catalog_number'])
            self.assertEqual(results, expected)
    def test_listCatalogEntriesWithSearchFilters(self):
        """
        Check if the search filter is working corrently
        """
        expected_results = {
            '25544': ['25544'],
            'zarya': ['25544'],
            'international+space+station': ['25544'],
            'china': ['37820'],
            'jiuquan': ['37820'],
            'tiangong': ['37820'],
            'operational': ['25544', '37820'],
            'earth+orbit': ['25544', '37820'],
        }
        for search, expected in expected_results.items():
            response = self.client.get(
                '/api/v1/catalogentry/?search={}'.format(search)
            )
            content = response.content.decode('utf8')
            json_data = json.loads(content)['results']
            results = []
            for data in json_data:
                results.append(data['norad_catalog_number'])
            self.assertEqual(results, expected)
class ComputationTestCase(ApiGetTestCase):
    """
    Tests on the computation part of the api.

    All ``assertEquals`` calls were changed to ``assertEqual``:
    ``assertEquals`` is a deprecated alias that was removed in Python 3.12.
    """
    fixtures = [
        'initial_data',
        'test_data',
    ]
    def test_computeViewExists(self):
        try:
            from api.views import ComputeView
        except ImportError:
            self.fail("Compute view does not exist")
    def test_computeRouteExists(self):
        """
        Check if the route is working
        """
        response = self.client.get('/api/v1/compute/25544/?time=20170825200000')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_computeRouteOnlyMatchesDigits(self):
        """
        Check if the route only matches digits in its parameter
        """
        response = self.client.get('/api/v1/compute/notadigit/')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    def test_computeRouteReturns404IfCatalogEntryDoesNotExist(self):
        """
        Check if a non existent satellite number outputs a 404 error
        """
        response = self.client.get('/api/v1/compute/0101/')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    def test_computeViewReturnsJsonData(self):
        """
        Check if the data returned by the view is json
        """
        response = self.client.get('/api/v1/compute/25544/?time=20170825200000')
        self.assertTrue(
            is_correct_json(response.content.decode('utf8')),
            "compute view does not return json"
        )
    def test_computeViewReturnsExpectedJsonFormat(self):
        """
        Check if the returned json has the correct structure
        """
        response = self.client.get('/api/v1/compute/25544/?time=20170825200000')
        content = response.content.decode('utf8')
        expected_keys = [
            'longitude',
            'latitude',
            'elevation',
            'velocity',
            'tle',
        ]
        json_data = json.loads(content)
        json_keys = [key for key in crawl_json(json_data)]
        for key in expected_keys:
            self.assertTrue(
                key in json_keys,
                "there is no key '{}' in the json".format(key)
            )
    def test_computeViewReturnsExpectedValueFormat(self):
        """
        Check if the returned json has the correct value types
        """
        response = self.client.get('/api/v1/compute/25544/?time=20170825200000')
        content = response.content.decode('utf8')
        json_data = json.loads(content)
        for key in json_data:
            try:
                float(json_data[key])
            except ValueError:
                self.fail("{} is not a number".format(key))
    def test_computeViewReturnsSameDataAsSatelliteComputation(self):
        """
        Check if the view returns the same data as SatelliteComputation tool
        """
        tle = TLE.objects.findByCatalogEntryAndTime(
            CatalogEntry.objects.first(),
            format_inline_time('20170825200000')
        )
        sc = SatelliteComputation(tle=tle)
        # Put the observer on a fixed date to avoid the test to fail while
        # running after the TLE expires
        sc.observer.date = '2017/8/25 20:00:00'
        response = self.client.get('/api/v1/compute/25544/?time=20170825200000')
        content = response.content.decode('utf8')
        json_data = json.loads(content)
        del json_data['tle']
        expected_data = sc.compute()
        self.assertEqual(json_data, expected_data)
    def test_computeViewReturnsCorrectDataAccordingToTheDate(self):
        """
        Check if the view returns the correct data according to the date
        """
        tle = TLE.objects.findByCatalogEntryAndTime(
            CatalogEntry.objects.first(),
            format_inline_time('20170825200000')
        )
        sc = SatelliteComputation(tle=tle)
        sc.observer.date = '2017/8/25 20:00:00'
        expected_data_1 = sc.compute()
        sc.observer.date = '2017/8/25 20:00:01'
        expected_data_2 = sc.compute()
        response = self.client.get('/api/v1/compute/25544/?time=20170825200000')
        content = response.content.decode('utf8')
        json_data_1 = json.loads(content)
        del json_data_1['tle']
        response = self.client.get('/api/v1/compute/25544/?time=20170825200001')
        content = response.content.decode('utf8')
        json_data_2 = json.loads(content)
        del json_data_2['tle']
        self.assertEqual(json_data_1, expected_data_1)
        self.assertEqual(json_data_2, expected_data_2)
    def test_computeViewReturns400IfTimeTooFarAway(self):
        """
        Check if the view returns an error 400 if the given time too far away
        """
        response = self.client.get('/api/v1/compute/25544/?time=20200825200000')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_computeViewReturns400IfNoTLEFoundForTime(self):
        """
        Check if the view returns an error 400 if no TLE is found for the
        given time
        """
        response = self.client.get('/api/v1/compute/25544/?time=200008252000001')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_getTLEFromCatalogEntryIsReachable(self):
        """
        Check if the request returns a correct JSON
        """
        response = self.client.get('/api/v1/catalogentry/25544/tle/?time=20170825200000')
        content = response.content.decode('utf8')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        json_data = json.loads(content)
        self.assertTrue(is_correct_json(content))
    def test_getTLEFromCatalogEntryHasTLE(self):
        """
        Check if the request returns a TLE
        """
        response = self.client.get('/api/v1/catalogentry/25544/tle/?time=20170825200000')
        content = response.content.decode('utf8')
        json_data = json.loads(content)
        expected_data = {
            'id': 4,
            'first_line': 'ISS (ZARYA)',
            'second_line': '1 25544U 98067A   17059.83075553  .00002893  00000-0  50327-4 0  9991',
        }
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        for key, value in expected_data.items():
            self.assertTrue(key in json_data)
            self.assertEqual(json_data[key], value)
    def test_getTLEFromCatalogEntryReturns400IfNoTLEFoundForTime(self):
        """
        Check if the view returns an error 400 if not TLE is found for
        the given time
        """
        response = self.client.get('/api/v1/catalogentry/25544/tle/?time=20000825200000')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
import unittest
from zoomus import components, util
import responses
def suite():
    """Define all the tests of the module."""
    suite = unittest.TestSuite()
    # loadTestsFromTestCase replaces unittest.makeSuite, which was deprecated
    # in Python 3.11 and removed in 3.13.
    suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(CustCreateV1TestCase))
    return suite
class CustCreateV1TestCase(unittest.TestCase):
    """Tests for the v1 user cust_create component wrapper."""
    def setUp(self):
        self.component = components.user.UserComponent(
            base_uri="http://foo.com",
            config={
                "api_key": "KEY",
                "api_secret": "SECRET",
                "version": util.API_VERSION_1,
            },
        )
    @responses.activate
    def test_can_create_by_email(self):
        responses.add(
            responses.POST,
            "http://foo.com/user/custcreate?type=foo&email=a@b.com&api_key=KEY&api_secret=SECRET",
        )
        self.component.cust_create(type="foo", email="a@b.com")
    def test_requires_type(self):
        # assertRaisesRegex replaces assertRaisesRegexp: the latter is a
        # deprecated alias removed in Python 3.12.
        with self.assertRaisesRegex(ValueError, "'type' must be set"):
            self.component.cust_create()
    def test_requires_email(self):
        with self.assertRaisesRegex(ValueError, "'email' must be set"):
            self.component.cust_create(type="foo")
if __name__ == "__main__":
unittest.main()
|
def tokenize(p):
    """Split pattern p into tokens: single characters, or char+'*' pairs."""
    tokens = []
    i = 0
    length = len(p)
    while i < length:
        # A character immediately followed by '*' forms one two-char token.
        if i + 1 < length and p[i + 1] == "*":
            tokens.append(p[i:i + 2])
            i += 2
        else:
            tokens.append(p[i])
            i += 1
    return tokens
def matchEmpty(tokens):
    """True iff every token is starred, i.e. the pattern can match ''."""
    return all(token[-1] == '*' for token in tokens)
# aa , a*b -> a, a*b or a, b ->
# ab, .*c -> b .*c or b, c -> '', .*c -> '', c ( false)
# aaa, ab*a -> aa b*a,
# a, "ab*" -> '', b* -> '', ''
def findMatch(s, tokens):
    """Recursively match the whole string s against tokenized pattern tokens.

    Tokens are single characters, '.', or char/'.' followed by '*'
    (see tokenize).  Returns a boolean.

    Fixes over the original:
    - removed the Python-2 debug ``print`` statements (syntax errors under
      Python 3, and they polluted stdout);
    - an empty string with a remaining '.*' token recursed on identical
      arguments forever (RecursionError); a starred token now matches zero
      times when the string is exhausted;
    - '' incorrectly matched the pattern '.' (the '.' token consumed nothing).
    """
    if not tokens:
        # Pattern exhausted: match succeeds only if the string is consumed too.
        return not s
    first, last = tokens[0][0], tokens[0][-1]
    if last == '*':
        if s and (first == s[0] or first == "."):
            # Three options: consume a char and the token, consume a char and
            # keep the token (star repeats), or match the token zero times.
            return (findMatch(s[1:], tokens[1:])
                    or findMatch(s[1:], tokens)
                    or findMatch(s, tokens[1:]))
        # No consumable character: the starred token must match zero times.
        return findMatch(s, tokens[1:])
    # Plain token: it must consume exactly one matching character.
    if s and (first == s[0] or first == '.'):
        return findMatch(s[1:], tokens[1:])
    return False
class Solution:
    # @return a boolean
    def isMatch(self, s, p):
        # Full-string regex match of s against pattern p ('.' and '*' support);
        # delegates to the module-level tokenize/findMatch helpers.
        tokens = tokenize(p)
        return findMatch(s, tokens)
# Smoke test: "ab" does not match ".*c" (no trailing 'c' to consume).
x = Solution().isMatch("ab", ".*c")
# print() call is valid on both Python 2 and 3, unlike the old print statement.
print(x)
|
from ..requests import get_news
from . import main
from flask import render_template, request
from ..models import News_Article
# Viewscategory=business category=science category=sports entertainment
@main.route('/')
def index():
    """Render the landing page with business, science and entertainment news."""
    business_news = get_news('business')
    science_news = get_news('science')
    entertainment_news = get_news('entertainment')
    # NOTE(review): `title` is assigned but never passed to the template —
    # confirm whether index.html expects a title variable.
    title = 'Welcome to the news platform'
    return render_template('index.html', business=business_news, science=science_news, entertainment=entertainment_news)
|
import pandas
IN_FILE_NAME = "gs://genomics-public-data/simons-genome-diversity-project/reports/Simons_Genome_Diversity_Project_sample_reference_results.csv"
OUT_FILE_NAME = "results.parquet"
def main():
    """Convert the SGDP sample-reference CSV (public GCS bucket) to Parquet."""
    # TODO use index column that comes from the CSV.
    # NOTE(review): reading gs:// URLs requires gcsfs, and to_parquet needs a
    # parquet engine (pyarrow/fastparquet) — confirm both are installed.
    frame = pandas.read_csv(IN_FILE_NAME)
    frame.to_parquet(OUT_FILE_NAME)
if __name__ == "__main__":
    main()
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms import FileField, IntegerField
from wtforms.validators import DataRequired
class LoginForm(FlaskForm):
    """Username/password sign-in form with a remember-me option."""
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Remember Me')
    submit = SubmitField('Sign In')
class UploadForm(FlaskForm):
    """Photo upload form; the submit label is Chinese for "upload"."""
    photo = FileField()
    submit = SubmitField(u'上传')
class DataForm(FlaskForm):
    """Form asking for an iteration count ("循环次数"); the submit button
    downloads the result ("下载结果")."""
    data = IntegerField('循环次数', validators=[DataRequired()])
    submit = SubmitField('下载结果')
from tkinter import*
import time
import datetime
import pygame
# Build the "Music Box" synthesizer window: a stack of frames holding wave
# and style selectors plus a piano-style keyboard of black and white keys.
# NOTE(review): none of the buttons have command handlers wired up yet, and
# the repeated reuse of the name btnCs means earlier references are lost.
pygame.init()
root = Tk()
root.title("Music Box")
root.geometry('1352x700+0+0')
root.configure(background='white')
# Outer container frame and three stacked sub-frames.
ABC = Frame(root, bg="powder blue", bd=20, relief=RIDGE)
ABC.grid()
ABC1 = Frame(ABC, bg="powder blue", bd=20, relief=RIDGE)
ABC1.grid()
ABC2 = Frame(ABC, bg="powder blue", bd=20, relief=RIDGE)
ABC2.grid()
ABC3 = Frame(ABC, bg="powder blue", bd=20, relief=RIDGE)
ABC3.grid()
# Title / date / time display variables (the entry widgets using them are
# currently commented out below).
str1 = StringVar()
str1.set("Ject Like Music")
Data1 = StringVar()
Time1 = StringVar()
Data1.set(time.strftime("%d/%m/%Y"))
Time1.set(time.strftime("%H:%M:%S"))
#============================Label with title =====================
Label(ABC1, text="Synthesizer", font=('arial', 25, 'bold'), padx=8, pady=8, bd=4, bg="powder blue",
      fg="white").grid(row=0, column=0, columnspan=11)
C = Canvas(ABC1, height=200, width=200, bg='white')
C.grid(row=1, column=2)
#==================================================================
#txtDate=Entry(ABC1, textvariable=Data1, font=('arial',18,'bold'),bd=34,bg="powder blue",
#fg="white",width=28,justify=CENTER).grid(row=1,column=0,columnspan=1)
#txtDate=Entry(ABC1, textvariable=str1, font=('arial',18,'bold'),bd=34,bg="powder blue",
#fg="white",width=28,justify=CENTER).grid(row=1,column=1,columnspan=1)
#txtDate=Entry(ABC1, textvariable=Time1, font=('arial',18,'bold'),bd=34,bg="powder blue",
#fg="white",width=28,justify=CENTER).grid(row=1,column=2,columnspan=1)
# Wave-shape and style selector buttons.
btnCs = Button(ABC, height=2, width=4, bd=4, text="Rec", font=('arial', 18, 'bold'), bg="black", fg="white")
btnCs.grid(row=0, column=1, padx=5, pady=5)
btnCs = Button(ABC1, height=2, width=6, bd=4, text="sin", font=('arial', 18, 'bold'), bg="black", fg="white")
btnCs.grid(row=1, column=4, padx=5, pady=5)
btnCs = Button(ABC1, height=2, width=6, bd=4, text="square", font=('arial', 18, 'bold'), bg="black", fg="white")
btnCs.grid(row=1, column=5, padx=5, pady=5)
btnCs = Button(ABC1, height=2, width=6, bd=4, text="Triangle", font=('arial', 18, 'bold'), bg="black", fg="white")
btnCs.grid(row=1, column=6, padx=5, pady=5)
btnCs = Button(ABC1, height=2, width=7, bd=4, text="sawtooth", font=('arial', 18, 'bold'), bg="black", fg="white")
btnCs.grid(row=1, column=7, padx=5, pady=5)
btnCs = Button(ABC1, height=2, width=4, bd=4, text="Style1", font=('arial', 18, 'bold'), bg="black", fg="white")
btnCs.grid(row=2, column=4, padx=5, pady=5)
btnCs = Button(ABC1, height=2, width=4, bd=4, text="Style2", font=('arial', 18, 'bold'), bg="black", fg="white")
btnCs.grid(row=2, column=5, padx=5, pady=5)
btnCs = Button(ABC1, height=2, width=4, bd=4, text="Style3", font=('arial', 18, 'bold'), bg="black", fg="white")
btnCs.grid(row=2, column=6, padx=5, pady=5)
btnCs = Button(ABC1, height=2, width=4, bd=4, text="Style4", font=('arial', 18, 'bold'), bg="black", fg="white")
btnCs.grid(row=2, column=7, padx=5, pady=5)
#==============================Black Button===============================
btnCs = Button(ABC3, height=4, width=6, bd=4, text="C#", font=('arial', 18, 'bold'), bg="black", fg="white")
btnCs.grid(row=0, column=1, padx=5, pady=5)
btnDs = Button(ABC3, height=4, width=6, bd=4, text="D#", font=('arial', 18, 'bold'), bg="black", fg="white")
btnDs.grid(row=0, column=2, padx=5, pady=5)
# Disabled flat spacer mimicking the gap between black-key groups.
btnSpace = Button(ABC3, state=DISABLED, width=2, height=6, bg="powder blue", relief=FLAT)
btnSpace.grid(row=0, column=3, padx=0, pady=0)
btnFs = Button(ABC3, height=4, width=6, bd=4, text="F#", font=('arial', 18, 'bold'), bg="black", fg="white")
btnFs.grid(row=0, column=4, padx=5, pady=5)
btnGs = Button(ABC3, height=4, width=6, bd=4, text="G#", font=('arial', 18, 'bold'), bg="black", fg="white")
btnGs.grid(row=0, column=5, padx=5, pady=5)
btnBb = Button(ABC3, height=4, width=6, bd=4, text="Bb", font=('arial', 18, 'bold'), bg="black", fg="white")
btnBb.grid(row=0, column=6, padx=5, pady=5)
#===============================White button==========================
btnC = Button(ABC3, height=4, width=8, text="C", font=('arial', 18, 'bold'), bg="white", fg="black")
btnC.grid(row=1, column=0, padx=5, pady=5)
btnD = Button(ABC3, height=4, width=8, text="D", font=('arial', 18, 'bold'), bg="white", fg="black")
btnD.grid(row=1, column=1, padx=5, pady=5)
btnE = Button(ABC3, height=4, width=8, text="E", font=('arial', 18, 'bold'), bg="white", fg="black")
btnE.grid(row=1, column=2, padx=5, pady=5)
btnF = Button(ABC3, height=4, width=8, text="F", font=('arial', 18, 'bold'), bg="white", fg="black")
btnF.grid(row=1, column=3, padx=5, pady=5)
btnG = Button(ABC3, height=4, width=8, text="G", font=('arial', 18, 'bold'), bg="white", fg="black")
btnG.grid(row=1, column=4, padx=5, pady=5)
btnA = Button(ABC3, height=4, width=8, text="A", font=('arial', 18, 'bold'), bg="white", fg="black")
btnA.grid(row=1, column=5, padx=5, pady=5)
btnB = Button(ABC3, height=4, width=8, text="B", font=('arial', 18, 'bold'), bg="white", fg="black")
btnB.grid(row=1, column=6, padx=5, pady=5)
#==================================================================
root.mainloop()
import sys

# Prune a network file: drop every edge whose target node never appears
# as a source, i.e. whose target has out-degree zero.
#
# Usage: python prune.py <network_file>
# Input lines are whitespace-separated triples: "source relation target".
# Output is written next to the input as "<basename>.prune0target.sif".

file_name = sys.argv[1]

networks = []      # all input lines, in original order
sources = set()    # nodes that appear in the source (first) column

# First pass: remember every line and record which nodes have outgoing edges.
# (The original tracked per-node out-degree counts via list.index(), which is
# O(n^2); only "out-degree > 0" is ever tested, so a set of sources suffices.)
with open(file_name) as ifile:
    for line in ifile:
        networks.append(line)
        sources.add(line.split()[0])

# Second pass: keep only edges whose target has at least one outgoing edge.
out_name = ".".join(file_name.split(".")[:-1]) + ".prune0target.sif"
with open(out_name, "w") as ofile:
    for line in networks:
        if line.split()[2] in sources:
            ofile.write(line)
|
# Dependencies
# NOTE(review): pandas and json are imported but not used in this section —
# verify against the rest of the file before removing.
import pandas as pd
import tweepy
import time
import json
import random
import config
# Twitter API Keys
# Credentials live in a local `config` module, presumably kept out of
# version control — confirm it is listed in .gitignore.
consumer_key = config.consumer_key
consumer_secret = config.consumer_secret
access_token = config.access_token
access_token_secret = config.access_token_secret
# auth tweepy
# OAuth 1a handshake; JSONParser makes API calls return plain dicts
# instead of tweepy model objects.
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())
# Quotes to Tweet
# Pool of status strings; one is chosen at random per tweet below.
happy_quotes = [
    "For every minute you are angry you lose sixty seconds of happiness. - Ralph Waldo Emerson",
    "Folks are usually about as happy as they make their minds up to be. - Abraham Lincoln",
    "Happiness is when what you think, what you say, and what you do are in harmony. - Mahatma Gandhi",
    "Count your age by friends, not years. Count your life by smiles, not tears. - John Lennon",
    "Happiness is a warm puppy. - Charles M. Schulz",
    "The happiness of your life depends upon the quality of your thoughts. - Marcus Aurelius",
    "Now and then it's good to pause in our pursuit of happiness and just be happy. - Guillaume Apollinaire"]
# Create function for tweeting
def tweet(status):
    """Post *status* as a tweet via the module-level authenticated API client."""
    api.update_status(status)
# Twitter credentials
tweet_count = 0
while(tweet_count <7):
# Tweet a random quote
status = happy_quotes[random.choice(happy_quotes))]
# Print success message
try:
tweet(status)
print("Tweet sent")
tweet_count += 1
except:
print("Failed to tweet")
# Set timer to run every minute
time.sleep(60) |
#!/usr/bin/python
# Copyright 2009-2011 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import optparse
import sys
import portage
from portage import os
def command_recompose(args):
    """Replace the xpak metadata segment of a binary package.

    args must be [binpkg_path, metadata_dir]. On success returns
    os.EX_OK; on any argument problem, prints the usage banner plus a
    specific complaint to stderr and returns 1.
    """
    usage = "usage: recompose <binpkg_path> <metadata_dir>\n"

    def _fail(message):
        # Every argument error emits the usage banner first, then the detail.
        sys.stderr.write(usage)
        sys.stderr.write(message)
        return 1

    if len(args) != 2:
        return _fail("2 arguments are required, got %s\n" % len(args))

    binpkg_path, metadata_dir = args
    if not os.path.isfile(binpkg_path):
        return _fail("Argument 1 is not a regular file: '%s'\n" % binpkg_path)
    if not os.path.isdir(metadata_dir):
        return _fail("Argument 2 is not a directory: '%s'\n" % metadata_dir)

    # Rebuild the package's xpak segment from the metadata directory contents.
    t = portage.xpak.tbz2(binpkg_path)
    t.recompose(metadata_dir)
    return os.EX_OK
def main(argv):
    """Parse the command line and dispatch a binary-package metadata command.

    argv is the full argument vector (argv[0] is the program name);
    bytes entries are decoded in place before parsing. Returns the exit
    status of the selected command. Unknown or missing commands abort
    via optparse's error handling (SystemExit).
    """
    if argv and isinstance(argv[0], bytes):
        # Decode in place so the caller's list reflects the decoded values.
        for i, x in enumerate(argv):
            argv[i] = portage._unicode_decode(x, errors='strict')

    valid_commands = ('recompose',)
    description = "Perform metadata operations on a binary package."
    usage = "usage: %s COMMAND [args]" % os.path.basename(argv[0])

    parser = optparse.OptionParser(description=description, usage=usage)
    _options, args = parser.parse_args(argv[1:])

    if not args:
        parser.error("missing command argument")
    command = args[0]
    if command not in valid_commands:
        parser.error("invalid command: '%s'" % command)

    if command == 'recompose':
        return command_recompose(args[1:])
    # Unreachable: every valid command is handled above.
    raise AssertionError("invalid command: '%s'" % command)
if __name__ == "__main__":
    # Pass a copy of argv so main() may decode/mutate it freely.
    rval = main(sys.argv[:])
    sys.exit(rval)
|
# NOTE: Python 2 module name (renamed `configparser` in Python 3).
import ConfigParser
from flask import Flask, request, jsonify
import datetime
import json
import os

# Load deployment settings from server.cfg two directories above this file.
config = ConfigParser.ConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/../../server.cfg')
# Contact points from the [RD] section — presumably the duty officer; confirm.
rd_email = config.get('RD', 'email')
rd_phone = config.get('RD', 'phone')
app = Flask(__name__)
@app.route('/update', methods=['POST'])
def update_incident():
    """Receive a JSON list of incident records and fan out notifications.

    For each parsed incident, sends a short SMS summary and a long HTML
    e-mail (each sender runs via .start(), presumably on its own thread —
    confirm in SMSSender/EmailSender). Always returns {"success": true}.
    """
    # Read the raw POST body and decode it as JSON (a list of records).
    data_str = request.stream.read()
    decoded = json.loads(data_str)
    incident_list = parse_incident(decoded)
    for incident in incident_list:
        short_msg = parse_short_status(incident)
        long_msg = parse_long_status(incident)
        # Imported inside the function — presumably to avoid import-time
        # side effects or cycles; verify before hoisting to module level.
        from SMSSender import SMSSender
        sms_client = SMSSender(short_msg, incident['gps_location'])
        sms_client.start()
        from EmailSender import EmailSender
        # NOTE(review): the second argument looks like a scrubbed password
        # ('__REMOVED__'); it should come from server.cfg, not source code.
        email_client = EmailSender('incidentsinsg@gmail.com', '__REMOVED__', long_msg, incident['gps_location'])
        email_client.start()
        # print "Sent email", long_msg
    ret = {
        "success": True
    }
    return jsonify(ret)
"""
data format
[{
"tags": ["fire", "level1"],
"incident": {
"startTime": 1396509341400,
"isLatest": true,
"level": 1,
"timeStamp": 1396509354240,
"remark": "lala",
"location": [{
"location": "Changi Airport Terminal 2, Airport Boulevard, Singapore 819643",
"type": "string"
}, {
"lat": 1.35372,
"lng": 103.989,
"type": "gps"
}],
"parent": null,
"type": "fire",
"isValid": true,
"completeTime": null,
"operator": "operator5",
"initialId": "533d0aaa84ae5f59ea022715",
"_id": "533d0aaa84ae5f59ea022715"
}
}]
"""
def parse_incident(decoded):
    """Flatten raw incident payloads into notification-ready dicts.

    decoded: list of {"tags": [...], "incident": {...}} records (see the
    data-format string above for the full incoming schema).

    Returns a list of dicts with keys: notification_type ("new",
    "update" or "complete"), type, time (formatted '%H:%M %m/%d/%Y'),
    remark, level, string_location, and gps_location (a {"lat", "lng"}
    dict, or None when no GPS entry is present).
    """
    incident_list = []
    for entry in decoded:
        raw = entry['incident']

        # An incident whose _id differs from its initialId is a follow-up
        # update; a non-null completeTime means the alert has been lifted
        # (and takes precedence over "update").
        notification_type = "new"
        if raw['_id'] != raw['initialId']:
            notification_type = "update"
        if raw['completeTime'] is not None:
            notification_type = "complete"

        # startTime is epoch milliseconds; rendered in the server's local
        # timezone (naive fromtimestamp) — confirm callers expect local time.
        start_time = datetime.datetime.fromtimestamp(raw['startTime'] / 1e3)
        start_time_str = start_time.strftime('%H:%M %m/%d/%Y')

        # Renamed from `type` to avoid shadowing the builtin.
        incident_type = raw['type']

        # Fall back to a generic location when no string entry is present;
        # the last entry of each kind wins if duplicates exist (as before).
        string_location = "Singapore"
        gps_location = None
        for location in raw['location']:
            if location['type'] == 'string':
                string_location = location['location']
            if location['type'] == 'gps':
                gps_location = {
                    "lat": location['lat'],
                    "lng": location['lng']
                }

        incident_list.append({
            "notification_type": notification_type,
            "type": incident_type,
            "time": start_time_str,
            "remark": raw['remark'],
            "level": raw['level'],
            "string_location": string_location,
            "gps_location": gps_location
        })
    return incident_list
def parse_long_status(incident):
    """Render *incident* as an HTML e-mail body.

    incident: a dict produced by parse_incident(). Returns one HTML
    string with a headline matching the notification_type and a <p>
    line for each of time, type, level, location, and remark.
    Unknown notification_type values get no headline.
    """
    msg = """\
<html>
  <head></head>
  <body>"""
    # Headline varies by notification type.
    if incident['notification_type'] == "new":
        msg += "<h3>Incident Alert</h3>"
    elif incident['notification_type'] == "update":
        msg += "<h3>Incident Update</h3>"
    elif incident['notification_type'] == "complete":
        msg += "<h3>Incident Alert Lifted</h3>"
    msg += """\
    <div>
    """
    # Detail fields, one paragraph each (level may be an int, hence str()).
    msg += "<p> Time: " + incident['time'] + "</p>"
    msg += "<p> Type: " + incident['type'] + "</p>"
    msg += "<p> Level: " + str(incident['level']) + "</p>"
    msg += "<p> Location: " + incident['string_location'] + "</p>"
    msg += "<p> Remark: " + incident['remark'] + "</p>"
    msg += """
    </div>
  </body>
</html>
"""
    return msg
def parse_short_status(incident):
    """Build the one-line SMS summary for *incident*.

    incident: a dict produced by parse_incident(). The title depends on
    notification_type (empty for unknown values), followed by type,
    level, time, location, and the free-text remark.
    """
    titles = {
        'new': 'Incident Alert',
        'update': 'Incident Update',
        'complete': 'Incident Alert Lifted',
    }
    title = titles.get(incident['notification_type'], "")
    return "{0}: {1} level {2} reported at {3}. Location: {4}. {5}".format(
        title,
        incident['type'],
        incident['level'],
        incident['time'],
        incident['string_location'],
        incident['remark'],
    )
if __name__ == "__main__":
    # Development server only: debug=True enables the reloader and the
    # interactive traceback page; do not run this way in production.
    app.debug = True
    # Listen on all interfaces, port 16100.
    app.run(host='0.0.0.0', port=16100)
class GlobalConfig:
    """Central configuration for training a VFNet detector (MMDetection)
    on the VinBigData chest X-ray dataset.

    All values are class attributes read directly; the class is never
    instantiated.
    """
    # Input resolution — data_path below points at 1024x1024 PNGs.
    image_size = 1024
    # Enable data augmentation during training.
    augment = True
    #model setting
    model_name = 'vfnet'
    # MMDetection config used as the base; keys below override it.
    config_file = 'configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py'
    # COCO-pretrained checkpoint from the OpenMMLab model zoo.
    pretrain_url = 'https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-6879c318.pth'
    #directory
    csv_path = '../../data/csv'
    # Zipped image archive on a shared Google Drive (Colab-style path).
    data_path = '/content/drive/Shareddrives/Deep Learning/vinbigdata_1024x1024_png.zip'
    output_path = '../vinbig_output'
    log_path = '../logs'
    pretrain_store_path = '../checkpoint'
    config_path = '../configs'
    ### Overwrite model configuration
    # Data
    # The 14 VinBigData abnormality classes (order defines label ids).
    num_classes = 14
    classes = ("Aortic enlargement", "Atelectasis", "Calcification", "Cardiomegaly",
               "Consolidation", "ILD", "Infiltration", "Lung Opacity", "Nodule/Mass",
               "Other lesion", "Pleural effusion", "Pleural thickening", "Pneumothorax",
               "Pulmonary fibrosis")
    # Path
    train_root_path = '../../../train'
    # Checkpoint to resume from; None starts from the pretrained weights.
    model_path = None
    # Training
    samples_per_gpu = 4
    lr = 0.0025
    seed = 2020
    num_epochs = 12
    # GPU device ids to train on.
    gpu = [0]
    # Logs
    # Save a checkpoint / run evaluation every N epochs.
    checkpoint_interval = 2
    eval_interval = 2
    # Inference-time settings.
    test = {
        'test_root_path': '../../../test',
        'test_mode': True,
        'pipeline_type': 'LoadImageFromFile',
        'score_thr': 0.05
    }
|
#Main file for Probot
#Authors: Jonathan D'Alonzo & Stephen Canzanese
#Rowan University Artificial Intelligence Semester Project
|
import pygame, sys
import numpy as np
import itertools
import neurodot_present.present_lib as pl
from neurodot_present.present_lib import Screen, FixationCross, CheckerBoardFlasher, UserEscape, run_start_sequence, run_stop_sequence
# NOTE: Python 2 script (uses print statements); do not run under Python 3.
pl.DEBUG = False
################################################################################
if __name__ == "__main__":
    import random
    pygame.init()
    # Hide the cursor during stimulus presentation.
    pygame.mouse.set_visible(False)
    # Experiment parameters.
    FLASH_RATES = [16,19,23] #Hz
    # One vsync marker value per flash rate (paired below via zip()).
    VSYNC_VALUES = [1,2,3]
    BLOCKS = 3
    # [sic] "REPITITIONS": repetitions of each (rate, vsync) pair per block.
    REPITITIONS = 3
    CHECKERBOARD_NROWS = 64
    FLASH_DURATION = 8 #seconds
    # Inter-trial pause is drawn uniformly from this range (seconds).
    PAUSE_DURATION_RANGE = (2.0,5.0)
    FC = FixationCross()
    #CBFs
    try:
        #start sequence
        run_start_sequence()
        #trials
        for b in range(BLOCKS):
            # Python 2 only: zip() returns a list here, so list-repetition
            # yields REPITITIONS copies of each (rate, vsync) pair.
            stims = REPITITIONS*zip(FLASH_RATES, VSYNC_VALUES)
            # Randomize trial order within the block.
            random.shuffle(stims)
            for flash_rate, vsync_value in stims:
                # Flash a checkerboard at this trial's rate...
                CBF = CheckerBoardFlasher(flash_rate=flash_rate)
                CBF.setup_checkerboard(nrows = CHECKERBOARD_NROWS, show_fixation_dot = True)
                CBF.run(duration = FLASH_DURATION, vsync_value = vsync_value)
                # ...then show a blank fixation screen for a random pause.
                SCR = Screen(color = "black", fixation_cross = FC)
                pause_duration = random.uniform(*PAUSE_DURATION_RANGE)
                SCR.run(duration = pause_duration, vsync_value = 0)
    except UserEscape as exc:
        # User pressed the escape key; report and fall through to cleanup.
        print exc
    finally:
        #stop sequence
        run_stop_sequence()
    #exit
    print "Exiting the presentation"
    pygame.quit()
    sys.exit()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.