import unittest
from katas.kyu_7.make_them_bark import Dog
class DogTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(
Dog('Apollo', 'Dobermann', 'male', '4').bark(), 'Woof!'
)
def test_equals_2(self):
self.assertEqual(
Dog('Zeus', 'Dobermann', 'male', '4').bark(), 'Woof!'
)
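# A minimal sketch of the class these tests assume (hypothetical; the real
# implementation lives in katas.kyu_7.make_them_bark):
class Dog:
    def __init__(self, name, breed, sex, age):
        self.name = name
        self.breed = breed
        self.sex = sex
        self.age = age

    def bark(self):
        return 'Woof!'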
|
'''
Created on Jul 15, 2013
@author: emma
'''
from selenium import webdriver #imports selenium
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
from selenium.webdriver.common.action_chains import ActionChains
from UnitTesting.page_objects.base_page_object import base_page_object
import time
class book(base_page_object):
def __init__(self, webd_wrap):
base_page_object.__init__(self, webd_wrap)
def confirm_page(self):
''' raises AssertionError if page is incorrect '''
self._webd_wrap.wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'purchase-button')), 'Not on Book Profile page')
_url = self._webd_wrap._driver.current_url
if not _url.startswith(self._webd_wrap._baseURL + '/book'):
raise AssertionError("Not on a Book Profile page.")
def click_my_zola(self):
self.confirm_page()
time.sleep(2)
self._webd_wrap._driver.find_element_by_id('h-user-personalized-toolbar').find_element_by_xpath('div/a').click()
########################################################################
########################################################################
def rate(self):
self.confirm_page()
self._webd_wrap._driver.find_element_by_class_name('ui-rating-bar-section-large').find_element_by_xpath('span/div[3]/a').click()
# make sure the rating bar reloads before moving on
self._webd_wrap.wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'ui-rating-bar-section-large')), 'rating bar not present')
def click_buy(self):
self.confirm_page()
self._webd_wrap._driver.find_element_by_class_name('l-sidebar-primary').find_element_by_xpath('div/div/span/a').click()
def click_add_to_list(self):
self.confirm_page()
self._webd_wrap._driver.find_element_by_link_text("ADD TO LIST").click()
def click_recommend(self):
self.confirm_page()
self._webd_wrap._driver.find_element_by_class_name('l-sidebar-primary').find_element_by_xpath('div/div/ul/li[3]/a').click()
def choose_wishlist(self):
time.sleep(1)
self._webd_wrap._driver.find_element_by_link_text('My Wishlist').click()
|
from numpy import *
from matplotlib.pyplot import *
data = loadtxt('random.dat').transpose()
data[0] /= 1e6
plot(data[0],zeros_like(data[0]),'k--')
errorbar(data[0],data[1]-pi,data[2],label='drand48')  # pi comes from the numpy star import; np was never defined
errorbar(data[0],data[3]-pi,data[4],label='mt19937')
legend()
xlabel('million samples')
ylabel('estimate - $\\pi$')
savefig('random.pdf')
|
import _plotly_utils.basevalidators
class SliderValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name='sliderdefaults', parent_name='layout', **kwargs
):
super(SliderValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop('data_class_str', 'Slider'),
data_docs=kwargs.pop('data_docs', """
"""),
**kwargs
)
|
import ordenamiento as order
class Node:
def __init__(self, numero):
self.numero = numero
self.next = None
    def get_data(self):
        return self.numero
class Lista:
def __init__(self):
self.head = None
self.size = 0
# def ordenar(self):
# if self.vacio():
# print "Lista vacia"
# else:
# aux = self.head
# l = []
# while aux:
# l.append(aux)
# aux = aux.next
# l.sort(reverse=True)
# newlist = Lista()
    def insert_sort(self, numero):
        if self.vacio():
            self.head = Node(numero)
        else:
            actual = self.head
            nuevo = Node(numero)
            if actual.numero > nuevo.numero:
                nuevo.next = actual
                self.head = nuevo
                return  # without this, the node would be linked in a second time below
            while actual.next:
                if actual.next.numero > nuevo.numero:
                    break
                actual = actual.next
            nuevo.next = actual.next
            actual.next = nuevo
    def select(self, i):
        aux = self.head
        count = 0
        while aux:
            if count == i:
                print "element", i, "of the list is:", aux.numero
                return
            else:
                count = count + 1
                aux = aux.next
        print "element is not in the list"
    def posicion(self, x):
        aux = self.head
        pos = 0
        while aux:
            if aux.numero == x:
                print "the position of element", x, "in the list is:", pos
                return
            else:
                pos = pos + 1
                aux = aux.next
        print "element is not in the list"
    def pertenencia(self, numero):
        aux = self.head
        while aux:
            if aux.numero == numero:
                print "the number", numero, "belongs to the set"
                return
            aux = aux.next
        print "the number", numero, "is not in the set"
    def vacio(self):
        return self.head is None
    def minimo(self):  # the list is kept in ascending order, so the minimum is the head
        return self.head
    def insert_first(self, numero):  # add at the front
if self.vacio():
self.head = Node(numero)
else:
node = Node(numero)
node.next = self.head
self.head = node
def insert_last(self, numero):
if self.vacio():
self.head = Node(numero)
else:
aux = self.head
while aux.next:
aux = aux.next
node = Node(numero)
aux.next = node
    def print_list(self):
        if self.vacio():
            print "Empty list"
        else:
            aux = self.head
            while aux:
                print "Number: ", aux.numero
                aux = aux.next
if __name__ == "__main__":
lista = Lista()
lista.insert_sort(0)
lista.insert_sort(400)
lista.insert_sort(4)
lista.insert_sort(5)
lista.insert_sort(1)
lista.insert_sort(20)
# lista.pertenencia(20)
# lista.pertenencia(1)
# lista.pertenencia(8)
# lista.posicion(20)
lista.print_list()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
provides function to check if a string matches a regular expression
"""
from re import match
def validate_by_regex(string, regex):
    """
    checks if a string matches the regular expression
    validate_by_regex(string, regex) -> (True|False)
    string - some text string (str)
    regex - regular expression (str)
    :return: True if the string matches, False otherwise
    """
return bool(match(regex, string))
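# Quick usage sketch; note that re.match anchors at the start of the string only:
if __name__ == '__main__':
    print(validate_by_regex('user@example.com', r'^\S+@\S+$'))  # True
    print(validate_by_regex('not an email', r'^\S+@\S+$'))      # False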
|
# Generated by Django 2.1.5 on 2019-08-07 11:33
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('blog', '0074_auto_20190806_1315'),
]
operations = [
migrations.AlterField(
model_name='grouppost',
name='date_posted',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='webgroup',
name='date_updated',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
|
# coding:utf-8
from socket import *
from time import ctime
import time
import struct
print("=====================时间戳TCP服务器=====================");
def ConfigServer():
HOST = '127.0.0.1' # 主机号为空白表示可以使用任何可用的地址。
PORT = 4444 # 端口号
BUFSIZ = 1024 # 接收数据缓冲大小
ADDR = (HOST, PORT)
tcpSerSock = socket(AF_INET, SOCK_STREAM) # 创建TCP服务器套接字
tcpSerSock.bind(ADDR) # 套接字与地址绑定
tcpSerSock.listen(5) # 监听连接,同时连接请求的最大数目
print('参数服务器等待客户端的连接...')
while True:
tcpCliSock, addr = tcpSerSock.accept() # 接收客户端连接请求
print('参数服务器取得连接:', addr)
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
a=struct.pack('>h',233)
print(a)
tcpCliSock.send(struct.pack('>h',233))
while True:
data = tcpCliSock.recv(BUFSIZ) # 连续接收指定字节的数据,接收到的是字节数组
print("byte类数据", data)
if data==b'\xfa\x5e':
tcpCliSock.send(data)
print('key接收成功,并返回key')
# data=struct.unpack('>h', data)
# print("short类数据",data)
if data==b'\xaa':
tcpCliSock.send(struct.pack('>h',1))
print("已返回连接反馈信号")
#print("字符串数据",result)
if not data: # 如果数据空白,则表示客户端退出,所以退出接收
break
# tcpCliSock.send('[%s] %s' % (bytes(ctime(), 'utf-8'), data))
#tcpCliSock.send(bytes('[%s] %s' % (ctime(), data.decode('utf-8')), 'utf-8')) # 向客户端发送时间戳数据,必须发送字节数组
tcpCliSock.close() # 关闭与客户端的连接
print("参数连接已关闭")
tcpSerSock.close() # 关闭服务器socket
def DataServer():
    HOST = '127.0.0.1'  # loopback; an empty host string would accept any available address
    PORT = 4445  # port number
    BUFSIZ = 1024  # receive buffer size
    ADDR = (HOST, PORT)
    tcpSerSock = socket(AF_INET, SOCK_STREAM)  # create the TCP server socket
    tcpSerSock.bind(ADDR)  # bind the socket to the address
    tcpSerSock.listen(5)  # listen for connections; maximum number of queued connection requests
    print('Data server waiting for client connections...')
    while True:
        tcpCliSock, addr = tcpSerSock.accept()  # accept a client connection request
        print('Data server got a connection:', addr)
        print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        tcpCliSock.send(struct.pack('>h', 233))
        while True:
            data = tcpCliSock.recv(BUFSIZ)  # receive up to BUFSIZ bytes; the result is a byte string
            print("byte data", data)
            if data == b'\xfa\x5e':
                tcpCliSock.send(data)
                print('key received, echoing it back')
                # data = struct.unpack('>h', data)
                # print("short data", data)
            if data == b'\xaa':
                tcpCliSock.send(struct.pack('>h', 1))
                print("connection acknowledgement sent")
                # print("string data", result)
            if not data:  # empty data means the client closed the connection, so stop receiving
                break
            # tcpCliSock.send('[%s] %s' % (bytes(ctime(), 'utf-8'), data))
            # tcpCliSock.send(bytes('[%s] %s' % (ctime(), data.decode('utf-8')), 'utf-8'))  # send the timestamp back; must be a byte string
        tcpCliSock.close()  # close the client connection
        print("data connection closed")
    tcpSerSock.close()  # close the server socket
    print("data server closed")
|
"""Main app package."""
|
import unittest
from katas.kyu_7.noonerize_me import noonerize
class NoonerizeTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(noonerize([12, 34]), 18)
def test_equals_2(self):
self.assertEqual(noonerize([55, 63]), 12)
def test_equals_3(self):
self.assertEqual(noonerize([357, 579]), 178)
def test_equals_4(self):
self.assertEqual(noonerize([1000000, 9999999]), 7000001)
def test_equals_5(self):
self.assertEqual(noonerize([1000000, 'hello']), 'invalid array')
def test_equals_6(self):
self.assertEqual(noonerize(['pippi', 9999999]), 'invalid array')
def test_equals_7(self):
self.assertEqual(noonerize(['pippi', 'hello']), 'invalid array')
def test_equals_8(self):
self.assertEqual(noonerize([1, 1]), 0)
def test_equals_9(self):
self.assertEqual(noonerize([1, 0]), 1)
def test_equals_10(self):
self.assertEqual(noonerize([0, 1]), 1)
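# A minimal implementation consistent with these tests (hypothetical; the real
# one lives in katas.kyu_7.noonerize_me): swap the first digits of the two
# numbers, then return the absolute difference.
def noonerize(numbers):
    if any(not isinstance(n, int) for n in numbers):
        return 'invalid array'
    a, b = (str(n) for n in numbers)
    return abs(int(b[0] + a[1:]) - int(a[0] + b[1:]))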
|
# Generated by Django 3.2.4 on 2021-07-10 20:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('adminapp', '0009_auto_20210709_0156'),
]
operations = [
migrations.AlterModelOptions(
name='exhibit',
options={'verbose_name_plural': 'Exhibit'},
),
]
|
import aiohttp
import requests
async def post_request(url, json, proxy=None):
async with aiohttp.ClientSession() as client:
try:
async with client.post(url, json=json, proxy=proxy, timeout=60) as response:
html = await response.text()
return {'html': html, 'status': response.status, 'error': None}
except aiohttp.ClientError as err:
return {'error': err}
def get_request(url, proxy=None):
    try:
        res = requests.get(
            url,
            headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'},
            proxies={'http': proxy, 'https': proxy} if proxy else None,  # pass the proxy through to requests
        )
        return {'html': res.text, 'status': res.status_code, 'url': res.url, 'error': None, 'original_url': url}
    except requests.RequestException:
        return
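# Usage sketch (URLs are illustrative):
# import asyncio
# print(asyncio.run(post_request('https://httpbin.org/post', json={'k': 'v'})))
# print(get_request('https://httpbin.org/get'))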
|
#!/usr/bin/env python
__author__ = "Master Computer Vision. Team 02"
__license__ = "M6 Video Analysis"
# Import libraries
import os
import numpy as np
from util import *
from sort import *
from gaussian_back_sub import *
from train_color import *
from scipy import ndimage
dataset="own2" # "highway","fall", "traffic", "traffic_stabilized", "own1", "own1_stabilized", "own2", "own2_stabilized"
# Path configurations
video_path = "./videos/"
# Initialize centroids
centroids = []
# Create instance of the SORT tracker (Simple Online and Realtime Tracking)
tracker_motion = Sort()
# Frame counter
frame_counter = 0
path_in, first_train_frame, last_train_frame, first_test_frame, last_test_frame, im_size, alpha, colorSpace, connectivity, areaPixels, ac_morphology, SE1size, SE2size = setup(dataset)
if __name__ == "__main__":
# W5 T1.1 Tracking with Kalman filter using SORT tracker
# Use Kalman filter to track each vehicle appearing in the sequence
    # Apply the background subtraction work previously done
mu_matrix, sigma_matrix = training_color(path_in, first_train_frame, last_train_frame, alpha, colorSpace)
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(video_path+"kalman_filter_"+dataset+".avi", fourcc, 10, (im_size[1],im_size[0]))
filenames=sorted(os.listdir(path_in))
# Read sequence of images sorted
for filename in filenames[first_test_frame:last_test_frame]:
# Display current frame
print "Processing frame: "+str(filename)
# Read image from groundtruth
frame = cv2.imread(path_in+filename)
        # Apply background subtraction
background_filtered = gaussian_color(frame, mu_matrix, sigma_matrix, alpha, colorSpace, connectivity, areaPixels,ac_morphology,SE1size,SE2size)
cv2.imwrite("bk_subs_images/"+filename[0:(len(filename)-4)]+".jpg",np.uint8(background_filtered))
# Track blobs
centroids = get_centroids(background_filtered, areaPixels)
# Update tracker motion
dets = np.array(centroids)
trackers = tracker_motion.update(dets)
# Save tracker values
save_tracker_positions(trackers)
# Show results
frame = display_detections(frame, background_filtered, areaPixels)
frame = display_motion(frame, trackers)
cv2.imshow("tracking KALMAN FILTER", frame)
cv2.waitKey(1)
# Write frame into video
out.write(np.uint8(frame))
# Update frame_counter
frame_counter = frame_counter + 1
|
from common.admin import create_admin
from starwars.models import ALL_MODELS
for model in ALL_MODELS:
create_admin(model)
|
from django.shortcuts import render
import requests
from rest_framework import viewsets
from .models import Weather
from .serialize import WeatherSerializer
# Create your views here.
class WeatherView(viewsets.ModelViewSet):
queryset = Weather.objects.all()
serializer_class = WeatherSerializer
|
from spack import *
import os
import distutils
class Madgraph5amcatnlo(Package):
homepage = "https://launchpad.net/mg5amcnlo/"
url = "http://cmsrep.cern.ch/cmssw/repos/cms/SOURCES/slc7_amd64_gcc810/external/madgraph5amcatnlo/2.6.0/MG5_aMC_v2.6.0.tar.gz"
version('2.6.0', sha256='ba182a2d85733b3652afa87802adee60bf6a5270cc260cdb38366ada5e8afef4')
patch('madgraph5amcatnlo-config.patch')
patch('madgraph5amcatnlo-compile.patch')
depends_on('python')
depends_on('hepmc')
depends_on('root')
depends_on('lhapdf')
depends_on('gosamcontrib')
depends_on('fastjet')
depends_on('pythia8')
depends_on('thepeg')
def setup_environment(self, spack_env, build_env):
spack_env.set('FC', spack_f77+' -std=legacy')
def install(self, spec, prefix):
filter_file('\${HEPMC_ROOT}', '%s'%spec['hepmc'].prefix,'input/mg5_configuration.txt')
filter_file('\${PYTHIA8_ROOT}', '%s'%spec['pythia8'].prefix,'input/mg5_configuration.txt')
filter_file('\${LHAPDF_ROOT}', '%s'%spec['lhapdf'].prefix,'input/mg5_configuration.txt')
filter_file('\${FASTJET_ROOT}', '%s'%spec['fastjet'].prefix,'input/mg5_configuration.txt')
filter_file('\${GOSAMCONTRIB_ROOT}', '%s'%spec['gosamcontrib'].prefix,'input/mg5_configuration.txt')
filter_file('\${THEPEG_ROOT}', '%s'%spec['thepeg'].prefix,'input/mg5_configuration.txt')
install('input/mg5_configuration.txt', 'input/mg5_configuration_patched.txt')
python=which('python')
python('./bin/compile.py')
os.remove('bin/compile.py')
os.remove('input/mg5_configuration.txt')
install('input/mg5_configuration_patched.txt', 'input/mg5_configuration.txt')
content=str("""
generate p p > t t~ [QCD]
output basiceventgeneration
launch
set nevents 5
""")
with open('basiceventgeneration.txt','w') as f:
f.write(content)
python('./bin/mg5_aMC', 'basiceventgeneration.txt')
for f in find('.', '*.tgz'):
os.remove(f)
distutils.dir_util.copy_tree('.', self.prefix)
|
import gffpandas.gffpandas as gffpd
import pandas as pd
import sys
import time
import multiprocessing as mp
import pysam
import itertools
bam=sys.argv[1]
cores = int(sys.argv[3])  # worker count arrives as a string on the command line
gff=sys.argv[2]
#bam="data/120796AAligned.sortedByCoord.out.mkdup.bam"
#cores=4
#gff="data/Homo_sapiens.GRCh37.87.chr.gff3"
annotation = gffpd.read_gff3(gff)
attr_to_columns = annotation.attributes_to_columns()
exons=attr_to_columns[attr_to_columns["type"]=="exon"]
del attr_to_columns
del annotation
coords={}
for a, b, c, d in zip(exons["start"], exons["end"], exons["seq_id"], exons["exon_id"]):
    key = str(a) + ";" + str(b) + ";" + str(c)
    if key not in coords:
        coords[key] = d
    else:
        coords[key] = coords[key] + ";" + d
del exons
def split_df(dic, cores):
    # Split the dict into `cores` roughly equal chunks for the worker pool.
    items = list(dic.items())
    chunk_size = -(-len(items) // cores)  # ceiling division
    return [dict(items[i:i + chunk_size]) for i in range(0, len(items), chunk_size)]
def calc_cov_bam(bam,dic):
fin_df=pd.DataFrame(columns=["start","end","cov","ID", "Chr"])
covlist=[]
covlist.append(bam)
samfile=pysam.AlignmentFile(bam, "rb")
for key,value in dic.items():
count=0
start=int((key.split(";"))[0])
end=int((key.split(";"))[1])
chr=(key.split(";"))[2]
for pos in samfile.pileup("chr"+str(chr), int(start)-1, int(end)-1, truncate=True):
count+=pos.n
for exonId in value.split(";"):
try:
fin_df=fin_df.append({"start":start,"end":end,"cov":(count/(abs(start-end))),"ID":exonId, "Chr":str(chr)}, ignore_index=True)
print(fin_df)
except ZeroDivisionError:
print(key,value)
return fin_df.values.tolist()
results=[]
def collect_results(result):
"""Uses apply_async's callback to setup up a separate Queue for each process"""
results.extend(result)
if __name__ == "__main__":
start_time = time.time()
# Repeats the compute intensive operation on 10 data frames concurrently
    pool = mp.Pool(processes=cores)
c=0
d=0
    for chunk in split_df(coords, cores):
pool.apply_async(calc_cov_bam, args=(bam,chunk),callback=collect_results)
pool.close()
pool.join()
# Converts list of lists to a data frame
df = pd.DataFrame(results,columns=["start","end","cov","ID", "Chr"])
print("--- %s seconds ---" % (time.time() - start_time))
df.to_csv(sys.argv[4], sep="\t", index=False,)
|
from marshmallow_sqlalchemy import ModelConverter
from marshmallow import fields
from citext import CIText
class AppModelConverter(ModelConverter):
"""
Partly override the marshmallow_sqlalchemy converter.
"""
SQLA_TYPE_MAPPING = dict(
list(ModelConverter.SQLA_TYPE_MAPPING.items()) +
[(CIText, fields.Str)]
)
def _add_column_kwargs(self, kwargs, column):
"""
Fix the case where Floats are returned with a null scale (which breaks Swagger) by removing the associated
'places' attribute which the converter should not have added.
"""
ModelConverter._add_column_kwargs(self, kwargs, column)
if not kwargs.get('places', 'OK'):
del kwargs['places']
|
class Chubs:
fat = 0 #pounds
name = 'generic chubs'
def eat(self, pounds_of_food):
self.fat = self.fat + pounds_of_food / 2
print("burp")
poop = pounds_of_food / 2
return poop
def __init__(self, pounds, name):
print('hit init')
self.fat = pounds
if(name != ''):
self.name = name
def main():
alan = Chubs(300, 'Alan')
print(alan.fat)
print(alan.name)
alans_poop = alan.eat(50)
print(alan.fat)
print(alans_poop)
baron = Chubs(100, 'Baron')
print(baron.fat)
print(baron.name)
barons_poop = baron.eat(alans_poop)
print(baron.fat)
alan.eat(barons_poop)
print(alan.fat)
eric = Chubs(50, '')
print(eric.fat)
print(eric.name)
main()
|
import matplotlib.pyplot as plt
import os.path as pth
import sys
# Pull data from log file
def GetData(logfile):
if not pth.exists(logfile): # checking of the existence of log file
print('Cannot read log file!')
return []
with open(logfile) as f: # reading of data from log file
data = f.read()
return data.splitlines()
# Get values of losses and accuracies during the training process
def GetFeatures(data):
Xloss, Yloss, Xacc, Yacc = [], [], [], []
# getting X and Y values for dependency of iterations and losses
    str_iters = [st for st in data if ' Iteration ' in st and ' loss = ' in st]
for str_iter in str_iters:
sub_strs = str_iter.split()
if sub_strs[5].isdigit():
Xloss.append(int(sub_strs[5]))
else:
Xloss.append(int(sub_strs[5][:-1]))
Yloss.append(float(sub_strs[-1]))
# getting X and Y values for dependency of iterations and accuracies
step = int([st for st in data if 'test_interval: ' in st][0].split()[-1])
Xacc = [x for x in Xloss if x % step == 0]
str_accs = [st for st in data if ' accuracy@1 = ' in st]
for str_acc in str_accs:
Yacc.append(float(str_acc.split()[-1]))
return Xloss, Yloss, Xacc, Yacc
# Build the dependency of iterations and losses
def BuildLossHistoryImage(Xloss, Yloss):
plt.plot(Xloss, Yloss)
plt.xlabel('S')
plt.ylabel('L')
#plt.xlabel('Number of iteration')
#plt.ylabel('Loss')
#plt.xlabel('Число итераций')
#plt.ylabel('Потери')
plt.savefig('loss.png')
# Build the dependency of iterations and accuracies
def BuildAccuracyHistoryImage(Xacc, Yacc):
plt.plot(Xacc, Yacc)
plt.xlabel('S')
plt.ylabel('A')
#plt.xlabel('Number of iteration')
#plt.ylabel('Accuracy')
#plt.xlabel('Число итераций')
#plt.ylabel('Доля правильных ответов')
plt.savefig('accuracy.png')
# Build images of dependencies
def BuildDependenciesImages(Xloss, Yloss, Xacc, Yacc):
BuildLossHistoryImage(Xloss, Yloss)
plt.clf()
BuildAccuracyHistoryImage(Xacc, Yacc)
if (len(sys.argv) != 2): # checking of right call
print('The call of this script looks like this:\n' +
' python trainhistory.py log_file')
else:
logfile = sys.argv[1]
data = GetData(sys.argv[1])
if data: # main part of script
Xloss, Yloss, Xacc, Yacc = GetFeatures(data)
BuildDependenciesImages(Xloss, Yloss, Xacc, Yacc)
|
"""
Created by Alex Wang on 2018-2-27
Poisson Reconstruction
https://gist.github.com/jackdoerner/b9b5e62a4c3893c76e4c
"""
import cv2
def mask_det(maskpath):  # maskpath is a cropped watermark sample
    img = cv2.imread(maskpath, 0)
    sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0)
    sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1)
    sobelxy = numpy.sqrt(sobely**2 + sobelx**2)
    mask = cv2.imread(maskpath)
    img_dst = poisson_reconstruct(sobely, sobelx, sobelxy)  # build the watermark mask
    # cv2.imwrite('crop/douyin2/mask_xy3.png', img_dst)  # saving clamps the image data to the valid range
    # mask1 = cv2.imread('crop/douyin2/mask_xy3.png', 0)
    # for i in range(0, mask1.shape[0]):
    #     for j in range(0, mask1.shape[1]):
    #         if mask1[i, j] < 80:  # 50; threshold the mask to refine the watermark region, tune per watermark
    #             mask[i, j, :] = [0, 0, 0]
    # cv2.imwrite('crop/douyin2/mask_new3.jpg', mask)  # save the watermark template
    return img_dst
"""
poisson_reconstruct.py
Fast Poisson Reconstruction in Python
Copyright (c) 2014 Jack Doerner
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import math
import numpy
import scipy, scipy.fftpack
def poisson_reconstruct(grady, gradx, boundarysrc):
# Thanks to Dr. Ramesh Raskar for providing the original matlab code from which this is derived
# Dr. Raskar's version is available here: http://web.media.mit.edu/~raskar/photo/code.pdf
# Laplacian
gyy = grady[1:,:-1] - grady[:-1,:-1]
gxx = gradx[:-1,1:] - gradx[:-1,:-1]
f = numpy.zeros(boundarysrc.shape)
f[:-1,1:] += gxx
f[1:,:-1] += gyy
# Boundary image
boundary = boundarysrc.copy()
    boundary[1:-1,1:-1] = 0
# Subtract boundary contribution
f_bp = -4*boundary[1:-1,1:-1] + boundary[1:-1,2:] + boundary[1:-1,0:-2] + boundary[2:,1:-1] + boundary[0:-2,1:-1]
f = f[1:-1,1:-1] - f_bp
# Discrete Sine Transform
tt = scipy.fftpack.dst(f, norm='ortho')
fsin = scipy.fftpack.dst(tt.T, norm='ortho').T
# Eigenvalues
(x,y) = numpy.meshgrid(range(1,f.shape[1]+1), range(1,f.shape[0]+1), copy=True)
denom = (2*numpy.cos(math.pi*x/(f.shape[1]+2))-2) + (2*numpy.cos(math.pi*y/(f.shape[0]+2)) - 2)
f = fsin/denom
# Inverse Discrete Sine Transform
tt = scipy.fftpack.idst(f, norm='ortho')
img_tt = scipy.fftpack.idst(tt.T, norm='ortho').T
# New center + old boundary
result = boundary
result[1:-1,1:-1] = img_tt
return result
|
import sys
import os
f = open("C:/Users/user/Documents/python/atcoder/ABC118/import.txt","r")
sys.stdin = f
# -*- coding: utf-8 -*-
n,m = map(int,input().split())
k = [0]
a = [[0]*(n+1) for _ in range(n+1)]  # independent rows, not n+1 aliases of one list
for i in range(1,n+1):
raw = list(map(int,input().split()))
k.append(raw[0])
tmp = [0] + raw[1:]
a[i] = tmp
all_a = []
for i in range(1,n+1):
all_a += a[i]
all_a.sort()
rev_a = all_a[:]
rev_a.reverse()
ans = 0
for i in range(1,m+1):
try:
if n == (len(all_a) - rev_a.index(i)) - all_a.index(i):
ans += 1
except ValueError:
pass
print(ans)
|
from datetime import timedelta
def arrayfy_ls(ls):
return str(ls).replace('(', '[').replace(')', ']').replace("'", '"')
class BusTrip:
def __init__(self, from_row, to_row):
self.bus_id = []
self.bus_dr = []
for rw1 in from_row:
for rw2 in to_row:
if rw1[0] == rw2[0] and rw2[1] - rw1[1] > 0:
self.bus_id.append(rw1[0])
self.bus_dr.append(rw2[1] - rw1[1])
def lq_bus_id(self):
q_str = ""
for item in self.bus_id:
q_str += " " + item + ","
return q_str[:-1]
def get_schedule(self, trips_details):
schedule = []
for row in trips_details:
time_elapsed = self.bus_dr[self.bus_id.index(row[0])]
schedule.append([row[0], std_small(str(row[1])), std_small(str(row[1] + timedelta(minutes=time_elapsed)))])
return schedule
def std_small(tm):
hr, mn, ap = tm.split(':')
hr = int(hr)
if hr > 24 and int(mn) > 0:
hr -= 24
if hr >= 12:
ap = ' PM'
if hr > 12:
hr -= 12
else:
ap = ' AM'
if hr == 0:
hr = 12
if hr < 10:
hr = "0" + str(hr)
return str(hr) + ":" + mn + ap
def small_std(tm):
tm, ap = tm.split(' ')
hr, mn = tm.split(':')
hr = int(hr)
if ap == 'PM' and hr != 12:
hr += 12
if hr > 9:
hr = str(hr)
else:
hr = "0" + str(hr)
return hr + ":" + mn + ":00"
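# Round-trip usage sketch: std_small and small_std convert between 24-hour
# and 12-hour clock strings.
# std_small('14:30:00')  -> '02:30 PM'
# small_std('02:30 PM')  -> '14:30:00'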
|
import os
import json
import csv
from datetime import datetime
from tqdm import tqdm
directory = "messages/inbox"
folders = os.listdir(directory)
if ".DS_Store" in folders:
folders.remove(".DS_Store")
for folder in tqdm(folders):
print(folder)
for filename in os.listdir(os.path.join(directory,folder)):
if filename.startswith("message"):
data = json.load(open(os.path.join(directory,folder,filename), "r"))
for message in data["messages"]:
try:
date = datetime.fromtimestamp(message["timestamp_ms"] / 1000).strftime("%Y-%m-%d %H:%M:%S")
sender = message["sender_name"]
content = message["content"]
with open("output.csv", 'a') as csv_file:
writer = csv.writer(csv_file)
writer.writerow([date,sender,content])
except KeyError:
pass
|
R1=float(input("R1= "))
R2=float(input("R2= "))
S1=3.14*(R1**2)
S2=3.14*(R2**2)
S3=S1-S2
if (R1>R2):
print(S1)
print(S2)
print(S3)
else:
print("It is not true!!!")
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('scrub_csv', '0005_uploader_user'),
]
operations = [
migrations.RemoveField(
model_name='document',
name='csvfile',
),
migrations.RemoveField(
model_name='uploader',
name='first_name',
),
migrations.RemoveField(
model_name='uploader',
name='is_active',
),
migrations.RemoveField(
model_name='uploader',
name='last_name',
),
migrations.RemoveField(
model_name='uploader',
name='password',
),
migrations.RemoveField(
model_name='uploader',
name='username',
),
migrations.AddField(
model_name='document',
name='file_name',
field=models.CharField(default=1, max_length=200),
preserve_default=False,
),
migrations.AlterField(
model_name='record',
name='doc_value',
field=models.CharField(default=b'', max_length=200),
),
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('occ_survey', '0031_structure'),
]
operations = [
migrations.CreateModel(
name='ControlAdjustments',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_updated', models.DateTimeField(auto_now=True)),
('room', models.CharField(max_length=20)),
('lux_th', models.IntegerField(default=0)),
('upp_th', models.IntegerField(default=0)),
('td', models.IntegerField(default=0)),
('override', models.IntegerField(default=0)),
],
options={
'db_table': 'control_adjustments',
},
),
migrations.CreateModel(
name='ControlChanges',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateTimeField()),
('room', models.CharField(max_length=20)),
('lux_th_new', models.IntegerField(default=0)),
('upp_th_new', models.IntegerField(default=0)),
('td_new', models.IntegerField(default=0)),
('lux_th', models.IntegerField(default=0)),
('upp_th', models.IntegerField(default=0)),
('td', models.IntegerField(default=0)),
],
options={
'db_table': 'control_changes',
},
),
]
|
import random
import nltk
text="""A trade war is an economic conflict resulting from extreme protectionism in which states raise or create tariffs or other trade barriers against each other in response to trade barriers created by the other party.Increased protection causes both nations' output compositions to move towards their autarky position.
Some economists agree that certain economic protections are more costly than others, because they may be more likely to trigger a trade war. For example, if a country were to raise tariffs, then a second country in retaliation may similarly raise tariffs. An increase in subsidies, however, may be difficult to retaliate against by a foreign country. Many poor countries do not have the ability to raise subsidies. In addition, poor countries are more vulnerable than rich countries in trade wars; in raising protections against dumping of cheap products, a government risks making the product too expensive for its people to afford.[citation needed]
Trade wars and protectionism have been implicated by some scholars as the cause of some economic crises, in particular the Great Depression."""
n=3
ngrams={}
words=nltk.word_tokenize(text)
for i in range(len(words)-n):
grams=' '.join(words[i:i+n])
if grams not in ngrams.keys():
ngrams[grams]=[]
ngrams[grams].append(words[i+n])
CurrentGram=' '.join(words[0:n])
result=CurrentGram
for i in range(32):
if CurrentGram not in ngrams.keys():
break
possibilities=ngrams[CurrentGram]
nextItem=possibilities[random.randrange(len(possibilities))]
result+=' '+nextItem
rwords=nltk.word_tokenize(result)
CurrentGram=' '.join(rwords[len(rwords)-n:len(rwords)])
print(result)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Filename: step02_rerun_preliminary_regression
# @Date: 2020/3/10
# @Author: Mark Wang
# @Email: wangyouan@gamil.com
"""
python -m SortData.ConstructVariable.step02_rerun_preliminary_regression
"""
import os
from Constants import Constants as const
from Utilities.generate_stata_code import generate_regression_code
IND_VARS = [' '.join(
['formal_Extend_tm3', 'formal_Extend_tm2', 'formal_Extend_tm1', 'formal_Extend', 'formal_Extend_t1',
'formal_Extend_t2', 'formal_Extend_t3', 'formal_Extend_t4', 'formal_Extend_t5']),
' '.join(
['formal_Shrink_tm3', 'formal_Shrink_tm2', 'formal_Shrink_tm1', 'formal_Shrink', 'formal_Shrink_t1',
'formal_Shrink_t2', 'formal_Shrink_t3', 'formal_Shrink_t4', 'formal_Shrink_t5']),
' '.join(
['real_Shrink_tm3', 'real_Shrink_tm2', 'real_Shrink_tm1', 'real_Shrink', 'real_Shrink_t1',
'real_Shrink_t2', 'real_Shrink_t3', 'real_Shrink_t4', 'real_Shrink_t5']),
' '.join(
['real_Extend_tm3', 'real_Extend_tm2', 'real_Extend_tm1', 'real_Extend', 'real_Extend_t1',
'real_Extend_t2', 'real_Extend_t3', 'real_Extend_t4', 'real_Extend_t5']),
' '.join(
['formal_ToLimit_tm3', 'formal_ToLimit_tm2', 'formal_ToLimit_tm1', 'formal_ToLimit', 'formal_ToLimit_t1',
'formal_ToLimit_t2', 'formal_ToLimit_t3', 'formal_ToLimit_t4', 'formal_ToLimit_t5']),
' '.join(
['formal_ToUnlimit_tm3', 'formal_ToUnlimit_tm2', 'formal_ToUnlimit_tm1', 'formal_ToUnlimit',
'formal_ToUnlimit_t1',
'formal_ToUnlimit_t2', 'formal_ToUnlimit_t3', 'formal_ToUnlimit_t4', 'formal_ToUnlimit_t5']),
' '.join(
['real_ToUnlimit_tm3', 'real_ToUnlimit_tm2', 'real_ToUnlimit_tm1', 'real_ToUnlimit', 'real_ToUnlimit_t1',
'real_ToUnlimit_t2', 'real_ToUnlimit_t3', 'real_ToUnlimit_t4', 'real_ToUnlimit_t5']),
' '.join(
['real_ToLimit_tm3', 'real_ToLimit_tm2', 'real_ToLimit_tm1', 'real_ToLimit', 'real_ToLimit_t1',
'real_ToLimit_t2', 'real_ToLimit_t3', 'real_ToLimit_t4', 'real_ToLimit_t5']),
]
DEP_VARS = ['{}_1'.format(i) for i in ['CAPEX', 'EBITDA', 'PTBI', 'ROA', 'R_B', 'LEVERAGE', 'CASH_HOLDING',
'TANGIBILITY', 'TobinQ', 'ln_emp', 'ln_sale']]
CTRL_VARS = 'ln_at SGA TANGIBILITY CAPEX FOREIGN PTBI VOL_PTBI'
if __name__ == '__main__':
date_str = '20200310'
save_file = os.path.join(const.STATA_CODE_PATH, '{}_preliminary_code_1.do'.format(date_str))
output_path = os.path.join(const.STATA_RESULT_PATH, '{}_preliminary_1'.format(date_str))
if not os.path.isdir(output_path):
os.makedirs(output_path)
cmd_list = ['clear', 'use "{}"'.format(os.path.join(const.STATA_DATA_PATH, '20200310_regression_data.dta'))]
for ind_key in IND_VARS:
output_file = os.path.join(output_path, '{}.txt'.format(ind_key.split(' ')[3]))
for dep_key in DEP_VARS:
cmd_list.extend(generate_regression_code(dep=dep_key, ind=ind_key, ctrl=CTRL_VARS, fe_option='gvkey fyear',
cluster_option='gvkey', output_path=output_file,
text_option='Firm Dummy, Yes, Year Dummy, Yes, Cluster, Firm',
data_description='tstat bdec(4) tdec(4) rdec(4)'))
with open(save_file, 'w') as f:
f.write('\n'.join(cmd_list))
print('do "{}"'.format(save_file))
|
import socket
import struct
import binascii
import sys, os
class ClientTable_Handler(object):
def validate_passkey(self,mac,passkey):
tbl = open("/var/www/cgi-bin/ClientTable.txt").readlines()
valid = False
keyID = None
for line in range(len(tbl)):
if mac in tbl[line]:
#print tbl[line]
#print "Found MAC: ", mac
if passkey in tbl[line]:
tbl_line = tbl[line].rstrip(' \t\r\n\0')
data = tbl_line.split(',')
keyID = data[4]
valid = True
return valid, keyID
def add_entry(self, rcvData):
client_tbl = open("/var/www/cgi-bin/ClientTable.txt",'a')
client_tbl.write(rcvData+'\n')
client_tbl.close()
def delete_entry2(self, mac,keyID):
newTbl = []
tbl = open("/var/www/cgi-bin/ClientTable.txt").readlines()
for line in range(len(tbl)):
if mac not in tbl[line] and tbl[line] != "\n":
if keyID not in tbl[line]:
newTbl.append(tbl[line])
client_tbl = open("/var/www/cgi-bin/ClientTable.txt",'w')
for line in range(len(newTbl)):
if newTbl[line] != '\n':
client_tbl.write(newTbl[line])
client_tbl.close()
def delete_entry(self, keyID):
newTbl = []
tbl = open("/var/www/cgi-bin/ClientTable.txt").readlines()
for line in range(len(tbl)):
if tbl[line] != '\n':
tbl_line = tbl[line].rstrip(' \t\r\n\0')
data = tbl_line.split(',')
if keyID != data[4]:
newTbl.append(tbl[line])
client_tbl = open("/var/www/cgi-bin/ClientTable.txt",'w')
for line in range(len(newTbl)):
if newTbl[line] != '\n':
client_tbl.write(newTbl[line])
client_tbl.close()
def foundKey(self, found):
if found == True:
print "Valid Key"
else:
print "Key not valid"
## def send_revocation2(self,keyID):
## print "Sending Revocation to controller"
## try:
## os.system('python ICMP_Revocation.py '+keyID)
## except:
## print "Message not sent"
def send_revocation(self,keyID):
client_tbl = open("/var/www/cgi-bin/RevTable.txt",'a')
client_tbl.write(keyID+'\n')
client_tbl.close()
def main():
c_handle = ClientTable_Handler()
rcvData = "00:00:00:00:00:04,3,mPtpYIcp,s,102"
newmac = '00:00:00:00:00:04'
passkey = 'mPtpYIcp'
#c_handle.add_entry(rcvData)
#c_handle.delete_entry(newmac,passkey)
#c_handle.delete_entry('105')
found, keyID = c_handle.validate_passkey(newmac,passkey)
c_handle.foundKey(found)
if __name__=='__main__':
main()
|
from decimal import Decimal as Dec, getcontext
def PI(maxK=70):
    getcontext().prec = (maxK*14) #For the test values, between 14.19 and 14.4 - Wolfram says it gains about 14 digits per term...
print "Ready to start:"
K, M, L, X, S = 6, 1, 13591409, 1, 13591409
for k in xrange(1, maxK+1):
M = (K**3 - (K<<4)) * M / k**3
L += 545140134
X *= -262537412640768000
S += Dec(M * L) / X
K += 12
if k%10 == 0:
print k
pi = 426880 * Dec(10005).sqrt() / S
print "\n"
print pi
print "\nPi was calculated to " + str(len(str(pi)) - 1) + " digits" # - 1 because the decimal place is not a digit, but is is a character in a string...
f = open('140,000.txt', 'w')
f.write(str(pi))
f.write("\n\n")
f.write("Pi was calculated to " + str(len(str(pi)) - 1) + " digits")
f.close()
PI(10000)
|
# author: Shuaishuai Sun
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import os
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
app = Flask(__name__, template_folder='templates', static_folder='static')
app.config['SECRET_KEY'] = 'Thisissupposedtobesecret!'
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'database.db')
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(15), unique=True)
email = db.Column(db.String(50), unique=True)
password = db.Column(db.String(80))
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
|
# 1. In what order does a test suite execute its cases in a batch run?
# If cases are added to the suite by importing modules, they run in the order they were added
# If cases are added to the suite via discover, they run in ascending ASCII order
#
# 2. What are the ways to load test cases into a test suite?
import unittest
# unittest.defaultTestLoader.discover() # loads every test case under a path whose name starts with "test"
# unittest.TestLoader.loadTestsFromModule() # loads the test cases from the module you want to test
# 3. Write the following unit test
# Run all cases from last week's two-number subtraction and division test classes in one batch, using a test suite
# Hints:
# a. Use a log file to record the test execution log
from Python_0717_job import python_0712_homework
from HTMLTestRunnerNew import HTMLTestRunner
import time
import os
# create the test suite object
one_suite = unittest.TestSuite()
# create the test loader object
one_loader = unittest.TestLoader()
# add the test cases to the suite
one_suite.addTest(one_loader.loadTestsFromModule(python_0712_homework))
# one_suite.addTest(one_loader.discover(start_dir=".", pattern="python*"))
# create the runner object
# one_runner = unittest.TextTestRunner()
if not os.path.exists("test_reports"):  # create the report directory if it does not already exist
    os.mkdir("test_reports")
current_time = time.strftime("%Y-%m-%d", time.localtime())  # current time, formatted
with open("test_reports/report{}.html".format(current_time), mode="wb") as write_report:
    one_runner = HTMLTestRunner(stream=write_report, verbosity=2, title="Test Report", description="11111111", tester="jinbiao")
one_runner.run(one_suite)
|
import json
import urllib
from django.http.response import HttpResponseRedirect
from django.contrib import messages
from django.http import Http404
from django.contrib.auth import login as auth_login
class LoginAjaxMixin(object):
"""
Mixin which authenticates user if request is an ajax request
"""
def form_valid(self, form):
if not self.request.is_ajax():
auth_login(self.request, form.get_user(), backend='whiskysite.backends.EmailBackend')
messages.success(self.request, self.success_message)
return HttpResponseRedirect(self.get_success_url())
class NormalUserLoginMixin(object):
def dispatch(self, request, *args, **kwargs):
if not request.user.is_active:
raise Http404
return super(NormalUserLoginMixin, self).dispatch(request, *args, **kwargs)
|
from django.test import TestCase, Client
from users.models import User
class WalletTestCase(TestCase):
def setUp(self) -> None:
c = Client()
url = '/api/user/create'
new_user = {
'email': 'newuser@email.com',
'username': 'NewUser',
'password': 'NewUserPassword',
'date_of_birth': '1990-06-20'
}
c.post(url, new_user)
self.user1 = User.objects.get(username=new_user['username'])
newer_user = {
'email': 'neweruser@email.com',
'username': 'neweruser@email.com',
'password': 'NewUserPassword',
'date_of_birth': '1990-06-20'
}
self.user2 = User.objects.create_user(
email=newer_user['email'],
username=newer_user['username'],
password=newer_user['password'],
date_of_birth=newer_user['date_of_birth']
)
def test_wallet_creation(self):
self.assertTrue(self.user1.wallet is not None)
self.assertEqual(
self.user1.wallet.shinies,
self.user2.wallet.shinies
)
self.assertEqual(self.user1.wallet.owner, self.user1)
def test_currency_transaction(self):
self.assertEqual(self.user1.wallet.shinies, 0)
wallet = self.user1.wallet
wallet.transact_currency(20, 'shinies')
self.assertEqual(wallet.shinies, 20)
wallet.transact_currency(-10, 'shinies')
self.assertEqual(wallet.shinies, 10)
self.assertEqual(self.user1.wallet.muns, 0)
wallet = self.user1.wallet
wallet.transact_currency(20, 'muns')
self.assertEqual(wallet.muns, 20)
wallet.transact_currency(-10, 'muns')
self.assertEqual(wallet.muns, 10)
def test_send_currency(self):
wallet1 = self.user1.wallet
wallet2 = self.user2.wallet
self.assertEqual(wallet1.shinies, 0)
self.assertEqual(wallet2.shinies, 0)
self.assertEqual(wallet1.muns, 0)
self.assertEqual(wallet2.muns, 0)
# Plump wallet1 so that a trade can take place
wallet1.transact_currency(20, 'shinies')
wallet1.send_currency(wallet2, 7, 'shinies')
self.assertEqual(wallet1.shinies, 13)
self.assertEqual(self.user2.wallet.shinies, 7)
# Plump wallet2 muns
wallet2.transact_currency(34, 'muns')
self.assertEqual(wallet2.muns, 34)
self.assertEqual(wallet1.muns, 0)
wallet2.send_currency(wallet1, 20, 'muns')
self.assertEqual(wallet1.muns, 20)
with self.assertRaises(AttributeError) as e:
# No attr for 'walrus'
wallet1.send_currency(wallet2, 20, 'walrus')
with self.assertRaises(TypeError) as e:
# Must send to wallet object instance
# not user
wallet1.send_currency(self.user2, 20, 'muns')
with self.assertRaises(TypeError) as e:
# Must send an integer ! str('twenty')
wallet1.send_currency(wallet2, 'twenty', 'muns')
def test_balance(self):
wallet1 = self.user1.wallet
self.assertEqual(wallet1.balance, {'shinies': 0, 'muns': 0})
# plump wallet1
wallet1.transact_currency(30, 'shinies')
self.assertEqual(wallet1.balance, {'shinies': 30, 'muns': 0})
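# A minimal Wallet sketch consistent with the tests above (hypothetical; the
# real model is a Django model reachable through user.wallet):
class Wallet:
    CURRENCIES = ('shinies', 'muns')

    def __init__(self, owner):
        self.owner = owner
        self.shinies = 0
        self.muns = 0

    @property
    def balance(self):
        # Snapshot of every currency the wallet knows about.
        return {c: getattr(self, c) for c in self.CURRENCIES}

    def transact_currency(self, amount, currency):
        # getattr raises AttributeError for an unknown currency name.
        setattr(self, currency, getattr(self, currency) + amount)

    def send_currency(self, other, amount, currency):
        if not isinstance(other, Wallet):
            raise TypeError('recipient must be a Wallet')
        if not isinstance(amount, int):
            raise TypeError('amount must be an integer')
        self.transact_currency(-amount, currency)
        other.transact_currency(amount, currency)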
|
from src.contracts.ethereum.erc20 import Erc20
from src.contracts.ethereum.event_listener import EthEventListener
from src.contracts.ethereum.multisig_wallet import MultisigWallet
from src.signer.erc20.impl import _ERC20SignerImpl
from src.signer.eth.signer import EtherSigner
from src.util.common import Token
from src.util.config import Config
from src.util.web3 import web3_provider
class ERC20Signer(EtherSigner):
"""
secretERC20 --> Swap TX --> ERC20
See EtherSigner for a description of what this does - just replace ETH with ERC20 :)
"""
def __init__(self, contract: MultisigWallet, token: Token, private_key: bytes, account: str, config: Config,
**kwargs):
super().__init__(contract, private_key, account, config, **kwargs)
# everything is basically the same, just overload the signer and event listener
token_contract = Erc20(web3_provider(config['eth_node_address']),
token,
contract.address)
self.signer = _ERC20SignerImpl(contract, token, private_key, account, config)
self.event_listener = EthEventListener(token_contract, config)
|
import pandas as pd
import numpy as np
import json
import os
# This script does four things :
# Delete recommended baskets with id 1111111111111
# Delete too old baskets for which we have no data available for training
# Delete useless columns
# Convert taffy to csv
def clean_taffy_liste(root_dir, directory, province) :
df = pd.read_json(directory + '/taffy.json', dtype = {'i': 'str'}) # Open taffy
path = root_dir + 'db_train/' + str(province) + '/data/' # Set path to files from user's province
df.drop(['___id', '___s', 'quantity'], axis = 1, inplace = True) # Drop useless columns
grouped = df.groupby(["c"]) # Group on timestamp's basket
    # Get the earliest file timestamp in the database (file names start with a 13-digit ms timestamp)
    timestamp = min(int(file[:13]) for file in os.listdir(path))
# Delete recommended basket and basket with no available data for training
for name, group in grouped :
if name == 1111111111111 :
df.drop(group.index, inplace = True)
elif name < timestamp :
df.drop(group.index, inplace = True)
df[["i"]] = df[["i"]].astype(str)
# Save cleaned taffy to csv
df.to_csv(directory + '/taffy_clean.csv', index = False)
print('Taffy cleaned and saved.')
|
#!/usr/bin/python
# coding=utf8
__author__ = "zhangchitc@gmail.com"
class Paper:
def __init__ (self, title, abst):
self.title = title
self.abst = abst
self.authors = []
def add_author (self, author):
self.authors.append (author)
def __str__ (self):
ret = "Title:\n %s\n" % self.title
ret += "Authors:\n"
for author in self.authors:
ret += " %s\n" % author
ret += "\n\n"
return ret
class Author:
def __init__ (self, name, affn):
self.name = name
self.affn = affn
def __str__ (self):
ret = "%s (%s)" % (self.name, self.affn)
return unicode (ret).encode ('utf-8')
if __name__ == '__main__':
print Paper ('abc', 'def')
print Author ('abc', 'def')
|
print("Ravi Hamse")
|
''' Copyright 2012 Smartling, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this work except in compliance with the License.
* You may obtain a copy of the License in the LICENSE file, or at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
'''
#FileApi class implementation
from FileApiBase import FileApiBase
class SmartlingFileApi(FileApiBase):
def __init__(self, host, apiKey, projectId):
FileApiBase.__init__(self, host, apiKey, projectId)
def upload(self, uploadData):
return self.commandUpload( uploadData )
def list(self, **kw):
return self.commandList( **kw )
def get(self, fileUri, locale, **kw):
return self.commandGet(fileUri, locale, **kw)
def status(self, fileUri, locale, **kw):
return self.commandStatus( fileUri, locale, **kw )
def rename(self, fileUri, newUri, **kw):
return self.commandRename( fileUri, newUri, **kw )
def delete(self, fileUri, **kw):
return self.commandDelete(fileUri, **kw)
class SmartlingFileApiFactory:
sandbox_host = 'sandbox-api.smartling.com'
api_host = 'api.smartling.com'
def getSmartlingTranslationApi(self, productionMode, apiKey, projectId):
if (productionMode):
return SmartlingFileApi(self.api_host, apiKey, projectId)
return SmartlingFileApi(self.sandbox_host, apiKey, projectId)
def getSmartlingTranslationApiProd(self, apiKey, projectId):
return SmartlingFileApi(self.api_host, apiKey, projectId)
|
import urllib.request, urllib.error, urllib.parse
import json
address = input("Enter a location: ")
serviceurl = 'http://py4e-data.dr-chuck.net/json?'
api_key = 42
parms = dict() # this dictionary is urlencoded into the URL query string below
parms['address'] = address
parms['key'] = api_key
url = serviceurl + urllib.parse.urlencode(parms) #form the URL
print('Retrieving URL: ', url)
file = urllib.request.urlopen(url) #retrieve the URL data
data = file.read().decode() #read and decode the data because the data is in UTF-8 form
print('Retrieved', len(data), 'characters')
jsondata = json.loads(data)#load the data into python data using json.loads
address_id = jsondata['results'][0]['place_id'] # pull place_id out of the parsed JSON, indexing it like nested dictionaries and lists
print(address_id)
|
#!/usr/bin/python
count = 1
word = '<span class="word"><a class="swd" href="x-dictionary:r:'
a = '">'
wd = 'word</a></span>'
left = '<span class="left"><a class="slf" href="x-dictionary:r:'
lb = 'leftpage</a></span>'
right = '<span class="right"><a class="srt" href="x-dictionary:r:'
rb = 'right</a></span>'
while (count < 90):
print word + str(count+1) + a +wd,left + str(count) + a +lb,right + str(count+2) + a + rb
count = count + 1
|
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 04 10:16:17 2017
@author: liuka
"""
import cv2
import numpy as np
import os
import time
import pdb
import pyaudio
import wave
import utils
from array import array
class Audio(object):
def __init__(self):
        pass
#self._cap = cv2.VideoCapture(2)
def record_voice(self, wav_file, record_seconds):
chunk = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
RECORD_SECONDS = record_seconds
threshold = 10
max_value = 0
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, output=True, frames_per_buffer=chunk)
print '* recording'
frames = []
for i in range( 0, 44100/chunk * RECORD_SECONDS):
data = stream.read(chunk)
as_ints = array('h', data)
max_value = max(as_ints)
if max_value > threshold:
frames.append(data)
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(wav_file, 'wb')
wf.setnchannels(1)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
#utils.wav_to_flac(wav_file, flac_file)
def play_voice(self,wav_file):
#define stream chunk
chunk = 1024
#open a wav format music
f = wave.open(wav_file,"rb")
#instantiate PyAudio
p = pyaudio.PyAudio()
#open stream
stream = p.open(format = p.get_format_from_width(f.getsampwidth()),
channels = f.getnchannels(),
rate = f.getframerate(),
output = True)
#read data
data = f.readframes(chunk)
        #play stream
while data != '':
stream.write(data)
data = f.readframes(chunk)
#stop stream
stream.stop_stream()
stream.close()
#close PyAudio
p.terminate()
def get_one_frame(self, cap):
if cap.isOpened() :
ret,frame = cap.read()
return frame
else:
print "cap is closed!"
return False
if __name__ == "__main__":
# cap = cv2.VideoCapture(2)
# while(cap.isOpened()):
# # Capture frame-by-frame
# ret, frame = cap.read()
# # Our operations on the frame come here
# #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# gray = frame
# # Display the resulting frame
# cv2.imshow('frame',frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# # When everything done, release the capture
# cap.release()
# cv2.destroyAllWindows()
# wav_file = r"hello_world.wav"
# for i in range(3):
# play_voice(wav_file)
# time.sleep(2)
#test audio
audio = Audio()
wav_file = 'wav_test.wav'
audio.record_voice(wav_file,5)
print "recording endl, playing it"
audio.play_voice(wav_file)
#test video
# cap = cv2.VideoCapture(0)
# while(True):
# img = audio.get_one_frame(cap)
# cv2.imshow('frame',img)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# cap.release()
# cv2.destroyAllWindows()
|
from Pessoa import Pessoa
def insere_dados():
    nome = input("Enter your name: ")
    idade = int(input("Enter your age: "))
    sexo = input("Enter your sex (M or F): ")
    cidade = input("Enter your city: ")
    estado = input("Enter your state (e.g. CE): ")
    return Pessoa(nome, idade, sexo, cidade, estado)
if __name__ == "__main__":
insere_dados()
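# A minimal Pessoa sketch matching the constructor call above (hypothetical;
# the real class lives in Pessoa.py):
class Pessoa:
    def __init__(self, nome, idade, sexo, cidade, estado):
        self.nome = nome
        self.idade = idade
        self.sexo = sexo
        self.cidade = cidade
        self.estado = estado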
|
import pandas as pd
def create_mock_pv_info():
"""Creates PV info data frame.
:return: (*pandas.DataFrame*) -- mock PV info.
"""
plant_code = [1, 2, 3, 4, 5]
state = ["UT", "WA", "CA", "CA", "CA"]
capacity = [10, 5, 1, 2, 3]
single = ["N", "Y", "Y", "Y", "Y"]
dual = ["Y", "N", "N", "N", "Y"]
fix = ["N", "Y", "Y", "N", "N"]
pv_info = pd.DataFrame(
{
"State": state,
"Nameplate Capacity (MW)": capacity,
"Single-Axis Tracking?": single,
"Dual-Axis Tracking?": dual,
"Fixed Tilt?": fix,
"Plant Code": plant_code,
}
)
return pv_info
|
#import rospy
#from nav_msgs.msg import Odometry
#from geometry_msgs.msg import Twist
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader
import time
import matplotlib.pyplot as plt
from numpy.linalg import inv
import anfis
from membership import TrapezoidalMembFunc, make_trap_mfs, make_bell_mfs, BellMembFunc, Zero, make_zero
from experimental import train_anfis, test_anfis
import sys
from ddpg import DDPGagent
from model import *
dtype = torch.float
model= torch.load('anfis_initial.model1')
#model= torch.load('p2_p3_nodown.model')
print("Tewdes")
print(model.get_action(np.array([0.0,0.0,0.0])))
#new_state = np.array([0.0,0.0,0.0])
#action = model.get_action(new_state)
#print(action)
|
import sqlite3
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from kennelapp.models import Cat
from kennelapp.models import Kennel
# from kennelapp.models import model_factory
from ..connection import Connection
def get_cat(cat_id):
with sqlite3.connect(Connection.db_path) as conn:
conn.row_factory = sqlite3.Row
db_cursor = conn.cursor()
db_cursor.execute("""
SELECT
c.id,
c.name,
c.specie,
c.owner,
c.admitted,
c.caretaker_id,
c.location_id
from kennelapp_cat c
where c.id = ?
""", (cat_id,))
return db_cursor.fetchone()
def get_kennels():
with sqlite3.connect(Connection.db_path) as conn:
conn.row_factory = sqlite3.Row
db_cursor = conn.cursor()
db_cursor.execute("""
select
k.id,
k.title,
k.address
from kennelapp_kennel k
""")
return db_cursor.fetchall()
@login_required
def cat_form(request):
if request.method == 'GET':
kennels = get_kennels()
template = 'cats/form.html'
context = {
'all_kennels': kennels
}
return render(request, template, context)
@login_required
def cat_edit_form(request, cat_id):
if request.method == 'GET':
cat = get_cat(cat_id)
kennels = get_kennels()
template = 'cats/form.html'
context = {
'cat': cat,
'all_kennels': kennels
}
return render(request, template, context)
|
# VOWELS = 'aeiou'
#
#
# def disemvowel(string):
# return ''.join(a for a in string if not a.lower() in VOWELS)
def disemvowel(s):
return s.translate(None, 'aeiouAEIOU')
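# str.translate(None, chars) only exists in Python 2; a Python 3 equivalent:
# def disemvowel(s):
#     return s.translate(str.maketrans('', '', 'aeiouAEIOU'))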
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'chendaopeng'
import os, logging
logging.basicConfig(level=logging.INFO)
import asyncio, json, time
from datetime import datetime
from aiohttp import web
from www.app.orm import createpool
from jinja2 import Environment, FileSystemLoader
import orm
from .factorys import logger_factory, auth_factory, data_factory, response_factory, datetime_filter
from .coroweb import add_routes, add_static
from .config import configs
def init_jinja2(app, **kw):
    logging.info('init jinja2...')
    # Build the template options: delimiters for code blocks and variables, escaping, reloading
    options = dict(
        autoescape=kw.get('autoescape', True),  # when True, characters like <, > and & in rendered variables are HTML-escaped
        block_start_string=kw.get('block_start_string', '{%'),  # opening delimiter for code blocks
        block_end_string=kw.get('block_end_string', '%}'),  # closing delimiter for code blocks
        variable_start_string=kw.get('variable_start_string', '{{'),  # opening delimiter for variables
        variable_end_string=kw.get('variable_end_string', '}}'),  # closing delimiter for variables
        auto_reload=kw.get('auto_reload', True)  # Jinja2 checks the template file on each use and reloads it if it changed; set False if performance matters
    )
    # Take the template directory from the `path` keyword argument, defaulting to ./templates
    path = kw.get('path', os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates'))
    logging.info('set jinja2 template path: %s' % path)
    # Environment is a core Jinja2 class; an instance holds the configuration and global objects
    # and loads templates from the local file system or another location
    env = Environment(loader=FileSystemLoader(path), **options)
    # Take the optional `filters` argument, a dict mapping filter names to filter functions,
    # and register each one on the environment
    filters = kw.get('filters')
    if filters is not None:
        for name, f in filters.items():
            env.filters[name] = f
    # Attach the template environment to the web app
    app['__templating__'] = env
async def create_server(loop):
    # Create the database connection pool; the db settings come from the config file
    await createpool(loop=loop, **configs.db)
    app = web.Application(loop=loop, middlewares=[logger_factory, auth_factory, data_factory, response_factory])
    # Initialize the jinja2 templates, registering datetime_filter from factorys
    init_jinja2(app, filters=dict(datetime=datetime_filter))
    # Register the request handlers for each route
    add_routes(app, 'app.API_handlers')
    add_routes(app, 'app.route_handlers')
    # Register the static file directory
    add_static(app)
srv = await loop.create_server(app.make_handler(), '127.0.0.1', 9000)
logging.info('server started at http://127.0.0.1:9000...')
return srv
|
import sqlite3
import csv
import xlrd
import xlwt
import pandas as pd
# Returns every user whose assigned marketing agent has left the company
class inertialTestersQuery:
    '''
    Select 5 users who followed an investment only once or twice between
    20190501 and 20190621 and then gave up
    '''
    '''
    Find all users with an alpha follow-on investment after a given date
    Date format: 20190621
    '''
    # ['13917035937'...]
    def getallInertialTestersMobile(self):
        with sqlite3.connect(r'C:\sqlite\db\hxdata.db') as db:
            sqStatement = 'SELECT mobile FROM inertialtesters'
            allInertialTesters = []
            for mobile in db.execute(sqStatement):
                allInertialTesters.append(str(mobile).replace('(', '').replace(')', '').replace('\'', '').replace(',', ''))
                print(mobile)
            return allInertialTesters
#query = inertialTestersQuery()
#result = query.getallInertialTestersMobile()
#print(result)
|
#!/usr/bin/env python
# coding=utf-8
from torch.utils.data import DataLoader
import torch
from PA100KDataset import PA100KDataset
from HydraPlusNet import *
import numpy as np
from torchvision import transforms
from torch.autograd import Variable
test_batch_size = 1
load_path = "./weight/InceptionV3_5000.weight"
label_name = ["Female","AgeOver60","Age18-60","AgeLess18","Front","Side",
"Back","Hat","Glasses","HandBag","ShoulderBag","Backpack","HoldObjectsInFront",
"ShortSleeve","LongSleeve","UpperStride","UpperLogo","UpperPlaid","UpperSplice",
"LowerStripe","LowerPattern","LongCoat","Trousers","Shorts","Skirt&Dress","boots"]
torch.cuda.set_device(0)
test_dataset = PA100KDataset("/home/dataset/human_attribute/PA100K/test.txt",
transform = transforms.Compose([transforms.ToPILImage(),transforms.Resize(320),transforms.RandomCrop((299,299)),transforms.ToTensor()]))
test_loader = DataLoader(test_dataset,test_batch_size,True)
hydraplus_net = HydraPlusNet(26,is_fusion = True)
checkpoint = torch.load(load_path)
hydraplus_net.load_state_dict(checkpoint["net"])
test_loss = 0
test_acc = 0
test_num_iter = 0
hydraplus_net.cuda()
hydraplus_net.eval()
for test_batch_img,test_batch_label in test_loader:
test_num_iter += 1
test_batch_img = Variable(test_batch_img.cuda())
test_batch_label = Variable(test_batch_label.cuda())
# bs,ncrops,c,h,w = test_batch_img.size()
output = hydraplus_net(test_batch_img)
# output_avg = output.view(bs,ncrops,-1).mean(1)
result = (output>=0)
result = result.float()
test_correct = torch.sum((result == test_batch_label),0)
test_correct = test_correct.cpu().numpy()
test_acc += test_correct
test_acc = test_acc/float(len(test_dataset))
print "test acc:"
mAP = 0
for i in range(26):
print label_name[i],":",test_acc[i]
print "mAP:",(np.sum(test_acc)/26)
|
from main.page.base import BasePage
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
class InboxTalkPage(BasePage):
_pl = "inbox-talk.pl"
#url = "https://www.tokopedia.com/inbox-talk.pl"
#Locators
#_filter_all_loc = (By.CSS_SELECTOR, 'div#talk-box div.row-fluid div.span12 span.pull-left small a.filter-all')
_filter_all_loc = (By.XPATH, '/html/body/div[1]/div[5]/div/div[2]/div/div[1]/div/span/small/a[1]')
_filter_unread_loc = (By.XPATH, '/html/body/div[1]/div[5]/div/div[2]/div/div[1]/div/span/small/a[2]')
_next_page_loc = (By.CSS_SELECTOR, 'div.row-fluid div.text-right div.pagination ul li a i.icon-chevron-right')
#Tab Locators
_tab_all_loc = (By.XPATH, '//*[@id="tab-navigation"]/li[1]/a')
_tab_my_product_loc = (By.XPATH, '//*[@id="tab-navigation"]/li[2]/a')
_tab_following_loc = (By.XPATH, '//*[@id="tab-navigation"]/li[3]/a')
    _total_message_loc = (By.XPATH, '//*[@id="tab-navigation"]/li[2]/a/span/span')  # the number shown on the counter in the My Product tab
#Talk Content
_talk_box = (By.XPATH, '//*[@id="talk-list-container"]')
_list_of_message_loc = (By.XPATH, '//*[@id="talk-box"]/div[2]/div/ul')
#_unread_flag_loc_1 = '//*[@id=\"talk-list-container" and @class=\"'
#_unread_flag_loc_2 = '\"]/div/div[2]/div[1]/a'
_unread_flag_loc = ' div.list-box-content div.list-box-detail div.first-talk span.unread-box'
    _see_all_comment_link_loc = (By.CSS_SELECTOR, 'div.list-box-detail a.list-box-viewalltalk small span')  # only appears if there are more than 3 replies
_talkID_loc = 'div.span12 ul li.'
_reply_textarea_loc = ' div.list-box-replyholder div.row-fluid.talk-comment-input div div div div textarea:nth-child(2)'
#_reply_button_loc = (By.CSS_SELECTOR, 'div.list-box-textarea div.pull-left button#submit-talk-comment') #only visible if textarea is filled
#_reply_button_loc = (' div.list-box-content div.list-box-detail div.list-box-replyholder div.row-fluid.talk-comment-input div.list-box-replyholder.talk-response-input div.list-box-replycomment div.list-box-textarea div.pull-left button#submit-talk-comment')
_reply_button_loc = (' div.list-box-replyholder div.row-fluid.talk-comment-input div div div div button#submit-talk-comment')
_reply_list_loc = ' div.list-box-replyholder div#talk-comment-container div.talk-comment-list'
_sender_loc = ' div.list-box-content div.list-box-detail div.first-talk div.list-box-replybuyer div.list-box-sellerreplyinfo a small b'
_replier_name_loc_1 = ' div.list-box-content div.list-box-detail div.list-box-replyholder div#talk-comment-container div#'
_replier_name_loc_2 = ' div.list-box-replycomment div.list-box-replybuyer div.list-box-sellerreplyinfo span.pull-left b small a'
_delete_button_loc = ' div.list-box-content div.list-box-detail div.first-talk a span#delete-talk-button'
_single_talk_body_loc = ' div div.list-box-detail div.first-talk div.list-box-replybuyer div.list-box-sellerreplyinfo'
_delete_button_at_popup_loc = (By.CSS_SELECTOR, 'div.container-fluid div.row-fluid div form div.dialog-footer button#del-talk-button')
_product_name_loc = ' div.list-box-content div.list-box-product div.list-box-top a b'
_product_link_loc = ' div.list-box-content div.list-box-product div.list-box-top a'
#Actions
def open(self, site, _pl=""):
self._open(site, self._pl)
def next_page_element(self):
try:
self.find_element(*self._next_page_loc)
#print ('Next page available')
return (1)
except NoSuchElementException:
print ("Next page not found")
return (0)
def click_next_page(self):
#print(str(self.find_element(*self._next_page_loc)) + " next page found")
NextPage = self.next_page_element()
if NextPage == 1:
self.find_element(*self._next_page_loc).click()
print('Goto Next Page')
elif NextPage == 0:
print("end of page")
def select_tab_all(self):
        tab_all = self.find_element(*self._tab_all_loc)  # store the located element in a variable
        self._click(tab_all)  # click it using the framework's own click helper
def select_tab_my_product(self):
tab_my_product = self.find_element(*self._tab_my_product_loc)
self._click(tab_my_product)
def select_filter_unread(self):
self.check_visible_element(*self._filter_unread_loc)
filter_unread_loc = self.find_element(*self._filter_unread_loc)
try:
WebDriverWait(self.driver,10).until(EC.element_to_be_clickable(self._filter_unread_loc))
print ("JS clickable ni")
self.click_on_javascript(filter_unread_loc)
except NoSuchElementException:
print ("JS not found")
self._click(filter_unread_loc)
def select_filter_all(self):
self.check_visible_element(*self._filter_all_loc)
filter_all_loc = self.find_element(*self._filter_all_loc)
try:
WebDriverWait(self.driver,10).until(EC.element_to_be_clickable(self._filter_all_loc))
print ("JS clickable ni")
self.click_on_javascript(filter_all_loc)
except NoSuchElementException:
print ("JS not found")
#self._click(filter_unread_loc)
def check_talk_discussion_exists(self):
self.check_visible_element(*self._talk_box)
talk_found = self.find_element(*self._talk_box)
try:
if talk_found.is_displayed():
print("Talk Found")
except NoSuchElementException:
print ("No Talk Found")
def get_message_counter_value(self):
try:
jumlah_message = self.find_element(*self._total_message_loc).text
jumlah_message = int(jumlah_message)
return (jumlah_message)
except NoSuchElementException:
print('There is no message here')
return(0)
def get_list_message(self):
try:
list_message = self.find_elements(*self._talk_box)
return(list_message)
except NoSuchElementException:
            print('No messages found')
list_message = []
return(list_message)
def list_all_message_ID(self):
list_msg = self.get_list_message()
list_msg_ID = []
print('Counting message ....')
for i in list_msg:
#print(i.get_attribute('class'))
list_msg_ID.append(i.get_attribute('class'))
isNextPageAvailable = self.next_page_element()
while isNextPageAvailable == 1:
self.click_next_page()
time.sleep(5)
next_list_messages = self.get_list_message()
for i in next_list_messages:
#print(i.get_attribute('class'))
list_msg_ID.append(i.get_attribute('class'))
list_msg.extend(next_list_messages)
isNextPageAvailable = self.next_page_element()
print('end of inbox page')
        print('List of message IDs in the inbox:')
jml_msg = len(list_msg)
for i in list_msg_ID:
print(i)
print('List of message has been successfully populated...')
return(list_msg_ID)
def all_message_counts(self):
self.select_filter_all()
all_msg = len(self.list_all_message_ID())
return(all_msg)
#UNREAD TALKS#
def get_unread_flag(self, new_message_id):
#unread_sign = self._unread_flag_loc_1 + str(new_message_id) + self._unread_flag_loc_2
unread_sign = self._talkID_loc + str(new_message_id) + self._unread_flag_loc
try:
#unread_sign = self._talkID_loc + str(new_message_id) + self._unread_flag_loc
self.check_visible_element(By.CSS_SELECTOR, unread_sign)
print(unread_sign)
#self.find_element(By.CSS_SELECTOR, unread_sign)
return(1)
except:
print(unread_sign)
print('Unread flag not displayed')
return(0)
def list_all_unread_message_ID(self):
self.select_tab_my_product()
self.select_filter_unread()
        print('Entered the Unread tab')
unread_msg = self.get_list_message()
unread_msg_ID = []
for i in unread_msg:
unread_msg_ID.append(i.get_attribute('class'))
isNextPageAvailable = self.next_page_element()
while isNextPageAvailable == 1:
self.click_next_page()
time.sleep(5)
next_list_messages = self.get_list_message()
for i in next_list_messages:
unread_msg_ID.append(i.get_attribute('class'))
unread_msg.extend(next_list_messages)
isNextPageAvailable = self.next_page_element()
print('end of inbox page')
        print('List of unread message IDs in the inbox:')
for i in unread_msg_ID:
print(i)
print('List of message has been successfully populated...')
return(unread_msg_ID)
#actual number of all messages
def unread_message_counts(self):
self.select_filter_unread()
all_unread_msg = len(self.list_all_unread_message_ID())
return(all_unread_msg)
#REPLY#
def write_and_send_reply(self, talk_ID, reply_talk):
textarea = self._talkID_loc + talk_ID + self._reply_textarea_loc
reply_btn = self._talkID_loc + talk_ID + self._reply_button_loc
_reply_btn = (By.CSS_SELECTOR, reply_btn)
try:
self.find_element(By.CSS_SELECTOR, textarea).click()
time.sleep(1)
self.find_element(By.CSS_SELECTOR, textarea).send_keys(reply_talk)
            print('reply written successfully')
time.sleep(2)
#WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable(_reply_btn))
#self.check_visible_element(_reply_btn)
#print('replybtn found')
self.find_element(By.CSS_SELECTOR, reply_btn).click() #should be visible after send_keys is performed
#self.find_element(By.CSS_SELECTOR, reply_btn).click()
#self.click_on_javascript(self.find_element(_reply_btn))
print('reply sent')
except Exception as inst:
print(inst)
def get_all_replyID_within_a_talkID(self, talk_ID):
replyID_list = []
reply = self._talkID_loc + talk_ID + self._reply_list_loc
try:
reply_list = self.find_elements(By.CSS_SELECTOR, reply)
print(reply_list)
for i in reply_list:
replyID_list.append(i.get_attribute('id'))
print(i.get_attribute('id'))
return(replyID_list)
except NoSuchElementException:
print('There is no reply within this talk_ID')
return(replyID_list)
def get_main_talk_sender(self, talk_ID): #get the username of talk creator
sender = self._talkID_loc + talk_ID + self._sender_loc
sender_name = self.find_element(By.CSS_SELECTOR, sender)
return(sender_name)
def get_last_reply_sender_name(self, talk_ID, reply_ID): #get the username of reply senders
name_location = self._talkID_loc + talk_ID + self._replier_name_loc_1 + reply_ID + self._replier_name_loc_2
reply_sender_name = self.find_element(By.CSS_SELECTOR, name_location)
return(reply_sender_name)
#DELETE TALK
def delete_talk(self, talk_ID):
selected_talk = self._talkID_loc + talk_ID + self._single_talk_body_loc
delete_button = self._talkID_loc + talk_ID + self._delete_button_loc
self.mouse_hover_to(By.CSS_SELECTOR, selected_talk)
print('Hovering to talk')
#self.mouse_hover_to(By.CSS_SELECTOR, delete_button)
time.sleep(2)
self.find_element(By.CSS_SELECTOR, delete_button)
self.mouse_hover_to(By.CSS_SELECTOR, delete_button).click()
self.find_element(*self._delete_button_at_popup_loc).click()
print('Delete button clicked')
def delete_at_popup(self):
try:
self.find_element(*self._delete_button_at_popup_loc).click()
except:
            raise NoSuchElementException('Pop up delete button does not exist')
def get_product_name_of_talk_to_be_deleted(self, talk_ID):
product = self._talkID_loc + talk_ID + self._product_name_loc
product_name = self.find_element(By.CSS_SELECTOR, product).text
return(product_name)
def get_product_link_of_talk_to_be_deleted(self, talk_ID):
link = self._talkID_loc + talk_ID + self._product_link_loc
product_link = self.find_element(By.CSS_SELECTOR, link).get_attribute('href')
return(product_link)
|
def to_weird_case(string):
return ' '.join(''.join(a.upper() if i % 2 == 0 else a.lower() for i, a in
enumerate(word)) for word in string.split())
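# A minimal usage sketch (the sample string is illustrative):
# >>> to_weird_case('This is a test')
# 'ThIs Is A TeSt'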
|
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from .models import Resource, Manager
def validate_manager_name(value):
    # exists() avoids counting every matching row just to test for a match
    if Manager.objects.filter(name=value).exists():
        raise ValidationError('Manager with name "{}" already exists.'.format(value), code='invalid')
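# A minimal usage sketch (the model field shown is an assumption, not part of this module):
# class Manager(models.Model):
#     name = models.CharField(max_length=255, validators=[validate_manager_name])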
|
import json
import mock
from pygeoirish.geocoder import (
assemble_comparison,
base_filter,
read_townlands,
serialize,
extract_prefered_addresses,
geocode
)
from .fixtures import (
fixture_test_assemble_comparison,
fixture_assemble_comparison_onedistance_nexac,
fixture_items_base_filter,
fixture_expected_base_filter,
fixture_read_townlands,
fixture_read_townlands_expected,
fixture_test_serializer_in,
fixture_test_serializer_out,
fixture_prefered_exact,
fixture_prefered_nexact,
fixture_comparer_basic,
fixture_geocode_basic,
fixture_comparer_colision_match,
fixture_geocode_colision_match,
fixture_comparer_colision_aproximate,
fixture_geocode_colision_aproximate,
fixture_comparer_fullcolision_match,
fixture_geocode_fullcolision_match,
fixture_comparer_county,
fixture_geocode_county
)
def compare_dicts(dicta, dictb):
dicta = json.dumps(dicta, sort_keys=True)
dictb = json.dumps(dictb, sort_keys=True)
return dicta == dictb
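# compare_dicts is an order-insensitive equality check: both dicts are serialized
# with sorted keys, so e.g. compare_dicts({'a': 1, 'b': 2}, {'b': 2, 'a': 1}) is True.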
def test_assemble_comparison():
item = {
'English_Name': 'Wicklow',
'County': 'Some County'
}
final = assemble_comparison('WICKLOW', 'SOME COUNTY', item)
assert compare_dicts(
final,
fixture_test_assemble_comparison
)
def test_assemble_comparison_onedistance_nexac():
item = {
'English_Name': 'Wicklow',
'County': 'Some County'
}
final = assemble_comparison('WICKLOW', 'SOME COUNTE', item)
assert compare_dicts(
final,
fixture_assemble_comparison_onedistance_nexac
)
def test_base_filter():
english_name = 'NITEROI'
county = 'RIO DE JANEIRO'
result = base_filter(
english_name,
county,
fixture_items_base_filter
)
assert compare_dicts(
result,
fixture_expected_base_filter
)
@mock.patch(
'pygeoirish.geocoder._read_townlands',
return_value=fixture_read_townlands
)
def test_read_townlands(_rt):
result = read_townlands()
_rt.assert_called_once()
assert compare_dicts(
result,
fixture_read_townlands_expected
)
def test_serializer_in():
output = serialize(fixture_test_serializer_in, 'alevel')
assert compare_dicts(
output,
fixture_test_serializer_out
)
def test_extract_prefered_addresses_exact():
result = extract_prefered_addresses(
fixture_prefered_exact
)
assert len(result) == 1
def test_extract_prefered_addresses_nexact():
result = extract_prefered_addresses(
fixture_prefered_nexact
)
assert len(result) == 2
@mock.patch(
'pygeoirish.geocoder.comparers',
fixture_comparer_basic
)
def test_basic_geocode():
result = geocode('Some English Name, Some County')
assert compare_dicts(
result,
fixture_geocode_basic
)
@mock.patch(
'pygeoirish.geocoder.comparers',
fixture_comparer_basic
)
def test_basic_geocode_exaustion():
result = geocode('Some English Name, Doesnt Matters,Some County')
assert compare_dicts(
result,
fixture_geocode_basic
)
@mock.patch(
'pygeoirish.geocoder.comparers',
fixture_comparer_colision_match
)
def test_collision_match_geocode():
result = geocode('Some English Name, Some County')
assert compare_dicts(
result,
fixture_geocode_colision_match
)
@mock.patch(
'pygeoirish.geocoder.comparers',
fixture_comparer_colision_aproximate
)
def test_collision_aproximate_geocode():
result = geocode('Some English Name, Some County')
assert compare_dicts(
result,
fixture_geocode_colision_aproximate
)
@mock.patch(
'pygeoirish.geocoder.comparers',
fixture_comparer_fullcolision_match
)
def test_fullcollision_aproximate_geocode():
result = geocode('Some English Name, Some County')
assert compare_dicts(
result,
fixture_geocode_fullcolision_match
)
@mock.patch(
'pygeoirish.geocoder.comparers',
fixture_comparer_county
)
def test_geocode_county():
result = geocode("A County")
assert compare_dicts(
result,
fixture_geocode_county
)
|
from metrics import full_wer
import sys
system = sys.argv[1]
code = sys.argv[2]
if system == "microsoft" or system == "msoft":
system = "msoft"
sub_dir = "msoft/"
elif system == "ibm":
sub_dir = "ibm/"
elif system == "google":
sub_dir = "google/"
else:
print("System not implemented") ## TODO Add elif statements as new systems are added.
# Add all methods to loop through.
methods = []
methods.append("swbc-gold+fp+et+res")
methods.append("swbc-gold+fp+et-res")
methods.append("swbc-gold+fp-et+res")
methods.append("swbc-gold+fp-et-res")
methods.append("swbc-gold-fp+et+res")
methods.append("swbc-gold-fp+et-res")
methods.append("swbc-gold-fp-et+res")
methods.append("swbc-gold-fp-et-res")
for method in methods:
# For each method, compare system outputs to gold transcripts cleaned with given method.
c_gold_A = "./results/cleaned-golds/" + method + "/sw" + code + "-A.out"
c_gold_B = "./results/cleaned-golds/" + method + "/sw" + code + "-B.out"
trans_a = "./results/" + sub_dir + "split-system-trans-text/left-sw0" + code + "-trans.txt"
trans_b = "./results/" + sub_dir + "split-system-trans-text/right-sw0" + code + "-trans.txt"
# Find WER assuming left channel is speaker A.
left_a = full_wer(c_gold_A, trans_a)
right_b = full_wer(c_gold_B, trans_b)
mean1 = (left_a + right_b)/2
# Find WER assuming left channel is speaker B.
left_b = full_wer(c_gold_B, trans_a)
right_a = full_wer(c_gold_A, trans_b)
mean2 = (left_b + right_a)/2
# Store minimum as that is WER when channel and speaker are correctly matched.
correct_mean = min(mean1, mean2)
outfile = "./results/" + sub_dir + "split-wer/" + method + "/wer-" + code + "-mean.txt"
with open(outfile, 'w') as f:
f.write(str(correct_mean))
|
''' download all relevant articles similar to original articles'''
#trainFile = '../data/tagged_data/whole_text_full_city/train.tag'
import sys, pickle, pdb
import inflect
import query as query
from constants import int2tags
from train import load_data
# engine for spelling out numeric identifiers below; the original referenced
# inflect_engine without defining it, so the inflect package is assumed here
inflect_engine = inflect.engine()
NUM_ENTITIES = len(int2tags)
#EMA queries
#EXTRA_QUERY='( state | country | india | china | province )'
#EXTRA_QUERY='( adulterated | fake | food | products | samples )'
#EXTRA_QUERY='( food | oil | milk | honey | price | brands | formula )'
EXTRA_QUERY=''
# EXTRA_QUERY='(adulterated | scandal | countries | fake)'
# Shooter queries
# EXTRA_QUERY='( injured | wounded | victim )'
#EXTRA_QUERY='( suspect | shooter | identified | arrested | charged )'
if __name__ == '__main__':
trainFile = sys.argv[1]
saveFile = sys.argv[2]
extra_query = sys.argv[3]
#load data and process identifiers
articles, identifiers = load_data(trainFile)
identifiers_tmp = []
titles = []
for e in identifiers:
e = e.split(',')
for i in range(NUM_ENTITIES):
try:
e[i] = int(e[i])
e[i] = inflect_engine.number_to_words(e[i])
except:
pass
identifiers_tmp.append(e[:NUM_ENTITIES])
titles.append(','.join(e[NUM_ENTITIES:]))
identifiers = identifiers_tmp
#download related files
downloaded_articles = []
with open(saveFile, "wb" ) as f:
for i in range(len(titles)):
tmp = query.download_articles_from_query(titles[i]+' '+extra_query,' '.join(articles[i][0]),'bing')
downloaded_articles.append(tmp)
pickle.dump([articles[i], titles[i], identifiers[i], downloaded_articles[i]], f)
            print('\r', i, '/', len(titles))
        print()
#save to file
print "Saved to file", saveFile
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
__author__ = 'jimit'
__CreateAt__ = '2019/3/7 15:28'
from django.contrib.auth.decorators import login_required
class LoginRequiredMixin(object):
@classmethod
def as_view(cls, **initialkwargs):
view = super(LoginRequiredMixin, cls).as_view(**initialkwargs)
return login_required(view)
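# A minimal usage sketch (view and URL are illustrative):
# class ProfileView(LoginRequiredMixin, TemplateView):
#     template_name = 'profile.html'
# urlpatterns = [path('profile/', ProfileView.as_view())]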
|
# coding=utf-8
# @Author: wjn
from api.browser.api_browser_action import BrowserAction  # modify per test case
from common.util import TYRequest
import unittest
from common.get_value import GetValue
from common.get_log import LogInfo
import json
import time
class TestBrowserActionAjax(unittest.TestCase, GetValue, LogInfo):  # modify per test case
    @classmethod
    @LogInfo.get_error
    def setUpClass(cls) -> None:
        cls.log.info('TestBrowserActionAjax test cases starting')  # modify per test case
        api = BrowserAction()  # modify per test case
        cls.url = api.browser_action()  # modify per test case
        cls.log.info('URL obtained successfully, URL: ' + cls.url)
        cls.request = TYRequest()
@LogInfo.get_error
def setUp(self):
pass
@LogInfo.get_error
    def test_browser_action_ajax_success(self):  # modify per test case
body = 'c{"optCustomParam":{}}\na{"type":"ajax","subType":null,"start":1594277604425,"end":1594277608445,"duration":4020,"data":{"type":"ajax","id":0,"cid":1,"method":"POST","url":"http://localhost:8089/shop/list","state":"finish","ignore":false,"jserror":false,"start":1594277604427,"end":1594277607434,"du":3007,"cb":1000,"status":200,"err":0,"rec":317,"send":14,"bizId_param":{},"opt_param":{},"request_param":{},"opt_custom_param":{},"hasServerHeader":true,"s_id":"DXLxFK1c6us#Mz_EJ2Jjbb4","s_tname":"Transaction/SpringController/shop/list","s_tid":"92ce19fe6f55b9b0","s_rid":"92ce19fe6f55b9b0","s_duration":2565,"startSpeOffset":5,"items":[]}}'
res = self.request.getInterfaceRes_no_token(url=self.url, body=str(body))
self.log.debug(res.status_code)
        self.assertEqual(res.status_code, 200, msg='API returned an unexpected status code, code={}'.format(str(res.status_code)))
        self.assertIn('+0800', res.text, msg='API returned an unexpected response, body={}'.format(str(res.text)))
@LogInfo.get_error
    def test_browser_action_ajax_fail(self):  # modify per test case
body = 'c{"optCustomParam":{}}\ne{"type":"event","subType":"click","start":1594952237068,"end":1594952237129,"duration":61,"data":{"type":"event","id":"btn2","nodeName":"BUTTON","className":"","title":"","value":"","text":"send ajax(jQuery)","path":"html > body > button:nth-child(2)#btn2","eventHandlerType":"addEventListener","state":"finish","items":[{"type":"ajax","id":2,"cid":5,"method":"GET","url":"https://reportalpha1.tingyun.com/demo-server/test/custom-request1?timeout=100","state":"finish","ignore":false,"jserror":false,"start":1594952237070,"end":1594952237126,"du":56,"cb":0,"status":403,"err":0,"rec":159,"send":0,"bizId_param":{},"opt_param":{},"request_param":{},"opt_custom_param":{},"hasServerHeader":false,"items":[]}]}}'
res = self.request.getInterfaceRes_no_token(url=self.url, body=str(body))
self.log.debug(res.status_code)
        self.assertEqual(res.status_code, 200, msg='API returned an unexpected status code, code={}'.format(str(res.status_code)))
        self.assertIn('+0800', res.text, msg='API returned an unexpected response, body={}'.format(str(res.text)))
|
import datetime
import hashlib
import logging
import os
import random
import re
import string
if __name__ == "__main__":
print('This script only contains functions and cannot be called directly. See demo scripts for usage examples.')
exit(1)
# Normalize path (replace '\' and '/' with '\\').
def normalize_path(path):
    # str.replace returns a new string, so the result must be reassigned
    path = path.replace('\\', '\\\\')
    normalized_path = path.replace('/', '\\\\')
    return normalized_path
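# A minimal usage sketch (the path is illustrative):
# normalize_path('C:/temp/sample.exe') returns a string that prints as C:\\temp\\sample.exe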
# Calculate file hashes (sha256 and md5)
def file_hash(file):
file_hash_sha256 = hashlib.sha256()
file_hash_md5 = hashlib.md5()
block_size = 65536
with open(file, 'rb') as f:
fb = f.read(block_size)
while len(fb) > 0:
file_hash_sha256.update(fb)
file_hash_md5.update(fb)
fb = f.read(block_size)
sha256 = file_hash_sha256.hexdigest()
md5 = file_hash_md5.hexdigest()
return sha256, md5
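# A minimal usage sketch (the filename is illustrative):
# sha256, md5 = file_hash('sample.bin')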
# Calculate file size (KB)
def file_size(file):
size = os.path.getsize(file)
size_kb = round(size / 1024)
return size_kb
# Show info about file
def file_info(file):
file = ''.join(file)
logging.info(f'File: "{file}"')
# Check if file exists
if not os.path.isfile(file):
        logging.error(f'File "{file}" does not exist.')
return 1
# Print hash and links
sha256 = file_hash(file)[0]
md5 = file_hash(file)[1]
size = file_size(file)
logging.info(f'SHA256 hash: {sha256}')
logging.info(f'MD5 hash: {md5}')
logging.info(f'Size: {size} Kb')
logging.info(f'VirusTotal search: https://www.virustotal.com/gui/file/{sha256}/detection')
logging.info(f'Google search: https://www.google.com/search?q={sha256}\n')
return 0, sha256, md5, size
# Generate random file name
def randomize_filename(login, file, destination_folder):
# File name
random_name = ''.join(random.choice(string.ascii_letters) for _ in range(random.randint(4, 20)))
# File extension
file_extension = re.search(r'\.\w+$', file).group()
if not file_extension:
logging.debug('Unable to obtain file extension. Assuming .exe')
file_extension = '.exe'
# Destination folder
if destination_folder.lower() in ['desktop', 'downloads', 'documents']:
destination_folder = f'C:\\Users\\{login}\\{destination_folder}\\'
elif destination_folder.lower() == 'temp':
destination_folder = f'C:\\Users\\{login}\\AppData\\Local\\Temp\\'
else:
logging.debug('Using custom remote_folder')
random_filename = destination_folder + random_name + file_extension
logging.debug(f'Remote file: "{random_filename}"')
return random_filename
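# A minimal usage sketch (login and file are illustrative; the name part is random):
# randomize_filename('bob', 'sample.exe', 'temp')
# might return 'C:\\Users\\bob\\AppData\\Local\\Temp\\KqZw.exe'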
# Generate html report
def html_report(vm, snapshot, filename, file_args, file_size, sha256, md5, timeout, vm_network_state,
reports_directory='reports'):
# Set options and paths
now = datetime.datetime.now()
time = now.strftime("%Y-%m-%d %H:%M:%S")
destination_dir = reports_directory + '/' + sha256
destination_file = destination_dir + '/index.html'
    # Create directory, if it does not exist; 0o755 keeps it writable so the
    # report file can be created inside it (0o444 would be read-only on POSIX)
    os.makedirs(destination_dir, mode=0o755, exist_ok=True)
# Content of html file
html_template_header = f'''
<!DOCTYPE html>
<html>
<title>Report</title>
<body>
<h3>File info:</h3>
<table>
<tr>
<td><b>Filename:</b></td>
<td>{filename}</td>
</tr>
<tr>
<td><b>File args:</b></td>
<td>{file_args}</td>
</tr>
<tr>
<td><b>File size:</b></td>
<td>{file_size} Kb</td>
</tr>
<tr>
<td><b>SHA256 hash:</b></td>
<td>{sha256} (<a href="https://www.virustotal.com/gui/search/{sha256}" target=_blank>VT Search</a>)</td>
</tr>
<tr>
<td><b>MD5 hash:</b></td>
<td>{md5}</td>
</tr>
<tr>
<td><b>Scanned on:</b></td>
<td>{time}</td>
</tr>
<tr>
<td><b>Timeout:</b></td>
<td>{timeout} seconds</td>
</tr>
<tr>
<td><b>Network:</b></td>
<td>{vm_network_state}</td>
</tr>
<tr>
<td><b>Downloads:</b></td>
<td>
<a href=./{sha256}/{vm}{snapshot}.webm target=_blank>Screen recording</a>,
<a href=./{sha256}/{vm}{snapshot}.pcap target=_blank>Traffic dump</a>,
<a href=./{sha256}/{vm}{snapshot}.dmp target=_blank>Memory dump</a>
</td>
</tr>
</table>
<br>
'''
# Search for screenshots in reports directory
screenshots = os.listdir(destination_dir)
html_template_screenshots = f'''<h3>VM:</b> {vm}, <b>Snapshot:</b> {snapshot}<h3>'''
for screenshot in screenshots:
# Check if filename matches task name and have .png extension
if re.search(rf'{vm}_{snapshot}_\d+\.png', screenshot):
html_template_screenshots += f'''
<a href="{screenshot}" target=_blank><img src="{screenshot}" width="320" high="240"></img></a>
'''
    # Write data to report file; a context manager ensures the file is closed
    with open(destination_file, mode='a', encoding='utf-8') as file_object:
        # If file is empty, write html header first
        if os.path.getsize(destination_file) == 0:
            file_object.write(html_template_header)
        # Write screenshots block
        file_object.write(html_template_screenshots)
|
from django.urls import path
from . import views
app_name = 'invoices'
urlpatterns = [
path('', views.InvoiceListView.as_view(), name='invoice_list'),
path('create/', views.InvoiceCreateView.as_view(), name='invoice_create'),
path('update/<int:pk>/', views.InvoiceUpdateView.as_view(), name='invoice_update'),
path('<int:pk>/', views.InvoiceDetailView.as_view(), name='invoice_detail'),
path('delete/<int:pk>/', views.InvoiceDeleteView.as_view(), name='invoice_delete'),
]
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.task.changed_file_task_mixin import ChangedFileTaskMixin
from pants.task.console_task import ConsoleTask
class WhatChanged(ChangedFileTaskMixin, ConsoleTask):
"""Emits the targets that have been modified since a given commit."""
@classmethod
def register_options(cls, register):
super(WhatChanged, cls).register_options(register)
cls.register_change_file_options(register)
register('--files', type=bool,
help='Show changed files instead of the targets that own them.')
def console_output(self, _):
change_calculator = self.change_calculator(self.get_options(),
self.context.address_mapper,
self.context.build_graph,
scm=self.context.scm,
workspace=self.context.workspace)
if self.get_options().files:
for f in sorted(change_calculator.changed_files()):
yield f
else:
for addr in sorted(change_calculator.changed_target_addresses()):
yield addr.spec
|
from .BaseEditor import BaseEditor
from .ScrubSpinBox import DoubleScrubSpinBox, MinVal, MaxVal
from PyQt5 import QtCore
class BaseVecEditor(BaseEditor):
class ComponentSpinBox(DoubleScrubSpinBox):
def __init__(self, editor, component):
DoubleScrubSpinBox.__init__(self, editor)
self.component = component
self.editor = editor
self.setRange(MinVal, MaxVal)
self.valueChanged.connect(self.__valueChanged)
self.setKeyboardTracking(False)
self.setAccelerated(True)
self.editor.layout().addWidget(self)
def __valueChanged(self, val):
self.editor.componentChanged()
def __init__(self, parent, item, model, numComponents):
BaseEditor.__init__(self, parent, item, model)
self.components = []
for i in range(numComponents):
spinBox = BaseVecEditor.ComponentSpinBox(self, i)
self.components.append(spinBox)
def componentChanged(self):
self.setModelData(self.model, self.item.index())
def setEditorData(self, index):
data = self.getItemData()
comps = data.split(' ')
for i in range(len(self.components)):
self.components[i].blockSignals(True)
self.components[i].setValue(float(comps[i]))
self.components[i].blockSignals(False)
def setModelData(self, model, index):
data = ""
for i in range(len(self.components)):
strVal = str(self.components[i].value())
if i < len(self.components) - 1:
data += "%s " % strVal
else:
data += strVal
model.setData(index, data, QtCore.Qt.EditRole)
class Vec2Editor(BaseVecEditor):
def __init__(self, parent, item, model):
BaseVecEditor.__init__(self, parent, item, model, 2)
class Vec3Editor(BaseVecEditor):
def __init__(self, parent, item, model):
BaseVecEditor.__init__(self, parent, item, model, 3)
class Vec4Editor(BaseVecEditor):
def __init__(self, parent, item, model):
BaseVecEditor.__init__(self, parent, item, model, 4)
|
"""SensorGrid and Sensor Schema"""
from pydantic import Field, constr
from typing import Optional
from .._base import NoExtraBaseModel
from .wea import Wea
class _GlowHemisphere(NoExtraBaseModel):
"""Hidden base class for Ground and SkyHemisphere."""
r_emittance: float = Field(
default=1.0,
ge=0,
le=1,
description='A value between 0 and 1 for the red channel emittance.'
)
g_emittance: float = Field(
default=1.0,
ge=0,
le=1,
description='A value between 0 and 1 for the green channel emittance.'
)
b_emittance: float = Field(
default=1.0,
ge=0,
le=1,
        description='A value between 0 and 1 for the blue channel emittance.'
)
class Ground(_GlowHemisphere):
"""Ground component of the sky sphere."""
type: constr(regex='^Ground$') = 'Ground'
class SkyHemisphere(_GlowHemisphere):
"""SkyHemisphere component of the sky sphere."""
type: constr(regex='^SkyHemisphere$') = 'SkyHemisphere'
class SkyDome(NoExtraBaseModel):
"""Base class for all sky domes."""
type: constr(regex='^SkyDome$') = 'SkyDome'
ground_hemisphere: Optional[Ground] = Field(
default=Ground(),
description='Optional ground glow source.'
)
sky_hemisphere: Optional[SkyHemisphere] = Field(
default=SkyHemisphere(),
description='Optional sky hemisphere glow source.'
)
class _PointInTime(SkyDome):
"""Hidden base class for all point-in-time sky domes."""
ground_reflectance: float = Field(
default=0.2,
ge=0,
le=1,
description='A value between 0 and 1 for the ground reflectance.'
)
class _SkyWithSun(_PointInTime):
"""Hidden base class for all altitude/azimuth sky domes."""
altitude: float = Field(
...,
ge=-90,
le=90,
        description='The solar altitude measured in degrees above the horizon. '
        'Negative values indicate cases where the sun is below the horizon '
        '(eg. twilight conditions).'
)
azimuth: float = Field(
...,
ge=0,
le=360,
        description='The solar azimuth measured in degrees east of North. '
        'East is 90, South is 180 and West is 270. Note that this input is '
        'different from the Radiance convention, in which azimuth degrees '
        'are measured west of South.'
)
class CertainIrradiance(_PointInTime):
"""Sky with evenly distributed light at a certain irradiance value."""
type: constr(regex='^CertainIrradiance$') = 'CertainIrradiance'
irradiance: float = Field(
default=558.659,
ge=0,
description='A value for the horizontal diffuse irradiance value in W/m2.'
)
class CIE(_SkyWithSun):
"""CIE sky similar to using Radiance's gensky command."""
type: constr(regex='^CIE$') = 'CIE'
sky_type: int = Field(
0,
ge=0,
le=5,
description='An integer between 0..5 to indicate CIE Sky Type.'
'\n0 = Sunny with sun.'
'\n1 = Sunny without sun.'
'\n2 = Intermediate with sun.'
'\n3 = Intermediate without sun.'
'\n4 = Cloudy sky.'
'\n5 = Uniform cloudy sky.'
)
class ClimateBased(_SkyWithSun):
"""Point-in-time Climate-based sky."""
type: constr(regex='^ClimateBased$') = 'ClimateBased'
direct_normal_irradiance: float = Field(
...,
ge=0,
description='Direct normal irradiance (W/m2).'
)
diffuse_horizontal_irradiance: float = Field(
...,
ge=0,
description='Diffuse horizontal irradiance (W/m2).'
)
class SunMatrix(NoExtraBaseModel):
"""Annual Climate-based Sun matrix."""
type: constr(regex='^SunMatrix$') = 'SunMatrix'
wea: Wea = Field(
...,
description='A Ladybug wea schema.'
)
north: float = Field(
0,
ge=-360,
le=360,
description='A number between -360 and 360 for the counterclockwise '
'difference between the North and the positive Y-axis in degrees. '
'90 is West and 270 is East.'
)
class SkyMatrix(SunMatrix):
"""Annual Climate-based Sky matrix."""
type: constr(regex='^SkyMatrix$') = 'SkyMatrix'
density: int = Field(
1,
ge=1,
        description='Sky patch subdivision density. This value is similar to the '
'-m option in gendaymtx command. Default is 1 which means 145 sky patches '
'and 1 patch for the ground. One can add to the resolution typically by '
'factors of two (2, 4, 8, ...) which yields a higher resolution sky using '
'the Reinhart patch subdivision.'
)
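# A minimal usage sketch (values are illustrative):
# sky = CIE(altitude=45.0, azimuth=180.0, sky_type=0)
# print(sky.json())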
|
import turtle
#Initialization
turtle.speed(0)
turtle.pu()
######
#changeColor variables
colorList = ["black","blue", "red", "green", "yellow", "magenta", "orange", "purple"]
currentColor = 1
#changeShape variables
shapeList = ["circle", "square", "triangle", "arrow"]
currentShape = 0
#changeSize
lengthSize = 1
widthSize = 1
def changeColor():
global currentColor
turtle.pencolor(colorList[currentColor])
turtle.fillcolor(colorList[currentColor])
if(currentColor >= len(colorList)-1):
currentColor = 0
else:
currentColor += 1
turtle.getscreen().onkeypress(changeColor,"n")
######
#changeShape function
def changeShape():
global shapeList
global currentShape
turtle.shape(shapeList[currentShape])
if currentShape >= len(shapeList)-1:
currentShape = 0
else:
currentShape += 1
turtle.onkeypress(changeShape,"c")
######
#changeSize function
def enlarge():
global lengthSize
global widthSize
turtle.resizemode("user")
turtle.shapesize(lengthSize+1,widthSize+1)
lengthSize += 1
widthSize += 1
turtle.onkeypress(enlarge,"equal")
def reduceSize():
global lengthSize
global widthSize
turtle.resizemode("user")
if lengthSize > 1 and widthSize >1:
turtle.shapesize(lengthSize-1,widthSize-1)
lengthSize -= 1
widthSize -= 1
else:
turtle.shapesize(1,1)
lengthSize = 1
widthSize = 1
turtle.onkeypress(reduceSize,"minus")
#Stamp function and definition
def stamp(x,y):
turtle.pu()
turtle.goto(x,y)
turtle.stamp()
turtle.ondrag(stamp, btn = 1, add = True)
######
turtle.onscreenclick(turtle.goto, btn = 1, add = True)
turtle.onkeypress(turtle.clear, "space")
print(turtle.shapesize(), turtle.turtlesize())
turtle.getscreen().listen()
turtle.mainloop()
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
from typing import Callable
import pytest
from pants.backend.python.goals.publish import (
PublishPythonPackageFieldSet,
PublishPythonPackageRequest,
rules,
)
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.target_types import PythonDistribution, PythonSourcesGeneratorTarget
from pants.backend.python.util_rules import pex_from_targets
from pants.core.goals.package import BuiltPackage, BuiltPackageArtifact
from pants.core.goals.publish import PublishPackages, PublishProcesses
from pants.core.util_rules.config_files import rules as config_files_rules
from pants.engine.addresses import Address
from pants.engine.fs import EMPTY_DIGEST
from pants.engine.process import Process
from pants.testutil.process_util import process_assertion
from pants.testutil.python_rule_runner import PythonRuleRunner
from pants.testutil.rule_runner import QueryRule
from pants.util.frozendict import FrozenDict
@pytest.fixture
def rule_runner() -> PythonRuleRunner:
rule_runner = PythonRuleRunner(
preserve_tmpdirs=True,
rules=[
*config_files_rules(),
*pex_from_targets.rules(),
*rules(),
QueryRule(PublishProcesses, [PublishPythonPackageRequest]),
],
target_types=[PythonSourcesGeneratorTarget, PythonDistribution],
objects={"python_artifact": PythonArtifact},
)
return set_options(rule_runner)
def set_options(rule_runner: PythonRuleRunner, options: list | None = None) -> PythonRuleRunner:
rule_runner.set_options(
options or [],
env_inherit={"PATH", "PYENV_ROOT", "HOME"},
env={
"TWINE_USERNAME": "whoami",
"TWINE_USERNAME_PYPI": "whoareyou",
"TWINE_PASSWORD_PYPI": "secret",
},
)
return rule_runner
@pytest.fixture
def packages():
return (
BuiltPackage(
EMPTY_DIGEST,
(
BuiltPackageArtifact("my-package-0.1.0.tar.gz"),
BuiltPackageArtifact("my_package-0.1.0-py3-none-any.whl"),
),
),
)
def project_files(
skip_twine: bool = False, repositories: list[str] = ["@pypi", "@private"]
) -> dict[str, str]:
return {
"src/BUILD": dedent(
f"""\
python_sources()
python_distribution(
name="dist",
provides=python_artifact(
name="my-package",
version="0.1.0",
),
repositories={repositories!r},
skip_twine={skip_twine},
)
"""
),
"src/hello.py": """print("hello")""",
".pypirc": "",
}
def request_publish_processes(rule_runner: PythonRuleRunner, packages) -> PublishProcesses:
tgt = rule_runner.get_target(Address("src", target_name="dist"))
fs = PublishPythonPackageFieldSet.create(tgt)
return rule_runner.request(PublishProcesses, [fs._request(packages)])
def assert_package(
package: PublishPackages,
expect_names: tuple[str, ...],
expect_description: str,
expect_process: Callable[[Process], None] | None,
) -> None:
assert package.names == expect_names
assert package.description == expect_description
if expect_process:
assert package.process
expect_process(package.process.process)
else:
assert package.process is None
def test_twine_upload(rule_runner, packages) -> None:
rule_runner.write_files(project_files(skip_twine=False))
result = request_publish_processes(rule_runner, packages)
assert len(result) == 2
assert_package(
result[0],
expect_names=(
"my-package-0.1.0.tar.gz",
"my_package-0.1.0-py3-none-any.whl",
),
expect_description="@pypi",
expect_process=process_assertion(
argv=(
"./twine.pex_pex_shim.sh",
"upload",
"--non-interactive",
"--config-file=.pypirc",
"--repository=pypi",
"my-package-0.1.0.tar.gz",
"my_package-0.1.0-py3-none-any.whl",
),
env=FrozenDict({"TWINE_USERNAME": "whoareyou", "TWINE_PASSWORD": "secret"}),
),
)
assert_package(
result[1],
expect_names=(
"my-package-0.1.0.tar.gz",
"my_package-0.1.0-py3-none-any.whl",
),
expect_description="@private",
expect_process=process_assertion(
argv=(
"./twine.pex_pex_shim.sh",
"upload",
"--non-interactive",
"--config-file=.pypirc",
"--repository=private",
"my-package-0.1.0.tar.gz",
"my_package-0.1.0-py3-none-any.whl",
),
env=FrozenDict({"TWINE_USERNAME": "whoami"}),
),
)
def test_skip_twine(rule_runner, packages) -> None:
rule_runner.write_files(project_files(skip_twine=True))
result = request_publish_processes(rule_runner, packages)
assert len(result) == 1
assert_package(
result[0],
expect_names=(
"my-package-0.1.0.tar.gz",
"my_package-0.1.0-py3-none-any.whl",
),
expect_description="(by `skip_twine` on src:dist)",
expect_process=None,
)
# Skip twine globally from config option.
rule_runner.set_options(["--twine-skip"])
result = request_publish_processes(rule_runner, packages)
assert len(result) == 0
@pytest.mark.parametrize(
"options, cert_arg",
[
pytest.param(
[],
None,
id="No ca cert",
),
pytest.param(
["--twine-ca-certs-path={}"],
"--cert=ca_certs.pem",
id="[twine].ca_certs_path",
),
# This test needs a working ca bundle to work. Verified manually for now.
# pytest.param(
# ["--ca-certs-path={}"],
# "--cert=ca_certs.pem",
# id="[GLOBAL].ca_certs_path",
# ),
],
)
def test_twine_cert_arg(rule_runner, packages, options, cert_arg) -> None:
ca_cert_path = rule_runner.write_files({"conf/ca_certs.pem": ""})[0]
rule_runner.write_files(project_files(repositories=["@private"]))
set_options(rule_runner, [opt.format(ca_cert_path) for opt in options])
result = request_publish_processes(rule_runner, packages)
assert len(result) == 1
process = result[0].process
assert process
if cert_arg:
assert cert_arg in process.process.argv
else:
assert not any(arg.startswith("--cert") for arg in process.process.argv)
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
MAX_VOLUME = 100
BACK_FADE_VOLUME = 20
BACK_VOLUME = 30
FRONT_VOLUME = 120
import subprocess
from time import sleep
PWD = '/home/shhdup/fade_play/'
BACK_MUSIC = (
'back2.mp3',
'back1.mp3',
)
FRONT_MUSIC = (
('01.mp3', 'Чтобы стать настоящим'),
('02.mp3', 'Это непросто делай то'),
('03.mp3', 'То что я тебе даю'),
('04.mp3', 'А теперь лезь под верёвку'),
('05.mp3', 'Я вручаю тебе зачарованные чернила'),
('06.mp3', 'Иди сюда!'),
('07.mp3', 'Осторожно Перед тобой дверь'),
('0751.mp3','Скрип'),
('0752.mp3','Летучие мыши'),
('09.mp3', 'Стой и не двигайся'),
('10.mp3', 'Ну и где тебя носит'),
('11.mp3', 'Перед тобой пески времени'),
('12.mp3', 'Ищи быстрее'),
('13.mp3', 'Прекрасно Этот ключ'),
('14.mp3', 'Вот оно Я предупреждал'),
('15.mp3', 'Не бойся я прогнал'),
('16.mp3', 'Спасибо Теперь я могу достать'),
('17.mp3', 'Я вижу Великому Порядку опять'),
('20.mp3', 'Разверни свиток который'),
('21.mp3', 'Теперь полей этот свиток'),
('22.mp3', 'Из свитка который тебе дал Порядок'),
('23.mp3', 'Иди сюда и держи питона'),
('24.mp3', 'Чувствуешь Питон начал извиваться'),
('25.mp3', 'ПОРЯДОК Да Я чувствую'),
('26.mp3', 'ХАОС Да Я чувствую Сделай два шага'),
('27.mp3', 'Но тебе пока рано есть его'),
('28.mp3', 'Съешь его и ты уже завтра'),
('29.mp3', 'Я знаю что в душе ты стремишься'),
('30.mp3', 'Подойди ко мне. На самом деле ты хочешь'),
('31.mp3', 'Пришла пора сделать окончательный выбор'),
('32.mp3', 'Что ж выбор сделан Чтобы завершить обряд'),
('33.mp3', 'Теперь выпей этот напиток'),
('34.mp3', 'Не забывай что здесь произошло'),
('35.mp3', 'И помни что нет ничего окончательного'),
('355.mp3', 'Гром'),
)
BACK_MUSIC = [PWD + foo for foo in BACK_MUSIC]
FRONT_MUSIC = [(PWD + foo, desc) for foo, desc in FRONT_MUSIC]
back = subprocess.Popen(['mplayer', '-softvol', '-loop', '0', '-slave', '-quiet'] + BACK_MUSIC, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
back.stdin.write(bytes('volume %d %d\n' % (BACK_VOLUME, MAX_VOLUME), 'utf-8'))
back.stdin.flush()
def play_front(snd):
back.stdin.write(bytes('volume %d %d\n' % (BACK_FADE_VOLUME, MAX_VOLUME), 'utf-8'))
back.stdin.flush()
front = subprocess.Popen(['mplayer', '-softvol', '-slave', '-quiet', snd], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
front.stdin.write(bytes('volume %d %d\n' % (FRONT_VOLUME, MAX_VOLUME), 'utf-8'))
front.stdin.flush()
front.wait()
except:
        print('FRONT INTERRUPTED')
front.kill()
back.stdin.write(bytes('volume %d %d\n' % (BACK_VOLUME, MAX_VOLUME), 'utf-8'))
back.stdin.flush()
try:
while True:
cur = 0
while cur < len(FRONT_MUSIC):
snd, desc = FRONT_MUSIC[cur]
print('%2d: READY %s - %s' % (cur+1, snd, desc), end='? ')
inp = input()
if inp:
if inp.isnumeric():
inp = PWD + inp + '.mp3'
changed = False
for i in range(len(FRONT_MUSIC)):
if FRONT_MUSIC[i][0] == inp:
cur = i
changed = True
break
if not changed:
print('WARNING! "%s" not found' % inp)
elif inp[1:].isnumeric():
inp = int(inp[1:]) - 1
if 0 <= inp < len(FRONT_MUSIC):
cur = inp
else:
print('WARNING! Number should be between 1 and %d' % len(FRONT_MUSIC))
else:
foo = []
for i in range(len(FRONT_MUSIC)):
if inp.upper() in FRONT_MUSIC[i][1].upper():
foo.append((i+1, FRONT_MUSIC[i][0], FRONT_MUSIC[i][1]))
if len(foo) == 0:
print('WARNING! "%s" not found' % inp)
elif len(foo) == 1:
cur = foo[0][0] - 1
else:
                        print('WARNING! Too many variants:')
for bar in foo:
print('%3d %s %s' % bar)
else:
play_front(snd)
print('OK %s - %s' % (snd, desc))
cur += 1
print('LOOP FINISHED')
except Exception as E:
print(str(E))
back.kill()
|
# coding: utf-8
# In[1]:
import numpy as np
# In[2]:
X = np.loadtxt("./data/encoded_train_150.out"); # this is the latent space array
print "load data shape:", X.shape;
# In[3]:
#from sklearn.manifold import TSNE;
X1 = X[::10];
#X_embedded = TSNE(n_components=3).fit_transform(X1);
#print "after TSNE operation: embedded shape", X_embedded.shape;
# In[4]:
#np.save("./encoded_TSNE_3D.npy", X_embedded)
# In[5]:
#X_embedded = np.load("./encoded_TSNE_3D.npy")
X_embedded = X1
print(X_embedded.shape)
# In[6]:
label = np.load("./data/cont-mat_mod.npy");
sep_1 = len(label)*0.8
print(sep_1)
label = label[0:int(sep_1)]
y_train_c = label[::10,1];
y_train_c_frac = label[::10,2];
y_train_t = label[::10,0];
x_pred_encoded = X_embedded;
# In[7]:
label_2 = np.load("./output/RMSD.npy")
print(sep_1)
label_2 = label_2[0:int(sep_1)]
y_train_rmsd = label_2[::10,1];
# In[8]:
print(y_train_c.shape)
print(y_train_c_frac.shape)
print(y_train_t.shape)
print(y_train_rmsd.shape)
# In[18]:
# plot 1:
import matplotlib.pyplot as plt;
plt.rcParams['axes.facecolor'] = 'black'
Dmax = y_train_c_frac;
[n,s] = np.histogram(Dmax, 11);
d = np.digitize(Dmax, s);
#[n,s] = np.histogram(-np.log10(Dmax), 11);
#d = np.digitize(-np.log10(Dmax), s);
from matplotlib import cm;
import matplotlib as mpl;
cmi = plt.get_cmap('jet');
cNorm = mpl.colors.Normalize(vmin=min(Dmax), vmax=max(Dmax));
#cNorm = mpl.colors.Normalize(vmin=140, vmax=240);
scalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=cmi);
import numpy as np;
from mpl_toolkits.mplot3d import Axes3D;
fig = plt.figure(figsize=(8.5,6));
ax = fig.add_subplot(111, projection='3d');
# scatter3D requires a 1D array for x, y, and z;
# ravel() converts the 100x100 array into a 1x10000 array;
p = ax.scatter3D(np.ravel(x_pred_encoded[:, 0]),
np.ravel(x_pred_encoded[:, 1]),
np.ravel(x_pred_encoded[:, 2]),
marker='.', c=scalarMap.to_rgba(Dmax));
ax.set_xlim3d(np.amin(np.ravel(x_pred_encoded[:, 0])), np.amax(np.ravel(x_pred_encoded[:, 0])));
ax.set_ylim3d(np.amin(np.ravel(x_pred_encoded[:, 1])), np.amax(np.ravel(x_pred_encoded[:, 1])));
ax.set_zlim3d(np.amin(np.ravel(x_pred_encoded[:, 2])), np.amax(np.ravel(x_pred_encoded[:, 2])));
ax.tick_params(axis='x', colors='white')
ax.tick_params(axis='y', colors='white')
ax.tick_params(axis='z', colors='white')
#ax.xaxis.label.set_color('white')
#ax.yaxis.label.set_color('white')
#ax.zaxis.label.set_color('white')
#ax.set_xlabel('VAE 0');
#ax.set_ylabel('VAE 1');
#ax.set_zlabel('VAE 2');
scalarMap.set_array(Dmax);
fig.colorbar(scalarMap);
#plt.show()
plt.savefig('./fig/encoded_train_3D_frac.png', dpi=600);
plt.clf();
# In[19]:
# plot 1:
import matplotlib.pyplot as plt;
plt.rcParams['axes.facecolor'] = 'black'
Dmax = y_train_rmsd;
[n,s] = np.histogram(Dmax, 11);
d = np.digitize(Dmax, s);
#[n,s] = np.histogram(-np.log10(Dmax), 11);
#d = np.digitize(-np.log10(Dmax), s);
from matplotlib import cm;
import matplotlib as mpl;
cmi = plt.get_cmap('jet');
cNorm = mpl.colors.Normalize(vmin=min(Dmax), vmax=max(Dmax));
#cNorm = mpl.colors.Normalize(vmin=140, vmax=240);
scalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=cmi);
import numpy as np;
from mpl_toolkits.mplot3d import Axes3D;
fig = plt.figure(figsize=(8.5,6));
ax = fig.add_subplot(111, projection='3d');
# scatter3D requires a 1D array for x, y, and z;
# ravel() converts the 100x100 array into a 1x10000 array;
p = ax.scatter3D(np.ravel(x_pred_encoded[:, 0]),
np.ravel(x_pred_encoded[:, 1]),
np.ravel(x_pred_encoded[:, 2]),
marker='.', c=scalarMap.to_rgba(Dmax));
ax.set_xlim3d(np.amin(np.ravel(x_pred_encoded[:, 0])), np.amax(np.ravel(x_pred_encoded[:, 0])));
ax.set_ylim3d(np.amin(np.ravel(x_pred_encoded[:, 1])), np.amax(np.ravel(x_pred_encoded[:, 1])));
ax.set_zlim3d(np.amin(np.ravel(x_pred_encoded[:, 2])), np.amax(np.ravel(x_pred_encoded[:, 2])));
ax.tick_params(axis='x', colors='white')
ax.tick_params(axis='y', colors='white')
ax.tick_params(axis='z', colors='white')
#ax.xaxis.label.set_color('white')
#ax.yaxis.label.set_color('white')
#ax.zaxis.label.set_color('white')
#ax.set_xlabel('VAE 0');
#ax.set_ylabel('VAE 1');
#ax.set_zlabel('VAE 2');
scalarMap.set_array(Dmax);
fig.colorbar(scalarMap);
#plt.show()
plt.savefig('./fig/encoded_train_3D_rmsd.png', dpi=600);
plt.clf();
|
try:
from tkinter import *
except:
from Tkinter import *
import sys
sys.path.append('../src/org')
from gameplay import PointItem as pi
from maps import Map1
from display import DrawingGenerics
import unittest
class test_PointItem(unittest.TestCase):
def setUp(self):
root = Tk()
gameCanvas = Canvas(root, width = 200, height = 200)
gameCanvas.grid(row = 0, column = 0)
self.specs1 = {'xCenter': (14 * DrawingGenerics.TILE_SIZE),
'yCenter': (14 * DrawingGenerics.TILE_SIZE) + DrawingGenerics.TILE_CENTERING,
'radius': DrawingGenerics.DOT_RADIUS,
'color': "white",
'tag': "dot"}
self.specs2 = {'xCenter': (14.5 * DrawingGenerics.TILE_SIZE),
'yCenter': (15 * DrawingGenerics.TILE_SIZE) + DrawingGenerics.TILE_CENTERING,
'radius': DrawingGenerics.DOT_RADIUS,
'color': "black",
'tag': "dot"}
self.specs3 = {'xCenter': (13.99 * DrawingGenerics.TILE_SIZE),
'yCenter': (15.49 * DrawingGenerics.TILE_SIZE) + DrawingGenerics.TILE_CENTERING,
'radius': DrawingGenerics.DOT_RADIUS,
'color': "red",
'tag': "cherry"}
self.ptItem1 = pi.PointItem(gameCanvas,self.specs1,50)
self.ptItem2 = pi.PointItem(gameCanvas,self.specs2,100)
self.ptItem3 = pi.PointItem(gameCanvas,self.specs3,200)
def test_setPoints(self):
#We also test getPoints
#Check initial values of points
self.assertEqual(self.ptItem1.getPoints(),50)
self.assertEqual(self.ptItem2.getPoints(),100)
self.assertEqual(self.ptItem3.getPoints(),200)
self.ptItem1.setPoints(500)
self.ptItem2.setPoints(5000)
self.ptItem3.setPoints(9001)
self.assertEqual(self.ptItem1.getPoints(),500)
self.assertEqual(self.ptItem2.getPoints(),5000)
self.assertEqual(self.ptItem3.getPoints(),9001)
def test_inTile(self):
#The inTile method rounds down the coordinates xCenter and yCenter to get the tile
self.assertEqual(self.ptItem1.inTile(),(14,14))
self.assertEqual(self.ptItem2.inTile(),(14,15))
self.assertEqual(self.ptItem3.inTile(),(13,15))
|
# This is where the answers to Chapter 8 questions for the BSS Dev RampUp go
# Name:
|
from __future__ import unicode_literals, print_function, division
__author__ = 'petrbouchal'
"""
Created on Jul 10, 2012
@author: petrbouchal
Collect all data from No. 10 business plan API, build a database with this data,
as well as with basic completion status data calculated, and deliver an analytical report on aggregates
and ideally with over-time analytics as well
"""
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
# TODO: finish readme
writecsv = 1  # 0 to skip writing CSV; set to 0 for debugging download. Must be 0 for scraperwiki deployment (default of 1 assumed; the original left writecsv undefined although it is assigned later)
analytics = 0  # 1 to run aggregate analytics; set to 0 for debugging download/raw data write. Must be 0 for scraperwiki deployment
timeseriesanalytics = 0  # 1 to run time-series analytics. Must be 0 for scraperwiki deployment
checkchanges = 0  # set to 1 to check changes in duedates across downloads.
codebook = 0  # 1 to generate codebook
log = 1  # 1 to log
toscraperwiki = 0  # 1 for scraperwiki output
from calendar import monthrange
from bs4 import BeautifulSoup
#import time
from datetime import datetime
from datetime import timedelta
import json
import urllib2
import re
import shutil
import os
from pprint import pprint
import sqlite3
import litepiesql
#from pprint import pprint
now = datetime.now()
today = datetime.today()
if today.weekday() != 4:
print('It\'s not Friday, and I only scrape weekly. Exiting.')
raise SystemExit
# build date and time strings
datestring = datetime.strftime(today, '%Y-%m-%d')
datetimestring = datetime.strftime(now, '%Y-%m-%d %H:%M:%S')
filedatestring = datetime.strftime(now, '%Y%m%d_%H%M')
filedatestringlong = datetime.strftime(now, '%Y%m%d_%H%M%S')
# DB setup
db = sqlite3.connect('data.sqlite')
cursor = db.cursor()
alldata = []
# setup structure for assigning abbreviations to department names
deptdict = {}
deptdict['Department for Communities and Local Government'] = 'DCLG'
deptdict['Ministry of Justice'] = 'MoJ'
deptdict['Ministry of Defence'] = 'MoD'
deptdict['Cabinet Office'] = 'CO'
deptdict['Department of Energy and Climate Change'] = 'DECC'
deptdict['Department for Education'] = 'DfE'
deptdict['Department for Business, Innovation and Skills'] = 'BIS'
deptdict['Department for Transport'] = 'DfT'
deptdict['Her Majesty\'s Revenue and Customs'] = 'HMRC'
deptdict['Department for Work and Pensions'] = 'DWP'
deptdict['Department of Health'] = 'DH'
deptdict['Foreign and Commonwealth Office'] = 'FCO'
deptdict['Her Majesty\'s Treasury'] = 'HMT'
deptdict['Department for Environment, Food and Rural Affairs'] = 'Defra'
deptdict['Department for International Development'] = 'DfID'
deptdict['Department for Culture, Media and Sport'] = 'DCMS'
deptdict['Home Office'] = 'HO'
# build header
header = ['dept_abb', 'dept_name', 'dept_id', 'dept_url', 'priority_body', 'priority_id', \
'priority_strapline', 'action_id', 'action_body', 'action_notes', \
'schedule_start_date', 'schedule_end_date', 'actual_start_date', 'actual_end_date', 'subaction_id', 'subaction_body', \
'subaction_notes', 'subaction_schedule_start_date', 'subaction_schedule_end_date', 'act_start', \
'act_end', 'sched_start_endmonth', 'sched_end_endmonth', \
'started', 'ended', 'start_status', 'end_status', 'startearlyby', 'startedlateby', 'startoverdueby', \
'endearlyby', 'endedlateby', 'endedoverdueby', \
'carriedover', \
'subaction_schedule_start_orig', 'subaction_schedule_end_orig', \
'subaction_actual_start_orig', 'subaction_actual_end_orig', \
'datetime', 'date']
if len(cursor.execute("""SELECT name FROM sqlite_master WHERE type='table' AND name='data';""").fetchall()) == 0:
cursor.execute("""CREATE TABLE data
(dept_abb, dept_name, dept_id, dept_url, priority_body, priority_id, priority_strapline, action_id, action_body,
action_notes, schedule_start_date, schedule_end_date, actual_start_date, actual_end_date, subaction_id,
subaction_body, subaction_notes, subaction_schedule_start_date, subaction_schedule_end_date,
act_start, act_end, sched_start_endmonth, sched_end_endmonth,
started, ended, start_status, end_status, startearlyby, startedlateby, startoverdueby,
endearlyby, endedlateby, endedoverdueby,
carriedover, subaction_schedule_start_orig, subaction_schedule_end_orig,
subaction_actual_start_orig, subaction_actual_end_orig,
datetime, date)
""")
db.commit()
db.close()
db = litepiesql.Database('data.sqlite')
# this list will be used to store all rows
# Get list of departments
base_url = 'http://transparency.number10.gov.uk/api/'
urldepts = base_url + 'departments'
try:
depts0 = urllib2.urlopen(urldepts)
except IOError:
print('ERROR: API not available on initial call for department list.')
raise
depts = depts0.read()
deptsJ = json.loads(depts)
rawdir = 'raw'  # directory for raw XML snapshots; 'raw' is assumed, as rawdir was never defined in the original
def savexml(filename, url):
    rawstore1 = open('./' + rawdir + '/' + filename + '.xml', 'wb')
    deptsxml = urllib2.urlopen(url + '.xml').read()
    rawstore1.write(deptsxml)
    rawstore1.close()
    return "OK"
#get properties of each department
for dept in deptsJ:
dept_id = dept['department_id']
dept_name = dept['name']
# TODO: Create dictionary for alternative department names
dept_url = dept['url']
# get priorities for each department
urlpriorities = base_url + 'priorities/department_id/' + str(dept_id)
try:
priorities0 = urllib2.urlopen(urlpriorities)
except IOError:
print('ERROR: API not available on call for priorities.')
writecsv = 0
raise
priorities = priorities0.read()
prioritiesJ = json.loads(priorities)
#pprint.pprint(prioritiesJ)
# get properties of each priority
for priority in prioritiesJ:
priority_body = priority['body']
priority_id = priority['priority_id']
priority_strapline = priority['strapline']
# get actions for each priority
urlactions = base_url + 'actions/priority_id/' + str(priority_id)
try:
actions0 = urllib2.urlopen(urlactions)
except IOError:
print('ERROR: API not available when retrieving actions.')
writecsv = 0
raise
actions = actions0.read()
actionsJ = json.loads(actions)
# get properties of each action
for action in actionsJ:
action_id = action['action_id']
actual_end_date = action['actual_end_date']
actual_start_date = action['actual_start_date']
action_body = action['body']
action_notes = action['notes']
schedule_end_date = action['schedule_end_date']
schedule_start_date = action['schedule_start_date']
# cycle through sub-actions
for subaction in action['sub_actions']:
subaction_id = subaction['action_id']
subaction_actual_end_date = subaction['actual_end_date']
subaction_actual_start_date = subaction['actual_start_date']
subaction_body = subaction['body']
subaction_notes = subaction['notes']
subaction_schedule_end_date = subaction['schedule_end_date']
subaction_schedule_start_date = subaction['schedule_start_date']
subaction_schedule_end_date_orig = subaction_schedule_end_date
subaction_schedule_start_date_orig = subaction_schedule_start_date
subaction_actual_end_date_orig = subaction_actual_end_date
subaction_actual_start_date_orig = subaction_actual_start_date
#===========================================================
# FIX DATES
#===========================================================
# convert seasons to months
# TODO: devise more robust date processing, probably by using dateutil.parser
subaction_schedule_start_date = subaction_schedule_start_date.replace('Winter 2012', 'Dec 2012')\
.replace('Spring', 'Apr').replace('Summer', 'Jul').replace('Autumn', 'Oct')
subaction_schedule_end_date = subaction_schedule_end_date.replace('Winter 2012', 'Dec 2012')\
.replace('Spring', 'Apr').replace('Summer', 'Jul').replace('Autumn', 'Oct')
#fix known typos in scheduled date fields
subaction_schedule_end_date = subaction_schedule_end_date.replace('Sep 212', 'Sep 2012').replace('Octo', 'Oct')
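                    # Hedged sketch of the TODO above: dateutil parses both month
                    # abbreviations and full names in one call, e.g.
                    #   from dateutil import parser as dateparser
                    #   dateparser.parse('Apr 2013', default=datetime(2013, 1, 1))
                    # which would replace the twin %b/%B strptime attempts below.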
# process scheduled dates
sched_start_text = False
sched_end_text = False
try:
# trying if the date is in Apr 13 type format ...
#
# NOTE dates whose name end in 0 are date objects, other date variables are strings
#
sched_end0 = datetime.strptime(subaction_schedule_end_date, '%b %Y')
sched_end = datetime.strftime(sched_end0, '%Y-%m-%d')
except ValueError:
# ... or in April 14 format:
try:
sched_end0 = datetime.strptime(subaction_schedule_end_date, '%B %Y')
sched_end = datetime.strftime(sched_end0, '%Y-%m-%d')
except ValueError:
sched_end = 'NA'
sched_end_text = True
try:
# same for start date - turn into real data and format correctly
sched_start0 = datetime.strptime(subaction_schedule_start_date, '%b %Y')
sched_start = datetime.strftime(sched_start0, '%Y-%m-%d')
except ValueError:
try:
sched_start0 = datetime.strptime(subaction_schedule_start_date, '%B %Y')
sched_start = datetime.strftime(sched_start0, '%Y-%m-%d')
except ValueError:
sched_start = 'NA'
sched_start_text = True
if sched_start_text != True:
sched_start_day = sched_start0.day
sched_start_month = sched_start0.month
sched_start_year = sched_start0.year
sched_start_numberofdays = int(monthrange(sched_start_year, int(sched_start_month))[1])
sched_start_endmonth0 = sched_start0.replace(day=sched_start_numberofdays)
sched_start_endmonth = datetime.strftime(sched_start_endmonth0, '%Y-%m-%d')
else:
sched_start_endmonth0 = 'NA'
sched_start_endmonth = 'NA'
if sched_end_text != True:
sched_end_day = sched_end0.day
sched_end_month = sched_end0.month
sched_end_year = sched_end0.year
sched_end_numberofdays = int(monthrange(sched_end_year, int(sched_end_month))[1])
sched_end_endmonth0 = sched_end0.replace(day=sched_end_numberofdays)
sched_end_endmonth = datetime.strftime(sched_end_endmonth0, '%Y-%m-%d')
else:
sched_end_endmonth0 = 'NA'
sched_end_endmonth = 'NA'
# process actual dates - turn into real dates and get rid of time component
try:
act_start0 = datetime.strptime(subaction_actual_start_date, '%Y-%m-%d %H:%M:%S')
act_start = datetime.strftime(act_start0, '%Y-%m-%d')
except TypeError:
act_start = 'NA'
act_start0 = 'NA'
except ValueError:
act_start = 'NA'
act_start0 = 'NA'
try:
act_end0 = datetime.strptime(subaction_actual_end_date, '%Y-%m-%d %H:%M:%S')
act_end = datetime.strftime(act_end0, '%Y-%m-%d')
except ValueError:
act_end = 'NA'
act_end0 = 'NA'
except TypeError:
act_end = 'NA'
act_end0 = 'NA'
#===============================================================================
# BASIC ANALYTICS: MARK EACH ITEM WITH STATUS CODES
#===============================================================================
print('Subaction ID: ' + str(subaction_id))
print(urlactions)
start_status = 'NA'
end_status = 'NA'
carriedover = 0
# calculate scheduled duration
# TODO: change duration counting and comparisons to MONTHS
if ((sched_end_text == False) & (sched_start_text == False)):
sched_duration = (sched_end_endmonth0 - sched_start0).days
else:
sched_duration = 'NA'
# first the start
if act_start != 'NA':
started = True
else:
started = False
if subaction_schedule_start_date == "Started":
sched_start_text = True
started = True
start_status = 'Carried over'
carriedover = 1
elif subaction_schedule_start_date == 'TBC':
start_status = 'TBC'
sched_start_text = True
if ((started == True) & (sched_start_text == False)):
                        startoverdueby = 'NA'
                        startduein = 'NA'
                        if ((sched_start_endmonth0.month > act_start0.month) & (sched_start_endmonth0.year == act_start0.year)):
                            start_status = 'Early'
                            startearlyby = (sched_start0 - act_start0).days
                            startedlateby = 'NA'
                        elif (sched_start_endmonth0.year > act_start0.year):
                            start_status = 'Early'
                            startearlyby = (sched_start0 - act_start0).days
                            startedlateby = 'NA'
elif ((sched_start_endmonth0 >= act_start0) & (sched_start_endmonth0.month == act_start0.month)):
start_status = 'On time'
startearlyby = 'NA'
startedlateby = 'NA'
elif sched_start_endmonth0 < act_start0:
start_status = 'Late'
startedlateby = (act_start0 - sched_start_endmonth0).days
startearlyby = 'NA'
elif ((started == False) & (sched_start_text == False)):
startedlateby = 'NA'
startearlyby = 'NA'
if sched_start_endmonth0 > today:
start_status = 'Not due'
startoverdueby = 'NA'
startduein = (sched_start_endmonth0 - today).days
elif sched_start_endmonth0 < today:
start_status = 'Overdue'
                            startduein = 'NA'
startoverdueby = (today - sched_start_endmonth0).days
elif ((started == True) & (sched_start_text == True)):
startearlyby = 'NA'
startoverdueby = 'NA'
startedlateby = 'NA'
startduein = 'NA'
elif ((started == False) & (sched_start_text == True)):
startearlyby = 'NA'
startoverdueby = 'NA'
startedlateby = 'NA'
startduein = 'NA'
# now the end
                    if subaction_schedule_end_date == 'Ongoing':
                        end_status = 'Ongoing'
                        sched_end_text = True
                    elif subaction_schedule_end_date == 'TBC':
                        end_status = 'TBC'
                        sched_end_text = True
                    # ended simply reflects whether an actual end date was recorded
                    if act_end != 'NA':
                        ended = True
                    else:
                        ended = False
if (ended == True) & (sched_end_text != True):
endedoverdueby = 'NA'
endduein = 'NA'
if ((sched_end_endmonth0.month > act_end0.month) & (sched_end_endmonth0.year == act_end0.year)):
end_status = 'Early'
endedlateby = 'NA'
endearlyby = (sched_end0 - act_end0).days
elif ((sched_end_endmonth0.year > act_end0.year)):
end_status = 'Early'
endedlateby = 'NA'
endearlyby = (sched_end0 - act_end0).days
elif ((sched_end_endmonth0 >= act_end0) & (sched_end_endmonth0.month == act_end0.month)):
end_status = 'On time'
endedlateby = 'NA'
endearlyby = 'NA'
                        elif sched_end_endmonth0 < act_end0:
end_status = 'Late'
endearlyby = 'NA'
endedlateby = (act_end0 - sched_end_endmonth0).days
elif ((ended == False) & (sched_end_text != True)):
endedlateby = 'NA'
endearlyby = 'NA'
if sched_end_endmonth0 >= today:
end_status = 'Not due'
endedoverdueby = 'NA'
endduein = (sched_end_endmonth0 - today).days
elif sched_end_endmonth0 < today:
end_status = 'Overdue'
endduein = 'NA'
endedoverdueby = (today - sched_end_endmonth0).days
elif ((ended == True) & (sched_end_text == True)):
endearlyby = 'NA'
endedoverdueby = 'NA'
endedlateby = 'NA'
endduein = 'NA'
elif ((ended == False) & (sched_end_text == True)):
endearlyby = 'NA'
endedoverdueby = 'NA'
endedlateby = 'NA'
endduein = 'NA'
# calculate actual duration and compare to scheduled
if ((started == True) & (ended == True) & (sched_start_text == False)):
act_duration = (act_end0 - act_start0).days + 1
duration_sofar = 'NA'
elif ((started == True) & (ended == False) & (sched_start_text == False)):
act_duration = 'NA'
duration_sofar = (today - act_start0).days + 1
else:
act_duration = 'NA'
duration_sofar = 'NA'
if ((sched_duration != 'NA') & (duration_sofar != 'NA')):
delta_duration = (duration_sofar - sched_duration)
# positive number means action overran the scheduled length
elif ((sched_duration != 'NA') & (act_duration != 'NA')):
delta_duration = (act_duration - sched_duration)
# positive number means action overran the scheduled length
else:
delta_duration = 'NA'
# Mark those due to start this month and in next 30 days
                    if ((started == False) & (sched_start_text == False)):
                        duetostartthismonth = ((sched_start_endmonth0.month == today.month) & (sched_start_endmonth0.year == today.year))
                        duetostartwithin30days = (today <= sched_start_endmonth0 <= today + timedelta(days=30))
                    else:
                        duetostartthismonth = 'NA'
                        duetostartwithin30days = 'NA'
                    if sched_start_text == False:
                        duetostartlastmonth = ((sched_start_endmonth0.month == today.month - 1) & (sched_start_endmonth0.year == today.year))
                        duetostartinlast30days = (today - timedelta(days=30) <= sched_start_endmonth0 < today)
                    else:
                        duetostartlastmonth = 'NA'
                        duetostartinlast30days = 'NA'
# TODO: due to end/ended/due to start/started LAST MONTH
                    if ((ended == False) & (sched_end_text == False)):
                        duetoendthismonth = ((sched_end_endmonth0.month == today.month) & (sched_end_endmonth0.year == today.year))
                        duetoendwithin30days = (today <= sched_end_endmonth0 <= today + timedelta(days=30))
                    else:
                        duetoendthismonth = 'NA'
                        duetoendwithin30days = 'NA'
                    if sched_end_text == False:
                        duetoendlastmonth = ((sched_end_endmonth0.month == today.month - 1) & (sched_end_endmonth0.year == today.year))
                        duetoendinlast30days = (today - timedelta(days=30) <= sched_end_endmonth0 < today)
                    else:
                        duetoendlastmonth = 'NA'
                        duetoendinlast30days = 'NA'
# Mark those started or ended this month and in last 30 days
                    if ((started == True) & (sched_start_text == False)):
                        startedthismonth = ((act_start0.month == today.month) & (act_start0.year == today.year))
                        startedinlast30days = (act_start0 >= today - timedelta(days=30))
                    else:
                        startedthismonth = 'NA'
                        startedinlast30days = 'NA'
                    if ((ended == True) & (sched_end_text == False)):
                        endedthismonth = ((act_end0.month == today.month) & (act_end0.year == today.year))
                        endedinlast30days = (act_end0 >= today - timedelta(days=30))
                    else:
                        endedthismonth = 'NA'
                        endedinlast30days = 'NA'
# TODO: but still write the end-of-month dates because of excel
#===================================================================
# WRITE PROCESSED RESULT TO CSV
#===================================================================
dept_abb = deptdict[dept_name]
# build row
row0 = [dept_abb, dept_name, dept_id, dept_url, priority_body, priority_id, priority_strapline, action_id, action_body, \
action_notes, schedule_start_date, schedule_end_date, actual_start_date, actual_end_date, subaction_id, \
subaction_body, subaction_notes, subaction_schedule_start_date, subaction_schedule_end_date, \
act_start, act_end, sched_start_endmonth, sched_end_endmonth, \
started, ended, start_status, end_status, startearlyby, startedlateby, startoverdueby, \
endearlyby, endedlateby, endedoverdueby, \
carriedover, subaction_schedule_start_date_orig, subaction_schedule_end_date_orig, \
subaction_actual_start_date_orig, subaction_actual_end_date_orig, \
datetimestring, datestring]
# clean output of HMTL tags and entities
# FIXME: make this cleanup work so it replaces all entities - the unescape solution is not robust
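                    # Hedged sketch for the FIXME above (this script runs on Python 2):
                    #   import HTMLParser
                    #   textcell = HTMLParser.HTMLParser().unescape(textcell)
                    # would decode all named and numeric entities in one pass.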
row = []
for cell in row0:
if isinstance(cell, basestring):
t = cell
# get rid of HTML coded entities which break R imports
cellsoup = BeautifulSoup(cell)
text_parts = cellsoup.findAll(text=True)
textcell = ' '.join(text_parts)
t1 = textcell
#t = re.sub(textcell, '\n', '')
                        t0 = t1.replace(' & #39;', '\'').replace('&#39;', '\'').replace('&#40;', '(').replace('&#41;', ')')
t = t0.replace('\r\n', ' ').replace('\n', ' ')
# FIXME: remove newline characters from within the resulting text
# TODO: check whether text within tags is not being lost
elif isinstance(cell, bool):
# replace boolean values with 0/1 for easy averaging in excel
if cell == True:
t = 1
else:
t = 0
else:
t = cell
row.append(t)
rowDict = dict(zip(header, row0))
db.insert('data',rowDict)
|
#!/usr/bin/env python3
import sys
def get_target(word, users, step):
if word[-1] in ':,':
word = word[:-1]
# Only count it if the user appeared recently
if step - users.get(word, -1000) < 200:
return word
else:
return None
def save_stats(stats, filename, ctime, msg_per_user, msg_with_target, words):
key = filename.strip() +':'+ ctime
stats[key] = (msg_per_user, msg_with_target)
counts = msg_per_user
targets = msg_with_target
n_users = len(counts)
n_msg = 0
sorted_counts = []
for user in counts:
n_msg += counts[user]
sorted_counts.append(counts[user])
perc = 100 * targets / n_msg
sorted_counts.sort()
median = None
if len(sorted_counts) > 0:
median = sorted_counts[len(sorted_counts) // 2]
print(key, n_users, n_msg, median, perc, words)
if __name__ == '__main__':
stats = {}
users = {}
step = 0
cur_msg_per_user = {}
cur_msg_with_target = 0
cur_words = 0
for filename in sys.stdin:
ctime = None
part_of_day = "am"
seen_hours = set()
chour = None
for line in open(filename.strip(), encoding='latin-1'):
if line[0] == '[':
hour = line[1:3]
if hour != chour:
chour = hour
if hour in seen_hours:
part_of_day = "pm"
seen_hours.add(hour)
if line[0] == '=':
if 'is now known as' in line:
oldname = line.strip().split()[1]
newname = line.strip().split()[-1]
users[newname] = step
if oldname in cur_msg_per_user:
cur_msg_per_user[newname] = cur_msg_per_user[oldname]
cur_msg_per_user.pop(oldname, None)
elif line[0] == '[':
step += 1
fields = line.strip().split()
time = fields[0][1:-4] + part_of_day
if time != ctime:
# Save stats
if ctime is not None:
save_stats(stats, filename, ctime, cur_msg_per_user,
cur_msg_with_target, cur_words)
# Reset
cur_msg_per_user = {}
cur_msg_with_target = 0
cur_words = 0
ctime = time
user = fields[1][1:-1]
users[user] = step
cur_msg_per_user[user] = cur_msg_per_user.get(user, 0) + 1
cur_words += len(fields) - 2
if len(fields) > 2:
target = get_target(fields[2], users, step)
if target is not None:
cur_msg_with_target += 1
# Save stats
if ctime is not None:
save_stats(stats, filename, ctime, cur_msg_per_user,
cur_msg_with_target, cur_words)
|
import logging
import os
from datetime import datetime
from flask import render_template, Blueprint, request, current_app
from app.user import User
from app.utils import allowed_file
from app.file_manager import FileManager
from app.queue_manager import QueueManager
api = Blueprint("api", __name__)
logger = logging.getLogger()
@api.route("/", methods=["GET"])
def home():
"""TODO:
if logged in, render template without email
"""
return render_template("home.html")
@api.route("/example", methods=["GET"])
def example():
"""Example samples with audio player"""
return render_template("example.html")
@api.route("/about", methods=["GET"])
def about():
"""Example samples with audio player"""
return render_template("about.html")
@api.route("/check", methods=["GET"])
def check():
return {"message": "check"}, 200
@api.route("/process", methods=["POST"])
def process():
logger.info("in POST /process")
email = request.form.get("email")
if "file" not in request.files:
logger.info("no file in request")
return "no file yo", 400
f = request.files["file"]
if not allowed_file(f.filename):
logger.info("file not an allowed file type")
return "nah, file not allowed", 400
# save user if dont exist
try:
# if user does exist, increase song count
# reduce credits by 1
user = User.get(email)
print(f'user {email} exists')
except User.DoesNotExist:
# if user doesnt exist, create w/
# 5 credits, 1 song_count, songs = [song]
print(f'user {email} doesnt exist, creating')
user = User(
email=email,
created_at=datetime.now(),
song_count=0,
credits=5,
)
user.save()
# save input file and separate
input_filepath, safe_filename = FileManager.save_file(f)
song_name = safe_filename.split(".")[0]
s3_object_name = FileManager.upload_to_s3(input_filepath, object_name=safe_filename)
data = {"s3_object_name": s3_object_name, "email": email, "song_name": song_name}
response = QueueManager.send_message("break_up_drums", data)
# update user
print('updating credits, song_count')
user.credits -= 1
user.song_count += 1
user.last_seen = datetime.now()
user.songs.append(song_name)
user.save()
return "ok", 200
@api.route("/process_error", methods=["POST"])
def handle_process_error():
"""This app should be responsible for managing dynamodb store
so CRUD actions can be done by client
but if we update someones credits and then the process fails,
need an endpoint for the worker to hit so that the credit isn't taken away
"""
# email
# error?
# restore_credit = true/false
pass
|
from django import forms
from django.utils.translation import gettext as _
from django.utils.safestring import mark_safe
PRESENCE_CONFIRMATION = (
(1, _('Yes of course :)')),
(0, _('No, unfortunately not :('))
)
HOTEL_CHOICES = (
(0, _('No')),
(1, _('Yes'))
)
class RsvpGuestsNumForm(forms.Form):
guests_num = forms.IntegerField(label=_('Number of guests you want to confirm/reject presence for:'), min_value=1, max_value=10)
class RsvpForm(forms.Form):
def __init__(self, n, *args, **kwargs):
super(RsvpForm, self).__init__(*args, **kwargs)
self.fields["name 0"] = forms.CharField(label=mark_safe("<strong> {} </strong>".format(_("Your name:"))), max_length=50)
self.fields["presence_confirmation 0"] = forms.ChoiceField(label=_('Are you going to attend our wedding party?'), choices=PRESENCE_CONFIRMATION)
self.fields["hotel_needed 0"] = forms.ChoiceField(label=_('Do you want to stay in hotel during the night?'), choices=HOTEL_CHOICES)
for i in range(1,n):
self.fields["name %d" % i] = forms.CharField(label=mark_safe("<strong> {} </strong>".format(_("Guest %(guest_id)s name:") % {'guest_id': i })), max_length=50)
self.fields["presence_confirmation %d" % i] = forms.ChoiceField(label=_('Is he/she going to attend our wedding party?'), choices=PRESENCE_CONFIRMATION)
self.fields["hotel_needed %d" % i] = forms.ChoiceField(label=_('Does he/she want to stay in hotel during the night?'), choices=HOTEL_CHOICES)
self.fields["email"] = forms.EmailField(label=mark_safe("<strong> {} </strong>".format(_('If you wish to get some update info, leave us your email:'))), required=False)
self.fields["comments"] = forms.CharField(label=mark_safe("<strong> {} </strong>".format(_('Put your comments, questions below:'))), required=False, widget=forms.Textarea)
|
class Solution(object):
def titleToNumber(self, columnTitle):
"""
https://leetcode.com/problems/excel-sheet-column-number/
        Base-26 conversion does the trick here
"""
p = 1
x = 0
for i in range(len(columnTitle)-1, -1, -1):
c = ord(columnTitle[i]) - 64
x = x + c * p
p *= 26
return x
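# Example: Solution().titleToNumber('AB') == 28  (1 * 26 + 2)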
|
VOWELS = set('aeiouAEIOU')
def consonant_count(s):
return sum(1 for a in s if a.isalpha() and a not in VOWELS)
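# Example: consonant_count('Hello, World!') == 7 (10 letters, 3 of them vowels)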
|
import pdb
import argparse
#import calendar
import numpy as np
import matplotlib.pyplot as plt
import iris
import iris.plot as iplt
import iris.coord_categorisation
import cmdline_provenance as cmdprov
import cmocean
# next block would be self-published
def read_data(fname, month):
"""Read an input data file"""
cube = iris.load_cube(fname, 'precipitation_flux')
# pdb.set_trace()
#add tracer; pause code here when loading for debugging
# type 'next' for next pdb; 'c' to run to completion
iris.coord_categorisation.add_month(cube, 'time')
cube = cube.extract(iris.Constraint(month=month))
return cube
def mask_data(cube, fname, realm):
"""mask data over land or ocean, specifying realm"""
cube.data = np.ma.asarray(cube.data) #ensure maskable data
sftlf_cube = iris.load_cube(fname, 'land_area_fraction')
if realm == 'ocean':
cube.data.mask = np.where(sftlf_cube.data < 50, True, False) #vectorised argument to create mask
elif realm == 'land':
cube.data.mask = np.where(sftlf_cube.data > 50, True, False)
    return cube
def convert_pr_units(cube):
"""Convert kg m-2 s-1 to mm day-1"""
assert cube.units=='kg m-2 s-1', 'units of initial data should be kg m-2 s-1'
cube.data = cube.data * 86400
cube.units = 'mm/day'
return cube
def plot_data(cube, month, gridlines=False, levels=None):
"""Plot the data."""
fig = plt.figure(figsize=[12,5])
iplt.contourf(cube, levels=levels, cmap=cmocean.cm.haline_r, extend='max')
plt.gca().coastlines()
if gridlines:
plt.gca().gridlines()
cbar = plt.colorbar()
cbar.set_label(str(cube.units))
title = '%s precipitation climatology (%s)' %(cube.attributes['model_id'], month)
plt.title(title)
def main(inargs):
"""Run the program."""
inlogs = {}
my_log = cmdprov.new_log()
cube = read_data(inargs.infile, inargs.month)
inlogs[inargs.infile] = cube.attributes['history'] #add data history
cube = convert_pr_units(cube)
if type(inargs.mask) is list:
assert inargs.mask[1]=='land' or inargs.mask[1]=='ocean', 'mask should specify land or ocean'
cube = mask_data(cube, inargs.mask[0], inargs.mask[1])
inlogs[inargs.mask[0]] = cube.attributes['history'] #add mask history
clim = cube.collapsed('time', iris.analysis.MEAN)
plot_data(clim, inargs.month, inargs.gridlines, inargs.cbar_levels)
plt.savefig(inargs.outfile+'.png')
my_log = cmdprov.new_log(infile_history=inlogs, git_repo='.')
cmdprov.write_log(inargs.outfile+'.log', my_log)
if __name__ == '__main__':
description = 'Plot the precipitation climatology for a given month.'
parser = argparse.ArgumentParser(description=description)
parser.add_argument("infile", type=str, help="Input file name")
# parser.add_argument("month", type=str, choices=['Jan','Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'], help="Month to plot")
import calendar
parser.add_argument("month", type=str, choices=calendar.month_abbr[1:], help="Month to plot")
parser.add_argument("outfile", type=str, help="Output file name")
parser.add_argument("-g", "--gridlines", help="add gridlines", default=False,
action="store_true")
parser.add_argument("-m", "--mask", type=str, nargs=2, metavar=('SFTLF_FILE','REALM'), default=None,
help="apply land or ocean mask (specify realm to mask)")
parser.add_argument("-l", "--cbar_levels", type=float, nargs='*', default=None,
help='list of levels / tick marks to appear on the colorbar')#list of float numbers, as in: 0 1 2 3 4 5 6
# parser.add_argument("-l", "--cbar_levels", type=float, nargs=3, default=None,
# help='list of levels / tick marks to appear on the colorbar')#list of float numbers, as in: 0 1 2 3
args = parser.parse_args()
main(args)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
import numpy as np
def default_number_of_knots(x):
return min(len(np.unique(x)) // 4, 35)
def add_boundary_knots(ks, p):
b0 = np.asarray([ks[0] for _ in range(p)])
b1 = np.asarray([ks[-1] for _ in range(p)])
return np.hstack((b0, ks, b1))
def make_knots(x, k, boundary=0):
pcts = [100 * (n + 2) / (k + 2) for n in range(k - 2 * boundary)]
knots = np.percentile(x, pcts)
if boundary > 0:
knots = add_boundary_knots(knots, boundary)
return knots
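# Usage sketch: make_knots(x, 5) places 5 interior knots at the 100*(n+2)/7
# percentiles of x (n = 0..4); make_knots(x, 11, boundary=3) yields 5 interior
# knots padded with the first/last knot repeated 3 times at each end, e.g. for
# cubic B-spline bases.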
|
'''
Digits
Given two integers, n and m, how many digits does n**m have?
Examples:
2 and 10 - 2**10 = 1024 - 4 digits
3 and 9 - 3**9 = 19683 - 5 digits
Input
The input is composed of several test cases. The first line has an integer C, representing the number of test cases.
The following C lines contain two integers N and M (1 <= N, M <= 100).
Output
For each input test case of your program, you must print an integer containing the number of digits of the
result of the calculated power in the respective test case.
'''
C = int(input())
for x in range(C):
a = input().split(" ")
n = int(a[0])
m = int(a[1])
aux = n**m
count = 0
while aux > 0:
aux = aux // 10
count = count + 1
print(count)
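# Alternative sketch: len(str(n ** m)) gives the same count exactly (Python ints
# are arbitrary precision); math.floor(m * math.log10(n)) + 1 also works for the
# stated bounds but can suffer float rounding very close to powers of 10.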
|
from selenium.webdriver.remote.remote_connection import RemoteConnection as RemoteConnection
class FirefoxRemoteConnection(RemoteConnection):
def __init__(self, remote_server_addr, keep_alive: bool = ...) -> None: ...
|
# Generated by Django 3.2.5 on 2021-07-27 17:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('App', '0002_email_verification'),
]
operations = [
migrations.AddField(
model_name='profile',
name='image',
field=models.ImageField(blank=True, default='media/images/profile.png', upload_to='media/images/'),
),
]
|
import pandas as pd
from nipype.pipeline.engine import Node, Workflow, MapNode
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
import nipype.interfaces.fsl as fsl
import nipype.interfaces.freesurfer as fs
import nipype.interfaces.afni as afni
import nipype.interfaces.nipy as nipy
import nipype.algorithms.rapidart as ra
from nipype.algorithms.misc import TSNR
import nipype.interfaces.ants as ants
import nilearn.image as nli
from functions import strip_rois_func, get_info, median, motion_regressors, extract_noise_components, selectindex, fix_hdr
from linear_coreg import create_coreg_pipeline
from nonlinear_coreg import create_nonlinear_pipeline
# read in subjects and file names
df=pd.read_csv('/scr/ilz3/myelinconnect/subjects.csv', header=0)
subjects_db=list(df['DB'])
# sessions to loop over
sessions=['rest1_1'] # ,'rest1_2', 'rest2_1', 'rest2_2']
# directories
working_dir = '/scr/ilz3/myelinconnect/working_dir/'
data_dir= '/scr/ilz3/myelinconnect/'
out_dir = '/scr/ilz3/myelinconnect/final_struct_space/rest1_1_trans'
# set fsl output type to nii.gz
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
# main workflow
smooth = Workflow(name='smooth')
smooth.base_dir = working_dir
smooth.config['execution']['crashdump_dir'] = smooth.base_dir + "/crash_files"
# iterate over subjects
subject_infosource = Node(util.IdentityInterface(fields=['subject']),
name='subject_infosource')
subject_infosource.iterables=[('subject', subjects_db)]
# iterate over sessions
session_infosource = Node(util.IdentityInterface(fields=['session']),
name='session_infosource')
session_infosource.iterables=[('session', sessions)]
# select files
templates={'rest': 'final_struct_space/rest1_1_trans/{subject}_{session}_denoised_trans.nii.gz'
}
selectfiles = Node(nio.SelectFiles(templates, base_directory=data_dir),
name="selectfiles")
smooth.connect([(subject_infosource, selectfiles, [('subject', 'subject')]),
(session_infosource, selectfiles, [('session', 'session')])
])
# smooth by 3 mm
blur = Node(afni.Merge(blurfwhm=3,
doall=True,
outputtype='NIFTI_GZ'),
name='blur')
smooth.connect([(selectfiles, blur, [('rest', 'in_file')])])
# sink relevant files
sink = Node(nio.DataSink(parameterization=False,
base_directory=out_dir),
name='sink')
smooth.connect([(blur, sink, [('out_file', '@blur')])
])
smooth.run(plugin='MultiProc', plugin_args={'n_procs' : 9})
|
import unittest
from katas.kyu_7.filter_long_words import filter_long_words
class FilterLongWordsTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(filter_long_words(
'The quick brown fox jumps over the lazy dog', 4),
['quick', 'brown', 'jumps'])
|
REST_HOST = '0.0.0.0'
REST_PORT = '5000'
|
#!/usr/bin/python3
age = int(input("please input dog's age:"))
#print("")
if age < 0:
print("are you kidding me ?")
elif age == 1:
print("equal age 14 years old")
elif age == 2:
print("equal age 22 years old")
elif age > 2:
human = 22 + (age - 2)*5
print("equal human's age is: ", human)
# pause so the console window stays open before exit
input("press Enter to exit!")
|
import requests
import json
import numpy as np
import os
import re
threads = []
# url = 'http://srv-s2d16-22-01/es/'
url = 'http://srv-s2d16-22-01'
post = ':11001/'
get = '/es/'
dataindex = 'vdm'
# scroll_params = {
# 'size':100,
# 'scroll': '1m'
# }
# r = requests.get(url + get + 'data-' + dataindex + '/_search', params=scroll_params)
# j = r.json()
# scroll_id = j['_scroll_id']
# hits = j['hits']['hits']
# print len(hits)
# while True:
# scroll_params = {
# 'scroll': '1m',
# 'scroll_id': scroll_id,
# 'size':100
# }
# print scroll_params['scroll_id']
# r = requests.get(url + get + '_search/scroll', params=scroll_params)
# if r.status_code != 200:
# print r.text
# print 'statuscode 200'
# break
# # here handle failure
# j = r.json()
# print len(j['hits']['hits'])
# for hit in j['hits']['hits']:
# try:
# print hit['_source']['data']['timestamp']
# break
# except:
# continue
# if len(j['hits']['hits']) == 0:
# break
# hits = hits + j['hits']['hits']
# scroll_id = j['_scroll_id']
# print scroll_id
vdmemit = {}
# with open('web.json','w') as f:
# json.dump(hits,f)
# requests.delete(url + get + 'data-' + dataindex + '-*')
# with open('/brildata/vdmoutput/Automation/web.json','r') as f:
# hits = json.load(f)
# for hit in hits:
# if 'data' in hit['_source']:
# data = hit['_source']['data']
# if data['timestamp'] == 1501214335:
# vdmemit[data['detector']] = data['sigmavis_avg']
# for hit in hits:
# if 'data' in hit['_source']:
# data = hit['_source']['data']
automation = '/brildata/vdmoutput/Automation/Analysed_Data/'
ratiofolder = '6016_28Jul17_055855_28Jul17_060210'
for j in os.listdir(automation+ratiofolder):
if j[-4:]!='json': continue
with open(automation+ratiofolder+'/' +j) as f:
data = json.load(f)
vdmemit[data['detector']] = data['sigmavis_avg']
for folder in os.listdir(automation):
for js in os.listdir(automation+folder):
if js[-4:]!='json' or 'UTCA' in js or re.match('output\d{4}[A-Z1]*([_0-9]*)[A-Za-z]*\d?\.json',js).groups()[0]!='': continue
print(js)
with open(automation+folder+'/' +js) as f:
data = json.load(f)
if data['fill']==6399 and data['detector']=='PLT':break
if data['detector'] == 'BCM1F':
data['detector'] = 'BCM1FPCVD'
if data['sigmavis_avg'] < 0 or data['sigmavis_avg'] > 5000 or data['detector'] not in vdmemit.keys():
continue
sig = data['sigmavis_bx']
sbil = data['sbil_bx']
lsig,lsbil,tsig,tsbil = [],[],[],[]
for i,j in enumerate(sig):
if j!=0:
if i==0 or sig[i-1]==0:
lsig.append(j)
lsbil.append(sbil[i])
else:
tsig.append(j)
tsbil.append(sbil[i])
lm = np.mean(lsig)
lsbil = [i/j*lm for i,j in zip(lsbil,lsig)]
try:
(la,lb),lcov = np.polyfit(lsbil,lsig, 1, cov=True)
data['linearity_lead'] = la*100/lm
data['linearity_lead_err'] = lcov[0,0]*100/lm
except:
print('leading', data['detector'], data['fill'], data['timestamp'], lsbil,lsig)
data['efficiency_lead'] = lm/vdmemit[data['detector']]
data['efficiency_lead_err'] = np.std(lsig)/vdmemit[data['detector']]
if tsig and len(tsig)>0:
tm = np.mean(tsig)
tsbil = [i/j*tm for i,j in zip(tsbil,tsig)]
try:
(ta,tb),tcov = np.polyfit(tsbil,tsig, 1, cov=True)
data['linearity_train'] = ta*100/tm
data['linearity_train_err'] = tcov[0,0]*100/tm
except:
                print('train', data['detector'], data['fill'], data['timestamp'], tsbil, tsig)
            data['efficiency_train'] = tm/vdmemit[data['detector']]
            data['efficiency_train_err'] = np.std(tsig)/vdmemit[data['detector']]
with open(automation+folder+'/'+js,'w') as f:
json.dump(data,f)
requests.post(url + post + dataindex, json.dumps(data))
# t = threading.Thread(target=deletescans,args=(j,))
# t.start()
# threads.append(t)
# for th in threads:
# th.join()
|
# Exercise 8.4 - Book
def areaTriangulo (base, altura):
area = (base * altura) / 2
return area
at = areaTriangulo(5, 8)
print(at)
|
def how_much_coffee(lst):
result = sum(a.isalpha() if a.islower() else 2 * a.isalpha() for a in lst)
return 'You need extra sleep' if result > 3 else result
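# Examples: how_much_coffee(['A', 'b', 'c']) -> 'You need extra sleep' (2+1+1 > 3);
# how_much_coffee(['a', 'b']) -> 2. Assumes the list holds single characters.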
|
import arcade
arcade.open_window(600, 600, "Drawing Example")
arcade.set_background_color(arcade.color.COOL_BLACK)
arcade.start_render()
# Draw the sea
arcade.draw_lrtb_rectangle_filled(0, 600, 250, 0, arcade.color.QUEEN_BLUE)
# Draw the moon
arcade.draw_circle_filled(300,450,140,arcade.color.PLATINUM)
# Draw the stars
arcade.draw_points([[50,580],[90,400],[100,500],[40,510],[60,350]],arcade.color.AUREOLIN,10)
arcade.draw_points([[400,580],[580,550],[500,400],[500,500],[530,350]],arcade.color.AUREOLIN,10)
# Draw the fish
arcade.draw_ellipse_filled(100,200, 70, 30, arcade.color.ARSENIC)
arcade.draw_triangle_filled(75,202,40,220,40,182,arcade.color.ARSENIC)
arcade.draw_point(120,202,arcade.color.WHITE,10)
arcade.finish_render()
arcade.run()
|
file1 = open("data.csv", "r")
file2 = open("cleaned_data.csv", "w")
file2.write(file1.readline())
lines = file1.readlines()
lines2 = []
file1.close()
for line in lines:
line_first = line[0:line.find(',')]
line_second = line[line.find(',')+1:]
new_line = ""
for c in line_first:
if (".\/*-+?'!\";´^#%&()[]={}|_¨~<>".find(c) == -1 ):
new_line += c
else:
new_line += ' '
    new_line = new_line.strip()
new_line += (',' + line_second)
lines2.append(new_line)
for line in lines2:
file2.write(line.strip() + '\n')
file2.close()
|
from time import sleep
findAlpha = list('abcdefghijklmnopqrstuvwxyz')
for x in range(26):
findAlpha[x] = findAlpha[x].upper()
class Piece():
def __init__(self, colour, position):
self.position = position
self.colour = colour
def checkKill(self, dest): # check if the move is killing a piece of the opponent.
colour = self.colour
position = self.position
        src_x = findAlpha.index(position[0])  # 0-based column index of the letter
        src_y = int(position[1])              # 1-based row number
        self.dest = dest
        dest_x = findAlpha.index(dest[0])
        dest_y = int(dest[1])
if colour == "R":
if dest_x > src_x: # check if the destination move is 2 boxes away in the x and y direction
if dest_x - 2 == src_x and dest_y - 2 == src_y:
return True
else:
return False
elif dest_x < src_x:
if dest_x + 2 == src_x and dest_y - 2 == src_y:
return True
else:
return False
elif colour == "B":
if dest_x > src_x:
if dest_x - 2 == src_x and dest_y + 2 == src_y:
return True
else:
return False
elif dest_x < src_x:
if dest_x + 2 == src_x and dest_y + 2 == src_y:
return True
else:
return False
def checkLegal(self, dest): # check if the move being made is a legal move.
colour = self.colour
position = self.position
        src_x = findAlpha.index(position[0])  # 0-based column index of the letter
        src_y = int(position[1])              # 1-based row number
        self.dest = dest
        dest_x = findAlpha.index(dest[0])
        dest_y = int(dest[1])
        x = src_x      # 0-based indices into layout (assumed mapping for the checks below)
        y = src_y - 1
        columnNum = src_x + 1  # cartesian column number
        rowNum = src_y
        if self.checkKill(dest) == False:
if colour == "R":
if columnNum >= 2 and columnNum <= 7: # check if its in in the center portion of the board
for i in [-1, 1]:
if type(layout[x + i][y + 1]) != Piece:
return True
else:
return False
elif columnNum == 1: # check if its on left/right edges
if type(layout[x + 1][y + 1]) != Piece:
return True
else:
return False
elif columnNum == 8:
if type(layout[x - 1][y + 1]) != Piece:
return True
else:
return False
if colour == "B":
if columnNum >= 2 and columnNum <= 7:
for i in [-1, 1]:
if type(layout[x + i][y - 1]) != Piece:
return True
else:
return False
elif columnNum == 1:
if type(layout[x + 1][y - 1]) != Piece:
return True
else:
return False
elif columnNum == 8:
if type(layout[x - 1][y - 1]) != Piece:
return True
else:
return False
def move(self, dest, layout):
self.position = dest
Board.updateDataStructure(layout)
class Board():
layout = []
num_co = [t for t in range(1, 9)]
alpha_co = ["A", "B", "C", "D", "E", "F", "G", "H"]
def __init__(self):
pass
def createBoard(self):
blank = True
start = 0
end = 8
for yCo in Board.num_co[0:3]:
for xCo in range(0, 8):
if blank == False:
Board.layout.append(Piece("R", (Board.alpha_co[xCo] + str(yCo))))
blank = True
elif blank == True:
Board.layout.append(Board.alpha_co[xCo] + str(yCo))
blank = False
blank = not blank
for yCo in Board.num_co[3:5]:
for xCo in range(0, 8):
Board.layout.append(Board.alpha_co[xCo] + str(yCo))
for yCo in Board.num_co[5:8]:
for xCo in range(0, 8):
if blank == False:
Board.layout.append(Piece("B", (Board.alpha_co[xCo] + str(yCo))))
blank = True
elif blank == True:
Board.layout.append(Board.alpha_co[xCo] + str(yCo))
blank = False
blank = not blank
return Board.layout
def createDataStructure(self, layout):
print()
print()
temp = []
layout_temp = []
for i in Board.alpha_co:
for j in layout:
if type(j) == Piece:
if j.position[0] == i:
temp.append(j)
else:
if j[0] == i:
temp.append(j)
layout_temp.append(temp)
temp = []
print(layout_temp)
return layout_temp
def updateDataStructure(layout):
updated = False
temp = []
loop1 = True
while not updated:
for x in range(0, len(layout)):
for y in range(0, len(layout[x])):
current_pos = findAlpha[x] + str(y + 1)
current_obj = layout[x][y]
if (x, y) != (8, 8) and loop1 == True:
if type(current_obj) == Piece:
if current_obj.position != current_pos:
temp.append(current_obj)
updated = False
layout[x][y] = current_pos
else:
for item in temp:
if item.position == current_pos:
layout[x][y] = item
temp.remove(item)
if len(temp) == 0:
updated = True
loop1 = False
def printLayout(self, layout):
temp = []
final = []
count = 0
for w in Board.num_co:
for x in layout:
for y in x:
if type(y) == Piece:
if y.position[1] == str(w):
temp.append(y.colour)
else:
if y[1] == str(w):
temp.append("X")
final.append(temp)
temp = []
for t in reversed(final):
print(" ".join(t))
board = Board()
pieces = board.createBoard()
layout = board.createDataStructure(pieces)
board.printLayout(layout)
print()
print()
layout[1][2].move('A5', layout)
board.printLayout(layout)
|
import random
import string
def random_string(minimum, maximum=None):
if maximum is None:
maximum = minimum
count = random.randint(minimum, maximum)
return "".join(random.choice(string.ascii_letters) for x in xrange(count))
def random_integer(digits):
start = 10 ** (digits - 1)
end = (10 ** digits) - 1
return random.randint(start, end)
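# Examples: random_string(5) -> five random letters; random_string(3, 8) -> a
# string of 3 to 8 letters; random_integer(3) -> an integer in [100, 999].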
|
#!/usr/bin/env python
import rospy
from hektar.msg import encoderPos, wheelVelocity, Move
from hektar.cfg import DeadReckonConfig
from dynamic_reconfigure.server import Server
from std_msgs.msg import Bool
MAX_SPEED = 127
class Dead_Reckon():
def __init__(self):
self.wheels_pub = rospy.Publisher("wheel_output", wheelVelocity, queue_size=1)
self.done_pub = rospy.Publisher("done_reckoning", Bool, queue_size=1)
self.ticks_to_stop = None
self.encoder_pos = None
self.target_posR = None
self.target_posL = None
self.at_target = False
self.dead_reckon = False
self.right_at_target = False
self.left_at_target = False
def drive_callback(self, encoder_pos):
wheels = wheelVelocity()
if self.dead_reckon:
if not self.at_target:
self.encoder_pos = encoder_pos
                if ((self.target_posR - encoder_pos.wheelR) > self.ticks_to_stop):
wheels.wheelR = MAX_SPEED
else:
self.right_at_target = True
                if ((self.target_posL - encoder_pos.wheelL) > self.ticks_to_stop):
wheels.wheelL = MAX_SPEED
else:
self.left_at_target = True
if self.right_at_target and self.left_at_target:
self.at_target = True
                self.wheels_pub.publish(wheels)
else:
wheels.wheelR = 0
wheels.wheelL = 0
                self.wheels_pub.publish(wheels)
                # done_pub is declared with std_msgs Bool, so publish a Bool flag
                self.done_pub.publish(Bool(data=True))
                self.dead_reckon = False
else:
pass
def dynamic_callback(self, config, level):
rospy.loginfo("""Reconfigure Request: {ticks_to_stop}""".format(**config))
self.ticks_to_stop = config['ticks_to_stop']
return config
def encoder_command_callback(self, delta_ticks):
self.target_posR = self.encoder_pos.wheelR + delta_ticks.wheelR
self.target_posL = self.encoder_pos.wheelL + delta_ticks.wheelL
self.at_target = False
self.dead_reckon = True
self.right_at_target = False
self.left_at_target = False
def control():
rospy.init_node("dead_reckon", anonymous=True)
reckoner = Dead_Reckon()
dynam_srv = Server(DeadReckonConfig, reckoner.dynamic_callback)
rospy.Subscriber('encoder_pos', encoderPos, reckoner.drive_callback, queue_size=1, tcp_nodelay=False)
rospy.Subscriber('encoder_command', Move, reckoner.encoder_command_callback, queue_size=1, tcp_nodelay=False)
rospy.spin()
if __name__ == '__main__':
try:
control()
except rospy.ROSInterruptException: pass
|
# Generated by Django 3.0.5 on 2020-04-28 14:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='product',
name='price',
field=models.FloatField(default=0),
preserve_default=False,
),
migrations.AlterField(
model_name='product',
name='id',
field=models.IntegerField(editable=False, primary_key=True, serialize=False, unique=True),
),
]
|
'''
Created on Jul 15, 2013
@author: emma
'''
from UnitTesting.page_objects.base_page_object import base_page_object
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import time
class acp_profile(base_page_object):
def __init__(self, webd_wrap):
base_page_object.__init__(self, webd_wrap)
def confirm_page(self):
''' raises AssertionError if page is incorrect '''
self._webd_wrap.wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "ul[class='ui-button-set ui-button-set-170px']")), 'Not on a Profile page.')
_url = self._webd_wrap._driver.current_url
if not _url.startswith(self._webd_wrap._baseURL + '/profile'):
raise AssertionError("Not on a profile page.")
def click_my_zola(self):
self.confirm_page()
time.sleep(2)
self._webd_wrap._driver.find_element_by_id('h-user-personalized-toolbar').find_element_by_xpath('div/a').click()
########################################################################
########################################################################
def click_follow(self):
''' clicks the follow link '''
self.confirm_page()
self._webd_wrap._driver.find_element_by_class_name('l-sidebar-primary').find_element_by_xpath('section/header/ul/li[1]/a[1]').click()
def star_first_activity(self):
''' stars the first activity on the page '''
self.confirm_page()
self._webd_wrap._driver.find_element_by_id('activity-container').find_element_by_xpath('section[1]').find_element_by_xpath('ul[1]/li/a').click()
########################################################################
########################################################################
def get_name(self):
self.confirm_page()
_name = self._webd_wrap._driver.find_element_by_class_name('l-main-primary').find_element_by_xpath('div/header/div[2]/div/h2/span')
return _name.text
|
import ast
from decimal import Decimal, ROUND_HALF_UP
import datetime, pytz, time, json
import quicklook.calculations.garmin_calculation
from quicklook.calculations.converter.fitbit_to_garmin_converter import timestring_to_datetime
def apple_steps_minutly_to_quartly(summary_date,steps):
quaterly_data_list = []
if steps:
quarterly_data = {}
for step in steps:
start_time = step['Start date'][11:]
start_time_in_seconds = timestring_to_datetime(summary_date,start_time)
hour = start_time_in_seconds.hour
quarter = quicklook.calculations.garmin_calculation.\
which_quarter(start_time_in_seconds)
if not quarterly_data.get(hour,None):
quarter1 = "{}:00:00".format(hour)
quarter2 = "{}:15:00".format(hour)
quarter3 = "{}:30:00".format(hour)
quarter4 = "{}:45:00".format(hour)
quarterly_data[hour] = {
0:{"time":quarter1, "value":0, "activeSeconds":0},
1:{"time":quarter2, "value":0, "activeSeconds":0},
2:{"time":quarter3, "value":0, "activeSeconds":0},
3:{"time":quarter4, "value":0, "activeSeconds":0}
}
active_seconds = 0
if(step.get('steps',0)):
active_seconds = 60
steps_per_minute = round(step.get('steps',0.0))
quarterly_data[hour][quarter]["value"] += steps_per_minute
quarterly_data[hour][quarter]["activeSeconds"] += active_seconds
quaterly_data_list = [quaterly for hour in quarterly_data.values()
for quaterly in hour.values()]
return quaterly_data_list
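# Example (hedged, assuming which_quarter maps minutes 0-14/15-29/30-44/45-59 to
# quarters 0-3): a minute record starting at 10:17 with 30 steps adds 30 to the
# "10:15:00" bucket's value and 60 to its activeSeconds.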
def get_epoch_offset_from_timestamp(timestamp, timezone):
'''
    Parse a timestamp string and return the POSIX timestamp
    (time in seconds in UTC) and the UTC offset. For example - if timestamp
is "2018-08-19T15:46:35.000-05:00", then returned value would be a
tuple like this - (POSIX timestamp, UTC offset in seconds)
So, for timestamp in example, it would be - (1534711595, -18000)
Args:
timestamp(str): Timestamp in string
'''
if timezone == 'nil' or timezone == '(null)':
timezone = 0
if timestamp and timezone:
try:
continent,country = timezone.split("-")
except:
continent,country = timezone.split("/")
if continent[-1] == "\\":
continent = continent[:-1]
timezone = continent+'/'+country
local = pytz.timezone(timezone)
naive = datetime.datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S")
local_dt = local.localize(naive, is_dst=None)
utc_dt = local_dt.astimezone(pytz.utc).timetuple()
time_in_utc_seconds = int(time.mktime(utc_dt))
zone = pytz.timezone(timezone)
offset = zone.utcoffset(datetime.datetime.now()).total_seconds()
return (time_in_utc_seconds,offset)
else:
naive = datetime.datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S")
utc_dt = naive.timetuple()
time_in_utc_seconds = int(time.mktime(utc_dt))
return (time_in_utc_seconds,0)
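# Usage sketch (hedged):
#   get_epoch_offset_from_timestamp("2018-08-19 15:46:35", "America/New_York")
# returns (posix_seconds, -14400.0) while DST is in effect. Two caveats: the
# offset is computed at datetime.now() rather than at the given timestamp, and
# time.mktime() interprets the UTC timetuple in the process's local timezone,
# so the epoch value is exact only on a UTC-configured host.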
def apple_to_garmin_activities(active_summary):
if active_summary:
result_reponse = []
for each in active_summary:
garmin_activites = {
'summaryId': '',
'durationInSeconds': None,
'startTimeInSeconds': None,
'startTimeOffsetInSeconds': None,
'activityType': '',
'averageHeartRateInBeatsPerMinute': None,
'averageRunCadenceInStepsPerMinute': None,
'averageSpeedInMetersPerSecond': 0,
'averagePaceInMinutesPerKilometer': 0,
'activeKilocalories': None,
'deviceName': 'forerunner935',
'distanceInMeters': 0,
'maxHeartRateInBeatsPerMinute': None,
'maxPaceInMinutesPerKilometer': None,
'maxRunCadenceInStepsPerMinute': None,
'maxSpeedInMetersPerSecond': None,
'startingLatitudeInDegree': None,
'startingLongitudeInDegree': None,
'steps': 0,
'totalElevationGainInMeters': None,
'totalElevationLossInMeters': None,
'resting_hr_last_night' : None
}
start_time_in_sec,start_time_offset_in_sec = get_epoch_offset_from_timestamp(each['Start date'],each['TimeZone'])
garmin_activites['summaryId'] = str(each['ActivityID'])
garmin_activites['distanceInMeters'] = int(float(each['Distance']))
garmin_activites['durationInSeconds'] = int(each['Duration'])
garmin_activites['startTimeInSeconds'] = start_time_in_sec
garmin_activites['startTimeOffsetInSeconds'] = start_time_offset_in_sec
garmin_activites['activityType'] = each['WorkoutType']
garmin_activites['activeKilocalories'] = each['totalEnergyBurned']
if each.get('AverageHearthRate'):
garmin_activites['averageHeartRateInBeatsPerMinute'] = round(float(each.get('AverageHearthRate')))
garmin_activites['steps'] = round(float(each.get('steps',0)))
result_reponse.append(garmin_activites)
return result_reponse
else:
return None
|
# Generated by Django 2.2.6 on 2019-11-11 17:42
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tasks', '0014_auto_20191111_1741'),
]
operations = [
migrations.AlterField(
model_name='task',
name='due_date',
field=models.DateField(default=datetime.date(2019, 11, 11), verbose_name='due date'),
),
migrations.AlterField(
model_name='task',
name='due_time',
field=models.TimeField(default=datetime.time(17, 42, 38, 375366), verbose_name='due time'),
),
]
|
import base64
import MySQLdb as mdb
from . import view_all
def main():
port='65535'
pwd='cnmf.net.cn'
group=''
con = None
ip=view_all.ip
try:
con = mdb.connect('127.0.0.1', 'root','xx', 'test')
cur = con.cursor()
cur.execute("select * from xx")
data = cur.fetchall()
port=data[-1][1].replace('\n','')
print(port)
finally:
if con:
con.close()
ssr=ip+':'+port+':auth_sha1_v4:aes-192-cfb:http_simple:'+base64.b64encode(pwd.encode()).decode().replace('=','')+'/?obfsparam=&group='+base64.b64encode(group.encode()).decode()
print(ssr)
#xx
def _encode(s):
_t1=base64.b64encode(s.encode())
_t2=base64.b64encode(b'ssr://'+_t1).decode()
print(_t2)
return _t2
def _decode(s):
    return base64.b64decode(s).decode()
with open('C:/xampp/htdocs/xx.html','w') as f:
f.write(_encode(ssr))
if __name__ == "__main__":
main()
|
"""Supervisor definition anc control.
Linux:
Manages daemontools-like services inside the container.
For each application container there may be multiple services defined, which
are controlled by skarnet.org s6 supervision suite.
Application container is started in chrooted environment, and the root
directory structure::
/
/services/
foo/
bar/
Application container is started with the supervisor monitoring the services
directory using 'svscan /services'. The svscan becomes the container 'init' -
parent to all processes inside the container.
Treadmill will put svscan inside relevant cgroup hierarchy and subsystems.
Once started, services are added by creating a subdirectory for each service.
The following files are created in each directory:
- run
- app.sh
The run file is executed by s6-supervise. The run file will perform the
following actions:
- setuidgid - change execution context to the proid
- softlimit - part of the suite, set process limits
- setlock ../../<app.name> - this will create a lock monitored by Treadmill,
so that Treadmill is notified when the app exits.
- exec app.sh
All services will be started by the Treadmill runtime using the 's6-svc'
utility. Each service will be started with the 'svc -o' (run once) option, and
Treadmill will be responsible for restarts and for maintaining the restart count.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import enum
import errno
import json
import logging
import os
import time
import jinja2
from treadmill import fs
from treadmill import utils
from treadmill import subproc
from . import _service_base
from . import _utils as supervisor_utils
if os.name == 'nt':
from . import winss as sup_impl
_PREFIX = 'winss'
else:
# Disable C0411: standard import "import pwd" comes before "import enum"
import pwd # pylint: disable=C0411
from . import s6 as sup_impl
_PREFIX = 's6'
_LOGGER = logging.getLogger(__name__)
JINJA2_ENV = jinja2.Environment(loader=jinja2.PackageLoader(__name__))
# svc exits 111 if it cannot send a command.
ERR_COMMAND = 111
# svc exits 100 if no supervise process is running on servicedir.
ERR_NO_SUP = 100
# svc exits 99 if a timed request timeouts.
ERR_TIMEOUT = 99
EXITS_DIR = 'exits'
def open_service(service_dir, existing=True):
"""Open a service object from a service directory.
:param ``str`` service_dir:
Location of the service to open.
:param ``bool`` existing:
Whether the service must already exist
:returns ``_service_base.Service``:
Instance of a service
"""
if not isinstance(service_dir, _service_base.Service):
svc_data = _service_base.Service.read_dir(
service_dir
)
if svc_data is None:
if existing:
raise ValueError('Invalid Service directory: %r' % service_dir)
else:
svc_type = _service_base.ServiceType.LongRun
svc_basedir = os.path.dirname(service_dir)
svc_name = os.path.basename(service_dir)
else:
svc_type, svc_basedir, svc_name = svc_data
return sup_impl.create_service(
svc_basedir=svc_basedir,
svc_name=svc_name,
svc_type=svc_type
)
return service_dir
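# Usage sketch: open_service('/services/foo') returns an s6/winss LongrunService
# for an existing service directory; with existing=False, a missing directory
# yields a fresh LongRun service object instead of raising ValueError.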
# Disable W0613: Unused argument 'kwargs' (for s6/winss compatibility)
# pylint: disable=W0613
def _create_scan_dir_s6(scan_dir, finish_timeout, wait_cgroups=None,
kill_svc=None, **kwargs):
"""Create a scan directory.
:param ``str`` scan_dir:
Location of the scan directory.
:param ``int`` finish_timeout:
The finish script timeout.
:param ``str`` wait_cgroups:
Instruct the finish procedure to wait on all processes in the cgroup.
:param ``str`` kill_svc:
The service to kill before shutdown.
:returns ``_service_dir_base.ServiceDirBase``:
Instance of a service dir
"""
if not isinstance(scan_dir, sup_impl.ScanDir):
scan_dir = sup_impl.ScanDir(scan_dir)
svscan_finish_script = utils.generate_template(
's6.svscan.finish',
timeout=finish_timeout,
wait_cgroups=wait_cgroups,
_alias=subproc.get_aliases()
)
scan_dir.finish = svscan_finish_script
svscan_sigterm_script = utils.generate_template(
's6.svscan.sigterm',
kill_svc=kill_svc,
_alias=subproc.get_aliases()
)
scan_dir.sigterm = svscan_sigterm_script
svscan_sighup_script = utils.generate_template(
's6.svscan.sighup',
kill_svc=kill_svc,
_alias=subproc.get_aliases()
)
scan_dir.sighup = svscan_sighup_script
svscan_sigint_script = utils.generate_template(
's6.svscan.sigint',
kill_svc=kill_svc,
_alias=subproc.get_aliases()
)
scan_dir.sigint = svscan_sigint_script
svscan_sigquit_script = utils.generate_template(
's6.svscan.sigquit',
kill_svc=kill_svc,
_alias=subproc.get_aliases()
)
scan_dir.sigquit = svscan_sigquit_script
return scan_dir
# Disable W0613: Unused argument 'kwargs' (for s6/winss compatibility)
# pylint: disable=W0613
def _create_scan_dir_winss(scan_dir, finish_timeout, kill_svc=None, **kwargs):
"""Create a scan directory.
:param ``str`` scan_dir:
Location of the scan directory.
:param ``int`` finish_timeout:
The finish script timeout.
:param ``str`` kill_svc:
The service to kill before shutdown.
:returns ``_service_dir_base.ServiceDirBase``:
Instance of a service dir
"""
if not isinstance(scan_dir, sup_impl.ScanDir):
scan_dir = sup_impl.ScanDir(scan_dir)
svscan_finish_script = utils.generate_template(
'winss.svscan.finish',
timeout=finish_timeout,
scan_dir=scan_dir.directory,
_alias=subproc.get_aliases()
)
scan_dir.finish = svscan_finish_script
svscan_sigterm_script = utils.generate_template(
'winss.svscan.sigterm',
kill_svc=kill_svc,
_alias=subproc.get_aliases()
)
scan_dir.sigterm = svscan_sigterm_script
return scan_dir
# Disable C0103: Invalid constant name "create_service"
# pylint: disable=C0103
if _PREFIX == 'winss':
create_scan_dir = _create_scan_dir_winss
else:
create_scan_dir = _create_scan_dir_s6
def create_environ_dir(env_dir, env, update=False):
"""Create/update environment directory for the supervisor.
"""
fs.mkdir_safe(env_dir)
supervisor_utils.environ_dir_write(
env_dir, env,
update=update
)
def read_environ_dir(env_dir):
"""Read an existing environ directory into a ``dict``.
:returns:
``dict`` - Dictionary of environment variables.
"""
try:
return supervisor_utils.environ_dir_read(env_dir)
except (OSError, IOError) as err:
if err.errno == errno.ENOENT:
return {}
else:
raise
# Disable W0613: Unused argument 'kwargs' (for s6/winss compatibility)
# pylint: disable=W0613
def _create_service_s6(base_dir,
name,
app_run_script,
userid='root',
downed=False,
environ_dir=None,
environ=None,
environment='prod',
monitor_policy=None,
trace=None,
timeout_finish=None,
notification_fd=None,
call_before_run=None,
call_before_finish=None,
run_script='s6.run',
log_run_script='s6.logger.run',
finish_script='s6.finish',
logger_args=None,
**kwargs):
"""Initializes service directory.
Creates run, finish scripts as well as log directory with appropriate
run script.
"""
# Disable R0912: Too many branches
# pylint: disable=R0912
try:
user_pw = pwd.getpwnam(userid)
except KeyError:
# Check the identity we are going to run as. It needs to exists on the
# host or we will fail later on as we try to seteuid.
_LOGGER.exception('Unable to find userid %r in passwd database.',
userid)
raise
if isinstance(base_dir, sup_impl.ScanDir):
# We are given a scandir as base, use it.
svc = base_dir.add_service(name, _service_base.ServiceType.LongRun)
else:
svc = LongrunService(base_dir, name)
# Setup the environ
if environ is None:
svc_environ = {}
else:
svc_environ = environ.copy()
svc_environ['HOME'] = user_pw.pw_dir
svc.environ = svc_environ
if environment == 'prod':
ionice_prio = 5
else:
ionice_prio = 6
# Setup the run script
svc.run_script = utils.generate_template(
run_script,
user=userid,
shell=user_pw.pw_shell,
environ_dir=environ_dir,
trace=trace,
call_before_run=call_before_run,
_alias=subproc.get_aliases()
)
if monitor_policy is not None or call_before_finish is not None:
# Setup the finish script
svc.finish_script = utils.generate_template(
finish_script,
monitor_policy=monitor_policy,
trace=trace,
call_before_finish=call_before_finish,
_alias=subproc.get_aliases()
)
if log_run_script is not None:
if logger_args is None:
logger_args = '-b -p T n20 s1000000'
# Setup the log run script
svc.log_run_script = utils.generate_template(
log_run_script,
logdir=os.path.relpath(
os.path.join(svc.data_dir, 'log'),
svc.logger_dir
),
logger_args=logger_args,
_alias=subproc.get_aliases()
)
svc.default_down = bool(downed)
svc.notification_fd = notification_fd
if monitor_policy is not None:
svc.timeout_finish = 0
if monitor_policy['limit'] > 0:
exits_dir = os.path.join(svc.data_dir, EXITS_DIR)
fs.mkdir_safe(exits_dir)
fs.rm_children_safe(exits_dir)
else:
svc.timeout_finish = timeout_finish
svc.write()
# Write the app_start script
supervisor_utils.script_write(
os.path.join(svc.data_dir, 'app_start'),
app_run_script
)
return svc
# Disable W0613: Unused argument 'kwargs' (for s6/winss compatibility)
# pylint: disable=W0613
def _create_service_winss(base_dir,
name,
app_run_script,
downed=False,
environ=None,
monitor_policy=None,
timeout_finish=None,
run_script='winss.run',
log_run_script='winss.logger.run',
finish_script='winss.finish',
**kwargs):
"""Initializes service directory.
Creates run, finish scripts as well as log directory with appropriate
run script.
"""
if isinstance(base_dir, sup_impl.ScanDir):
# We are given a scandir as base, use it.
svc = base_dir.add_service(name, _service_base.ServiceType.LongRun)
else:
svc = LongrunService(base_dir, name)
# Setup the environ
if environ is None:
svc_environ = {}
else:
svc_environ = environ.copy()
svc.environ = svc_environ
# Setup the run script
svc.run_script = utils.generate_template(
run_script,
app_run_script=app_run_script,
_alias=subproc.get_aliases()
)
if monitor_policy is not None:
# Setup the finish script
svc.finish_script = utils.generate_template(
finish_script,
monitor_policy=monitor_policy,
_alias=subproc.get_aliases()
)
logdir = os.path.join(svc.data_dir, 'log')
fs.mkdir_safe(logdir)
if log_run_script is not None:
# Setup the log run script
svc.log_run_script = utils.generate_template(
log_run_script,
logdir=os.path.relpath(
logdir,
svc.logger_dir
),
_alias=subproc.get_aliases()
)
svc.default_down = bool(downed)
if monitor_policy is not None:
svc.timeout_finish = 0
if monitor_policy['limit'] > 0:
exits_dir = os.path.join(svc.data_dir, EXITS_DIR)
fs.mkdir_safe(exits_dir)
fs.rm_children_safe(exits_dir)
else:
svc.timeout_finish = timeout_finish
svc.write()
return svc
# Disable C0103: Invalid constant name "create_service"
# pylint: disable=C0103
if _PREFIX == 'winss':
create_service = _create_service_winss
else:
create_service = _create_service_s6
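# A hedged usage sketch (paths and names are illustrative): create a scan
# directory and register a long-running service in it, using only arguments
# that appear in the signatures above.
#
#     scan_dir = create_scan_dir('/var/svc', finish_timeout=5000)
#     svc = create_service(scan_dir, 'myapp',
#                          app_run_script='exec /bin/myapp',
#                          downed=True)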
class ServiceWaitAction(enum.Enum):
"""Enumeration of wait actions."""
# pylint complains: Invalid class attribute name "up"
up = 'u' # pylint: disable=C0103
down = 'd'
really_up = 'U'
really_down = 'D'
class ServiceControlAction(enum.Enum):
"""Enumeration of control actions."""
kill = 'k'
once = 'o'
once_at_most = 'O'
down = 'd'
# pylint complains: Invalid class attribute name "up"
up = 'u' # pylint: disable=C0103
exit = 'x'
class SvscanControlAction(enum.Enum):
"""Enumeration of control actions."""
alarm = 'a'
abort = 'b'
nuke = 'n'
quit = 'q'
exit = 'x'
def _get_cmd(cmd):
return _PREFIX + '_' + cmd
def _get_wait_action(action):
if os.name == 'nt' and action == ServiceWaitAction.really_up:
action = ServiceWaitAction.up
return action
def is_supervised(service_dir):
"""Checks if the supervisor is running."""
try:
subproc.check_call([_get_cmd('svok'), service_dir])
return True
except subproc.CalledProcessError as err:
# svok returns 1 when the service directory is not supervised.
if err.returncode == 1:
return False
else:
raise
def control_service(service_dir, actions, wait=None, timeout=0):
"""Sends a control signal to the supervised process.
:returns:
        ``True`` - Command was successful.
        ``False`` - Command timed out (only if `wait` was provided).
:raises ``subproc.CalledProcessError``:
With `returncode` set to `ERR_NO_SUP` if the service is not supervised.
With `returncode` set to `ERR_COMMAND` if there is a problem
communicating with the supervisor.
"""
cmd = [_get_cmd('svc')]
    # XXX: A bug in s6 2.6.0.0 causes the svc command to hang if the service
    # is not supervised. Disable for now.
    # This does not happen in winss, but we keep the behavior consistent.
# if wait:
# cmd.append('-w' + _get_wait_action(wait).value)
# if timeout > 0:
# cmd.extend(['-T{}'.format(timeout)])
action_str = '-'
for action in utils.get_iterable(actions):
action_str += action.value
cmd.append(action_str)
cmd.append(service_dir)
try:
subproc.check_call(cmd)
# XXX: Remove below when above bug is fixed.
if wait is not None:
wait_service(service_dir, wait, timeout=timeout)
except subproc.CalledProcessError as err:
if err.returncode == ERR_TIMEOUT:
return False
else:
raise
return True
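# Example (hypothetical service directory; the timeout is assumed to be in
# milliseconds, matching the usage in ensure_not_supervised below): bring a
# service down and wait until it is really down, tolerating a timeout.
#
#     ok = control_service('/var/svc/myapp',
#                          ServiceControlAction.down,
#                          wait=ServiceWaitAction.really_down,
#                          timeout=5000)
#     if not ok:
#         _LOGGER.warning('Service did not reach the down state in time.')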
def control_svscan(scan_dir, actions):
"""Sends a control signal to a svscan instance."""
action_str = '-'
for action in utils.get_iterable(actions):
action_str += action.value
subproc.check_call([_get_cmd('svscanctl'), action_str, scan_dir])
def wait_service(service_dirs, action, all_services=True, timeout=0):
"""Performs a wait task on the given list of service directories.
"""
cmd = [_get_cmd('svwait')]
if timeout > 0:
cmd.extend(['-t{}'.format(timeout)])
if not all_services:
cmd.append('-o')
cmd.append('-' + _get_wait_action(action).value)
cmd.extend(utils.get_iterable(service_dirs))
subproc.check_call(cmd)
def ensure_not_supervised(service_dir):
"""Waits for the service and log service to not be supervised."""
service_dirs = []
if is_supervised(service_dir):
service_dirs.append(service_dir)
log_dir = os.path.join(service_dir, 'log')
if os.path.exists(log_dir) and is_supervised(log_dir):
service_dirs.append(log_dir)
for service in service_dirs:
try:
# Close supervised process as it should have already
# been told to go down
control_service(service, ServiceControlAction.exit,
ServiceWaitAction.really_down,
timeout=1000)
except subproc.CalledProcessError:
# Ignore this as supervisor may be down
pass
count = 0
while is_supervised(service):
count += 1
if count == 600:
raise Exception(
'Service dir {0} failed to stop in 60s.'.format(service)
)
time.sleep(0.1)
ScanDir = sup_impl.ScanDir
LongrunService = sup_impl.LongrunService
ServiceType = _service_base.ServiceType
__all__ = [
'ERR_COMMAND',
'ERR_NO_SUP',
'EXITS_DIR',
'LongrunService',
'ScanDir',
'ServiceControlAction',
'ServiceType',
'ServiceWaitAction',
'SvscanControlAction',
'control_service',
'control_svscan',
'create_environ_dir',
'create_scan_dir',
'create_service',
'is_supervised',
'open_service',
'read_environ_dir',
'wait_service',
]
if _PREFIX == 's6':
BundleService = sup_impl.BundleService
OneshotService = sup_impl.OneshotService
__all__ += [
'BundleService',
'OneshotService',
]
|
import itertools
class Card:
def __init__(self, suit, value):
self.mSuit = suit
self.mValue = value
self.mColor = ""
if suit == "clubs" or "spades":
self.mColor = "black"
else:
self.mColor = "red"
def get_value(self):
return self.mValue
def get_suit(self):
return self.mSuit
def get_color(self):
return self.mColor
def generate_deck():
    """Return a standard 52-card deck: values 1-13 in each of the 4 suits."""
    deck = []
    for suit in ("clubs", "spades", "hearts", "diamonds"):
        for value in range(1, 14):
            deck.append(Card(suit, value))
    return deck
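# Quick sanity check (illustrative, not executed by the script): a full deck
# has 52 cards, 13 of them in each suit.
#
#     deck = generate_deck()
#     assert len(deck) == 52
#     assert sum(1 for card in deck if card.get_suit() == "hearts") == 13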
def royal_flush(hand):
# same suit, 10 J Q K A
has10 = False
hasj = False
hasq = False
hask = False
hasa = False
suit = hand[0].get_suit()
for card in hand:
if card.get_suit() != suit:
return False
v = card.get_value()
if v == 10:
has10 = True
elif v == 11:
hasj = True
elif v == 12:
hasq = True
elif v == 13:
hask = True
elif v == 1:
hasa = True
else:
return False
    return has10 and hasj and hasq and hask and hasa
def straight_flush(hand):
suit = hand[0].get_suit()
values = []
for card in hand:
if card.get_suit() != suit:
return False
values.append(card.get_value())
values.sort()
good = True
for i in range(len(values) - 1):
if values[i] != values[i + 1] - 1:
good = False
if values == [1, 10, 11, 12, 13]:
return True
    return good
def four_of_a_kind(hand):
value = hand[0].get_value()
notsame = 0
for card in hand:
if card.get_value() != value:
notsame += 1
if notsame > 1:
notsame = 0
value = hand[1].get_value()
for card in hand:
if card.get_value() != value:
notsame += 1
    return notsame <= 1
def full_house(hand):
    values = sorted(card.get_value() for card in hand)
    # Either pair + three of a kind or three of a kind + pair after sorting.
    pair_then_trips = (values[0] == values[1] and
                       values[2] == values[3] == values[4])
    trips_then_pair = (values[0] == values[1] == values[2] and
                       values[3] == values[4])
    return pair_then_trips or trips_then_pair
def flush(hand):
suit = hand[0].get_suit()
for card in hand:
if card.get_suit() != suit:
return False
return True
def straight(hand):
values = []
for card in hand:
values.append(card.get_value())
values.sort()
good = True
for i in range(len(values) - 1):
if values[i] != values[i + 1] - 1:
good = False
if values == [1, 10, 11, 12, 13]:
good = True
return good
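# Note: aces are stored as value 1, so A-2-3-4-5 sorts to [1, 2, 3, 4, 5] and
# is caught by the consecutive check; the explicit [1, 10, 11, 12, 13] case
# covers the ace-high straight (10-J-Q-K-A).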
def three_of_a_kind(hand):
# 123 124 125 134 135 145 234 235 245 345
values = []
for card in hand:
values.append(card.get_value())
values.sort()
# first three
if values[0] == values[1] and values[0] == values[2]:
return True
# second three
if values[1] == values[2] and values[1] == values[3]:
return True
# last three
if values[2] == values[3] and values[2] == values[4]:
return True
return False
def count_adjacent_pairs(hand):
    """Count adjacent equal values in the sorted hand."""
    values = sorted(card.get_value() for card in hand)
    return sum(1 for i in range(len(values) - 1) if values[i] == values[i + 1])
def two_pair(hand):
    return count_adjacent_pairs(hand) == 2
def pair(hand):
    return count_adjacent_pairs(hand) == 1
def main():
royal_flushes = 0
straight_flushes = 0
four_of_a_kinds = 0
full_houses = 0
flushes = 0
straights = 0
three_of_a_kinds = 0
two_pairs = 0
pairs = 0
nothings = 0
hands = 0
deck = generate_deck()
    for hand in itertools.combinations(deck, 5):
        hands += 1
        if royal_flush(hand):
            royal_flushes += 1
        elif straight_flush(hand):
            straight_flushes += 1
        elif four_of_a_kind(hand):
            four_of_a_kinds += 1
        elif full_house(hand):
            full_houses += 1
        elif flush(hand):
            flushes += 1
        elif straight(hand):
            straights += 1
        elif three_of_a_kind(hand):
            three_of_a_kinds += 1
        elif two_pair(hand):
            two_pairs += 1
        elif pair(hand):
            pairs += 1
        else:
            nothings += 1
print("Generated", hands, "combinations. ")
print("Out of all combinations the following occurred: ")
print("Royal Flush:", royal_flushes, "(%" + str(round((float(royal_flushes) * 100.00) / float(hands), 5)) + ")")
print("Straight Flush:", straight_flushes,
"(%" + str(round((float(straight_flushes) * 100.00) / float(hands), 5)) + ")")
print("Four of a Kind:", four_of_a_kinds,
"(%" + str(round((float(four_of_a_kinds) * 100.00) / float(hands), 5)) + ")")
print("Full House:", full_houses, "(%" + str(round((float(full_houses) * 100.00) / float(hands), 5)) + ")")
print("Flush:", flushes, "(%" + str(round((float(flushes) * 100.00) / float(hands), 5)) + ")")
print("Straight:", straights, "(%" + str(round((float(straights) * 100.00) / float(hands), 5)) + ")")
print("Three of a Kind:", three_of_a_kinds,
"(%" + str(round((float(three_of_a_kinds) * 100.00) / float(hands), 5)) + ")")
print("Two Pair:", two_pairs, "(%" + str(round((float(two_pairs) * 100.00) / float(hands), 5)) + ")")
print("Pair:", pairs, "(%" + str(round((float(pairs) * 100.00) / float(hands), 5)) + ")")
print("Nothing:", nothings, "(%" + str(round((float(nothings) * 100.00) / float(hands), 5)) + ")")
if __name__ == '__main__':
    main()
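# For reference, the standard frequencies out of C(52, 5) = 2,598,960 hands
# are: royal flush 4, straight flush 36, four of a kind 624, full house 3,744,
# flush 5,108, straight 10,200, three of a kind 54,912, two pair 123,552,
# one pair 1,098,240, nothing 1,302,540; a useful check on the counts printed
# above.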
|