text stringlengths 8 6.05M |
|---|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# 本题是算法课上的习题,与Leetcode无关。
# 给定一个以字符串形式表示的入栈序列,请求出一共有多少种可能的出栈顺序?如何输出所有可能的出栈序列?
# 比如入栈序列为:1 2 3 ,则出栈序列一共有五种,分别如下:1 2 3、1 3 2、2 1 3、2 3 1、3 2 1
from time import time
# Catalan!!!!!
# https://github.com/vo01github/Math/tree/master/%E7%BB%84%E5%90%88%E6%95%B0%E5%AD%A6/%E5%8D%A1%E7%89%B9%E5%85%B0%E6%95%B0
# https://zh.wikipedia.org/wiki/%E6%AC%A7%E4%BB%81%C2%B7%E6%9F%A5%E7%90%86%C2%B7%E5%8D%A1%E7%89%B9%E5%85%B0
class Solution(object):
    def OrderStack(self, num):
        """Enumerate every valid stack pop order for the push sequence 1..num.

        :type num: int -- number of items pushed, in increasing order 1..num
        :rtype: List[List[int]] -- all possible pop sequences (Catalan(num)
                of them).  Each sequence is also printed, preserving the
                original script's output; previously the method returned None.
        """
        res = []
        start = time()

        def dfs(balance, path, acc):
            # ``balance`` is pushes-minus-pops so far.  A pop on an empty
            # stack would drive it negative, which is invalid.  (The original
            # named this ``sum``, shadowing the builtin.)
            if balance < 0:
                return
            if len(path) == num * 2:
                # A complete operation string needs exactly num pushes and
                # num pops, i.e. balance back to zero.
                if balance == 0:
                    acc.append(path)
                return
            # -1 encodes a pop, +1 a push; the +/-1 encoding reduces the
            # validity check to a running sum (a Catalan-number lattice walk).
            dfs(balance - 1, path + [-1], acc)
            dfs(balance + 1, path + [1], acc)

        dfs(0, [], res)
        orders = []
        for ops in res:
            pending = [x for x in range(1, num + 1)]  # items not yet pushed
            popped = []
            stack = []
            for op in ops:
                if op == -1:
                    popped.append(stack.pop())
                else:
                    stack.append(pending.pop(0))
            print(popped)
            orders.append(popped)
        print('The traverse has cost {}s'.format(time() - start))
        return orders
if __name__ == '__main__':
    # Demo run: prints every pop order for n=10 plus the elapsed time.
    print(Solution().OrderStack(10))
|
import torch.nn as nn
class EncoderDecoderTF(nn.Module):
    """Original Transformer architecture that uses both the encoder and the
    decoder side (encode the source, then decode the target against it)."""

    def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = src_embed
        self.tgt_embed = tgt_embed
        self.generator = generator
        # Mirror a few attributes of the token-embedding layers (element 0
        # of each embedder) directly onto the model for convenient access.
        token_embedding = src_embed[0]
        self.model_dim = token_embedding.model_dim
        self.src_vocab_size = token_embedding.vocab_size
        self.tgt_vocab_size = tgt_embed[0].vocab_size

    def _encode(self, source, source_mask):
        """Embed the source sequence and run it through the encoder stack."""
        embedded = self.src_embed(source)
        return self.encoder(embedded, source_mask)

    def _decode(self, target, memory, target_mask, memory_mask):
        """Embed the target sequence and decode it against the encoder memory."""
        embedded = self.tgt_embed(target)
        return self.decoder(embedded, memory, target_mask, memory_mask)

    def forward(self, src, tgt, src_mask, tgt_mask):
        """Full pass; the source mask doubles as the memory mask for decoding."""
        memory = self._encode(src, src_mask)
        return self._decode(tgt, memory, tgt_mask, src_mask)
|
import sys, os, time, datetime
import wx
from tc_lib import sub, send
from pprint import pprint
import wx.lib.mixins.listctrl as listmix
e=sys.exit  # debugging shorthand: e() quits the app like sys.exit()
class MessageList(wx.ListCtrl):
    """Report-style list control showing log messages with severity icons.

    NOTE(review): relies on a module-level ``home`` directory for the icon
    paths -- it is not defined in this file; confirm the importing module
    provides it before this class is instantiated.
    """

    def __init__(self, win, parent, ID=wx.ID_ANY, pos=wx.DefaultPosition,
                 size=wx.DefaultSize,
                 style=wx.LC_REPORT|wx.LC_HRULES|wx.LC_VRULES|wx.LC_SORT_DESCENDING):
        wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
        self.win = win
        # Severity name -> bitmap; idx1 maps severity -> image-list index.
        self.idx = {}
        self.idx['INFO'] = wx.Bitmap(os.path.join(home, "images", "bullet_green_16.png"), wx.BITMAP_TYPE_PNG)
        self.idx['WARNING'] = wx.Bitmap(os.path.join(home, "images", "bullet_yellow_16.png"), wx.BITMAP_TYPE_PNG)
        self.idx['ERROR'] = wx.Bitmap(os.path.join(home, "images", "bullet_red_16.png"), wx.BITMAP_TYPE_PNG)
        self.idx1 = {}
        self.setImage('INFO')
        self.SetImageList(self.il, wx.IMAGE_LIST_SMALL)
        self.InsertColumn(0, "ID")
        self.InsertColumn(1, "Type")
        self.InsertColumn(2, "Logger")
        self.InsertColumn(3, "Message")
        self.SetColumnWidth(0, 100)
        self.SetColumnWidth(1, 40)
        self.SetColumnWidth(2, 80)
        self.SetColumnWidth(3, 400)
        self.counter = 0   # next row index to insert at
        self.msglog = []   # raw copy of every message ever added

    def setImage(self, status):
        """(Re)build the 16x16 image list from self.idx and attach it.

        ``status`` is currently unused; kept for interface compatibility.
        """
        self.il = wx.ImageList(16, 16)
        for name in self.idx:
            self.idx1[name] = self.il.Add(self.idx[name])
        self.SetImageList(self.il, wx.IMAGE_LIST_SMALL)

    def add(self, msg):
        """Append one message row.

        :param msg: (severity, sender, text, timestamp) sequence; severity
                    must be a key of self.idx ('INFO', 'WARNING', 'ERROR').
        """
        # ``severity`` was named ``type`` before, shadowing the builtin.
        severity, sender, text, ts = msg
        self.msglog.append(msg)
        row = self.counter
        item = self.InsertItem(row, ts)
        self.SetItem(item, 1, severity)
        self.SetItem(item, 2, str(sender))
        self.SetItem(item, 3, text)
        # The original had unreachable ``elif ex == ...`` branches after an
        # ``if True:`` guard (``ex`` was never defined); they were removed.
        self.SetItemImage(item, self.idx1[severity])
        # Zebra-stripe even rows for readability.
        if row % 2 == 0:
            self.SetItemBackgroundColour(row, '#e6f1f5')
        self.counter += 1
class BusyFrame(wx.Frame):
    """Floating log window holding a MessageList, fed via pub/sub events.

    The frame starts hidden, minimises instead of closing, and is restored
    through the "raise_log_window" subscription.
    """

    # Borderless, shaped, floats on its parent and stays off the taskbar.
    style = wx.CLIP_CHILDREN|wx.RESIZE_BORDER|wx.CLOSE_BOX|wx.FRAME_SHAPED|wx.FRAME_NO_TASKBAR|wx.NO_BORDER|wx.CAPTION|wx.FRAME_FLOAT_ON_PARENT

    def __init__(self, parent, id, title, app, size=(450,650), style=style):
        wx.Frame.__init__(self, parent, id, title, size=size, style=style)
        self.parent = parent
        self.style = style
        self.app = app
        self.Hide()
        self.Freeze()  # batch the initial layout work; Thaw() below
        if self.app.settings.ReadBool('GUI/load_default_state_on_start', True):
            self.method_load_default_state()
        self.panel = wx.Panel(self)
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.list = MessageList(self, self.panel, size=wx.Size(-1, 150))
        sizer.Add(self.list, 1, wx.EXPAND)
        b = wx.Button(self.panel, -1, "Close", size=(-1, 23))
        sizer.Add(b, 0, wx.RIGHT|wx.ALIGN_RIGHT)
        self.panel.SetSizer(sizer)
        self.timer = []
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self.Bind(wx.EVT_ICONIZE, self.OnIconize)
        self.Bind(wx.EVT_LEFT_DCLICK, self.OnDblClick)
        self.Bind(wx.EVT_MOUSE_EVENTS, self.OnMouseEvt)
        self.Bind(wx.EVT_SIZE, self.OnResizeWindow)
        self.Bind(wx.EVT_BUTTON, self.OnButton, b)
        self.Bind(wx.EVT_CHAR_HOOK, self.OnKeyUP)
        # Pub/sub wiring.  NOTE(review): the original subscribed
        # OnParentClose twice; a single subscription is kept here -- confirm
        # the message bus deduplicated, or double delivery was harmless.
        sub(self.OnParentClose, "on_parent_close")
        sub(self.OnRaise, "raise_log_window")
        sub(self.OnAddLogMessage, "add_log")
        self.Layout()
        self.Update()
        self.Thaw()

    def onTextKillFocus0(self, evt):
        """Parked handler (trailing 0, not bound anywhere in this file)."""
        self.SetFocus()

    def SetParent0(self, parent):
        """Parked reparenting hook (trailing 0, not bound anywhere here)."""
        self.parent = parent
        self.Refresh()
        self.Layout()
        self.Update()
        self.SetFocus()

    def OnKeyUP(self, event):
        """ESC iconizes the window instead of closing it."""
        if event.GetKeyCode() == wx.WXK_ESCAPE:
            self.Iconize(True)
            self.setIconizedPos()
        event.Skip()

    def add(self, message):
        """Append a pre-formed (type, sender, text, ts) message to the list."""
        self.list.add(message)

    def info(self, msg):
        """Log ``msg`` ([sender, text]) as INFO, stamped with the current time."""
        ts = datetime.datetime.now().strftime("%H:%M:%S.%f")
        self.list.add(['INFO'] + msg + [ts])

    def OnAddLogMessage(self, data, extra1, extra2=None):
        """Pub/sub handler for "add_log": data is a 1-sequence with the message."""
        [msg] = data
        self.list.add(msg)

    def OnButton(self, evt):
        """Close button: iconize rather than destroy."""
        self.Iconize(True)

    def OnResizeWindow(self, event):
        event.Skip()

    def OnDblClick(self, e):
        # FIX: wx events expose Skip(), not skip() -- the original raised
        # AttributeError on every double click.
        e.Skip()

    def OnMouseEvt(self, e):
        # FIX: e.skip() -> e.Skip() (same AttributeError as OnDblClick).
        e.Skip()

    def OnParentClose(self, data, extra1, extra2=None):
        """Pub/sub handler: hide along with the parent window."""
        self.Iconize()
        self.Hide()

    def OnIconize(self, e):
        """Swap the min/max/close buttons depending on the iconized state."""
        if e.IsIconized():
            new_style = (self.style | wx.MAXIMIZE_BOX | wx.MINIMIZE_BOX) & ~(wx.CLOSE_BOX)
            self.SetWindowStyle(new_style)
            self.setIconizedPos()
            self.Update()
        else:
            new_style = (self.style | wx.CLOSE_BOX)
            self.SetWindowStyle(new_style)
            self.Refresh()
        e.Skip()

    def OnRaise(self, data, extra1, extra2=None):
        """Pub/sub handler: restore the window and optionally raise it."""
        [if_raise] = data
        self.Iconize(False)
        new_style = (self.style | wx.CLOSE_BOX) & ~(wx.MAXIMIZE_BOX | wx.MINIMIZE_BOX)
        self.SetWindowStyle(new_style)
        self.Refresh()
        if if_raise:
            self.Raise()
        self.parent.filter.SetFocus()

    def OnClose(self, e):
        """Intercept close: clear the log and iconize instead of destroying.

        (A disabled timer-stopping loop lived here; it iterated self.timer
        but called IsRunning()/Stop() on the list object itself -- removed.)
        """
        self.list.msglog = []
        self.Iconize()

    def setIconizedPos(self):
        """Park the iconized frame near the parent's bottom-right corner."""
        px, py = self.parent.GetScreenPosition()
        myx, myy = self.GetSize()
        x, y = self.parent.GetSize()
        # Integer coordinates: wx point APIs reject floats on Python 3.
        self.SetPosition((px + x - myx // 2 + 30, py + y - 83))

    def hideCloseBox(self):
        new_style = self.style & ~(wx.RESIZE_BORDER | wx.MINIMIZE_BOX)
        self.SetWindowStyle(new_style)
        self.Refresh()

    def hideMaximizeBox(self):
        new_style = self.style & ~(wx.RESIZE_BORDER | wx.MAXIMIZE_BOX)
        self.SetWindowStyle(new_style)
        self.Refresh()

    def method_load_default_state(self):
        """Position the frame relative to the saved main-window geometry.

        NOTE(review): settings are parsed with eval(); acceptable only for a
        trusted local config file -- ast.literal_eval would be safer.
        """
        x, y = eval(self.app.settings.Read('GUI/position', '(100,100)'))
        x_size, y_size = eval(self.app.settings.Read('GUI/size', '(100,100)'))
        my_x, my_y = self.GetSize()
        # Integer division keeps the coordinates int for wx on Python 3.
        self.SetPosition((x + x_size // 4 * 3 - 5, y + y_size // 2 - my_y // 2))
"""class MyException(Exception): # Некоректное входное значение
def __init__(self, text):
super().__init__(text)
try:
print("My")
except MyException as arr:
print(err)"""
def my_iter(obj):
    """Yield each element of *obj* in order (hand-rolled iterator demo)."""
    yield from obj
# Demo: drive the generator manually with next(); prints 1, 2, 3, 4.
l1 = [1,2,3,4,5,6,7,]
iterator_object = my_iter(l1)
print(next(iterator_object))
print(next(iterator_object))
print(next(iterator_object))
print(next(iterator_object))
|
from datetime import datetime
from requests.exceptions import ConnectionError
from django.test import TestCase
from fetcher.tools import SatcatParser
class SatcatParserTestCase(TestCase):
    """
    Test the behavior of the SatcatParser tool.

    Note: all ``assertEquals`` calls were changed to ``assertEqual`` --
    the plural form is a deprecated alias removed in Python 3.12.
    """

    def test_canImportSatcatparser(self):
        """
        Test if SatcatParser exists
        """
        try:
            from fetcher.tools import SatcatParser
        except ImportError:
            self.fail('Cannot import SatcatParser')

    def test_defaultFormatIsCelesTrak(self):
        """
        Test if the default format is CelesTrak
        """
        parser = SatcatParser()
        self.assertEqual(SatcatParser.CELESTRAK, parser.format)

    def test_explodeWorksInSimpleCase(self):
        """
        Test if SatcatParser parses data correctly
        """
        line = "1957-001A 00001 D SL-1 R/B CIS 1957-10-04 TYMSC 1957-12-01 96.2 65.1 938 214 20.4200 "
        expected = {
            'international_designator': '1957-001A',
            'norad_catalog_number': '00001',
            'multiple_flag': None,
            'has_payload': None,
            'operational_status': 'D',
            'names': 'SL-1 R/B',
            'owner': 'CIS',
            'launch_date': '1957-10-04',
            'launch_site': 'TYMSC',
            'decay_date': '1957-12-01',
            'orbital_period': '96.2',
            'inclination': '65.1',
            'apogee': '938',
            'perigee': '214',
            'radar_cross_section': '20.4200',
            'orbital_status': None,
        }
        parser = SatcatParser()
        data = parser.explode(line)
        for key in expected:
            self.assertEqual(expected[key], data[key])

    def test_parserChecksForInvalidValues(self):
        """
        Test if SatcatParser checks for invalid values
        """
        # Same record, but the radar cross section is the sentinel "N/A",
        # which the parser must map to None.
        line = "1957-001A 00001 D SL-1 R/B CIS 1957-10-04 TYMSC 1957-12-01 96.2 65.1 938 214 N/A "
        expected = {
            'international_designator': '1957-001A',
            'norad_catalog_number': '00001',
            'multiple_flag': None,
            'has_payload': None,
            'operational_status': 'D',
            'names': 'SL-1 R/B',
            'owner': 'CIS',
            'launch_date': '1957-10-04',
            'launch_site': 'TYMSC',
            'decay_date': '1957-12-01',
            'orbital_period': '96.2',
            'inclination': '65.1',
            'apogee': '938',
            'perigee': '214',
            'radar_cross_section': None,
            'orbital_status': None,
        }
        parser = SatcatParser()
        data = parser.explode(line)
        for key in expected:
            self.assertEqual(expected[key], data[key])
|
import sys
import tensorflow as tf
import tensorflow.keras as kr
import Configuration as cfg
import DataOperator as do
import RNNLMNetwork as rn
def generate_sentence(char_to_index, index_to_char, char_count, model, input_char, generate_num):
    """Autoregressively extend *input_char* by *generate_num* characters.

    Each step one-hot encodes the sentence so far (padded/truncated to
    cfg.max_sequence_len) and appends the model's most likely next character.

    :param char_to_index: mapping char -> integer label
    :param index_to_char: inverse mapping
    :param char_count: number of one-hot classes
    :param model: trained Keras character language model
    :param input_char: seed text (lower-cased before generation)
    :param generate_num: how many characters to append
    :return: the seed plus the generated characters
    """
    sentence = input_char.lower()
    for _ in range(generate_num):
        encoded = [char_to_index[char] for char in sentence]
        encoded = kr.preprocessing.sequence.pad_sequences([encoded], maxlen=cfg.max_sequence_len, padding='pre', truncating='pre')
        encoded = kr.utils.to_categorical(encoded, num_classes=char_count)
        # FIX: Sequential.predict_classes() was removed in TF 2.6; take the
        # argmax over the predicted class probabilities instead (same result).
        predict_label_index = model.predict(encoded).argmax(axis=-1)
        char = index_to_char[predict_label_index[0]]
        sentence = sentence + char
    return sentence
def main():
    """CLI entry: model_path, char_to_index_path, seed_char, predict_count."""
    model_path = sys.argv[1]
    mapping_path = sys.argv[2]
    seed_char = sys.argv[3]
    predict_count = int(sys.argv[4])
    char_to_index = do.load_char_to_index(mapping_path)
    # Invert the mapping for decoding predicted labels back to characters.
    index_to_char = {index: char for char, index in char_to_index.items()}
    # +1 extra class -- presumably reserved for the padding label 0; confirm
    # against DataOperator's encoding.
    char_count = len(char_to_index) + 1
    rnnlm_model = rn.create_model(char_count, model_path)
    print()
    print(generate_sentence(char_to_index, index_to_char, char_count,
                            rnnlm_model, seed_char, predict_count))
if __name__ == '__main__':
    # Script entry point; all arguments are read from sys.argv in main().
    main()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 7 21:43:45 2020
@author: garethlomax
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Background photo of the climbing wall; routes below are mapped onto it.
img = mpimg.imread('bgnd2.jpg')
#route should be this form
# Each row is a hold as (row, col) on the wall grid.
route = np.array([[11,1], [10,5], [6, 6]])
two_starts = True        # route has two starting holds
feet_follow_hands = True
def grid_to_graph_coord(route):
    """Flip grid rows (0..11) so the origin sits at the bottom; in place."""
    route[:, 0] = 11 - route[:, 0]  # flip the row axis only
    return route
def route_coords(route):
    """Map grid coordinates to pixel coordinates on the wall image; in place.

    Uses the module-level ``img``: holds are assumed spaced every 20 units
    with a 10-unit margin on a 240-unit wall, scaled to the image height.
    ``route`` must be an integer array (scaled with in-place *= / +=).
    """
    img_len = img.shape[0]
    offset = int((10/240) * img_len)
    spacing = int((20/240) * img_len)
    route *= spacing
    route += offset
    return route
def plot_lines():
    """Draw the square outline of the wall onto the current axes."""
    # wall_dim was historically 240 grid units; use the image edge in pixels.
    wall_dim = img.shape[0]
    xs = [0, wall_dim, wall_dim, 0, 0]
    ys = [wall_dim, wall_dim, 0, 0, wall_dim]
    plt.plot(xs, ys)
if __name__ == "__main__":
plt.figure()
plt.imshow(img)
plot_lines()
# route = grid_to_graph_coord(route)
route = route_coords(route)
colors = np.zeros(len(route))
colors[0] = 1
if two_starts:
colors[1] = 1
print("bing")
# colors = ['b', 'g','g']
plt.scatter(route[:,1], route[:,0], c = colors)
plt.plot(route[:,1], route[:,0])
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
str = raw_input("请输入: ");
print "你输入的内容是: ",str
str2 = input("请输入: ");
print "你输入的内容是: ",str2;
|
import sys, string, math
p,r = map(int,input().split())
k = 0
for l in range(p,r+1) :
for j in range(2,l) :
if l%j == 0 :
break
else :
k += 1
print(k)
|
#-*-coding: utf-8-*-
from django.shortcuts import render, render_to_response
# Create your views here.
def index(req):
    """Render the webapp landing page.

    FIX: render_to_response() was removed in Django 3.0; render() (already
    imported above) is the replacement and also passes the request so that
    context processors work.
    """
    return render(req, "webapp/index.html")
from .web import *
from .flask import *
|
from coreMechanics import Dungeon
from time import sleep
import pickle
method = "whole"
test = Dungeon(120, 120, method=method)
print test
f = open("saves/pregeneratedDungeon.dun", 'w')
pickle.dump(test, f)
f.close() |
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 5 20:11:06 2020
@author: user
"""
import numpy as np
import argparse
import AnoGANmodel

parser = argparse.ArgumentParser()
# Paired image arrays: arr_0 = source images, arr_1 = target images.
data = np.load('traindata2.npz')
parser.add_argument('--mode', type=str, default='train', help='train, test')
args = parser.parse_args()
scr_images , tar_images = data['arr_0'],data['arr_1']
# Normalize pixel values to [-1, 1] (assumes 8-bit 0..255 input -- confirm).
X_train = (scr_images.astype(np.float32)-127.5)/127.5
X_test = (tar_images.astype(np.float32)-127.5)/127.5
x = X_train[0]
# Add a trailing channel axis: (N, H, W) -> (N, H, W, 1).
X_train = X_train[:,:,:,None]
X_test = X_test[:,:,:,None]
if args.mode == 'train':
    print("Training")
    # Batch size 32.
    AnoGANmodel.train(32, X_train)
    # img = AnoGANmodel.generate(36)
    # y = img[1]
import turtle
# Draw a 100x100 square with a single pen.
# FIX: the third turn was ``turtle.left(90)`` -- that rotates the module's
# anonymous default turtle, not turtle1, so the square was never closed.
turtle1 = turtle.Pen()
for _ in range(4):
    turtle1.forward(100)
    turtle1.left(90)
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import mutual_info_regression, mutual_info_classif
from mine_estimator import mine, DistributionSimulator
def gen_x(data_size):
    """Draw i.i.d. N(1, 1) samples shaped (data_size[0], data_size[1])."""
    rows, cols = data_size[0], data_size[1]
    return np.random.normal(loc=1., scale=1., size=[rows, cols])
def gen_y(x, data_size):
    """Nonlinear response of x plus N(0.1, sqrt(2)) noise; shape (data_size,)."""
    signal = 2 * x[:, 0] + 0.5 * np.square(x[:, 1]) * x[:, -1]
    noise = np.random.normal(0.1, np.sqrt(2), [data_size, ])
    return signal + noise
if __name__ == "__main__":
x = gen_x([10000, 3])
print('-'*100)
y = gen_y(x, 10000)
est, hist = mine(x, y, stop_wait=150)
print(f'MINE output {est}')
plt.plot(np.arange(len(hist)), np.array(hist), label='MINE estimation')
plt.legend()
plt.show() |
grade1 = float(input("Digite a 1a nota: "))
grade2 = float(input("Digite a 2a nota: "))
grade3 = float(input("Digite a 3a nota: "))
grade4 = float(input("Digite a 4a nota: "))
average = (grade1+grade2+grade3+grade4)/4
print(f"A média das notas é: {average}")
|
from SignalGenerationPackage.SignalMainWindow import SignalMainWindow
from SignalGenerationPackage.DynamicPointsDensitySignal.Ui_DynamicPointsDensitySignalWindow import Ui_DynamicPointsDensitySignalWindow
from SignalGenerationPackage.DynamicPointsDensitySignal.DynamicPointsDensityUIParameters import DynamicPointsDensityUIParameters
class DynamicPointsDensityMainWindow(SignalMainWindow):
    """Main window for the dynamic-points-density signal.

    Wires the generated Qt form's widgets into the accessor interface that
    SignalMainWindow expects (start/stop buttons, send-mode radio buttons,
    cycle counter and LCD display).
    """

    def __init__(self):
        super().__init__()

    # overridden
    def init_user_interface(self):
        # Instantiate the Qt-Designer-generated form for this signal type.
        self.user_interface = Ui_DynamicPointsDensitySignalWindow()

    def init_plot(self):
        # Expose the form's plot widget to the base class.
        self.plot = self.user_interface.plot_widget

    # overridden
    def get_start_button(self):
        return self.user_interface.pushButtonStartSignalSending

    # overridden
    def get_pause_radio_button(self):
        return self.user_interface.PauseSendingradioButton

    # overridden
    def get_resume_radio_button(self):
        return self.user_interface.ResumeSendingradioButton

    # overridden
    def get_stop_button(self):
        return self.user_interface.pushButtonStopSignalSending

    # overridden
    def get_endless_send_radiobutton(self):
        return self.user_interface.EndlessSendingradioButton

    # overridden
    def get_cycle_send_radiobutton(self):
        return self.user_interface.SendCyclesradioButton

    # overridden
    def get_cycles_number_widget(self):
        return self.user_interface.CyclesNumberspinBox

    # overridden
    def get_LCD_display(self):
        return self.user_interface.lcdNumber
|
thislist = ["apple", "banana", "cherry"]
print(len(thislist))
thislist = ["apple", "banana", "cherry"]
thislist.append("orange")
print(thislist)
thislist = ["apple", "banana", "cherry"]
thislist.insert(1, "orange")
print(thislist)
thislist = ["apple", "banana", "cherry"]
thislist.remove("banana")
print(thislist)
thislist = ["apple", "banana", "cherry"]
thislist.pop()
print(thislist)
thislist = ["apple", "banana", "cherry"]
thislist.clear()
print(thislist)
thislist = ["apple", "banana", "cherry"]
mylist = thislist.copy()
print(mylist)
thislist = ["Ahmad", "omer", "khalid"]
mylist = thislist.copy()
thislist.pop(0)
print(mylist)
print(thislist)
thislist = ["apple", "banana", "cherry"]
mylist = list(thislist)
print(mylist)
thislist = list(("apple", "banana", "cherry")) # note the double round-brackets
print(thislist)
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
# Linear regression on the FuelConsumptionCo2 dataset (Jupyter export;
# the In[] markers are the original cell boundaries).

# In[2]:
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
get_ipython().run_line_magic('matplotlib', 'inline')

# In[4]:
df=pd.read_csv("https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/FuelConsumptionCo2.csv")

# In[5]:
df.head()

# In[7]:
# Keep only the candidate features and the target.
cdf=df[['ENGINESIZE' , 'CYLINDERS' , 'FUELCONSUMPTION_COMB' , 'CO2EMISSIONS' ]]
cdf.head(9)

# In[8]:
cdf.hist()
plt.show()

# In[12]:
# Visualize each feature against the target emissions.
plt.scatter(cdf.ENGINESIZE ,cdf.CO2EMISSIONS, color='blue')
plt.xlabel("ENGINESIZE")
plt.ylabel("EMISSIONS")
plt.show()
plt.scatter(cdf.CYLINDERS , cdf.CO2EMISSIONS,color='red')
plt.xlabel("CYLINDERS")
plt.ylabel("EMISSIONS")
plt.show()
plt.scatter(cdf.FUELCONSUMPTION_COMB, cdf.CO2EMISSIONS, color='green')
plt.xlabel("FUELCONSUMPTION_COMB")
plt.ylabel("EMISSIONS")
plt.show()

# In[13]:
# Random ~80/20 train/test split.
msk=np.random.rand(len(df))<0.8
train=cdf[msk]
test=cdf[~msk]

# In[15]:
plt.scatter(train.ENGINESIZE , train.CO2EMISSIONS)
plt.xlabel("ENGINESIZE")
plt.ylabel("CO2EMISSIONS")
plt.show()

# In[18]:
from sklearn import linear_model
regr= linear_model.LinearRegression()
train_x=np.asanyarray(train[['ENGINESIZE','FUELCONSUMPTION_COMB']])
train_y=np.asanyarray(train[['CO2EMISSIONS']])
regr.fit(train_x , train_y)
print("Coefficients: ", regr.coef_)
print("Intercept: ", regr.intercept_)

# In[20]:
y_hat=regr.predict(test[['ENGINESIZE','FUELCONSUMPTION_COMB']])
x=np.asanyarray(test[['ENGINESIZE','FUELCONSUMPTION_COMB']])
y=np.asanyarray(test[['CO2EMISSIONS']])
# FIX: mean of the squared residuals, not the square of the mean residual --
# the original `np.mean(y_hat-y)**2` squared AFTER averaging.
print("Residual sum of squares %.2f"
      % np.mean((y_hat - y) ** 2))
print("Variance score %.2f"
      % regr.score(x,y))

# In[ ]:
|
import xlrd
class excel_utils():
    """Helpers for reading test data out of .xls spreadsheets via xlrd."""

    def get_data(self, file_name, sheet_index):
        """Return every non-empty cell value of one sheet, scanned row by row.

        :param file_name: path to the .xls workbook
        :param sheet_index: zero-based sheet number
        """
        values = []
        book = xlrd.open_workbook(file_name)
        sheet = book.sheet_by_index(sheet_index)
        print(sheet.nrows)
        print(sheet.ncols)
        for row in range(sheet.nrows):
            for col in range(sheet.ncols):
                # FIX: compare with != rather than ``is not`` -- identity
                # comparison against a string literal is undefined behaviour.
                if sheet.cell(row, col).value != '':
                    values.append(sheet.cell(row, col).value)
        print(values)
        return values

    def read_cell(self, filepath, sheet_index, row, col):
        """Return the value of a single cell."""
        book = xlrd.open_workbook(filepath)
        sheet = book.sheet_by_index(sheet_index)
        return sheet.cell(row, col).value

    def spit_list(self, mylist, start_string, end_string):
        """Return the slice of *mylist* from start_string to end_string, inclusive.

        FIX: list.__getslice__ was removed in Python 3; plain slicing is
        equivalent and works on both major versions.
        """
        start = mylist.index(start_string)
        end = mylist.index(end_string)
        return mylist[start:end + 1]
|
from .BooleanType import BooleanType
from .SequenceType import SequenceType
from .UTF8String import UTF8String
from .IntegerType import IntegerType
from .EnumeratedType import EnumeratedType
from .ChoiceType import ChoiceType
from .SequenceOfType import SequenceOfType
from .BitStringType import BitStringType
from .OctetStringType import OctetStringType
|
import cPickle as pickle
import vivisect
def saveWorkspaceChanges(vw, filename):
    """Append the workspace's un-persisted events to *filename*.

    The file accumulates one pickled event list per save; loadWorkspace()
    later replays them all in order.  Nothing is written when there are no
    pending changes.
    """
    elist = vw.exportWorkspaceChanges()
    if len(elist):
        # FIX: the py2-only file() builtin was replaced with open(), and the
        # context manager guarantees the handle is closed on error.
        with open(filename, 'ab') as f:
            pickle.dump(elist, f, protocol=2)
def saveWorkspace(vw, filename):
    """Overwrite *filename* with a pickle of the full workspace event list.

    FIX: py2-only file() builtin replaced with open() plus a context
    manager so the handle is closed even if dump() raises.
    """
    vwevents = vw.exportWorkspace()
    with open(filename, "wb") as f:
        pickle.dump(vwevents, f, protocol=2)
def loadWorkspace(vw, filename):
    """Replay every pickled event list in *filename* into the workspace.

    The file holds the initial export plus any number of appended
    incremental change lists (see saveWorkspaceChanges); each is unpickled
    until EOF and then imported in order.

    Raises vivisect.InvalidWorkspace if the file is not a pickle stream.

    FIXES vs. the original: py2-only ``except X, e`` syntax replaced with
    ``as``; the file handle is now closed even when UnpicklingError is
    raised (the with block); the dead per-iteration ``import time`` timing
    stub was removed.
    """
    explist = []
    with open(filename, "rb") as f:
        while True:
            try:
                explist.append(pickle.load(f))
            except EOFError:
                # Normal termination: no more appended event lists.
                break
            except pickle.UnpicklingError:
                raise vivisect.InvalidWorkspace(filename, "invalid workspace file")
    for elist in explist:
        vw.importWorkspace(elist)
|
import pytest
import requests
from requests.exceptions import RequestException
from tests.test_vars import *
def service_available():
    """Best-effort probe: True when BASE_URL answers an HTTP GET at all."""
    try:
        requests.get(BASE_URL)
    except RequestException:
        return False
    return True
@pytest.mark.skipif(service_available() is False, reason='Service unavailable')
class TestRecordsAPI:
    """Live API tests for the /records endpoints.

    The whole class is skipped when the service is unreachable (probe runs
    once at collection time).  URLs and error strings come from
    tests.test_vars.
    """

    # =============================================
    # ================== Fixtures =================
    def _post_record(self, **kwargs):
        # Helper: POST the given fields as the JSON body of a new record.
        return requests.post(RECORDS_URL, json=kwargs)

    # =============================================
    # ============ Positive test cases ============
    def test_api_return_records_list(self):
        """Listing endpoint must wrap records in a 'results' key."""
        resp = requests.get(RECORDS_URL)
        assert 'results' in resp.json(), f'Response does not contain records list'

    @pytest.mark.skip
    def test_api_create_records(self):
        """Creating a record returns 201 (currently skipped)."""
        resp = self._post_record(record='Test', list=f'{LIST_URL}1/')
        assert resp.status_code == 201, f'Record was not created. Response Code={resp.status_code}'

    def test_api_returns_record(self):
        """A single record can be fetched by id."""
        resp = requests.get(f'{RECORDS_URL}5/')
        assert resp.status_code == 200, f'Request does not return record. Response Code={resp.status_code}'

    def test_record_contain_required_fields(self):
        """Record payload must expose url, record and list fields."""
        req_fields = ['url', 'record', 'list']
        resp_body = requests.get(f'{RECORDS_URL}5/').json()
        assert all(fld in resp_body for fld in req_fields), \
            f'Record does not contain all required fields.\n Required: {req_fields}.\n Actual: {resp_body.keys()}'

    # =============================================
    # ============ Negative test cases ============
    def test_api_send_errors_on_empty_post_body(self):
        # Both required fields missing entirely.
        resp_body = self._post_record().json()
        assert resp_body['record'] == [REQUIRED_FIELD_ERROR]
        assert resp_body['list'] == [REQUIRED_FIELD_ERROR]

    def test_api_send_errors_on_empty_post_values(self):
        # Fields present but set to empty strings.
        resp_body = self._post_record(record='', list='').json()
        assert resp_body['record'] == [BLANK_FIELD_ERROR]
        assert resp_body['list'] == [NULL_FIELD_ERROR]

    def test_api_send_errors_on_not_existing_list(self):
        # Reference to a list URL that does not resolve.
        resp_body = self._post_record(list=f'{LIST_URL}not_exist/').json()
        assert resp_body['list'] == [NOT_EXIST_ERROR]
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from enum import IntEnum
from typing import Any, ClassVar
from pants.build_graph.address import Address
# -----------------------------------------------------------------------------------------------
# Basic JSON Structures
# See https://build-server-protocol.github.io/docs/specification.html#basic-json-structures
# -----------------------------------------------------------------------------------------------
Uri = str
@dataclass(frozen=True)
class BuildTargetIdentifier:
    """A unique identifier for a target, can use any URI-compatible encoding as long as it is unique
    within the workspace.

    Clients should not infer metadata out of the URI structure such as the path or query parameters,
    use BuildTarget instead.
    """

    # The target’s Uri
    uri: Uri

    @classmethod
    def from_json_dict(cls, d):
        """Build from the wire-format JSON dict."""
        return cls(uri=d["uri"])

    def to_json_dict(self):
        """Serialize to the wire-format JSON dict."""
        return {"uri": self.uri}

    @classmethod
    def from_address(cls, addr: Address) -> BuildTargetIdentifier:
        """Encode a Pants Address as a BSP identifier under the pants: scheme."""
        return cls(uri=f"pants:{str(addr)}")
@dataclass(frozen=True)
class BuildTargetCapabilities:
    """Which BSP actions the server supports for a target (all default False)."""

    # This target can be compiled by the BSP server.
    can_compile: bool = False
    # This target can be tested by the BSP server.
    can_test: bool = False
    # This target can be run by the BSP server.
    can_run: bool = False
    # This target can be debugged by the BSP server.
    can_debug: bool = False

    @classmethod
    def from_json_dict(cls, d):
        """Decode from the wire-format camelCase dict."""
        return cls(can_compile=d["canCompile"], can_test=d["canTest"],
                   can_run=d["canRun"], can_debug=d["canDebug"])

    def to_json_dict(self):
        """Encode to the wire-format camelCase dict."""
        return dict(canCompile=self.can_compile, canTest=self.can_test,
                    canRun=self.can_run, canDebug=self.can_debug)
# Note: The BSP "build target" concept is _not_ the same as a Pants "target". They are similar but
# should be not be conflated with one another.
@dataclass(frozen=True)
class BuildTarget:
    """Build target contains metadata about an artifact (for example library, test, or binary
    artifact)"""

    # The target’s unique identifier
    id: BuildTargetIdentifier

    # A human readable name for this target.
    # May be presented in the user interface.
    # Should be unique if possible.
    # The id.uri is used if None.
    display_name: str | None

    # The directory where this target belongs to. Multiple build targets are allowed to map
    # to the same base directory, and a build target is not required to have a base directory.
    # A base directory does not determine the sources of a target, see buildTarget/sources. */
    base_directory: Uri | None

    # Free-form string tags to categorize or label this build target.
    # For example, can be used by the client to:
    # - customize how the target should be translated into the client's project model.
    # - group together different but related targets in the user interface.
    # - display icons or colors in the user interface.
    # Pre-defined tags are listed in `BuildTargetTag` but clients and servers
    # are free to define new tags for custom purposes.
    tags: tuple[str, ...]

    # The capabilities of this build target.
    capabilities: BuildTargetCapabilities

    # The set of languages that this target contains.
    # The ID string for each language is defined in the LSP.
    language_ids: tuple[str, ...]

    # The direct upstream build target dependencies of this build target
    dependencies: tuple[BuildTargetIdentifier, ...]

    # Language-specific metadata about this target.
    # See ScalaBuildTarget as an example.
    data: BSPData | None

    @classmethod
    def from_json_dict(cls, d):
        """Decode from the wire format; optional fields fall back to empty tuples/None.

        NOTE(review): ``baseDirectory`` is read with d["baseDirectory"] while the
        other optional fields use .get() -- a payload without that key raises
        KeyError here; confirm whether that is intended.
        """
        return cls(
            id=BuildTargetIdentifier.from_json_dict(d["id"]),
            display_name=d.get("displayName"),
            base_directory=d["baseDirectory"],
            tags=tuple(d.get("tags", [])),
            capabilities=BuildTargetCapabilities.from_json_dict(d["capabilities"]),
            language_ids=tuple(d.get("languageIds", [])),
            dependencies=tuple(
                BuildTargetIdentifier.from_json_dict(x) for x in d.get("dependencies", [])
            ),
            # data_kind=d.get("dataKind"), # TODO: figure out generic decode, this is only used in tests!
            data=d.get("data"),
        )

    def to_json_dict(self):
        """Encode to the wire format, omitting optional fields that are None.

        When ``data`` is present, its DATA_KIND is emitted alongside it as
        ``dataKind`` per the BSP spec.
        """
        result = {
            "id": self.id.to_json_dict(),
            "capabilities": self.capabilities.to_json_dict(),
            "tags": self.tags,
            "languageIds": self.language_ids,
            "dependencies": [dep.to_json_dict() for dep in self.dependencies],
        }
        if self.display_name is not None:
            result["displayName"] = self.display_name
        if self.base_directory is not None:
            result["baseDirectory"] = self.base_directory
        if self.data is not None:
            result["dataKind"] = self.data.DATA_KIND
            result["data"] = self.data.to_json_dict()
        return result
class BuildTargetDataKind:
    """Well-known values for BuildTarget.data's ``dataKind`` discriminator."""

    # The `data` field contains a `ScalaBuildTarget` object.
    SCALA = "scala"

    # The `data` field contains a `SbtBuildTarget` object.
    SBT = "sbt"
class BuildTargetTag:
    """Pre-defined values for BuildTarget.tags (free-form strings per the spec)."""

    # Target contains re-usable functionality for downstream targets. May have any
    # combination of capabilities.
    LIBRARY = "library"

    # Target contains source code for producing any kind of application, may have
    # but does not require the `canRun` capability.
    APPLICATION = "application"

    # Target contains source code for testing purposes, may have but does not
    # require the `canTest` capability.
    TEST = "test"

    # Target contains source code for integration testing purposes, may have
    # but does not require the `canTest` capability.
    # The difference between "test" and "integration-test" is that
    # integration tests traditionally run slower compared to normal tests
    # and require more computing resources to execute.
    INTEGRATION_TEST = "integration-test"

    # Target contains source code to measure performance of a program, may have
    # but does not require the `canRun` build target capability.
    BENCHMARK = "benchmark"

    # Target should be ignored by IDEs. */
    NO_IDE = "no-ide"

    # Actions on the target such as build and test should only be invoked manually
    # and explicitly. For example, triggering a build on all targets in the workspace
    # should by default not include this target.
    #
    # The original motivation to add the "manual" tag comes from a similar functionality
    # that exists in Bazel, where targets with this tag have to be specified explicitly
    # on the command line.
    MANUAL = "manual"
@dataclass(frozen=True)
class TaskId:
    """The Task Id allows clients to uniquely identify a BSP task and establish a client-parent
    relationship with another task id."""

    # A unique identifier
    id: str

    # The parent task ids, if any. A non-empty parents field means
    # this task is a sub-task of every parent task id. The child-parent
    # relationship of tasks makes it possible to render tasks in
    # a tree-like user interface or inspect what caused a certain task
    # execution.
    parents: tuple[str, ...] | None = None

    @classmethod
    def from_json_dict(cls, d):
        """Decode from the wire format; absent "parents" maps to None."""
        if "parents" in d:
            return cls(id=d["id"], parents=tuple(d["parents"]))
        return cls(id=d["id"])

    def to_json_dict(self) -> dict[str, Any]:
        """Encode to the wire format, omitting parents when None."""
        payload: dict[str, Any] = {"id": self.id}
        if self.parents is not None:
            payload["parents"] = self.parents
        return payload
class StatusCode(IntEnum):
    """Numeric status reported for a BSP request or task."""
    # Execution was successful.
    OK = 1
    # Execution failed.
    ERROR = 2
    # Execution was cancelled.
    CANCELLED = 3
class BSPData:
    """Mix-in for BSP spec types that can live in a data field."""

    # Value written to the owning object's "dataKind" JSON field.
    DATA_KIND: ClassVar[str]

    def to_json_dict(self) -> dict[str, Any]:
        """Serialize this payload to a JSON-compatible dict (subclasses must override)."""
        raise NotImplementedError
|
class Solution:
    """Two Sum: find the 1-based indices of the two entries summing to target."""

    # @return a tuple, (index1, index2)
    def twoSum(self, num, target):
        """Single-pass hash-map scan; returns (0, 0) when no pair exists.

        Fixes: Python 2 `xrange` (a NameError on Python 3) replaced with
        `enumerate`, and the loop now returns as soon as the pair is found
        instead of scanning the rest of the list.
        """
        wanted = {}  # value still needed -> index of the element that needs it
        for i, value in enumerate(num):
            if value in wanted:
                # The stored index is always the earlier one, so the pair is
                # already in ascending order.
                return (wanted[value] + 1, i + 1)
            wanted[target - value] = i
        return (0, 0)  # original fallback when no pair is found
# Demo: 1-based indices — num[3] (-3) + num[5] (-5) == -8.
# Fix: `print` was a Python 2 statement, which is a SyntaxError on Python 3.
s = Solution()
print(s.twoSum([-1, -2, -3, -4, -5], -8))
import torch
import torch.nn as nn
from torchsummary import summary
import math
class BasicBlock(nn.Module):
    """Pre-activation residual block: BN -> LReLU -> conv3x3 -> BN -> LReLU -> conv3x3 + skip."""

    def __init__(self, n_features, bias):
        super(BasicBlock, self).__init__()
        # Low BN momentum (0.001) — original note: useful when training many
        # epochs with EMA weight averaging.
        self.bn1 = nn.BatchNorm2d(n_features, momentum=0.001)
        self.relu = nn.LeakyReLU(negative_slope=0.1)
        self.conv1 = nn.Conv2d(n_features, n_features, kernel_size=(3, 3), padding=1, bias=bias)
        self.bn2 = nn.BatchNorm2d(n_features, momentum=0.001)
        self.conv2 = nn.Conv2d(n_features, n_features, kernel_size=(3, 3), padding=1, bias=bias)

    def forward(self, x):
        # The skip connection taps the pre-activated input, not the raw input.
        activated = self.relu(self.bn1(x))
        residual = self.conv2(self.relu(self.bn2(self.conv1(activated))))
        return residual + activated
class TransitionBlock(nn.Module):
    """
    Block used for one or both of the following reasons:
      - Downsample input by 2 using conv of stride=2
      - Adapt number of features using 1x1 filters
    """

    def __init__(self, in_f, out_f, downsample, bias):
        super(TransitionBlock, self).__init__()
        stride = 2 if downsample else 1
        self.bn1 = nn.BatchNorm2d(in_f, momentum=0.001)
        self.relu = nn.LeakyReLU(negative_slope=0.1)
        self.conv1 = nn.Conv2d(in_f, out_f, kernel_size=(3, 3), padding=1, stride=stride, bias=bias)
        self.bn2 = nn.BatchNorm2d(out_f, momentum=0.001)
        self.conv2 = nn.Conv2d(out_f, out_f, kernel_size=(3, 3), padding=1, bias=bias)
        # 1x1 projection so the identity branch matches the new channel count
        # and spatial size.
        self.shortcut = nn.Conv2d(in_f, out_f, kernel_size=(1, 1), stride=stride, bias=bias)

    def forward(self, x):
        pre = self.relu(self.bn1(x))
        skip = self.shortcut(pre)
        main = self.conv2(self.relu(self.bn2(self.conv1(pre))))
        return main + skip
class ConvGroup(nn.Module):
    """One transition block followed by (blocks - 1) basic residual blocks."""

    def __init__(self, in_features, out_features, blocks, bias, downsample=True):
        super(ConvGroup, self).__init__()
        stages = [TransitionBlock(in_features, out_features, downsample, bias)]
        stages.extend(BasicBlock(out_features, bias) for _ in range(blocks - 1))
        self.conv_blocks = nn.Sequential(*stages)

    def forward(self, x):
        return self.conv_blocks(x)
class WideResNet(nn.Module):
    """WRN-depth-k: depth = 6n + 4, widening factor k, designed for 32x32 inputs."""

    def __init__(self, depth, k, n_out, bias=True):
        super(WideResNet, self).__init__()
        assert (depth - 4) % 6 == 0, "depth must be 6*n + 4"
        blocks_per_group = (depth - 4) // 6
        widths = [16, 16 * k, 32 * k, 64 * k]
        self.conv1 = nn.Conv2d(3, widths[0], kernel_size=(3, 3), padding=1, bias=bias)
        # Only the first group keeps the spatial resolution; the next two halve it.
        self.conv_group1 = ConvGroup(widths[0], widths[1], blocks=blocks_per_group, downsample=False, bias=bias)
        self.conv_group2 = ConvGroup(widths[1], widths[2], blocks=blocks_per_group, bias=bias)
        self.conv_group3 = ConvGroup(widths[2], widths[3], blocks=blocks_per_group, bias=bias)
        self.bn = nn.BatchNorm2d(widths[3], momentum=0.001)
        self.relu = nn.LeakyReLU(negative_slope=0.1)
        # 8x8 pooling collapses the final feature map of a 32x32 input to 1x1.
        self.avg_pool = nn.AvgPool2d(kernel_size=8)
        self.linear = nn.Linear(widths[3], n_out)

    def forward(self, x):
        features = self.conv_group3(self.conv_group2(self.conv_group1(self.conv1(x))))
        pooled = self.avg_pool(self.relu(self.bn(features)))
        return self.linear(pooled.view(pooled.size(0), -1))
if __name__ == '__main__':
    # Build a WRN-28-2 for 10 classes and print its layer summary.
    dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = WideResNet(depth=28, k=2, n_out=10)
    net.to(dev)
    summary(net, (3, 32, 32))
|
import android
import webbrowser

# Scan a barcode with the device camera (SL4A API), show the decoded URL,
# and offer to open it in the browser.
droid = android.Android()
code = droid.scanBarcode()
url = code[1]['extras']['SCAN_RESULT']
droid.makeToast("The url scanned is " + url)
droid.notify('Scan result', url)
droid.dialogCreateAlert('Scan Result', url)
droid.dialogSetPositiveButtonText('Open in browser')
droid.dialogSetNegativeButtonText('Exit')
droid.dialogShow()
response = droid.dialogGetResponse().result
droid.dialogDismiss()
# Fix: dict.has_key() was removed in Python 3; use the `in` operator.
if 'which' in response:
    result = response['which']
    if result == 'positive':
        droid.startActivity('android.intent.action.VIEW', url)
        #webbrowser.open(url)
        #url = "http://books.google.com?q=%d" % isbn
        #droid.startActivity('android.intent.action.VIEW', url)
|
from typing import Tuple
import numpy as np
from qtpy.QtCore import QModelIndex, Qt
from napari._qt.containers import QtLayerList
from napari.components import LayerList
from napari.layers import Image
def test_set_layer_invisible_makes_item_unchecked(qtbot):
    """Turning a layer's `visible` flag off must uncheck its list item."""
    view, layer = make_qt_layer_list_with_layer(qtbot)
    assert layer.visible
    assert check_state_at_layer_index(view, 0) == Qt.CheckState.Checked
    layer.visible = False
    assert check_state_at_layer_index(view, 0) == Qt.CheckState.Unchecked
def test_set_item_unchecked_makes_layer_invisible(qtbot):
    """Unchecking the item through the Qt model must hide the layer."""
    view, layer = make_qt_layer_list_with_layer(qtbot)
    assert check_state_at_layer_index(view, 0) == Qt.CheckState.Checked
    assert layer.visible
    index = layer_to_model_index(view, 0)
    view.model().setData(index, Qt.CheckState.Unchecked, Qt.ItemDataRole.CheckStateRole)
    assert not layer.visible
def make_qt_layer_list_with_layer(qtbot) -> Tuple[QtLayerList, Image]:
    """Build a QtLayerList widget holding a single blank 4x3 image layer."""
    layer = Image(np.zeros((4, 3)))
    widget = QtLayerList(LayerList([layer]))
    qtbot.addWidget(widget)
    return widget, layer
def layer_to_model_index(view: QtLayerList, layer_index: int) -> QModelIndex:
    """Map a layer-list position to the corresponding Qt model index."""
    model = view.model()
    return model.index(layer_index, 0, view.rootIndex())
def check_state_at_layer_index(
    view: QtLayerList, layer_index: int
) -> Qt.CheckState:
    """Read the check state displayed for the layer at *layer_index*."""
    index = layer_to_model_index(view, layer_index)
    raw = view.model().data(index, Qt.ItemDataRole.CheckStateRole)
    # data() may return the enum's integer value in some cases, so normalize
    # to the enum type for more explicit assertions.
    return Qt.CheckState(raw)
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import re
from enum import Enum
from typing import Match, Optional, Tuple, cast
from pants.backend.python.target_types import PexCompletePlatformsField, PythonResolveField
from pants.backend.python.util_rules.faas import (
PythonFaaSCompletePlatforms,
PythonFaaSDependencies,
PythonFaaSHandlerField,
PythonFaaSRuntimeField,
)
from pants.backend.python.util_rules.faas import rules as faas_rules
from pants.base.deprecated import warn_or_error
from pants.core.goals.package import OutputPathField
from pants.core.util_rules.environments import EnvironmentField
from pants.engine.addresses import Address
from pants.engine.rules import collect_rules
from pants.engine.target import COMMON_TARGET_FIELDS, InvalidFieldException, StringField, Target
from pants.util.docutil import doc_url
from pants.util.strutil import help_text, softwrap
class PythonGoogleCloudFunctionHandlerField(PythonFaaSHandlerField):
    # GCP requires "Your main file must be named main.py"
    # https://cloud.google.com/functions/docs/writing#directory-structure-python
    reexported_handler_module = "main"

    # Fix: the help text read "to used as" — missing "be".
    help = help_text(
        f"""
        Entry point to the Google Cloud Function handler.

        {PythonFaaSHandlerField.help}

        This is re-exported at `{reexported_handler_module}.handler` in the resulting package to
        be used as the configured handler of the Google Cloud Function in GCP. It can also be
        accessed under its source-root-relative module path, for example:
        `path.to.module.handler_func`.
        """
    )
class PythonGoogleCloudFunctionRuntimes(Enum):
    """Python runtime identifiers accepted by Google Cloud Functions."""
    PYTHON_37 = "python37"
    PYTHON_38 = "python38"
    PYTHON_39 = "python39"
    PYTHON_310 = "python310"
    PYTHON_311 = "python311"
class PythonGoogleCloudFunctionRuntime(PythonFaaSRuntimeField):
    # Matches runtime ids like "python39": one major digit, one or more minor digits.
    PYTHON_RUNTIME_REGEX = r"^python(?P<major>\d)(?P<minor>\d+)$"

    valid_choices = PythonGoogleCloudFunctionRuntimes
    help = help_text(
        """
        The identifier of the Google Cloud Function runtime to target (pythonXY). See
        https://cloud.google.com/functions/docs/concepts/python-runtime.

        In general you'll want to define either a `runtime` or one `complete_platforms` but not
        both. Specifying a `runtime` is simpler, but less accurate. If you have issues either
        packaging the Google Cloud Function PEX or running it as a deployed Google Cloud Function,
        you should try using `complete_platforms` instead.
        """
    )

    @classmethod
    def compute_value(cls, raw_value: Optional[str], address: Address) -> Optional[str]:
        """Validate that the configured runtime matches the pythonXY pattern."""
        value = super().compute_value(raw_value, address)
        if value is None:
            return None
        if not re.match(cls.PYTHON_RUNTIME_REGEX, value):
            raise InvalidFieldException(
                f"The `{cls.alias}` field in target at {address} must be of the form pythonXY, "
                f"but was {value}."
            )
        return value

    def to_interpreter_version(self) -> Optional[Tuple[int, int]]:
        """Returns the Python version implied by the runtime, as (major, minor)."""
        if self.value is None:
            return None
        # compute_value already validated the format, so this match cannot be None.
        mo = cast(Match, re.match(self.PYTHON_RUNTIME_REGEX, self.value))
        return int(mo.group("major")), int(mo.group("minor"))

    @classmethod
    def from_interpreter_version(cls, py_major: int, py_minor: int) -> str:
        """Build the runtime identifier string for a (major, minor) Python version."""
        return f"python{py_major}{py_minor}"
class GoogleCloudFunctionTypes(Enum):
    """Supported Google Cloud Function trigger types."""
    EVENT = "event"
    HTTP = "http"
class PythonGoogleCloudFunctionType(StringField):
    """Required `type` field selecting the function's trigger kind."""
    alias = "type"
    required = True
    valid_choices = GoogleCloudFunctionTypes
    help = help_text(
        """
        The trigger type of the cloud function. Can either be `'event'` or `'http'`.
        See https://cloud.google.com/functions/docs/concepts/python-runtime for reference to
        `--trigger-http`.
        """
    )
class PythonGoogleCloudFunction(Target):
    alias = "python_google_cloud_function"
    core_fields = (
        *COMMON_TARGET_FIELDS,
        OutputPathField,
        PythonFaaSDependencies,
        PythonGoogleCloudFunctionHandlerField,
        PythonGoogleCloudFunctionRuntime,
        PythonFaaSCompletePlatforms,
        PythonGoogleCloudFunctionType,
        PythonResolveField,
        EnvironmentField,
    )
    help = help_text(
        f"""
        A self-contained Python function suitable for uploading to Google Cloud Function.

        See {doc_url('google-cloud-function-python')}.
        """
    )

    def validate(self) -> None:
        """Deprecation-warn when both `runtime` and `complete_platforms` are set,
        since `complete_platforms` takes precedence over `runtime`."""
        has_runtime = self[PythonGoogleCloudFunctionRuntime].value is not None
        has_complete_platforms = self[PexCompletePlatformsField].value is not None

        runtime_alias = self[PythonGoogleCloudFunctionRuntime].alias
        complete_platforms_alias = self[PexCompletePlatformsField].alias

        if has_runtime and has_complete_platforms:
            warn_or_error(
                "2.19.0.dev0",
                f"using both `{runtime_alias}` and `{complete_platforms_alias}` in the `{self.alias}` target {self.address}",
                softwrap(
                    f"""
                    The `{complete_platforms_alias}` now takes precedence over the `{runtime_alias}` field, if
                    it is set. Remove the `{runtime_alias}` field to only use the `{complete_platforms_alias}`
                    value, or remove the `{complete_platforms_alias}` field to use the default platform
                    implied by `{runtime_alias}`.
                    """
                ),
            )
def rules():
    """Return this backend's rule set: the module's own rules plus the shared FaaS rules."""
    return (
        *collect_rules(),
        *faas_rules(),
    )
|
# Reads a raw activation key, then processes commands until "Generate":
#   Contains>>>substring           - report whether the key contains substring
#   Flip>>>Upper|Lower>>>s>>>e     - upper/lower-case the slice [s:e)
#   Slice>>>s>>>e                  - delete the slice [s:e)
raw_activation_key = input()
while True:
    line = input()
    if line == "Generate":
        print(f"Your activation key is: {raw_activation_key}")
        break
    tokens = line.split(">>>")
    command = tokens[0]
    if command == "Contains":
        substring = tokens[1]
        if substring in raw_activation_key:
            print(f"{raw_activation_key} contains {substring}")
        else:
            print("Substring not found!")
    elif command == "Flip":
        upper_lower = tokens[1]
        start_index = int(tokens[2])
        end_index = int(tokens[3])
        segment = raw_activation_key[start_index:end_index]
        if upper_lower == "Upper":
            new_segment = segment.upper()
        elif upper_lower == "Lower":
            new_segment = segment.lower()
        else:
            new_segment = segment
        # Fix: str.replace(segment, new_segment, 1) changed the FIRST occurrence
        # of the segment, which can precede [start_index:end_index] and flip the
        # wrong span. Rebuild by slicing so exactly the requested range changes.
        raw_activation_key = (raw_activation_key[:start_index]
                              + new_segment
                              + raw_activation_key[end_index:])
        print(raw_activation_key)
    elif command == "Slice":
        start_index = int(tokens[1])
        end_index = int(tokens[2])
        raw_activation_key = raw_activation_key[:start_index] + raw_activation_key[end_index:]
        print(raw_activation_key)
|
# -*- coding: utf-8 -*-
# Converts a T-cell density map (as produced by Ilastik) to a detection map and, optionally, to a list of
# coordinates for the centers of the detections.
__author__ = "Vlad Popovici <popovici@bioxlab.org>"
__version__ = 0.1
import skimage.io as skio
import skimage.morphology as skm
import skimage.feature as skf
import h5py
import simplejson as json
import pandas
import feather
import numpy as np
import argparse as opt
import logging
import warnings
warnings.simplefilter("ignore", UserWarning)
def main():
    """Convert an Ilastik T-cell density map (HDF5) into a binary detection map
    and, optionally, a feather file with the detected blob coordinates.

    Returns 0 on success.
    """
    p = opt.ArgumentParser(description="Converts a density map into a detection map.")
    p.add_argument("dens_map", action="store", help="a HDF5 file with the density map")
    p.add_argument("det_map", action="store", help="name of the file storing the detections. (image or .npz)")
    p.add_argument("-m", "--map", action="store", help="name (path) of the map in the HDF5 file",
                   default="t-cell-dens")
    p.add_argument("-c", "--config", action="store", help="JSON configuration file",
                   default="tcell_dens.json")
    p.add_argument("-p", "--points", action="store", help="file to store the points coordinates", default=None)
    args = p.parse_args()

    logging.captureWarnings(True)
    logger = logging.getLogger('TCellDens')
    logger.setLevel(logging.DEBUG)
    logger_handler = logging.FileHandler('.tcell_dens.log')
    logger_handler.setLevel(logging.DEBUG)
    logger.addHandler(logger_handler)
    logger.info("========= T-Cell density to detection =========")

    # Thresholds and morphology sizes come from the JSON config
    # (min_posterior, min_obj_size, fill_size, open_disk_size, blob_*).
    with open(args.config, 'r') as fp:
        params = json.load(fp, use_decimal=True)

    logger.info("-reading density map")
    with h5py.File(args.dens_map) as f:
        res = f[args.map][()].squeeze()

    # Zero out low-confidence pixels, then binarize.
    res[res < params['min_posterior']] = 0
    # NOTE(review): `d = res` is an alias, so `d[d > 0] = 1` ALSO overwrites the
    # surviving posterior values in `res` with 1 before blob detection below —
    # presumably intentional (detection on the binarized map), but confirm.
    d = res
    d[d > 0] = 1
    d = d.astype('bool')

    logger.info("-morphological processing the map")
    d = skm.remove_small_objects(d, params['min_obj_size'])
    d = skm.remove_small_holes(d, params['fill_size'])
    # NOTE(review): `selem=` is the pre-0.19 scikit-image keyword (renamed to
    # `footprint=` in newer releases) — pins an older skimage.
    d = skm.opening(d, selem=skm.disk(params['open_disk_size']))
    # Clear everything the morphological cleanup removed.
    res[np.where(np.logical_not(d))] = 0

    logger.info("-detecting small blobs")
    blobs = skf.blob_dog(res,
                         min_sigma=float(params['blob_min_sigma']),
                         max_sigma=float(params['blob_max_sigma']),
                         threshold=float(params['blob_threshold']))

    # convert to int coordinates
    blobs = blobs.astype(np.uint64)
    if args.points is not None:
        # create a DataFrame to store metadata as well (col names mainly):
        df = pandas.DataFrame(blobs, columns=["row", "col", "radius"])
        feather.write_dataframe(df, args.points+".feather")

    # One-hot detection map: a single pixel per detected blob center.
    m = np.zeros(res.shape, dtype=np.uint8)
    m[blobs[:, 0], blobs[:, 1]] = 1
    if args.det_map.lower().endswith(".npz"):
        np.savez_compressed(args.det_map, map=m)
    else:
        # assume image and try to save
        skio.imsave(args.det_map, 255*m)

    return 0
# CLI entry point; note main()'s return value (0) is not propagated as an exit code.
if __name__ == "__main__":
    main()
|
from __future__ import print_function
import tensorflow as tf
import numpy as np
# Save to file
# remember to define the same dtype and shape when restore
def net_save():
    """Create two named variables and save them to my_net/save_net.ckpt (TF1 graph API)."""
    W = tf.Variable([[1, 2, 3], [3, 4, 5]], dtype=tf.float32, name='weights')
    b = tf.Variable([[1, 2, 3]], dtype=tf.float32, name='biases')
    # initialize_all_variables() is the pre-0.12 API; pick the initializer
    # matching the installed TF version (old path taken only for TF 0.x < 0.12).
    if int(tf.__version__.split('.')[1]) < 12 and int(tf.__version__.split('.')[0]) < 1:
        init = tf.initialize_all_variables()
    else:
        init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        save_path = saver.save(sess, "my_net/save_net.ckpt")
        print("Save to path: ", save_path)
# restore variables
# redefine the same shape and same type for your variables
def net_load():
    """Restore the saved variables; shapes, dtypes and names must match net_save()."""
    W = tf.Variable(np.arange(6).reshape((2, 3)), dtype=tf.float32, name="weights")
    b = tf.Variable(np.arange(3).reshape((1, 3)), dtype=tf.float32, name="biases")
    # No init step needed: restore() assigns the saved values directly.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, "my_net/save_net.ckpt")
        print("weights:", sess.run(W))
        print("biases:", sess.run(b))
# This only demonstrates the save and load steps; the weights/biases are not
# modified by any training.
# net_save()
net_load()
|
# coding: utf-8
# Standard Python libraries
from io import IOBase
from pathlib import Path
from typing import Optional, Union
import numpy as np
# https://github.com/usnistgov/atomman
import atomman as am
import atomman.unitconvert as uc
# https://github.com/usnistgov/DataModelDict
from DataModelDict import DataModelDict as DM
# iprPy imports
from .. import Calculation
from .stacking_fault_map_2D import stackingfaultmap
from ...calculation_subset import (LammpsPotential, LammpsCommands, Units,
AtommanSystemLoad, LammpsMinimize,
StackingFault)
class StackingFaultPath():
    """Class for managing a path along a stacking fault map"""

    def __init__(self, sp: DM):
        """
        Initializes from the 'slip-path' branch of a calculation record.

        Parameters
        ----------
        sp : DataModelDict
            The slip-path model content to parse.
        """
        self.__direction = sp['direction']
        self.__error = sp.get('error', None)
        # Result fields are only present when the MEP calculation succeeded.
        if self.__error is None:
            self.__coord = uc.value_unit(sp['minimum-energy-path'])
            self.__usf_mep = uc.value_unit(sp['unstable-fault-energy-mep'])
            self.__usf_urp = uc.value_unit(sp['unstable-fault-energy-unrelaxed-path'])
            self.__shear_mep = uc.value_unit(sp['ideal-shear-stress-mep'])
            self.__shear_urp = uc.value_unit(sp['ideal-shear-stress-unrelaxed-path'])
        else:
            self.__coord = None
            self.__usf_mep = None
            self.__usf_urp = None
            self.__shear_mep = None
            self.__shear_urp = None

    @property
    def direction(self) -> str:
        """str: The direction of slip"""
        return self.__direction

    @property
    def coord(self) -> np.ndarray:
        """numpy.ndarray: Coordinates along the path"""
        return self.__coord

    @property
    def usf_mep(self) -> float:
        """float: The USF energy found along the minimum energy path"""
        return self.__usf_mep

    @property
    def usf_urp(self) -> float:
        """float: The USF energy found along the unrelaxed (ideal) path"""
        return self.__usf_urp

    @property
    def shear_mep(self) -> float:
        """float: The ideal shear stress found along the minimum energy path"""
        return self.__shear_mep

    @property
    def shear_urp(self) -> float:
        """float: The ideal shear stress found along the unrelaxed (ideal) path"""
        return self.__shear_urp

    @property
    def error(self) -> Optional[str]:
        """str or None: Any error that may have been issued during the MEP calculation"""
        return self.__error

    def build_model(self,
                    length_unit: str = 'angstrom',
                    energyperarea_unit: str = 'mJ/m^2',
                    stress_unit: str = 'GPa') -> DM:
        """
        Constructs the model contents associated with the path.

        Parameters
        ----------
        length_unit : str, optional
            The unit of length to use when outputting the path coordinates.
        energyperarea_unit : str, optional
            The unit of energy per area to use when outputting the unstable fault
            energies.
        stress_unit : str, optional
            The unit of stress to use when outputting the ideal shear stresses.

        Returns
        -------
        DataModelDict
            The 'slip-path' model branch for this path.
        """
        sp = DM()
        sp['direction'] = self.direction
        if self.error is None:
            sp['minimum-energy-path'] = uc.model(self.coord, length_unit)
            sp['unstable-fault-energy-mep'] = uc.model(self.usf_mep, energyperarea_unit)
            sp['unstable-fault-energy-unrelaxed-path'] = uc.model(self.usf_urp, energyperarea_unit)
            sp['ideal-shear-stress-mep'] = uc.model(self.shear_mep, stress_unit)
            sp['ideal-shear-stress-unrelaxed-path'] = uc.model(self.shear_urp, stress_unit)
        else:
            sp['error'] = self.error

        return sp
class StackingFaultMap2D(Calculation):
"""Class for managing 2D maps of stacking fault energy calculations"""
############################# Core properties #################################
def __init__(self,
             model: Union[str, Path, IOBase, DM, None]=None,
             name: Optional[str]=None,
             database = None,
             params: Union[str, Path, IOBase, dict] = None,
             **kwargs: any):
    """
    Initializes a Calculation object for a given style.

    Parameters
    ----------
    model : str, file-like object or DataModelDict, optional
        Record content in data model format to read in.  Cannot be given
        with params.
    name : str, optional
        The name to use for saving the record.  By default, this should be
        the calculation's key.
    database : yabadaba.Database, optional
        A default Database to associate with the Record, typically the
        Database that the Record was obtained from.  Can allow for Record
        methods to perform Database operations without needing to specify
        which Database to use.
    params : str, file-like object or dict, optional
        Calculation input parameters or input parameter file.  Cannot be
        given with model.
    **kwargs : any
        Any other core Calculation record attributes to set.  Cannot be
        given with model.
    """
    # Initialize subsets used by the calculation
    self.__potential = LammpsPotential(self)
    self.__commands = LammpsCommands(self)
    self.__units = Units(self)
    self.__system = AtommanSystemLoad(self)
    self.__minimize = LammpsMinimize(self)
    self.__defect = StackingFault(self)
    subsets = (self.commands, self.potential, self.system,
               self.minimize, self.defect, self.units)

    # Initialize unique calculation attributes
    self.num_a1 = 10      # default shift-grid density along a1
    self.num_a2 = 10      # default shift-grid density along a2
    self.__gamma = None   # GSF results (raw model content or GammaSurface)
    self.__paths = None   # list of StackingFaultPath results
    self.__E_isf = None   # intrinsic stacking fault energy, if found

    # Define calc shortcut
    self.calc = stackingfaultmap

    # Call parent constructor
    super().__init__(model=model, name=name, database=database, params=params,
                     subsets=subsets, **kwargs)
@property
def filenames(self) -> list:
    """list: the names of each file used by the calculation."""
    # The calculation script and its accompanying LAMMPS input template.
    return [
        'stacking_fault_map_2D.py',
        'sfmin.template'
    ]
############################## Class attributes ###############################
# Read-only accessors for the calculation subsets created in __init__.

@property
def commands(self) -> LammpsCommands:
    """LammpsCommands subset"""
    return self.__commands

@property
def potential(self) -> LammpsPotential:
    """LammpsPotential subset"""
    return self.__potential

@property
def units(self) -> Units:
    """Units subset"""
    return self.__units

@property
def system(self) -> AtommanSystemLoad:
    """AtommanSystemLoad subset"""
    return self.__system

@property
def minimize(self) -> LammpsMinimize:
    """LammpsMinimize subset"""
    return self.__minimize

@property
def defect(self) -> StackingFault:
    """StackingFault subset"""
    return self.__defect
@property
def num_a1(self) -> int:
    """int: Number of fractional shifts along the a1vect direction to evaluate"""
    return self.__num_a1

@num_a1.setter
def num_a1(self, val: int):
    # Coerce to int so string values from parameter files are accepted.
    self.__num_a1 = int(val)

@property
def num_a2(self) -> int:
    """int: Number of fractional shifts along the a2vect direction to evaluate"""
    return self.__num_a2

@num_a2.setter
def num_a2(self, val: int):
    # Coerce to int so string values from parameter files are accepted.
    self.__num_a2 = int(val)
@property
def gamma(self) -> am.defect.GammaSurface:
    """atomman.defect.GammaSurface: GSF results"""
    if self.__gamma is None:
        raise ValueError('No results yet!')
    # Loaded records store raw model content; convert lazily on first access.
    if not isinstance(self.__gamma, am.defect.GammaSurface):
        self.__gamma = am.defect.GammaSurface(model=self.__gamma)
    return self.__gamma

@property
def paths(self) -> list:
    """list: Any StackingFaultPath results"""
    if self.__paths is None:
        raise ValueError('No path results!')
    return self.__paths

@property
def E_isf(self) -> Optional[float]:
    """float or None: Intrinsic stacking fault energy for the plane, if exists and found."""
    return self.__E_isf
def set_values(self,
               name: Optional[str] = None,
               **kwargs: any):
    """
    Set calculation values directly.  Any terms not given will be set
    or reset to the calculation's default values.

    Parameters
    ----------
    name : str, optional
        The name to assign to the calculation.  By default, this is set as
        the calculation's key.
    num_a1 : int, optional
        The number of shifts to evaluate along the a1 shift vector.
    num_a2 : int, optional
        The number of shifts to evaluate along the a2 shift vector.
    **kwargs : any, optional
        Any keyword parameters supported by the set_values() methods of
        the parent Calculation class and the subset classes.
    """
    # Call super to set universal and subset content
    super().set_values(name=name, **kwargs)

    # Set calculation-specific values
    if 'num_a1' in kwargs:
        self.num_a1 = kwargs['num_a1']
    if 'num_a2' in kwargs:
        self.num_a2 = kwargs['num_a2']
####################### Parameter file interactions ###########################
def load_parameters(self,
                    params: Union[dict, str, IOBase],
                    key: Optional[str] = None):
    """
    Reads in and sets calculation parameters.

    Parameters
    ----------
    params : dict, str or file-like object
        The parameters or parameter file to read in.
    key : str, optional
        A new key value to assign to the object.  If not given, will use
        calc_key field in params if it exists, or leave the key value
        unchanged.
    """
    # Load universal content
    input_dict = super().load_parameters(params, key=key)

    # Load input/output units
    self.units.load_parameters(input_dict)

    # Change default values for subset terms
    input_dict['sizemults'] = input_dict.get('sizemults', '3 3 3')
    input_dict['forcetolerance'] = input_dict.get('forcetolerance',
                                                  '1.0e-6 eV/angstrom')

    # Load calculation-specific strings

    # Load calculation-specific booleans

    # Load calculation-specific integers
    self.num_a1 = int(input_dict.get('stackingfault_num_a1', 10))
    self.num_a2 = int(input_dict.get('stackingfault_num_a2', 10))

    # Load calculation-specific unitless floats

    # Load calculation-specific floats with units

    # Load LAMMPS commands
    self.commands.load_parameters(input_dict)

    # Load minimization parameters
    self.minimize.load_parameters(input_dict)

    # Load LAMMPS potential
    self.potential.load_parameters(input_dict)

    # Load initial system
    self.system.load_parameters(input_dict)

    # Load defect parameters
    self.defect.load_parameters(input_dict)
def master_prepare_inputs(self,
                          branch: str = 'main',
                          **kwargs: any) -> dict:
    """
    Utility method that builds input parameters for prepare according to the
    workflows used by the NIST Interatomic Potentials Repository.  In other
    words, transforms inputs from master_prepare into inputs for prepare.

    Parameters
    ----------
    branch : str, optional
        Indicates the workflow branch to prepare calculations for.  Default
        value is 'main'.
    **kwargs : any
        Any parameter modifications to make to the standard workflow
        prepare scripts.

    Returns
    -------
    params : dict
        The full set of prepare parameters based on the workflow branch

    Raises
    ------
    ValueError
        If branch is not a recognized workflow branch name.
    """
    # Initialize params and copy over branch
    params = {}
    params['branch'] = branch

    # main branch
    if branch == 'main':

        # Check for required kwargs
        assert 'lammps_command' in kwargs

        # Set default workflow settings
        params['buildcombos'] = [
            'atomicparent load_file parent',
            'defect stackingfault_file'
        ]
        params['parent_record'] = 'relaxed_crystal'
        params['parent_method'] = 'dynamic'
        params['parent_standing'] = 'good'
        params['defect_record'] = 'stacking_fault'
        params['sizemults'] = '5 5 10'
        params['stackingfault_num_a1'] = '30'
        params['stackingfault_num_a2'] = '30'

        # Copy kwargs to params
        for key in kwargs:

            # Rename potential-related terms for buildcombos
            if key[:10] == 'potential_':
                params[f'parent_{key}'] = kwargs[key]

            # Copy/overwrite other terms
            else:
                params[key] = kwargs[key]

    else:
        raise ValueError(f'Unknown branch {branch}')

    return params
@property
def templatekeys(self) -> dict:
    """dict : The calculation-specific input keys and their descriptions."""
    return {
        'stackingfault_num_a1': ' '.join([
            "The number of fractional shift steps to measure along the a1",
            "shift vector. Default value is 10."]),
        'stackingfault_num_a2': ' '.join([
            "The number of fractional shift steps to measure along the a2",
            "shift vector. Default value is 10."]),
    }
@property
def singularkeys(self) -> list:
    """list: Calculation keys that can have single values during prepare."""
    keys = (
        # Universal keys
        super().singularkeys

        # Subset keys
        + self.commands.keyset
        + self.units.keyset

        # Calculation-specific keys
    )
    return keys
@property
def multikeys(self) -> list:
    """list: Calculation key sets that can have multiple values during prepare."""
    keys = (
        # Universal multikeys
        super().multikeys +

        # Combination of potential and system keys
        [
            self.potential.keyset +
            self.system.keyset
        ] +

        # Defect multikeys
        self.defect.multikeys +

        # Run parameter keys
        [
            [
                'stackingfault_num_a1',
                'stackingfault_num_a2',
            ]
        ] +

        # Minimize keys
        [
            self.minimize.keyset
        ]
    )
    return keys
########################### Data model interactions ###########################
@property
def modelroot(self) -> str:
    """str: The root element of the content"""
    return 'calculation-stacking-fault-map-2D'
def build_model(self) -> DM:
    """
    Generates and returns model content based on the values set to object.
    """
    # Build universal content
    model = super().build_model()
    calc = model[self.modelroot]

    # Build subset content
    self.commands.build_model(calc, after='atomman-version')
    self.potential.build_model(calc, after='calculation')
    self.system.build_model(calc, after='potential-LAMMPS')
    self.defect.build_model(calc, after='system-info')
    self.minimize.build_model(calc)

    # Build calculation-specific content
    if 'calculation' not in calc:
        calc['calculation'] = DM()
    if 'run-parameter' not in calc['calculation']:
        calc['calculation']['run-parameter'] = DM()
    run_params = calc['calculation']['run-parameter']
    run_params['stackingfault_num_a1'] = self.num_a1
    run_params['stackingfault_num_a2'] = self.num_a2

    # Build results
    if self.status == 'finished':
        energy_per_area_unit = f'{self.units.energy_unit}/{self.units.length_unit}^2'
        gamma_model = self.gamma.model(length_unit=self.units.length_unit,
                                       energyperarea_unit=energy_per_area_unit)
        calc['stacking-fault-map'] = gamma_model['stacking-fault-map']

        if self.E_isf is not None:
            calc['intrinsic-fault-energy'] = uc.model(self.E_isf, 'mJ/m^2')

        # Fix: self.paths raises ValueError when no MEP results exist; catch
        # only that instead of a bare except that would mask unrelated errors.
        try:
            paths = self.paths
        except ValueError:
            pass
        else:
            for path in paths:
                calc.append('slip-path', path.build_model())

    self._set_model(model)
    return model
def load_model(self,
               model: Union[str, DM],
               name: Optional[str] = None):
    """
    Loads record contents from a given model.

    Parameters
    ----------
    model : str or DataModelDict
        The model contents of the record to load.
    name : str, optional
        The name to assign to the record.  Often inferred from other
        attributes if not given.
    """
    # Load universal and subset content
    super().load_model(model, name=name)
    calc = self.model[self.modelroot]

    # Load calculation-specific content
    run_params = calc['calculation']['run-parameter']
    self.num_a1 = run_params['stackingfault_num_a1']
    # Fix: was "self.nun_a2", a typo that silently created a new attribute and
    # left num_a2 at its default instead of loading the stored value.
    self.num_a2 = run_params['stackingfault_num_a2']

    # Load results
    if self.status == 'finished':
        # Store raw model content; the gamma property converts it lazily.
        self.__gamma = calc
        if 'intrinsic-fault-energy' in calc:
            self.__E_isf = uc.value_unit(calc['intrinsic-fault-energy'])
        if 'slip-path' in calc:
            self.__paths = []
            for sp in calc.iteraslist('slip-path'):
                self.paths.append(StackingFaultPath(sp))
########################## Metadata interactions ##############################
def metadata(self) -> dict:
    """
    Generates a dict of simple metadata values associated with the record.
    Useful for quickly comparing records and for building pandas.DataFrames
    for multiple records of the same style.
    """
    # Call super to extract universal and subset content
    meta = super().metadata()

    # Extract calculation-specific content

    # Extract results
    if self.status == 'finished':
        if self.E_isf is not None:
            meta['E_isf'] = self.E_isf

        # Fix: self.paths raises ValueError when no MEP results exist; catch
        # only that instead of a bare except that would mask unrelated errors.
        try:
            paths = self.paths
        except ValueError:
            pass
        else:
            for path in paths:
                direction = path.direction
                if path.error is None:
                    meta[f'E_usf_mep {direction}'] = path.usf_mep
                    meta[f'E_usf_urp {direction}'] = path.usf_urp
                    meta[f'τ_ideal_mep {direction}'] = path.shear_mep
                    meta[f'τ_ideal_urp {direction}'] = path.shear_urp
                else:
                    meta[f'error {direction}'] = path.error

    return meta
@property
def compare_terms(self) -> list:
    """list: The terms to compare metadata values absolutely."""
    # Two records are considered duplicates only if all of these match.
    return [
        'script',
        'load_file',
        'load_options',
        'symbols',
        'potential_LAMMPS_key',
        'potential_key',
        'a_mult',
        'b_mult',
        'c_mult',
        'stackingfault_key',
        'num_a1',
        'num_a2'
    ]
@property
def compare_fterms(self) -> dict:
    """dict: The terms to compare metadata values using a tolerance."""
    # No float-tolerance comparison terms for this calculation style.
    return {}
def isvalid(self) -> bool:
    """bool: True when the loaded system and defect share the same family."""
    return self.defect.family == self.system.family
########################### Calculation interactions ##########################
def calc_inputs(self) -> dict:
    """Builds calculation inputs from the class's attributes"""
    input_dict = {}

    # Let each subset contribute its own inputs first.
    for subset in self.subsets:
        subset.calc_inputs(input_dict)

    # Then append the calculation-specific settings.
    input_dict.update(num_a1=self.num_a1, num_a2=self.num_a2)

    return input_dict
def process_results(self, results_dict: dict):
    """
    Processes calculation results and saves them to the object's results
    attributes.
    Parameters
    ----------
    results_dict: dict
        The dictionary returned by the calc() method.
    """
    # Store the gamma-surface result on the private attribute; the name is
    # mangled, so this must stay inside the record class.
    self.__gamma = results_dict['gamma']
|
# https://youtu.be/9JUAPgtkKpI?t=1065
# Scratch script exploring basic NumPy array attributes, indexing,
# transpose and matrix inversion.
import numpy as np
import os

# Clear the console before printing.  Fix: 'cls' only exists on Windows;
# fall back to 'clear' on other platforms.
os.system('cls' if os.name == 'nt' else 'clear')

print('LEARNING NUM_PY\n')

a = np.array([[1, 2], [4, 5], [7, 8]])
print(' a = np.array([[1, 2], [4, 5], [7, 8]])\n')
print('version =>', np.__version__)
print(f'a => \n{a}')
print(f"shape => {a.shape}")
print(f'dtype => {a.dtype}')
print(f'type => {type(a)}')
print(f'ndim => {a.ndim}')
print(f'size => {a.size}')
print(f'itemSize => {a.itemsize}')
print()

print(f'a => \n{a}\n')
# Both index styles reach the same element; a[1, 0] is the NumPy idiom.
print(f'a[1][0] => {a[1][0]}')
print(f'a[1, 0] => {a[1, 0]}')
print(f'a[0] => {a[0]}')
print()

# numpy transpose matrix
print(f'a.T => \n{a.T}')
print()

# numpy inverse matrix (2x2).
b = np.array([[1, 2], [4, 5]])
print(f'b = np.array([[1, 2], [4, 5]]) =>\n{np.array([[1, 2], [4, 5]])}\n')
print(
    f'np.linalg.inv(b) => \n{np.linalg.inv(b)}')
print()

# numpy inverse matrix (3x3).
c = np.array([[1, 9, 3], [4, 5, 8], [3, 8, 9]])
print(f'c = np.array([[1, 9, 3], [4, 5, 8], [3, 8, 9]]) =>\n{c}\n')
print(
    f'np.linalg.inv(c) =>\n{np.linalg.inv(c)}')
print()
|
#Defining a class
# Class names are uppercase
#Allows us to create our own data types
#Every class has to have the "__init__" method. Its alled the constructor. It must accept at least one thing, it has to be 'self'
class Person:
    """Minimal example class: prints a message when created or poked."""

    def __init__(self):
        # The constructor; 'self' is mandatory on every instance method.
        print("class instantiated")

    def do_something(self):
        """Announce that something was done."""
        print("Something done")
#p = Person() #creates an object
#classes are blueprints to create objects
class Rectangle:
    """A rectangle defined by its length and width."""

    def __init__(self, l, w):
        # Assigning to self creates the instance's attributes.
        self.length = l
        self.width = w

    def area(self):
        """Return the rectangle's area."""
        return self.length * self.width

    def perimeter(self):
        """Return the rectangle's perimeter."""
        return 2 * (self.length + self.width)

    # Backward-compatible alias: the original method name was a typo and
    # existing callers still invoke it.
    parimeter = perimeter
r1 = Rectangle (10,5)  # Creating an object (length=10, width=5)
print(r1.area())       # -> 50
print (r1.parimeter()) # -> 30 (method name typo comes from the class)
class Playlist:
    """A named collection of songs.

    Songs are plain dicts that must provide the 'title', 'artist' and
    'length' keys.
    """

    REQUIRED_KEYS = ('title', 'artist', 'length')

    def __init__(self, name):
        self.songs = []   # validated song dicts, in insertion order
        self.name = name

    def add_song(self, song):
        """Validate *song* and append it to the playlist.

        A valid song is a dict containing 'title', 'artist' and 'length'.
        Invalid input is reported and ignored.
        """
        if isinstance(song, dict) and all(key in song for key in self.REQUIRED_KEYS):
            self.songs.append(song)
        else:
            # Fix: the original message only mentioned the dict check even
            # though missing keys also reject the song; name the full
            # requirement so the error is actionable.
            print("Error: song must be a dict with 'title', 'artist' and 'length'")

    def get_title(self):
        """Return the titles of all songs, in playlist order."""
        return [song['title'] for song in self.songs]

    def duration(self):
        """Return the total length of the playlist."""
        return sum(song['length'] for song in self.songs)
p = Playlist("My_Travel_list")
# NOTE(review): this dict has no 'title' key ('song' was written instead),
# so add_song's validation rejects it and prints an error.
p.add_song(
    {
        'song':'title',
        'artist' :'Cardi B',
        'length': 3.50
    }
)
p.add_song(
    {'title':'Workout',
     'artist' :'J cole',
     'length': 2.30
    }
)
# Return values are discarded here; after the calls only the second song
# is actually stored in the playlist.
p.get_title()
p.duration()
|
#Разработать программное средство с использованием ООП для
#представления успеваемости студентов по дисциплине:
#1)
#Промежуточная аттестация максимум 20 баллов, разбитые
#по количеству работ (практики, контрольная и тестирование в 1
#половине семестра);
#2)
#Работа в семестре 20 баллов (практики, контрольная и
#тестирование во 2 половине семестра);
#3)
#Экзамен 60 баллов;
#4)
#Выставление итоговой оценки.
#Объект класса должен содержать поля для сохранения имени
#студента и истории получения баллов (по практикам, контрольным и
#тестированиям) с учетом даты получения оценки по схеме: выполнено,
#защищено.
import first
class university:
    """Interactive gradebook helper: collects midterm, semester and exam
    scores via console prompts, delegating the first two to the external
    'first' module (project-local; contents not visible here)."""

    def promeznost (self,score):
        # Midterm block (max 20 points per the assignment header).
        self.score=score
        promofor=first.practise()       # (text, points) tuple -- presumed; confirm in first.py
        score=score+promofor[1]
        return score,promofor

    def rabotaisemen(self,score):
        # Semester-work block (max 20 points).
        self.score=score
        promofor=first.rabota()
        score=score+promofor[1]
        return score,promofor

    def exam (self):
        # Exam block: score is capped at 60, then a completion date is read
        # and validated; invalid dates restart the whole prompt recursively.
        score=int(input("сколько баллов он получили за экзамен ?\n"))
        if score > 60:
            score=60
        import datetime
        lala=[]
        a=input("введите дату выполнения через пробел. Сначала вводится год , потом месяц, а потом день\n").split(" ")
        for i in range (len(a)):
            a[i]=int(a[i])
            lala.append(a[i])
        if lala[0]<2000 or lala[1]>12 or lala[2]>31 :
            return self.exam ()
        sdacha=datetime.datetime(lala[0],lala[1],lala[2])
        garik=str(score)
        sdacha=str(sdacha)
        result="За экзамен получено баллов - "+garik+" сдан экзамен был в эту дату - "+sdacha+" | "
        return result,score

# Top-level menu: 1 = enter N students interactively, 2 = show canned data.
choice=int(input ("Здравствуйте ! Вас приветствует программа успеваемости студентов. Для того , чтобы ввести N-ное число студентов и инфу к ним введите - 1, чтобы узнать инфу о других студентах - 2 \n"))
if choice==2:
    # Hard-coded demo roster.
    print ("Михалков|Пи19-1|выполнил 01-09-1939|защитил 3 - 08.05.1945|контрольная 3 - 10.05.2008|самостоятельная 5 - 11.06.2001|экзамен 50 - 11.09.2201|\nГаврилов|Пи19-1|выполнил 01-09-1939|защитил 5 - 08.05.1945|контрольная 10 - 10.05.2008|самостоятельная 2 - 11.06.2001|экзамен 50 - 11.09.2201|\nГагарин|Пи1990-10|выполнил 01-09-1920|защитил 69 - 08.05.1961|контрольная 0 - 10.05.2008|самостоятельная 7 - 11.06.2001|экзамен 20 - 11.09.2201|\nПутин|Пи2-1|выполнил 01-10-1910|защитил 10 - 20.12.1920|контрольная 3 - 10.05.2008|самостоятельная 5 - 11.06.2001|экзамен 50 - 11.09.2201|\nРамзан|Пи18-2|выполнил 01-09-1939|защитил 3 - 08.05.1945|контрольная 20 - 10.05.1991|самостоятельная 2 - 11.06.2001|экзамен 59 - 11.09.2201|\nМакс|Пи19-9|выполнил 02-11-2021|защитил 0 - 08.05.2022|контрольная 3 - 10.05.2008|самостоятельная 5 - 11.06.2001|экзамен 50 - 11.09.2201|\n")
elif choice==1:
    cccount=int(input("сколько будет учеников ?\n"))
    for i in range (cccount):
        score=0
        name=str(input("как зовут ученика ?\n"))
        group=str(input("в какой группе он находится ?\n"))
        discipline=str(input("какой предмет у ученика ?\n"))
        raka=university()
        asd=raka.exam()
        score=score+asd[1]
        jaka=university()
        ass=jaka.promeznost(score)
        # NOTE(review): ass[0] already contains the running score, so adding
        # it again double-counts; same for ak[0] below -- confirm intent.
        score=score+ass[0]
        maka=university()
        ak=maka.rabotaisemen(score)
        score=score+ak[0]
        score=str(score)
        i="номер ученика - "+str(i)+" | "
        name="его\ее зовут -"+name+" | "
        group="учится в "+group+" | "
        discipline="предмет - "+discipline+" | "
        result=""
        lkj=str(ak[1])
        poi=str(ass[1])
        mnb=str(asd[0])
        result=i+result+name+group+discipline+mnb+poi+lkj
        # NOTE(review): mode "w" truncates the file every iteration, so only
        # the last student's record survives in text.txt.
        f=open("text.txt","w")
        f.write(result)
        f.close()
    print ("сейчас откроем файл и посмотрим, что мы туда записали\n")
    f=open("text.txt","r")
    for line in f:
        print (line)
else :
    exit()
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
""" Contain KafkaTransactionContext & KafkaTransactionalManager
Module for make Kafka Transaction
"""
from typing import Callable, Dict
from tonga.services.coordinator.transaction.base import (BaseTransaction,
BaseTransactionContext)
from tonga.services.coordinator.transaction.errors import MissingKafkaTransactionContext
from tonga.services.producer.base import BaseProducer
from tonga.models.structs.positioning import (BasePositioning, KafkaPositioning)
__all__ = [
'KafkaTransactionContext',
'KafkaTransactionalManager'
]
class KafkaTransactionContext(BaseTransactionContext):
    """ KafkaTransactionContext class

    Holds the positioning data of the message currently being processed.
    The KafkaConsumer creates one per received message and hands it to the
    KafkaTransactionalManager.
    """
    _group_id: str

    def __init__(self, topic: str, partition: int, offset: int, group_id: str):
        """KafkaTransactionContext constructor

        Args:
            topic (str): Kafka topic name
            partition (int): Kafka topic partition number
            offset (int): Kafka msg current offset
            group_id (str): KafkaConsumer group_id
        """
        self._topic: str = topic
        self._partition: int = partition
        self._offset: int = offset
        self._group_id: str = group_id

    def get_committed_offsets(self) -> Dict[str, BasePositioning]:
        """ Return committed offsets as TopicsPartitions dict

        Returns:
            Dict[str, BasePositioning]: offset to commit (current + 1),
            keyed by the topic/partition assignment key
        """
        assignment_key = KafkaPositioning.make_class_assignment_key(
            self._topic, self._partition)
        next_position = KafkaPositioning(
            self._topic, self._partition, self._offset + 1)
        return {assignment_key: next_position}

    @property
    def group_id(self) -> str:
        """ Return group_id

        Returns:
            str: return _group_id
        """
        return self._group_id
class KafkaTransactionalManager(BaseTransaction):
    """ KafkaTransactionalManager class

    Contains the latest KafkaTransactionContext of received message (One by topic / partition)
    """

    def __init__(self, transactional_producer: BaseProducer = None) -> None:
        """KafkaTransactionalManager constructor

        Attributes:
            transactional_producer (Union[KafkaProducer, None]): Transactional KafkaProducer used for start transaction
                                                                & send committed offset to Kafka
        """
        self._ctx = None
        self._transactional_producer = transactional_producer

    def set_ctx(self, ctx: BaseTransactionContext) -> None:
        """ Set KafkaTransactionContext

        Args:
            ctx (BaseTransactionContext): KafkaTransactionContext, calculated at each new message

        Returns:
            None
        """
        self._ctx = ctx

    def set_transactional_producer(self, transactional_producer: BaseProducer) -> None:
        """ Set a transactional KafkaProducer

        Args:
            transactional_producer (BaseProducer): Transactional KafkaProducer used for start transaction
                                                   & send committed offset to Kafka

        Returns:
            None
        """
        self._transactional_producer = transactional_producer

    def __call__(self, func: Callable):
        """ Decorator function, used for make transaction operation

        Args:
            func (Callable): Decorated function

        Raises:
            MissingKafkaTransactionContext: Raised when KafkaTransaction was missing

        Returns:
            bool: True if transaction has been succeeding
        """
        # NOTE(review): self._logger is expected to be supplied by
        # BaseTransaction -- it is not defined in this class.
        self._logger.info('Init transactional function')

        async def make_transaction(*args, **kwargs):
            self._logger.info('Start transaction')
            # Fix: validate preconditions *before* touching the producer.
            # The original called is_running() first, raising AttributeError
            # instead of MissingKafkaTransactionContext when no producer
            # was configured.
            if self._transactional_producer is None:
                raise MissingKafkaTransactionContext
            if self._ctx is None:
                raise MissingKafkaTransactionContext
            if not self._transactional_producer.is_running():
                await self._transactional_producer.start_producer()
            self._logger.debug('Committed offset : %s', self._ctx.get_committed_offsets())
            # Run the decorated coroutine inside the producer transaction,
            # then commit the consumer offsets atomically with it.
            async with self._transactional_producer.init_transaction():
                await func(*args, **kwargs)
                await self._transactional_producer.end_transaction(
                    committed_offsets=self._ctx.get_committed_offsets(),
                    group_id=self._ctx.group_id)
            self._logger.info('End transaction')
            return True
        return make_transaction
|
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import os
import urllib.request

# KBO (Korea Baseball Organization) hitter-stats page to scrape.
URL = 'https://www.koreabaseball.com/Record/Player/HitterBasic/Basic1.aspx'

# NOTE(review): '\c' is not a recognised escape so the path survives
# literally, but a raw string r'H:\chromedriver.exe' would be safer.
driver = webdriver.Chrome('H:\chromedriver.exe')
driver.get(url=URL)
driver.implicitly_wait(3)

# Collect the stat columns.  NOTE(review): find_elements_by_css_selector
# was removed in Selenium 4 -- confirm the pinned selenium version.
ygName = driver.find_elements_by_css_selector('td a')            # player names
ygG = driver.find_elements_by_css_selector('.asc')               # games played
ygH = driver.find_elements_by_css_selector('td:nth-child(9)')    # hits
ygHR = driver.find_elements_by_css_selector('td:nth-child(12)')  # home runs
ygRBI = driver.find_elements_by_css_selector('td:nth-child(14)') # RBIs
alist = []
def ele(ments):
    """Return the .text of every element in *ments*, preserving order."""
    return [element.text for element in ments]
# Build parallel columns: [names, games, hits, home runs, RBIs].
alist.append(ele(ygName))
alist.append(ele(ygG))
alist.append(ele(ygH))
alist.append(ele(ygHR))
alist.append(ele(ygRBI))
print(alist)

x = len(alist[0])  # number of players (rows)
y = len(alist)     # number of stat columns

with open('yg_rank.csv', 'w', encoding='utf-8') as file:
    # Fix: the header previously had no trailing newline, so the first
    # player row was appended onto the header line.
    file.write('이름,경기,안타,홈런,타점\n')
    for i in range(x):
        for j in range(y):
            file.write(f"{alist[j][i]},")
        file.write(f"\n")

driver.close()
import xml.etree.ElementTree as ET
import os
import glob
def cover(xmlpath, thresh=20):
    """Walk *xmlpath* and report annotation XML files containing
    overlapping boxes: bndbox centres closer than *thresh* (L1 distance).

    Objects carrying a <point> or <polygon> child are skipped.
    """
    error = []
    error_type = []
    for path, d, filelist in os.walk(xmlpath):
        for xmlname in filelist:
            if xmlname.endswith('xml'):
                oldname = os.path.join(path, xmlname)
                print(oldname)
                tree = ET.parse(oldname)
                objs = tree.findall('object')
                BB = {}
                Center = {}
                m = 0
                for ix, obj in enumerate(objs):
                    box = obj.find('bndbox')
                    x = obj.find('point')
                    y = obj.find('polygon')
                    # Fix: the original used a 'while x or y: break / else'
                    # hack relying on Element truthiness, which is
                    # deprecated (a childless element tests false).  Test
                    # against None explicitly, matching point()/minibox().
                    if x is None and y is None:
                        xmin = float(box.find('xmin').text.strip())
                        ymin = float(box.find('ymin').text.strip())
                        xmax = float(box.find('xmax').text.strip())
                        ymax = float(box.find('ymax').text.strip())
                        b = [xmin, ymin, xmax, ymax]
                        BB[m] = b
                        Center[m] = ((xmax + xmin) / 2.0, (ymax + ymin) / 2.0)
                        m += 1
                if len(Center) > 0:
                    error_num = []
                    # Pairwise centre comparison within this file.
                    for i in range(len(Center) - 1):
                        c1 = Center[i]
                        for j in range(i + 1, len(Center)):
                            c2 = Center[j]
                            d = abs(c1[0] - c2[0]) + abs(c1[1] - c2[1])
                            if d < thresh:
                                error.append(oldname)
                                error_num.append(j)
                    error_type.append(str(len(error_num)))
    error = list(set(error))
    error.sort()
    # NOTE(review): error_type is collected per-file in walk order while
    # error is deduplicated and sorted, so the name/count pairing below can
    # misalign; kept as-is pending confirmation of intended output.
    error_type = [x for x in error_type if x != '0']
    for i in range(len(error)):
        keys = error[i]
        print('containing cover boxes : ', keys, error_type[i], '个')
def minibox(xmlpath, thresh = 20):
    """Walk *xmlpath* and report XML files containing bounding boxes
    narrower or shorter than *thresh* pixels."""
    error = []
    errors_nums = []
    for path, d, filelist in os.walk(xmlpath):
        for xmlname in filelist:
            if xmlname.endswith('xml'):
                oldname = os.path.join(path, xmlname)
                tree = ET.parse(oldname)
                objs = tree.findall('object')
                n = 0   # index of the current bndbox within this file
                m = []  # indices of undersized boxes found in this file
                for ix, obj in enumerate(objs):
                    box = obj.find('bndbox')
                    if box is None:
                        continue
                    else:
                        xmin = float(box.find('xmin').text.strip())
                        ymin = float(box.find('ymin').text.strip())
                        xmax = float(box.find('xmax').text.strip())
                        ymax = float(box.find('ymax').text.strip())
                        width = xmax - xmin
                        hight = ymax - ymin
                        n+=1
                        if width < thresh or hight < thresh:
                            error.append(oldname)
                            m.append(n)
                errors_nums.append(str(len(m)))
    errors = list(set(error))
    errors.sort()
    # NOTE(review): counts are collected in walk order but names are
    # deduplicated and sorted, so this pairing can misalign when several
    # files contain undersized boxes.
    error_num = [x for x in errors_nums if x != '0']
    for i in range(len(errors)):
        keys = errors[i]
        nums = error_num[i]
        print('containing small boxes : ', keys, nums, '个')
def point(xmlpath):
    """Walk *xmlpath* and report XML files whose objects contain a
    <point> annotation, with a per-file count."""
    errors = []
    errors_nums = []
    for path, d, filelist in os.walk(xmlpath):
        for xmlname in filelist:
            if not xmlname.endswith('xml'):
                continue
            oldname = os.path.join(path, xmlname)
            tree = ET.parse(oldname)
            hits = 0
            # One entry in errors per <point>-carrying object (duplicates
            # collapsed below), one count per file.
            for obj in tree.findall('object'):
                if obj.find('point') is not None:
                    errors.append(oldname)
                    hits += 1
            errors_nums.append(str(hits))
    errors = sorted(set(errors))
    errors_nums = [count for count in errors_nums if count != '0']
    for i in range(len(errors)):
        print('containing point boxes : ', errors[i], errors_nums[i], '个')
def polygon(xmlpath):
    """Walk *xmlpath* and report XML files whose objects contain a
    <polygon> annotation, with a per-file count."""
    errors = []
    errors_nums = []
    for path, d, filelist in os.walk(xmlpath):
        for xmlname in filelist:
            if not xmlname.endswith('xml'):
                continue
            oldname = os.path.join(path, xmlname)
            tree = ET.parse(oldname)
            hits = 0
            # One entry in errors per <polygon>-carrying object (duplicates
            # collapsed below), one count per file.
            for obj in tree.findall('object'):
                if obj.find('polygon') is not None:
                    errors.append(oldname)
                    hits += 1
            errors_nums.append(str(hits))
    errors = sorted(set(errors))
    errors_nums = [count for count in errors_nums if count != '0']
    for i in range(len(errors)):
        print('containing polygon boxes : ', errors[i], errors_nums[i], '个')
def del_point(xmlpath):
    """Remove every <object> that carries a <point> child from the XML
    files under *xmlpath*, rewriting each modified file.
    """
    for path, d, filelist in os.walk(xmlpath):
        for xmlname in filelist:
            if not xmlname.endswith('xml'):
                continue
            oldname = os.path.join(path, xmlname)
            tree = ET.parse(oldname)
            root = tree.getroot()
            removed = False
            # Fixes: iterate over a snapshot (removing children while
            # iterating the live tree skips elements) and test
            # 'is not None' instead of the deprecated Element truthiness
            # (which is false for childless elements).
            for elem in list(root.iter(tag='object')):
                if elem.find('point') is not None:
                    print(oldname)
                    root.remove(elem)
                    removed = True
            if removed:
                # NOTE(review): writes to xmlpath + xmlname like the
                # original, which flattens subdirectories and assumes
                # xmlpath ends with a separator -- confirm intended target.
                tree.write(xmlpath + xmlname, encoding="utf-8", xml_declaration=True)
def filename(path):
    """Return the sub-sub-directories of *path* (matches 'path/*/*/')."""
    return glob.glob(os.path.join(path, '*/*/'))
if __name__ == '__main__':
    path = '/media/zs/LinkData/zpf_data/txt/bask/'
    # NOTE(review): this first call's result is unused; the loop below
    # calls filename(path) again.
    xml = filename(path)
    for xml in filename(path):
        # Run every check on each sub-sub-directory.
        print("~~~~~~~~~~~~~~~~~~~check cover boxes~~~~~~~~~~~~~~~~~")
        cover(xmlpath=xml, thresh=15)
        print("~~~~~~~~~~~~~~~~~~~check mini boxes~~~~~~~~~~~~~~~~~~~")
        minibox(xmlpath=xml, thresh=7)
        print("~~~~~~~~~~~~~~~~~~~check point boxes~~~~~~~~~~~~~~~~~~~")
        point(xmlpath=xml)
        #
        print("~~~~~~~~~~~~~~~~~~~check polygon boxes~~~~~~~~~~~~~~~~~~~")
        polygon(xmlpath=xml)
        print('~~~~~~~~~~~~~~~~~~~del_point_boxes~~~~~~~~~~~~~~~~~~~')
        # NOTE(review): unlike the checks above, del_point gets the *top*
        # path (not the per-directory xml), so it re-walks everything on
        # every loop iteration -- confirm whether this is intended.
        del_point(path)
def pascal(number):
    """Return row *number* (0-indexed) of Pascal's triangle."""
    if number == 0:
        return [1]
    prev = pascal(number - 1)
    # Each interior entry is the sum of the two entries above it.
    row = [1]
    row.extend(prev[k] + prev[k + 1] for k in range(len(prev) - 1))
    row.append(1)
    return row
number = 3  # row index to display
print(pascal(number))  # -> [1, 3, 3, 1]
from tkinter import *
from hf10sound_panel2 import *
import pygame.mixer

app = Tk()
app.title("head first mix")
mixer = pygame.mixer
mixer.init()  # create the mixer object and initialise pygame's sound system

# One playback panel per sound clip.
panel = SoundPanel(app, mixer, "wrong.wav")
panel.pack()
panel = SoundPanel(app, mixer, "correct.wav")
panel.pack()

def shutdown():
    """Stop all playback and close the window.

    Bug fix: the original called self.track.stop(), but 'self' does not
    exist in this module-level function and raised NameError on close.
    """
    mixer.stop()  # stop every playing channel
    app.destroy()

# Run shutdown when the user closes the window.
app.protocol("WM_DELETE_WINDOW", shutdown)
app.mainloop()
# Day 17: Conway Cubes
# <ryc> 2021
class ConwayCube:
    """Sparse 3-D Conway cube automaton (AoC 2020 day 17, part 1).

    Active cells are stored as nested mappings: z -> y -> set of x.
    """

    def __set_active(self, coord):
        # Record coord as active, creating intermediate levels on demand.
        x, y, z = coord
        self.__active_cubes.setdefault(z, dict()).setdefault(y, set()).add(x)

    def __init__(self, array):
        self.__active_cubes = dict()
        for coord in array:
            self.__set_active(coord)

    def __is_active(self, coord):
        x, y, z = coord
        try:
            return x in self.__active_cubes[z][y]
        except KeyError:
            return False

    def run(self):
        """Advance the automaton by one cycle (2-3 survive, 3 born)."""
        # Tally, for every cell adjacent to an active cell, its number of
        # active neighbours.  Cells with zero active neighbours never
        # appear here, so isolated active cells simply die.
        neighbour_counts = dict()
        for z, zdict in self.__active_cubes.items():
            for y, ysets in zdict.items():
                for x in ysets:
                    for k in (z - 1, z, z + 1):
                        for j in (y - 1, y, y + 1):
                            for i in (x - 1, x, x + 1):
                                key = (i, j, k)
                                if key != (x, y, z):
                                    neighbour_counts[key] = neighbour_counts.get(key, 0) + 1
        survivors = set()
        for coord, count in neighbour_counts.items():
            if self.__is_active(coord):
                if 2 <= count <= 3:
                    survivors.add(coord)
            elif count == 3:
                survivors.add(coord)
        self.__active_cubes = dict()
        for coord in survivors:
            self.__set_active(coord)

    def get_actives(self):
        """Return the number of currently active cells."""
        return sum(len(xs)
                   for zdict in self.__active_cubes.values()
                   for xs in zdict.values())
def inputdata(coord):
    """Read 'day_17_2020.input' and return the initial active cells as
    (cube_coords, hypercube_coords), offset by *coord* = (x, y, z, w).

    NOTE(review): shadowed by the byte-identical inputdata() defined
    again further down this file; only the later definition is callable.
    """
    x, y, z, w = coord
    with open('day_17_2020.input') as stream:
        data = [ line[:-1] for line in stream ]
    array_cube = list()
    array_hypercube = list()
    # '#' marks an active cell in the 2-D starting slice (z and w fixed).
    for j, line in enumerate(data):
        for i, char in enumerate(line):
            if char == '#':
                array_cube.append((i+x, j+y, z))
                array_hypercube.append((i+x, j+y, z, w))
    return array_cube, array_hypercube
class ConwayHypercube:
    """Sparse 4-D Conway cube automaton (AoC 2020 day 17, part 2).

    Active cells are stored as nested mappings: w -> z -> y -> set of x.
    """

    def __set_active(self, coord):
        # Record coord as active, creating intermediate levels on demand.
        x, y, z, w = coord
        level = self.__active_cubes.setdefault(w, dict())
        level.setdefault(z, dict()).setdefault(y, set()).add(x)

    def __init__(self, array):
        self.__active_cubes = dict()
        for coord in array:
            self.__set_active(coord)

    def __is_active(self, coord):
        x, y, z, w = coord
        try:
            return x in self.__active_cubes[w][z][y]
        except KeyError:
            return False

    def run(self):
        """Advance the automaton by one cycle (2-3 survive, 3 born)."""
        # Tally active-neighbour counts over the 80-cell 4-D neighbourhood.
        neighbour_counts = dict()
        for w, wdict in self.__active_cubes.items():
            for z, zdict in wdict.items():
                for y, ysets in zdict.items():
                    for x in ysets:
                        for l in (w - 1, w, w + 1):
                            for k in (z - 1, z, z + 1):
                                for j in (y - 1, y, y + 1):
                                    for i in (x - 1, x, x + 1):
                                        key = (i, j, k, l)
                                        if key != (x, y, z, w):
                                            neighbour_counts[key] = neighbour_counts.get(key, 0) + 1
        survivors = set()
        for coord, count in neighbour_counts.items():
            if self.__is_active(coord):
                if 2 <= count <= 3:
                    survivors.add(coord)
            elif count == 3:
                survivors.add(coord)
        self.__active_cubes = dict()
        for coord in survivors:
            self.__set_active(coord)

    def get_actives(self):
        """Return the number of currently active cells."""
        return sum(len(xs)
                   for wdict in self.__active_cubes.values()
                   for zdict in wdict.values()
                   for xs in zdict.values())
def inputdata(coord):
    """Read 'day_17_2020.input' and return the initial active cells as
    (cube_coords, hypercube_coords), offset by *coord* = (x, y, z, w).

    This is a byte-identical duplicate of the inputdata() defined earlier
    in the file; this later definition is the one that wins.
    """
    x, y, z, w = coord
    with open('day_17_2020.input') as stream:
        data = [ line[:-1] for line in stream ]
    array_cube = list()
    array_hypercube = list()
    # '#' marks an active cell in the 2-D starting slice (z and w fixed).
    for j, line in enumerate(data):
        for i, char in enumerate(line):
            if char == '#':
                array_cube.append((i+x, j+y, z))
                array_hypercube.append((i+x, j+y, z, w))
    return array_cube, array_hypercube
if __name__ == '__main__':
    print('\n17: Conway Cubes')
    # Offset (-1, -1, 0, 0) shifts the parsed grid; z and w start at 0.
    array_cube, array_hypercube = inputdata((-1,-1,0,0))
    cc = ConwayCube(array_cube)
    hc = ConwayHypercube(array_hypercube)
    step = 6  # the puzzle asks for the state after six boot cycles
    for _ in range(step):
        cc.run()
        hc.run()
    print('\n actives in cube =', cc.get_actives())
    print('\n actives in hypercube =', hc.get_actives())
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-06-10 12:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11: widens Result.result_from_api_name to a
    # nullable, blankable TextField.  The verbose_name is the escaped
    # Chinese label (presumably "all returned results" -- confirm).
    # Applied migrations should not be hand-edited.

    dependencies = [
        ('k8sproject', '0007_auto_20180610_2017'),
    ]

    operations = [
        migrations.AlterField(
            model_name='result',
            name='result_from_api_name',
            field=models.TextField(blank=True, max_length=10240, null=True, verbose_name='\u6240\u6709\u8fd4\u8fd8\u7ed3\u679c'),
        ),
    ]
|
class Dog():
    """A simple dog model with a name, an age and two tricks."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def sit(self):
        """Print a message saying the dog sat down."""
        print(self.name.title() + " is a now sitting.")

    def roll_over(self):
        """Print a message saying the dog rolled over."""
        print(self.name.title() + " rolling over!")
my_dog=Dog("willie",6)  # create a Dog instance
print("My dog's name is "+my_dog.name.title()+".")
print("My dog's age is "+str(my_dog.age)+" years old.")
# Both tricks just print messages.
my_dog.sit()
my_dog.roll_over()
|
# -*- coding:utf8 -*-
#1 request 的使用
#
# import requests
#
# response = requests.get('https://www.appannie.com/apps/ios/app/idle-heroes/reviews/?order_by=date&order_type=desc&date=2019-04-29~2019-05-29&translate_selected=false&granularity=weekly&stack&percent=false&series=rating_star_1,rating_star_2,rating_star_3,rating_star_4,rating_star_5&country=CN')
# print response.text
# import requests
# header ={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.115 Safari/537.36'}
# response = requests.get('https://www.appannie.com/apps/ios/app/idle-heroes/reviews/?order_by=date&order_type=desc&date=2019-04-29~2019-05-29&translate_selected=false&granularity=weekly&stack&percent=false&series=rating_star_1,rating_star_2,rating_star_3,rating_star_4,rating_star_5&country=CN')
# #print response.text
#
# print response.content.decode('gbk')
# 自己写的简单爬虫
#
# import requests
# from lxml import etree
#
# url = "https://www.douban.com"
#
# header ={'User-Agent': 'Mozilla/5.0 (Windows NT x.y; Win64; x64; rv:10.0) Gecko/20100101 Firefox/10.0 '}
#
#
# response = requests.get(url,headers = header)
# #print response.text
# html=response.content
# selector=etree.HTML(html)
#
# #contents=selector.xpath('//*[@id="fcxH9b"]/div[4]/c-wiz[3]/div/div[2]/div/div[1]/div/div/div[1]/div[5]/div/div[2]/div[2]')
#
# simple=selector.xpath('//*[@id="anony-time"]/div/div[3]/ul/li[1]/a[2]')
# print simple[0].text
# 此爬虫用于google play,被反爬虫了,手动爬虫
# Python 2 scraper for a Google Play app page; the page is rendered
# against bot traffic, so the data was ultimately collected manually.
import requests
from lxml import etree

url = "https://play.google.com/store/apps/details?id=com.droidhang.ad"
header ={'User-Agent': 'Mozilla/5.0 (Windows NT x.y; Win64; x64; rv:10.0) Gecko/20100101 Firefox/10.0 '}

response = requests.get(url,headers = header)
#print response.text
html=response.content
#print html
selector=etree.HTML(html)

#contents=selector.xpath('//*[@id="fcxH9b"]/div[4]/c-wiz[3]/div/div[2]/div/div[1]/div/div/div[1]/div[5]/div/div[2]/div[2]')
# Absolute XPath copied from browser dev tools; extremely brittle against
# page-layout changes.
simple1=selector.xpath('//*[@id="fcxH9b"]/div[4]/c-wiz/div/div[2]/div/div[1]/div/c-wiz[1]/c-wiz[1]/div/div[2]/div/div[1]/div[2]/div[1]/div[1]/span[1]/a')
# Python 2 print statements (this file is not Python 3 compatible).
print simple1
print simple1[0].attrib
# simple2=selector.xpath('//*[@id="fcxH9b"]/div[4]/c-wiz[3]/div/div[2]/div/div[1]/div/c-wiz[1]/c-wiz[3]/div/div[1]/meta')
# print simple2
# print simple2
import os
import psycopg2

# Resolve the database URL via the Heroku CLI; [:-1] strips the trailing
# newline from the command output.
DATABASE_URL = os.popen('heroku config:get DATABASE_URL -a app_name').read()[:-1]

# connect to database (Heroku Postgres requires SSL)
conn = psycopg2.connect(DATABASE_URL, sslmode='require')

# create a table
create_table_query = '''
    CREATE TABLE test_table(
        id serial PRIMARY KEY,
        name VARCHAR NOT NULL,
        weight NUMERIC NOT NULL,
        height NUMERIC NOT NULL,
        date DATE NOT NULL);
    '''

# Fix: the original leaked the cursor and connection if execute() raised;
# close them even on failure.
try:
    with conn.cursor() as cursor:
        cursor.execute(create_table_query)
    conn.commit()
finally:
    conn.close()
|
# -*- coding: utf-8 -*-
#
import numpy
import sys

# Gmsh lives inside the app bundle on macOS; elsewhere assume it is on PATH.
gmsh_executable = (
    '/Applications/Gmsh.app/Contents/MacOS/gmsh'
    if sys.platform == 'darwin'
    else 'gmsh'
)
def rotation_matrix(u, theta):
    '''Return matrix that implements the rotation around the vector :math:`u`
    by the angle :math:`\\theta`, cf.
    https://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle.

    :param u: rotation vector (assumed unit length -- TODO confirm callers)
    :param theta: rotation angle
    '''
    sin_t = numpy.sin(theta)
    cos_t = numpy.cos(theta)
    # Rodrigues' formula: R = cos*I + sin*[u]_x + (1 - cos) * u u^T,
    # where [u]_x is the skew-symmetric cross-product matrix of u.
    skew = numpy.array([
        [0.0, -u[2], u[1]],
        [u[2], 0.0, -u[0]],
        [-u[1], u[0], 0.0],
    ])
    return cos_t * numpy.eye(3) + sin_t * skew + (1.0 - cos_t) * numpy.outer(u, u)
def generate_mesh(geo_object, optimize=True):
    """Mesh *geo_object* with Gmsh and return its (points, cells).

    Writes the geometry to a temporary .geo file, runs ``gmsh -3`` on it
    and reads the resulting .msh via meshio.

    :param geo_object: object whose get_code() returns Gmsh .geo source
    :param optimize: pass -optimize to gmsh
    """
    import meshio
    import os
    import subprocess
    import tempfile

    handle, filename = tempfile.mkstemp(suffix='.geo')
    os.write(handle, geo_object.get_code().encode())
    os.close(handle)

    handle, outname = tempfile.mkstemp(suffix='.msh')
    # Fix: the original leaked this descriptor; gmsh writes the file
    # itself, we only need its name.
    os.close(handle)

    try:
        cmd = [gmsh_executable, '-3', filename, '-o', outname]
        if optimize:
            cmd += ['-optimize']
        out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        print(out.decode())
        points, cells, _, _, _ = meshio.read(outname)
        return points, cells
    finally:
        # Remove the temp files (previously left behind on every call).
        os.remove(filename)
        os.remove(outname)
|
from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, TextAreaField
from wtforms.validators import InputRequired
from flask_wtf.file import FileField, FileRequired, FileAllowed
from werkzeug.utils import secure_filename
class Propertyform(FlaskForm):
    """WTForms form for creating a property listing; every field is
    required via InputRequired/FileRequired validators."""
    title = StringField('Property Title', validators=[InputRequired()])
    bedroom = StringField('No. of Rooms', validators=[InputRequired()])
    bathroom = StringField('No. of Bathrooms', validators=[InputRequired()])
    location = StringField('Location', validators=[InputRequired()])
    # Price is captured as free text; no numeric validation is applied.
    price = StringField('Price', validators=[InputRequired()])
    select = SelectField('Property Type', choices=['House', 'Apartment'] )
    desc=TextAreaField('Description', validators=[InputRequired()])
    # Upload is mandatory and restricted to jpg/png images.
    photo=FileField('Photo', validators=[FileRequired(), FileAllowed(['jpg','png'], 'Images only!')])
# Python 2 snippet: reverse the first k characters of an input string and
# print the result as a list of characters.
s = list(raw_input())   # the string, as a list of characters
k = int(raw_input())    # how many leading characters to reverse
newList = []
n = len(s)
for i in range(k):
    if i<=n-1:          # guard against k exceeding the string length
        newList.append(s[i])
print newList[::-1]+s[k:]
|
from django.db.models import Sum, F
from django.db import transaction
from rest_framework import serializers
import jdatetime
from car.models import CarStock, CarSold
from utils.exceptions import CustomException
class CarListSerializer(serializers.Serializer):
    """Read-only serializer for aggregated car rows
    (name / total / total_sold)."""
    name = serializers.ReadOnlyField()
    total = serializers.ReadOnlyField()
    total_sold = serializers.ReadOnlyField()

    def to_representation(self, instance):
        # No extra shaping needed; delegate to the base implementation.
        return super().to_representation(instance)
class CarStockListSerializer(serializers.Serializer):
    """Read-only serializer for per-stock rows: renames the joined
    'car__name' key and renders the date as a Jalali-calendar string."""
    name = serializers.ReadOnlyField()
    date = serializers.ReadOnlyField()
    total = serializers.ReadOnlyField()
    total_sold = serializers.ReadOnlyField()

    def to_representation(self, instance):
        instance['name'] = instance.pop('car__name')
        jalali_date = jdatetime.date.fromgregorian(date=instance['date'])
        instance['date'] = jalali_date.strftime('%Y/%m/%d')
        return super().to_representation(instance)
class CarBuySerializer(serializers.Serializer):
    """Validates a purchase request and records the sale atomically."""
    name = serializers.CharField(max_length=20, min_length=2)
    count = serializers.IntegerField(default=1)

    @transaction.atomic
    def create(self, validated_data):
        """Reserve *count* cars from the oldest stock with availability.

        Raises CustomException when no matching stock exists or not
        enough cars remain.
        """
        # Fix: QuerySet.first() returns None rather than raising
        # DoesNotExist, so the original try/except never fired and the
        # following attribute access crashed with AttributeError.
        car = CarStock.objects.select_for_update().filter(
            car__name__iexact=validated_data['name'],
            total__gt=F('total_sold')).order_by('date').first()
        if car is None:
            raise CustomException(detail='There is no {} for sale.'.format(validated_data['name']))
        if car.total - car.total_sold < validated_data['count']:
            raise CustomException(detail='There is not enough car left for sale.')
        # select_for_update() keeps this read-modify-write race-free.
        car.total_sold += validated_data['count']
        car.save()
        sale = CarSold(car_stock=car, user=self.context['request'].user, count=validated_data['count'])
        sale.save()
        return sale
class CarStockSerializer(serializers.Serializer):
    """Read-only serializer exposing just the car name."""
    name = serializers.ReadOnlyField()

    def to_representation(self, instance):
        # Delegate straight to the base implementation.
        return super().to_representation(instance)
|
seq1 = "0actcg"
seq2 = "acagtag"
gap = -1
mismatch = 0
match = 1
def BuildTable():
global gap
global seq1
global seq2
table = []
for i in range(len(seq1)):
column = []
for j in range(len(seq2)+1):
if j == 0:
column.append(i * gap)
elif i == 0:
column.append(j*gap)
else:
column.append(0)
table.append(column)
return table
def BuildTraceTable():
global seq1
global seq2
traceTable = []
for i in range(len(seq1)):
column = []
for j in range(len(seq2)+1):
column.append(0)
traceTable.append(column)
return traceTable
table = BuildTable()
traceTable = BuildTraceTable()
def FillTable():
    """Fill the DP score table and record, per cell, every move that ties
    for the best score (0 = from top, 1 = from left, 2 = diagonal)."""
    global table
    global traceTable
    global gap
    global match
    global mismatch
    for i in range(1,len(table)):
        for j in range(1,len(table[0])):
            dScore = 0
            # seq1[i] lines up with seq2[j-1] because of seq1's dummy prefix.
            if seq1[i] == seq2[j-1]:
                dScore = int(match + table[i-1][j-1])
            else:
                dScore = int(mismatch + table[i-1][j-1])
            topScore = int(table[i-1][j]) + int(gap)
            leftScore = int(table[i][j-1]) + int(gap)
            score = max(dScore,topScore,leftScore)
            table[i][j] = score
            # Store all tying moves; BuildPath() only follows the first.
            scores = []
            if int(score) == int(topScore):
                scores.append(0)
            if int(score) == int(leftScore):
                scores.append(1)
            if int(score) == int(dScore):
                scores.append(2)
            traceTable[i][j] = scores

FillTable()
# Dump the score and traceback tables for inspection.
for i in table:
    print(i)
for j in traceTable:
    print(j)
def BuildPath():
    """Trace back from the bottom-right cell and return the move string
    (built back-to-front): 'd' diagonal, 'l' left, 't' top.

    Only the first recorded move per cell is followed, producing one
    (arbitrary) optimal path.  NOTE(review): the loop stops as soon as
    either index reaches 0, so moves along the first row/column are
    never emitted.
    """
    global traceTable
    i = len(table)-1
    j = len(table[0])-1
    s1 = ""
    s2 = ""
    path = ""
    # assumes that there is one path and one path only
    while i != 0 and j != 0:
        if traceTable[i][j][0] == 2:
            path += "d"
            i = i-1
            j = j-1
        elif traceTable[i][j][0] == 1:
            path += "l"
            j = j - 1
        elif traceTable[i][j][0] == 0:
            path += "t"
            i = i - 1
    print(path)
    return path

path = BuildPath()
def GetPairwiseAlignment(seq1, seq2):
    """Print the aligned sequences and total score by replaying the
    module-level *path* over seq1/seq2.

    Also relies on the module-level match, mismatch and gap scores.
    """
    seqFinal1 = ""
    seqFinal2 = ""
    # path was built back-to-front, so walk the sequences reversed too.
    seq1 = seq1[::-1]
    seq2 = seq2[::-1]
    seq1Counter = 0
    seq2Counter = 0
    finalScore = 0
    for move in path:
        if move == "d":
            # Diagonal: consume one character from each sequence.
            seqFinal1 += seq1[seq1Counter]
            seqFinal2 += seq2[seq2Counter]
            if seq1[seq1Counter] == seq2[seq2Counter]:
                finalScore += match
            else:
                finalScore += mismatch
            seq1Counter += 1
            seq2Counter += 1
        elif move == 'l':
            # Left: gap in seq1, consume from seq2.
            seqFinal1 += '-'
            seqFinal2 += seq2[seq2Counter]
            seq2Counter += 1
            finalScore += gap
        else:
            # Top: gap in seq2, consume from seq1.
            seqFinal2 += '-'
            # Bug fix: the original read seq2[seq1Counter] here, corrupting
            # the alignment (and risking IndexError) on every 't' move.
            seqFinal1 += seq1[seq1Counter]
            seq1Counter += 1
            finalScore += gap
    print(seqFinal1[::-1])
    print(seqFinal2[::-1])
    print(finalScore)
seq1 = "actcg"
seq2 = "acagtag"
GetPairwiseAlignment(seq1,seq2) |
#!/usr/bin/env python
# wujian@2018
import os
import pprint
import argparse
import random
from libs.trainer import PermutationTrainer
from libs.utils import dump_json, get_logger
from libs.dataset import make_pitloader
from nnet import Nnet
from conf import trainer_conf, nnet_conf, feats_conf, train_data, dev_data
logger = get_logger(__name__)
def run(args):
    """Build the network and trainer from conf.py, snapshot every config
    next to the checkpoints, then launch PIT training."""
    nnet = Nnet(**nnet_conf)
    trainer = PermutationTrainer(
        nnet, gpuid=args.gpu, checkpoint=args.checkpoint, **trainer_conf)

    # Dump all configuration dicts so the run can be reproduced later.
    data_conf = {"train_data": train_data, "dev_data": dev_data}
    for conf, fname in [(nnet_conf, "mdl.json"),
                        (feats_conf, "feats.json"),
                        (trainer_conf, "trainer.json"),
                        (data_conf, "data.conf")]:
        dump_json(conf, args.checkpoint, fname)

    def build_loader(data, shuffle):
        # feats_conf is shared, so toggle shuffling just before each build.
        feats_conf["shuf"] = shuffle
        return make_pitloader(
            data["linear_x"],
            feats_conf,
            data,
            batch_size=args.batch_size,
            cache_size=args.cache_size)

    train_loader = build_loader(train_data, True)
    dev_loader = build_loader(dev_data, False)
    trainer.run(train_loader, dev_loader, num_epochs=args.epochs)
if __name__ == "__main__":
    # CLI wiring: everything model-related comes from conf.py; these flags
    # only control the runtime environment of this training job.
    parser = argparse.ArgumentParser(
        description="Command to do train (B)LSTM with utterance-level "
        "permutation invariant training, auto configured from conf.py",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--gpu", type=int, default=0,
                        help="Training on which GPUs")
    parser.add_argument("--epochs", type=int, default=50,
                        help="Number of training epochs")
    parser.add_argument("--checkpoint", type=str, required=True,
                        help="Directory to dump models")
    parser.add_argument("--batch-size", type=int, default=16,
                        help="Number of utterances in each batch")
    parser.add_argument("--cache-size", type=int, default=8,
                        help="Number of batches cached in the queue")
    args = parser.parse_args()
    logger.info("Arguments in command:\n{}".format(pprint.pformat(vars(args))))
    run(args)
|
# import numpy as np
# grid = np.chararray((rows, cols))
# order is important since these are parallel arrays
turn_cycle = [-1, 0, 1]               # intersection turns: left, straight, right
cart_chars = ['<','^','>','v']        # cart glyphs; list index doubles as direction id
track_chars = ['-','|','-','|']       # straight track piece hidden under each cart glyph
backward_curve = ['^', '<', 'v', '>'] # new heading after a '\' curve, per direction id
forward_curve = ['v','>','^','<']     # new heading after a '/' curve, per direction id
dirs = [(0,-1),(-1,0),(0,1),(1,0)]    # (row, col) delta for each direction id
def simulate_carts(grid, carts):
    """Advance carts one tick at a time until two collide.

    Mutates ``grid`` (cart glyphs move across it, restoring the track piece
    underneath) and the cart dicts in ``carts``.  Each cart dict tracks its
    position ('row'/'col'), direction id ('cart'), the track piece it sits
    on ('track'), and its intersection turn state ('turnIndex').
    Returns the (row, col) of the first collision.
    """
    while True: # will return with coords tuple once collision occurs
        # 1) Sort cart list by row (top to bottom) then column (left to right).
        carts = sorted(carts, key = lambda x: (x['row'], x['col']))
        # 2) Iterate through cart list, move 1 step in direction of cart
        for c in carts:
            index = c['cart']
            vector = dirs[index]
            new_r = c['row'] + vector[0]
            new_c = c['col'] + vector[1]
            new_char = grid[new_r][new_c]
            # 3) If new location has another cart, return new coords.
            if new_char in cart_chars: # COLLISION
                return (new_r, new_c)
            new_index = index
            # 4) If new location has \ / or + then make turn and update cart's state accordingly
            if new_char == '/':
                new_index = cart_chars.index(forward_curve[index])
            elif new_char == '\\':
                new_index = cart_chars.index(backward_curve[index])
            elif new_char == '+':
                # Intersections cycle left, straight, right (turn_cycle).
                turnIndex = c['turnIndex']
                new_index = (index + turn_cycle[turnIndex]) % 4
                c['turnIndex'] = (turnIndex + 1) % 3
            grid[c['row']][c['col']] = c['track'] # put back old track piece
            grid[new_r][new_c] = cart_chars[new_index] # update new track piece with cart
            c['cart'] = new_index # stores cart orientation
            c['track'] = new_char # remember track piece this cart is on, to replace later ^^^
            c['row'] = new_r
            c['col'] = new_c
# Parse the puzzle map: build a 2D character grid and register every cart.
f = open('input.txt', 'r')
lines = f.readlines()
f.close()
rows = len(lines)
cols = len(lines[0]) - 1 # include whitespace, exclude \n
grid = [] # figure out why numpy isn't working.
carts = []
for r in range(rows):
    row = []
    line = lines[r].replace('\n','')
    for c in range(cols):
        char = line[c]
        if char in cart_chars:
            # Record the cart and remember which straight track piece sits
            # underneath its starting glyph (parallel to cart_chars).
            index = cart_chars.index(char)
            carts.append({
                'row': r, # y
                'col': c, # x
                'cart': index, # can use this index to direction and other arrays
                'track': track_chars[index],
                'turnIndex': 0 # start at 0
            })
        row.append(char)
    grid.append(row)
coords = simulate_carts(grid, carts)
print(coords) # outputs as (r,c). AoC site wants it in form (x,y) or in this case (c,r).
import os

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import fcluster, linkage
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
os.chdir("#")  # NOTE(review): placeholder path -- set to the data directory
df = pd.read_csv("ramen-ratings.csv")
df = df.drop(["Top Ten"], axis = 1)
df = df.rename(columns={"Review #":"Review"})
# One-hot encode the categorical columns and keep the numeric rating.
dummies= pd.get_dummies(df[["Brand", "Variety", "Style", "Country"]])
stars = df[["Stars"]]
data = pd.concat((dummies, stars), axis=1)
#Unsupervised K-Means
#Elbow method: average distance of each point to its nearest centroid,
#computed for k = 2..14.
res=list()
n_cluster = range(2,15)
for n in n_cluster:
    kmeans = KMeans(n_clusters = n)
    kmeans.fit(data)
    res.append(np.average(np.min(cdist(data, kmeans.cluster_centers_, 'euclidean'), axis=1)))
plt.plot(n_cluster, res)
plt.title('elbow curve')
kmeans_model=KMeans(n_clusters=7,init='k-means++', max_iter=300, n_init=10, random_state=0).fit(data);
results = kmeans_model.predict(data);
centroids = kmeans_model.cluster_centers_;
kmeans_n = pd.DataFrame(data = results);
df["Score"] = kmeans_n
res= kmeans_model.__dict__
res2 = pd.DataFrame.from_dict(res)
#Decision tree to get the features.
data2 = data
# NOTE(review): ``linkage`` must be imported from scipy.cluster.hierarchy
# (it was missing from the import block at the top of this file).
H_cluster = linkage(data2,'ward')
cluster_2 = fcluster(H_cluster, 5, criterion='maxclust')
cluster_Hierarchical = pd.DataFrame(cluster_2)
data['cluster'] = cluster_Hierarchical
# Sum the one-hot columns per cluster to profile each group.
data2 = data.groupby(["cluster"]).sum()
data2.to_csv("Scores.csv")
'''
The features are exported to an Excel file to have a better understanding of the groups
We can identify one cluster a) few stars, with style pack,bowl, cup mainly in Canda, Taiwan, USA, Thailand, UK and Hong Kong
b)not many stars, Nissin brand in Japan, the US, Thailand, Singapore, UK style Tray
c)lot of stars, bowl and pack style, in Japan and the US, brand Nissin
d) few stars, mainly the Korean market with local brands
e)Large number of stars, pack style, in Taiwan, Japan, Malaysia with the brand Nissin. It seems to be the broad Asian market excluding South Korea
'''
#We will use an AdaBoost algorithm to predict the stars score as there are too many features for a regression
X = data.drop(["Stars"], axis=1)
y = data[["Stars"]].astype(int)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
abc = AdaBoostClassifier(n_estimators=50, learning_rate=1)
model = abc.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
##Very low accuracy of 44% we saw in a previous analysis with the K-Means that the style, the brand and country were the most important features
# Second attempt: smaller test split, more estimators.
X = data.drop(["Stars"], axis=1)
y = data[["Stars"]].astype(int)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
abc = AdaBoostClassifier(n_estimators=500, learning_rate=1)
model = abc.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
#I have modulated the test_size, we have an issue with sample size!!
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 16/4/24 下午2:28
# @Author : ZHZ
# @Description : 将predict行记录增加至1000条
import pandas as pd
num_days = 7
# 444 // 7 = 63: the integer index of the final date bucket.  Floor
# division keeps the historical Python 2 behaviour of ``444 / num_days``.
final_date = 444 // num_days
if_all_predict = open("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/if_all_predict.csv",'r')
# Open in text mode ('w'): the script writes str lines, which fails with
# the old binary 'wb+' mode on Python 3.
if_all_predict_1000 = open("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/if_all_predict_1000.csv",'w')
if_all_predict_dict = {}
# All-zero row used to pad ids that have no prediction.
temp_line = "0,0,0,0,0,0,0," \
            "0,0,0,0,0,0,0," \
            "0,0,0,0,0,0,0," \
            "0,0,0,0,0"
column_line = ""
is_first = True
for line in if_all_predict.readlines():
    if is_first:
        # Copy the CSV header through unchanged.
        is_first = False
        if_all_predict_1000.writelines(line.strip()+'\n')
        continue
    line = line.strip()
    # Index each data row by the id found in the second column.
    key = int(line.split(',')[1])
    if_all_predict_dict[key] = line
for i in range(0,1000):
    # FIX: ``dict.has_key`` was removed in Python 3; use the ``in`` operator.
    if i in if_all_predict_dict:
        if_all_predict_1000.writelines(if_all_predict_dict[i]+'\n')
    else:
        # Pad missing ids with an all-zero row for the final date.
        if_all_predict_1000.writelines(str(final_date)+","+str(i)+","+temp_line+'\n')
|
# For users of this package to be able to write
#     from pygame.sound import *
# when using the echo.py module, you would normally need
#     __all__ = ['echo', 'effect', ...]
# but because of the explicit
#     from . import echo
# below, echo works even without being listed.
__all__ = ['effect', 'echo']  # names exported on "import *"
from . import echo
|
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Kubernetes sandbox components."""
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import sandbox
import sandlet
def set_gke_cluster_context(gke_cluster_name):
  """Point kubectl at the GKE cluster whose name ends with the given name."""
  logging.info('Changing cluster to %s.', gke_cluster_name)
  output = subprocess.check_output(['kubectl', 'config', 'get-clusters'])
  suffix = '_%s' % gke_cluster_name
  matches = [line for line in output.split('\n') if line.endswith(suffix)]
  if not matches:
    raise sandbox.SandboxError(
        'Cannot change GKE cluster context, cluster %s not found',
        gke_cluster_name)
  # Silence kubectl's output; only the context switch side effect matters.
  with open(os.devnull, 'w') as devnull:
    subprocess.call(['kubectl', 'config', 'use-context', matches[0]],
                    stdout=devnull)
class HelmComponent(sandlet.SandletComponent):
  """A helm resource.

  Wraps the helm CLI: construction verifies the binary exists, start()
  initializes helm in the cluster and installs the vitess chart, stop()
  deletes the release.
  """

  def __init__(self, name, sandbox_name, helm_config):
    super(HelmComponent, self).__init__(name, sandbox_name)
    self.helm_config = helm_config
    # Fail fast at construction time if the helm binary is missing.
    try:
      subprocess.check_output(['helm'], stderr=subprocess.STDOUT)
    except OSError:
      raise sandbox.SandboxError(
          'Could not find helm binary. Please visit '
          'https://github.com/kubernetes/helm to download helm.')

  def start(self):
    logging.info('Initializing helm.')
    try:
      subprocess.check_output(['helm', 'init'], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
      raise sandbox.SandboxError('Failed to initialize helm: %s', e.output)
    # helm init on a fresh cluster takes a while to be ready.
    # Wait until 'helm list' returns cleanly.
    with open(os.devnull, 'w') as devnull:
      start_time = time.time()
      while time.time() - start_time < 120:
        try:
          subprocess.check_call(['helm', 'list'], stdout=devnull,
                                stderr=devnull)
          logging.info('Helm is ready.')
          break
        except subprocess.CalledProcessError:
          time.sleep(5)
      else:
        # while/else: reached only when the loop exhausts without break.
        raise sandbox.SandboxError(
            'Timed out waiting for helm to become ready.')
    logging.info('Installing helm.')
    try:
      subprocess.check_output(
          ['helm', 'install', os.path.join(os.environ['VTTOP'], 'helm/vitess'),
           '-n', self.sandbox_name, '--namespace', self.sandbox_name,
           '--replace', '--values', self.helm_config],
          stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
      raise sandbox.SandboxError('Failed to install helm: %s' % e.output)
    logging.info('Finished installing helm.')

  def stop(self):
    # Best-effort teardown; --purge removes the release history as well.
    subprocess.call(['helm', 'delete', self.sandbox_name, '--purge'])

  def is_up(self):
    # Helm exposes no cheap readiness probe here; report up unconditionally.
    return True

  def is_down(self):
    # Down once no pods remain in the sandbox namespace.
    return not bool(subprocess.check_output(
        ['kubectl', 'get', 'pods', '--namespace', self.sandbox_name]))
class KubernetesResource(sandlet.SandletComponent):
  """A Kubernetes resource (pod, replicationcontroller, etc.).

  Renders a {{param}}-style template file and applies it with kubectl.
  The render + kubectl sequence was duplicated verbatim in start() and
  stop(); it is now shared via two private helpers.
  """

  def __init__(self, name, sandbox_name, template_file, **template_params):
    super(KubernetesResource, self).__init__(name, sandbox_name)
    self.template_file = template_file
    self.template_params = template_params

  def _render_template(self):
    """Read the template file and substitute every {{name}} placeholder."""
    with open(self.template_file, 'r') as template_file:
      template = template_file.read()
    for name, value in self.template_params.items():
      template = re.sub('{{%s}}' % name, str(value), template)
    return template

  def _kubectl(self, action):
    """Write the rendered template to a temp file and run kubectl <action>."""
    with tempfile.NamedTemporaryFile() as f:
      f.write(self._render_template())
      f.flush()
      os.system('kubectl %s --namespace %s -f %s' % (
          action, self.sandbox_name, f.name))

  def start(self):
    super(KubernetesResource, self).start()
    self._kubectl('create')

  def stop(self):
    self._kubectl('delete')
    super(KubernetesResource, self).stop()
def get_forwarded_ip(service, namespace='default', max_wait_s=60):
  """Returns an external IP address exposed by a service.

  Polls kubectl once per second until the load balancer ingress appears
  or max_wait_s elapses; returns '' on timeout.
  """
  start_time = time.time()
  while time.time() - start_time < max_wait_s:
    try:
      raw = subprocess.check_output(
          ['kubectl', 'get', 'service', service,
           '--namespace=%s' % namespace, '-o', 'json'])
      return json.loads(raw)['status']['loadBalancer']['ingress'][0]['ip']
    except (KeyError, subprocess.CalledProcessError):
      # Service missing or load balancer not provisioned yet; retry.
      time.sleep(1)
  return ''
|
#!/usr/bin/env python
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# Date: 2017.12.19
# Author: Tony Paulino
# Description:
# Modified by: Tony Paulino
# Version: 0.1
import os
import sys
import time
import glob
import re
# Skeleton HTML document; %s receives the concatenated <body> contents of
# every merged table file.
TEMPLATE_HTML = """<HTML>
<HEAD>
<META HTTP-EQUIV="Content-Type" CONTENT="text/html;charset=windows-1252">
<TITLE>Merged Tables</TITLE>
</HEAD>
<BODY>
%s
</BODY>
</HTML>
"""
def extractBody(filename):
    """Return the contents of the first <body>...</body> in *filename*.

    Returns an empty string when the file contains no body element.

    Uses a context manager so the file handle is closed even if the read
    raises, replacing the manual close()/closed sanity check.
    """
    with open(filename, 'r') as file_handler:
        file_content = file_handler.read()  # ficheiro carregado como uma string
    # DOTALL lets '.' span newlines; the non-greedy group stops at the
    # first closing tag.
    find_body = re.findall('<body>(.*?)</body>', file_content, re.IGNORECASE | re.DOTALL)
    if find_body:
        return find_body[0]
    else:
        return ''
def usedTables():
    """Return the whitespace-separated tokens of MapTable.txt,
    de-duplicated while keeping first-seen order."""
    with open('MapTable.txt', 'r') as handler:
        tokens = handler.read().split()
    unique = []
    for token in tokens:
        if token not in unique:
            unique.append(token)
    return unique
def main(directory):
    # Merge the <body> sections of the in-use tables' HTML files found in
    # *directory* into one merged_results/merged_<dirname>.html file.
    print os.path.basename(directory)
    result_extract = ''
    tablesInUse = usedTables()
    # print directory
    # Search for HTML files in: C:\blah\*.html
    for f in glob.glob(os.path.join(directory, '*.html')):
        # Table name = file name without the trailing '_1.html'.
        f_name = os.path.basename(f).replace('_1.html','')
        if f_name in tablesInUse:
            # print 4*' ' + f_name
            result_extract += extractBody(f)
    # Build filename path: C:\blah\merged\merged_table.html
    # merged_table_file = os.path.join(directory, 'merged', 'merged_'+os.path.basename(directory).replace(' ', '_')+'.html')
    merged_table_file = os.path.join(os.path.dirname(__file__), 'merged_results', 'merged_'+os.path.basename(directory).replace(' ', '_')+'.html')
    # Check the existence of path: C:\blah\merged\
    if not os.path.isdir(os.path.dirname(merged_table_file)):
        os.makedirs(os.path.dirname(merged_table_file))
    merged_table_handler = open(merged_table_file, 'w')
    merged_table_handler.write(TEMPLATE_HTML % result_extract)
    merged_table_handler.close()
    if not merged_table_handler.closed:
        sys.exit('Error on closing file.')
if __name__ == '__main__':
    # Merge each directory given on the command line; with no arguments,
    # process the directory containing this script.
    if len(sys.argv) > 1:
        for d in sys.argv[1:]:
            if os.path.isdir(d):
                main(os.path.abspath(d))
    else:
        main(os.path.dirname(__file__))
|
from gtts import gTTS
import os

# Text that will be synthesized and spoken back to the user.
# (Grammar fixed: was "This is a to remind you ...".)
mytext = 'This is to remind you about your health problems'
language = 'en'
# slow=False selects the normal speaking rate.
myobj = gTTS(text=mytext, lang=language, slow=False)
myobj.save("med.mp3")
# BUG FIX: the audio is saved as med.mp3 but the old command played the
# non-existent new.mp3; play the file that was actually written.
os.system("mpg321 med.mp3")
|
from collections import defaultdict, deque
def solution1(players, stop):
    """Return the winning score of the marble game (AoC 2018 day 9).

    players: number of players taking turns (numbered 1..players).
    stop:    value of the last marble played.

    Fixes two issues in the original version:
    * the ``while steps <= stop`` outer loop always finished a full round,
      so marbles beyond ``stop`` could be played -- and wrongly scored when
      a multiple of 23 fell in the overshoot;
    * ``list.insert``/``del`` made every move O(n); a deque rotation keeps
      each move O(1), so the whole game is O(stop) instead of O(stop^2).

    The circle is a deque with the current marble at the right end.
    """
    circle = deque([0])
    score = defaultdict(int)
    for marble in range(1, stop + 1):
        player = (marble - 1) % players + 1  # players take turns 1..players
        if marble % 23 == 0:
            # Scoring move: keep the marble and also take the one seven
            # positions counter-clockwise of the current marble.
            circle.rotate(7)
            score[player] += marble + circle.pop()
            circle.rotate(-1)
        else:
            # Normal move: insert between the marbles one and two positions
            # clockwise of the current marble.
            circle.rotate(-1)
            circle.append(marble)
    if not score:
        # Fewer than 23 marbles played: nobody scored.
        return 0
    return max(score.values())
def parse_input1(input):
    """Parse 'N players; last marble is worth M points' into (N, M)."""
    tokens = input.split()
    # First token is the player count; second-to-last is the marble value.
    return int(tokens[0]), int(tokens[-2])
if __name__ == '__main__':
    # Puzzle input is a single line such as
    # "430 players; last marble is worth 71588 points".
    with open('input.txt') as fh:
        print(solution1(*parse_input1(fh.read().strip())))
# Helper routines for residual vectors output by the DREAM solver.
import h5py
import matplotlib.pyplot as plt
import numpy as np
import warnings
from .petscmat import _mplcursors_frmt1d
#################
# Check for mplcursors
# Optional dependency: mplcursors adds interactive hover annotations to the
# residual plots.  Its availability is recorded in HASMPLCURSORS.
HASMPLCURSORS = False
try:
    import mplcursors
    HASMPLCURSORS = True
except ImportError:
    # FIX: only a missing package should be tolerated here; the previous
    # bare ``except:`` also swallowed KeyboardInterrupt/SystemExit.
    warnings.warn('The recommended package mplcursors was not found.', ImportWarning)
def cmpres(F1, F2, show=True, log=False, eqsys=None):
    """
    Compare two residual vectors.

    Computes the element-wise relative difference ``|F2/F1 - 1|`` and, when
    ``show`` is True, plots it via :func:`plotres`.

    Returns the relative-difference array (previously computed but
    discarded; returning it is backward compatible).
    """
    dF = np.abs(F2 / F1 - 1)
    if show:
        plotres(dF, show=show, log=log, eqsys=eqsys)
    return dF
def loadres(filename):
    """
    Load a residual vector from the given MAT file.

    Reads the 'F' dataset in its entirety before the file is closed.
    """
    with h5py.File(filename, 'r') as f:
        return f['F'][:]
def plotres(res, *args, log=True, show=True, legend=None, eqsys=None):
    """
    Plot the given residual (and any additional residuals in *args*).
    """
    if log:
        def draw(vec):
            plt.semilogy(np.abs(vec))
    else:
        def draw(vec):
            plt.plot(vec)

    draw(res)
    labels = ['Residual 1']
    for idx, extra in enumerate(args, start=2):
        draw(extra)
        labels.append('Residual {}'.format(idx))

    if HASMPLCURSORS:
        # Hook up interactive data-point annotations when available.
        cursor = mplcursors.cursor()
        cursor.connect('add', lambda sel: _mplcursors_frmt1d(sel, eqsys=eqsys))

    # An explicit legend wins over the auto-generated labels.
    plt.legend(legend if legend else labels)

    if show:
        plt.show(block=False)
|
from django.contrib import messages
from django.shortcuts import render, redirect, get_object_or_404
from .forms import WargaRegistrationForm
from account.forms import RegisterForm, LoginForm, EditAccountForm
from account.decorators import anonymous_required, warga_required
from django.db import transaction
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from laporan.models import BerkasLaporan
from django.contrib.auth import logout
from laporan.forms import BerkasLaporanForm
from .models import Warga
# Create your views here.
@login_required(login_url='/warga/login')
@warga_required
def index_view(request):
    """List all reports, paginated 10 per page."""
    requested_page = request.GET.get('page', 1)
    paginator_laporan = Paginator(BerkasLaporan.objects.all(), 10)
    try:
        laporans = paginator_laporan.page(requested_page)
    except PageNotAnInteger:
        # Non-numeric page parameter: fall back to the first page.
        laporans = paginator_laporan.page(1)
    except EmptyPage:
        # Page number past the end: clamp to the last page.
        laporans = paginator_laporan.page(paginator_laporan.num_pages)
    return render(request, "warga/index.html", {'laporans': laporans})
@login_required(login_url='/warga/login')
@warga_required
def logout_view(request):
    # End the authenticated session, then send the user to the public home.
    logout(request)
    return redirect('home')
@transaction.atomic
def register_view(request):
    """Register a new warga: create the auth account and the linked Warga
    profile in a single database transaction."""
    account = RegisterForm(request.POST or None, prefix='account')
    warga = WargaRegistrationForm(request.POST or None, prefix='warga')
    context = {
        "warga_form" : warga,
        "account_form" : account
    }
    if account.is_valid() and warga.is_valid():
        # Save the user first so the profile can reference it.
        user = account.save(commit=False)
        user.user_type = 4  # presumably the "warga" account type -- confirm in account app
        user.save()
        warga_data = warga.save(commit=False)
        warga_data.user = user
        warga_data.save()
        return redirect('warga:login')
    return render(request, 'warga/auth/register.html', context)
@anonymous_required
def login_view(request):
    """Render the login form; redirect to the warga index on success."""
    form = LoginForm(data=request.POST or None, request=request, prefix='warga')
    if form.is_valid():
        return redirect('warga:index')
    return render(request, 'warga/auth/login.html', {"form": form})
@login_required(login_url='/warga/login')
@warga_required
def detail_laporan_view(request, id):
    """Show one report.

    Uses get_object_or_404 (already imported at the top of this module) so
    a missing BerkasLaporan produces a 404 page instead of an unhandled
    DoesNotExist error (HTTP 500).
    """
    laporan = get_object_or_404(BerkasLaporan, id=id)
    return render(request, "warga/laporan/detail_laporan.html", {
        'laporan': laporan
    })
@login_required(login_url='/warga/login')
@warga_required
def add_laporan_view(request):
    """Create a new report; new reports always start with status 'BELUM'
    and are attributed to the logged-in user."""
    form = BerkasLaporanForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        form_laporan = form.save(commit=False)
        form_laporan.status_laporan = 'BELUM'
        form_laporan.user_created = request.user
        form_laporan.save()
        nama_psu_laporan = form.cleaned_data.get('nama_psu_laporan')
        messages.success(request, f'Laporan {nama_psu_laporan} berhasil ditambahkan.', extra_tags='laporan')
        return redirect('warga:index')
    return render(request, "warga/laporan/add_laporan.html", {
        'form': form
    })
@login_required(login_url='/warga/login')
@warga_required
def change_profile_view(request):
    """Edit the Warga profile and its underlying account in one submission."""
    # The Warga row is looked up by the logged-in user's id as primary key.
    warga = Warga.objects.get(pk=request.user.id)
    warga_form = WargaRegistrationForm(request.POST or None, prefix='warga',instance = warga)
    account = EditAccountForm(request.POST or None, prefix='account', instance= request.user)
    if warga_form.is_valid() and account.is_valid():
        warga_form.save()
        account.save()
        messages.success(request, f'Profile berhasil diperbarui.')
        return redirect('warga:display_profile')
    context = {
        "warga_form" : warga_form,
        "account" : account,
    }
    return render(request, 'warga/auth/change_profile.html', context)
@login_required(login_url='/warga/login')
@warga_required
def display_profile_view(request):
    """Show the profile page of the logged-in warga."""
    # The Warga row is looked up by the logged-in user's id as primary key.
    return render(request, 'warga/auth/display_profile.html', {
        "warga": Warga.objects.get(pk=request.user.id),
    })
import pika
import time
from DAO.connection import Connection
import os
import multiprocessing
import json
import logging
import ast
from extract_prosodic.main import extract
import threading
import functools
from files_ms_client import download, upload
# NOTE(review): LOG_FORMAT appears unused in this module -- confirm whether
# logging is configured elsewhere with it or it can be removed.
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
              '-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
def callback(channel, method, properties, body, args):
    """Dispatch each delivery to a worker thread so pika's I/O loop
    stays responsive; the thread is tracked for a later join()."""
    connection, threads = args
    worker = threading.Thread(
        target=do_work,
        args=(connection, channel, method.delivery_tag, body))
    worker.start()
    threads.append(worker)
def do_work(connection, channel, delivery_tag, body):
    """Worker-thread body: download the segment file, extract prosodic
    features per utterance, persist the result, and forward a message to
    the ``aggregator`` queue.  Always schedules an ACK back on the
    connection's I/O thread when done (pika channels are not thread-safe).
    """
    try:
        print(" [x] Received %r" % body, flush=True)
        args = json.loads(body)
        oid = args['oid']
        project_id = args['project_id']
        # conn = Connection()
        # file = conn.get_file(oid)
        # file = conn.get_doc_mongo(file_oid=oid)
        file = download(args['file'], buffer=True)
        # Payload is a python-literal dict of utterances; each value is
        # expected to carry 'timestamp', 'duration' and 'bytes' keys.
        result = ast.literal_eval(file.decode('utf-8'))
        # NOTE(review): timestamps/duration/pause_duration are never used below.
        timestamps = [0]
        duration = []
        pause_duration = []
        count = 0
        dict_result = {}
        previous_duration = 0
        for key, value in result.items():
            dict_result[count] = {}
            # Pause = silence before this utterance (previous end -> this start).
            if count == 0:
                dict_result[count]['pause'] = float(value['timestamp'])
            else:
                dict_result[count]['pause'] = float(
                    value['timestamp']) - previous_duration
            dict_result[count]['init_time'] = float(value['timestamp'])
            previous_duration = float(
                value['timestamp']) + float(value['duration'])
            dict_result[count]['pitch'], dict_result[count]['volume'] = extract(
                value['bytes'])
            count += 1
        payload = bytes(str(dict_result), encoding='utf-8')
        uploaded = upload(payload, buffer=True, mime='text/plain')
        conn = Connection()
        # inserts the result of processing in database
        file_oid = conn.insert_doc_mongo(payload)
        conn.insert_jobs(type='low_level_features', status='done',
                         file=file_oid, project_id=project_id)
        message = {'type': 'aggregator', 'status': 'new', 'oid': file_oid,
                   'project_id': project_id, 'file': uploaded['name'], 'queue': 'low_level_features'}
        # post a message on topic_segmentation queue
        # (a fresh connection/channel is opened because this code runs off
        # the consumer's I/O thread)
        connection_out = pika.BlockingConnection(
            pika.ConnectionParameters(host=os.environ['QUEUE_SERVER']))
        channel2 = connection_out.channel()
        channel2.queue_declare(queue='aggregator', durable=True)
        channel2.basic_publish(
            exchange='', routing_key='aggregator', body=json.dumps(message))
    except Exception as e:
        # print(e, flush=True)
        print('Connection Error %s' % e, flush=True)
    print(" [x] Done", flush=True)
    # The ACK must be issued from the connection's own I/O thread.
    cb = functools.partial(ack_message, channel, delivery_tag)
    connection.add_callback_threadsafe(cb)
def ack_message(channel, delivery_tag):
    """Note that `channel` must be the same pika channel instance via which
    the message being ACKed was retrieved (AMQP protocol constraint).
    """
    if not channel.is_open:
        # Channel already closed, so this message can't be ACKed; the
        # broker will re-queue it, so there is nothing to do here.
        return
    channel.basic_ack(delivery_tag)
def consume():
    """Connect to RabbitMQ (retrying until the broker is reachable) and
    consume the ``low_level_features`` queue, handing each message to a
    worker thread via callback().  Blocks until CTRL+C, then drains the
    worker threads and closes the connection.
    """
    logging.info('[x] start consuming')
    success = False
    while not success:
        try:
            connection = pika.BlockingConnection(
                pika.ConnectionParameters(host=os.environ['QUEUE_SERVER'], heartbeat=5))
            channel = connection.channel()
            success = True
        except Exception:
            # FIX: this was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit, making startup uninterruptible
            # while the broker was down.  Wait and retry on real errors.
            time.sleep(30)
    channel.queue_declare(queue='low_level_features', durable=True)
    print(' [*] Waiting for messages. To exit press CTRL+C')
    # One unacked message per consumer keeps work evenly distributed.
    channel.basic_qos(prefetch_count=1)
    threads = []
    on_message_callback = functools.partial(
        callback, args=(connection, threads))
    channel.basic_consume(queue='low_level_features',
                          on_message_callback=on_message_callback)
    try:
        channel.start_consuming()
    except KeyboardInterrupt:
        channel.stop_consuming()
    # Wait for all worker threads to complete before closing the connection.
    for thread in threads:
        thread.join()
    connection.close()
consume()
'''
workers = int(os.environ['NUM_WORKERS'])
pool = multiprocessing.Pool(processes=workers)
for i in range(0, workers):
pool.apply_async(consume)
# Stay alive
try:
while True:
continue
except KeyboardInterrupt:
print(' [*] Exiting...')
pool.terminate()
pool.join()'''
|
#!/usr/bin/python
# add.py
# by: Mike Pozulp
# adds npb benchmark job run(s) to the pgsql db
# Usage: add.py pbsJobOutput benchMarkOutput
import os
import sys
import io
import string
import psycopg2
# Output files smaller than this are treated as failed/incomplete runs.
MIN_FILE_SIZE = 1000 # used to easily skip output of
# incomplete benchmark runs
def printUsage():
    # Usage: one PBS job output file followed by one or more benchmark
    # output files.
    print 'Usage: add.py [pbsJobOutput] [bench_1]\
    {bench_2} ... {bench_N}'
def getJobResults():
    # Parse the PBS job output file (sys.argv[1]) and return the list
    # [jobid, date, ncpus, procmodel, modules, nlist] as strings already
    # formatted for the SQL INSERT built in insert().
    # NOTE: the parser is positional -- it depends on the exact line layout
    # of the PBS output.
    f = open(sys.argv[1], 'r')
    #get jobid and date
    line = f.readline()
    #print line
    words = string.split(line)
    jobid = string.split(words[1],'.')[0]
    #print jobid
    date = ( words[4] + ' ' + words[5] + ' ' + words[6] + ' '
    + words[7] + ' ' + words[8] + ' ' + words[9] )
    #print date
    #get ncpus
    line = f.readline()
    line = f.readline()
    #print line
    ncpus = string.split(line,'=')[1]
    ncpus = ncpus.strip('\n') #strip off newline
    #print ncpus
    #get modules
    #TODO: Make this more general!!!
    for x in range(0,13):
        line = f.readline()
    #print line
    words = line.split()
    #compilers = words[1]
    #mpi = words[3]
    # Modules are formatted as a postgres array literal: {"a", "b"}.
    modules = ('{' +
    '"' + words[1] + '", ' +
    '"' + words[3] + '"}')
    #get procmodel
    line = f.readline()
    #print line
    procmodel = string.split(line,'=')[3].strip()
    #print procmodel
    #get node list
    # Node names are accumulated until the line starting with 'running'.
    nlist = '{'
    while True:
        line = f.readline().split()[0]
        if line != 'running':
            nlist += '"' + line + '", '
        else:
            break
    nlist = nlist[:-2] + '}'
    #close the file
    f.close()
    #return the output data vector
    return [jobid, date, ncpus, procmodel, modules, nlist]
def getBenchResults(file):
    # Parse one NPB benchmark output file and return
    # [npbid, probclass, nprocs, compiler+flags, runtime, mops, verification].
    # NOTE: positional parser -- relies on the fixed line layout of the NPB
    # "Benchmark Completed" summary block.
    # NOTE(review): the file handle is never closed.
    f = open(file, 'r')
    #get npbid
    while True:
        line = f.readline()
        if 'Benchmark Completed' in line:
            break
    #print line
    npbid = line.split()[0].lower()
    #print npbid
    #get probclass
    line = f.readline()
    #print line
    probclass = line.split('=')[1].strip()
    #print probclass
    #get runtime
    for x in range(0,3):
        line = f.readline()
    #print line
    runtime = line.split('=')[1].strip()
    #print runtime
    #get nprocs
    line = f.readline()
    #print line
    nprocs = line.split('=')[1].strip()
    #print nprocs
    #get Mops
    for x in range(0,2):
        line = f.readline()
    #print line
    mops = line.split('=')[1].strip()
    #print mops
    #get Verification
    for x in range(0,3):
        line = f.readline()
    #print line
    verification = line.split('=')[1].strip()
    #print verification
    #get compiler
    for x in range(0,5):
        line = f.readline()
    #print line
    compiler = line.split('=')[1].strip()
    #print compiler
    #get flags
    for x in range(0, 4):
        line = f.readline()
    #print line
    flags = line.split('=')[1].strip()
    #print flags
    #return output data
    return [npbid, probclass, nprocs, compiler + flags, runtime,
        mops, verification]
def insert(dlist, flag):
    # Insert one row into the 'jobs' table (flag 'j') or the per-benchmark
    # table named by dlist[6] (flag 'b'), using the parallel list layout
    # documented below.
    # NOTE(review): the SQL is assembled by string concatenation from parsed
    # file contents -- fragile and injectable; prefer parameterized
    # cur.execute(sql, params).
    #[jobid, date, ncpus, procmodel, moduleList,
    # nodeList, npid, probclass, runtime, nprocs,
    # mops, verification, compiler, flags]
    # 0: jobid 4: mod list 8: nprocs 12: verification
    # 1: date 5: node list 9: cflags
    # 2: ncpus 6: npbid 10: runtime
    # 3: procmodel 7: class 11: mops
    if flag == 'j':
        cmd = ('INSERT INTO jobs VALUES( '
        + dlist[0] + ', '
        + '\'' + dlist[1] + '\'' + ', '
        + dlist[2] + ', '
        + '\'' + dlist[3] + '\'' + ', '
        + '\'' + dlist[4] + '\'' + ', '
        + '\'' + dlist[5] + '\'' + ') ')
    elif flag == 'b':
        # Normalize verification to a boolean flag for the table.
        if dlist[12] == 'SUCCESSFUL':
            dlist[12] = '1'
        else:
            dlist[12] = '0'
        cmd = ('INSERT INTO ' +
        '"' + dlist[6] + '"' + ' VALUES( '
        + dlist[0] + ', '
        + '\'' + dlist[7] + '\'' + ', '
        + dlist[8] + ', '
        + '\'' + dlist[9] + '\'' + ', '
        + dlist[10] + ', '
        + dlist[11] + ', '
        + '\'' + dlist[12] + '\'' + ') ')
    else:
        print 'Not sure where to insert; exiting'; sys.exit(1);
    con = None
    try:
        con = psycopg2.connect(database='devel', user='mpozulp')
        cur = con.cursor()
        cur.execute(cmd)
        con.commit()
    except psycopg2.DatabaseError, e:
        # Roll back the partial transaction before bailing out.
        if con:
            con.rollback()
        print 'Error %s' %e
        sys.exit(1)
    finally:
        if con:
            con.close()
def main():
filelist = list(sys.argv[2:])
if len(sys.argv) < 3:
printUsage(); sys.exit()
for file in sys.argv:
if not os.path.isfile(file):
print 'cannot find file ' + file
printUsage(); sys.exit()
if os.path.getsize(file) < MIN_FILE_SIZE:
# throw away that which did not run
print 'not processing empty file ' + file
filelist.remove(file)
######process pbsJobOutput######
od = getJobResults()
#print od
insert(od, 'j')
######process benchMarkOutput(s)######
for file in filelist:
fulld = list(od)
print 'inserting data from ' + file
fulld.extend(getBenchResults(file))
insert(fulld, 'b')
# Script entry point.
if __name__ == '__main__':
    main()
|
from socket import *
import zlib
class Client():
    """Minimal graylog2 UDP client: zlib-compress a message and send it as
    a single datagram."""

    def __init__(self, server='localhost', port=12201, maxChunkSize=8154):
        # maxChunkSize is kept for API compatibility; chunking is not
        # implemented, so it is currently unused.
        self.graylog2_server = server
        self.graylog2_port = port
        self.maxChunkSize = maxChunkSize

    def log(self, message):
        """Compress *message* and send it to the configured server.

        Accepts str (encoded to UTF-8 automatically, since zlib.compress
        requires bytes on Python 3) or bytes.
        """
        if isinstance(message, str):
            message = message.encode('utf-8')
        UDPSock = socket(AF_INET, SOCK_DGRAM)
        try:
            # FIX: the socket used to leak if compress/sendto raised.
            UDPSock.sendto(zlib.compress(message),
                           (self.graylog2_server, self.graylog2_port))
        finally:
            UDPSock.close()
|
import numpy as np
from keras.callbacks import Callback
from keras.optimizers import SGD, Adam
from keras.models import Sequential
from keras.layers import Dense
from scipy.stats import logistic
from .BaseModel import BaseModel
from ..utils import YpredCallback
class NN_SoftmaxSoftmax(BaseModel):
"""2 Layer logistic-logistic neural network using Keras"""
parametric = True
bootlist = None
def __init__(self, n_neurons=2, epochs=200, learning_rate=0.01, momentum=0.0, decay=0.0, nesterov=False, loss="categorical_crossentropy", batch_size=None, verbose=0):
self.n_neurons = n_neurons
self.verbose = verbose
self.n_epochs = epochs
self.k = n_neurons
self.batch_size = batch_size
self.loss = loss
self.optimizer = SGD(lr=learning_rate, momentum=momentum, decay=decay, nesterov=nesterov)
#self.optimizer = Adam(lr=learning_rate, decay=decay)
self.__name__ = 'cimcb.model.NN_SoftmaxSoftmax'
self.__params__ = {'n_neurons': n_neurons, 'epochs': epochs, 'learning_rate': learning_rate, 'momentum': momentum, 'decay': decay, 'nesterov': nesterov, 'loss': loss, 'batch_size': batch_size, 'verbose': verbose}
def set_params(self, params):
self.__init__(**params)
def train(self, X, Y, epoch_ypred=False, epoch_xtest=None):
""" Fit the neural network model, save additional stats (as attributes) and return Y predicted values.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Predictor variables, where n_samples is the number of samples and n_features is the number of predictors.
Y : array-like, shape = [n_samples, 1]
Response variables, where n_samples is the number of samples.
Returns
-------
y_pred_train : array-like, shape = [n_samples, 1]
Predicted y score for samples.
"""
# If batch-size is None:
if self.batch_size is None:
self.batch_size = len(X)
self.model = Sequential()
self.model.add(Dense(self.n_neurons, activation="sigmoid", input_dim=len(X.T)))
self.model.add(Dense(len(Y[0]), activation="softmax"))
self.model.compile(optimizer=self.optimizer, loss=self.loss, metrics=["accuracy"])
# If epoch_ypred is True, calculate ypred for each epoch
if epoch_ypred is True:
self.epoch = YpredCallback(self.model, X, epoch_xtest)
else:
self.epoch = Callback()
# Fit
self.model.fit(X, Y, epochs=self.n_epochs, batch_size=self.batch_size, verbose=self.verbose, callbacks=[self.epoch])
y_pred_train = self.model.predict(X)
# Storing X, Y, and Y_pred
self.Y_pred = y_pred_train
self.X = X
self.Y = Y
return y_pred_train
def test(self, X, Y=None):
    """Calculate and return Y predicted value.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Test variables, where n_samples is the number of samples and n_features is the number of predictors.
    Y : ignored
        Accepted only for interface compatibility.

    Returns
    -------
    y_pred_test : array-like, shape = [n_samples, 1]
        Predicted y score for samples.
    """
    # Delegate straight to the fitted Keras model.
    return self.model.predict(X)
|
# -*- coding: utf-8 -*-
"""Created by: Sacaia"""
import discord
from discord.ext import tasks, commands
import discord.ext
import asyncio
import dados
import gerenciadorDeDados
import random
import os
import re
import math
##############ACTIVITY##############
# Rich-presence shown for the bot account in the member list.
activity = discord.Activity()
activity.name = ".help | Esperando alguem para ajudar"
activity.type = discord.ActivityType.playing
activity.state = "Observando"
activity.details = "Caso precise de ajuda use \".help\""
activity.timestamps = {"start": 1000}
##############EMBED-HELP##############
# Each embed below is a static, pre-built help page (in Portuguese) served by
# the bot's custom .help command; the default help command is removed later.
embed_help = discord.Embed()
embed_help.colour = discord.Colour.dark_purple()
#embed_help.color = discord.Color.dark_purple()
embed_help.title = "Lista de comandos"
embed_help.description = "Para usar qualquer comando basta usar `.<comando>`\n"
embed_help.description += "**Ações:**\n`bite` `slap` `cry` `highfive` `blush` `lick` `pat` `hug` `cuddle` `nuzzle` `kiss`\n"
embed_help.description += "**RPG:**\n`roll` `ficha` `novaficha` `editficha` `delficha`\n"
embed_help.description += "**Outros:**\n`ship` `votação`\n"
embed_help.description += "**Staff:**\n`clear` `log` `rolepicker` `modrole`\n"
embed_help.set_footer(text="Para informações sobre cada comando use `.help <comando>`")
##############EMBED-BITE##############
embed_bite = discord.Embed()
embed_bite.colour = discord.Colour.dark_purple()
embed_bite.set_thumbnail(url="https://cdn.discordapp.com/attachments/588893416746647553/589096897751285761/bite.jpg")
embed_bite.title = "Bite help"
embed_bite.description = "`.bite @user` : *Morde um ou mais usuarios*\n"
embed_bite.description += "Escolhe uma imagem ou um gif aleatório para ser exibido\n\n"
embed_bite.description += "**Extenções**\n`-img` : Necessariamente escolhe uma imagem\n`-gif` : Necessariamente escolhe um gif"
embed_bite.set_footer(text="@everyone e @here, bem como cargos não são parâmetros válidos")
##############EMBED-SLAP##############
embed_slap = discord.Embed()
embed_slap.colour = discord.Colour.dark_purple()
embed_slap.set_thumbnail(url="https://cdn.discordapp.com/attachments/588893416746647553/589144364161302563/slap.jpg")
embed_slap.title = "Slap help"
embed_slap.description = "`.slap @user` : *Estapeia um ou mais usuarios*\n"
embed_slap.description += "Escolhe um gif aleatório para ser exibido\n"
embed_slap.set_footer(text="@everyone e @here, bem como cargos não são parâmetros válidos")
##############EMBED-CRY##############
embed_cry = discord.Embed()
embed_cry.colour = discord.Colour.dark_purple()
embed_cry.set_thumbnail(url="https://cdn.discordapp.com/attachments/588893416746647553/589269732406984704/cry.png")
embed_cry.title = "Cry help"
embed_cry.description = "`.cry` : *Exibe um gif de choro aleatorio*\n"
#embed_cry.set_footer(text="@everyone e @here, bem como cargos não são parâmetros válidos")
##############EMBED-LICK##############
embed_lick = discord.Embed()
embed_lick.colour = discord.Colour.dark_purple()
embed_lick.set_thumbnail(url="https://cdn.discordapp.com/attachments/588893416746647553/589151423359352852/lick.jpg")
embed_lick.title = "Lick help"
embed_lick.description = "`.lick @user` : *Lambe um ou mais usuarios*\n"
embed_lick.description += "Escolhe um gif aleatório para ser exibido\n"
embed_lick.set_footer(text="@everyone e @here, bem como cargos não são parâmetros válidos")
##############EMBED-HIGHFIVE##############
embed_highfive = discord.Embed()
embed_highfive.colour = discord.Colour.dark_purple()
embed_highfive.set_thumbnail(url="https://cdn.discordapp.com/attachments/588893416746647553/589152146646237192/highfive.png")
embed_highfive.title = "Highfive help"
embed_highfive.description = "`.highfive @user` : *Highfive um ou mais usuarios*\n"
embed_highfive.description += "Escolhe uma imagem ou um gif aleatório para ser exibido\n"
embed_highfive.set_footer(text="@everyone e @here, bem como cargos não são parâmetros válidos")
##############EMBED-BLUSH##############
embed_blush = discord.Embed()
embed_blush.colour = discord.Colour.dark_purple()
embed_blush.set_thumbnail(url="https://cdn.discordapp.com/attachments/588893416746647553/591156702448189450/blush.jpg")
embed_blush.title = "Blush help"
embed_blush.description = "`.blush` : *Aleatoriamente exibe um gif ou imagem de alguem corando*\n"
#embed_blush.set_footer(text="@everyone e @here, bem como cargos não são parâmetros válidos")
##############EMBED-PAT##############
embed_pat = discord.Embed()
embed_pat.colour = discord.Colour.dark_purple()
embed_pat.set_thumbnail(url="https://cdn.discordapp.com/attachments/588893416746647553/589152745462956053/pat.jpg")
embed_pat.title = "Pat help"
embed_pat.description = "`.pat @user` : *Acaricia um ou mais usuarios*\n"
embed_pat.description += "Escolhe uma imagem ou um gif aleatório para ser exibido\n"
embed_pat.set_footer(text="@everyone e @here, bem como cargos não são parâmetros válidos")
##############EMBED-HUG##############
embed_hug = discord.Embed()
embed_hug.colour = discord.Colour.dark_purple()
embed_hug.set_thumbnail(url="https://cdn.discordapp.com/attachments/588893416746647553/589110871775641662/hug.jpg")
embed_hug.title = "Hug help"
embed_hug.description = "`.hug @user` : *Abraça um ou mais usuarios*\n"
embed_hug.description += "Escolhe uma imagem ou um gif aleatório para ser exibido\n\n"
embed_hug.description += "**Extenções**\n`-img` : Necessariamente escolhe uma imagem\n`-gif` : Necessariamente escolhe um gif"
embed_hug.set_footer(text="@everyone e @here, bem como cargos não são parâmetros válidos")
##############EMBED-CUDDLE##############
embed_cuddle = discord.Embed()
embed_cuddle.colour = discord.Colour.dark_purple()
embed_cuddle.set_thumbnail(url="https://cdn.discordapp.com/attachments/588893416746647553/589243610789183505/cuddle.jpg")
embed_cuddle.title = "Cuddle help"
embed_cuddle.description = "`.cuddle @user` : *Abraça amorosamente um ou mais usuarios*\n"
embed_cuddle.description += "Escolhe uma imagem ou um gif aleatório para ser exibido\n\n"
embed_cuddle.description += "**Extenções**\n`-img` : Necessariamente escolhe uma imagem\n`-gif` : Necessariamente escolhe um gif"
embed_cuddle.set_footer(text="@everyone e @here, bem como cargos não são parâmetros válidos")
##############EMBED-NUZZLE##############
embed_nuzzle = discord.Embed()
embed_nuzzle.colour = discord.Colour.dark_purple()
embed_nuzzle.set_thumbnail(url="https://cdn.discordapp.com/attachments/588893416746647553/589250328487133226/nuzzle.jpg")
embed_nuzzle.title = "Nuzzle help"
embed_nuzzle.description = "`.nuzzle @user` : *Esfrega o rosto em um ou mais usuarios*\n"
embed_nuzzle.description += "Escolhe um gif aleatório para ser exibido\n"
embed_nuzzle.set_footer(text="@everyone e @here, bem como cargos não são parâmetros válidos")
##############EMBED-KISS##############
embed_kiss = discord.Embed()
embed_kiss.colour = discord.Colour.dark_purple()
# NOTE(review): this thumbnail URL is the highfive image — presumably a
# copy-paste leftover; confirm the intended kiss artwork before changing it.
embed_kiss.set_thumbnail(url="https://cdn.discordapp.com/attachments/588893416746647553/589152146646237192/highfive.png")
embed_kiss.title = "Kiss help"
embed_kiss.description = "`.kiss @user` : *Beija um ou mais usuarios*\n"
embed_kiss.description += "Escolhe uma imagem ou um gif aleatório para ser exibido\n\n"
embed_kiss.description += "**Extenções**\n`-img` : Necessariamente escolhe uma imagem\n`-gif` : Necessariamente escolhe um gif"
embed_kiss.set_footer(text="@everyone e @here, bem como cargos não são parâmetros válidos")
##############EMBED-ROLL##############
embed_roll = discord.Embed()
embed_roll.colour = discord.Colour.dark_purple()
embed_roll.set_thumbnail(url="https://cdn.discordapp.com/attachments/592521078597746698/592882908239495170/roll.jpg")
embed_roll.title = "Roll help"
embed_roll.description = "`.roll <repetições>* <dado> <buff/nerf>*` : *Joga um `<dado>`*\n"
embed_roll.description += "Pode-se jogar diversos dados em apenas um comando, basta repetir o parâmetro `<dado>` quantas vezes quiser. "
embed_roll.description += "Podendo, para cada `<dado>`, especificar o numero de `<repetições>` e/ou seu respectivo `<buff/nerf>`\n"
embed_roll.description += "Exemplos: `.roll d2` `.roll 2 d6` `.roll d10 4` `.roll d4 -1` `.roll 3 d20 +2` `.roll 2 d4 d6 5 3 d10 *1.2`\n"
embed_roll.description += "Dica: pode-se escrever `.roll 3x d6 -2, 2x d20 +3` para facilitar o entendimento\n\n"
embed_roll.description += "**Parâmetros**\n`<dado>` : Um `<dado>` é definido por `dX` onde `X` é um numero inteiro, correspondente a quantidade de lados do dado\n"
embed_roll.description += "`<repetições>` *__Opcional__: Número de vezes que se pretende lançar o dado. Caso seja omitido será considerado 1\n"
embed_roll.description += "`<buff/nerf>` *__Opcional__: Caso precise mudar o resultado do dado de alguma maneira, para buffar ou nerfar a ação do jogador, "
embed_roll.description += "pode usar um `oX` onde `o` é um operador matemático(operadores suportados: [+, -, *, x, /, ^, %]) e `X` um numero real(casas decimais são aceitas). Caso seja omitido será considerado um lançamento normal\n\n"
#embed_roll.set_footer(text="Pode-se escrever .roll 3x d6, 2x d20 para facilitar o entendimento")
##############EMBED-NOVAFICHA##############
embed_novaficha = discord.Embed()
embed_novaficha.colour = discord.Colour.dark_purple()
#embed_novaficha.set_thumbnail(url="https://cdn.discordapp.com/attachments/592521078597746698/592882908239495170/roll.jpg")
embed_novaficha.title = "Novaficha help"
embed_novaficha.description = "`.novaficha <nome>` : *Cria uma nova ficha com o nome especificado*\n"
embed_novaficha.description += "Espera receber uma segunda mensagem com o conteúdo da ficha, que é composto por um texto e pode ter uma imagem ilustrativa, basta anexar uma imagem à mensagem\n\n"
embed_novaficha.description += "**Parâmetros**\n`<nome>` : O nome da ficha que pretende criar\n"
embed_novaficha.set_footer(text="Caso erre alguma coisa você sempre pode editar sua ficha com o \".editficha\"")
##############EMBED-EDITFICHA##############
embed_editficha = discord.Embed()
embed_editficha.colour = discord.Colour.dark_purple()
#embed_editficha.set_thumbnail(url="https://cdn.discordapp.com/attachments/592521078597746698/592882908239495170/roll.jpg")
embed_editficha.title = "Editficha help"
embed_editficha.description = "`.editficha <nome>` : *Edita uma ficha com o nome especificado*\n"
embed_editficha.description += "Espera receber uma segunda mensagem com o novo conteúdo da ficha, que é composto por um texto e pode ter uma imagem ilustrativa, basta anexar uma imagem à mensagem.\n"
embed_editficha.description += "Caso deseje mudar somente a descrição ou a imagem pode-se omitir o outro elemento\n"
embed_editficha.description += "Dica: Caso deseje alterar alguma coisa no seu antigo texto pode-se consulta-lo pelo metodo `.ficha` e copiar o antigo conteúdo\n\n"
embed_editficha.description += "**Parâmetros**\n`<nome>` : O nome da ficha que pretende editar\n"
embed_editficha.set_footer(text="Não é possivel mudar o nome de uma ficha, para isso pode-se excluir esta ficha(.delficha) e criar uma nova(.novaficha)")
##############EMBED-DELFICHA##############
embed_delficha = discord.Embed()
embed_delficha.colour = discord.Colour.dark_purple()
#embed_delficha.set_thumbnail(url="https://cdn.discordapp.com/attachments/592521078597746698/592882908239495170/roll.jpg")
embed_delficha.title = "Delficha help"
embed_delficha.description = "`.delficha <nome>` : *Exclui uma ficha com o nome especificado*\n"
embed_delficha.description += "Pede uma confirmação dentro de um minuto antes de efetivamente excluir a ficha\n\n"
embed_delficha.description += "**Parâmetros**\n`<nome>` : O nome da ficha que pretende excluir\n"
#embed_delficha.set_footer(text="")
##############EMBED-FICHA##############
embed_ficha = discord.Embed()
embed_ficha.colour = discord.Colour.dark_purple()
#embed_ficha.set_thumbnail(url="https://cdn.discordapp.com/attachments/592521078597746698/592882908239495170/roll.jpg")
embed_ficha.title = "Ficha help"
embed_ficha.description = "`.ficha @user* <nome>*` : *lista as fichas de um usuario ou mostra a ficha especificada*\n`.fichas @user* <nome>*` também pode ser utilizado\n"
embed_ficha.description += "Dependendo dos parâmetros fornecidos exibe a lista de fichas de um usuario(somente o parâmetro `@user`) ou uma ficha específica(ambos os parâmetros)\n\n"
embed_ficha.description += "**Parâmetros**\n`@user` *__Opcional__: O usuário que pretende consultar. Caso seja omitido será considerado você mesmo\n"
embed_ficha.description += "`<nome>` *__Opcional__: Nome da ficha que pretende consultar\n"
embed_ficha.set_footer(text="@everyone e @here, bem como cargos não são parâmetros válidos")
##############EMBED-SHIP##############
embed_ship = discord.Embed()
embed_ship.colour = discord.Colour.dark_purple()
#embed_ship.set_thumbnail(url="https://cdn.discordapp.com/attachments/592521078597746698/592882908239495170/roll.jpg")
embed_ship.title = "Ship help"
embed_ship.description = "`.ship @user*` : *mistura os nomes e exibe a porcentagem do ship dar certo*\n"
embed_ship.description += "Mistura os nomes de todos os membros mencionados(na ordem) e calcula a porcentagem do ship dar certo\n\n"
embed_ship.description += "**Parâmetros**\n`@user` *__Mais de um__: Lista de menções a outros usuarios\n"
embed_ship.set_footer(text="@everyone e @here, bem como cargos não são parâmetros válidos")
##############EMBED-CLEAR##############
embed_clear = discord.Embed()
embed_clear.colour = discord.Colour.dark_purple()
#embed_clear.set_thumbnail(url="https://cdn.discordapp.com/attachments/588893416746647553/589152146646237192/highfive.png")
embed_clear.title = "Clear help"
embed_clear.description = "`.clear <qtd>` : *deleta a `<qtd>` de mensagens no canal*\n`.cl <qtd>` também pode ser utilizado\n\n"
embed_clear.description += "**Parâmetros**\n`<qtd>` : Deve ser um _numero inteiro_ correspondente a quantidade de mensagens que se deseja apagar (limite de 100 mensagens por vez)"
embed_clear.set_footer(text="É preciso ter a permissão para excluir mensagens ou possuir o cargo de mod")
##############EMBED-LOG##############
embed_log = discord.Embed()
embed_log.colour = discord.Colour.dark_purple()
#embed_log.set_thumbnail(url="https://cdn.discordapp.com/attachments/588893416746647553/589152146646237192/highfive.png")
embed_log.title = "Log help"
embed_log.description = "`.log <#canal>` : *define `<#canal>` como o canal de log do servidor*\n"
embed_log.description += "Caso um canal já tenha sido definido este será atualizado para o novo `<#canal>`\n\n"
embed_log.description += "**Parâmetros**\n`<#canal>` : Deve ser uma __mensão a um canal textual__ correspondente ao canal que se deseja mandar as mensagens de log\n\n"
embed_log.description += "**Extenções**\n`-clear` : Não necessita de parâmetros. Remove o canal de log do servidor"
embed_log.set_footer(text="Assim como outros comandos da staff, é preciso ser um administrador ou possuir o cargo de mod")
##############EMBED-ROLEPICKER##############
# Static help page for the .rolepicker staff command.
embed_rolepicker = discord.Embed()
embed_rolepicker.colour = discord.Colour.dark_purple()
#embed_rolepicker.set_thumbnail(url="https://cdn.discordapp.com/attachments/588893416746647553/589152146646237192/highfive.png")
embed_rolepicker.title = "Rolepicker help"
embed_rolepicker.description = "`.rolepicker <#canal>` : *Espera receber um `rolepicker` valido no `<#canal>` especificado*\n"
embed_rolepicker.description += "Um `rolepicker` é qualquer mensagem que contem uma mesma quantidade de __menções de cargos__ e __emojis__. A ordem dos cargos irá indicar qual é o emoji corresponde e vice-versa\n"
embed_rolepicker.description += "A mensagem pode conter textos, menções, imagens, etc... desde que a restrição acima seja atingida\n"
embed_rolepicker.description += "Para **editar** o `rolepicker` basta editar a mensagem, mantendo a quantidade de __menções de cargos__ iguais a de __emojis__\n"
embed_rolepicker.description += "Para **excluir** o `rolepicker` basta excluir a mensagem\n\n"
embed_rolepicker.description += "**Parâmetros**\n`<#canal>` : Deve ser uma __mensão a um canal textual__ correspondente ao canal que se deseja mandar a mensagens de log\n\n"
# BUG FIX: the original set this footer on embed_log (copy-paste slip), so the
# rolepicker help embed never got a footer.
embed_rolepicker.set_footer(text="Assim como outros comandos da staff, é preciso ser um administrador ou possuir o cargo de mod")
##############EMBED-MODROLE##############
embed_modrole = discord.Embed()
embed_modrole.colour = discord.Colour.dark_purple()
#embed_modrole.set_thumbnail(url="https://cdn.discordapp.com/attachments/588893416746647553/589152146646237192/highfive.png")
embed_modrole.title = "Modrole help"
embed_modrole.description = "`.modrole @role` : *define `@role` como o cargo de mod do servidor*\n"
embed_modrole.description += "Caso um cargo já tenha sido definido este será atualizado para o novo `@role`\n\n"
embed_modrole.description += "**Parâmetros**\n`@role` : Deve ser uma __mensão a um cargo__ correspondente ao cargo que se deseja transformar em mod para a Kyuni\n\n"
embed_modrole.description += "**Extenções**\n`-clear` : Não necessita de parâmetros. Remove o cargo de mod do servidor"
embed_modrole.set_footer(text="Assim como outros comandos da staff, é preciso ser um administrador ou possuir o cargo de mod")
######################################
# Bot client: "." prefix; the default help command is replaced by the custom
# embed pages above, and the presence configured at the top is applied.
client = commands.Bot(command_prefix=".")
client.remove_command("help")
client.activity = activity
#client.__setattr__("command_prefix", "!")
##############CHECKS##############
def ehDono():
    """Command check that passes only for the guild owner."""
    async def predicate(ctx):
        # The owner is identified purely by id equality.
        return ctx.author.id == ctx.guild.owner_id
    return commands.check(predicate)
def ehMod():
    """Command check: passes when the author holds the server's configured mod
    role or has the administrator permission."""
    async def inter_ehMod(ctx):
        server = gerenciadorDeDados.getServer(ctx.guild.id)
        # Holders of the configured mod role always pass.
        for role in ctx.author.roles:
            if role.id == server.modRoleID:
                return True
        # BUG FIX: the original returned commands.has_permissions(...), which is
        # a decorator object and therefore always truthy — every user passed the
        # check. Evaluate the author's actual guild permissions instead.
        return ctx.author.guild_permissions.administrator
    return commands.check(inter_ehMod)
def ehMessageManager():
    """Command check: passes when the author holds the server's configured mod
    role or has the manage_messages permission."""
    async def inter_ehMessageManager(ctx):
        server = gerenciadorDeDados.getServer(ctx.guild.id)
        # Holders of the configured mod role always pass.
        for role in ctx.author.roles:
            if role.id == server.modRoleID:
                return True
        # BUG FIX: the original returned commands.has_permissions(...), which is
        # a decorator object and therefore always truthy — every user passed the
        # check. Evaluate the author's actual guild permissions instead.
        return ctx.author.guild_permissions.manage_messages
    return commands.check(inter_ehMessageManager)
######################################
@client.event
async def on_ready():
    """Startup log line confirming the bot is online."""
    print(f'We have logged in as {client.user}')
@client.event
async def on_guild_join(guild):
    """Registers the newly-joined server in the persistent data store."""
    gerenciadorDeDados.registrarServer(dados.Server(guild.id))
@client.event
async def on_message_delete(message):
    """Logs deleted messages to the configured log channel (text content only;
    non-text attachments are not yet supported)."""
    if message.author == client.user:
        return
    log_channel_id = gerenciadorDeDados.getLogChannelID(message.guild.id)
    if log_channel_id is None:
        return
    description = ("**At:** <#" + str(message.channel.id) + ">\n**Author:** <@"
                   + str(message.author.id) + ">\n" + str(message.content))
    log_embed = discord.Embed(colour=discord.Colour.dark_purple(),
                              title="Message deleted",
                              description=description)
    await client.get_channel(log_channel_id).send(embed=log_embed)
@client.event
async def on_message_edit(before, after):
    """Logs message edits (before/after text) to the configured log channel;
    non-text attachments are not yet supported."""
    if before.author == client.user or after.author == client.user:
        return
    log_channel_id = gerenciadorDeDados.getLogChannelID(before.guild.id)
    if log_channel_id is None:
        return
    description = ("**At:** <#" + str(before.channel.id) + ">\n**Author:** <@"
                   + str(before.author.id) + ">\n\n***Before:*** \n"
                   + str(before.content) + "\n\n***After:*** \n" + str(after.content))
    log_embed = discord.Embed(colour=discord.Colour.dark_purple(),
                              title="Message edited",
                              description=description)
    await client.get_channel(log_channel_id).send(embed=log_embed)
@client.event
async def on_raw_message_delete(payload):
    """Unregisters a rolepicker when its backing message is deleted."""
    # Ignore deletions of the bot's own messages (only detectable when cached).
    if payload.cached_message is not None:
        if payload.cached_message.author.id == client.user.id:
            return
    # Rolepickers only exist inside guilds.
    if payload.guild_id is not None:
        # FIX: the original also fetched the channel and guild objects but
        # never used them (and would crash if get_guild returned None);
        # payload.guild_id is the same id getServer needs.
        server = gerenciadorDeDados.getServer(payload.guild_id)
        if payload.message_id in server.rolepickerIDs:
            server.removeRolepicker(payload.message_id)
            gerenciadorDeDados.updateServer(server)
@client.event
async def on_raw_message_edit(payload):
    """Re-syncs the reactions of a rolepicker after its message was edited."""
    # BUG FIX: DM edit payloads carry no "guild_id" key at all, so indexing
    # payload.data["guild_id"] raised KeyError; use .get() and bail out early.
    guild_id = payload.data.get("guild_id")
    if guild_id is None:
        return
    channel = client.get_channel(int(payload.data["channel_id"]))
    message = await channel.fetch_message(payload.message_id)
    server = gerenciadorDeDados.getServer(int(guild_id))
    if message.id in server.rolepickerIDs:
        emojis = gerenciadorDeDados.getEmojisFromMessage(message)
        await message.clear_reactions()
        # Re-add one reaction per role/emoji pair (pairs beyond the shorter
        # list are ignored).
        roles = gerenciadorDeDados.getRolesFromMessage(message)
        for i in range(min(len(roles), len(emojis))):
            await message.add_reaction(emojis[i][1])
@client.event
async def on_raw_reaction_add(payload):
    """Grants the role matching the emoji a user reacted with on a rolepicker."""
    if payload.user_id == client.user.id:
        return
    # Rolepickers only exist inside guilds; DM reactions have no guild_id.
    if payload.guild_id is None:
        return
    channel = client.get_channel(payload.channel_id)
    message = await channel.fetch_message(payload.message_id)
    guild = client.get_guild(payload.guild_id)
    server = gerenciadorDeDados.getServer(guild.id)
    # Normalise the emoji to the same string form stored in the rolepicker.
    emoji = payload.emoji
    if emoji.is_custom_emoji():
        emoji = "<:" + emoji.name + ":" + str(emoji.id) + ">"
    else:
        emoji = emoji.name
    if message.id in server.rolepickerIDs:
        member = guild.get_member(payload.user_id)
        role = gerenciadorDeDados.getCorrespondingRole(message, emoji)
        if role is None:
            # The emoji has no matching role: undo the stray reaction.
            # BUG FIX: the original removed message.author's reaction (the
            # rolepicker's creator) instead of the reacting member's, so the
            # invalid reaction stayed in place.
            for reaction in message.reactions:
                if str(reaction.emoji) == emoji:
                    await reaction.remove(member)
        else:
            await member.add_roles(role)
@client.event
async def on_raw_reaction_remove(payload):
    """Revokes the role matching the emoji a user un-reacted on a rolepicker."""
    if payload.user_id == client.user.id:
        return
    # Rolepickers only exist inside guilds; DM reactions have no guild_id.
    if payload.guild_id is None:
        return
    channel = client.get_channel(payload.channel_id)
    message = await channel.fetch_message(payload.message_id)
    guild = client.get_guild(payload.guild_id)
    server = gerenciadorDeDados.getServer(guild.id)
    # Normalise the emoji to the same string form stored in the rolepicker.
    emoji = payload.emoji
    if emoji.is_custom_emoji():
        emoji = "<:" + emoji.name + ":" + str(emoji.id) + ">"
    else:
        emoji = emoji.name
    if message.id in server.rolepickerIDs:
        member = guild.get_member(payload.user_id)
        role = gerenciadorDeDados.getCorrespondingRole(message, emoji)
        # BUG FIX: guard against emojis with no matching role — the original
        # called member.remove_roles(None), which raises.
        if role is not None:
            await member.remove_roles(role)
@client.event
async def on_message(message):
    """Normalises incoming commands (lower-casing, case restoration for
    name-sensitive commands, alias rewriting) before dispatching them."""
    if message.author == client.user:
        return
    contentOriginal = message.content
    message.content = message.content.lower().strip()
    prefix = client.command_prefix
    # Non-command messages keep their original content untouched.
    if not message.content.startswith(prefix):
        message.content = contentOriginal
    # Sheet names and poll text are case-sensitive: restore the original
    # casing of everything after the command word itself.
    for cmd in ("novaficha", "ficha", "editficha", "delficha", "votação"):
        if message.content.startswith(prefix + cmd):
            i = re.search(cmd, message.content).end()
            message.content = message.content[:i] + contentOriginal[i:]
    # Alias rewriting.
    # BUG FIX: the original used startswith(prefix + "cl"), which also matched
    # ".clear" (and anything else starting with "cl"), mangling ".clear 5"
    # into ".clearear 5". Only rewrite the bare alias ".cl"/".cl <args>".
    if message.content == prefix + "cl" or message.content.startswith(prefix + "cl "):
        message.content = message.content.replace(prefix + "cl", prefix + "clear", 1)
    if message.content.startswith(prefix + "fichas"):
        message.content = message.content.replace(prefix + "fichas", prefix + "ficha", 1)
    if message.content.startswith(prefix + "votação"):
        message.content = message.content.replace(prefix + "votação", prefix + "votacao", 1)
    await client.process_commands(message)
####################.BITE###########################
@client.command()
@commands.guild_only()
async def bite(ctx):
    """`.bite @user` : bites one or more mentioned users."""
    mentions = ctx.message.mentions
    if not mentions:
        await ctx.send("Por favor, mencione, pelo menos, um usuario")
        return
    content = ctx.message.content
    if "-img" in content:
        use_gif = False
    elif "-gif" in content:
        use_gif = True
    else:
        # No explicit flag: 70% chance of a gif.
        use_gif = random.randint(0, 1000) < 700
    if use_gif:
        file = discord.File("bite/gif/" + str(random.choice(os.listdir("bite/gif/"))), filename="bite.gif")
    else:
        file = discord.File("bite/img/" + str(random.choice(os.listdir("bite/img/"))), filename="bite.jpg")
    names = ", ".join("**" + m.display_name + "**" for m in mentions)
    await ctx.send("**" + ctx.author.display_name + "** mordeu " + names, file=file)
####################.SLAP###########################
@client.command()
@commands.guild_only()
async def slap(ctx):
    """`.slap @user` : slaps one or more mentioned users."""
    mentions = ctx.message.mentions
    if not mentions:
        await ctx.send("Por favor, mencione, pelo menos, um usuario")
        return
    chosen = random.choice(os.listdir("slap/gif/"))
    file = discord.File("slap/gif/" + str(chosen), filename="slap.gif")
    names = ", ".join("**" + m.display_name + "**" for m in mentions)
    await ctx.send("**" + ctx.author.display_name + "** deu um tapa em " + names, file=file)
####################.CRY###########################
@client.command()
async def cry(ctx):
    """`.cry` : posts a random crying gif."""
    gif_name = random.choice(os.listdir("cry/gif/"))
    await ctx.send(file=discord.File("cry/gif/" + str(gif_name), filename="cry.gif"))
####################.HIGHFIVE###########################
@client.command()
@commands.guild_only()
async def highfive(ctx):
    """`.highfive @user` : high-fives one or more mentioned users."""
    mentions = ctx.message.mentions
    if not mentions:
        await ctx.send("Por favor, mencione, pelo menos, um usuario")
        return
    chosen = random.choice(os.listdir("highfive/gif/"))
    file = discord.File("highfive/gif/" + str(chosen), filename="highfive.gif")
    names = ", ".join("**" + m.display_name + "**" for m in mentions)
    await ctx.send("**" + ctx.author.display_name + "** highfive " + names + "!", file=file)
####################.BLUSH###########################
@client.command()
async def blush(ctx):
    """`.blush` : posts a random blushing gif or image."""
    content = ctx.message.content
    if "-img" in content:
        use_gif = False
    elif "-gif" in content:
        use_gif = True
    else:
        # No explicit flag: 60% chance of a gif.
        use_gif = random.randint(0, 1000) < 600
    if use_gif:
        file = discord.File("blush/gif/" + str(random.choice(os.listdir("blush/gif/"))), filename="blush.gif")
    else:
        file = discord.File("blush/img/" + str(random.choice(os.listdir("blush/img/"))), filename="blush.jpg")
    await ctx.send("**" + ctx.author.display_name + "** corou", file=file)
####################.LICK###########################
@client.command()
@commands.guild_only()
async def lick(ctx):
    """`.lick @user` : licks one or more mentioned users."""
    # FIX: the original docstring was copy-pasted from .pat and described the
    # wrong command.
    if (len(ctx.message.mentions) == 0):
        await ctx.send("Por favor, mencione, pelo menos, um usuario")
        return
    # Pick a random gif from the lick gallery.
    file = discord.File("lick/gif/" + str(random.choice(os.listdir("lick/gif/"))), filename="lick.gif")
    ret = "**" + ctx.author.display_name + "** lambeu "
    for mention in ctx.message.mentions:
        ret = ret + "**" + mention.display_name + "**, "
    ret = ret[:-2]  # drop the trailing ", "
    await ctx.send(ret, file=file)
####################.PAT###########################
@client.command()
@commands.guild_only()
async def pat(ctx):
    """`.pat @user` : pats one or more mentioned users."""
    mentions = ctx.message.mentions
    if not mentions:
        await ctx.send("Por favor, mencione, pelo menos, um usuario")
        return
    chosen = random.choice(os.listdir("pat/gif/"))
    file = discord.File("pat/gif/" + str(chosen), filename="pat.gif")
    names = ", ".join("**" + m.display_name + "**" for m in mentions)
    await ctx.send("**" + ctx.author.display_name + "** acariciou " + names + " ❤", file=file)
####################.HUG###########################
@client.command()
@commands.guild_only()
async def hug(ctx):
    """`.hug @user` : hugs one or more mentioned users."""
    mentions = ctx.message.mentions
    if not mentions:
        await ctx.send("Por favor, mencione, pelo menos, um usuario")
        return
    content = ctx.message.content
    if "-img" in content:
        use_gif = False
    elif "-gif" in content:
        use_gif = True
    else:
        # No explicit flag: 60% chance of a gif.
        use_gif = random.randint(0, 1000) < 600
    if use_gif:
        file = discord.File("hug/gif/" + str(random.choice(os.listdir("hug/gif/"))), filename="hug.gif")
    else:
        file = discord.File("hug/img/" + str(random.choice(os.listdir("hug/img/"))), filename="hug.jpg")
    names = ", ".join("**" + m.display_name + "**" for m in mentions)
    await ctx.send("**" + ctx.author.display_name + "** abraçou " + names, file=file)
####################.CUDDLE###########################
@client.command()
@commands.guild_only()
async def cuddle(ctx):
    """`.cuddle @user` : lovingly hugs one or more mentioned users."""
    mentions = ctx.message.mentions
    if not mentions:
        await ctx.send("Por favor, mencione, pelo menos, um usuario")
        return
    content = ctx.message.content
    if "-img" in content:
        use_gif = False
    elif "-gif" in content:
        use_gif = True
    else:
        # No explicit flag: 50/50 between gif and image.
        use_gif = random.randint(0, 1000) < 500
    if use_gif:
        file = discord.File("cuddle/gif/" + str(random.choice(os.listdir("cuddle/gif/"))), filename="cuddle.gif")
    else:
        file = discord.File("cuddle/img/" + str(random.choice(os.listdir("cuddle/img/"))), filename="cuddle.jpg")
    names = ", ".join("**" + m.display_name + "**" for m in mentions)
    await ctx.send("**" + ctx.author.display_name + "** abraçou " + names + " 💞", file=file)
####################.NUZZLE###########################
@client.command()
@commands.guild_only()
async def nuzzle(ctx):
    """`.nuzzle @user` : nuzzles one or more mentioned users."""
    mentions = ctx.message.mentions
    if not mentions:
        await ctx.send("Por favor, mencione, pelo menos, um usuario")
        return
    chosen = random.choice(os.listdir("nuzzle/gif/"))
    file = discord.File("nuzzle/gif/" + str(chosen), filename="nuzzle.gif")
    names = ", ".join("**" + m.display_name + "**" for m in mentions)
    await ctx.send("**" + ctx.author.display_name + "** se esfregou em " + names, file=file)
####################.KISS###########################
@client.command()
@commands.guild_only()
async def kiss(ctx):
    """`.kiss @user` : kisses one or more mentioned users."""
    mentions = ctx.message.mentions
    if not mentions:
        await ctx.send("Por favor, mencione, pelo menos, um usuario")
        return
    content = ctx.message.content
    if "-img" in content:
        use_gif = False
    elif "-gif" in content:
        use_gif = True
    else:
        # No explicit flag: 50/50 between gif and image.
        use_gif = random.randint(0, 1000) < 500
    if use_gif:
        file = discord.File("kiss/gif/" + str(random.choice(os.listdir("kiss/gif/"))), filename="kiss.gif")
    else:
        file = discord.File("kiss/img/" + str(random.choice(os.listdir("kiss/img/"))), filename="kiss.jpg")
    names = ", ".join("**" + m.display_name + "**" for m in mentions)
    await ctx.send("**" + ctx.author.display_name + "** beijou " + names + " 💗", file=file)
####################.ROLL###########################
@client.command()
async def roll(ctx):
    """`.roll <repetições>* <dado> <buff/nerf>*` : Joga um `<dado>`"""
    # Parse the message into dice "blocks" such as "3d20+5". Each number is
    # classified by the character immediately before it:
    #   'd'         -> face count of the die (e.g. "d20")
    #   an operator -> modifier applied to every individual roll (e.g. "+5")
    #   otherwise   -> repetition count (e.g. "3d6")
    # The three parallel lists below hold one entry per block.
    content = ctx.message.content
    indice = -1           # index of the block currently being filled (-1 = none yet)
    faceDados = []        # face count per block (None until its dX is parsed)
    vezes = []            # how many times each die is rolled (default 1)
    multiplicadores = []  # per-roll modifier string ("+0" means none)
    while True:
        # Accepts integers and decimals written with '.' or ','.
        numero = re.search(r"\d+[.\|,]?\d*", content)
        if(numero is None):
            break
        if(content[numero.start()-1] == "d"):
            if(indice == -1):  # first insertion
                indice += 1
                faceDados.append(math.trunc(float(numero[0].replace(",", "."))))
                vezes.append(1)
                multiplicadores.append("+0")
            elif(faceDados[indice] is None):  # current block still lacks a die
                faceDados[indice] = math.trunc(float(numero[0].replace(",", ".")))
            else:  # current block already complete -> start a new block
                indice += 1
                faceDados.append(math.trunc(float(numero[0].replace(",", "."))))
                vezes.append(1)
                multiplicadores.append("+0")
        elif(content[numero.start()-1] in ["+", "-", "*", "/", "x", "^", "%"]):
            multiplicador = content[numero.start()-1:numero.end()].replace(",", ".")
            if(multiplicador.endswith(".")):
                multiplicador = multiplicador[:-1]
            if (indice == -1):  # first insertion
                indice += 1
                faceDados.append(None)
                vezes.append(1)
                multiplicadores.append(multiplicador)
            elif (multiplicadores[indice] == "+0"):  # current block still lacks a modifier
                multiplicadores[indice] = multiplicador
            else:  # new block
                indice += 1
                faceDados.append(None)
                vezes.append(1)
                multiplicadores.append(multiplicador)
        else:
            if (indice == -1):  # first insertion
                indice += 1
                faceDados.append(None)
                vezes.append(math.trunc(float(numero[0].replace(",", "."))))
                multiplicadores.append("+0")
            elif (vezes[indice] == 1):  # current block still lacks a count
                vezes[indice] = math.trunc(float(numero[0].replace(",", ".")))
            else:  # new block
                indice += 1
                faceDados.append(None)
                vezes.append(math.trunc(float(numero[0].replace(",", "."))))
                multiplicadores.append("+0")
        content = content[numero.end():]
    # Roll every block and build the reply message.
    ret = "Resultados para " + ctx.author.mention + ":\n"
    for i in range(len(faceDados)):
        d = faceDados[i]
        soma = 0
        # Identity modifiers are hidden from the header.
        if(multiplicadores[i] in ["+0", "-0", "*1", "x1", "/1", "^1"]):
            ret += "**d" + str(d) + "** ["
        else:
            ret += "**d" + str(d) + "** " + multiplicadores[i] + " ["
        for j in range(vezes[i]):
            if(d == 0):
                valor = 0
            else:
                # BUG FIX: the old expression (randint(0, d*10) % d) + 1 was
                # biased towards 1 — the residue 0 occurs 11 times among the
                # d*10+1 outcomes, all others only 10. randint(1, d) is uniform.
                valor = random.randint(1, d)
            # Apply the block's modifier to this individual roll.
            if (multiplicadores[i].startswith("+")):
                valor = round(float(valor) + float(multiplicadores[i][1:]))
            elif (multiplicadores[i].startswith("-")):
                valor = round(float(valor) - float(multiplicadores[i][1:]))
            elif (multiplicadores[i].startswith("*") or multiplicadores[i].startswith("x")):
                valor = round(float(valor) * float(multiplicadores[i][1:]))
            elif (multiplicadores[i].startswith("/")):
                valor = round(float(valor) / float(multiplicadores[i][1:]))
            elif (multiplicadores[i].startswith("^")):
                valor = round(float(valor) ** float(multiplicadores[i][1:]))
            elif (multiplicadores[i].startswith("%")):
                valor = round(float(valor) % float(multiplicadores[i][1:]))
            soma += valor
            ret += str(valor) + ", "
        ret = ret[:-2] + "]\n"
        if(vezes[i] != 1):
            ret += "Total: " + str(soma) + "\n"
    await ctx.send(ret)
####################.NOVAFICHA###########################
@client.command()
async def novaficha(ctx, *, nome):
    """`.novaficha <nome>` : Cria uma nova ficha com o nome especificado"""
    # Sheet names are unique per author.
    if(gerenciadorDeDados.fichaJaRegistrada(ctx.author.id, nome)):
        await ctx.send(ctx.author.mention + ", você já possui uma ficha chamada `" + nome + "`")
        return
    def check(m):
        """Accept only a message sent in the same channel by the same author."""
        return m.channel == ctx.message.channel and m.author == ctx.author
    try:
        # Wait up to 10 minutes for the follow-up message carrying the
        # sheet description and/or an image attachment.
        await ctx.send(ctx.author.mention + ", você tem 10 minutos para mandar a descrição e uma imagem para seu personagem")
        msg = await client.wait_for('message',timeout=600.0, check=check)
    except asyncio.TimeoutError:
        await ctx.send(ctx.author.mention + "tempo esgotado, tente novamente quando tiver tudo em mãos")
    else:
        # Both image and description are optional; missing pieces can be
        # added later via `.editficha`.
        if(not msg.attachments):
            await ctx.send("Sua ficha não terá uma imagem, caso queira adicionar uma, use o comando `.editficha`")
            imgURL = None
        else:
            imgURL = msg.attachments[0].url
        if(msg.content == ""):
            await ctx.send("Sua ficha não terá uma descrição, caso queira adicionar uma, use o comando `.editficha`")
        usuario = gerenciadorDeDados.getUsuario(ctx.author.id)
        if(usuario is None): # user's first sheet: create and register the record
            usuario = dados.Usuario(msg.author.id, None)
            gerenciadorDeDados.registrarUsuario(usuario)
        usuario.addFicha(dados.Ficha(nome, msg.content, imgURL))
        gerenciadorDeDados.updateUsuario(usuario)
        await ctx.send("ficha criada com sucesso!\nPara consultar suas fichas use `.fichas`")
@novaficha.error
async def novaficha_error(ctx, error):
    """Trata erros de parâmetros do `.novaficha`"""
    # Missing <nome> argument -> friendly prompt; anything else is logged.
    if not isinstance(error, discord.ext.commands.errors.MissingRequiredArgument):
        print(error)
        return
    await ctx.send(ctx.author.mention + ", dê um nome ao seu personagem")
####################.EDITFICHA###########################
@client.command()
async def editficha(ctx, *, nome):
    """`.editficha <nome>` : Edita uma ficha com o nome especificado"""
    usuario = gerenciadorDeDados.getUsuario(ctx.author.id)
    if (usuario is None):
        await ctx.send(ctx.author.mention + ", você não possui nenhuma ficha, procure usar o comando `.novaficha`")
        return
    if(not gerenciadorDeDados.fichaJaRegistrada(ctx.author.id, nome)):
        await ctx.send(ctx.author.mention + ", você não possui uma ficha chamada `" + nome + "`")
        return
    def check(m):
        """Accept only a message sent in the same channel by the same author."""
        return m.channel == ctx.message.channel and m.author == ctx.author
    try:
        # Wait up to 10 minutes for the follow-up message carrying the new
        # description and/or image attachment.
        await ctx.send(ctx.author.mention + ", você tem 10 minutos para mandar a descrição e uma imagem para seu personagem")
        msg = await client.wait_for('message',timeout=600.0, check=check)
    except asyncio.TimeoutError:
        await ctx.send(ctx.author.mention + "tempo esgotado, tente novamente quando tiver tudo em mãos")
    else:
        # Decide what to update based on what the reply actually contains.
        # BUG FIX: the old 'else' branch assumed an attachment was present and
        # raised IndexError for a reply with neither text nor attachment
        # (e.g. sticker/embed only); that case is now a no-op edit.
        if(msg.attachments and msg.content != ""):
            # Replace both the description and the image.
            fichaEditavel = dados.Ficha(nome, msg.content, msg.attachments[0].url)
        elif(msg.attachments):
            # Image only.
            fichaEditavel = usuario.getFicha(nome)
            fichaEditavel.setImgURL(msg.attachments[0].url)
        elif(msg.content != ""):
            # Description only.
            fichaEditavel = usuario.getFicha(nome)
            fichaEditavel.setDescricao(msg.content)
        else:
            # Nothing usable in the reply: keep the sheet unchanged.
            fichaEditavel = usuario.getFicha(nome)
        usuario.editFicha(fichaEditavel)
        gerenciadorDeDados.updateUsuario(usuario)
        await ctx.send("ficha atualizada com sucesso!\nPara consultar suas fichas use `.fichas`")
@editficha.error
async def editficha_error(ctx, error):
    """Trata erros de parâmetros do `.editficha`"""
    # Missing <nome> argument -> friendly prompt; anything else is logged.
    if not isinstance(error, discord.ext.commands.errors.MissingRequiredArgument):
        print(error)
        return
    await ctx.send(ctx.author.mention + ", dê o nome da ficha que pretende editar")
####################.DELFICHA###########################
@client.command()
async def delficha(ctx, *, nome):
    """`.delficha <nome>` : Exclui uma ficha com o nome especificado"""
    usuario = gerenciadorDeDados.getUsuario(ctx.author.id)
    if usuario is None:
        await ctx.send(ctx.author.mention + ", você não possui nenhuma ficha, procure usar o comando `.novaficha`")
        return
    if not gerenciadorDeDados.fichaJaRegistrada(ctx.author.id, nome):
        await ctx.send(ctx.author.mention + ", você não possui uma ficha chamada `" + nome + "`")
        return
    # Ask for confirmation and only accept an explicit yes/no reply.
    await ctx.send(ctx.author.mention + ", tem certeza que quer excluir `" + nome + "`? (s/n)")
    respostas_validas = ("s", "n", "S", "N", "sim", "nao", "não", "Sim", "Nao", "Não")
    negativas = ("n", "N", "nao", "não", "Nao", "Não")
    def check(m):
        """Accept only a valid yes/no answer from the same author in the same channel."""
        return m.channel == ctx.message.channel and m.author == ctx.author and m.content in respostas_validas
    try:
        msg = await client.wait_for('message',timeout=60.0, check=check)
    except asyncio.TimeoutError:
        await ctx.send(ctx.author.mention + ", como você não me respondeu achei melhor não excluir `" + nome + "`")
        return
    if msg.content in negativas:
        await ctx.send(ctx.author.mention + ", ok, não excluirei `" + nome + "`")
        return
    usuario.delFicha(nome)
    gerenciadorDeDados.updateUsuario(usuario)
    await ctx.send("ficha excluida com sucesso!\nPara consultar suas fichas use `.fichas`")
@delficha.error
async def delficha_error(ctx, error):
    """Trata erros de parâmetros do `.delficha`"""
    # Missing <nome> argument -> friendly prompt; anything else is logged.
    if not isinstance(error, discord.ext.commands.errors.MissingRequiredArgument):
        print(error)
        return
    await ctx.send(ctx.author.mention + ", dê o nome da ficha que pretende excluir")
####################.FICHA###########################
@client.command()
async def ficha(ctx):
    """`.ficha @user* <nome>*` : lista as fichas de um usuario ou mostra a ficha especificada
    `.fichas @user* <nome>*` também pode ser utilizado"""
    content = ctx.message.content
    # Target user: first mention, or the author when nobody is mentioned.
    if(not ctx.message.mentions):
        user = ctx.author
    else:
        user = ctx.message.mentions[0]
        # BUG FIX: this strip loop used to reuse 'user' as its loop variable,
        # clobbering the selection above so the *last* mentioned user was
        # consulted instead of the first.
        for mencionado in ctx.message.mentions:
            content = content.replace(mencionado.mention, "")
    # Whatever follows the word "ficha" (mentions already stripped) is the
    # requested sheet name; empty means "list all sheets".
    nome = content[re.search("ficha", content).end():].strip()
    usu = gerenciadorDeDados.getUsuario(user.id)
    if(usu is None):
        await ctx.send(user.display_name + " não tem nenhuma ficha")
        return
    if nome == "":
        # No name given: list every sheet owned by the target user.
        msg = "Fichas de " + user.display_name + ": \n"
        for nomeFicha in usu.listaNomeFichas():
            msg += "`" + nomeFicha + "`\n"
        await ctx.send(msg)
    else:
        # Show one specific sheet as an embed.
        fichaProcurada = usu.getFicha(nome)
        if(fichaProcurada is None):
            await ctx.send("Não encontrei a ficha: `" + nome + "`")
            return
        embedFicha = discord.Embed(title=fichaProcurada.nome, description=fichaProcurada.descricao)
        if(fichaProcurada.imgURL is not None):
            embedFicha.set_thumbnail(url=fichaProcurada.imgURL)
        embedFicha.colour = ctx.author.color
        await ctx.send(embed=embedFicha)
####################.SHIP###########################
@client.command()
@commands.guild_only()
async def ship(ctx):
    """`.ship @user*` : mistura os nomes e exibe a porcentagem do ship dar certo"""
    if (len(ctx.message.mentions) < 2):
        await ctx.send("Preciso de pelo menos duas pessoas para shippar")
        return
    numero = 0      # sum of member ids -> deterministic "compatibility" seed
    nome = "❤ **"   # blended couple name being assembled
    qtd = len(ctx.message.mentions)
    i = 0
    # Take the i-th slice of each member's display name, so the blended name
    # has roughly one equal-length part per mentioned user.
    # NOTE(review): getMentionsFromMessage presumably returns the mentions in
    # message order — verify against the helper's implementation.
    for member in gerenciadorDeDados.getMentionsFromMessage(ctx.message):
        parcela = math.ceil(len(member.display_name) / qtd)
        nome += member.display_name[parcela * i:parcela * (i + 1)]
        numero += member.id
        i += 1
    nome += "** ❤"
    # Percentage in 1..100, stable for the same set of users.
    porcentagem = (numero % 100) + 1
    content = nome + "\n\n" + str(porcentagem) + "% `"
    # 25-character progress bar (4 percentage points per character).
    porcentagemExibivel = math.ceil(porcentagem / 4)
    for i in range(porcentagemExibivel):
        content += "█"
    for i in range(25 - porcentagemExibivel):
        content += "_"
    content += "`\n\n"
    # Flavor text tier for the computed percentage.
    if(porcentagem < 20):
        content += "É uma pena 😔"
    elif(porcentagem <51):
        content += "Talvez n seja uma boa ideia 😕"
    elif(porcentagem < 71):
        content += "Tem futuro 😘"
    elif(porcentagem < 91):
        content += "Porque ainda n estão ficando?! 💞"
    else:
        content += "😍 Que casal perfeitoo! 💕💖💗"
    embedShip = discord.Embed(description=content)
    await ctx.send(embed=embedShip)
####################.CLEAR###########################
@client.command()
@commands.guild_only()
@ehMessageManager()
async def clear(ctx, qtd: int):
    """`.clear <qtd>` : deleta a `<qtd>` de mensagens no canal
    `cl <qtd>` também pode ser utilizado"""
    # history() fetches qtd+1 messages so the `.clear` invocation itself is
    # deleted too. delete_messages() bulk-deletes at most 100 messages per
    # call, so the user-visible cap must be 99 (99 + the command message).
    # BUG FIX: the previous cap of 100 produced a 101-message batch, which
    # makes discord.py's delete_messages raise ClientException.
    if(qtd > 99):
        qtd = 99
    await ctx.message.channel.delete_messages(await ctx.message.channel.history(limit=qtd+1, oldest_first=False).flatten())
@clear.error
async def clear_error(ctx, error):
    """Trata erros de permissão e parâmetros do `.clear`"""
    # Map each anticipated error type to its user-facing reply; the first
    # matching type wins, anything unexpected is dumped to the console.
    respostas = (
        (discord.ext.commands.errors.CheckFailure,
         "Desculpe-me " + ctx.author.mention + " você não tem permissão para isso"),
        (discord.ext.commands.errors.MissingRequiredArgument,
         "" + ctx.author.mention + ", especifique quantas mensagens deseja limpar"),
        (discord.ext.commands.errors.BadArgument,
         "" + ctx.author.mention + ", quantidade de mensagens invalida"),
    )
    for tipo, resposta in respostas:
        if isinstance(error, tipo):
            await ctx.send(resposta)
            return
    print(error)
####################.LOG###########################
@client.command()
@commands.guild_only()
@ehMod()
async def log(ctx):
    """`.log <#canal>` : define `<#canal>` como o canal de log do servidor"""
    if ("-clear" in ctx.message.content):
        # Remove the configured log channel.
        server = gerenciadorDeDados.getServer(ctx.guild.id)
        server.setLogChannelID(None)
        gerenciadorDeDados.updateServer(server)
        await ctx.send("Canal de log removido")
    else:
        if (not ctx.message.channel_mentions):
            # No channel mentioned: just report the current setting.
            logChannelID = gerenciadorDeDados.getServer(ctx.guild.id).logChannelID
            if (logChannelID is None):
                await ctx.send("O servidor não possui um canal de log")
            else:
                await ctx.send("O canal de log atual é: <#" + str(logChannelID) + ">")
            # BUG FIX: without this return the code below indexed
            # channel_mentions[0] and raised IndexError.
            return
        if(len(ctx.message.channel_mentions) > 1):
            await ctx.send("Por favor, mencione um canal para ser o canal de registro")
            # BUG FIX: the update below used to run even after this warning.
            return
        server = gerenciadorDeDados.getServer(ctx.guild.id)
        server.setLogChannelID(ctx.message.channel_mentions[0].id)
        gerenciadorDeDados.updateServer(server)
        await ctx.send("O canal de log foi atualizado para <#" + str(ctx.message.channel_mentions[0].id) + "> com sucesso")
@log.error
async def log_error(ctx, error):
    """Trata erros de permissão do `.log`"""
    # Only permission failures are reported; other errors pass silently.
    if not isinstance(error, discord.ext.commands.errors.CheckFailure):
        return
    await ctx.send("Desculpe-me " + ctx.author.mention + " você não tem permissão para isso")
####################.ROLEPICKER###########################
@client.command()
@commands.guild_only()
@ehMod()
async def rolepicker(ctx):
    """`.rolepicker <#canal>` : Espera receber um `rolepicker` valido no `<#canal>` especificado"""
    # Exactly one target channel must be mentioned.
    if(not len(ctx.message.channel_mentions) == 1):
        await ctx.send("Mencione o canal em que pretende criar o rolepicker.\nCaso tenha alguma duvida consulte `.help rolepicker` e tente novamente.")
        return
    def check(m):
        """Accept only a message sent in the mentioned channel by the command author."""
        return m.channel == ctx.message.channel_mentions[0] and m.author == ctx.author
    try:
        # The author has 10 minutes to post the rolepicker message in the
        # mentioned channel.
        await ctx.send(ctx.author.mention + ", você tem 10 minutos para criar o rolepicker em: <#" + str(ctx.message.channel_mentions[0].id) + ">")
        msg = await client.wait_for('message',timeout=600.0, check=check)
    except asyncio.TimeoutError:
        await ctx.send("tempo esgotado")
    else:
        # A valid rolepicker needs at least one role mention plus at least one
        # emoji: <:name:id> matches custom server emojis; hasEmoji presumably
        # detects unicode emojis (helper defined elsewhere — verify).
        if not(len(msg.role_mentions) > 0 and (len(re.findall(r'<:\w*:\d*>', msg.content)) > 0 or gerenciadorDeDados.hasEmoji(msg.content))):
            await ctx.send("Desculpe-me, " + ctx.author.mention + ", não pude criar o rolepicker.\nCaso tenha alguma duvida consulte `.help rolepicker` e tente novamente.")
        elif(len(gerenciadorDeDados.getRolesFromMessage(msg)) != len(gerenciadorDeDados.getEmojisFromMessage(msg))):
            # Roles and emojis are paired, so the counts must match.
            await ctx.send(ctx.author.mention + ", a quantidade de cargos e emojis tem de ser igual.\nCaso tenha alguma duvida consulte `.help rolepicker` e tente novamente.")
        else:
            # Persist the message id and pre-seed the reactions users will click.
            server = gerenciadorDeDados.getServer(ctx.guild.id)
            server.addRolepicker(msg.id)
            gerenciadorDeDados.updateServer(server)
            for emoji in gerenciadorDeDados.getEmojisFromMessage(msg):
                await msg.add_reaction(emoji[1])
            await ctx.send("rolepicker criado com sucesso!")
@rolepicker.error
async def rolepicker_error(ctx, error):
    """Trata erros de permissão do `.rolepicker`"""
    # Permission failures get a polite reply; everything else is logged.
    if isinstance(error, discord.ext.commands.errors.CheckFailure):
        await ctx.send("Desculpe-me " + ctx.author.mention + " você não tem permissão para isso")
        return
    print(error)
####################.MODROLE###########################
@client.command()
@commands.guild_only()
@ehDono()
async def modrole(ctx):
    """`.modrole @role` : define `@role` como o cargo de mod do servidor"""
    if ("-clear" in ctx.message.content):
        # Remove the configured mod role.
        server = gerenciadorDeDados.getServer(ctx.guild.id)
        server.setModRoleID(None)
        gerenciadorDeDados.updateServer(server)
        await ctx.send("Cargo mod removido")
    else:
        if (not ctx.message.role_mentions):
            # No role mentioned: just report the current setting.
            modRoleID = gerenciadorDeDados.getServer(ctx.guild.id).modRoleID
            if (modRoleID is None):
                await ctx.send("O servidor não possui um cargo moderador")
            else:
                await ctx.send("O cargo de moderador atual é: <@&" + str(modRoleID) + ">")
            # BUG FIX: without this return the code below indexed
            # role_mentions[0] and raised IndexError (same defect as `.log`).
            return
        if(len(ctx.message.role_mentions) > 1):
            await ctx.send("Por favor, mencione apenas um cargo para ser o cargo de mod")
            # BUG FIX: the update below used to run even after this warning.
            return
        server = gerenciadorDeDados.getServer(ctx.guild.id)
        server.setModRoleID(ctx.message.role_mentions[0].id)
        gerenciadorDeDados.updateServer(server)
        await ctx.send("O cargo de mod foi atualizado para <@&" + str(ctx.message.role_mentions[0].id) + "> com sucesso")
@modrole.error
async def modrole_error(ctx, error):
    """Trata erros de permissão do `.modrole`"""
    # Only the server owner may run `.modrole`; report permission failures.
    if not isinstance(error, discord.ext.commands.errors.CheckFailure):
        return
    await ctx.send("Desculpe-me " + ctx.author.mention + " somente o dono do server pode usar este comando")
####################.HELP###########################
@client.command()
async def help(ctx):
    """Exibe a lista de comandos e os embeds de ajuda de cada função"""
    # Topic keywords paired with their help embeds. Order matters because the
    # match is a plain substring test (e.g. "novaficha" must be tried before
    # "ficha"); the first keyword found in the message wins.
    topicos = (
        ("bite", embed_bite),
        ("slap", embed_slap),
        ("cry", embed_cry),
        ("highfive", embed_highfive),
        ("blush", embed_blush),
        ("lick", embed_lick),
        ("pat", embed_pat),
        ("hug", embed_hug),
        ("cuddle", embed_cuddle),
        ("nuzzle", embed_nuzzle),
        ("kiss", embed_kiss),
        ("ship", embed_ship),
        ("clear", embed_clear),
        ("log", embed_log),
        ("rolepicker", embed_rolepicker),
        ("roll", embed_roll),
        ("novaficha", embed_novaficha),
        ("editficha", embed_editficha),
        ("delficha", embed_delficha),
        ("ficha", embed_ficha),
        ("modrole", embed_modrole),
    )
    for palavra, embed in topicos:
        if palavra in ctx.message.content:
            await ctx.send(embed=embed)
            return
    # No specific topic requested: send the general help embed.
    await ctx.send(embed=embed_help)
    return
# Start the bot; the Discord token is read from the TOKEN environment variable.
client.run(os.environ["TOKEN"])
|
import io
from django.core import management
# from fabric.api import run
# from fabric.context_managers import settings
def create_session_on_server(username): # revisit
    """Run the `create_session` management command for *username* and return its stdout (the session key)."""
    buffer = io.StringIO()
    management.call_command("create_session", f"--username={username}", stdout=buffer)
    return buffer.getvalue()
def reset_database_on_server(): # revisit
    """Wipe all rows from the database via Django's non-interactive `flush`."""
    management.call_command("flush", verbosity=0, interactive=False)
# def _get_manage_dot_py(host):
# return f'~/sites/{host}/virtualenv/bin/python ~/sites/{host}/source/manage.py'
# def reset_database(host):
# manage_dot_py = _get_manage_dot_py(host)
# with settings(host_string=f'gai@{host}'):
# run(f'{manage_dot_py} flush --noinput')
# def create_session_on_server(host, username):
# manage_dot_py = _get_manage_dot_py(host)
# with settings(host_string=f'gai@{host}'):
# session_key = run(f'{manage_dot_py} create_session {username}')
# return session_key.strip()
|
__author__ = 'wuxj06'
import user_management_pb2
import user_management_pb2_grpc
import register_login_pb2
import register_login_pb2_grpc
import comp_management_pb2
import comp_management_pb2_grpc
import company_cert_pb2
import company_cert_pb2_grpc
import grpc
import random
import hmac
import time
import unittest
import yaml
from BeautifulReport import BeautifulReport
from hashlib import sha256
class user_center(unittest.TestCase):
def setUp(self):
self.list1 = list(range(10))
self.a = '10' + str(random.choice(self.list1))
self.b = ''.join(random.choice('0123456789') for j in range(8))
self.phone = self.a+self.b
self.format = "%Y-%m-%d %H:%M:%S"
self.t = time.strftime(self.format,time.localtime())
self.timeArray = time.strptime(self.t, self.format)
self.timeStamp = str(int(time.mktime(self.timeArray)))
# timeStamp = '1597231871'
self.app_id = 'appid'
self.app_key = 'appkey'
self.auth = self.app_id + '&' + self.timeStamp + '&' + self.app_key
self.md5x = hmac.new(
bytes(
self.app_key,
encoding='utf-8'),
bytes(
self.auth,
encoding='utf-8'),
digestmod=sha256)
self.key = self.md5x.hexdigest()
self.channel = grpc.insecure_channel('address')
self.metadata1 = (
('authorization', 'bearer %s.{"app_id":"%s","time_stamp":%s}' % (self.key,self.app_id, self.timeStamp)),)
# def run():
# format = "%Y-%m-%d %H:%M:%S"
# t = time.strftime(format, time.localtime())
# timeArray = time.strptime(t, format)
# timeStamp = str(int(time.mktime(timeArray)))
# #timeStamp = '1597231871'
# app_id = 'micro-supplier-service'
# app_key = '748e4880546c5004d0164d70ce8df743'
# auth = app_id + '&' + timeStamp + '&' + app_key
# md5x = hmac.new(
# bytes(
# app_key,
# encoding='utf-8'),
# bytes(
# auth,
# encoding='utf-8'),
# digestmod=sha256)
# key = md5x.hexdigest()
# channel = grpc.insecure_channel('47.101.38.159:31009')
# metadata1 = (
# ('authorization', 'bearer %s.{"app_id":"micro-supplier-service","time_stamp":%s}' %(key, timeStamp)), )
#
# stub = user_management_pb2_grpc.UserManagementServiceStub(channel)
# response = stub.GetUserInfoListByCompanyId(
# user_management_pb2.GetUserInfoListByCompanyIdReq(company_id="G151485",page=1,page_size=10
# ),metadata=metadata1)
# response2 = stub.AddCompanyInfo(
# user_management_pb2.AddCompanyInfoReq(
# company_id="G104411",
# company_name= "深圳市冰融科技有限司",
# company_type=1,
# company_logo= "adafaafagg",
# source= 1,
# status=1,
# created_by="阿的江"
# ),metadata=metadata1
# )
# print(auth)
# print(key)
# # dict1 = dict(response2)
# print("Greeter client received: " + str(response),str(response1),response2.success)
# def test_case0001(self):
# '''通过公司ID获取用户信息列表'''
#
# self.stub = user_management_pb2_grpc.UserManagementServiceStub(self.channel)
# self.response = self.stub.GetUserInfoListByCompanyId(
# user_management_pb2.GetUserInfoListByCompanyIdReq(company_id="K006395",page=1,page_size=10,search={"name":"王"}
# ),metadata=self.metadata1)
# # print(self.response)
# self.assertIsNotNone(self.response, "通过公司ID获取用户信息列表接口错误")
#
# def test_case0002(self):
# '''编辑用户信息'''
#
# self.stub = user_management_pb2_grpc.UserManagementServiceStub(self.channel)
# self.response = self.stub.SaveUserInfo(
# user_management_pb2.SaveUserInfoReq(mobile={ "value":"19000100002","valid":True},
# name={"value":"啦啦啦","valid":True},
# postion={"value":"总经理","valid":True},
# mail={"value":"18651085900@126.com","valid":True},
# qq={"value":"1314151","valid":True},
# concentration={"value":"13213131","valid":True},
# operation_remark={"value":"31231415","valid":True},
# record_id={"value":"47260","valid":True},
# user_id={"value":"100002","valid":True},
# logo={"value": "199241", "valid": True},
# wechat_qr_code={"value": "199241", "valid": True},
# company_name={"value": "199241", "valid": True},
# website={"value": "199241", "valid": True},
# telephone={"value": "199241", "valid": True},
# fax={"value": "199241", "valid": True},
# postcode={"value": "199241", "valid": True},
# biz_tel={"value": "199241", "valid": True},
# wechat={"value": "199241", "valid": True},
# weibo={"value": "199241", "valid": True},
# address={"value": "199241", "valid": True},
# province_code={"value": "199241", "valid": True},
# city_code={"value": "222", "valid": True}
#
# ),metadata=self.metadata1
# )
# self.assertEqual(self.response.success, True, "编辑用户信息接口错误")
#
# def test_case0003(self):
# '''添加公司信息'''
#
# self.stub = comp_management_pb2_grpc.CompManagementServiceStub(self.channel)
# self.response = self.stub.AddCompanyInfo(
# comp_management_pb2.AddCompanyInfoReq(
# company_id="G104411",
# company_name= "深圳市冰融科技有限司",
# company_type=1,
# company_logo= "adafaafagg",
# source= 1,
# status=1,
# created_by="阿的江"
# ),metadata=self.metadata1
#
# )
# # print(self.response)
# self.assertEqual(self.response.success, True, "添加公司信息接口错误")
#
# def test_case0004(self):
# '''注册企业员工账号'''
#
# self.stub = register_login_pb2_grpc.RegisterLoginServiceStub(self.channel)
# self.response = self.stub.RegisterEnterpriseEmployee(
# register_login_pb2.RegisterEnterpriseEmployeeReq(company_id="G151485", company_name="sdfsdf",
# enterprise_type=1, register_ip="127.0.0.1",
# user_info_list=[{"mobile": self.phone ,"name": "sdf",'password':'gys12356',"charge_region_codes":"222"}],
# duplicate_registration_permitted=True
# ), metadata=self.metadata1)
#
# self.assertIsNotNone(self.response, "注册企业员工账号接口错误")
#
# def test_case0005(self):
# '''获取区域公司签约开发商列表'''
#
# self.stub = comp_management_pb2_grpc.CompManagementServiceStub(
# self.channel)
# self.response = self.stub.GetCompContractListByAreaName(
# comp_management_pb2.GetCompContractListByAreaNameReq(
# contract_company='深圳分公司',
# page=1,
# page_size=10
# ), metadata=self.metadata1
# )
# # print(self.response)
# self.assertIsNotNone(self.response, "获取区域公司签约开发商列表接口错误")
#
# def test_case0006(self):
# '''通过公司ID获取用户总数'''
#
# self.stub = user_management_pb2_grpc.UserManagementServiceStub(
# self.channel)
# self.response = self.stub.GetUserNumByCompanyId(
# user_management_pb2.GetUserNumByCompanyIdReq(
# company_id_list=["K007447", "K007823"],
# enterprise_type=1,
# search={"name": "吴"}
# ), metadata=self.metadata1
# )
# # print(self.response)
# self.assertIsNotNone(self.response, "通过公司ID获取用户总数接口错误")
#
# def test_case0007(self):
# '''通过公司ID获取最新的用户信息'''
#
# self.stub = user_management_pb2_grpc.UserManagementServiceStub(
# self.channel)
# self.response = self.stub.GetLatestUserInfoByCompanyId(
# user_management_pb2.GetLatestUserInfoByCompanyIdReq(
# company_id_list=["K007447", "K007823"],
# enterprise_type=2
# ), metadata=self.metadata1
# )
# # print(self.response)
# self.assertIsNotNone(self.response, "通过公司ID获取最新的用户信息接口错误")
#
# def test_case0008(self):
# '''获取区域公司签约开发商列表'''
#
# self.stub = comp_management_pb2_grpc.CompManagementServiceStub(self.channel)
# self.response = self.stub.GetCompContractListByAreaName(
# comp_management_pb2.GetCompContractListByAreaNameReq(
# contract_company='深圳分公司',
# page = 1,
# page_size = 10
# ),metadata=self.metadata1
# )
# # print(self.response)
# self.assertIsNotNone(self.response, "注册企业员工账号接口错误")
#
# def test_case0009(self):
# '''通过公司ID获取混合型用户信息'''
#
# self.stub = user_management_pb2_grpc.UserManagementServiceStub(
# self.channel)
# self.response = self.stub.GetMixedUserInfoByCompanyId(
# user_management_pb2.GetMixedUserInfoByCompanyIdReq(
# type=1,
# company_id="G092403",
# enterprise_type=1
# ), metadata=self.metadata1
# )
# # print(self.response)
# self.assertIsNotNone(self.response, "通过公司ID获取混合型用户信息接口错误")
#
# def test_case0010(self):
# '''通过用户ID获取混合型用户信息'''
#
# self.stub = user_management_pb2_grpc.UserManagementServiceStub(
# self.channel)
# self.response = self.stub.GetMixedUserInfoByUserId(
# user_management_pb2.GetMixedUserInfoByUserIdReq(
# type=1,
# user_id=["228098"],
#
# ), metadata=self.metadata1
# )
# # print(self.response)
# self.assertIsNotNone(self.response, "通过用户ID获取混合型用户信息接口错误")
#
# def test_case0011(self):
# '''通过手机号或用户名称获取用户信息列表'''
#
# self.stub = user_management_pb2_grpc.UserManagementServiceStub(
# self.channel)
# self.response = self.stub.GetUserInfoListByMobileOrName(
# user_management_pb2.GetUserInfoListByMobileOrNameReq(
# mobile="13024082335",
# name="吴兴江",
# page=1,
# page_size=10
#
# ), metadata=self.metadata1
# )
# # print(self.response)
# self.assertIsNotNone(self.response, "通过手机号或用户名称获取用户信息列表接错误")
#
# def test_case0012(self):
# '''通过用户ID列表批量获取用户信息'''
#
# self.stub = user_management_pb2_grpc.UserManagementServiceStub(
# self.channel)
# self.response = self.stub.GetUserInfoListByUserIdList(
# user_management_pb2.GetUserInfoListByUserIdListReq(
# user_id_list=["228098","227859"],
# page=1,
# page_size=10
#
# ), metadata=self.metadata1
# )
# # print(self.response)
# self.assertIsNotNone(self.response, "通过用户ID列表批量获取用户信息接口错误")
#
# def test_case0013(self):
# '''通过手机号获取用户信息列表'''
#
# self.stub = user_management_pb2_grpc.UserManagementServiceStub(
# self.channel)
# self.response = self.stub.GetUserInfoListByMobile(
# user_management_pb2.GetUserInfoListByMobileReq(
# mobile="13024082335"
#
# ), metadata=self.metadata1
# )
# print(self.response)
# self.assertIsNotNone(self.response, "通过手机号或用户名称获取用户信息列表接口错误")
#
# def test_case0014(self):
# '''通过用户ID判断当前用户是否为公司管理员'''
#
# self.stub = user_management_pb2_grpc.UserManagementServiceStub(
# self.channel)
# self.response = self.stub.GetIsCompanyAdminByUserId(
# user_management_pb2.GetIsCompanyAdminByUserIdReq(
# user_id="198311",
# company_id="K006933"
#
# ), metadata=self.metadata1
# )
# # print(self.response)
# self.assertIsNotNone(self.response, "通过用户ID判断当前用户是否为公司管理员接口错误")
# def test_case0015(self):
# '''通过用戶ID解除绑定公司'''
#
# self.stub = user_management_pb2_grpc.UserManagementServiceStub(
# self.channel)
# self.response = self.stub.UnbindCompanyByUserId(
# user_management_pb2.UnbindCompanyByUserIdReq(
# operator_user_id="224265",
# unbind_user_id="224265",
# unbind_user_name="啾啾"
#
# ), metadata=self.metadata1
# )
# print(self.response)
# self.assertIsNotNone(self.response, "通过用戶ID解除绑定公司接口错误")
# def test_case0016(self):
# '''通过TOKEN获取用户简要信息'''
#
# self.stub = user_management_pb2_grpc.UserManagementServiceStub(
# self.channel)
# self.response = self.stub.GetUserProfileByToken(
# user_management_pb2.GetUserProfileByTokenReq(
# token="6f4b6f0f-f274-11ea-85b1-0242ac120002"
#
#
# ), metadata=self.metadata1
# )
# print(self.response)
# self.assertIsNotNone(self.response, "通过TOKEN获取用户简要信息接口错误")
#
#
#
# def test_case0017(self):
# '''将用户和(开发商/供应商)企业进行绑定'''
#
# self.stub = user_management_pb2_grpc.UserManagementServiceStub(
# self.channel)
# self.response = self.stub.BindUserToCompany(
# user_management_pb2.BindUserToCompanyReq(
# user_id=self.phone,
# company_id='G159927',
# enterprise_type=1,
#
#
# ), metadata=self.metadata1
# )
# # print(self.response)
# self.assertIsNotNone(self.response, "将用户和(开发商/供应商)企业进行绑定接口错误")
#
# def test_case0018(self):
# '''获取已绑定的用户的认证记录信息'''
#
# self.stub = company_cert_pb2_grpc.CompanyCertServiceStub(
# self.channel)
# self.response = self.stub.GetBoundedUserCertificationInfoList(
# company_cert_pb2.GetBoundedUserCertificationInfoListReq(
# company_id="K006489",
# enterprise_type=1,
# page=1,
# page_size=10,
# user_id_list={"227850"}
# ), metadata=self.metadata1
# )
# # print(self.response)
# self.assertIsNotNone(self.response, "获取已绑定的用户的认证记录信息接口错误")
    def test_case0019(self):
        '''Send an SMS verification code based on the user's phone number.'''
        # Create a fresh stub against the shared channel (self.channel is
        # set up elsewhere in the TestCase -- presumably in setUp; verify).
        self.stub = user_management_pb2_grpc.UserManagementServiceStub(
            self.channel)
        # Call the SendVerifyCode RPC with a hard-coded test phone number.
        # NOTE(review): gRPC errors would raise before the assertion below,
        # so this only verifies the call completed and returned something.
        self.response = self.stub.SendVerifyCode(
            user_management_pb2.SendVerifyCodeReq(
                phone="15827068970"
            ), metadata=self.metadata1
        )
        print(self.response)
        self.assertIsNotNone(self.response, "根据用户手机号发送验证码接口错误")
if __name__ == "__main__":
    # Collect every test from the user_center TestCase (defined earlier in
    # this file -- not visible here; confirm the class name) into one suite.
    test_suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    test_suite.addTests(loader.loadTestsFromTestCase(user_center))
    # unittest.TextTestRunner(verbosity=2).run(test_suite)
    # BeautifulReport runs the suite and writes an HTML report file.
    run = BeautifulReport(test_suite)  # instantiate the BeautifulReport runner
    run.report(filename='用户中心微服务接口测试报告', description='用户中心微服务接口')
|
# # -*- coding:utf-8 -*-
#
# from django.test import TestCase
# from guest_app.models import Guest,Event
# from datetime import datetime
# from django.contrib.auth.models import User
# # Create your tests here.
# class TestModels(TestCase):
#
# def setUp(self):
# Event.objects.create(name="测试发布会", status=True, limit=1000, address="北京",
# start_time=datetime(2019, 8, 10, 14, 0, 0))
# def test_event_select(self):
# event = Event.objects.get(name="测试发布会")
# print("发布会地址",event.address)
# self.assertEqual(event.address,"北京")
#
# def test_event_delete(self):
# event = Event.objects.get(name="测试发布会")
# event.delete()
# event = Event.objects.filter(name="测试发布会")
# print("发布会个数:",len(event))
# self.assertEqual(len(event),0)
#
# def test_event_update(self):
# event = Event.objects.get(name="测试发布会")
# event.address = "上海"
# event.save()
# event = Event.objects.get(name="测试发布会")
# self.assertEqual(event.address,"上海")
#
# def test_event_create(self):
#
# event = Event.objects.get(name="测试发布会")
# print(event.address)
# print(event.start_time)
# self.assertEqual(event.address,"北京")
# self.assertEqual(event.start_time,datetime(2019,8,10,14,0,0))
#
# class TestViews(TestCase):
#
# def setUp(self):
# User.objects.create_user("user")
#
# def tearDown(self):
# pass
#
# def test_index_page_renders_index_template(self):
# """断音是否用了给定的index.html模板响应"""
# response = self.client.get("/")
# self.assertEqual(response.status_code,200)
#
#
# def test_username_passwor_null(self):
# test_data = {"username":"","password":""}
# response = self.client.post("/login_action/",data=test_data)
# self.assertEqual(response.status_code,200)
# resp_html = response.content.decode(encoding="utf-8")
# self.assertIn("用户名或密码错误",resp_html)
# print(response.content)
#
# def test_success(self):
# test_data = {"username":"admin","password":"admin123456"}
|
from datetime import datetime
from typing import Optional

from orm import DateTime
from pydantic import BaseModel

from ..models.questions import QuestionChoices
class QuestionBase(BaseModel):
    """Shared base schema for question payloads.

    Fix: the field was annotated ``datetime`` with a ``None`` default.
    Pydantic v1 silently treated that as Optional; v2 rejects it. The
    explicit ``Optional[datetime]`` form is correct in both.
    """

    # Creation timestamp; absent (None) until the row is persisted.
    created_at: Optional[datetime] = None
class QuestionCreate(QuestionBase):
    """Input schema for creating a question; the question text is
    constrained to the QuestionChoices enum from the models package."""
    question: QuestionChoices
class Question(QuestionBase):
    """Output schema for a persisted question (includes the DB id)."""
    id: int
    question: str

    class Config:
        # Allow construction directly from ORM row objects.
        orm_mode = True
|
# -*- encoding: utf-8 -*-
"""
http://bytefish.de/blog/first_steps_with_sqlalchemy/
An image consist of a UUID and its associated number of likes. Each image can
be associated with many tags, a tag can be associated with many images. That's
a many-to-many relationship, so we need a mapping table. Finally each image
can have multiple comments, a one-to-many relation with a foreign key on
the comments side.
"""
# Model
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()

from datetime import datetime, timedelta
from sqlalchemy import Table, Column, Integer, String, DateTime, ForeignKey
from sqlalchemy.orm import relationship, backref

# Association table backing the many-to-many Image <-> Tag relationship.
images_tags = Table(
    'images_tags', Base.metadata,
    Column('image_id', Integer, ForeignKey('images.id')),
    Column('tag_id', Integer, ForeignKey('tags.id')),
)
class Image(Base):
    """An uploaded image: unique UUID, like counter, tags and comments."""
    __tablename__ = 'images'
    id = Column(Integer, primary_key=True)
    uuid = Column(String(36), unique=True, nullable=False)
    likes = Column(Integer, default=0)
    # The callable (not its result) is passed, so the timestamp is taken
    # at INSERT time, not at class-definition time.
    created_at = Column(DateTime, default=datetime.utcnow)
    # Many-to-many via the images_tags table; the backref exposes
    # Tag.images as a lazy query object.
    tags = relationship('Tag', secondary=images_tags,
                        backref=backref('images', lazy='dynamic'))
    # One-to-many: Comment rows carry the image_id foreign key.
    comments = relationship('Comment', backref='image', lazy='dynamic')

    def __repr__(self):
        str_created_at = self.created_at.strftime("%Y-%m-%d %H:%M:%S")
        return "<Image (uuid='%s', likes='%d', created_at=%s)>" % (self.uuid, self.likes, str_created_at)
class Tag(Base):
    """A named tag; linked to images through the images_tags table."""
    __tablename__ = 'tags'
    id = Column(Integer, primary_key=True)
    name = Column(String(255), unique=True, nullable=False)

    def __repr__(self):
        return "<Tag (name='%s')>" % (self.name)
class Comment(Base):
    """A comment on an image (one-to-many via image_id)."""
    __tablename__ = 'comments'
    id = Column(Integer, primary_key=True)
    text = Column(String(2000))
    image_id = Column(Integer, ForeignKey('images.id'))

    def __repr__(self):
        return "<Comment (text='%s')>" % (self.text)
# Connecting and Creating the Schema
from sqlalchemy import create_engine
# echo=True logs every emitted SQL statement (see the transcripts below).
engine = create_engine('sqlite:///images.db', echo=True)
# in-memory
#engine = create_engine('sqlite://', echo=True)
# Create all tables declared on Base; a no-op for tables that already exist.
Base.metadata.create_all(engine)
"""
$ python images.py
2016-04-18 15:15:54,754 INFO sqlalchemy.engine.base.Engine SELECT CAST('test plain returns' AS VARCHAR(60)) AS anon_1
2016-04-18 15:15:54,754 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:15:54,754 INFO sqlalchemy.engine.base.Engine SELECT CAST('test unicode returns' AS VARCHAR(60)) AS anon_1
2016-04-18 15:15:54,754 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:15:54,756 INFO sqlalchemy.engine.base.Engine PRAGMA table_info("images")
2016-04-18 15:15:54,756 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:15:54,756 INFO sqlalchemy.engine.base.Engine PRAGMA table_info("images_tags")
2016-04-18 15:15:54,756 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:15:54,756 INFO sqlalchemy.engine.base.Engine PRAGMA table_info("comments")
2016-04-18 15:15:54,757 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:15:54,757 INFO sqlalchemy.engine.base.Engine PRAGMA table_info("tags")
2016-04-18 15:15:54,757 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:15:54,758 INFO sqlalchemy.engine.base.Engine
CREATE TABLE images (
id INTEGER NOT NULL,
uuid VARCHAR(36) NOT NULL,
likes INTEGER,
created_at DATETIME,
PRIMARY KEY (id),
UNIQUE (uuid)
)
2016-04-18 15:15:54,758 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:15:54,760 INFO sqlalchemy.engine.base.Engine COMMIT
2016-04-18 15:15:54,760 INFO sqlalchemy.engine.base.Engine
CREATE TABLE tags (
id INTEGER NOT NULL,
name VARCHAR(255) NOT NULL,
PRIMARY KEY (id),
UNIQUE (name)
)
2016-04-18 15:15:54,760 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:15:54,762 INFO sqlalchemy.engine.base.Engine COMMIT
2016-04-18 15:15:54,762 INFO sqlalchemy.engine.base.Engine
CREATE TABLE images_tags (
image_id INTEGER,
tag_id INTEGER,
FOREIGN KEY(image_id) REFERENCES images (id),
FOREIGN KEY(tag_id) REFERENCES tags (id)
)
2016-04-18 15:15:54,762 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:15:54,764 INFO sqlalchemy.engine.base.Engine COMMIT
2016-04-18 15:15:54,764 INFO sqlalchemy.engine.base.Engine
CREATE TABLE comments (
id INTEGER NOT NULL,
text VARCHAR(2000),
image_id INTEGER,
PRIMARY KEY (id),
FOREIGN KEY(image_id) REFERENCES images (id)
)
2016-04-18 15:15:54,764 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:15:54,766 INFO sqlalchemy.engine.base.Engine COMMIT
"""
# Sessions
from sqlalchemy.orm import sessionmaker
Session = sessionmaker(bind=engine)
session = Session()
# Insert
tag_cool = Tag(name='cool')
tag_car = Tag(name='car')
tag_animal = Tag(name='animal')
comment_rhino = Comment(text='Rhinoceros, often abbreviated as rhino, is a group of five extant species of odd-toed ungulates in the family Rhinocerotidae.')
# Backdated by one day so the "created yesterday" query below finds it.
image_car = Image(uuid='uuid_car', \
                  tags=[tag_car, tag_cool], \
                  created_at=(datetime.utcnow() - timedelta(days=1)))
image_another_car = Image(uuid='uuid_anothercar', \
                          tags=[tag_car])
image_rhino = Image(uuid='uuid_rhino', \
                    tags=[tag_animal], \
                    comments=[comment_rhino])
# run once because of unique -- re-running violates the UNIQUE constraints
# on tags.name and images.uuid.
session.add(tag_cool)
session.add(tag_car)
session.add(tag_animal)
session.add(comment_rhino)
session.add(image_car)
session.add(image_another_car)
session.add(image_rhino)
session.commit()
"""
$ python images.py
2016-04-18 15:23:10,787 INFO sqlalchemy.engine.base.Engine SELECT CAST('test plain returns' AS VARCHAR(60)) AS anon_1
2016-04-18 15:23:10,787 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:23:10,788 INFO sqlalchemy.engine.base.Engine SELECT CAST('test unicode returns' AS VARCHAR(60)) AS anon_1
2016-04-18 15:23:10,788 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:23:10,789 INFO sqlalchemy.engine.base.Engine PRAGMA table_info("images")
2016-04-18 15:23:10,789 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:23:10,790 INFO sqlalchemy.engine.base.Engine PRAGMA table_info("images_tags")
2016-04-18 15:23:10,790 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:23:10,790 INFO sqlalchemy.engine.base.Engine PRAGMA table_info("comments")
2016-04-18 15:23:10,790 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:23:10,790 INFO sqlalchemy.engine.base.Engine PRAGMA table_info("tags")
2016-04-18 15:23:10,790 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:23:10,802 INFO sqlalchemy.engine.base.Engine BEGIN (implicit)
2016-04-18 15:23:10,803 INFO sqlalchemy.engine.base.Engine INSERT INTO images (uuid, likes, created_at) VALUES (?, ?, ?)
2016-04-18 15:23:10,803 INFO sqlalchemy.engine.base.Engine ('uuid_car', 0, '2016-04-17 07:23:10.798788')
2016-04-18 15:23:10,804 INFO sqlalchemy.engine.base.Engine INSERT INTO images (uuid, likes, created_at) VALUES (?, ?, ?)
2016-04-18 15:23:10,804 INFO sqlalchemy.engine.base.Engine ('uuid_anothercar', 0, '2016-04-18 07:23:10.804351')
2016-04-18 15:23:10,804 INFO sqlalchemy.engine.base.Engine INSERT INTO images (uuid, likes, created_at) VALUES (?, ?, ?)
2016-04-18 15:23:10,804 INFO sqlalchemy.engine.base.Engine ('uuid_rhino', 0, '2016-04-18 07:23:10.804775')
2016-04-18 15:23:10,805 INFO sqlalchemy.engine.base.Engine INSERT INTO tags (name) VALUES (?)
2016-04-18 15:23:10,805 INFO sqlalchemy.engine.base.Engine ('cool',)
2016-04-18 15:23:10,806 INFO sqlalchemy.engine.base.Engine INSERT INTO tags (name) VALUES (?)
2016-04-18 15:23:10,806 INFO sqlalchemy.engine.base.Engine ('car',)
2016-04-18 15:23:10,806 INFO sqlalchemy.engine.base.Engine INSERT INTO tags (name) VALUES (?)
2016-04-18 15:23:10,806 INFO sqlalchemy.engine.base.Engine ('animal',)
2016-04-18 15:23:10,807 INFO sqlalchemy.engine.base.Engine INSERT INTO images_tags (image_id, tag_id) VALUES (?, ?)
2016-04-18 15:23:10,807 INFO sqlalchemy.engine.base.Engine ((1, 2), (1, 1), (2, 2), (3, 3))
2016-04-18 15:23:10,808 INFO sqlalchemy.engine.base.Engine INSERT INTO comments (text, image_id) VALUES (?, ?)
2016-04-18 15:23:10,808 INFO sqlalchemy.engine.base.Engine ('Rhinoceros, often abbreviated as rhino, is a group of five extant species of odd-toed ungulates in the family Rhinocerotidae.', 3)
2016-04-18 15:23:10,809 INFO sqlalchemy.engine.base.Engine COMMIT
"""
# Update
# Find the image with the given uuid:
image_to_update = session.query(Image).filter(Image.uuid == 'uuid_rhino').first()
# Increase the number of upvotes (the ORM tracks the change automatically):
image_to_update.likes = image_to_update.likes + 1
# And commit the work:
session.commit()
"""
$ python images.py
2016-04-18 15:46:26,587 INFO sqlalchemy.engine.base.Engine SELECT CAST('test plain returns' AS VARCHAR(60)) AS anon_1
2016-04-18 15:46:26,587 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:46:26,588 INFO sqlalchemy.engine.base.Engine SELECT CAST('test unicode returns' AS VARCHAR(60)) AS anon_1
2016-04-18 15:46:26,588 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:46:26,589 INFO sqlalchemy.engine.base.Engine PRAGMA table_info("images")
2016-04-18 15:46:26,589 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:46:26,590 INFO sqlalchemy.engine.base.Engine PRAGMA table_info("images_tags")
2016-04-18 15:46:26,590 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:46:26,590 INFO sqlalchemy.engine.base.Engine PRAGMA table_info("comments")
2016-04-18 15:46:26,590 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:46:26,591 INFO sqlalchemy.engine.base.Engine PRAGMA table_info("tags")
2016-04-18 15:46:26,591 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 15:46:26,601 INFO sqlalchemy.engine.base.Engine BEGIN (implicit)
2016-04-18 15:46:26,602 INFO sqlalchemy.engine.base.Engine SELECT images.id AS images_id, images.uuid AS images_uuid, images.likes AS images_likes, images.created_at AS images_created_at
FROM images
WHERE images.uuid = ?
LIMIT ? OFFSET ?
2016-04-18 15:46:26,602 INFO sqlalchemy.engine.base.Engine ('uuid_rhino', 1, 0)
2016-04-18 15:46:26,604 INFO sqlalchemy.engine.base.Engine UPDATE images SET likes=? WHERE images.id = ?
2016-04-18 15:46:26,604 INFO sqlalchemy.engine.base.Engine (1, 3)
2016-04-18 15:46:26,605 INFO sqlalchemy.engine.base.Engine COMMIT
"""
# Queries
# Get a list of tags. The py2 `print x` statement is a SyntaxError on
# Python 3; the single-argument function form prints identically on both.
for name in session.query(Tag.name).order_by(Tag.name):
    print(name)
"""
$ python images.py
2016-04-18 16:06:45,831 INFO sqlalchemy.engine.base.Engine SELECT CAST('test plain returns' AS VARCHAR(60)) AS anon_1
2016-04-18 16:06:45,832 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 16:06:45,832 INFO sqlalchemy.engine.base.Engine SELECT CAST('test unicode returns' AS VARCHAR(60)) AS anon_1
2016-04-18 16:06:45,832 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 16:06:45,833 INFO sqlalchemy.engine.base.Engine PRAGMA table_info("images")
2016-04-18 16:06:45,833 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 16:06:45,834 INFO sqlalchemy.engine.base.Engine PRAGMA table_info("images_tags")
2016-04-18 16:06:45,834 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 16:06:45,835 INFO sqlalchemy.engine.base.Engine PRAGMA table_info("comments")
2016-04-18 16:06:45,835 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 16:06:45,835 INFO sqlalchemy.engine.base.Engine PRAGMA table_info("tags")
2016-04-18 16:06:45,835 INFO sqlalchemy.engine.base.Engine ()
2016-04-18 16:06:45,845 INFO sqlalchemy.engine.base.Engine BEGIN (implicit)
2016-04-18 16:06:45,846 INFO sqlalchemy.engine.base.Engine SELECT images.id AS images_id, images.uuid AS images_uuid, images.likes AS images_likes, images.created_at AS images_created_at
FROM images
WHERE images.uuid = ?
LIMIT ? OFFSET ?
2016-04-18 16:06:45,846 INFO sqlalchemy.engine.base.Engine ('uuid_rhino', 1, 0)
2016-04-18 16:06:45,848 INFO sqlalchemy.engine.base.Engine UPDATE images SET likes=? WHERE images.id = ?
2016-04-18 16:06:45,848 INFO sqlalchemy.engine.base.Engine (10, 3)
2016-04-18 16:06:45,849 INFO sqlalchemy.engine.base.Engine COMMIT
2016-04-18 16:06:45,852 INFO sqlalchemy.engine.base.Engine BEGIN (implicit)
2016-04-18 16:06:45,853 INFO sqlalchemy.engine.base.Engine SELECT tags.name AS tags_name
FROM tags ORDER BY tags.name
2016-04-18 16:06:45,853 INFO sqlalchemy.engine.base.Engine ()
(u'animal',)
(u'car',)
(u'cool',)
"""
# How many tags do we have? (print converted to the py2/py3-compatible
# function form; output is unchanged on Python 2.)
print(session.query(Tag).count())
"""
2016-04-18 16:10:09,912 INFO sqlalchemy.engine.base.Engine SELECT count(*) AS count_1
FROM (SELECT tags.id AS tags_id, tags.name AS tags_name
FROM tags) AS anon_1
2016-04-18 16:10:09,912 INFO sqlalchemy.engine.base.Engine ()
3
"""
# Get all images created yesterday (py2 print statement converted to the
# function form; parentheses also replace the backslash continuations):
print(session.query(Image)
      .filter(Image.created_at < datetime.utcnow().date())
      .all())
"""
2016-04-18 16:10:09,914 INFO sqlalchemy.engine.base.Engine SELECT images.id AS images_id, images.uuid AS images_uuid, images.likes AS images_likes, images.created_at AS images_created_at
FROM images
WHERE images.created_at < ?
2016-04-18 16:10:09,914 INFO sqlalchemy.engine.base.Engine ('2016-04-18',)
[<Image (uuid='uuid_car', likes='0', created_at=2016-04-17 07:23:10)>]
"""
# Get all images, that belong to the tag 'car' or 'animal', using a
# subselect (EXISTS) -- print converted to the py2/py3-compatible form:
print(session.query(Image)
      .filter(Image.tags.any(Tag.name.in_(['car', 'animal'])))
      .all())
"""
2016-04-18 16:10:09,917 INFO sqlalchemy.engine.base.Engine SELECT images.id AS images_id, images.uuid AS images_uuid, images.likes AS images_likes, images.created_at AS images_created_at
FROM images
WHERE EXISTS (SELECT 1
FROM images_tags, tags
WHERE images.id = images_tags.image_id AND tags.id = images_tags.tag_id AND tags.name IN (?, ?))
2016-04-18 16:10:09,917 INFO sqlalchemy.engine.base.Engine ('car', 'animal')
[<Image (uuid='uuid_car', likes='0', created_at=2016-04-17 07:23:10)>, <Image (uuid='uuid_anothercar', likes='0', created_at=2016-04-18 07:23:10)>, <Image (uuid='uuid_rhino', likes='12', created_at=2016-04-18 07:23:10)>]
"""
# This can also be expressed with a join (print converted to the
# py2/py3-compatible function form):
print(session.query(Image)
      .join(Tag, Image.tags)
      .filter(Tag.name.in_(['car', 'animal']))
      .all())
"""
2016-04-18 16:10:09,920 INFO sqlalchemy.engine.base.Engine SELECT images.id AS images_id, images.uuid AS images_uuid, images.likes AS images_likes, images.created_at AS images_created_at
FROM images JOIN images_tags AS images_tags_1 ON images.id = images_tags_1.image_id JOIN tags ON tags.id = images_tags_1.tag_id
WHERE tags.name IN (?, ?)
2016-04-18 16:10:09,920 INFO sqlalchemy.engine.base.Engine ('car', 'animal')
[<Image (uuid='uuid_car', likes='0', created_at=2016-04-17 07:23:10)>, <Image (uuid='uuid_anothercar', likes='0', created_at=2016-04-18 07:23:10)>, <Image (uuid='uuid_rhino', likes='12', created_at=2016-04-18 07:23:10)>]
"""
# Play around with functions:
from sqlalchemy.sql import func, desc
# Correlated subquery: the most recent created_at across all images.
max_date = session.query(func.max(Image.created_at))
# print converted to the py2/py3-compatible function form:
print(session.query(Image).filter(Image.created_at == max_date).first())
"""
2016-04-18 16:10:09,922 INFO sqlalchemy.engine.base.Engine SELECT images.id AS images_id, images.uuid AS images_uuid, images.likes AS images_likes, images.created_at AS images_created_at
FROM images
WHERE images.created_at = (SELECT max(images.created_at) AS max_1
FROM images)
LIMIT ? OFFSET ?
2016-04-18 16:10:09,922 INFO sqlalchemy.engine.base.Engine (1, 0)
<Image (uuid='uuid_rhino', likes='12', created_at=2016-04-18 07:23:10)>
"""
# Get a list of tags with the number of images (LEFT OUTER JOIN so tags
# with zero images would still appear):
q = session.query(Tag, func.count(Tag.name)) \
    .outerjoin(Image, Tag.images) \
    .group_by(Tag.name) \
    .order_by(desc(func.count(Tag.name))) \
    .all()
for tag, count in q:
    # py2 print statement converted to the compatible function form;
    # the format string itself is unchanged.
    print('Tag "%s" has %d images.' % (tag.name, count))
"""
2016-04-18 16:10:09,927 INFO sqlalchemy.engine.base.Engine SELECT tags.id AS tags_id, tags.name AS tags_name, count(tags.name) AS count_1
FROM tags LEFT OUTER JOIN (SELECT images_tags_1.image_id AS images_tags_1_image_id, images_tags_1.tag_id AS images_tags_1_tag_id, images.id AS images_id, images.uuid AS images_uuid, images.likes AS images_likes, images.created_at AS images_created_at
FROM images_tags AS images_tags_1 JOIN images ON images.id = images_tags_1.image_id) AS anon_1 ON tags.id = anon_1.images_tags_1_tag_id GROUP BY tags.name ORDER BY count(tags.name) DESC
2016-04-18 16:10:09,928 INFO sqlalchemy.engine.base.Engine ()
Tag "car" has 2 images.
Tag "animal" has 1 images.
Tag "cool" has 1 images.
"""
# Get images created in the last two hours and zero likes so far
# (print converted to the py2/py3-compatible function form):
print(session.query(Image)
      .join(Tag, Image.tags)
      .filter(Image.created_at > (datetime.utcnow() - timedelta(hours=2)))
      .filter(Image.likes == 0)
      .all())
"""
2016-04-18 16:10:09,930 INFO sqlalchemy.engine.base.Engine SELECT images.id AS images_id, images.uuid AS images_uuid, images.likes AS images_likes, images.created_at AS images_created_at
FROM images JOIN images_tags AS images_tags_1 ON images.id = images_tags_1.image_id JOIN tags ON tags.id = images_tags_1.tag_id
WHERE images.created_at > ? AND images.likes = ?
2016-04-18 16:10:09,931 INFO sqlalchemy.engine.base.Engine ('2016-04-18 06:10:09.929513', 0)
[<Image (uuid='uuid_anothercar', likes='0', created_at=2016-04-18 07:23:10)>]
"""
# Delete (prints converted to the py2/py3-compatible function form)
print('@@@ Delete')
image_rhino = session.query(Image).filter(Image.uuid == 'uuid_rhino').first()
print(image_rhino)
# ORM-level delete: cascades to the association rows and NULLs the
# comment foreign keys (see the transcript below).
session.delete(image_rhino)
session.commit()
"""
@@@ Delete
2016-04-18 16:36:55,016 INFO sqlalchemy.engine.base.Engine SELECT images.id AS images_id, images.uuid AS images_uuid, images.likes AS images_likes, images.created_at AS images_created_at
FROM images
WHERE images.uuid = ?
LIMIT ? OFFSET ?
2016-04-18 16:36:55,016 INFO sqlalchemy.engine.base.Engine ('uuid_rhino', 1, 0)
<Image (uuid='uuid_rhino', likes='18', created_at=2016-04-18 07:23:10)>
2016-04-18 16:36:55,018 INFO sqlalchemy.engine.base.Engine SELECT comments.id AS comments_id, comments.text AS comments_text, comments.image_id AS comments_image_id
FROM comments
WHERE ? = comments.image_id
2016-04-18 16:36:55,018 INFO sqlalchemy.engine.base.Engine (3,)
2016-04-18 16:36:55,020 INFO sqlalchemy.engine.base.Engine SELECT tags.id AS tags_id, tags.name AS tags_name
FROM tags, images_tags
WHERE ? = images_tags.image_id AND tags.id = images_tags.tag_id
2016-04-18 16:36:55,020 INFO sqlalchemy.engine.base.Engine (3,)
2016-04-18 16:36:55,021 INFO sqlalchemy.engine.base.Engine DELETE FROM images_tags WHERE images_tags.image_id = ? AND images_tags.tag_id = ?
2016-04-18 16:36:55,021 INFO sqlalchemy.engine.base.Engine (3, 3)
2016-04-18 16:36:55,022 INFO sqlalchemy.engine.base.Engine UPDATE comments SET image_id=? WHERE comments.id = ?
2016-04-18 16:36:55,023 INFO sqlalchemy.engine.base.Engine (None, 1)
2016-04-18 16:36:55,023 INFO sqlalchemy.engine.base.Engine DELETE FROM images WHERE images.id = ?
2016-04-18 16:36:55,023 INFO sqlalchemy.engine.base.Engine (3,)
2016-04-18 16:36:55,024 INFO sqlalchemy.engine.base.Engine COMMIT
"""
"""
print '@@@ Delete'
session.query(Image).filter(Image.uuid == 'uuid_rhino').delete()
session.commit()
@@@ Delete
2016-06-24 20:26:38,402 INFO sqlalchemy.engine.base.Engine DELETE FROM images WHERE images.uuid = ?
2016-06-24 20:26:38,402 INFO sqlalchemy.engine.base.Engine ('uuid_rhino',)
<Image (uuid='uuid_rhino', likes='1', created_at=2016-06-24 12:26:38)>
2016-06-24 20:26:38,402 INFO sqlalchemy.engine.base.Engine COMMIT
"""
|
import time
import dash_bootstrap_components as dbc
from dash import Input, Output, html
# Dash layout fragment: a button plus a spinner that wraps the output div,
# so the spinner shows while the callback below is running.
loading_spinner = html.Div(
    [
        dbc.Button("Load", id="loading-button", n_clicks=0),
        dbc.Spinner(html.Div(id="loading-output")),
    ]
)
@app.callback(
    Output("loading-output", "children"), [Input("loading-button", "n_clicks")]
)
def load_output(n):
    """Return the output text for the spinner demo.

    Sleeps briefly on every real click so the spinner is visible; before
    the first click (n is 0/None) reports that nothing has loaded yet.
    """
    if not n:
        return "Output not reloaded yet"
    time.sleep(1)
    return f"Output loaded {n} times"
|
# Read nine integers from stdin, then report the maximum value and its
# 1-based position in the input order.
a = [int(input()) for _ in range(9)]
m = max(a)
n = a.index(m) + 1
print(m)
print(n)
|
class Stack:
    """Minimal LIFO stack backed by a Python list, with a bracket-balance
    checker built on top of it."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        return self.items == []

    def push(self, item):
        self.items.append(item)

    def pop(self):
        return self.items.pop()

    def peek(self):
        return self.items[len(self.items) - 1]

    def size(self):
        return len(self.items)

    def is_balanced(self, text, brackets='〈〉()[]{}'):
        """Return 'Сбалансированно' if every bracket in *text* is properly
        matched and nested, else 'Несбалансированно'.

        *brackets* alternates opening/closing pairs; the pair is matched by
        index (opening[i] pairs with closing[i]). Non-bracket characters
        are ignored.
        """
        opening = brackets[::2]
        closing = brackets[1::2]
        stack = Stack()
        for character in text:
            if character in opening:
                stack.push(opening.index(character))
            elif character in closing:
                if stack.size() == 0:
                    # Closing bracket with nothing open.
                    return 'Несбалансированно'
                elif stack.peek() == closing.index(character):
                    stack.pop()
                else:
                    # Mismatched pair, e.g. '(]'.
                    return 'Несбалансированно'
        # Bug fix: leftover unclosed openers (e.g. '(((') previously slipped
        # through and were reported as balanced -- the stack must be empty.
        return 'Сбалансированно' if stack.size() == 0 else 'Несбалансированно'
# Exercise the balance checker on a mix of balanced and broken samples.
list_bracket = ['((({})))', '[([])((([[[]]])))]{()}', '{{[()]}}', '}{', '{{[(])]}}', '[[{())}]']
for sample in list_bracket:
    print(Stack().is_balanced(sample))
|
# -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
from flask import Flask
from webcv import public
from webcv.extensions import db, migrate, heroku # cache,
from webcv.settings import Config
def create_app(config_object=Config):
    """Application factory.

    :param config_object: The configuration object to use.
    :return: a fully configured Flask application.
    """
    # Use the package name (not the dotted module path) as the app name.
    package_name = __name__.split('.')[0]
    flask_app = Flask(package_name)
    flask_app.config.from_object(config_object)
    register_extensions(flask_app)
    register_blueprints(flask_app)
    return flask_app
def register_extensions(app):
    """Register Flask extensions on *app*."""
    # cache.init_app(app)  # caching is currently disabled
    for extension in (heroku, db):
        extension.init_app(app)
    # Migrate needs both the app and the SQLAlchemy instance.
    migrate.init_app(app, db)
    return None
def register_blueprints(app):
    """Register Flask blueprints on *app*."""
    blueprint = public.views.public
    app.register_blueprint(blueprint)
    return None
|
# -*- coding: utf-8 -*-
# MLC (Machine Learning Control): A genetic algorithm library to solve chaotic problems
# Copyright (C) 2015-2017, Thomas Duriez (thomas.duriez@gmail.com)
# Copyright (C) 2015, Adrian Durán (adrianmdu@gmail.com)
# Copyright (C) 2015-2017, Ezequiel Torres Feyuk (ezequiel.torresfeyuk@gmail.com)
# Copyright (C) 2016-2017, Marco Germano Zbrun (marco.germano@intraway.com)
# Copyright (C) 2016-2017, Raúl Lopez Skuba (raulopez0@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
# -*- coding: utf-8 -*-
import numpy as np
import MLC.Log.log as lg
import matplotlib.pyplot as plt
import random
import sys
import time
import csv
# import pandas as pd
from MLC.arduino.protocol import ArduinoUserInterface
from MLC.mlc_parameters.mlc_parameters import Config
from PyQt5.QtCore import Qt
def individual_data(indiv):
    """Load the holiday dataset (cached in the module-global ``g_data``)
    and evaluate *indiv*'s expression over its feature columns.

    Returns ``(x0..x5, y, b)`` where x0..x5 are the feature arrays, y is
    the target (whole-building energy) and b is the individual's output.
    Python 2 code (uses ``xrange``).
    """
    global g_data
    # Original tanh toy-problem reference, kept for comparison:
    # SAMPLES = 201
    # x = np.linspace(-10.0, 10.0, num=SAMPLES)
    # y = np.tanh(4*x)
    # dataset = pd.read_csv('/home/etorres/harsh.csv', delimiter='\t')
    # Poor-man's lazy init: merely referencing g_data raises NameError on
    # the first call, which triggers the one-time CSV load below.
    try:
        if g_data is None:
            pass
    except NameError:
        g_data = None
        # NOTE(review): source indentation was ambiguous here; the load is
        # placed inside the except block so the file is read only once per
        # process. If it were at function level every call would append
        # duplicate rows -- confirm against the original file.
        with open('/home/htomar/Holiday_Dataset.csv', 'r') as f:
            csv_reader = csv.reader(f, delimiter='\t')
            for row in csv_reader:
                if g_data is None:
                    # One accumulator list per CSV column.
                    g_data = [[] for x in xrange(len(row))]
                for index in xrange(len(row)):
                    g_data[index].append(float(row[index]))
    x0 = np.array(g_data[0])  # Time
    x1 = np.array(g_data[1])  # Temperature
    x2 = np.array(g_data[2])  # Wind
    x3 = np.array(g_data[3])  # Solar
    x4 = np.array(g_data[4])  # Humidity
    x5 = np.array(g_data[8])  # IsHoliday
    #x6 = np.array(g_data[9]) # Day of the Week
    y = np.array(g_data[5])   # Whole Building Energy
    # (debug prints of the columns removed for brevity; re-add if needed)
    # Noise addition from the toy problem is deliberately disabled:
    # config = Config.get_instance()
    # artificial_noise = config.getint('EVALUATOR', 'artificialnoise')
    # y_with_noise = y + [random.random() / 2 - 0.25 for _ in xrange(SAMPLES)] + artificial_noise * 500
    # Map the individual's sensor placeholders S0..S5 onto the feature
    # variable names x0..x5 used in the expression below.
    # formal: matlab-interpretable expression of the individual
    # TODO: This could be wrong. Check this line first
    if isinstance(indiv.get_formal(), str):
        formal = indiv.get_formal().replace('S0',
                                            'x0')  # Replacing S0 with x0 after obtaining the interpretable expression
        formal = formal.replace('S1', 'x1')
        formal = formal.replace('S2', 'x2')
        formal = formal.replace('S3', 'x3')
        formal = formal.replace('S4', 'x4')
        formal = formal.replace('S5', 'x5')
        #formal = formal.replace('S6', 'x6')
    else:
        # toy problem support for multiple controls
        formal = indiv.get_formal()[0].replace('S0', 'x0')  # Should all of them be [0]? Mostly not. And this can be compressed of course
        formal = formal.replace('S1', 'x1')
        formal = formal.replace('S2', 'x2')
        formal = formal.replace('S3', 'x3')
        formal = formal.replace('S4', 'x4')
        formal = formal.replace('S5', 'x5')
        #formal = formal.replace('S6', 'x6')
    # 'formal' is only logged; the actual evaluation goes through the tree.
    lg.logger_.debug('[POP][TOY_PROBLEM] Individual Formal: ' + formal)
    b = indiv.get_tree().calculate_expression([x0, x1, x2, x3, x4, x5])
    # If the expression doesn't have the term 'x',
    # the eval returns a value (float) instead of an array.
    # In that case transform it to an array
    # (handled by the caller via b.size -- see cost()):
    # if type(b) == float:
    #     b = np.repeat(b, SAMPLES)
    # return x, y, y_with_noise, b
    return x0, x1, x2, x3, x4, x5, y, b
def cost(indiv):
    """Mean squared error between the individual's output and the target.

    Returns ``sum((b - y)**2) / b.size`` as a float; scalar outputs
    (no .size attribute) are divided by 1.
    """
    # x, y, y_with_noise, b = individual_data(indiv)
    _x0, _x1, _x2, _x3, _x4, _x5, y, b = individual_data(indiv)
    # Deactivate the numpy warnings, because this sum could raise an
    # overflow runtime warning from time to time.
    np.seterr(all='ignore')
    # Scalar results have no .size; fall back to 1 element.
    array_size = getattr(b, 'size', 1)
    cost_value = float(np.sum((b - y) ** 2)) / array_size
    np.seterr(all='warn')
    return cost_value
# ==============================================================================
def show_best(index, generation, indiv, cost, block=True):
    """Plot the best individual of a generation against the target data.

    Top subplot: the target curve y and the individual's output b.
    Bottom subplot: the per-sample error on a log scale.
    ``block`` is forwarded to plt.show() (False lets the caller continue).
    """
    # #x, y, y_with_noise, b = individual_data(indiv)
    x0, x1, x2, x3, x4, x5, y, b = individual_data(indiv)
    # Sample index used as the x axis of both subplots.
    x = np.linspace(0, y.size - 1, num=y.size)
    #mean_squared_error = np.sqrt((y - b)**2 / (1 + np.absolute(x**2)))
    mean_squared_error = y - b  # This is just mean error (signed), despite the name
    # Put figure window on top of all other windows (Qt-specific).
    fig = plt.figure()
    fig.canvas.manager.window.setWindowModality(Qt.ApplicationModal)
    fig.canvas.manager.window.setWindowTitle("Best Individual")
    # get_formal() may return a list (multiple controls) or a plain string.
    formal = None
    if type(indiv.get_formal()) == list:
        formal = indiv.get_formal()[0]
    else:
        formal = indiv.get_formal()
    plt.rc('font', family='serif')
    plt.suptitle("Generation N#{0} - Individual N#{1}\n"
                 "Cost: {2}\n Formal: {3}".format(generation,
                                                  index,
                                                  cost,
                                                  formal),
                 fontsize=12)
    plt.subplot(2, 1, 1)
    line1, = plt.plot(x, y, color='r', linewidth=2, label='Curve without noise')
    line3, = plt.plot(x, b, color='k', linewidth=2, label='Control Law (Individual)')
    plt.ylabel('Functions', fontsize=12, fontweight='bold')
    plt.xlabel('Samples', fontsize=12, fontweight='bold')
    plt.legend(handles=[line1, line3], loc=2)
    plt.grid(True)
    plt.subplot(2, 1, 2)
    plt.plot(x, mean_squared_error, '*r')
    plt.ylabel('Mean Squared Error', fontsize=12, fontweight='bold')
    plt.xlabel('Samples', fontsize=12, fontweight='bold')
    plt.grid(True)
    # Log scale so small errors remain visible next to outliers.
    plt.yscale('log')
    plt.show(block=block)
|
# -*- coding: utf-8 -*-
import sys
from PyQt4 import QtCore, QtGui, uic
import PixivUtil
import threading
import cache
import PixivNotifier
from bs4 import BeautifulSoup
import requests
# Load the Qt Designer UI at import time; returns (form_class, base_class).
DlgUI, QtBaseClass = uic.loadUiType("IllustDialog.ui")
# pixiv endpoint for bookmarking an illustration (illust_id is appended).
bookmarkUrl = 'https://www.pixiv.net/bookmark_add.php?type=illust&illust_id='
class MyView(QtGui.QGraphicsView):
    """Graphics view with mouse-wheel zoom and double-click zoom reset.

    ``self.factor`` accumulates the inverse of every scale applied, so
    scaling by it in mouseDoubleClickEvent restores the original zoom.
    """

    def __init__(self, parent = None):
        super(MyView, self).__init__(parent)
        # qss is the module-level stylesheet string defined below the class;
        # it is resolved at instantiation time, after the module has loaded.
        self.setStyleSheet(qss)
        self.setFrameShape(QtGui.QFrame.NoFrame)
        self.factor = 1.0

    def wheelEvent(self, e):
        # NOTE(review): delta() < 0 applies a zoom-IN scale here, which is
        # the opposite of the usual convention -- confirm intended direction.
        if e.delta() < 0:
            self.factor /= 1.2
            self.scale(1.2 / 1.0, 1.2 / 1.0)
        else:
            self.factor *= 1.2
            self.scale(1.0 / 1.2, 1.0 / 1.2)

    def mouseDoubleClickEvent(self, e):
        # Undo the accumulated zoom and reset the tracker.
        self.scale(self.factor, self.factor)
        self.factor = 1.0
class IllustDialog(QtGui.QMainWindow, DlgUI):
    """Window showing a single Pixiv illustration with a bookmark toggle."""

    def __init__(self, title, id, url, bookmark, parent=None):
        """
        :param title: window title (normally the illustration title)
        :param id: Pixiv illustration id
        :param url: page URL, used as Referer for Pixiv requests
        :param bookmark: True when the illustration is already bookmarked
        """
        super(IllustDialog, self).__init__(parent)
        self.setupUi(self)
        self.setWindowTitle(title)
        self.id = str(id)
        self.url = str(url)
        self.bookmarked = bookmark
        self.bookmark.clicked.connect(self.addBookmark)
        self.image = MyView(self)
        self.image.setGeometry(0, 0, 500, 500)
        self.refreshBookmarkState()

    def setImage(self, img):
        # Wrap the pixmap in a scene so MyView can zoom/pan it.
        scene = QtGui.QGraphicsScene()
        scene.addPixmap(img)
        self.image.setScene(scene)
        self.image.scale(1.0, 1.0)

    def resizeEvent(self, e):
        # Pin the control bar to the bottom; the image view fills the rest.
        self.control.setGeometry(0, e.size().height() - self.control.geometry().height(),
                                 e.size().width(), self.control.geometry().height())
        self.image.setGeometry(0, 0, e.size().width(),
                               e.size().height() - self.control.geometry().height())

    def addBookmark(self):
        # BUG FIX: the POST used to run unconditionally, but `form` was only
        # defined on the not-yet-bookmarked path, so un-bookmarking raised
        # NameError.  The request is now only sent when adding a bookmark.
        if not self.bookmarked:
            # Fetch the bookmark page to extract the CSRF token ('tt').
            pageHtml = PixivUtil.get(PixivUtil.pixiv.getServer(),
                                     bookmarkUrl + self.id, headers=PixivUtil.create_header(self.url)).text
            page = BeautifulSoup(pageHtml, 'lxml')
            token = str(page.find('input', attrs={'name': 'tt'})['value'])
            form = {
                'mode': 'add',
                'tt': token,
                'id': self.id,
                'type': 'illust',
                'from_sid': '',
                'comment': '',
                'tag': '',
                'restrict': '0'
            }
            PixivUtil.post(PixivUtil.pixiv.getServer(),
                           bookmarkUrl + self.id, headers=PixivUtil.create_header(self.url),
                           data=form
                           )
        else:
            # TODO(review): removing a bookmark server-side is not implemented;
            # only the local state is toggled in that case.
            pass
        self.bookmarked = not self.bookmarked
        self.refreshBookmarkState()

    def refreshBookmarkState(self):
        # Hollow star = not bookmarked, filled star = bookmarked.
        self.bookmark.setText(u'☆' if not self.bookmarked else u'★')
# Stylesheet for MyView: transparent background plus slim, semi-transparent
# rounded scroll bars (vertical and horizontal) with image-based arrow buttons.
qss = """
QGraphicsView
{
background: transparent;
}
QScrollBar:vertical
{
width:12px;
background:rgba(0,0,0,0%);
margin:0px,0px,0px,0px;
padding-top:13px;
padding-bottom:13px;
}
QScrollBar::handle:vertical
{
width:12px;
background:rgba(0,0,0,25%);
border-radius:6px;
min-height:20;
}
QScrollBar::handle:vertical:hover
{
width:12px;
background:rgba(0,0,0,50%);
border-radius:6px;
min-height:20;
}
QScrollBar::add-line:vertical
{
height:13px;width:12px;
border-image:url(:/images/a/3.png);
subcontrol-position:bottom;
}
QScrollBar::sub-line:vertical
{
height:13px;width:12px;
border-image:url(:/images/a/1.png);
subcontrol-position:top;
}
QScrollBar::add-line:vertical:hover
{
height:13px;width:12px;
border-image:url(:/images/a/4.png);
subcontrol-position:bottom;
}
QScrollBar::sub-line:vertical:hover
{
height:13px;width:12px;
border-image:url(:/images/a/2.png);
subcontrol-position:top;
}
QScrollBar::add-page:vertical,QScrollBar::sub-page:vertical
{
background:rgba(0,0,0,10%);
border-radius:6px;
}
QScrollBar:horizontal
{
height:12px;
background:rgba(0,0,0,0%);
margin:0px,0px,0px,0px;
padding-left:13px;
padding-right:13px;
}
QScrollBar::handle:horizontal
{
height:12px;
background:rgba(0,0,0,25%);
border-radius:6px;
min-width:20;
}
QScrollBar::handle:horizontal:hover
{
height:12px;
background:rgba(0,0,0,50%);
border-radius:6px;
min-width:20;
}
QScrollBar::add-line:horizontal
{
height:12px;width:13px;
border-image:url(:/images/a/3.png);
subcontrol-position:right;
}
QScrollBar::sub-line:horizontal
{
height:12px;width:13px;
border-image:url(:/images/a/1.png);
subcontrol-position:left;
}
QScrollBar::add-line:horizontal:hover
{
height:12px;width:13px;
border-image:url(:/images/a/4.png);
subcontrol-position:right;
}
QScrollBar::sub-line:horizontal:hover
{
height:12px;width:13px;
border-image:url(:/images/a/2.png);
subcontrol-position:left;
}
QScrollBar::add-page:horizontal,QScrollBar::sub-page:horizontal
{
background:rgba(0,0,0,10%);
border-radius:6px;
}
"""
if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    # BUG FIX: IllustDialog requires (title, id, url, bookmark); the old
    # zero-argument call raised TypeError.  Use placeholder demo values here.
    w = IllustDialog("IllustDialog demo", 0, "", False)
    w.show()
    sys.exit(app.exec_())
#!/usr/bin/env python
import os
import re
import collections
import argparse
"""
File name : file_searching.py
script that implements the following scenario:
You have a big directory tree with many files and directories in it.
Some of those files have the extension '.inform'.
Some of the files with that extension contain instances of the string 'port#nnnn', where nnnn is a hexadecimal number
of some length.
"""
__author__ = 'Rebeca Perez Lainez'
__email__ = 'rebeca.perez.lainez@gmail.com'
# Matches an entire line of the form 'port#<hex>': optional 0x/0X prefix,
# no leading zero, 2-8 hex digits, capped at the signed 32-bit maximum
# (7fffffff) by the optional leading [1-7].  The single capture group is
# the hex number itself.
PORT_HEXADECIMAL_PATTERN = "^port#((?:0[xX])?(?:[1-7])?[1-9a-fA-F][0-9a-fA-F]{1,6})$"
def display_port_frequency(counter):
    """Print the port/frequency table produced by get_port_frequency().

    :param counter: collections.Counter mapping port value -> frequency
    """
    # FIX: parenthesized print works under both Python 2 and Python 3;
    # the old 'print expr' statement form is a SyntaxError on Python 3.
    print(get_port_frequency(counter))
def get_port_frequency(counter):
    """Render *counter* as a text table.

    Format::

        port frequency
        nnnnn frequency
        ...

    :param counter: collections.Counter mapping port value -> frequency
    :return: the formatted table as a single string
    """
    lines = ["port frequency\n"]
    # most_common() yields (port, frequency) pairs, highest frequency first.
    lines.extend("{} {}\n".format(port, freq)
                 for port, freq in counter.most_common())
    return "".join(lines)
def find_files(initial_path):
    """Walk *initial_path* and count 'port#nnnn' occurrences in .inform files.

    nnnn is a hexadecimal number up to the 32-bit maximum; see
    PORT_HEXADECIMAL_PATTERN for the exact accepted forms.

    Assumptions:
    - One line can only contain one port#nnnn sequence
    - Lower and capital letters are allowed for hexadecimal digits
    - The range of 32-bit hexadecimal numbers is [0-7fffffff]
    - The value may start with 0x/0X or carry no prefix
    - A value cannot start with 0; e.g. 0x09 is invalid

    :param initial_path: the root path of the search
    :return: collections.Counter mapping port value -> frequency
    """
    # Compile once instead of re-parsing the pattern for every line.
    pattern = re.compile(PORT_HEXADECIMAL_PATTERN)
    counts = collections.Counter()
    for path, subdirs, files in os.walk(initial_path):
        for fname in files:  # renamed from 'file' to avoid shadowing the builtin
            if fname.endswith(".inform"):
                # FIX: 'with' guarantees the handle is closed even if the
                # iteration raises; the old code could leak the handle.
                with open(os.path.join(path, fname), "r") as f:
                    for line in f:
                        # findall() returns the captured port numbers.
                        # FIX: update() is O(matches) per line instead of
                        # rebuilding the whole Counter via 'c = c + Counter(...)'.
                        counts.update(pattern.findall(line))
    return counts
if __name__ == "__main__":
    # CLI entry point: --directory selects the search root; with no
    # arguments the usage text is printed instead.
    parser = argparse.ArgumentParser(description='Find port frequency')
    parser.add_argument('--directory', help='Initial directory')
    args = parser.parse_args()
    if args.directory:
        display_port_frequency(find_files(args.directory))
    else:
        parser.print_help()
|
from django.contrib import admin
from django.contrib.contenttypes.admin import GenericTabularInline
from mptt.admin import DraggableMPTTAdmin
from .models import Shop, Link, Address, Phone, Page, Menu, MenuItem
class MenuItemsInline(GenericTabularInline):
    # Generic (content-type based) inline so MenuItem rows can be edited on
    # the admin page of any related object (used by PageAdmin below).
    model = MenuItem
    extra = 1
class LinkInline(admin.TabularInline):
    # Inline editor for a shop's links.
    model = Link
    extra = 1
class AddressInline(admin.TabularInline):
    # Inline editor for a shop's addresses.
    model = Address
    extra = 1
class PhoneInline(admin.TabularInline):
    # Inline editor for a shop's phone numbers.
    model = Phone
    extra = 1
@admin.register(Shop)
class ShopAdmin(admin.ModelAdmin):
    # Edit a shop's links, addresses and phones directly on the shop page.
    inlines = (LinkInline, AddressInline, PhoneInline)
@admin.register(Page)
class PageAdmin(admin.ModelAdmin):
    # Attach menu items to a page through the generic inline.
    inlines = (MenuItemsInline,)
class MenuItemInline(admin.TabularInline):
    # Plain (non-generic) inline for MenuItem, used on the Menu admin page.
    model = MenuItem
    extra = 1
@admin.register(MenuItem)
class MenuItemAdmin(DraggableMPTTAdmin):
    # Tree-aware admin: menu items can be reordered by drag and drop.
    pass
@admin.register(Menu)
class MenuAdmin(admin.ModelAdmin):
    # Edit a menu's items inline on the menu page.
    inlines = (MenuItemInline,)
|
from os import path
from setuptools import setup, find_packages
# Runtime and test dependencies are read from the pinned requirement files
# so pip/CI stay in sync with setup() below.
with open('requirements.txt') as reqs_file:
    requirements = reqs_file.read().splitlines()
with open('test-requirements.txt') as reqs_file:
    test_requirements = reqs_file.read().splitlines()
# Get the long description from the relevant file
long_description = 'PyDriller is a Python framework that helps developers on ' \
                   'mining software repositories. With PyDriller' \
                   ' you can easily extract information from any Git ' \
                   'repository, such as commits, developers, ' \
                   'modifications, diffs, and source codes, and ' \
                   'quickly export CSV files.'
def get_version():
    """Extract __version__ from pydriller/__init__.py without importing it."""
    init_py = path.join(path.dirname(__file__), 'pydriller', '__init__.py')
    with open(init_py) as f:
        for line in f:
            if not line.startswith('__version__'):
                continue
            # The version string may be single- or double-quoted.
            quote = '"' if '"' in line else "'"
            return line.split(quote)[1]
    raise RuntimeError("Unable to find version string.")
# Package metadata; version is read from the package itself (single source
# of truth), dependencies come from the requirement files above.
setup(
    name='PyDriller',
    description='Framework for MSR',
    long_description=long_description,
    author='Davide Spadini',
    author_email='spadini.davide@gmail.com',
    version=get_version(),
    packages=find_packages('.', exclude=['tests*']),
    url='https://github.com/ishepard/pydriller',
    license='Apache License',
    package_dir={'pydriller': 'pydriller'},
    python_requires='>=3.5',
    install_requires=requirements,
    tests_require=requirements + test_requirements,
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Software Development :: Libraries :: Python Modules',
        "Operating System :: OS Independent",
        "Operating System :: POSIX",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: MacOS :: MacOS X",
    ]
)
|
from logging import error, info
import requests
from furl import furl
from retry import retry
from utilities import constants
class Http:
    """Wrapper around a requests session adding retries, rate-limit logging
    and automatic page-size reduction for flaky paginated endpoints."""

    def __init__(self, session_builder):
        # session_builder: zero-argument callable returning a configured session.
        self.session = session_builder()

    # BUG FIX: 'A or B' evaluates to just A, so Timeout was never retried.
    # retry expects a tuple of exception types.
    @retry((requests.exceptions.ConnectionError, requests.exceptions.Timeout),
           delay=constants.Requests.retry_delay(),
           backoff=constants.Requests.retry_backoff(), tries=constants.Requests.retry_max_tries())
    def __get__(self, url):
        response = self.session.get(url, timeout=60)
        # rate limiting headers do not exist for all responses (i.e. cached responses)
        observed_header = "ratelimit-observed"
        limit_header = "ratelimit-limit"
        # BUG FIX: the old test ('observed_header and limit_header in ...')
        # only checked membership of the second header; check both.
        if observed_header in response.headers and limit_header in response.headers:
            self.__log_rate_limit_info__(response.headers[observed_header],
                                         response.headers[limit_header])
        return response

    @staticmethod
    def __adjust_paging__(original_url, page_size):
        # Replace any existing per_page query parameter with page_size.
        f = furl(original_url).remove(["per_page"])
        return f.add({"per_page": page_size}).url

    def get_with_retry_and_paging_adjustment(self, url):
        """GET *url*, shrinking the page size on transport errors.

        :return: the successful response, or None when the request failed
                 with a non-transport RequestException and was skipped.
        :raises: the last ConnectionError/Timeout when every page size failed.
        """
        def log_or_raise_error(error_type, current_page_size, current_url, exc):
            error(
                f"[!] {error_type}: request failed. Adjusting page size to {current_page_size} for GET on {current_url}")
            # Out of fallback sizes: propagate the original exception.
            if current_page_size <= 1:
                raise exc

        for page_size in [20, 10, 5, 1]:
            url = Http.__adjust_paging__(url, page_size)
            try:
                return self.__get__(url)
            except requests.exceptions.ConnectionError as e:
                log_or_raise_error("ConnectionError", page_size, url, e)
            except requests.exceptions.Timeout as e:
                log_or_raise_error("Timeout", page_size, url, e)
            except requests.exceptions.RequestException as e:
                # BUG FIX: requests' Response exposes .status_code, not .status,
                # and the old code returned a possibly-unbound 'response'.
                error("[!] RequestException (%s): Skipping %s", e.response.status_code, url)
                return None

    @staticmethod
    def __log_rate_limit_info__(observed, limit):
        # Only log once usage reaches 90% of the quota.
        if (int(observed) / int(limit)) >= .9:
            info("[*] Rate Limit Usage: (%s/%s)", observed, limit)
|
import numpy as np
from numpy import fft
import matplotlib.pyplot as plt
from scipy.signal import argrelextrema
import librosa
from librosa import display
# Path of the audio sample analysed by this script.
filename = 'Samples/Anga.wav' ## Test Signal
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def load_audiofile(filename):
    """Load an audio file with librosa's default settings.

    :param filename: path to the audio file
    :return: (audio sample array, sample rate)
    """
    audio, Fs = librosa.load(filename)
    return audio, Fs
def Ry_Base(Beats, Rx, Rz):
    """Group per-beat Rx/Rz values into a nested dict, one entry per beat.

    Ry: onset strengths --> Theta, Rz: the frequency components --> Psi.
    The last beat gets no entry (it has no following interval).

    :return: {'Beat1': {'Rx 1': Rx[0], 'Rz 1': Rz[0]}, 'Beat2': {...}, ...}
    """
    base = {}
    # Only the first len(Beats) - 1 beats produce an entry.
    for n in range(1, len(Beats)):
        base['Beat{}'.format(n)] = {
            'Rx {}'.format(n): Rx[n - 1],
            'Rz {}'.format(n): Rz[n - 1],
        }
    return base
class audiofile:
    """Wraps a loaded audio signal and derives beat/onset timing from it."""

    def __init__(self, audio, samplerate, duration):
        self.fs = samplerate   # sample rate in Hz
        self.dur = duration    # duration; not used by the methods below -- TODO confirm callers need it
        self.audio = audio     # sample array as returned by librosa.load

    def beat_detect(self):
        """Estimate an idealized beat grid from the audio.

        :return: (B_time: beat times rebuilt on a uniform average-interval grid,
                  sub_div: quarter-beat subdivision times on the same grid,
                  tempo: tempo estimate from librosa.beat.beat_track)
        """
        tempo, beats = librosa.beat.beat_track(y=self.audio, sr=self.fs)
        beat_times = librosa.frames_to_time(beats, sr=self.fs)
        # Inter-beat intervals, rounded to 2 decimals.
        B_Intv = np.linspace(0, len(beat_times)-2, num=len(beat_times)-1)
        for index, beat in enumerate(beat_times):
            if index != 0:
                bintv = beat_times[index] - beat_times[index-1]
                B_Intv[index-1] = round(bintv, 2)
        avg_beat = round(np.sum(B_Intv) / len(B_Intv), 2)
        # Idealized beat times: i * avg_beat for each beat slot.
        B_time = np.linspace(0, len(beat_times)-2, num=len(beat_times)-1)
        for i, val in enumerate(B_time):
            B_time[i] = round(i * avg_beat, 2)
        # Quarter-beat subdivisions spanning the same range.
        sub_div = np.linspace(0, 5*(len(beat_times)-2), num=5*(len(beat_times)-2))
        for div, sub in enumerate(sub_div):
            sub_div[div] = round(div * (avg_beat/4), 2)
        return B_time, sub_div, tempo

    def tatum_detect(self):
        """Detect onset (tatum) times.

        :return: (onset times in seconds, full onset-strength envelope)
        """
        onset = librosa.onset.onset_strength(y=self.audio, sr=self.fs)
        times = librosa.times_like(onset, sr=self.fs)
        onset_detect = librosa.onset.onset_detect(onset_envelope=onset, sr=self.fs)
        tatum_times = times[onset_detect]
        return tatum_times, onset
class Beat:
    """One analysed audio block: beat grid, tatum onsets and helpers that
    derive per-beat features from them."""

    def __init__(self, audio_blk, beat_time, tatum_times, sub_div, sample_rate): ## Tatum is type np.array
        self.audio_blk = audio_blk
        self.fs = sample_rate
        self.beat_time = beat_time
        self.sub_div = sub_div
        # tatum_times is the (times, onset_strength) pair from audiofile.tatum_detect().
        self.tatum_times, self.tatum_strength = tatum_times
        self.beat_samp = np.linspace(0, len(self.beat_time)-1, num=len(self.beat_time)-1) ## Beats in samples
        for element, time in enumerate(self.beat_samp):
            self.beat_samp[element] = int(self.beat_time[element] * self.fs)
        self.tatum_samp = np.linspace(0, len(self.tatum_times)-1, num=len(self.tatum_times)-1) ## Tatum in samples
        for element, time in enumerate(self.tatum_samp):
            self.tatum_samp[element] = int(self.tatum_times[element] * self.fs)

    def Transisent_strength(self):
        """Per beat, max-normalise the onset strengths of its tatums.

        :return: list with one entry per beat: a rounded, normalised array
                 of strengths, or [] when the beat has no tatums.
        """
        tat_S = []
        offset = 0
        for index, beat in enumerate(self.beat_time):
            # Tatums falling inside [previous beat, current beat).
            tat_subT = self.tatum_times[(self.tatum_times < beat)&
                                        (self.tatum_times >= self.beat_time[index-1])]
            tat_sub2 = np.zeros(len(tat_subT))
            # NOTE(review): comparing an ndarray to [] is fragile (NumPy
            # deprecation territory); the intent looks like
            # 'len(tat_subT) > 0' -- confirm before relying on it.
            if (index != 0) & (tat_subT != []):
                for i, j in enumerate(tat_subT):
                    #tat_samp = int(j * self.fs)
                    tat_sub2[i] = self.tatum_strength[i + offset]#; print('check: ', self.tatum_strength[i + offset])
                offset += len(tat_subT)
                # Normalise by the loudest tatum in the beat, round to 2 dp.
                tat_element = tat_sub2 / np.max(tat_sub2)#; print('check: ', tat_element)
                tat_element2 = np.linspace(0, len(tat_element), num=len(tat_element))
                for x, y in enumerate(tat_element):
                    y = round(y, 2)
                    tat_element2[x] = y
                tat_S.append(tat_element2)
            else:
                tat_S.append([])
        return tat_S

    def Harmonic(self): ## Q: How does the perceived perception range affect the result of the qunatum circuit? (reformulate and clean up question)
        """Per tatum, estimate dominant frequencies (100-1000 Hz band) from
        the harmonic component of the audio.

        :return: list of per-tatum frequency arrays
        """
        audio = self.audio_blk
        # Split harmonic from percussive content via HPSS and re-synthesise
        # the harmonic part only.
        stft = librosa.stft(np.array(audio))
        harm, perc = librosa.decompose.hpss(stft)
        h_audio = librosa.core.istft(harm)
        freq_array = []; beat_fa = []
        prev_beat = 0
        for element, beat in enumerate(self.beat_samp):
            # Tatums (in samples) inside the current beat interval.
            tat_subT = self.tatum_samp[(self.tatum_samp < self.beat_samp[element])&
                                       (self.tatum_samp >= self.beat_samp[element-1])]
            for index, val in enumerate(self.tatum_samp):
                if (val >= prev_beat) & (val < beat):
                    if (index != 0) & (index < len(self.tatum_samp)):
                        sub_blk = h_audio[int(self.tatum_samp[index-1]):int(self.tatum_samp[index])]
                        fft_subblk = np.real(np.fft.fft(sub_blk))
                        # Convert local-maxima bin indices into frequencies.
                        fft_subblk = self.fs / (2 * np.array(argrelextrema(fft_subblk, np.greater)))
                        pos_fft = fft_subblk[fft_subblk >= 0]
                        # Keep only the 100-1000 Hz band.
                        pos_fft = pos_fft[(pos_fft < 1000) & (pos_fft > 100)]
                        freq_pitch = pos_fft[:len(tat_subT)] ##give me the first number of qubits out, so for testing 3, experiment with the perceptual rang of human hearing and where music lies!!!
                        freq_array.append(freq_pitch) ## This is going to be deafult rang for now....
                        #print(freq_array, np.max(freq_array),np.min(freq_array), len(freq_array))
                        # NOTE(review): beatfa is only bound inside this branch;
                        # if it never executes, the return below raises
                        # UnboundLocalError -- confirm inputs guarantee a hit.
                        beatfa = freq_array
            prev_beat = beat
        return beatfa
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
#!/usr/bin/python3
import sys
import json
import subprocess
from urllib.request import urlopen
# Number of most-recent tags to fetch from Docker Hub.
num_tags = 20
url ='https://hub.docker.com/v2/repositories/factominc/factomd/tags/?page_size=%s' % num_tags
# Absolute path to the docker binary (avoids PATH surprises under sudo).
docker_path = '/usr/bin/docker'
def prompt(tag_list):
    """Interactively ask the user to pick one tag from *tag_list*.

    Re-prompts until a valid 1-based index is entered.

    :param tag_list: non-empty list of tag name strings
    :return: the chosen tag name
    """
    print("Please choose an image to install:")
    for i, tag in enumerate(tag_list):
        print("%s) %s" % (i+1, tag))
    choice = input("Enter Image Number: ")
    while True:
        try:
            index = int(choice)
            # BUG FIX: reject 0 and negative numbers; previously '0' (and
            # other negatives) silently selected a tag via negative indexing.
            if not 1 <= index <= len(tag_list):
                raise IndexError(index)
            return tag_list[index - 1]
        except (ValueError, IndexError):
            print("Not a valid selection. CTRL + C to exit or choose again.")
            choice = input("Enter Image Number: ")
def parse_results(results):
    """Filter Docker Hub tag records down to installable tag names.

    Keeps release tags (name starting with 'v') plus develop/master builds.

    :param results: iterable of tag dicts, each with a 'name' key
    :return: list of matching tag names in input order
    """
    def wanted(name):
        return name[0] == "v" or "develop" in name or "master" in name

    return [tag["name"] for tag in results if wanted(tag["name"])]
# Fetch the tag list and let the user choose one.
with urlopen(url) as response:
    # urlopen raises HTTPError for most non-2xx codes already; this guard
    # covers the remaining unexpected statuses.
    if response.status != 200:
        print("Error connecting to Docker Hub. Exiting...")
        sys.exit(1)
    response_content = response.read()
# BUG FIX: the decoded text was previously discarded ('response_content.decode(...)'
# on its own line had no effect); keep it and parse the decoded string.
data = json.loads(response_content.decode('utf-8'))
results = data["results"]
tag_list = parse_results(results)
selection = prompt(tag_list)
# Replace any existing factomd container with the selected image, mapping
# the API (8088), control panel (8090) and peer (8110) ports and reusing
# the named volumes for database and keys.
try:
    subprocess.call([docker_path, "stop", "factomd"])
    subprocess.call([docker_path, "rm", "factomd"])
    run_commands = [docker_path, 'run', '-d',
                    '--name', 'factomd',
                    '-v', 'factom_database:/root/.factom/m2',
                    '-v', 'factom_keys:/root/.factom/private',
                    '--restart', 'unless-stopped',
                    '-p', '8088:8088',
                    '-p', '8090:8090',
                    '-p', '8110:8110',
                    '-l', 'name=factomd',
                    'factominc/factomd:%s' % selection,
                    '-broadcastnum=16',
                    '-network=CUSTOM',
                    '-customnet=fct_community_test',
                    '-startdelay=600',
                    '-faulttimeout=120',
                    '-config=/root/.factom/private/factomd.conf']
    subprocess.call(run_commands)
except FileNotFoundError:
    # Raised when the docker binary itself is missing at docker_path.
    print("Unable to run docker.\nEither run as sudo or check path is correct: %s" % docker_path)
    sys.exit(2)
from distutils.core import setup
# NOTE(review): distutils is deprecated (removed in Python 3.12); consider
# migrating to setuptools.setup, which is call-compatible here.
setup(name='pysem',
      version='1.0',
      description='Simplesem interpreter',
      author='Davide Angelocola',
      author_email='davide.angelocola@gmail.com',
      url='http://bitbucket.org/dfa/pysem',
      package_dir = { '': 'src' },  # sources live under src/
      packages=['pysem'],
      )
|
"""
Module to handle specifically comminication actions with RabbitMQ
"""
# System Imports
import json
import os
# Framework / Library Imports
import pika
# Application Imports
# Local Imports
import config
from exceptions import ConnectionError
def get_connection():
    """
    Returns a connection object for RabbitMQ

    :raises ConnectionError: (HTTP 500) when the broker cannot be reached
    """
    try:
        credentials = pika.PlainCredentials(config.RABBITMQ['user'], config.RABBITMQ['password'])
        rmq_connection = pika.BlockingConnection(
            pika.ConnectionParameters(
                host=config.RABBITMQ['server'],
                port=config.RABBITMQ['port'],
                virtual_host='/',
                credentials=credentials,
                # Shown in the RabbitMQ management UI to identify this client.
                client_properties={
                    'connection_name': "Clockify Webhook Ingress Node: {}[{}]".format(
                        config.APP_NODE,
                        os.getpid()
                    )
                }
            )
        )
    except Exception as exc:
        # SECURITY FIX: never include the password in logs or error messages;
        # report only user, host and port.
        error_string = "Unable to connect using {username}@{server}:{port}".format(
            username=config.RABBITMQ['user'],
            server=config.RABBITMQ['server'],
            port=config.RABBITMQ['port']
        )
        print(error_string)
        raise ConnectionError('Connection Error: ' + error_string, status_code=500) from exc
    return rmq_connection
def publish_webhook(queue_payload, queue):
    """
    Publishes a webhook payload onto the inbound webhook queue for processing

    :param queue_payload: dict from create_queue_payload(); its 'task' entry
                          is mirrored into the AMQP message headers
    :param queue: target queue name (used as routing key on the default exchange)
    :return: True on success
    """
    conn = get_connection()
    # FIX: close the connection even when channel/publish raises, so a
    # failed publish no longer leaks the broker connection.
    try:
        channel = conn.channel()
        channel.basic_publish(
            exchange='',
            routing_key=queue,
            properties=pika.BasicProperties(
                content_type="application/json",
                headers={
                    'task': queue_payload['task']
                }
            ),
            body=json.dumps(queue_payload))
    finally:
        conn.close()
    return True
def create_queue_payload(req, task=None, payload_override=None):
    """
    Helper method to form the required queue payload from inbound object

    Empty header/param collections are represented as None in the payload.
    """
    # Assumes a Flask-style request: iterating req.headers/req.args yields
    # entries whose first element is the name -- TODO confirm with caller.
    headers = {str(h[0]): req.headers.get(str(h[0]), None) for h in req.headers}
    params = {str(a): req.args.get(str(a), None) for a in req.args}
    return {
        "task": task or "clockify.webhook",
        "meta": {
            "headers": headers if headers else None
        },
        "params": params if params else None,
        "payload": payload_override or req.json or None,
        "system": {
            "element": "clockify_webhook_ingress",
            "node": config.APP_NODE,
            "version": config.APP_VERSION,
            "version_date": config.APP_DATE
        }
    }
|
import names
from gtts import gTTS
import tempfile
import os
def spellApco(word):
    """Spell *word* using the APCO phonetic alphabet.

    Produces '<word>. X as in ... . <word>.'; spaces become pauses ('... ')
    and characters outside A-Z get a spoken fallback phrase.
    """
    alphabet = {
        'A': "Adam",
        'B': "Boy",
        'C': "Charles",
        'D': "David",
        'E': "Edward",
        'F': "Frank",
        'G': "George",
        'H': "Henry",
        'I': "Ida",
        'J': "John",
        'K': "King",
        'L': "Lincoln",
        'M': "Mary",
        'N': "Nora",
        'O': "Ocean",
        'P': "Paul",
        'Q': "Queen",
        'R': "Robert",
        'S': "Sam",
        'T': "Tom",
        'U': "Union",
        'V': "Victor",
        'W': "William",
        'X': "X-Ray",
        'Y': "Young",
        'Z': "Zebra",
    }
    # Say the word first, then each letter, then the word again.
    parts = [word + ". "]
    for ch in word:
        if ch == " ":
            parts.append("... ")
        else:
            phon = alphabet.get(ch.upper(), "Umm, I don't know this one.")
            parts.append(ch.upper() + " as in " + phon + ". ")
    parts.append(word + ".")
    return "".join(parts)
if __name__ == '__main__':
    # Get a random name
    print("Selecting random name...")
    name = names.get_full_name()
    # Spell out name
    spelling = spellApco(name)
    # Get MP3 of phrase
    print("Converting text to speech...")
    tts = gTTS(text=spelling)
    # Write MP3 to a temporary file (delete=False so the external player
    # can open it by name on Windows).
    f = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
    tts.write_to_fp(f)
    f.close()
    # Play MP3 in temporary file
    print("Speaking...")
    response = "R"
    while response.upper() == "R":
        # 'start' is Windows-specific; opens the default media player.
        os.system("start " + f.name)
        # Ask user to repeat or continue
        response = input("(R)epeat or (C)ontinue? ")
    # Close and delete temporary file
    os.remove(f.name)
    # Check is name is correct (case-insensitive comparison)
    response = input("What was the name? ")
    if response.upper() == name.upper():
        print("Correct! Well done!")
    else:
        print("The correct answer was '%s'." % name)
|
# -*- coding: utf-8 -*-
import logging
from django_cron import CronJobBase, Schedule
from .base import FetcherJob
# Module-level logger; not referenced in this module's visible code.
logger = logging.getLogger(__name__)
class HearthstoneJob(CronJobBase, FetcherJob):
    # Runs roughly hourly; retries ~10 minutes after a failure.
    RUN_EVERY_MINS = 59
    RETRY_AFTER_FAILURE_MINS = 9
    schedule = Schedule(run_every_mins=RUN_EVERY_MINS,
                        retry_after_failure_mins=RETRY_AFTER_FAILURE_MINS)
    code = 'yyfeed.fetcher.hearthstone'
    def name(self):
        # Feed identifier consumed by FetcherJob.
        return 'hearthstone'
class OoxxJob(CronJobBase, FetcherJob):
    # Runs roughly hourly; retries ~24 hours after a failure.
    RUN_EVERY_MINS = 59
    RETRY_AFTER_FAILURE_MINS = 1439
    schedule = Schedule(run_every_mins=RUN_EVERY_MINS,
                        retry_after_failure_mins=RETRY_AFTER_FAILURE_MINS)
    code = 'yyfeed.fetcher.ooxx'
    def name(self):
        # Feed identifier consumed by FetcherJob.
        return 'ooxx'
class IAppsJob(CronJobBase, FetcherJob):
    # Runs roughly hourly; retries ~3 hours after a failure.
    RUN_EVERY_MINS = 59
    RETRY_AFTER_FAILURE_MINS = 179
    schedule = Schedule(run_every_mins=RUN_EVERY_MINS,
                        retry_after_failure_mins=RETRY_AFTER_FAILURE_MINS)
    code = 'yyfeed.fetcher.iapps'
    def name(self):
        # Feed identifier consumed by FetcherJob.
        return 'iapps'
class SmzdmJob(CronJobBase, FetcherJob):
    # Runs roughly hourly; retries ~10 minutes after a failure.
    RUN_EVERY_MINS = 59
    RETRY_AFTER_FAILURE_MINS = 9
    schedule = Schedule(run_every_mins=RUN_EVERY_MINS,
                        retry_after_failure_mins=RETRY_AFTER_FAILURE_MINS)
    code = 'yyfeed.fetcher.smzdm'
    def name(self):
        # Feed identifier consumed by FetcherJob.
        return 'smzdm'
class TtrssJob(CronJobBase, FetcherJob):
    # Runs roughly hourly; retries ~6 hours after a failure.
    RUN_EVERY_MINS = 59
    RETRY_AFTER_FAILURE_MINS = 359
    schedule = Schedule(run_every_mins=RUN_EVERY_MINS,
                        retry_after_failure_mins=RETRY_AFTER_FAILURE_MINS)
    code = 'yyfeed.fetcher.ttrss'
    def name(self):
        # Feed identifier consumed by FetcherJob.
        return 'ttrss'
class RosiyyJob(CronJobBase, FetcherJob):
    # Runs roughly hourly; retries ~24 hours after a failure.
    RUN_EVERY_MINS = 59
    RETRY_AFTER_FAILURE_MINS = 1439
    schedule = Schedule(run_every_mins=RUN_EVERY_MINS,
                        retry_after_failure_mins=RETRY_AFTER_FAILURE_MINS)
    code = 'yyfeed.fetcher.rosiyy'
    def name(self):
        # Feed identifier consumed by FetcherJob.
        return 'rosiyy'
|
from django.shortcuts import redirect
from django.contrib import messages
# decorator used to ensure user in in session
def required_login(views_func):
    """Decorator that redirects to '/' unless a user is logged in.

    A user counts as logged in when 'user_id' is present in request.session.
    """
    from functools import wraps  # local import keeps this change self-contained

    # FIX: without wraps() the decorated view lost its __name__/__doc__,
    # which breaks introspection and debugging output.
    @wraps(views_func)
    def _wrapped_views_func(request, *args, **kwargs):
        if 'user_id' not in request.session:
            messages.error(request, "Please register an account or log in to proceed.", extra_tags="not_in_session")
            return redirect('/')
        else:
            return views_func(request, *args, **kwargs)
    return _wrapped_views_func
# coding:utf-8
import urllib.request, json
from bs4 import BeautifulSoup
import pandas as pd
# Baidu Zhaopin search API: full-time product-manager job listings in
# Shanghai, 20 results per page, first page (the query params are URL-encoded
# Chinese for "product manager recruitment" / "Shanghai").
url = 'http://zhaopin.baidu.com/api/quanzhiasync?query=%E4%BA%A7%E5%93%81%E7%BB%8F%E7%90%86+%E6%8B%9B%E8%81%98&sort_type=1&city_sug=%E4%B8%8A%E6%B5%B7&detailmode=close&rn=20&pn=0'
m1 = []  # 'age' field per listing (semantics per API -- TODO confirm)
m2 = []  # district per listing
content = {}
req = urllib.request.Request(url)
res = urllib.request.urlopen(req)
#html = (res.read()).decode()
jd = json.loads(res.read())
# Collect the two fields for every record in the response.
for i in jd['data']['main']['data']['disp_data']:
    m1.append(i['age'])
    m2.append(i['district'])
content['m1'] = m1
content['m2'] = m2
# Tabulate and print as a DataFrame.
jt = pd.DataFrame(content)
print(jt)
|
# Author:ambiguoustexture
# Date: 2020-02-13
from collections import Counter
from morphological_analysis import morphology_map
# Path to the MeCab-parsed text to analyse.
file_parsed = "./neko.txt.mecab"
words = morphology_map(file_parsed)
# Drop tokens whose part of speech is '記号' (symbol/punctuation).
words_without_punctuation = []
for word in words:
    if word['pos'] != '記号':
        words_without_punctuation.append(word)
# Count surface-form frequencies and print them, most common first.
word_count = Counter()
word_count.update([word['surface'] for word in words_without_punctuation])
items = word_count.most_common()
for item in items:
    print(item)
|
import unittest
from katas.kyu_7.the_office_1_outed import outed
class OutedTestCase(unittest.TestCase):
    """Tests for the kata solution outed(): maps a happiness-score dict plus
    one person's name to either 'Get Out Now!' or 'Nice Work Champ!'."""

    def test_equal_1(self):
        self.assertEqual(outed({
            'tim': 0, 'jim': 2, 'randy': 0, 'sandy': 7, 'andy': 0, 'katie': 5,
            'laura': 1, 'saajid': 2, 'alex': 3, 'john': 2, 'mr': 0}, 'laura'
        ), 'Get Out Now!')

    def test_equal_2(self):
        self.assertEqual(outed({
            'tim': 1, 'jim': 3, 'randy': 9, 'sandy': 6, 'andy': 7, 'katie': 6,
            'laura': 9, 'saajid': 9, 'alex': 9, 'john': 9, 'mr': 8}, 'katie'
        ), 'Nice Work Champ!')

    def test_equal_3(self):
        self.assertEqual(outed({
            'tim': 2, 'jim': 4, 'randy': 0, 'sandy': 5, 'andy': 8, 'katie': 6,
            'laura': 2, 'saajid': 2, 'alex': 3, 'john': 2, 'mr': 8}, 'john'
        ), 'Get Out Now!')

    # There is an issue with the division used in the katas Python solution
    # The following test can result in either string depending on the
    # Python version (floor division VS true division)
    # update after I get a response...
    # def test_equal_4(self):
    #     self.assertEqual(outed({
    #         'alex': 3, 'mr': 7, 'jim': 9, 'laura': 4, 'randy': 9, 'sandy': 6,
    #         'andy': 8, 'katie': 0, 'john': 4, 'tim': 0, 'saajid': 5}, 'jim'
    #     ), 'Nice Work Champ!')

    def test_equal_5(self):
        self.assertEqual(outed({
            'alex': 6, 'jim': 1, 'saajid': 9, 'laura': 5, 'tim': 4, 'randy': 0,
            'andy': 4, 'mr': 3, 'john': 1, 'sandy': 2, 'katie': 9}, 'saajid'
        ), 'Get Out Now!')

    def test_equal_6(self):
        self.assertEqual(outed({
            'alex': 3, 'jim': 7, 'saajid': 4, 'laura': 6, 'tim': 8, 'randy': 7,
            'andy': 8, 'mr': 9, 'john': 3, 'sandy': 0, 'katie': 1}, 'sandy'
        ), 'Get Out Now!')
|
import sale
import product
import mrp |
from django.utils.translation import gettext_lazy as _
from django.db.models import (
CASCADE,
Model,
DateTimeField,
CharField,
ForeignKey
)
from users.models import User
class Tag(Model):
    """User-owned tag with a globally unique name."""
    # Updated automatically on every save.
    modified_at = DateTimeField(
        _('Modified at'),
        auto_now=True
    )
    # Unique across ALL users (unique=True is global, not per-user).
    name = CharField(
        _('Name'),
        unique=True,
        max_length=200
    )
    # Owner; nullable, and tags are deleted together with their user.
    user = ForeignKey(
        User,
        null=True,
        on_delete=CASCADE,
        related_name='tags'
    )
    def __str__(self):
        return self.name
|
#!/bin/env python3
# System imports
import logging
import os
from collections import namedtuple
from os import uname
from sys import platform, getfilesystemencoding

# Third-party imports
from jproperties import Properties
from splunk_hec_handler import SplunkHecHandler
# setup logger utility for this script
logging.basicConfig(filename='transmute.log', filemode='w',format='%(asctime)s - PID:%(process)d - %(name)s - %(message)s', level=logging.INFO)
# global vars
SYSTEM_OS = platform
ENCODING = getfilesystemencoding()
# create logger specifically for splunk data
splunk_logger = logging.getLogger('splunk_logger')
splunk_logger.setLevel(logging.DEBUG)
# create and add log stream handler to it
stream_handler = logging.StreamHandler()
stream_handler.level = logging.DEBUG
splunk_logger.addHandler(stream_handler)
# splunk token -- NOTE(review): hard-coded sample HEC token; move to config
# or environment before real use.
token = "EA33046C-6FEC-4DC0-AC66-4326E58B54C3"
# Create Handler to push data to Splunk HTTP Event Collector
splunk_handler = SplunkHecHandler('sample.splunk.domain.com',
                                  token, index="hec",
                                  port=8080, proto='http', ssl_verify=False,
                                  source="evtx2json", sourcetype='xxxxxxxx_json')
splunk_logger.addHandler(splunk_handler)
# add additional fields and corresponding values to splunk
dict_obj = {'fields': {'color': 'yellow', 'api_endpoint': '/results', 'host': 'app01', 'index':'hec'},
            'user': 'foobar', 'app': 'my demo', 'severity': 'low', 'error codes': [1, 23, 34, 456]}
# send sample data to splunk_logger
splunk_logger.info(dict_obj)
# specify splunk ingestion parameters adhoc like so:
log_summary_evt = {'fields': {'index': 'adhoc', 'sourcetype': '_json', 'source': 'adv_example'}, 'exit code': 0, 'events logged': 100}
splunk_logger.debug(log_summary_evt)
# load java properties
p = Properties()
jpfile = '/home/kafka/apps/kafka/config/log4j.properties'
with open(jpfile, 'rb') as f:
    p.load(f, ENCODING)
# add to dictionary
log4j_json = dict()
log4j_json['source_file'] = jpfile
log4j_json.update(p)
# send to splunk
# NOTE(review): log4j_json is built above but 'p' is sent here; possibly
# {'fields': log4j_json} was intended -- confirm.
splunk_logger.info({'fields': p})
def os_enrich(prune_output=True):
    """
    returns dict of useful OS information

    :param prune_output: currently unused -- TODO(review): implement or remove
    """
    # uname() is POSIX-only; this script will fail on Windows here.
    osvars = uname()
    # NOTE: the 'fs_enconding' key typo is kept as-is; downstream Splunk
    # searches may already rely on it.
    os_data = { 'system_os': SYSTEM_OS,
                'fs_enconding': ENCODING,
                'sysname': osvars.sysname,
                'nodename': osvars.nodename,
                'machine': osvars.machine,
                'os_version': osvars.version,
                'os_release': osvars.release
              }
    return os_data
# send more data
splunk_logger.info({'fields': os_enrich()})
# you get the idea
# NOTE(review): os.environ needs a module-level 'import os'; the original
# import block only had 'from os import uname'.
splunk_logger.info({'fields': os.environ})
|
#!/usr/bin/python3
#
# Project: pyresteasy
# File: pyresteasy.py
#
# Copyright 2015 Matthew Mitchell
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import json
from urllib.parse import quote
# HTTP status lines used by handlers and by the HttpInterrupt subclasses below.
HTTP_OK = "200 OK"
HTTP_CREATED = "201 Created"
HTTP_NO_CONTENT = "204 No Content"
HTTP_BAD_REQUEST = "400 Bad Request"
HTTP_UNAUTHORISED = "401 Unauthorized"
HTTP_FORBIDDEN = "403 Forbidden"
HTTP_NOT_FOUND = "404 Not Found"
HTTP_METHOD_NOT_ALLOWED = "405 Method Not Allowed"
HTTP_CONFLICT = "409 Conflict"
HTTP_SERV_ERROR = "500 Internal Server Error"
# Content type used for all JSON request/response bodies.
MIME_JSON = "application/json"
def findMatch(obj, obj_list):
    """Return the first element of obj_list equal to obj, or None."""
    return next((candidate for candidate in obj_list if candidate == obj), None)
def headersList(d):
    """Convert a headers dict to the list-of-pairs form WSGI expects."""
    return [(name, value) for name, value in d.items()]
class JsonResp():
    """Decorator that JSON-encodes a handler's response.

    The wrapped handler must return a mutable [headers_dict, body] pair.
    On success the body becomes {"success": body}; when the handler raises
    HttpInterrupt, its body is re-encoded as {"error": body} and the
    exception re-raised with a JSON content type.
    """
    def __call__(self, f):
        from functools import wraps  # local import keeps this change self-contained

        # FIX: preserve the handler's __name__/__doc__ for introspection.
        @wraps(f)
        def jsonRespWrapper(*args, **kargs):
            try:
                resp = f(*args, **kargs)
                resp[0]["Content-Type"] = MIME_JSON
                resp[1] = json.dumps({"success" : resp[1]})
                return resp
            except HttpInterrupt as e:
                e.body = json.dumps({"error" : e.body})
                e.headers["Content-Type"] = MIME_JSON
                raise
        return jsonRespWrapper
class JsonReq():
    """Decorator that parses the WSGI request body as JSON.

    The decoded object is inserted as the argument after 'env'; malformed
    JSON (or an undecodable body) raises BadRequest.
    """
    def __call__(self, f):
        def jsonReqWrapper(self, app, env, *args, **kargs):
            try:
                # Read the whole request body, decode, and parse in one step;
                # UnicodeDecodeError is a ValueError, so both failure modes
                # are converted to BadRequest.
                body = json.loads(env['wsgi.input'].read().decode('utf-8'))
            except ValueError:
                raise BadRequest("Badly formatted JSON")
            return f(self, app, env, body, *args, **kargs)
        return jsonReqWrapper
class HttpInterrupt(Exception):
    """Exception carrying an HTTP error response.

    Subclasses bind HTTP_CODE to the status line to send.  ``headers`` and
    ``body`` are attached to the instance and emitted by the dispatcher.

    Fix: the original declared ``headers={}`` — a mutable default shared by
    every instance.  JsonResp mutates ``e.headers`` (sets Content-Type), so
    the shared default dict would be polluted and leak into every later
    HttpInterrupt created without explicit headers.
    """
    def __init__(self, body="", headers=None):
        # Fresh dict per instance unless the caller supplies one.
        self.headers = {} if headers is None else headers
        self.body = body
# Concrete HTTP error exceptions: each subclass binds the status line that
# the RestEasy dispatcher sends when the exception propagates out of a
# handler.
class NotFound(HttpInterrupt):
    HTTP_CODE = HTTP_NOT_FOUND
class BadRequest(HttpInterrupt):
    HTTP_CODE = HTTP_BAD_REQUEST
class ServError(HttpInterrupt):
    HTTP_CODE = HTTP_SERV_ERROR
class Unauthorised(HttpInterrupt):
    HTTP_CODE = HTTP_UNAUTHORISED
class Forbidden(HttpInterrupt):
    HTTP_CODE = HTTP_FORBIDDEN
class Conflict(HttpInterrupt):
    HTTP_CODE = HTTP_CONFLICT
class Resource():
    """Base class for REST resources; subclasses implement GET/POST/PUT/DELETE
    methods and declare a ``path`` URL pattern."""
    def hasMethod(self, method):
        """Return True if this resource implements the given HTTP method."""
        handler = getattr(self, method, None)
        return callable(handler)
class PathNode():
    """A node in the URL-routing trie built by RestEasy."""
    def __init__(self):
        # Child nodes matching literal path segments (PathStr).
        self.strs = []
        # Child nodes matching "{name}" placeholder segments (PathId).
        self.ids = []
        # Resource served at exactly this path, if any.
        self.resource = None
class PathStr(PathNode):
    """Routing-trie node that matches one literal path segment."""
    def __init__(self, s):
        super().__init__()
        self.s = s
    def __eq__(self, other):
        # Compare against either a raw segment string or another PathStr.
        target = other if type(other) is str else other.s
        return self.s == target
class PathId(PathNode):
    """Routing-trie node matching a "{name}" or "{name:int}" placeholder."""
    def __init__(self, name, seg_type=None):
        super().__init__()
        self.name = name
        # Only the "int" tag is recognised; anything else keeps raw strings.
        self.seg_type = int if seg_type == "int" else str
    def __eq__(self, other):
        # Against a raw segment string: True iff the segment is convertible
        # to this placeholder's type.  Against another PathId: structural
        # equality on name and type.
        if type(other) is str:
            if self.seg_type is str:
                return True
            try:
                int(other)
            except ValueError:
                return False
            return True
        return self.name == other.name and self.seg_type == other.seg_type
class RestEasy():
    """Minimal WSGI REST dispatcher.

    Resources are registered into a trie of PathNode objects: literal
    segments live in node.strs, "{name}"/"{name:int}" placeholders in
    node.ids.  On each request the path is walked through the trie, the
    matched resource's method (GET/POST/PUT/DELETE) is invoked with any
    captured identifiers as keyword arguments, and HttpInterrupt exceptions
    are turned into the corresponding HTTP error responses.
    """
    def __init__(self, resources):
        """Build the routing trie from an iterable of Resource objects, each
        declaring its URL pattern in ``resource.path``."""
        self.resources = PathNode()
        for resource in resources:
            path = self.compilePathInfo(resource.path)
            cursor = self.resources
            for segment in path:
                # Literal segments and placeholders live in separate lists.
                node_list = cursor.strs if type(segment) == PathStr else cursor.ids
                match = findMatch(segment, node_list)
                if match is not None:
                    # Re-use the existing node so patterns share prefixes.
                    cursor = match
                else:
                    node_list.append(segment)
                    cursor = segment
            # The final node of the pattern owns the resource.
            cursor.resource = resource
    def compilePathInfo(self, path):
        """Split a pattern like "users/{id:int}/posts" into PathStr/PathId
        segments.  Assumes no leading slash (an empty segment would raise
        IndexError on segment[0])."""
        path = path.split("/")
        final = []
        for segment in path:
            if segment[0] == "{" and segment[-1] == "}":
                # Placeholder: "{name}" or "{name:type}".
                final.append(PathId(*segment[1:-1].split(":")))
            else:
                final.append(PathStr(segment))
        return final
    def getURL(self, env, addId=None):
        """Reconstruct the request URL from the WSGI environ (the PEP 3333
        recipe), optionally appending addId — used for the Location header
        after a POST creates a resource."""
        url = env['wsgi.url_scheme']+'://'
        if env.get('HTTP_HOST'):
            url += env['HTTP_HOST']
        else:
            url += env['SERVER_NAME']
            # Append the port only when it is not the scheme default.
            if env['wsgi.url_scheme'] == 'https':
                if env['SERVER_PORT'] != '443':
                    url += ':' + env['SERVER_PORT']
            else:
                if env['SERVER_PORT'] != '80':
                    url += ':' + env['SERVER_PORT']
        url += quote(env.get('SCRIPT_NAME', ''))
        url += quote(env.get('PATH_INFO', ''))
        if addId is not None:
            url += "/" + str(addId)
        return url
    def __call__(self, env, start_response):
        """WSGI entry point: dispatch, then emit status/headers/body."""
        code, headers, body = self._callProcess(env, start_response)
        start_response(code, headersList(headers))
        return [bytes(body, "utf-8")]
    def _callProcess(self, env, start_response):
        """Route the request and invoke the resource method.

        Returns a (status_line, headers_dict, body_str) triple.  Any
        HttpInterrupt raised during routing or by the handler is converted
        into the corresponding error triple.
        NOTE(review): start_response is accepted but never used here.
        """
        try:
            # Find matching resource and get resource identifiers
            req_path = env["PATH_INFO"][1:].split("/")
            found = False  # NOTE(review): unused variable
            res_ids = {}
            cursor = self.resources
            for segment in req_path:
                # Begin looking at string matches
                match = findMatch(segment, cursor.strs)
                if match is None:
                    # Next try ids
                    match = findMatch(segment, cursor.ids)
                    if match:
                        # Capture the converted identifier under its name;
                        # seg_type is int for "{name:int}", else str.
                        res_ids[match.name] = match.seg_type(segment)
                if match is None:
                    # Not Found
                    raise NotFound()
                cursor = match
            resource = cursor.resource
            if resource is None:
                # Not Found
                raise NotFound()
            # Advertise the methods this resource actually implements.
            allowed = []
            for method in ["POST", "GET", "PUT", "DELETE"]:
                if resource.hasMethod(method):
                    allowed.append(method)
            allow = {"Allow": ",".join(allowed)};
            method = env['REQUEST_METHOD']
            if method == "OPTIONS":
                # CORS preflight: no body, just Allow + accepted headers.
                headers = {
                    'Access-Control-Allow-Headers': 'Content-Type, Accept, Content-Length, Host, Origin, User-Agent, Referer'
                }
                headers.update(allow)
                return (HTTP_NO_CONTENT, headers, "")
            if method not in allowed:
                return (HTTP_METHOD_NOT_ALLOWED, allow, "")
            if method == "POST":
                # POST handlers additionally return the new resource's id,
                # used to build the Location header.
                headers, body, rid = resource.POST(self, env, **res_ids)
                headers["Location"] = self.getURL(env, rid)
                return (HTTP_CREATED, headers, body)
            if method == "GET":
                headers, body = resource.GET(self, env, **res_ids)
                return (HTTP_OK, headers, body)
            if method == "PUT":
                headers, body = resource.PUT(self, env, **res_ids)
            elif method == "DELETE":
                headers, body = resource.DELETE(self, env, **res_ids)
            # PUT/DELETE: 204 when the handler produced no body.
            return (HTTP_OK if len(body) > 0 else HTTP_NO_CONTENT, headers, body)
        except HttpInterrupt as e:
            return (e.HTTP_CODE, e.headers, e.body)
|
# Read 100 integers from stdin and print the largest one.
maior = int(input())
for _ in range(99):
    candidato = int(input())
    if candidato > maior:
        maior = candidato
print(maior)
|
import numpy as np
def initTemp(STATUS, T, x):
    """Fill T in place with the initial temperature field selected by
    STATUS['CONFIG']['TINI_TYPE'].

    :param STATUS: model state dict; reads CONFIG, and for some types
                   L (domain extent) and Moho_Depth
    :param T: temperature array (modified in place)
    :param x: depth coordinates, same length as T
    :raises Exception: on non-ndarray arguments, mismatched lengths,
                       or an unknown TINI_TYPE
    """
    if not isinstance(x, np.ndarray):
        raise Exception("x must be numpy.ndarray")
    if not isinstance(T, np.ndarray):
        raise Exception("T must be numpy.ndarray")
    if len(T) != len(x):
        raise Exception("Incompatible x and T")
    tini_type = STATUS['CONFIG']['TINI_TYPE']
    if tini_type == -1:
        # T was read at restart; leave it untouched.
        pass
    elif tini_type == 0:
        # Uniform unit temperature.
        T[:] = 1.0
    elif tini_type == 1:
        # Linear gradient: 10 at the surface, 1529 at depth L[1].
        T[:] = 1519.0 * x / STATUS['L'][1] + 10.0
    elif tini_type == 10:
        # Extra 30 km overthrust: linear within the sheet, linear through
        # the crust below it, then linear down to 1250 at the base.
        sheet = x < 30e3
        T[sheet] = 750.0 * x[sheet] / 30e3
        crust = (x >= 30e3) & (x < 30e3 + STATUS['Moho_Depth'])
        T[crust] = 750.0 * (x[crust] - 30e3) / STATUS['Moho_Depth']
        mantle = x >= 30e3 + STATUS['Moho_Depth']
        T[mantle] = 750.0 + (1250.0 - 750.0) * (x[mantle] - (30e3 + STATUS['Moho_Depth'])) / (STATUS['L'][1] - (30e3 + STATUS['Moho_Depth']))
    elif tini_type == 101:
        # T read at restart; it is modified elsewhere afterwards.
        pass
    else:
        raise Exception("Invalid TINI_TYPE " + str(tini_type))
def prod(x):
    """Return the product of all items in iterable x (1 for an empty x)."""
    result = 1
    for factor in x:
        result = result * factor
    return result
def kT(reltype, k0, params, T):
    """Thermal conductivity as a function of temperature.

    :param reltype: 0 constant, 1 inverse-linear in T,
                    2 inverse-linear plus a T^3 term, clamped to [0, 100]
    :param k0: reference conductivity array
    :param params: relation coefficients (unused for reltype 0)
    :param T: temperature array (unused for reltype 0)
    :raises Exception: for an unknown reltype
    """
    if reltype == 0:
        # Copy so the caller's k0 is never aliased by the result.
        return np.copy(k0)
    if reltype == 1:
        return k0 / (1 + params[0] * T)
    if reltype == 2:
        clamped = k0 / (1 + params[0] * T) + params[1] * T ** 3
        clamped[clamped < 0] = 0        # MINK
        clamped[clamped > 1e2] = 1e2    # MAXK
        return clamped
    raise Exception("Invalid reltype")
def cT(reltype, c0, params, T):
    """Specific heat capacity as a function of temperature.

    :param reltype: 0 constant, 1 Waples and Waples (2004) cubic scaling
    :param c0: reference heat capacity array (measured at 20 C)
    :param params: [a, b, c, d, debye_temp] cubic coefficients (reltype 1)
    :param T: temperature array (unused for reltype 0)
    :raises Exception: for an unknown reltype
    """
    if reltype == 0:
        # Copy so the caller's c0 is never aliased by the result.
        return np.copy(c0)
    if reltype == 1:
        # Waples and Waples (2004): scale c0 by the ratio of the cubic
        # polynomial at T (capped at the Debye temperature) to its value at
        # the measuring temperature.
        def poly(t):
            return params[0] * t**3 + params[1] * t**2 + params[2] * t + params[3]
        t_meas = 20  # measuring temp
        debye = params[4]  # Debye temp of a silicate
        t_capped = np.copy(T)
        t_capped[t_capped > debye] = debye
        return c0 * poly(t_capped) / poly(t_meas)
    raise Exception("Invalid reltype")
def maxdt(k, rho, cp, xs):
    """Return the largest stable explicit timestep for the diffusion grid:
    the minimum over cells of 0.5 * dx^2 / kappa_half, where kappa = k/(rho*cp)
    and kappa_half is averaged between neighbouring nodes."""
    n = np.size(k)
    dx = xs[1:n] - xs[0:(n - 1)]
    kappa = k / (rho * cp)
    kappa_half = 0.5 * (kappa[1:n] + kappa[0:(n - 1)])
    return min(0.5 * dx * dx / kappa_half)
def diffstep(STATUS, T, new_T, xs, k, dt, Tsurf, q0, rho, cp, H):
    """One explicit (FTCS) step of 1-D heat diffusion on a non-uniform grid.

    Writes the updated temperatures into new_T in place.

    :param STATUS: model state; reads CONFIG['BND_BOT_TYPE'] (and
                   CONFIG['BND_BOT_TEMP'] for type 0)
    :param T: current temperatures, length NX
    :param new_T: output array, length NX (overwritten)
    :param xs: node coordinates, length NX
    :param k: thermal conductivity per node
    :param dt: timestep
    :param Tsurf: fixed surface temperature (upper Dirichlet boundary)
    :param q0: basal heat flow (used when BND_BOT_TYPE == 1)
    :param rho, cp: density and heat capacity per node
    :param H: volumetric heat production per node (W m-3)
    :raises Exception: on array length mismatches or unknown BND_BOT_TYPE
    """
    NX = np.size(T)
    if NX != np.size(new_T):
        raise Exception("dimension mismatch")
    if NX != np.size(k):
        raise Exception("dimension mismatch")
    if NX != np.size(xs):
        raise Exception("dimension mismatch")
    if NX != np.size(rho):
        raise Exception("dimension mismatch")
    if NX != np.size(cp):
        raise Exception("dimension mismatch")
    # Mid-point conductivities and spacings between adjacent nodes.
    khalf = 0.5 * (k[1:NX] + k[0:(NX-1)])
    dx = xs[1:NX] - xs[0:(NX-1)]
    dxhalf = 0.5 * (dx[1:(NX-1)] + dx[0:(NX-2)])
    # upper bnd
    new_T[0] = Tsurf
    # inner points: flux divergence, then scale by node spacing, then
    # advance in time and add internal heat production.
    new_T[1:(NX-1)] = khalf[1:(NX-1)] * (T[2:NX]-T[1:(NX-1)]) / dx[1:(NX-1)] - khalf[0:(NX-2)] * (T[1:(NX-1)] - T[0:(NX-2)]) / dx[0:(NX-2)]
    new_T[1:(NX-1)] = 2.0 * new_T[1:(NX-1)] / dxhalf[0:(NX-2)]
    new_T[1:(NX-1)] = new_T[1:(NX-1)] * dt / (rho[1:(NX-1)] * cp[1:(NX-1)]) + T[1:(NX-1)]
    new_T[1:(NX-1)] = new_T[1:(NX-1)] + H[1:(NX-1)] * dt / (rho[1:(NX-1)] * cp[1:(NX-1)])
    # W m-3 => J m-3 => J m-3 J-1 kg K => K kg m-3 => K
    #for ix in range(1,NX-1):
    #    new_T[ix] = khalf[ix] * (T[ix+1] - T[ix]) / dx[ix] - khalf[ix-1] * (T[ix] - T[ix-1]) / dx[ix-1]
    #    new_T[ix] = 2.0 * new_T[ix] / dxhalf[ix-1]
    #    new_T[ix] = new_T[ix] * dt / (rho[ix] * cp[ix]) + T[ix]
    #    new_T[ix] = new_T[ix] + H[ix] * dt / (rho[ix] * cp[ix])
    # lower bnd
    if STATUS['CONFIG']['BND_BOT_TYPE'] == 0:
        # Type 0: fixed basal temperature (Dirichlet).
        new_T[NX-1] = STATUS['CONFIG']['BND_BOT_TEMP']
    elif STATUS['CONFIG']['BND_BOT_TYPE'] == 1:
        # Type 1: fixed basal heat flow q0 (Neumann).
        new_T[NX-1] = q0 * dx[NX-2] / khalf[NX-2] + T[NX-2]
    elif STATUS['CONFIG']['BND_BOT_TYPE'] == 9:
        # Type 9: heat flow applied from the first node at or above TL
        # downwards, with magnitude computed from local properties.
        # NOTE(review): the constants (60.0**4, 9.81, 3.5e-5, 1e19) look like
        # a convective scaling law — confirm their provenance and units.
        TL = 1250.0
        try:
            botloc = min(np.where(new_T >= TL)[0])
        except ValueError:
            # No node reaches TL; fall back to the bottom node.
            botloc = NX-1
        botq = (rho[botloc]**2 * cp[botloc] * 1.0**4 * 60.0**4 * 9.81 * 3.5e-5 * (xs[botloc]-xs[0])**2) \
                / (1e19*new_T[botloc]**2)
        for ix in range(botloc, NX):
            new_T[ix] = botq * dx[ix-1] / khalf[ix-1] + T[ix-1]
        #print botq, botloc
    else:
        raise Exception("Invalid BND_BOT_TYPE")
def getErosionSpeed(STATUS):
    """Update STATUS['Erosion_Speed'] (m s^-1) in place according to
    STATUS['CONFIG']['EROSION_SPEED_TYPE'].

    Type 0: always erode at the configured speed.
    Type 1: erode until MaxTimeToErode has elapsed, then stop.
    Type 10: erode only for as long as it takes to remove the original
             overthrust sheet (requires RESTART_POST_MOD == 2).
    :raises Exception: on an invalid configuration
    """
    config = STATUS['CONFIG']

    def fullSpeed():
        # Nominal speed: metres per Ma converted to metres per second.
        return config['EROSION_SPEED_M_MA'] / (1e6 * STATUS['SECINYR'])

    speed_type = config['EROSION_SPEED_TYPE']
    if speed_type == 0:
        STATUS['Erosion_Speed'] = fullSpeed()
    elif speed_type == 1:
        elapsed = STATUS['curTime'] - STATUS['ModelStartTime']
        STATUS['Erosion_Speed'] = 0.0 if elapsed > STATUS['MaxTimeToErode'] else fullSpeed()
    elif speed_type == 10:
        # only erode the original overthrust sheet
        if config['RESTART_POST_MOD'] != 2:
            raise Exception("EROSION_SPEED_TYPE == 10 requires RESTART_POST_MOD == 2")
        elapsed = STATUS['curTime'] - STATUS['ModelStartTime']
        duration = config['RESTART_POST_MOD_PARAMS'][0] / config['EROSION_SPEED_M_MA']
        STATUS['Erosion_Speed'] = 0.0 if elapsed > duration else fullSpeed()
    else:
        raise Exception("Invalid EROSION_SPEED_TYPE")
def remesh(STATUS, curxs, newExt, arrays, extrapolation=0):
    """Move the grid curxs to a new extent/coordinates and re-interpolate.

    curxs and every array in `arrays` are modified (and possibly resized)
    in place; STATUS['NX'] is kept in sync with the new node count.

    :param STATUS: model state dict; reads and writes 'NX'
    :param curxs: current node coordinates (np.ndarray, resized in place)
    :param newExt: either a (min, max) pair describing the new extent, or a
                   full array of new node coordinates
    :param arrays: list of per-node value arrays to re-interpolate
    :param extrapolation: passed through to interpolate() for points
                          outside the old grid
    """
    # modify curxs to newExt and interpolate values within the arrays
    # newExt is either the extent (min,max)
    # or an array of new xs
    NX = 0
    if len(newExt) == 2:
        if newExt[0] == curxs[0] and newExt[1] == curxs[-1]:
            # we're done here
            return
        NX = STATUS['NX']
        if newExt[1] == curxs[-1]:
            # bottom location hasn't changed
            if (newExt[0] > curxs[0]) and (newExt[0] < curxs[1]):
                # only the uppermost grid point "has" moved
                #print newExt[0], curxs[0], curxs[1], curxs[2]
                if (curxs[1] - newExt[0]) < 0.5 * (curxs[2] - curxs[1]):
                    # grid spacing getting too tight, remove uppermost grid point
                    newxs = np.zeros(NX-1)
                    newxs[:] = curxs[1:]
                    newxs[0] = newExt[0]
                    NX = NX-1
                    STATUS['NX'] = NX
                    #print NX
                else:
                    # Keep the node count; just move the top node down.
                    newxs = np.copy(curxs)
                    newxs[0] = newExt[0]
            else:
                # TODO:
                # make this better so that a jump over one grid point
                # is also properly handled without extra interpolations
                newxs = np.linspace(newExt[0], newExt[1], num=NX)
        else:
            # Bottom moved too: rebuild a uniform grid over the new extent.
            newxs = np.linspace(newExt[0], newExt[1], num=NX)
    else:
        # newExt is an array of xs
        # NOTE(review): np.where(...) returns a tuple of index arrays, so
        # len(...) is the number of dimensions (1) — this early-exit only
        # triggers when len(curxs) == 1. Likely intended
        # len(np.where(newExt == curxs)[0]). Confirm before changing.
        if len(np.where(newExt == curxs)) == len(curxs):
            # we're done here
            return
        NX = len(newExt)
        STATUS['NX'] = NX
        newxs = np.copy(newExt)
    addGridPoints = NX - len(curxs)
    if addGridPoints != 0:
        #print "remesh(): Adding " + str(addGridPoints) + " grid points"
        pass
    # apply the new grid to the value arrays
    for i in range(len(arrays)):
        newarr = np.zeros(NX)
        interpolate(curxs, arrays[i], newxs, newarr, extrapolation)
        if addGridPoints != 0:
            # resize() mutates the caller's array; refcheck=False allows it
            # even when other references exist.
            arrays[i].resize(NX, refcheck=False)
        arrays[i][0:NX] = newarr[0:NX]
    if addGridPoints != 0:
        curxs.resize(NX, refcheck=False)
    curxs[0:NX] = newxs[0:NX]
    return None
def posMin(arr):
    """Return the np.where index tuple of the element(s) equal to the
    smallest strictly-positive value in arr.  Raises ValueError when arr has
    no positive elements."""
    positives = arr[arr > 0]
    smallest = min(positives)
    return np.where(arr == smallest)
def findPoint(xs, val):
    # Return the index of the element of xs nearest to val from above, i.e.
    # the unique point with the smallest strictly positive distance xs - val.
    # Raises ValueError (via min of an empty array inside posMin) when no
    # element exceeds val, and Exception when the nearest-above distance is
    # shared by more than one point (degenerate grid).
    dist = xs - val
    i = posMin(dist)
    if len(i[0]) != 1:
        raise Exception("Error in findPoint()")
    return i[0][0]
def interpolate(xs1, arr1, xs2, arr2, extrapolation):
    """Interpolate values from grid (xs1, arr1) onto grid xs2, writing arr2
    in place.

    Points of xs2 inside the range of xs1 are linearly interpolated (copied
    directly when a node coincides).  Points outside are handled according
    to `extrapolation`:
      <= 0 : above the top, fill with the constant -extrapolation
      1    : fill with NaN
      2    : above the top, interpolate at the same depth relative to the
             top of xs1 (not supported below the bottom)

    Fix: np.NaN -> np.nan; the np.NaN alias was removed in NumPy 2.0.

    :raises Exception: for unsupported extrapolation modes or a degenerate
                       xs1 grid with duplicate coordinates
    """
    for i in range(len(xs2)):
        if xs2[i] < xs1[0]:
            # Above the top of the source grid.
            if extrapolation <= 0:
                arr2[i] = -extrapolation
            elif extrapolation == 1:
                arr2[i] = np.nan
            elif extrapolation == 2:
                # Interpolate at the same relative depth within xs1.
                relloc = xs2[i] - xs2[0]
                j = findPoint(xs1-xs1[0], relloc)
                arr2[i] = arr1[j-1] + (relloc+xs1[0] - xs1[j-1]) * (arr1[j] - arr1[j-1]) / (xs1[j] - xs1[j-1])
            else:
                raise Exception("Don't know how to extrapolate")
        elif xs2[i] > xs1[-1]:
            # Below the bottom of the source grid.
            if extrapolation == 1:
                arr2[i] = np.nan
            elif extrapolation == 2:
                raise Exception("Extrapolation: Copying at the lower bnd not supported yet")
            else:
                raise Exception("Don't know how to extrapolate")
        else:
            # Inside the source grid.
            idx = xs2[i] == xs1
            whereIdx = np.where(idx)
            nSameNodes = len(whereIdx[0])
            if nSameNodes == 0:
                # interpolate, linear, between the two bracketing nodes
                idx2 = np.where(xs1 > xs2[i])[0][0]
                idx1 = idx2 - 1
                arr2[i] = arr1[idx1] + (xs2[i] - xs1[idx1]) * (arr1[idx2] - arr1[idx1]) / (xs1[idx2] - xs1[idx1])
            elif nSameNodes == 1:
                # copy value directly
                arr2[i] = arr1[whereIdx]
            elif nSameNodes > 1:
                raise Exception("Invalid grid xs1: multiple same values")
|
import json
import tornado
import tornado.web
class BaseHandler(tornado.web.RequestHandler):
    """Common base for API handlers: defaults responses to JSON and provides
    a uniform send_response() envelope.  HTTP verbs are no-op stubs that
    subclasses override."""

    def set_default_headers(self):
        """Mark every response as JSON."""
        self.set_header("Content-type", "application/json")

    def get(self):
        pass

    def post(self):
        pass

    def put(self):
        pass

    def delete(self):
        pass

    def send_response(self, success=True, status=200, message="", response=None):
        """Write a standard JSON envelope with the given HTTP status."""
        self.set_status(status)
        payload = {
            "success": success,
            "message": message,
            "data": response
        }
        return self.write(json.dumps(payload))
#!/usr/bin/env python3
''' Author : Student '''
# Module-level constant holding the message to display.
MYNOTE = "print this string"
# Echo the note to stdout.
print(MYNOTE)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.