# -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
import time, random, sys, ast, re, os, io, json, subprocess, threading, string, codecs, requests, ctypes, urllib, urllib2, urllib3, wikipedia, tempfile, shutil
from bs4 import BeautifulSoup
from urllib import urlopen
from io import StringIO
from threading import Thread
from gtts import gTTS
from googletrans import Translator
cl = LINETCR.LINE()
#cl.login(qr=True)
cl.login(token='Erkv7l2j2mSASxwVFoZ8.sGpL7MhXv/v78cRSmT7doa.eTfxddkYkHiMYLCcObtFkIBTOUDFr4XWr//CjYbDAP0=') # public bot
cl.loginResult()
print "❂➣ [PUBLIC BOT LOGGED IN]"
reload(sys)
sys.setdefaultencoding('utf-8')
album = None # required by upload_tempimage(); was commented out in the original
image_path = 'tmp/tmp.jpg' # likewise required by upload_tempimage()
helpMessage ="""
============================
🤖 BOT KEY 🤖
============================
01. Apakah [text] = Kr_ajaib
02. Kapan [text] = Tanya bot
03. Dosa @ [by tag]
04. Pahala @ [by tag]
05. Gcreator
06. Bot spam on [jml] [text]
07. Search image: [text]
08. Instagram: [username]
09. Wikipedia: [search words]
10. Playstore: [search words]
11. Anime: [search words]
12. Lirik: [artis] [judul]
13. Music: [artis] [judul]
14. youtube: [text]
15. youtube search: [text]
16. Mister/Mr (cek bot)
17. Dubbing [text]
18. Name: [tag member]
19. Bio: [tag member]
20. Info: [tag member]
21. Getinfo: [tag member]
22. Getprofile [tag member]
23. Getcontact: [tag member]
24. Getpp [tag member]
25. Gethome [tag member]
26. Getimage group
27. Set sider
28. Cek sider
29. setview
30. viewseen
31. Tagall/Mentionall
32. Gift
33. Gift1
34. Gift2
35. Gift3
36. Line clone
37. Key translate
38. love [Siti love adi]
39. sider on/off (auto)
=============================
My creator:
line.me/ti/p/~tak.dapat.tidur
=============================
"""
socmedMessage ="""
╔═════════════
║ SOSMED KEY
║╔════════════
║╠❂͜͡🌟➣Wikipedia: [тeхт]
║╠❂͜͡🌟➣Instagram: [username]
║╠❂͜͡🌟➣Image: [тeхт]
║╠❂͜͡🌟➣Lirik: [тeхт]
║╠❂͜͡🌟➣Lineid: [тeхт]
║╠❂͜͡🌟➣Music: [artis] [judul]
║╠❂͜͡🌟➣тιмe [тιмe]
║╠❂͜͡🌟➣ѕay [тeхт]
║╚════════════
╚═════════════
"""
translateMessage ="""
╔═════════════
║ TRANSLATE KEY
║╔════════════
║╠☔тr-ιd = ιndoneѕιa
║╠☔тr-мy = мyanмar
║╠☔тr-en = englιѕн
║╠☔тr-тн = тнaιland
║╠☔тr-ja = japaneѕe
║╠☔тr-мѕ = мalayѕιa
║╠☔тr-ιт = ιтalιan
║╠☔тr-тr = тυrĸιѕн
║╠☔тr-aғ = aғrιĸaanѕ
║╠☔тr-ѕq = alвanιan
║╠☔тr-aм = aмнarιc
║╠☔тr-ar = araвιc
║╠☔тr-нy = arмenιan
║╚════════════
╚═════════════
"""
botMessage ="""
╔═════════════
║ BOT KEY
║╔════════════
║╠❂͜͡⚡➣Set sider > Cek sider
║╠❂͜͡⚡➣Tes / Sepi
║╠❂͜͡⚡➣Reѕpon
║╠❂͜͡⚡➣Speed / Sp
║╠❂͜͡⚡➣Grup list
║╠❂͜͡⚡➣Tagall / Mentionall
║╚════════════
╚═════════════
"""
settingMessage ="""Empty"""
protectMessage ="""Empty""" # referenced by the Key4 handler but never defined in the original
#╔═════════════
#║ SETTING KEY
#║╔════════════
#║╠❂͜͡🌟➣ѕeт
#║╠❂͜͡🌟➣тag on/oғғ
#║╠❂͜͡🌟➣aυтolιĸe on/oғғ
#║╠❂͜͡🌟➣add on/oғғ
#║╠❂͜͡🌟➣joιn on/oғғ
#║╠❂͜͡🌟➣ѕнare on/oғғ
#║╠❂͜͡🌟➣coммenт on/oғғ
#║╚════════════
#╚═════════════
#"""
giftMessage ="""
╔═════════════
║ GIFT KEY
║╔════════════
║╠❂͜͡🌟➣gιғт
║╠❂͜͡🌟➣gιғт 1
║╠❂͜͡🌟➣gιғт 2
║╠❂͜͡🌟➣gιғт 3
║╚════════════
╚═════════════
"""
stealMessage ="""
╔═════════════
║ STEAL KEY
║╔════════════
║╠❂͜͡🌟➣geтnaмe @
║╠❂͜͡🌟➣geтвιo @
║╠❂͜͡🌟➣geтιnғo @
║╠❂͜͡🌟➣geтpp @
║╠❂͜͡🌟➣geтмιd @
║╠❂͜͡🌟➣geтgroυp
║╠❂͜͡🌟➣papιмage
║╠❂͜͡🌟➣papvιdeo
║╚════════════
╚═════════════
"""
KAC=[cl]
mid = cl.getProfile().mid
Bots=[mid]
creator=["u3cfa63811888b3a880bc4f348a95b23b","u0040a6dfc0a274f29899cccfc1c9b457"]
admin=creator+[mid]
owner=creator[:] # the original assigned owner twice; the second assignment (without mid) is the one that took effect
baby=[""]#chery/barby/ranita
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True,"members":1},
'leaveRoom':False,
'autoAdd':False,
'timeline':True,
'displayName':True,
"Timeline":True,
"message":"""Thanks for add me (^_^)\n\nContact My cerator:\nline.me/ti/p/~tak.dapat.tidur""",
"lang":"JP",
"comment":"Invite to your group ヘ(^_^)ヘ\n\nContact My cerator:\nline.me/ti/p/~tak.dapat.tidur",
"commentOn":True,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"auto":True,
"tag":True,
"pap":True,
"steal":{},
'pap':{},
'invite':{},
"spam":{},
"gift":{},
"likeOn":True,
"alwayRead":True,
"detectMention":True,
"detectMention2":True,
'point':False,
'sidermem':False,
"mid":{},
"sendMessage":True,
"Mimic":False,
"mimic":False,
"winvite":True,
"winvite2":False,
"Wc":True,
"Lv":True,
"atjointicket":True,
"Sider":{},
"members":1,
"Simi":{},
"BlGroup":{}
}
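# Rough meaning of the main flags above, inferred from the handlers below:
# autoJoin/autoAdd auto-accept group invites and new contacts, leaveRoom
# auto-leaves 1:1 rooms, alwayRead marks chats as read, detectMention and
# detectMention2 auto-reply when the bot is tagged, and Sider plus the cctv
# dict below back the "Set sider"/"Cek sider" lurker detection.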
settings = {
"simiSimi":{}
}
cctv = {
"cyduk":{},
"point":{},
"sidermem":{}
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
setTime = wait2['setTime']
# Snapshot the current profile once, then keep three working copies
# (two backups plus the copy the bot mutates at runtime).
contact = cl.getProfile()
mybackup = cl.getProfile()
mybackup.displayName = contact.displayName
mybackup.statusMessage = contact.statusMessage
mybackup.pictureStatus = contact.pictureStatus
backup = cl.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
profile = cl.getProfile()
profile.displayName = contact.displayName
profile.statusMessage = contact.statusMessage
profile.pictureStatus = contact.pictureStatus
mulai = time.time()
agent = {'User-Agent' : "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)"}
def translate(to_translate, to_language="auto", language="auto"):
bahasa_awal = "auto"
bahasa_tujuan = to_language
kata = to_translate
url = 'http://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
return result
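# Usage sketch for translate() (network access required; this scrapes Google's
# unofficial mobile endpoint, so treat results as best-effort and subject to break):
#
#   print translate("selamat pagi", "en")   # expected: "good morning"
#   print translate("hello", "id")          # expected: "halo"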
def download_page(url):
version = (3,0)
cur_version = sys.version_info
if cur_version >= version: #If the Current Version of Python is 3.0 or above
import urllib.request #urllib library for extracting web pages
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
req = urllib.request.Request(url, headers = headers)
resp = urllib.request.urlopen(req)
respData = str(resp.read())
return respData
except Exception as e:
print(str(e))
else: #If the Current Version of Python is 2.x
import urllib2
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib2.Request(url, headers = headers)
response = urllib2.urlopen(req)
page = response.read()
return page
except:
return"Page Not found"
#Finding 'Next Image' from the given raw page
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1: #If no links are found then give an error!
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+90)
end_content = s.find(',"ow"',start_content-90)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
#Getting all links with the help of '_images_get_next_image'
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item) #Append all the links in the list named 'Links'
time.sleep(0.1) #Timer could be used to slow down the request for image downloads
page = page[end_content:]
return items
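# How the two scrapers above fit together: download_page() fetches the raw HTML of
# an old-style Google Images results page and _images_get_all_items() walks it for
# direct image links. A minimal sketch (hypothetical helper, not wired to any
# command; Google's markup has changed since, so the 'rg_di'/'"ou"' parsing is fragile):
def search_images(keyword):
    url = "https://www.google.com/search?q=" + keyword.replace(" ", "+") + "&tbm=isch"
    page = download_page(url)
    return _images_get_all_items(page) # list of direct image URLs (possibly empty)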
def sendFileWithURL(self, url, name = ''):
"""Send a File with given File url
:param url: File url to send
"""
from urlparse import urlparse
from os.path import basename
import urllib2
if name == '':
name = basename(urlparse(url).path)
file = urllib2.urlopen(url)
output = open('pythonLine.data','wb')
output.write(file.read())
output.close()
try:
self.sendFile('pythonLine.data', name)
except Exception as e:
raise e
def yt(query):
with requests.session() as s:
isi = []
if query == "":
query = "S1B tanysyz"
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
if 'watch?v' in a['href']:
b = a['href'].replace('watch?v=', '')
isi += ['youtu.be' + b]
return isi
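# Example: yt("deep purple") returns links like ['youtu.be/XXXXXXXXXXX', ...].
# It scrapes YouTube's HTML results via the '.yt-lockup-title' selector, so it
# breaks whenever that markup changes; an empty query falls back to "S1B tanysyz".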
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs)
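# Worked example: waktu(3725) -> divmod(3725, 60) = (62, 5), divmod(62, 60) = (1, 2),
# so the bot reports "01 Jam 02 Menit 05 Detik" (hours, minutes, seconds).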
def upload_tempimage(client):
'''
Upload a picture of a kitten. We don't ship one, so get creative!
'''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
return image
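# upload_tempimage() expects an imgurpython-style client exposing upload_from_path()
# and uses the album/image_path globals defined near the top. A hedged setup sketch
# with placeholder credentials (not part of the original script):
#
#   from imgurpython import ImgurClient
#   client = ImgurClient('<client_id>', '<client_secret>')
#   image = upload_tempimage(client)
#   print image['link']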
def sendImage(self, to_, path):
M = Message(to=to_,contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M_id = self.Talk.client.sendMessage(0,M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('http://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
return True
def sendImageWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'wb') as f: # binary mode; text mode corrupts image data
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def post_content(self, urls, data=None, files=None):
return self._session.post(urls, headers=self._headers, data=data, files=files)
def NOTIFIED_READ_MESSAGE(op):
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n9§9" + Name
wait2['ROM'][op.param1][op.param2] = "9§9" + Name
else:
pass
except:
pass
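# NOTIFIED_READ_MESSAGE feeds the "Cek sider" report: op.param1 is the chat the
# read receipt came from and op.param2 the reader's mid (per the operation layout
# this script assumes elsewhere). Each reader is appended once to
# wait2['readMember'][chat] with "9§9" as a crude separator, and wait2['ROM']
# remembers who was already counted so repeat receipts are ignored.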
def sendAudio(self, to_, path):
M = Message(to=to_, text=None, contentType = 3)
M_id = self.Talk.client.sendMessage(0,M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'audio',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
print r
if r.status_code != 201:
raise Exception('Upload audio failure.')
def sendAudioWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'wb') as f: # binary mode; text mode corrupts audio data
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download audio failure.')
try:
self.sendAudio(to_, path)
except Exception as e:
raise e
def sendVoice(self, to_, path):
M = Message(to=to_, text=None, contentType = 3)
M.contentPreview = None
M_id = self._client.sendMessage(0,M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'voice_message',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'audio',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('http://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload voice failure.')
return True
def tagall(to,nama):
aa = ""
bb = ""
strt = int(12)
akh = int(12)
nm = nama
#print nm
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "✮ @c \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "「Mention」\n"+bb
msg.contentMetadata = {'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
cl.sendMessage(msg)
except Exception as error:
print error
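# The MENTION metadata built by tagall() is a JSON list of {"S": start, "E": end,
# "M": mid} spans that index into msg.text. A minimal single-mention sketch in the
# same format (hypothetical helper, not called by any command above):
def mention_one(to, target_mid, name):
    msg = Message()
    msg.to = to
    msg.text = "@" + name + " "
    span = {"S": "0", "E": str(len(name) + 1), "M": target_mid}
    msg.contentMetadata = {'MENTION': json.dumps({"MENTIONEES": [span]}), 'EMTVER': '4'}
    cl.sendMessage(msg)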
def summon(to,nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag Sider on"
try:
cl.sendMessage(msg)
except Exception as error:
print error
messageReq = {} # per-chat send counter; the original used it without defining it
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
cl.sendMessage(mes) # the original built the message but never dispatched it
def removeAllMessages(self, lastMessageId):
return self._client.removeAllMessages(0, lastMessageId)
def bot(op):
try:
if op.type == 0:
return
if op.type == 13:
if wait["auto"] == True:
cl.acceptGroupInvitation(op.param1)
cl.sendText(op.param1, "Do not invite anyone !! In addition to permission from the board or group owner. Thank you 🙂")
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if(wait["message"]in[""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 55:
try:
group_id = op.param1
user_id=op.param2
subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )
except Exception as e:
print e
if op.type == 55:
try:
if cctv['cyduk'][op.param1]==True:
if op.param1 in cctv['point']:
Name = cl.getContact(op.param2).displayName
# Name = summon(op.param2)
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\n• " + Name
if " " in Name:
nick = Name.split(' ')
if len(nick) == 2:
cl.sendText(op.param1, "Haii " + "☞ " + Name + " ☜" + "\nCie cie yang jones ngintip aja cie . . .\nSini napa nes (-__-) ")
time.sleep(0.2)
summon(op.param1,[op.param2])
else:
cl.sendText(op.param1, "Haii " + "☞ " + Name + " ☜" + "\nBisulan tujuh turunan cctv telus . . .\nChat Napa (-__-) ")
time.sleep(0.2)
summon(op.param1,[op.param2])
else:
cl.sendText(op.param1, "Haii " + "☞ " + Name + " ☜" + "\nKak ngapain ngintip ? \nSini Dong ih.. ")
time.sleep(0.2)
summon(op.param1,[op.param2])
else:
pass
else:
pass
except:
pass
else:
pass
if op.type == 26:
msg = op.message
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
cl.sendText(msg.to, "[From Simi]\n" + data['result']['response'].encode('utf-8'))
if msg.contentMetadata is not None and 'MENTION' in msg.contentMetadata:
if wait["detectMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["",cName + " Ada apa kak panggil aku ?, ", cName + " Kangen kak ? Datang aja ke lumah aku", cName + " Yang tag jones ", "Maaf aku lagi nikung janga ganggu, " + cName + "?","Au ah tag mulu, ", "Lagi anu kak tanggung mau keluar, "]
ret_ = random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
cl.sendText(msg.from_,ret_)
time.sleep(0.2)
summon(op.param1,[op.param2])
break
if msg.contentMetadata is not None and 'MENTION' in msg.contentMetadata:
if wait["detectMention2"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["Woii " + cName + ", Dasar Jones Ngetag Mulu!"]
balas1 = "Ini Foto Sii Jones Yang Suka Ngetag. . ."
ret_ = random.choice(balas)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
cl.sendText(msg.to,balas1)
cl.sendImageWithURL(msg.to,image)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
cl.sendMessage(msg)
break
#---------------------------------------------------------------------
if op.type == 26:
msg = op.message
if msg.text is None:
return
if "@"+cl.getProfile().displayName in msg.text:
if wait["tag"] == True:
tanya = msg.text.replace("@"+cl.getProfile().displayName,"")
jawab = ("Kenapa Tag Si "+cl.getProfile().displayName+" Kangen yah..!!!\nPC aja langsung biar anu hihi..!!","Nah ngetag lagi si "+cl.getProfile().displayName+" mending ajak mojok nah bozz saya 👉 🍁հմՏɑíղ✍️ line.me//t/p/~tak.dapat.tidur kasian bozz saya jones,, dari pada ngetag mulu.. wkwk...!!!")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
#------Open QR Kick finish-----#
if wait["alwayRead"] == True:
if msg.toType == 0:
cl.sendChatChecked(msg.from_,msg.id)
else:
cl.sendChatChecked(msg.to,msg.id)
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"In Blacklist")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"Nothing")
if op.type == 17:
if wait["Wc"] == True:
if op.param2 in Bots:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
cl.sendImageWithURL(op.param1,image)
cl.sendText(op.param1,"Hii... " + cl.getContact(op.param2).displayName + " \nWelcome to " + str(ginfo.name) + "\n\n" + "Group creator :\n👉 " + ginfo.creator.displayName + "\n\nObey the rules in the group admin\nAnd hopefully feel at home here 🙂\nPlease type [Help/Key], For help. And use wisely. Thank you 🙂")
print "MEMBER HAS JOIN THE GROUP"
if op.type == 15:
if wait["Lv"] == True:
if op.param2 in Bots:
return
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
cl.sendImageWithURL(op.param1,image)
cl.sendText(op.param1,"Good bye " + cl.getContact(op.param2).displayName + "\nSee you next time . . . (p′︵‵。) 🤗")
print "MEMBER HAS LEFT THE GROUP"
#-----------------------------------------------
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["winvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
cl.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
cl.sendText(msg.to,"Call my daddy to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Done Invite : \n➡" + _name)
wait["winvite"] = False
break
except:
try:
cl.findAndAddContactsByMid(invite)
cl.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
cl.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
#--------------NOTIFIED_INVITE_INTO_GROUP----------------
if op.type == 13:
print op.param3
if op.param3 in mid:
if op.param2 in creator:
cl.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.type == 13:
if mid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or op.param2 in owner or op.param2 == mid: # the original "or owner or mid" was always truthy
cl.acceptGroupInvitation(op.param1)
cl.rejectGroupInvitation(op.param1)
cl.sendText(op.param1, "Thank you for inviting me.\nIntroduce my name is Mr. Rius 🤖\n\nPlease type [Help/Key], For help. And use wisely. Thank you 🙂")
else:
cl.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
#================================================================
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
#-----------------------------------------
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"] # the original called the dict like a function
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
cl.sendText(msg.to,"deleted")
cl.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
cl.sendText(msg.to,"It is not in the black list")
cl.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already")
cl.sendText(msg.to,"already")
cl.sendText(msg.to,"already")
kc.sendText(msg.to,"already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"aded")
cl.sendText(msg.to,"aded")
cl.sendText(msg.to,"aded")
kc.sendText(msg.to,"aded")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
cl.sendText(msg.to,"deleted")
cl.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
cl.sendText(msg.to,"It is not in the black list")
cl.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL�0�10��9�0�16�0�69�0�3�0�4\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Help","Key","help","key"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
#--------------------------------------------------
elif msg.text in ["Key translate"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,translateMessage)
else:
cl.sendText(msg.to,helpt)
#--------------------------------------------------
elif msg.text in ["Key7"]:
if msg.from_ in admin or msg.from_ in owner:
if wait["lang"] == "JP":
cl.sendText(msg.to,botMessage)
else:
cl.sendText(msg.to,helpt)
#--------------------------------------------------
elif msg.text in ["Key2"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,socmedMessage)
else:
cl.sendText(msg.to,helpt)
#--------------------------------------------------
elif msg.text in ["Key4"]:
if msg.from_ in admin or msg.from_ in owner:
if wait["lang"] == "JP":
cl.sendText(msg.to,protectMessage)
else:
cl.sendText(msg.to,helpt)
#--------------------------------------------------
elif msg.text in ["Key5"]:
if msg.from_ in admin or msg.from_ in owner:
if wait["lang"] == "JP":
cl.sendText(msg.to,settingMessage)
else:
cl.sendText(msg.to,helpt)
#--------------------------------------------------
elif msg.text in ["Key6"]:
if msg.from_ in admin or msg.from_ in owner:
if wait["lang"] == "JP":
cl.sendText(msg.to,stealMessage)
else:
cl.sendText(msg.to,helpt)
#--------------------------------------------------
elif msg.text in ["Key3"]:
if msg.from_ in admin or msg.from_ in owner:
if wait["lang"] == "JP":
cl.sendText(msg.to,giftMessage)
else:
cl.sendText(msg.to,helpt)
#--------------------------------------------------
elif ("Gn: " in msg.text):
if msg.from_ in admin or msg.from_ in owner:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn: ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
#--------------------------------------------------
elif "Jemput dia " in msg.text:
if msg.from_ in admin or msg.from_ in owner:
midd = msg.text.replace("Jemput dia ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
#--------------------------------------------------
elif msg.text in ["Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
#--------------------------------------------------
elif msg.text in ["Ourl","Link on","Urlon"]:
if msg.from_ in admin or msg.from_ in owner:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#--------------------------------------------------
elif msg.text in ["Curl","Link off","Urloff"]:
if msg.from_ in admin or msg.from_ in owner:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#--------------------------------------------------
elif "jointicket " in msg.text.lower():
rplace=msg.text.lower().replace("jointicket ","") # the original omitted the required second argument
if rplace == "on":
wait["atjointicket"]=True
elif rplace == "off":
wait["atjointicket"]=False
cl.sendText(msg.to,"Auto Join Group by Ticket is %s" % str(wait["atjointicket"]))
elif '/ti/g/' in msg.text.lower():
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(msg.text)
n_links=[]
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
if wait["atjointicket"] == True:
group=cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.mid,ticket_id)
cl.sendText(msg.to,"Sukses join ke grup %s" % str(group.name))
#--------------------------------------------------
elif msg.text == "Ginfo":
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "Close"
else:
u = "Open"
cl.sendText(msg.to,"[Group Name]\n" + str(ginfo.name) + "\n\n[Group id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\n[Profile Status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\nMembers: " + str(len(ginfo.members)) + " members\nPending: " + sinvitee + " people\nURL: " + u + " it is inside")
else:
cl.sendText(msg.to,"[Group Name]\n" + str(ginfo.name) + "\n\n[Group id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\n[Profile Status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#--------------------------------------------------
elif "Details group: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("Details group: ","")
if gid in [""," "]:
cl.sendText(msg.to,"Grup id tidak valid")
else:
try:
groups = cl.getGroup(gid)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
h = "[" + groups.name + "]\n -+GroupID : " + gid + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName + "\n -+GroupPicture : http://dl.profile.line.naver.jp/" + groups.pictureStatus
cl.sendText(msg.to,h)
except Exception as error:
cl.sendText(msg.to,(error))
elif "Spamtag @" in msg.text:
_name = msg.text.replace("Spamtag @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
xname = g.displayName
xlen = str(len(xname)+1)
msg.contentType = 0
msg.text = "@"+xname+" "
msg.contentMetadata ={'MENTION':'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(g.mid)+'}]}','EMTVER':'4'}
for _ in range(12): # the original repeated this send twelve times by hand
cl.sendMessage(msg)
print "Spamtag Berhasil."
#--------------------------------------------------
elif msg.text.lower() in ["wkwk","wkwkwk","ckck","ckckck"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '100',
'STKPKGID': '1',
'STKVER': '100' }
msg.text = None
cl.sendMessage(msg)
#--------------------------------------------------
elif msg.text.lower() in ["hehehe","hehe"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '10',
'STKPKGID': '1',
'STKVER': '100' }
msg.text = None
cl.sendMessage(msg)
#--------------------------------------------------
elif msg.text.lower() in ["galon","galo","galau"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '9',
'STKPKGID': '1',
'STKVER': '100' }
msg.text = None
cl.sendMessage(msg)
#--------------------------------------------------
elif msg.text.lower() in ["you","kamu","km","u","qm"]:
msg.contentType = 7
msg.contentMetadata={"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
msg.text = None
cl.sendMessage(msg)
#--------------------------------------------------
elif msg.text.lower() in ["hadeuh","hadeh","hadech"]:
msg.contentType = 7
msg.contentMetadata={"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
msg.text = None
cl.sendMessage(msg)
#--------------------------------------------------
elif msg.text.lower() in ["please"]:
msg.contentType = 7
msg.contentMetadata={"STKID": "4",
"STKPKGID": "1",
"STKVER": "100" }
msg.text = None
cl.sendMessage(msg)
#--------------------------------------------------
elif msg.text.lower() in ["haaa","hah","kaget","terkejut"]:
msg.contentType = 7
msg.contentMetadata={"STKID": "3",
"STKPKGID": "1",
"STKVER": "100" }
msg.text = None
cl.sendMessage(msg)
#--------------------------------------------------
elif msg.text.lower() in ["lol","haha","hahaha","ngakak","lucu"]:
msg.contentType = 7
msg.contentMetadata={"STKID": "110",
"STKPKGID": "1",
"STKVER": "100" }
msg.text = None
cl.sendMessage(msg)
#--------------------------------------------------
elif msg.text.lower() in ["hmmm","hmm"]:
msg.contentType = 7
msg.contentMetadata={"STKID": "101",
"STKPKGID": "1",
"STKVER": "100" }
msg.text = None
cl.sendMessage(msg)
#--------------------------------------------------
elif msg.text.lower() in ["come","ayo","kuy"]:
msg.contentType = 7
msg.contentMetadata={"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["marah","hadeuh","hadeh"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '6',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["tidur","turu","bobo","bubu","sleep","nite"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '1',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["gemes"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '2',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["cantik","imut"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '5',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["nyanyi","lalala"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '11',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["gugup"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '8',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["ok","oke","okay","oce","okee","sip","siph"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '13',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["mantab","mantap","nice","keren"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '14',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["ngejek"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '15',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["nangis","sedih"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '16',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["kampret"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '102',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["huft"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '104',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
#---------------------------------------------------
elif "Id" == msg.text:
if msg.from_ in admin or msg.from_ in owner:
cl.sendText(msg.to,msg.to)
#--------------------------------------------------
elif msg.text in ["TL "]:
if msg.from_ in admin or owner:
tl_text = msg.text.replace("TL ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
#--------------------------------------------------
elif msg.text in ["Undang"]:
if msg.from_ in admin or msg.from_ in owner:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
#--------------------------------------------------
elif msg.text in ["Mc "]:
if msg.from_ in admin or owner:
mmid = msg.text.replace("Mc ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
#--------------------------------------------------
elif msg.text in ["自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã¥Ââ 1�71¤7šÃ¥Å 1�71¤7 :オãÆ�1�7�Â�1�7�1�71¤7","Join on","Auto join:on","自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã¥ÂÆ’Ã¥Å�1�7�1�71¤7 :éâ€�1�7�â�1�7�1�71¤7 1�71¤7"]:
if msg.from_ in admin or owner:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
#--------------------------------------------------
elif msg.text in ["自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã¥Ââ 1�71¤7šÃ¥Å 1�71¤7 :オãÆ�1�7�â�1�7�1�71¤7 1�71¤7","Join off","Auto join:off","自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã¥ÂÆ’Ã¥Å�1�7�1�71¤7 :éâ€�1�7�Å�1�7�1�71¤7"]:
if msg.from_ in admin or owner:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
#----------------------------------------------------#
elif msg.text in ["Respontagbot on","Autoresponbot:on","Responbot on","Responbot:on"]:
if msg.from_ in admin:
wait["detectMention"] = True
cl.sendText(msg.to,"Auto respon tag On")
elif msg.text in ["Responbot2 on"]:
if msg.from_ in admin:
wait["detectMention"] = False
wait["detectMention2"] = True
wait["detectMention3"] = False
wait["kickMention"] = False
cl.sendText(msg.to,"Auto Respon2 Sudah Aktif")
else:
cl.sendText(msg.to,"Khusus Admin")
elif msg.text in ["Responbot2 off"]:
if msg.from_ in admin:
wait["detectMention2"] = False
cl.sendText(msg.to,"Auto Respon2 Sudah Off")
else:
cl.sendText(msg.to,"Khusus Admin")
elif msg.text in ["Respontagbot off","Autoresponbot:off","Responbot off","Responbot:off"]:
if msg.from_ in admin:
wait["detectMention"] = False
cl.sendText(msg.to,"Auto respon tag Off")
#--------------------------------------------------
elif msg.text in ["Gcancel:"]:
if msg.from_ in admin or owner:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"å…³äºâ�1�7�1�71¤7 éâ 1�71¤7šâ‚¬Ã¨Â¯Â·Ã¦â€¹â 1�71¤7™Ã§Â»Âãâ�1�7�¬â€šÃ¨Â¦Âæâ 1�71¤7”¶å¼â�1�7�¬Ã¨Â¯Â·Ã¦Å�1�7�‡å®šäººæâ 1�71¤7¢Â°Ã¥Ââ 1�71¤7˜Ã©â‚¬Â1�7")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下çšâ�1�7�1�71¤7žÃ¥Â°Âç»â 1�71¤7žÃ§â 1�71¤7¨èâ 1�71¤7¡ÂªÃ¥Å ¨éâ 1�71¤7šâ‚¬Ã¨Â¯Â·Ã¦â€¹â 1�71¤7™Ã§Â»Â�1�7�1�71¤7")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
#--------------------------------------------------
elif msg.text in ["強制自åâ 1�71¤7¹â 1�71¤7¢Ã©â‚¬â�1�7�¬Ã¥â�1�7�1�71¤7¡Â 1�71¤7:オãÆ�1�7�Â�1�7�1�71¤7","Leave on","Auto leave:on","強制自åâ 1�71¤7¹â 1�71¤7¢Ã©â‚¬â�1�7�¬Ã¥â�1�7�1�71¤7¡ÂºÃ¯Â¼Å¡Ã©â 1�71¤7“â�1�7�1�71¤7 1�71¤7"]:
if msg.from_ in admin or owner:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦Â了å¼â�1�7�¬Ã£â�1�7�¬â�1�7�1�71¤7 1�71¤7")
#--------------------------------------------------
elif msg.text in ["強制自åâ 1�71¤7¹â 1�71¤7¢Ã©â‚¬â�1�7�¬Ã¥â�1�7�1�71¤7¡Â 1�71¤7:オãÆ�1�7�â�1�7�1�71¤7 1�71¤7","Leave off","Auto leave:off","強制自åâ 1�71¤7¹â 1�71¤7¢Ã©â‚¬â�1�7�¬Ã¥â�1�7�1�71¤7¡ÂºÃ¯Â¼Å¡Ã©â 1�71¤7”Å�1�7�1�71¤7"]:
if msg.from_ in admin or owner:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
#--------------------------------------------------
elif msg.text in ["å…±æÅ�1�7�â�1�7�1�71¤7 1�71¤7:オãÆ�1�7�Â�1�7�1�71¤7","Share on","Share on"]:
if msg.from_ in admin or owner:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦Â了å¼â�1�7�¬Ã£â�1�7�¬â�1�7�1�71¤7 1�71¤7")
#--------------------------------------------------
elif msg.text in ["å…±æÅ�1�7�â�1�7�1�71¤7 1�71¤7:オãÆ�1�7�â�1�7�1�71¤7 1�71¤7","Share off","Share off"]:
if msg.from_ in admin or owner:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦Â了åâ�1�7�1�71¤7¦Â³Ã¦â 1�71¤7“Âãâ�1�7�¬â€ 1�71¤7")
elif "Sider on" in msg.text:
try:
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
except:
pass
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
wait["Sider"] = True
cl.sendText(msg.to,"Siap On Cek Sider")
elif "Sider off" in msg.text:
if msg.to in cctv['point']:
cctv['cyduk'][msg.to]=False
wait["Sider"] = False
cl.sendText(msg.to, "Cek Sider Off")
else:
cl.sendText(msg.to, "Heh Belom Di Set")
#--------------------------------------------------
elif msg.text in ["Status bot"]:
if msg.from_ in admin or msg.from_ in owner:
md = ""
if wait["contact"] == True: md+="[Mask] CONTACT : [✅]\n"
else: md+="[Mask] CONTACT : [❌]\n"
if wait["autoJoin"] == True: md+="[Mask] AUTOJOIN : [✅]\n"
else: md +="[Mask] AUTOJOIN : [❌]\n"
if wait["leaveRoom"] == True: md+="[Mask] AUTOLEAVE : [✅]\n"
else: md+="[Mask] AUTOLEAVE : [❌]\n"
if wait["timeline"] == True: md+="[Mask] SHARE : [✅]\n"
else:md+="[Mask] SHARE : [❌]\n"
if wait["autoAdd"] == True: md+="[Mask] AUTOADD : [✅]\n"
else:md+="[Mask] AUTOADD : [❌]\n"
if wait["commentOn"] == True: md+="[Mask] COMMENT : [✅]\n"
else:md+="[Mask] COMMENT : [❌]\n"
if wait["likeOn"] == True: md+="[Mask] AUTOLIKE : [✅]\n"
else:md+="[Mask] AUTOLIKE : [❌]\n"
if wait["Wc"] == True: md+="[Mask] WELCOME : [✅]\n"
else:md+="[Mask] WELCOME : [❌]\n"
if wait["Lv"] == True: md+="[Mask] LEAVE : [✅]\n"
else:md+="[Mask] LEAVE : [❌]\n"
if wait["tag"] == True: md+="[Mask] TAG 1 : [✅]\n"
else:md+="[Mask] TAG 1 : [❌]\n"
if wait["auto"] == True: md+="[Mask] AutoBot Join : [✅]\n"
else:md+="[Mask] AutoBot Join : [❌]\n"
if wait["auto"] == True: md+="[Mask] Autoread On : [✅]\n"
else:md+="[Mask] Autoread Off : [❌]\n"
if wait["auto"] == True: md+="[Mask] Auto Sider : [✅]\n"
else:md+="[Mask] Auto Sider : [❌]\n"
if wait["auto"] == True: md+="[Mask] Simisimi On : [✅]\n"
else:md+="[Mask] Simisimi Off : [❌]\n"
if wait["detectMention"] == True: md+="[Mask] Respon on [✅]\n"
else:md+="[Mask] Respon off [❌]\n"
if wait["detectMention2"] == True: md+="[Mask] Respon on [✅]\n"
else:md+="[Mask] Respon off [❌]\n"
cl.sendText(msg.to,md)
msg.contentType = 13
msg.contentMetadata = {'mid': "u3cfa63811888b3a880bc4f348a95b23b"}
cl.sendMessage(msg)
#--------------------------------------------------
elif "album merit " in msg.text:
if msg.from_ in admin or msg.from_ in owner:
gid = msg.text.replace("album merit ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相åâ�1�7�1�71¤7 Œæ²¡åÅ�1�7�¨ãâ�1�7�¬â€ 1�71¤7")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象çšâ�1�7�1�71¤7žÃ§â 1�71¤7ºÂ¸Ã¥â 1�71¤7 Å 1�71¤7"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
cl.sendText(msg.to,mg)
#--------------------------------------------------
elif "album " in msg.text:
if msg.from_ in admin or owner:
gid = msg.text.replace("album ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相åâ�1�7�1�71¤7 Œæ²¡åÅ�1�7�¨ãâ�1�7�¬â€ 1�71¤7")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象çšâ�1�7�1�71¤7žÃ§â 1�71¤7ºÂ¸Ã¥â 1�71¤7 Å 1�71¤7"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
#--------------------------------------------------
elif "album remove " in msg.text:
if msg.from_ in admin or msg.from_ in owner:
gid = msg.text.replace("album remove ","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Deleted albums")
else:
cl.sendText(msg.to,str(i) + "Ã¥Ë 1�7 除äºâ�1�7�1�71¤7 äºâ 1�71¤7¹Ã§Å¡â 1�71¤7žÃ§â 1�71¤7ºÂ¸Ã¥â 1�71¤7 ΋â�1�7�¬â€ 1�71¤7")
elif msg.text in ["Group id","群組åâ�1�7�1�71¤7¦Â¨id"]:
if msg.from_ in admin or owner:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
#--------------------------------------------------
elif msg.text in ["Clear"]:
if msg.from_ in admin or msg.from_ in owner:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"æ‹â�1�7�1�71¤7™Ã§Â»Âäºâ�1�7�1�71¤7 Ã¥â 1�71¤7¦Â¨Ã©Æ’¨çšâ�1�7�1�71¤7žÃ©â 1�71¤7šâ‚¬Ã¨Â¯Â·Ã£â�1�7�¬â�1�7�1�71¤7 1�71¤7")
#--------------------------------------------------
elif "album removeâ†â�1�7�1�71¤7 1�71¤7" in msg.text:
if msg.from_ in admin or owner:
gid = msg.text.replace("album removeâ†â�1�7�1�71¤7 1�71¤7","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Albums deleted")
else:
cl.sendText(msg.to,str(i) + "Ã¥Ë 1�7 除äºâ�1�7�1�71¤7 äºâ 1�71¤7¹Ã§Å¡â 1�71¤7žÃ§â 1�71¤7ºÂ¸Ã¥â 1�71¤7 ΋â�1�7�¬â€ 1�71¤7")
#--------------------------------------------------
elif msg.text in ["自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã¨Â¿Â½Ã¥Å 1�71¤7 :オãÆ�1�7�Â�1�7�1�71¤7","Add on","Auto add:on","自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã¨Â¿Â½Ã¥Å 1�71¤7 :éâ€�1�7�â�1�7�1�71¤7 1�71¤7"]:
if msg.from_ in admin or owner:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦Â了å¼â�1�7�¬Ã£â�1�7�¬â�1�7�1�71¤7 1�71¤7")
#--------------------------------------------------
elif msg.text in ["自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã¨Â¿Â½Ã¥Å 1�71¤7 :オãÆ�1�7�â�1�7�1�71¤7 1�71¤7","Add off","Auto add:off","自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã¨Â¿Â½Ã¥Å 1�71¤7 :éâ€�1�7�Å�1�7�1�71¤7"]:
if msg.from_ in admin or owner:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦Â了åâ�1�7�1�71¤7¦Â³Ã¦â 1�71¤7“Âãâ�1�7�¬â€ 1�71¤7")
#--------------------------------------------------
elif "Message change: " in msg.text:
if msg.from_ in admin or msg.from_ in owner:
wait["message"] = msg.text.replace("Message change: ","")
cl.sendText(msg.to,"message changed")
elif "Message add: " in msg.text:
if msg.from_ in admin or msg.from_ in owner:
wait["message"] = msg.text.replace("Message add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed")
else:
cl.sendText(msg.to,"doneã€â�1�7�1�71¤7 1�71¤7")
#--------------------------------------------------
elif msg.text in ["Message","自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã¨Â¿Â½Ã¥Å 1�71¤7 å•ÂÃ¥â�1�7�¬â�1�7�¢Ã¨ÂªÅ¾Ã§Â¢ÂºÃ¨ÂªÂ�1�7�1�71¤7"]:
if msg.from_ in admin or owner:
if wait["lang"] == "JP":
cl.sendText(msg.to,"message change to\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as followsã€â�1�7�1�71¤7š\n\n" + wait["message"])
#--------------------------------------------------
elif "Comment:" in msg.text:
if msg.from_ in admin or msg.from_ in owner:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"message changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
#--------------------------------------------------
elif "Add comment:" in msg.text:
if msg.from_ in admin or msg.from_ in owner:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif msg.text in ["コãÆ�1�7�¡ãÆ�1�7�³ãÆ�1�7�Ë�1�7�1�71¤7:オãÆ�1�7�Â�1�7�1�71¤7","Comment on","Comment:on","自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã©Â¦â 1�71¤7“Ã�1�7�1�71¤7 Âç•â�1�7�¢Ã¨Â¨â�1�7�¬Ã¯Â¼Å¡Ã©â�1�7�1�71¤7“â�1�7�1�71¤7 1�71¤7"]:
if msg.from_ in admin or owner:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦Â了å¼â�1�7�¬Ã£â�1�7�¬â�1�7�1�71¤7 1�71¤7")
elif msg.text in ["コãÆ�1�7�¡ãÆ�1�7�³ãÆ�1�7�Ë�1�7�1�71¤7:オãÆ�1�7�â�1�7�1�71¤7 1�71¤7","Comment on","Comment off","自åâ�1�7�1�71¤7¹â 1�71¤7¢Ã©Â¦â 1�71¤7“Ã�1�7�1�71¤7 Âç•â�1�7�¢Ã¨Â¨â�1�7�¬Ã¯Â¼Å¡Ã©â�1�7�1�71¤7”Å�1�7�1�71¤7"]:
if msg.from_ in admin or owner:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦Â了åâ�1�7�1�71¤7¦Â³Ã¦â 1�71¤7“Âãâ�1�7�¬â€ 1�71¤7")
elif msg.text in ["Comment","ç•â�1�7�¢Ã¨Â¨â�1�7�¬Ã§Â¢ÂºÃ¨ÂªÂ�1�7�1�71¤7"]:
if msg.from_ in admin or owner:
cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
#---------------------------------------------------------------------------------------------------------------------
elif msg.text in ["Gurl"]:
if msg.from_ in admin or msg.from_ in owner:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#--------------------------------------------------------------------------------------------------------------------
elif msg.text in ["Comment bl "]:
if msg.from_ in admin or owner:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
if msg.from_ in admin or owner:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if msg.from_ in admin or msg.from_ in owner:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["Jam on"]:
if msg.from_ in admin or msg.from_ in owner:
if wait["clock"] == True:
cl.sendText(msg.to,"already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"done")
elif msg.text in ["Jam off"]:
if msg.from_ in admin or msg.from_ in owner:
if wait["clock"] == False:
cl.sendText(msg.to,"already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"done")
elif msg.text in ["Change clock "]:
if msg.from_ in admin or owner:
n = msg.text.replace("Change clock ","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"changed to\n\n" + n)
#-----------------------------------------------
elif msg.text.lower() in ["mentionall","tagall"]:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
jml = len(nama)
if jml <= 100:
tagall(msg.to, nama)
else:
# Mention in chunks of 100; the original unrolled this into five overlapping slices.
for i in range(0, jml, 100):
tagall(msg.to, nama[i:i+100])
cnt = Message()
cnt.text = "Results mention all : "+str(jml) + " Members"
cnt.to = msg.to
cl.sendText(msg.to,"Mention all success")
cl.sendMessage(cnt)
#-----------------------------------------------
elif msg.text in ["Bot pergi"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.sendText(msg.to,"Sɪᴀᴘ ʙᴏᴢᴢ!! Lasanakan!!\n\nBʏᴇ,, Bʏᴇᴇ... " + str(ginfo.name) + "\nJᴀɴɢᴀɴ Lᴜᴘᴀ Bᴀʜᴀɢɪᴀ...")
cl.leaveGroup(msg.to)
print "[Command]Bot pergi"
except:
pass
#-----------------------------------------------
elif "Gruplist" in msg.text:
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "☄ %s \n" % (cl.getGroup(i).name + " 👥 ▄ [ " + str(len (cl.getGroup(i).members))+" ]")
cl.sendText(msg.to, " ☄ [ ♡List Grup♄ ] ☜\n"+ h +"Total Group ▄" +"[ "+str(len(gid))+" ]")
#-----------------------------SALAM or SELAMAT--------------------------------------
elif msg.text in ["Pagi","Pagi all","Pagi semua","Pageeh","Vagi","Vageeh"]:
cl.sendText(msg.to,"Pagi kak...")
cl.sendText(msg.to,"Buruan mandi gih,, bau jigong tuh")
cl.sendText(msg.to,"Wkwkwk")
cl.sendText(msg.to,"Dan buruan sarapan 😁")
elif msg.text in ["Assalamu alaikum","Salamu alaikum","Assalamu ‘alaikum"]:
cl.sendText(msg.to,"وَعَلَيْكُمُ السَّلاَمُ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
cl.sendText(msg.to,"Wa'alaikumsallam.Wr,Wb")
#-----------------------------------------------
elif msg.text == "Set sider":
cl.sendText(msg.to, "Lurking Is Starting!! "+ datetime.today().strftime('%H:%M:%S'))
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['ROM'][msg.to] = {}
wait2['setTime'][msg.to] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
print wait2
elif msg.text in ["Cek sider"]:
if msg.toType == 2:
print "\nRead aktif..."
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "Yang baca ↴\n =========================== %s\n===========================\n\nTukang nyimak ↴\n%s\n===========================\nIn the last seen point:\n[%s]\n===========================" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
print "\nReading Point Set..."
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print "lukers"
cl.sendText(msg.to, "Auto Read Point!!" + (wait2['setTime'][msg.to]))
else:
cl.sendText(msg.to, "Ketik [Set sider] dulu")
#-------------------------------------
elif "Bot rename " in msg.text:
if msg.from_ in admin or msg.from_ in owner:
string = msg.text.replace("Bot rename ","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"UpdateName => " + string + " <= Success")
#-----------------------------------------------
elif msg.text in ['kontol','Kontol','jancuk','Jancuk','jancok','Jancok','asu','Asu','jembut','Jembut','jembot','Jembot','tempek','Tempek','itil','Itil','makmu','Makmu','mak mu','Mak mu']:
msg.contentType = 13
msg.contentMetadata = {'mid': "ua7fb5762d5066629323d113e1266e8ca"} # stray quote/comma removed from the mid
cl.sendMessage(msg)
cl.sendMessage(msg)
cl.sendMessage(msg)
#-----------------------------------------------
elif msg.text in ["Lag"]:
if msg.from_ in admin:
cl.sendText(msg.to,"44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.4
4.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.
440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.55.44.44.44.44.44.44.44.4444.44.44.4.44.4.44.44.4.440.440.004444.4444.44.33.")
cl.sendText(msg.to,"Pekok Har Har")
cl.sendText(msg.to,"Pekok Har Har")
cl.sendText(msg.to,"Pekok Har Har")
cl.sendText(msg.to,"Pekok Har Har")
cl.sendText(msg.to,"Pekok Har Har")
cl.sendText(msg.to,"Pekok Har Har")
cl.sendText(msg.to,"Pekok Har Har")
cl.sendText(msg.to,"Pekok Har Har")
cl.sendText(msg.to,"Pekok Har Har")
cl.sendText(msg.to,"Pekok Har Har")
elif msg.text in ["Sepi"]:
cl.sendText(msg.to,"Mᴇɴᴛɪᴏɴᴀʟʟ Oʀ Tᴀɢᴀʟʟ")
elif msg.text in ["Mister","Mr"]:
cl.sendText(msg.to,"Sɪᴀᴘ ʙᴏᴢᴢ")
elif msg.text in ["Test","test","Tes","tes"]:
cl.sendText(msg.to,"Cᴇᴋ")
cl.sendText(msg.to,"1")
cl.sendText(msg.to,"2")
cl.sendText(msg.to,"3")
cl.sendText(msg.to,"Pᴏꜱɪᴛɪғ ʙᴏᴢᴢ")
cl.sendText(msg.to,"Sᴇʟᴀᴍᴀᴛ ʏᴀ... haha")
elif msg.text in ["Woy","woy","Woi","woi","bot","Bot"]:
jawab = ['Aʜ Kᴜᴘʀᴇᴛ Lᴜ','Mᴜᴋᴀ Lᴜ Kᴀʏᴀ Jᴀᴍʙᴀɴ','Aᴅᴀ Oʀᴀɴɢ ᴋᴀʜ ᴅɪꜱɪɴɪ?','Sᴀɴɢᴇ Eᴜʏ','Aᴅᴀ Pᴇʀᴀᴡᴀɴ Nɢᴀɴɢɢᴜʀ ɢᴀ Cᴏʏ?']
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
#------------- Spam functions start ---------------------#
elif msg.text in ["Up","up","Up Chat","Up chat","up chat","Upchat","upchat"]:
if msg.from_ in admin:
cl.sendText(msg.to,"Kita nge-spam kuy!")
cl.sendText(msg.to,"Kuy XD")
cl.sendText(msg.to,"Kuy haha")
cl.sendText(msg.to,"3")
cl.sendText(msg.to,"2")
cl.sendText(msg.to,"1")
cl.sendText(msg.to,"Up haha")
cl.sendText(msg.to,"Up haha")
cl.sendText(msg.to,"Up haha")
cl.sendText(msg.to,"Up haha")
cl.sendText(msg.to,"Up haha")
cl.sendText(msg.to,"Up haha")
cl.sendText(msg.to,"Up XD")
cl.sendText(msg.to,"Up XD")
cl.sendText(msg.to,"Up XD")
cl.sendText(msg.to,"Up XD")
cl.sendText(msg.to,"Kurang ga? XD")
cl.sendText(msg.to,"KURAAAANG BANYAAK")
cl.sendText(msg.to,"Kurang Mastaah")
cl.sendText(msg.to,"Ok!")
cl.sendText(msg.to,"Tambah ya! XD")
cl.sendText(msg.to,"Up XD")
cl.sendText(msg.to,"Up XD")
cl.sendText(msg.to,"Up XD")
cl.sendText(msg.to,"Up haha")
cl.sendText(msg.to,"Up haha")
cl.sendText(msg.to,"Up haha")
cl.sendText(msg.to,"Up haha")
cl.sendText(msg.to,"Up haha")
cl.sendText(msg.to,"Up haha")
cl.sendText(msg.to,"Up haha")
cl.sendText(msg.to,"Up XD")
cl.sendText(msg.to,"Up XD")
cl.sendText(msg.to,"Udh cukup?")
cl.sendText(msg.to,"Udh cukup XD")
cl.sendText(msg.to,"Cape nih")
cl.sendText(msg.to,"Ok sudah.")
cl.sendText(msg.to,"MAKASIH SEMUA XD")
cl.sendText(msg.to,"Done")
elif msg.text in ["Up up","up up"]:
if msg.from_ in admin:
cl.sendText(msg.to,"Ha? ada apa nih")
cl.sendText(msg.to,"Di up in bos")
cl.sendText(msg.to,"Butuh balon udara nggak?")
cl.sendText(msg.to,"Buat di up in nih")
cl.sendText(msg.to,"Gausah lah ya")
cl.sendText(msg.to,"Up atuh")
cl.sendText(msg.to,"Panjat bos")
cl.sendText(msg.to,"Jangan panjat sosyal aja")
cl.sendText(msg.to,"Panjat panjat pohon")
cl.sendText(msg.to,"yiha")
cl.sendText(msg.to,"Pohon aja di panjat")
cl.sendText(msg.to,"Apalagi kamu.gg unch")
cl.sendText(msg.to,"Maaf, harus kita up in")
cl.sendText(msg.to,"Demi kebaikan bersama sayang")
cl.sendText(msg.to,"Iya sayang")
cl.sendText(msg.to,"Opo koe krungu?")
cl.sendText(msg.to,"Jerite atiku")
cl.sendText(msg.to,"Oaoee..")
cl.sendText(msg.to,"Males lanjutin ah")
cl.sendText(msg.to,"Sepi bat")
cl.sendText(msg.to,"Iya sepi udah udah")
cl.sendText(msg.to,"Gaada yang denger juga")
cl.sendText(msg.to,"Yaiyalah, ini kan ketik ogeb")
cl.sendText(msg.to,"Mending gua nyari BBG dulu")
cl.sendText(msg.to,"Sono huss")
cl.sendText(msg.to,"Up unch")
cl.sendText(msg.to,"Up in dulu bos")
cl.sendText(msg.to,"Ada apa nih")
cl.sendText(msg.to,"Up atuh")
cl.sendText(msg.to,"Maaf di up bos")
elif msg.text in ["Spam"]:
if msg.from_ in admin:
cl.sendText(msg.to,"Aku belum mandi")
cl.sendText(msg.to,"Tak tun tuang")
cl.sendText(msg.to,"Tak tun tuang")
cl.sendText(msg.to,"Tapi masih cantik juga")
cl.sendText(msg.to,"Tak tun tuang")
cl.sendText(msg.to,"Tak tun tuang")
cl.sendText(msg.to,"apalagi kalau sudah mandi")
cl.sendText(msg.to,"Tak tun tuang")
cl.sendText(msg.to,"Pasti cantik sekali")
cl.sendText(msg.to,"yiha")
cl.sendText(msg.to,"Kalau orang lain melihatku")
cl.sendText(msg.to,"Tak tun tuang")
cl.sendText(msg.to,"Badak aku taba bana")
cl.sendText(msg.to,"Tak tun tuang")
cl.sendText(msg.to,"Tak tuntuang")
cl.sendText(msg.to,"Tapi kalau langsuang diidu")
cl.sendText(msg.to,"Tak tun tuang")
cl.sendText(msg.to,"Atagfirullah baunya")
cl.sendText(msg.to,"Males lanjutin ah")
cl.sendText(msg.to,"Sepi bat")
cl.sendText(msg.to,"Iya sepi udah udah")
cl.sendText(msg.to,"Gaada yang denger juga kita nyanyi")
cl.sendText(msg.to,"Nah")
cl.sendText(msg.to,"Mending gua makan dulu")
cl.sendText(msg.to,"Siyap")
cl.sendText(msg.to,"Okeh")
cl.sendText(msg.to,"Katanya owner kita Jomblo ya")
cl.sendText(msg.to,"Iya emang")
cl.sendText(msg.to,"Denger denger si lagi nyari pacar doi")
cl.sendText(msg.to,"Udah ah gosip mulu doain aja biar dapet")
elif msg.text == "Myspam":
if msg.from_ in admin:
cl.sendText(msg.to,"3")
cl.sendText(msg.to,"2")
cl.sendText(msg.to,"1")
cl.sendText(msg.to,"Fuck Off")
cl.sendText(msg.to,"Ku mengejar bus yang mulai berjalan")
cl.sendText(msg.to,"Ku ingin ungkapkan kepada dirimu")
cl.sendText(msg.to,"Kabut dalam hatiku telah menghilang")
cl.sendText(msg.to,"Dan hal yang penting bagiku pun terlihat")
cl.sendText(msg.to,"Walaupun jawaban itu sebenarnya begitu mudah")
cl.sendText(msg.to,"Tetapi entah mengapa diriku melewatkannya")
cl.sendText(msg.to,"Untukku menjadi diri sendiri")
cl.sendText(msg.to,"Ku harus jujur, pada perasaanku")
cl.sendText(msg.to,"Ku suka dirimu ku suka")
cl.sendText(msg.to,"Ku berlari sekuat tenaga")
cl.sendText(msg.to,"Ku suka selalu ku suka")
cl.sendText(msg.to,"Ku teriak sebisa suaraku")
cl.sendText(msg.to,"Ku suka dirimu ku suka")
cl.sendText(msg.to,"Walau susah untukku bernapas")
cl.sendText(msg.to,"Tak akan ku sembunyikan")
cl.sendText(msg.to,"Oogoe daiyamondo~")
cl.sendText(msg.to,"Saat ku sadari sesuatu menghilang")
cl.sendText(msg.to,"Hati ini pun resah tidak tertahankan")
cl.sendText(msg.to,"Sekarang juga yang bisa ku lakukan")
cl.sendText(msg.to,"Merubah perasaan ke dalam kata kata")
cl.sendText(msg.to,"Mengapa sedari tadi")
cl.sendText(msg.to,"Aku hanya menatap langit")
cl.sendText(msg.to,"Mataku berkaca kaca")
cl.sendText(msg.to,"Berlinang tak bisa berhenti")
cl.sendText(msg.to,"Di tempat kita tinggal, didunia ini")
cl.sendText(msg.to,"Dipenuhi cinta, kepada seseorang")
cl.sendText(msg.to,"Ku yakin ooo ku yakin")
cl.sendText(msg.to,"Janji tak lepas dirimu lagi")
cl.sendText(msg.to,"Ku yakin ooo ku yakin")
cl.sendText(msg.to,"Akhirnya kita bisa bertemu")
cl.sendText(msg.to,"Ku yakin ooo ku yakin")
cl.sendText(msg.to,"Ku akan bahagiakan dirimu")
cl.sendText(msg.to,"Ku ingin kau mendengarkan")
cl.sendText(msg.to,"Oogoe daiyamondo~")
cl.sendText(msg.to,"Jika jika kamu ragu")
cl.sendText(msg.to,"Takkan bisa memulai apapun")
cl.sendText(msg.to,"Ungkapkan perasaanmu")
cl.sendText(msg.to,"Jujurlah dari sekarang juga")
cl.sendText(msg.to,"Jika kau bersuar")
cl.sendText(msg.to,"Cahaya kan bersinar")
cl.sendText(msg.to,"Ku suka dirimu ku suka")
cl.sendText(msg.to,"Ku berlari sekuat tenaga")
cl.sendText(msg.to,"Ku suka selalu ku suka")
cl.sendText(msg.to,"Ku teriak sebisa suaraku")
cl.sendText(msg.to,"Ku suka dirimu ku suka")
cl.sendText(msg.to,"Sampaikan rasa sayangku ini")
cl.sendText(msg.to,"Ku suka selalu ku suka")
cl.sendText(msg.to,"Ku teriakkan ditengah angin")
cl.sendText(msg.to,"Ku suka dirimu ku suka")
cl.sendText(msg.to,"Walau susah untuk ku bernapas")
cl.sendText(msg.to,"Tak akan ku sembunyikan")
cl.sendText(msg.to,"Oogoe daiyamondo~")
cl.sendText(msg.to,"Katakan dengan berani")
cl.sendText(msg.to,"Jika kau diam kan tetap sama")
cl.sendText(msg.to,"Janganlah kau merasa malu")
cl.sendText(msg.to,"“Suka” itu kata paling hebat!")
cl.sendText(msg.to,"“Suka” itu kata paling hebat!")
cl.sendText(msg.to,"“Suka” itu kata paling hebat!")
cl.sendText(msg.to,"Ungkapkan perasaanmu")
cl.sendText(msg.to,"Jujurlah dari sekarang juga..")
cl.sendText(msg.to,"Anugerah terindah adalah ketika kita masih diberikan waktu untuk berkumpul bersama orang-orang yang kita sayangi.")
cl.sendText(msg.to,"Cuma dirimu seorang yang bisa meluluhkan hati ini. Kamulah yang terindah dalam hidupku.")
cl.sendText(msg.to,"Aku ingin meraih kembali cintamu menjadi kenyataan. Saat diriku dalam siksaan cinta, dirimu melenggang pergi tanpa pernah memikirkan aku.")
cl.sendText(msg.to,"Tak ada yang salah dengan CINTA. Karena ia hanyalah sebuah kata dan kita sendirilah yang memaknainya.")
cl.sendText(msg.to,"Mencintaimu adalah inginku. memilikimu adalah dambaku. meski jarak jadi pemisah, hati tak akan bisa terpisah.")
cl.sendText(msg.to,"Dalam cinta ada bahagia, canda, tawa, sedih, kecewa, terluka, semua itu tidak akan terlupakan dalam hal cinta, itu yang artinya cinta.")
cl.sendText(msg.to,"Seseorang yang berarti, tak akan dengan mudah kamu miliki. Jika kamu sungguh mencintai, jangan pernah berhenti berusaha untuk hati.")
cl.sendText(msg.to,"Jika esok pagi menjelang, akan aku tantang matahari yang terbangun dari tidur lelap nya.")
cl.sendText(msg.to,"Ketulusan cinta hanya dapat dirasakan mereka yang benar-benar mempunyai hati tulus dalam cinta.")
cl.sendText(msg.to,"Kamu tak perlu menjadikan dirimu cantik/ganteng untuk bisa memilikiku, kamu hanya perlu menunjukkan bahwa aku membutuhkanmu.")
cl.sendText(msg.to,"Ada seribu hal yang bisa membuatku berpikir ununtuk meninggalkanmu, namun ada satu kata yang membuatku tetap disini. Aku Cinta Kamu.")
cl.sendText(msg.to,"Aku pernah jatuhkan setetes air mata di selat Sunda. Di hari aku bisa menemukannya lagi, itulah waktunya aku berhenti mencintaimu.")
cl.sendText(msg.to,"Cinta adalah caraku bercerita tentang dirimu, caraku menatap kepergian mu dan caraku tersenyum, saat menatap indah wajahmu.")
cl.sendText(msg.to,"Datang dan pergi seperti angin tidak beraturan dan arah merasakan cinta dalam kehidupan kadang ku bahagia kadang ku bersedih.")
cl.sendText(msg.to,"Cinta adalah caraku bercerita tentang dirimu, caraku menatap kepergian mu dan caraku tersenyum, saat menatap indah wajahmu.")
cl.sendText(msg.to,"Saat jarak memisahkan, satu yang harus kamu ketahui. Akan aku jaga cinta ini ununtukmu.")
cl.sendText(msg.to,"Bersandarlah di pundaku sampai kau merasakan kenyamanan, karena sudah keharusan bagiku ununtuk memberikanmu rasa nyaman.")
cl.sendText(msg.to,"Air mata merupakan satu-satunya cara bagimana mata berbicara ketika bibir tidak mampu menjelaskan apa yang membuatmu terluka.")
cl.sendText(msg.to,"Hidup tidak bisa lebih baik tanpa ada cinta, tapi cinta dengan cara yang salah akan membuat hidupmu lebih buruk.")
cl.sendText(msg.to,"Mencintaimu hanya butuh waktu beberapa detik, namun untuk melupakanmu butuh waktu seumur hidupku.")
cl.sendText(msg.to,"Hidup tidak bisa lebih baik tanpa ada cinta, tapi cinta dengan cara yang salah akan membuat hidupmu lebih buruk.")
cl.sendText(msg.to,"Mencintaimu hanya butuh waktu beberapa detik, namun ununtuk melupakanmu butuh waktu seumur hidupku.")
cl.sendText(msg.to,"Cinta merupakan keteguhan hati yang ditambatkan pada kemanusiaan yang menghubungkan masa lalu, masa kini dan masa depan.")
cl.sendText(msg.to,"Ketika mencintai seseorang, cintailah apa adanya. Jangan berharap dia yang sempurna, karena kesempurnaan adalah ketika mencinta tanpa syarat.")
cl.sendText(msg.to,"Cinta bukanlah tentang berapa lama kamu mengenal seseorang, tapi tentang seseorang yang membuatmu tersenyum sejak kamu mengenalnya.")
cl.sendText(msg.to,"Ketika mereka bertanya tentang kelemahanku, aku ingin mengatidakan bahwa kelemahanku itul adalah kamu. Aku merindukanmu di mana-mana dan aku sanagat mencintaimu.")
cl.sendText(msg.to,"Kehadiranmu dalam hidupku, aku tahu bahwa aku bisa menghadapi setiap tantangan yang ada di hadapanku, terima kasih telah menjadi kekuatanku.")
cl.sendText(msg.to,"Meneriakkan namamu di deras hujan, memandangmu dari kejauhan, dan berdo’a di hening malam. Cinta dalam diam ini lah yang mampu kupertahankan.")
cl.sendText(msg.to,"Perempuan selalu menjaga hati orang yang dia sayangsehingga hati dia sendiri tersiksa. inilah pengorbanan perempuan ununtuk lelaki yang tidak pernah sadar.")
cl.sendText(msg.to,"Ketika kau belum bisa mengambil keputusan ununtuk tetap bertahan dengan perasaan itu, sabarlah, cinta yang akan menguatkanmu.")
cl.sendText(msg.to,"Aku tidak akan pernah menjajikan ununtuk sebuah perasaan, tapi aku bisa menjanjikan ununtuk sebuah kesetiaan.")
cl.sendText(msg.to,"Cinta yang sebenarnya tidak buta, cinta yaitu adalah hal yang murni, luhur serta diharapkan. Yang buta itu jika cinta itu menguasai dirimu tanpa adanya suatu pertimbangan.")
cl.sendText(msg.to,"Aku tercipta dalam waktu, ununtuk mengisi waktu, selalu memperbaiki diri di setiap waktu, dan semua waktu ku adalah ununtuk mencintai kamu.")
cl.sendText(msg.to,"Cinta akan indah jika berpondasikan dengan kasih sang pencipta. Karena sesungguhnya Cinta berasal dari-Nya Dan cinta yang paling utama adalah cinta kepada Yang Kuasa.")
cl.sendText(msg.to,"Bagi aku, dalam hidup ini, hidup hanya sekali, cinta sekali dan matipun juga sekali. Maka tidak ada yang namanya mendua.")
cl.sendText(msg.to,"Tuhan..jagalah ia yang jauh disana, lindungi tiap detik hidup yang ia lewati,sayangi dia melebihi engkau menyayangiku.")
cl.sendText(msg.to,"Kapan kau akan berhenti menyakitiku, lelah ku hadapi semua ini tapi aku tidak bisa memungkiri aku sangat mencintaimu.")
cl.sendText(msg.to,"Ketidakutan terbesar dalam hidupku bukan kehilanganmu, tapi melihat dirimu kehilangan kebahagiaanmu.")
cl.sendText(msg.to,"Cinta yang sesungguhnya akan mengatidakan aku butuh kamu karna aku siap ununtuk mencintaimu dan menjalani suka duka bersamamu")
cl.sendText(msg.to,"Seseorang pacar yang baik adalah dia yang JUJUR dan tidak pernah membuat kamu selalu bertanya-tanya atau selalu mencurigai dia")
cl.sendText(msg.to,"Cinta bukanlah sebuah kata cinta, yang sebenarnya adalah cinta yang menyentuh hati dan perasaan")
cl.sendText(msg.to,"Kau datang di saat ke egoisan akan cinta tengah mendera. Membawa cahaya dan kedamaian, membuatku tidak mudah menyerah ununtuk merengkuh kisah cinta bersamamu")
cl.sendText(msg.to,"Aku sangat menyukai kebersamaan karena kebersamaan mengajarkan kita tentang suka dan duka di lalui bersama")
cl.sendText(msg.to,"Mungkin Tuhan sengaja memberi kita berjumpa dengan orang yang salah sebelum menemui insan yang betul supaya apabila kita akhirnya menemui insan yang betul, kita akan tahu bagaimana ununtuk bersyukur dengan pemberian dan hikmah di balik pemberian tersebut.")
cl.sendText(msg.to,"Getaran di hatiku yang lama haus akan belaianmu seperti saat dulu dan kau bisikan kata ‘aku cinta padamu’ aku merindukannya")
cl.sendText(msg.to,"Terkadang air mata adalah tanda kebahagiaan yang tidak terucapkan. Dan senyuman adalah tanda sakit yang mencoba ununtuk kuat")
cl.sendText(msg.to,"Dicintai dan disayangi kamu adalah anugerah terindah yang tuhan berikan padaku.")
cl.sendText(msg.to,"Mencintai kamu butuh waktu beberapa detik, Namun melupakanmu butuh waktu ku seumur hidup.")
cl.sendText(msg.to,"Datang dan pergi seperti angin tidak beraturan dan arah merasakan cinta dalam kehidupan kadang aku bahagia dan juga kadang aku bersedih.")
cl.sendText(msg.to,"Air mata merupakan satu-satunya cara bagimana mata berbicara ketika bibir tidak mampu lagi menjelaskan apa yang membuatmu terluka.")
cl.sendText(msg.to,"Jauh sebelum bertemu denganmu, aku telah mengenalmu dalam doaku.")
cl.sendText(msg.to,"Mungkin dia tidak sadar bahwa aku itu cemburu dan mungkin juga dia tidak merasa bahwa aku sangat terluka, tidak mendengar bahwa hatiku sedang menangis.")
cl.sendText(msg.to,"Kehadirmu membawa cinta, memberi bahagia, dan juga rasa rindu yang tiada pernah ada akhirnya.")
cl.sendText(msg.to,"Aku nngak mau jadi wakil rakyat, aku maunya jadi wali murid yang ngambil raport anak kita besok.")
cl.sendText(msg.to,"Seperti hujan yang turun di tanah yang tandus, seperti itulah arti hadirmu dengan cinta dan kasih sayang untukku.")
cl.sendText(msg.to,"Tanda-tanda cinta adalah ketika anda merasa bahwa kebahagiaan orang tersebut lebih penting daripada kebahagiaanmu sendiri.")
cl.sendText(msg.to,"Cinta tidak hanya apa yang anda rasakan, tetapi apa yang harus anda lakukan.")
cl.sendText(msg.to,"Cinta adalah sebuah kekuatan untuk melihat kesamaan dan tidak kesamaan.")
cl.sendText(msg.to,"Cinta adalah pengalaman penuh emosi yang dirasakan banyak orang tetapi hanya beberapa orang saja yang bisa menikmatinya.")
cl.sendText(msg.to,"Cinta adalah berbagi. Karena walau ada di dua raga yang berbeda, setiap pasangan hanya memiliki satu hati.")
cl.sendText(msg.to,"Saat kita berjauhan, sebenarnya hanya raga kitalah yang jauh. Namun hati kita selalu dekat, karena hatiku ada di hatimu.")
cl.sendText(msg.to,"Cinta datang dengan pengorbanan yang akan memberikan petunjuk siapa diri kita yang sebenarnya.")
cl.sendText(msg.to,"Cinta begitu lembut dan merdu, namun jangan kau gunankan untuk merayu. Karena rayuan hanyalah akan mengosongkan makna kecintaan yang sesungguhnya.")
cl.sendText(msg.to,"Cinta bukanlah penuntutan, penguasaan, pemaksaan, dan pengintimidasian. Tak lain itu hanyalah cara manusia mendefinisikannya. Karena cinta adalah perjuangan, pengorbanan, tanggungjawab, kejujuran, dan keikhlasan.")
cl.sendText(msg.to,"Derajat cinta hanya bisa diukur dengan seberapa besar “Pemberian” yang kita korbankan.")
elif msg.text in ["Ngantuk","Sleep","Nite","Good night"]:
cl.sendText(msg.to,"Have a nice dream Har Har")
cl.sendText(msg.to,"Have a nice dream Har Har")
cl.sendText(msg.to,"Have a nice dream Har Har")
elif msg.text in ["Pekok","Pea","Dudul"]:
cl.sendText(msg.to,"Har Har")
cl.sendText(msg.to,"Har Har")
cl.sendText(msg.to,"Har Har")
elif msg.text in ["PING","Ping","ping"]:
cl.sendText(msg.to,"PONG double thumbs upHar Har")
cl.sendText(msg.to,"PONG double thumbs upHar Har")
cl.sendText(msg.to,"PONG double thumbs upHar Har")
elif msg.text in ["Bot resek ae coeg","Berisik","Rame ae","Rame wae","Sepi bet","Sepi banget","Sepi Bgt"]:
cl.sendText(msg.to,"Kasian dy jones double thumbs upHar Har")
cl.sendText(msg.to,"double thumbs upHar Har")
cl.sendText(msg.to,"double thumbs upHar Har")
elif "Pap creator" in msg.text:
tanya = msg.text.replace("Pap creator","")
link = ["http://dl.profile.line-cdn.net/0hMJ3w03iOEmUODj51FXRtMjJLHAh5IAMtYGhbUHsOSAclNlYxMW4NAnxaSgEmOAUxZThaCikPRFVz","http://dl.profile.line-cdn.net/0hMJ3w9GpxEmUODj6JvWxtMjJLHAh5IAMtYGhbUHsOSAclNlYxMW4NAnxaSgEmOAUxZThaCikPRFVz","http://dl.profile.line-cdn.net/0hMJ3wzUdrEmUODj6JveNtMjJLHAh5IAMtYGhbUHsOSAclNlYxMW4NAnxaSgEmOAUxZThaCikPRFVz","http://dl.profile.line-cdn.net/0hMJ3w5Xh8EmUODj6JvY9tMjJLHAh5IAMtYGhbUHsOSAclNlYxMW4NAnxaSgEmOAUxZThaCikPRFVz"]
pilih = random.choice(link)
cl.sendImageWithURL(msg.to,pilih)
#----------------------
elif "Pap cecan" in msg.text:
tanya = msg.text.replace("Pap cecan","")
jawab = ("https://i.pinimg.com/736x/fa/b0/de/fab0def5ba3108d51ba40747791bb089.jpg","https://i.pinimg.com/736x/8b/c6/0e/8bc60e8fd6fb5d142a074b6d2cf5c7ed.jpg","https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcQAa0KQ8XfoVfKRh82Ys63AX3VcuPml1JJFLk7iTEtMpmd7OzbN-yk_MGK6","https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRPMwr1Igswf8wgrTURHbGAt9jn54SvimA6Ps6W6lCtItkrh4I-kA","https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRg5SRVDjILsjUyBeLkBnbV96kX22_1mplLyjfCKws6nv8E_VtMDyV07e56bw","https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTOXZ4yFF8R8vPVmEl21Txhvzh4YpUJkJ2uuO3KQLUzYIEVsuT9")
jawaban = random.choice(jawab)
cl.sendImageWithURL(msg.to,jawaban)
#-----------------------------------------------
#----------------------
elif "Pap cogan" in msg.text:
tanya = msg.text.replace("Pap cogan","")
jawab = ("https://i.pinimg.com/736x/41/9b/a5/419ba5606edf61dbab6dfdcc8014624d.jpg","https://i.pinimg.com/736x/38/9c/b1/389cb1203841730a1a8ba322daa7ecb0.jpg","https://i.pinimg.com/736x/76/e3/dc/76e3dc311ddbd61f666083b963910cea.jpg","https://i.pinimg.com/736x/e4/96/67/e496676ca6ea785c8ca5d28f514f9b69.jpg","https://i.pinimg.com/736x/c7/c9/d6/c7c9d6ee5e7d5214d89e3d8bab964497.jpg","https://i.pinimg.com/736x/98/79/5c/98795c07ad9b84ef22e4a6c2cdb135cc.jpg","https://i.pinimg.com/736x/63/fe/b0/63feb07620c1fab54e98ed2139be8aae.jpg","https://i.pinimg.com/736x/66/fc/f2/66fcf2d7d405398f8f163c4ea61aafbf.jpg","https://i.pinimg.com/736x/d9/52/ca/d952caf7b7de45d70f058be2b44e28b3.jpg","https://i.pinimg.com/736x/34/59/c5/3459c5208c819675eff6273210eed009.jpg","https://i.pinimg.com/736x/2a/55/76/2a557666df14a2594f6f3aade212021e.jpg","https://i.pinimg.com/736x/f0/b7/d5/f0b7d5140ec2fb65e58a53bef4506b52.jpg","https://i.pinimg.com/736x/ea/7b/4d/ea7b4d364c0150060e6b9bca249527b9.jpg","https://i.pinimg.com/736x/05/45/a4/0545a45040b9e368726bc134abf78075.jpg","https://i.pinimg.com/736x/f5/92/3a/f5923a99bfd83e0d8f7c0362e649c33a.jpg","http://dl.profile.line-cdn.net/0hMJ3wh4HFEmUODj6JvBNtMjJLHAh5IAMtYGhbUHsOSAclNlYxMW4NAnxaSgEmOAUxZThaCikPRFVz")
jawaban = random.choice(jawab)
cl.sendImageWithURL(msg.to,jawaban)
#----------------------
elif "Pap abs" in msg.text:
tanya = msg.text.replace("Pap abs","")
jawab = ("https://i.pinimg.com/736x/80/1f/e8/801fe86de5b3768ac2994230b1a579e2.jpg","https://i.pinimg.com/736x/a0/e4/89/a0e489d5aeb8cc33c902f49b3b1f8006.jpg","https://i.pinimg.com/736x/91/b0/ee/91b0ee956c46b29f74b0e6d015be3255.jpg","https://i.pinimg.com/736x/f4/92/4d/f4924d75fe3170a73929fa3408592c86.jpg","https://i.pinimg.com/736x/d5/31/ba/d531ba0b7e72056eaedffa54620707e9.jpg","https://i.pinimg.com/736x/51/9b/99/519b9954e1b2ca5f4ab18a4e7c325619.jpg","https://i.pinimg.com/736x/3c/31/8c/3c318cae8e2a5e41ea1ed326737bf12f.jpg","https://i.pinimg.com/736x/87/d3/cb/87d3cb48f2e8eef33a49cd28d971d14b.jpg","https://i.pinimg.com/736x/0d/a3/57/0da357eeeeb9711317f2755a525d07db.jpg","https://i.pinimg.com/736x/09/7a/22/097a2296802dc6535edf1f10d35e64e8.jpg","https://i.pinimg.com/736x/e7/75/cc/e775cc97b9d52777f561daf284ace68b.jpg","https://i.pinimg.com/736x/76/bd/bf/76bdbfa728dcc6dfdd90cb816310af75.jpg","https://i.pinimg.com/736x/49/3a/98/493a988a4872216568844b319f022ac9.jpg","https://i.pinimg.com/736x/f0/f1/cf/f0f1cf3a347dd44c7416ca7baf2da7ed.jpg")
jawaban = random.choice(jawab)
cl.sendImageWithURL(msg.to,jawaban)
#-----------------------------------------------#----------------------
#----------------------
elif "Pap toket" in msg.text:
tanya = msg.text.replace("Pap toket","")
jawab = ("https://encrypted-tbn2.gstatic.com/images?q=tbn:ANd9GcTilO50kExe4q_t-l8Kfn98sxyrHcbWPWCu2GP2SNgg8XWGMaZc8h5zaxAeVA","https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQKgSYYgB33GP3LAvVSYxKjDlbPokmtzSWjbWJogz8lbZMNSyvqJTE3qWpwBg","https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTgwKO_CAdZpSlXVVfA29qglGQR00WHkeqq4JakyYDuzIW2tKhvGg","https://encrypted-tbn1.gstatic.com/images?q=tbn:ANd9GcSC3ZMq4PnCX5dj7Fc_N6HOG6R_XrmOM7r6uBtpEcBfbO4hMEXQirK_lU_ePw","https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRgynJUxS4uYgaIiV_R6e4FY62QfhYRUEgYZg6psfJzWH_ci4dFng")
jawaban = random.choice(jawab)
cl.sendImageWithURL(msg.to,jawaban)
#-----------------------------------------------#----------------------
elif "Pap anu" in msg.text:
tanya = msg.text.replace("Pap anu","")
jawab = ("https://encrypted-tbn1.gstatic.com/images?q=tbn:ANd9GcQFFKdXErF56KzAa4oWnWQT34jmGKJ66lj1g0hnN4zwYh9GgW0dHWZfRnuM","https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcQTn4_JMD1ZAg-XIk6JZ1Crhz9gtXEIS8AcjTA3SYmazAutt7ekHw","https://encrypted-tbn1.gstatic.com/images?q=tbn:ANd9GcTIVuITo7KicaU6UwPhol1Rvkq4aQwznly8Xl2SiTlAa_1FrSHuwhwV5XoElA")
jawaban = random.choice(jawab)
cl.sendImageWithURL(msg.to,jawaban)
elif msg.text in ["Raisa"]:
try:
cl.sendImageWithURL(msg.to, "https://cdn.brilio.net/news/2017/05/10/125611/750xauto-selalu-tampil-cantik-memesona-ini-harga-10-sepatu-raisa-andriana-170510q.jpg")
except Exception as e:
cl.sendMessage(msg.to, str(e))
#-----------------------------------------------
elif msg.text in ["Mode:on","mode:on"]:
if msg.from_ in admin or owner:
if wait["Wc"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"noтιғ joιn on")
else:
wait["Wc"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
if wait["Lv"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"noтιғ leave on")
else:
wait["Lv"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
if wait["tag"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Tag On")
else:
wait["tag"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tag On")
else:
cl.sendText(msg.to,"already on")
#=================================================
elif msg.text in ["Mode Off","mode off"]:
if msg.from_ in admin or owner:
if wait["Wc"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"noтιғ joιn oғғ")
else:
wait["Wc"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Nayapa yg gabung already oғғ")
if wait["Lv"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"noтιғ leave oғғ")
else:
wait["Lv"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Nayapa yg left already oғғ")
if wait["tag"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already Tag off")
else:
cl.sendText(msg.to,"Tag Off")
else:
wait["tag"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tag Off")
else:
cl.sendText(msg.to,"Already Tag off")
#===================================================
elif msg.text in ["Creator"]:
msg.contentType = 13
msg.contentMetadata = {'mid': "u3cfa63811888b3a880bc4f348a95b23b"}
cl.sendMessage(msg)
cl.sendText(msg.to,'Mʏ Cʀᴇᴀᴛᴏʀ👉 line.me/ti/p/~tak.dapat.tidur')
#------------- Creator function end -----------------#
elif "Spam: " in msg.text:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam: "+str(txt[1])+" "+str(jmlh)+" ","")
tulisan = jmlh * (teks+"\n")
#Vicky Kull~
if txt[1] == "on":
if jmlh <= 100000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of Range!")
elif txt[1] == "off":
if jmlh <= 100000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out Of Range!")
#----------------------------------------------------
elif "Botstatus " in msg.text:
if msg.from_ in admin or owner:
string = msg.text.replace("Botstatus ","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
else:
cl.sendText(msg.to,"Done")
#-----------------------------------------------
elif "dubbing " in msg.text.lower():
say = msg.text.lower().replace("dubbing ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
#--------------------
elif 'wikipedia: ' in msg.text.lower():
try:
wiki = msg.text.lower().replace("wikipedia: ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=3)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
#-----------------------------------------------
elif "Apakah " in msg.text:
tanya = msg.text.replace("Apakah ","")
jawab = ("Ya","Tidak","Bisa Jadi","Mungkin")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
#-----------------------------------------------
elif "Dosa @" in msg.text:
tanya = msg.text.replace("Dosa @","")
jawab = ("60%","70%","80%","90%","100%","Tak terhingga")
jawaban = random.choice(jawab)
cl.sendText(msg.to,"Dosanya " + tanya + "adalah " + jawaban + "\nBanyak banyak tobat Nak ")
elif "Pahala @" in msg.text:
tanya = msg.text.replace("Pahala @","")
jawab = ("0%","20%","40%","50%","60%","Tak ada")
jawaban = random.choice(jawab)
cl.sendText(msg.to,"Pahalanya " + tanya + "adalah " + jawaban + "\nTobatlah nak")
#-----------------------------------------------
elif "Steal group" in msg.text:
group = cl.getGroup(msg.to)
path =("http://dl.profile.line-cdn.net/" + group.pictureStatus)
cl.sendImageWithURL(msg.to, path)
#-----------------------------------------------
elif "Name: @" in msg.text:
_name = msg.text.replace("Name: @","")
_nametarget = _name.rstrip(" ")
gs = cl.getGroup(msg.to)
for h in gs.members:
if _nametarget == h.displayName:
cl.sendText(msg.to,"[DisplayName]:\n" + h.displayName )
else:
pass
elif "Bio: @" in msg.text:
_name = msg.text.replace("Bio: @","")
_nametarget = _name.rstrip(" ")
gs = cl.getGroup(msg.to)
for h in gs.members:
if _nametarget == h.displayName:
cl.sendText(msg.to,"[Status]:\n" + h.statusMessage )
else:
pass
elif "Getprofile:" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cl.sendText(msg.to,"Nᴀᴍᴀ :\n" + contact.displayName + "\n\nBɪᴏ :\n" + contact.statusMessage)
cl.sendText(msg.to,"Pʀᴏғɪʟᴇ Pɪᴄᴛᴜʀᴇ " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cᴏᴠᴇʀ " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
except:
pass
elif "Getinfo:" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,"[Dɪꜱᴘʟᴀʏ Nᴀᴍᴇ]:\n" + contact.displayName + "\n\n[Mɪᴅ]:\n" + contact.mid + "\n\n[Bɪᴏ]:\n" + contact.statusMessage + "\n\n[Pʀᴏғɪʟᴇ Pɪᴄᴛᴜʀᴇ]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\n[Cᴏᴠᴇʀ]:\n" + str(cu))
except:
cl.sendText(msg.to,"[Dɪꜱᴘʟᴀʏ Nᴀᴍᴇ]:\n" + contact.displayName + "\n\n[Mɪᴅ]:\n" + contact.mid + "\n\n[Bɪᴏ]:\n" + contact.statusMessage + "\n\n[Pʀᴏғɪʟᴇ Pɪᴄᴛᴜʀᴇ]:\n" + str(cu))
elif "Contact:" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)
elif "Info: @" in msg.text:
_name = msg.text.replace("Info: @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
xname = g.displayName
xlen = str(len(xname)+1)
msg.contentType = 0
kelamin = ("Waria","Laki-laki","Perempuan","Tidak Diketahui","Bencong","Kalau pagi cowo","Kalau pagi cewe")
wajah = ("Standar","Ganteng","Cantik","Beruk","Hancur","Kembaran miper","Tidak beraturan")
status = ("Menikah","Pacaran","Jones","Gamon dari mantan")
k = random.choice(kelamin)
w = random.choice(wajah)
s = random.choice(status)
cl.sendText(msg.to,"Dᴇᴛᴀɪʟ ɪɴғᴏ :\n Nᴀᴍᴀ : "+xname+"\n Kᴇʟᴀᴍɪɴ : "+k+"\n Wᴀᴊᴀʜ : "+w+"\n Sᴛᴀᴛᴜꜱ Kᴇʜɪᴅᴜᴘᴀɴ : "+s)
elif "Status: @" in msg.text:
_name = msg.text.replace("Status: @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
xname = g.displayName
xlen = str(len(xname)+1)
msg.contentType = 0
kelamin = ("Keturunan darah biru","Keturunan darah kotor","Saudaranya miper","Keturunan kerajaan","Keturunan ubab","Anaknya miper","Kembaran dijjah")
wajah = ("Gajelas","Digantungin doi","Status ngambang kek anu","Pacaran","Bentar lagi Nikah","Menikah","Jomblo","Jonez seumur hidup","Menyedihkan")
status = ("Jodohnya miper","Jodohnya Dijjah","Jodohnya artis","Jodohnya dari khayangan","Gapunya jodoh","Jodohnya ganti ganti","Kan jonez gapunya jodoh:'v")
k = random.choice(kelamin)
w = random.choice(wajah)
s = random.choice(status)
cl.sendText(msg.to,"Dᴇᴛᴀɪʟ ɪɴғᴏ :\n Nᴀᴍᴀ : "+xname+"\n Kᴇʟᴀᴍɪɴ : "+k+"\n Sᴛᴀᴛᴜꜱ : "+w+"\n Jᴏᴅᴏʜ : "+s)
elif "love " in msg.text:
tanya = msg.text.replace("love ","")
jawab = ("10%\nCoba lah untuk melupakan","20%\nKu tak tau lagi:'","30%\nButuh perjuangan yang berat inih","40%\nCobalah saling mencimtai dengan tulus\nIngatlah kenangan indah kalian","50%\nSegeralah mengerti satu sama lain","60%\nLebih perhatian lagi oke","70%\nAyo sedikit lagi","80%\nWahhh, ada kemungkinan kalian jodoh","90%\nAyo sedikit lgi kak","100%\nKeterangan Moga - Moga Langgeng Ya Kak","0%\nKeterangan Ngak Cinta Sama Sekali :v")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
elif msg.text in ["Ajg","Bgst","Bacot","Tai","Bazeng","Anjir","Fck","Fuck","Najiz","Bego","Najis"]:
# if msg.from_ in admin:
cl.sendText(msg.to,"Hayo jangan ngomong kasar kak")
cl.sendText(msg.to,"Aku kick nih.gg")
if msg.contentType == 16:
if wait['likeOn'] == True:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1005)
cl.like(url[25:58], url[66:], likeType=1002)
cl.like(url[25:58], url[66:], likeType=1004)
cl.like(url[25:58], url[66:], likeType=1003)
cl.like(url[25:58], url[66:], likeType=1001)
cl.comment(url[25:58], url[66:], wait["comment1"])
cl.comment(url[25:58], url[66:], wait["comment2"])
cl.comment(url[25:58], url[66:], wait["comment3"])
cl.comment(url[25:58], url[66:], wait["comment4"])
cl.comment(url[25:58], url[66:], wait["comment5"])
cl.sendText(msg.to,"Like Success")
wait['likeOn'] = False
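# contentType 16 is a shared Timeline post. The home ID and post ID are sliced
# out of postEndUrl (url[25:58] and url[66:]), every reaction type 1001-1005 is
# applied, the five stored comments are posted, then the one-shot flag resets.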
if op.type == 26:
msg = op.message
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
cl.sendText(msg.to,data['result']['response'].encode('utf-8'))
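# op.type 26 is an incoming-message event. When simiSimi is enabled for this
# room, the text is forwarded to the ntcorp.us chatbot API and the reply is
# relayed back; status 200 with result 100 marks a usable response.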
#-----------------------------------------------
elif "zodiak " in msg.text:
tanggal = msg.text.replace("zodiak ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
cl.sendText(msg.to,"Tanggal Lahir: "+lahir+"\n\nUsia: "+usia+"\n\nUltah: "+ultah+"\n\nZodiak: "+zodiak)
#----------------------------------------------
elif "Stalk " in msg.text:
print "[Command]Stalk executing"
stalkID = msg.text.replace("Stalk ","")
subprocess.call(["instaLooter",stalkID,"tmp/","-n","1"])
files = glob.glob("tmp/*.jpg")
for file in files:
os.rename(file,"tmp/tmp.jpg")
fileTmp = glob.glob("tmp/tmp.jpg")
if not fileTmp:
cl.sendText(msg.to, "Image not found, maybe the account haven't post a single picture or the account is private")
print "[Command]Stalk,executed - no image found"
else:
image = upload_tempimage(client)
cl.sendText(msg.to, format(image['link']))
subprocess.call(["sudo","rm","-rf","tmp/tmp.jpg"])
print "[Command]Stalk executed - succes"
#-------------------------------------------------------------
elif "Gbc: " in msg.text:
if msg.from_ in admin or msg.from_ in owner:
bctxt = msg.text.replace("Gbc: ", "")
n = cl.getGroupIdsJoined()
for manusia in n:
cl.sendText(manusia, (bctxt))
elif "Pm cast: " in msg.text:
if msg.from_ in admin or msg.from_ in owner:
bctxt = msg.text.replace("Pm cast: ", "")
t = cl.getAllContactIds()
for manusia in t:
cl.sendText(manusia,(bctxt))
elif "Fbc: " in msg.text:
if msg.from_ in admin or msg.from_ in owner:
bctxt = msg.text.replace("Fbc: ", "")
t = cl.getAllContactIds()
for manusia in t:
cl.sendText(manusia, (bctxt))
#------------------------------------------------------
elif "Gethome @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("Gethome @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
#-----------------------------------------------
elif "Getpp @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getpp @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
#--------------------------------------------
elif msg.text in ["Steal contact"]:
wait["contact"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Like:me","Like me"]: #Semua Bot Ngelike Status Akun Utama
print "[Command]Like executed"
cl.sendText(msg.to,"Like Status Owner")
try:
likeme()
except:
pass
elif msg.text in ["Like:friend","Like friend"]: #Semua Bot Ngelike Status Teman
print "[Command]Like executed"
cl.sendText(msg.to,"Like Status Teman")
try:
likefriend()
except:
pass
elif msg.text in ["Auto like"]:
wait["likeOn"] = True
cl.sendText(msg.to,"Shere Post Kamu Yang Mau Di Like!")
elif msg.text in ["Steal contact"]:
wait["steal"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Giftbycontact"]:
wait["gift"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Autolike on"]:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["likeOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already。")
elif msg.text in ["Autolike off"]:
if wait["likeOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["likeOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already")
#--------------------------
elif msg.text in ["Njoin on"]:
if wait["Wc"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"noтιғ joιn on")
else:
wait["Wc"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
elif msg.text in ["Njoin off"]:
if wait["Wc"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"noтιғ joιn oғғ")
else:
wait["Wc"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already oғғ")
#--------------------------
elif msg.text in ["Nleave on"]:
if wait["Lv"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"noтιғ leave on")
else:
wait["Lv"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
elif msg.text in ["Nleave off"]:
if wait["Lv"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"noтιғ leave oғғ")
else:
wait["Lv"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already oғғ")
elif msg.text in ["Autoread:on"]:
wait['alwayRead'] = True
cl.sendText(msg.to,"Auto read On")
elif msg.text in ["Autoread:off"]:
wait['alwayRead'] = False
cl.sendText(msg.to,"Auto read Off")
elif msg.text in ["Simisimi on","Simisimi:on"]:
settings["simiSimi"][msg.to] = True
wait["Simi"] = True
cl.sendText(msg.to," Simisimi Di Aktifkan")
elif msg.text in ["Simisimi off","Simisimi:off"]:
settings["simiSimi"][msg.to] = False
wait["Simi"] = False
cl.sendText(msg.to,"Simisimi Di Nonaktifkan")
##--------------------------
elif 'music: ' in msg.text.lower():
try:
songname = msg.text.lower().replace('music: ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
# r = requests.get('http://ide.ntorp.us/joox/client' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'This is Your Music\n'
hasil += 'Judul : ' + song[0]
hasil += '\nDurasi : ' + song[1]
hasil += '\nLink Download : ' + song[4]
songz = song[5].encode('utf-8')
lyric = songz.replace('ti:','Title -')
lyric = lyric.replace('ar:','Artist -')
lyric = lyric.replace('al:','Album -')
removeString = "[1234567890.:]"
for char in removeString:
lyric = lyric.replace(char,'')
cl.sendText(msg.to, hasil)
cl.sendAudioWithURL(msg.to, song[4])
cl.sendText(msg.to, "Judul: " + song[0].encode('utf-8') + "\n\n" + lyric)
except Exception as njer:
cl.sendText(msg.to, str(njer))
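# Per song, the joox API is assumed to return a list where [0] is the title,
# [1] the duration, [4] the download URL and [5] an LRC-style lyric; the
# removeString loop strips the [mm:ss.xx] timestamps out of that lyric.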
#------------------------------------------------
elif 'lirik: ' in msg.text.lower():
try:
songname = msg.text.lower().replace('lirik: ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
cl.sendText(msg.to, hasil)
except Exception as wak:
cl.sendText(msg.to, str(wak))
#-----------------------------------
elif "idline: " in msg.text:
id = msg.text.replace("idline: ", "")
find = cl.findContactsByUserId(id)
for findid in find:
try:
msg.contentType = 13
msg.contentMetadata = {'mid': findid.mid}
cl.sendMessage(msg)
except Exception as error:
print error
#-----------------------------------
elif "Getimage group" in msg.text:
group = cl.getGroup(msg.to)
path =("http://dl.profile.line-cdn.net/" + group.pictureStatus)
cl.sendImageWithURL(msg.to, path)
#----------------------------------
elif "Leavegroup " in msg.text.split():
if msg.from_ in admin or owner:
ng = msg.text.split().replace("Leavegroup ","")
gid = cl.getGroupIdsJoined()
if msg.from_ in admin or owner:
for i in gid:
h = cl.getGroup(i).name
if h == ng:
cl.sendText(i,"Bot di paksa keluar oleh owner!")
cl.leaveGroup(i)
cl.sendText(msg.to,"Success left ["+ h +"] group")
#else:
#pass
#else:
#cl.sendText(msg.to,"Khusus Creator/Admin")
elif msg.text in ["LG"]: #Melihat List Group
if msg.from_ in admin or owner:
gids = cl.getGroupIdsJoined()
h = ""
for i in gids:
#####gn = cl.getGroup(i).name
h += "[•]%s Member\n" % (cl.getGroup(i).name +"👉"+str(len(cl.getGroup(i).members)))
cl.sendText(msg.to,"=======[List Group]======\n"+ h +"Total Group :"+str(len(gids)))
elif msg.text in ["LG2"]: #Melihat List Group + ID Groupnya (Gunanya Untuk Perintah InviteMeTo:)
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
#--------------List Group------------
elif "Invitegrup " in msg.text:
if msg.from_ in owner:
gid = msg.text.replace("Invitegrup ","")
if gid == "":
cl.sendText(msg.to,"Invalid group id")
else:
try:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(gid,[msg.from_])
except:
cl.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
#----------------------------------
elif "Getcontact: " in msg.text:
if msg.from_ in admin or owner:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
#----------------------------------
elif "youtube search: " in msg.text.lower():
query = msg.text.lower().replace("youtube search: ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&List' not in a['href']:
cl.sendText(msg.to,'Judul : ' + a['title'] + '\nLink : ' + 'https://www.youtube.com' + a['href'])
print '[Command] Youtube Search'
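# This scrapes the YouTube results page directly: each '.yt-lockup-title > a'
# anchor carries a video title and its relative href; playlist results
# ('&List') are skipped.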
elif 'youtube: ' in msg.text.lower():
query = msg.text.split(" ")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
print(e)
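# yt() is assumed to be a search helper defined elsewhere in this script that
# returns a list of YouTube result links; "youtube: <n> <query>" picks the
# n-th result, plain "youtube: <query>" takes the first.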
elif "Vidio " in msg.text:
try:
textToSearch = (msg.text).replace("Vidio ", "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
ght=('https://www.youtube.com' + results['href'])
cl.sendVideoWithURL(msg.to,ght)
except:
cl.sendText(msg.to,"Could not find it")
#---------------------------------
#-----------------------------------------
elif msg.text.lower() == 'runtime':
if msg.from_ in admin or msg.from_ in owner:
eltime = time.time() - mulai
van = "Bot sudah berjalan selama "+waktu(eltime)
cl.sendText(msg.to,van)
#-----------------------------------------
elif msg.text in ["Bot restart"]:
if msg.from_ in owner:
cl.sendText(msg.to, "Bot has been restarted")
restart_program()
print "@Restart"
#-----------------------------------------
elif msg.text in ["Like temen","Bot like temen"]: #Semua Bot Ngelike Status Teman
if msg.from_ in owner:
print "[Command]Like executed"
cl.sendText(msg.to,"Kami Siap Like Status Teman Boss")
try:
autolike()
except:
pass
#-----------------------------------------
elif msg.text in ["Gcreator"]:
if msg.toType == 2:
msg.contentType = 13
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
msg.contentMetadata = {'mid': gCreator}
gCreator1 = ginfo.creator.displayName
except:
gCreator1 = "Error"
cl.sendText(msg.to, "Group Creator : " + gCreator1)
cl.sendMessage(msg)
#-----------------------------------------------
elif msg.text in ["Tag on"]:
if wait["tag"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Tag On")
else:
wait["tag"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tag On")
else:
cl.sendText(msg.to,"already on")
elif msg.text in ["Tag off"]:
if wait["tag"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off")
else:
cl.sendText(msg.to,"Tag Off")
else:
wait["tag"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tag Off")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Auto on"]:
if msg.from_ in admin or msg.from_ in owner:
if wait["auto"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Bot join on")
else:
cl.sendText(msg.to,"Bot join On")
else:
wait["auto"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Bot join On")
else:
cl.sendText(msg.to,"Bot join On")
elif msg.text in ["Auto off"]:
if msg.from_ in admin or msg.from_ in owner:
if wait["auto"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Bot join off")
else:
cl.sendText(msg.to,"Bot join off")
else:
wait["auto"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Bot join off")
else:
cl.sendText(msg.to,"Bot join off")
#-----------------------------------------------
elif "Admadd @" in msg.text:
if msg.from_ in admin or owner:
print "[Command]Staff add executing"
_name = msg.text.replace("Admadd @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = cl.getGroup(msg.to)
gs = cl.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"Admin Telah Ditambahkan")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command Di Tolak Jangan Sedih")
cl.sendText(msg.to,"Sudah Menjadi Admin Maka Tidak Bisa Menjadi Admin Lagi")
elif "Admrem @" in msg.text:
if msg.from_ in admin or owner:
print "[Command]Staff remove executing"
_name = msg.text.replace("Admrem @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = cl.getGroup(msg.to)
gs = cl.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Admin Telah Dihapus")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command DiTolak")
cl.sendText(msg.to,"Admin Tidak Bisa Menggunakan")
elif msg.text in ["Adminlist bot"]:
if msg.from_ in admin or owner:
if admin == []:
cl.sendText(msg.to,"The adminlist is empty")
else:
cl.sendText(msg.to,"Sabar Dikit Mamang.....")
mc = "🤖 ŤẸÃϻ ϻÃŇČỖЖ βỖŤ 🤖"
for mi_d in admin:
mc += "☄1�7 " +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Stafflist executed"
#-----------------------------------------------
elif "Kapan " in msg.text:
tanya = msg.text.replace("Kapan ","")
jawab = ("Besok","Tahun Depan","Minggu Depan","Satu Abad Lagi")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
elif "Waktu" in msg.text:
wait2['setTime'][msg.to] = datetime.today().strftime('TANGGAL : %Y-%m-%d \nHARI : %A \nJAM : %H:%M:%S')
cl.sendText(msg.to, " Waktu/Tanggal\n\n" + (wait2['setTime'][msg.to]))
cl.sendText(msg.to, "Mungkin Tidak Sesuai Atau Sesuai Dengan Tanggal/Waktu Sekrang Dikarenakan Ini Robot Bukan Manusia :v")
#-----------------------------------------------
#-----------------------------------------------
elif "Quotes" in msg.text:
tanya = msg.text.replace(".quotes","")
jawab = ("Don't cry because it's over, smile because it happened.\nDr. Seuss","I'm selfish, impatient and a little insecure. I make mistakes, I am out of control and at times hard to handle. But if you can't handle me at my worst, then you sure as hell don't deserve me at my best.\nMarilyn Monroe","Be yourself; everyone else is already taken.\nOscar Wilde","Two things are infinite: the universe and human stupidity; and I'm not sure about the universe.\nAlbert Einstein","Jangan makan, berat\nNanti kamu gendutan:'v","Nggak perlu orang yang sexy maupun rupawan untukku\nCukup kamu yang bisa buat aku bahagia")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
elif msg.text in ["pap","Pap"]:
cl.sendImageWithURL(msg.to, "https://i.pinimg.com/736x/d1/93/25/d19325b71789e33bedb054468c1fd134--girls-generation-tiffany-girls-generation.jpg")
elif "/apakah " in msg.text:
apk = msg.text.replace("/apakah ","")
rnd = ["Ya","Tidak","Bisa Jadi","Mungkin"]
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
cl.sendText(msg.to,p)
elif "Hari " in msg.text:
apk = msg.text.replace("Hari ","")
rnd = ["Senin","Selasa","Rabu","Kamis","Jumat","Sabtu","Minggu"]
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
cl.sendText(msg.to,p)
elif "Berapa " in msg.text:
apk = msg.text.replace("Berapa ","")
rnd = ['10%','20%','30%','40%','50%','60%','70%','80%','90%','100%','0%']
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
cl.sendText(msg.to,p)
elif "Berapakah " in msg.text:
apk = msg.text.replace("Berapakah ","")
rnd = ['1','2','3','4','5','6','7','8','9','10','Tidak Ada']
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
cl.sendText(msg.to,p)
elif "/kapan " in msg.text:
apk = msg.text.replace("/kapan ","")
rnd = ["kapan kapan","besok","satu abad lagi","Hari ini","Tahun depan","Minggu depan","Bulan depan","Sebentar lagi","Tidak Akan Pernah"]
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
cl.sendText(msg.to,p)
#--------------------------------------
elif msg.text in ["Kalender","Time","Waktu"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
bln = bulan[int(bln) - 1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
cl.sendText(msg.to, rst)
#client.sendText(msg.to, rst)
#-----------------------------------------------
elif "Search image: " in msg.text:
search = msg.text.replace("Search image: ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
cl.sendImageWithURL(msg.to,path)
except:
pass
elif 'searchimage ' in msg.text.lower():
try:
shi = msg.text.lower().replace("searchimage ","")
url = 'https://www.google.com/search?hl=en&biw=1366&bih=659&tbm=isch&sa=1&ei=vSD9WYimHMWHvQTg_53IDw&q=' + shi
raw_html = (download_page(url))
items = _images_get_all_items(raw_html)
cl.sendImageWithURL(msg.to,random.choice(items))
cl.sendText(msg.to,"Total Image Links = "+str(len(items)))
except Exception as e:
cl.sendText(msg.to,str(e))
elif "anime: " in msg.text.lower():
van = msg.text.lower().replace("anime: ","")
cl.sendText(msg.to,"Sedang Mencari...")
cl.sendText(msg.to,"https://myanimelist.net/anime.php?q=" + van)
cl.sendText(msg.to,"Bener Gak?")
elif "Google: " in msg.text:
a = msg.text.replace("Google: ","")
b = urllib.quote(a)
cl.sendText(msg.to,"Sedang Mencari...")
cl.sendText(msg.to, "https://www.google.com/" + b)
cl.sendText(msg.to,"Itu Dia Linknya. . .")
cl.sendImageWithUrl(msg.to,b)
elif "playstore: " in msg.text.lower():
tob = msg.text.lower().replace("playstore: ","")
cl.sendText(msg.to,"Sedang Mencari...")
cl.sendText(msg.to,"Title : "+tob+"\nSource : Google Play\nLink : https://play.google.com/store/search?q=" + tob)
cl.sendText(msg.to,"Tuh link nya boss")
#-----------------------------------------------
elif 'instagram: ' in msg.text.lower():
try:
instagram = msg.text.lower().replace("instagram: ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html5lib')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "========INSTAGRAM INFO USER========\n"
details = "\n========INSTAGRAM INFO USER========"
cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
cl.sendImageWithURL(msg.to, text1[0])
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif "twitter: " in msg.text.lower():
a = msg.text.replace("twitter: ","")
b = urllib.quote(a)
cl.sendText(msg.to,"「 Searching 」\n" "Type:Search Info\nStatus: Processing")
cl.sendText(msg.to, "https://www.twitter.com/" + b)
cl.sendText(msg.to,"「 Searching 」\n" "Type:Search Info\nStatus: Success")
#-----------------------------------------------
elif "Tr-id " in msg.text:
nk0 = msg.text.replace("Tr-id ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'id')
cl.sendText(msg.to,str(trans))
elif "Tr-th " in msg.text:
nk0 = msg.text.replace("Tr-th ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'th')
cl.sendText(msg.to,str(trans))
elif "Tr-ja " in msg.text:
nk0 = msg.text.replace("Tr-ja ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'ja')
cl.sendText(msg.to,str(trans))
elif "Tr-en " in msg.text:
nk0 = msg.text.replace("Tr-en ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'en')
cl.sendText(msg.to,str(trans))
elif "Tr-ms " in msg.text:
nk0 = msg.text.replace("Tr-ms ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'ms')
cl.sendText(msg.to,str(trans))
elif "Tr-it " in msg.text:
nk0 = msg.text.replace("Tr-it ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'it')
cl.sendText(msg.to,str(trans))
elif "Tr-tr " in msg.text:
nk0 = msg.text.replace("Tr-tr ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'tr')
cl.sendText(msg.to,str(trans))
elif "Tr-my " in msg.text:
nk0 = msg.text.replace("Tr-my ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'my')
cl.sendText(msg.to,str(trans))
elif "Tr-af " in msg.text:
nk0 = msg.text.replace("Tr-af ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'af')
cl.sendText(msg.to,str(trans))
elif "Tr-sq " in msg.text:
nk0 = msg.text.replace("Tr-sq ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'sq')
cl.sendText(msg.to,str(trans))
elif "Tr-am " in msg.text:
nk0 = msg.text.replace("Tr-am ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'am')
cl.sendText(msg.to,str(trans))
elif "Tr-ar " in msg.text:
nk0 = msg.text.replace("Tr-ar ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'ar')
cl.sendText(msg.to,str(trans))
elif "Tr-hy " in msg.text:
nk0 = msg.text.replace("Tr-hy ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
trans = translate(_name, 'hy')
cl.sendText(msg.to,str(trans))
#----------------UpdateFotoProfil----------------#
elif "Cpp" in msg.text:
if msg.from_ in admin or msg.from_ in owner:
path = "Robot.jpg"
cl.sendText(msg.to,"Update PP :")
cl.sendImage(msg.to,path)
cl.updateProfilePicture(path)
#--------------------------CEK SIDER------------------------------
elif "setview" in msg.text:
subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
cl.sendText(msg.to, "Checkpoint checked!")
print "@setview"
elif "viewseen" in msg.text:
lurkGroup = ""
dataResult, timeSeen, contacts, userList, timelist, recheckData = [], [], [], [], [], []
with open('dataSeen/'+msg.to+'.txt','r') as rr:
contactArr = rr.readlines()
for v in xrange(len(contactArr) - 1, -1, -1):
num = re.sub(r'\n', "", contactArr[v])
contacts.append(num)
pass
contacts = list(set(contacts))
for z in range(len(contacts)):
arg = contacts[z].split('|')
userList.append(arg[0])
timelist.append(arg[1])
uL = list(set(userList))
for ll in range(len(uL)):
try:
getIndexUser = userList.index(uL[ll])
timeSeen.append(time.strftime("%H:%M:%S", time.localtime(int(timelist[getIndexUser]) / 1000)))
recheckData.append(userList[getIndexUser])
except IndexError:
pass
contactId = cl.getContacts(recheckData)
for v in range(len(recheckData)):
dataResult.append(contactId[v].displayName + ' ('+timeSeen[v]+')')
pass
if len(dataResult) > 0:
tukang = "List Viewer\n*"
grp = '\n* '.join(str(f) for f in dataResult)
total = '\n\nTotal %i viewers (%s)' % (len(dataResult), datetime.now().strftime('%H:%M:%S') )
cl.sendText(msg.to, "%s %s %s" % (tukang, grp, total))
else:
cl.sendText(msg.to, "Belum ada viewers")
print "@viewseen"
#--------------------------CEK SIDER------------------------------
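# Mimic mode: while enabled, the bot parrots every message from targeted users back into the room (text as text, stickers and contacts via sendMessage).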
elif msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
cl.sendText(msg.to,text)
else:
if msg.contentType == 7:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
cl.sendMessage(msg)
elif msg.contentType == 13:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.contentMetadata["mid"]}
cl.sendMessage(msg)
elif "Mimic:" in msg.text:
cmd = msg.text.replace("Mimic:","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
cl.sendText(msg.to,"Mimic on")
else:
cl.sendText(msg.to,"Mimic already on")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
cl.sendText(msg.to,"Mimic off")
else:
cl.sendText(msg.to,"Mimic already off")
elif "Add: " in cmd:
target0 = msg.text.replace("Add: ","")
target1 = target0.lstrip()
target2 = target1.replace("@","")
target3 = target2.rstrip()
_name = target3
gInfo = cl.getGroup(msg.to)
targets = []
for a in gInfo.members:
if _name == a.displayName:
targets.append(a.mid)
if targets == []:
cl.sendText(msg.to,"No targets")
else:
for target in targets:
try:
mimic["target"][target] = True
cl.sendText(msg.to,"Success added target")
#cl.sendMessageWithMention(msg.to,target)
break
except:
cl.sendText(msg.to,"Failed")
break
elif "Del: " in cmd:
target0 = msg.text.replace("Del: ","")
target1 = target0.lstrip()
target2 = target1.replace("@","")
target3 = target2.rstrip()
_name = target3
gInfo = cl.getGroup(msg.to)
targets = []
for a in gInfo.members:
if _name == a.displayName:
targets.append(a.mid)
if targets == []:
cl.sendText(msg.to,"No targets")
else:
for target in targets:
try:
del mimic["target"][target]
cl.sendText(msg.to,"Success deleted target")
#cl.sendMessageWithMention(msg.to,target)
break
except:
cl.sendText(msg.to,"Failed!")
break
elif cmd == "ListTarget":
if mimic["target"] == {}:
cl.sendText(msg.to,"No target")
else:
lst = "<<List Target>>"
total = len(mimic["target"])
for a in mimic["target"]:
if mimic["target"][a] == True:
stat = "On"
else:
stat = "Off"
lst += "\n☄1�7" + cl.getContact(mi_d).displayName + " | " + stat
cl.sendText(msg.to,lst + "\nTotal:" + total)
#----------------------------------------------------------------
#--------------------------------
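# The "gift" handlers below send LINE gift messages: contentType 9 plus metadata naming the product (PRDID), its type (PRDTYPE: THEME or STICKER) and the card template number (MSGTPL).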
elif msg.text in ["Gift","gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["Gift1"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '696d7046-843b-4ed0-8aac-3113ed6c0733',
'PRDTYPE': 'THEME',
'MSGTPL': '6'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["Gift2"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '8fe8cdab-96f3-4f84-95f1-6d731f0e273e',
'PRDTYPE': 'THEME',
'MSGTPL': '7'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["Gift3"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'ae3d9165-fab2-4e70-859b-c14a9d4137c4',
'PRDTYPE': 'THEME',
'MSGTPL': '8'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["gift01","gift02","gift03","gift04","gift05","gift06","gift012"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': str(int(msg.text.lower().replace("gift","")))}
msg.text = None
cl.sendMessage(msg)
# elif "Gift dia 5000c " in msg.text:
# if msg.from_ in admin or owner:
# msg.contentType = 13
# nk0 = msg.text.replace("Gift dia 5000c ","")
# nk1 = nk0.lstrip()
# nk2 = nk1.replace("@","")
# nk3 = nk2.rstrip()
# _name = nk3
# gs = cl.getGroup(msg.to)
# targets = []
# for s in gs.members:
# if _name in s.displayName:
# targets.append(s.mid)
# if targets == []:
# sendMessage(msg.to,"user does not exist")
# pass
# else:
# for target in targets:
# try:
# cl.sendText(msg.to,_name + " Cʜᴇᴄᴋ Yᴏᴜʀ Gɪғᴛ Bᴏx")
# msg.contentType = 9
# msg.contentMetadata= {'PRDTYPE': 'STICKER',
# 'STKVER': '1',
# 'MSGTPL': '1',
# 'STKPKGID': '1380280'}
# msg.to = target
# msg.text = None
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendMessage(msg)
# cl.sendText(msg.to,_name + "Dᴏɴᴇ 5000 ᴄᴏɪɴ")
# except:
# msg.contentMetadata = {'mid': target}
elif "Gift1 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift1 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1380280'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift2 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift2 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '2',
'STKPKGID': '1360738'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift3 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift3 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '3',
'STKPKGID': '1395389'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift4 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift4 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '4',
'STKPKGID': '1329191'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift5 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift5 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '9057'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift6 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift6 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '2',
'STKPKGID': '9167'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift7 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift7 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '3',
'STKPKGID': '7334'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift8 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift8 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1380280'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift9 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift9 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '4',
'STKPKGID': '1405277'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift10 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift10 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1296261'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
#------------------------------
elif msg.text in ["Line clone","line clone"]:
cl.sendText(msg.to,"Key:\nLine1\nLine2\nLine3\nLine4\nLine5")
elif msg.text in ["Line1","line1"]:
cl.sendText(msg.to,"Clone 1:\nhttps://drive.google.com/open?id=1M4MvuodyebWZ_3ePUySEh3fdhnoWgLes")
elif msg.text in ["Line2","line2"]:
cl.sendText(msg.to,"Clone 2:\nhttps://drive.google.com/open?id=1AKDbRW7O-ql4t1wUYe2KkfGahjXvShsJ")
elif msg.text in ["Line3","line3"]:
cl.sendText(msg.to,"Clone 3:\nhttps://drive.google.com/open?id=1zUPVQrI8fq9Z0W6IenqtgB5qtLfZq2if")
elif msg.text in ["Line4","line4"]:
cl.sendText(msg.to,"Clone 4:\nhttps://drive.google.com/open?id=1SzUe4lqQehfqYC-FsKmsYT7RkLsYAgJV")
elif msg.text in ["Line5","line5"]:
cl.sendText(msg.to,"Clone 5:\nhttps://drive.google.com/open?id=1JfStADgnukTsg1CyACR-PN3_cOxGuGpb")
#--------------------------------------
elif msg.text in ["hmm","hmmm"]:
cl.sendText(msg.to,"Waduh kenapa? gatel tenggorokan ya")
elif msg.text in ["Welcome","welcome","Welkam","welkam","Wc","wc","Kam","kam"]:
gs = cl.getGroup(msg.to)
cl.sendText(msg.to,"Sᴇʟᴀᴍᴀᴛ Dᴀᴛᴀɴɢ Dɪ "+ gs.name)
msg.contentType = 7
msg.contentMetadata={'STKID': '247',
'STKPKGID': '3',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
cl.sendText(msg.to,"Jangan nakal ok!")
#-----------------------------------------------
#-------------- Add Friends ------------
elif "botadd @" in msg.text:
if msg.from_ in admin or owner:
if msg.toType == 2:
if msg.from_ in admin or owner:
print "[Command]Add executing"
_name = msg.text.replace("botadd @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = cl.getGroup(msg.to)
gs = cl.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.senText(msg.to, "Berhasil Menambah Kan Teman")
except:
cl.sendText(msg.to,"Error")
else:
cl.sendText(msg.to,"Perintah Ditolak")
cl.sendText(msg.to,"Perintah ini Hanya Untuk Admin")
#-------------------------------------------------
elif "Mid: @" in msg.text:
if msg.from_ in admin or msg.from_ in owner:
_name = msg.text.replace("Mid: @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
#--------------------------
elif msg.text in ["Bye all gc"]: # Keluar Dari Semua Group Yang Di dalem nya ada bot(Kalo Bot Kalian Nyangkut di Group lain :D)
if msg.from_ in admin or owner:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.leaveGroup(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Bʏᴇ,, Bʏᴇᴇ... " + str(ginfo.name) + "\n\nBᴏᴛꜱ Dɪᴘᴀᴋꜱᴀ Kᴇʟᴜᴀʀ ᴏʟᴇʜ Oᴡɴᴇʀ Bᴏᴛꜱ...!!!\nMᴀᴋᴀꜱɪʜ...!!!")
else:
cl.sendText(msg.to,"He declined all invitations")
#--------------------------
elif "Bcgrup: " in msg.text:
bc = msg.text.replace("Bcgrup: ","")
gid = cl.getGroupIdsJoined()
if msg.from_ in admin or msg.from_ in owner:
for i in gid:
cl.sendText(i,""+bc+"\n\n@Bʀᴏᴀᴅᴄᴀꜱᴛ")
cl.sendText(msg.to,"Success BC BosQ")
else:
cl.sendText(msg.to,"Khusus Admin")
#--------------------------------------------------------
elif msg.text in ["Bot like","bot like"]:
try:
print "activity"
url = cl.activity(limit=1)
print url
cl.like(url['result']['posts'][0]['userInfo']['mid'], url['result']['posts'][0]['postInfo']['postId'], likeType=1001)
cl.comment(url['result']['posts'][0]['userInfo']['mid'], url['result']['posts'][0]['postInfo']['postId'], "Auto Like By Sain:\n\n🤖 ŤẸÃϻ ϻÃŇČỖЖ βỖŤ 🤖\n👉 line.me//ti/p/~tak.dapat.tidur")
cl.sendText(msg.to, "Success~")
except Exception as E:
try:
cl.sendText(msg.to,str(E))
except:
pass
elif msg.text in ["timeline"]:
try:
url = cl.activity(limit=5)
cl.sendText(msg.to,url['result']['posts'][0]['postInfo']['postId'])
except Exception as E:
print E
#---------------------------------------------------------------------
elif msg.text in ["Sp","Speed",".sp"]:
start = time.time()
cl.sendText(msg.to, "🏹Proses...➴")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%s/Detik" % (elapsed_time))
#------------------------------------------------------------------
elif "album" in msg.text:
if msg.from_ in admin or owner:
try:
albumtags = msg.text.replace("album","")
gid = albumtags[:6]
name = albumtags.replace(albumtags[:34],"")
cl.createAlbum(gid,name)
cl.sendText(msg.to,name + "created an album")
except:
cl.sendText(msg.to,"Error")
elif "fakecâ†â�1�7�1�71¤7 1�71¤7" in msg.text:
if msg.from_ in admin or owner:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
anu = msg.text.replace("fakecâ†â�1�7�1�71¤7 1�71¤7","")
cl.sendText(msg.to,str(cl.channel.createAlbum(msg.to,name,anu)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
#------------------------------------------------------------------
elif "#rmegs " in msg.text:
if msg.from_ in owner:
gName = msg.text.replace("#rmegs ","")
ap = cl.getGroups([msg.to])
semua = findAndAddContactsByMid(Mi_d)
nya = ap[0].members
for a in nya:
Mi_d = str(a.mid)
klis=[cl]
team=random.choice(klis)
cl.findAndAddContactsByMid(Mi_d)
cl.createGroup(gName, semua)
elif "Bot spin" in msg.text:
if msg.from_ in owner:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.createGroup("Nah kan", mi_d)
cl.sendText(msg.to,"Success...!!!!")
elif msg.text in ["Remove all chat"]:
if msg.from_ in owner:
cl.removeAllMessages(op.param2)
cl.sendText(msg.to,"Removed all chat Finish")
elif "Gif gore" in msg.text:
gif = ("https://media.giphy.com/media/l2JHVsQiOZrNMGzYs/giphy.gif","https://media.giphy.com/media/OgltQ2hbilzJS/200w.gif")
gore = random.choice(gif)
cl.sendGifWithURL(msg.to,gore)
if op.type == 59:
print op
except Exception as error:
print error
if op.type == 55:
print "[NOTIFIED_READ_MESSAGE]"
try:
if op.param1 in wait2['readPoint']:
Nama = cl.getContact(op.param2).displayName
if Nama in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n-> " + Nama
wait2['ROM'][op.param1][op.param2] = "-> " + Nama
wait2['setTime'][op.param1] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
else:
pass
except:
pass
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT in ["10","20","30","40","50","00"]:
return False
else:
return True
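# nameUpdate: while wait["clock"] is on, re-write the profile name as wait["cName"] plus the current (HH:MM), refreshing every 600 seconds.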
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
def autolike():
count = 1
while True:
try:
for posts in cl.activity(1)["result"]["posts"]:
if posts["postInfo"]["liked"] is False:
if wait["likeOn"] == True:
cl.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
print "Like"
if wait["commentOn"] == True:
if posts["userInfo"]["writerMid"] in wait["commentBlack"]:
pass
else:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto Like By Sain:\n\n🤖 ŤẸÃϻ ϻÃŇČỖЖ βỖŤ 🤖\n👉 line.me//ti/p/~tak.dapat.tidur")
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto Like By Sain:\n\n🤖 ŤẸÃϻ ϻÃŇČỖЖ βỖŤ 🤖\n👉 line.me//ti/p/~tak.dapat.tidur")
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto Like By Sain:\n\n🤖 ŤẸÃϻ ϻÃŇČỖЖ βỖŤ 🤖\n👉 line.me//ti/p/~tak.dapat.tidur")
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto Like By Sain:\n\n🤖 ŤẸÃϻ ϻÃŇČỖЖ βỖŤ 🤖\n👉 line.me//ti/p/~tak.dapat.tidur")
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto Like By Sain:\n\n🤖 ŤẸÃϻ ϻÃŇČỖЖ βỖŤ 🤖\n👉 line.me//ti/p/~tak.dapat.tidur")
print "Like"
except:
count += 1
if(count == 50):
sys.exit(0)
else:
pass
thread2 = threading.Thread(target=autolike)
thread2.daemon = True
thread2.start()
def likefriend():
for zx in range(0,20):
hasil = cl.activity(limit=20)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto Like By Sain:\n\n🤖 ŤẸÃϻ ϻÃŇČỖЖ βỖŤ 🤖\n👉 line.me//ti/p/~tak.dapat.tidur")
print "Like"
except:
pass
else:
print "Already Liked Om"
time.sleep(0.60)
def likeme():
for zx in range(0,20):
hasil = cl.activity(limit=20)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
if hasil['result']['posts'][zx]['userInfo']['mid'] == mid:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto Like By Sain:\n\n🤖 ŤẸÃϻ ϻÃŇČỖЖ βỖŤ 🤖\n👉 line.me//ti/p/~tak.dapat.tidur")
print "Like"
except:
pass
else:
print "Status Sudah di Like Om"
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
import os
from unittest import TestCase
from aibolit.patterns.joined_validation.joined_validation import JoinedValidation
from pathlib import Path
class TestJoinedValidation(TestCase):
dir_path = Path(os.path.realpath(__file__)).parent
pattern = JoinedValidation()
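# Each test loads a .java fixture from this directory; pattern.value(file) returns the line numbers where a joined validation is detected.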
def test_canFindSimpleJoinedValidation(self):
file = Path(self.dir_path, 'SimpleJoinedValidation.java')
self.assertEqual(
[3],
self.pattern.value(file),
'Could not find simple joined validation'
)
def test_canFindJoinedValidationAndOr(self):
file = Path(self.dir_path, 'JoinedValidationAndOr.java')
self.assertEqual(
[3],
self.pattern.value(file),
'Could not find joined validation in AndOr condition'
)
def test_canFindJoinedValidationOrAnd(self):
file = Path(self.dir_path, 'JoinedValidationOrAnd.java')
self.assertEqual(
[3],
self.pattern.value(file),
'Could not find joined validation in OrAnd condition'
)
def test_canFindJoinedValidationOrOr(self):
file = Path(self.dir_path, 'JoinedValidationOrOr.java')
self.assertEqual(
[3],
self.pattern.value(file),
'Could not find joined validation in OrOr condition'
)
def test_canFindJoinedValidationOrFunctionCall(self):
file = Path(self.dir_path, 'JoinedValidationOrFunctionCall.java')
self.assertEqual(
[8],
self.pattern.value(file),
'Could not find joined validation in function call'
)
def test_canFindJoinedValidationOrFieldAccess(self):
file = Path(self.dir_path, 'JoinedValidationOrFieldAccess.java')
self.assertEqual(
[6],
self.pattern.value(file),
'Could not find joined validation in field access'
)
def test_canFindNoBracketsJoinedValidation(self):
file = Path(self.dir_path, 'NoBracketsJoinedValidation.java')
self.assertEqual(
[3],
self.pattern.value(file),
'Could not find joined validation when using no brackets'
)
def test_canSkipEmptyJoinedValidation(self):
file = Path(self.dir_path, 'EmptyJoinedValidation.java')
self.assertEqual(
[],
self.pattern.value(file),
'Could not skip empty joined validation'
)
def test_canSkipNoJoinedValidation(self):
file = Path(self.dir_path, 'NoJoinedValidation.java')
self.assertEqual(
[],
self.pattern.value(file),
'Could not skip when there is no joined validation'
)
|
from .openFilePathAndSelect import *
|
from soykeyword.proportion import CorpusbasedKeywordExtractor
corpusbased_extractor = CorpusbasedKeywordExtractor(
min_tf=20,
min_df=2,
tokenize=lambda x:x.strip().split(),
verbose=True
)
# docs: list of str, one document per line of the input file
docs = []
with open('text/news/input5-1.txt', 'r') as f:
for line in f:
line = line.strip()
if line:
docs.append(line)
corpusbased_extractor.train(docs)
keywords = corpusbased_extractor.extract_from_docs(
docs,
min_score=0.8,
)
print(keywords)
|
import tweepy, facebook
def twitter_auth():
"""OAuth procedure for the Twitter API"""
consumer_key = '8U4SH1S8MqMlxFASj6GlgeobL'
consumer_secret = 'iHGgrHBnGJJhnLfH7g2ZaggAwuun2QuNEspvg2ftUD4Ij6UnTp'
access_token = '928672057042391043-Niz2uWC8iXeXepr0NVn8GEzZ8yh5gDG'
access_token_secret = 'DSIXLThko0e0Dcem7OGsa1ht2zpR2oZbZM4dxcSn9lHLr'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
return api
def facebook_auth():
"""OAuth procedure for Twitter API
Long-lived access_token obtained via:
https://graph.facebook.com/v2.11/oauth/access_token?grant_type=fb_exchange_token&client_id={page_id}&client_secret={app_secret}&fb_exchange_token={short-lived token}"""
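# A rough sketch of that exchange with requests (page_id, app_secret and short_token are placeholders, not values from this file):
# requests.get("https://graph.facebook.com/v2.11/oauth/access_token",
#              params={"grant_type": "fb_exchange_token", "client_id": page_id,
#                      "client_secret": app_secret, "fb_exchange_token": short_token}).json()["access_token"]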
cfg = {
"page_id": "128390027869974",
"access_token": "EAAFQgmJkMMMBAFypMLjn9R8mJZCUFoZCchyXu1RnqEHApiOoK30G8vPZCUzKjC8mSGkZCekOuq5PVmIpsRPUegUmZAzLmSshDps6S6b08StDNRaz4PLdZBfLbp6Huss0k7iSgCodQyo9Tv0f9kd7I2w0nWIvyAeZCCq4ensTzWWlDl0xxNjyj9vnF9jSx4mPjoZD" # long-lived token for 2 months from Nov 19 2017
}
return facebook.GraphAPI(access_token = cfg['access_token'])
|
import os
import sys
import io
import pytz
import yaml
from datetime import datetime
import rx
from rx import operators as ops
from concurrent.futures import ProcessPoolExecutor
from urllib.request import urlopen
from csv import DictReader
from functools import partial
from collections import namedtuple
from rich import print
from rich.progress import Progress
from rich.progress import TextColumn, TimeElapsedColumn, SpinnerColumn
from histdatacom.fx_enums import TimeFormat
from histdatacom.utils import get_csv_dialect
from histdatacom.concurrency import get_pool_cpu_count
from histdatacom.concurrency import ProcessPool
from histdatacom.concurrency import InfluxDBWriter
from histdatacom.api import _API
class _Influx():
def __init__(self, args, records_current_, records_next_, csv_chunks_queue_):
self.args = args
global records_current
records_current = records_current_
global records_next
records_next = records_next_
global csv_chunks_queue
csv_chunks_queue = csv_chunks_queue_
def init_counters(self, csv_chunks_queue_, records_current_, records_next_, args_):
global csv_chunks_queue
csv_chunks_queue = csv_chunks_queue_
global records_current
records_current = records_current_
global records_next
records_next = records_next_
global args
args = args_
def parse_csv_row(self, row, record):
# line protocol example: myMeasurement,tag1=value1,tag2=value2 fieldKey="fieldValue" 1556813561098000000
measurement = f"{record.data_fxpair}"
tags = f"source=histdata.com,format={record.data_format},timeframe={record.data_timeframe}".replace(" ", "")
time = self.convert_datetime_to_utc_timestamp(record.data_format,
record.data_timeframe,
row)
match record.data_timeframe:
case "M1":
fields = f"openbid={row['openBid']},highbid={row['highBid']},lowbid={row['lowBid']},closebid={row['closeBid']}".replace(" ", "")
case "T":
fields = f"bidquote={row['bidQuote']},askquote={row['askQuote']}".replace(" ", "")
line_protocol = f"{measurement},{tags} {fields} {time}"
return line_protocol
def parse_csv_rows(self, rows, record):
mapfunc = partial(self.parse_csv_row, record=record)
_parsed_rows = list(map(mapfunc, rows))
csv_chunks_queue.put(_parsed_rows)
def parse_jay_row(self, row, record):
measurement = f"{record.data_fxpair}"
tags = f"source=histdata.com,format={record.data_format},timeframe={record.data_timeframe}".replace(" ", "")
match record.data_timeframe:
case "M1":
_row = namedtuple('_row', ['datetime', 'open', 'high', 'low', 'close', 'vol'])
named_row = _row(row[0], row[1], row[2], row[3], row[4], row[5])
fields = f"openbid={named_row.open},highbid={named_row.high},lowbid={named_row.low},closebid={named_row.close}".replace(" ", "")
time = str(named_row.datetime)
case "T":
_row = namedtuple('_row', ['datetime','bid','ask','vol'])
named_row = _row(row[0], row[1], row[2], row[3])
fields = f"bidquote={named_row.bid},askquote={named_row.ask}".replace(" ", "")
time = str(named_row.datetime)
line_protocol = f"{measurement},{tags} {fields} {time}"
return line_protocol
def parse_jay_rows(self, iterable, record):
mapfunc = partial(self.parse_jay_row, record=record)
_parsed_rows = list(map(mapfunc, iterable))
csv_chunks_queue.put(_parsed_rows)
def import_file(self, record, args, records_current, records_next, csv_chunks_queue):
try:
if str.lower(record.data_format) == "ascii":
jay_path = f"{record.data_dir}.data"
if os.path.exists(jay_path):
self.import_jay(record, args, records_current, records_next, csv_chunks_queue)
elif "CSV" in record.status:
if "ZIP" in record.status:
_API.test_for_jay_or_create(record, args)
self.import_jay(record, args,
records_current, records_next,
csv_chunks_queue)
else:
self.import_csv(record, args,
records_current, records_next,
csv_chunks_queue)
records_next.put(record)
except Exception:
print("Unexpected error from here:", sys.exc_info())
record.delete_into_file()
raise
finally:
records_current.task_done()
def import_jay(self, record, args, records_current, records_next, csv_chunks_queue):
jay = _API.import_jay_data(record.data_dir + record.jay_filename)
with ProcessPoolExecutor(max_workers=2,
initializer=self.init_counters,
initargs=(csv_chunks_queue,
records_current,
records_next,
self.args.copy())) as executor:
data = rx.from_iterable(jay.to_tuples()) \
.pipe(ops.buffer_with_count(25_000),
ops.flat_map(
lambda rows: executor.submit(self.parse_jay_rows, rows, record)))
data.subscribe(
on_next=lambda x: None,
on_error=lambda er: print(f"Unexpected error: {er}"))
record.status = "INFLUX_UPLOAD"
record.write_info_file(base_dir=args['default_download_dir'])
def import_csv(self, record, args, records_current, records_next, csv_chunks_queue):
csv_path = record.data_dir + record.csv_filename
file_endpoint = f"file://{record.data_dir}{record.csv_filename}"
res = urlopen(file_endpoint)
io_wrapper = io.TextIOWrapper(res)
with ProcessPoolExecutor(max_workers=2,
initializer=self.init_counters,
initargs=(csv_chunks_queue,
records_current,
records_next,
self.args.copy())) as executor:
fieldnames = self.fieldnames_match(record.data_format, record.data_timeframe)
dialect = get_csv_dialect(csv_path)
data = rx.from_iterable(
DictReader(io_wrapper,
fieldnames=fieldnames,
dialect=dialect)) \
.pipe(
ops.buffer_with_count(25_000),
ops.flat_map(
lambda rows: executor.submit(self.parse_csv_rows, rows, record)))
data.subscribe(
on_next=lambda x: None,
on_error=lambda er: print(f"Unexpected error: {er}"))
os.remove(csv_path)
record.status = "INFLUX_UPLOAD"
record.write_info_file(base_dir=args['default_download_dir'])
def import_data(self, records_current, records_next, csv_chunks_queue):
writer = InfluxDBWriter(self.args, csv_chunks_queue)
writer.start()
pool = ProcessPool(self.import_file,
self.args,
"Adding", "CSVs to influx queue...",
get_pool_cpu_count(self.args['cpu_utilization']) - 1\
if get_pool_cpu_count(self.args['cpu_utilization']) >= 2 \
else 1,
join=False,
dump=False)
pool(records_current, records_next, csv_chunks_queue)
with Progress(TextColumn(text_format="[cyan]...finishing upload to influxdb"),
SpinnerColumn(), SpinnerColumn(), SpinnerColumn(),
TimeElapsedColumn()) as progress:
task_id = progress.add_task("waiting", total=0)
records_current.join()
csv_chunks_queue.put(None)
csv_chunks_queue.join()
progress.advance(task_id, 0.75)
print("[cyan] done.")
records_next.dump_to_queue(records_current)
@classmethod
def load_influx_yaml(cls):
if os.path.exists('influxdb.yaml'):
with open('influxdb.yaml', 'r') as file:
try:
yamlfile = yaml.safe_load(file)
except yaml.YAMLError as exc:
print(exc)
sys.exit()
return yamlfile
print(""" ERROR: -I flag is used to import data to a influxdb instance...
there is no influxdb.yaml file in working directory.
did you forget to set it up?
""")
sys.exit()
@classmethod
def fieldnames_match(cls, csv_format, timeframe):
try:
match csv_format:
case "ASCII" if timeframe == "M1":
fieldnames = ["msSinceEpochUTC", "openBid", "highBid", "lowBid", "closeBid", "Volume"]
case "ASCII" if timeframe == "T":
fieldnames = ["msSinceEpochUTC", "bidQuote", "askQuote", "Volume"]
case _:
raise ValueError("Invalid format for influx import")
return fieldnames
except ValueError as err:
print(err)
sys.exit()
@classmethod
def get_timeformat(cls, csv_format, timeframe):
format_enum_key = f'{str(csv_format)}_{str(timeframe)}'
return TimeFormat[format_enum_key].value
@classmethod
def convert_datetime_to_utc_timestamp(cls, csv_format, timeframe, row):
est_timestamp = row["msSinceEpochUTC"]
date_object = datetime.strptime(est_timestamp, cls.get_timeformat(csv_format, timeframe))
tz_date_object = date_object.replace(tzinfo=pytz.timezone("Etc/GMT-5"))
timestamp = int(tz_date_object.timestamp() * 1000)
return str(timestamp)
|
from typing import Optional
from fastapi import FastAPI
import requests
import json
import mail
def get_token():
with open('config.json') as config_file:
config = json.load(config_file)
token = config.get('token')
return token
def get_status(url):
r = requests.get(url, allow_redirects=False, verify=False)
return r.status_code
def get_url():
stat_list = []
with open('web.json') as data_file:
data = json.load(data_file)
url = data.get('url')
for u in url:
print(u)
try:
status = get_status(u)
except requests.exceptions.RequestException as e:
stat = {'url': u, 'status': 408, 'msg': 'false'}
stat_list.append(stat)
continue
if status == 200 or status == 302 or status == 301:
a = "true"
stat = {'url': u, 'status': status, 'msg': a}
stat_list.append(stat)
else:
a = "false"
stat = {'url': u, 'status': status, 'msg': a}
stat_list.append(stat)
return stat_list
app = FastAPI()
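# Assumed launch command (module name is a guess): uvicorn main:app --reload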
@app.get("/{token}")
def read_root(token: Optional[str] = None):
true_token = get_token()
if token == true_token:
message = get_url()
return {"message": message}
else:
return {"message": "Token is not true"}
@app.get("/mail/{token}")
def read_root(token: Optional[str] = None):
true_token = get_token()
if token == true_token:
massage = mail.result()
return {"message": massage}
else:
return {"message": "Token is not true"}
|
from django.conf.urls import include
from django.conf.urls import url
from rest_framework import routers
from .api import DataStoreViewset
router = routers.SimpleRouter()
router.register(r'datastore', DataStoreViewset, base_name='datastore')
urlpatterns = [
url(r'^', include(router.urls)),
]
|
# encoding: utf-8
"""
Enable GLFW to be used interactively by setting PyOS_InputHook.
Authors
-------
* Nicolas P. Rougier
* Fernando Perez
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
# This has been modified from the Pyglet and GLUT event hooks to work with
# glfw.
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import sys
import time
#-----------------------------------------------------------------------------
# Platform-dependent imports and functions
#-----------------------------------------------------------------------------
if os.name == 'posix':
import select
def stdin_ready():
infds, outfds, erfds = select.select([sys.stdin],[],[],0)
if infds:
return True
else:
return False
elif sys.platform == 'win32':
import msvcrt
def stdin_ready():
return msvcrt.kbhit()
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def create_inputhook_glfw(mgr, render_loop):
"""Run the GLFW event loop by processing pending events only.
This keeps processing pending events until stdin is ready. After
processing all pending events, a call to time.sleep is inserted. This is
needed, otherwise, CPU usage is at 100%. This sleep time should be tuned
though for best performance.
"""
def inputhook_glfw():
# We need to protect against a user pressing Control-C when IPython is
# idle and this is running. We trap KeyboardInterrupt and pass.
import cyglfw3 as glfw
try:
t = glfw.GetTime()
while not stdin_ready():
render_loop.next()
used_time = glfw.GetTime() - t
if used_time > 10.0:
# print 'Sleep for 1 s' # dbg
time.sleep(1.0)
elif used_time > 0.1:
# Few GUI events coming in, so we can sleep longer
# print 'Sleep for 0.05 s' # dbg
time.sleep(0.05)
else:
# Many GUI events coming in, so sleep only very little
time.sleep(0.001)
except KeyboardInterrupt:
pass
return 0
return inputhook_glfw
from IPython.lib.inputhook import inputhook_manager, InputHookBase
@inputhook_manager.register('glfw')
class GLFWInputHook(InputHookBase):
def enable(self, app=None):
"""Enable event loop integration with GLFW.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This methods sets the ``PyOS_InputHook`` for GLFW, which allows
GLFW to integrate with terminal based applications like
IPython.
"""
inputhook_glfw = create_inputhook_glfw(self.manager, app)
self.manager.set_inputhook(inputhook_glfw)
return
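# With this module imported, IPython users would activate the hook via `%gui glfw` (per the register('glfw') decorator above).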
|
from systemofrecord.repository import blockchain_object_repository
from systemofrecord.repository.message_id_validator import InvalidTitleIdException
from tests.system_of_record_message_fixtures import *
from datatypes import system_of_record_request_validator
from datatypes.core import unicoded
from tests.teardown_unittest import TeardownUnittest
from datatypes.exceptions import DataDoesNotMatchSchemaException
test_object_id = valid_system_of_record_input_message_with_two_tags['object']['object_id']
class BlockchainObjectRepositoryTestCase(TeardownUnittest):
def test_can_store_object_data(self):
blockchain_object_repository.store_object(
object_id=test_object_id,
data=valid_system_of_record_input_message_with_two_tags)
loaded_object = blockchain_object_repository.load_most_recent_object_with_id(test_object_id)
self.check_loaded_object(unicoded(loaded_object.as_dict()))
def test_can_store_object_with_chains(self):
blockchain_object_repository.store_object(
object_id=test_object_id,
data=valid_system_of_record_input_message_with_two_tags)
loaded_object = blockchain_object_repository.load_most_recent_object_with_id(test_object_id)
self.check_loaded_object(unicoded(loaded_object.as_dict()))
self.check_chains_are_equal(
loaded_object,
valid_system_of_record_input_message_with_two_tags['object']['chains'])
def test_adding_new_object_with_same_id_can_load_most_recent_object(self):
blockchain_object_repository.store_object(
object_id=test_object_id,
data=valid_system_of_record_input_message_with_two_tags)
loaded_first_object = blockchain_object_repository.load_most_recent_object_with_id(test_object_id)
blockchain_object_repository.store_object(
object_id=test_object_id,
data=valid_system_of_record_input_message_with_two_tags)
loaded_second_object = blockchain_object_repository.load_most_recent_object_with_id(test_object_id)
self.assertNotEquals(loaded_first_object.blockchain_index, loaded_second_object.blockchain_index)
self.assertGreater(loaded_second_object.blockchain_index, loaded_first_object.blockchain_index)
def test_cannot_store_object_with_the_same_tag_and_value_pair_twice(self):
self.assertRaises(DataDoesNotMatchSchemaException,
blockchain_object_repository.store_object, test_object_id,
invalid_message_with_duplicate_tag_value)
self.assertRaises(DataDoesNotMatchSchemaException,
blockchain_object_repository.store_object, test_object_id,
another_invalid_message_with_duplicate_tag_value)
def test_cannot_store_title_with_title_id_not_matching_json_payload(self):
self.assertRaises(InvalidTitleIdException, blockchain_object_repository.store_object, "foo",
valid_system_of_record_input_message_with_two_tags)
def check_chains_are_equal(self, loaded_data, expected_chains):
self.assertEqual(len(loaded_data.chains), len(expected_chains))
for expected_chain in expected_chains:
found_chain = 0
for maybe_chain in loaded_data.as_dict()['object']['chains']:
if (maybe_chain['chain_name'] == expected_chain['chain_name']) and \
(maybe_chain['chain_value'] == expected_chain['chain_value']):
found_chain = 1
if not found_chain:
self.fail("Could not find chain " + repr(expected_chain))
def check_loaded_object(self, loaded_data):
self.assertIsNotNone(loaded_data)
system_of_record_request_validator.validate(loaded_data)
self.assertEquals(loaded_data['object']['object_id'], test_object_id)
|
# -*- coding: utf-8 -*-
# Copyright 2021, CS GROUP - France, https://www.csgroup.eu/
#
# This file is part of EODAG project
# https://www.github.com/CS-SI/EODAG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from tests import TEST_RESOURCES_PATH
from tests.context import (
EODataAccessGateway,
FilterDate,
FilterLatestByName,
FilterOverlap,
FilterProperty,
SearchResult,
)
class TestSearchStacStatic(unittest.TestCase):
def setUp(self):
super(TestSearchStacStatic, self).setUp()
self.dag = EODataAccessGateway()
self.cat_dir_path = self.root_cat = os.path.join(TEST_RESOURCES_PATH, "stac")
self.root_cat = os.path.join(self.cat_dir_path, "catalog.json")
self.root_cat_len = 5
self.child_cat = os.path.join(
self.cat_dir_path, "country", "FRA", "year", "2018", "2018.json"
)
self.child_cat_len = 2
self.item = os.path.join(
os.path.dirname(self.child_cat),
"items",
"S2A_MSIL1C_20181231T141041_N0207_R110_T21NYF_20181231T155050",
"S2A_MSIL1C_20181231T141041_N0207_R110_T21NYF_20181231T155050.json",
)
self.singlefile_cat = os.path.join(TEST_RESOURCES_PATH, "stac_singlefile.json")
self.singlefile_cat_len = 5
self.stac_provider = "astraea_eod"
self.product_type = "S2_MSI_L1C"
self.extent_big = {"lonmin": -55, "lonmax": -53, "latmin": 2, "latmax": 5}
self.extent_small = {"lonmin": -55, "lonmax": -54.5, "latmin": 2, "latmax": 2.5}
self.static_stac_provider = "foo_static"
self.dag.update_providers_config(
f"""
{self.static_stac_provider}:
search:
type: StaticStacSearch
api_endpoint: {self.root_cat}
products:
GENERIC_PRODUCT_TYPE:
productType: '{{productType}}'
download:
type: HTTPDownload
base_uri: https://fake-endpoint
flatten_top_dirs: True
"""
)
self.dag.set_preferred_provider(self.static_stac_provider)
def test_search_stac_static_load_child(self):
"""load_stac_items from child catalog must provide items"""
items = self.dag.load_stac_items(
self.child_cat, recursive=True, provider=self.stac_provider
)
self.assertIsInstance(items, SearchResult)
self.assertEqual(len(items), self.child_cat_len)
self.assertEqual(items[0].provider, self.stac_provider)
# if no product_type is provided, product_type should be guessed from properties
self.assertEqual(items[0].product_type, "S2_MSI_L1C")
def test_search_stac_static_load_root_not_recursive(self):
"""load_stac_items from root must provide an empty list when no recursive"""
items = self.dag.load_stac_items(
self.root_cat, recursive=False, provider=self.stac_provider
)
self.assertEqual(len(items), 0)
def test_search_stac_static_load_root_recursive(self):
"""load_stac_items from root must provide items when recursive"""
items = self.dag.load_stac_items(
self.root_cat,
recursive=True,
provider=self.stac_provider,
productType=self.product_type,
)
self.assertEqual(len(items), self.root_cat_len)
for item in items:
self.assertEqual(item.provider, self.stac_provider)
self.assertEqual(item.product_type, self.product_type)
def test_search_stac_static(self):
"""Use StaticStacSearch plugin to search all items"""
items, nb = self.dag.search()
self.assertEqual(len(items), self.root_cat_len)
self.assertEqual(nb, self.root_cat_len)
for item in items:
self.assertEqual(item.provider, self.static_stac_provider)
def test_search_stac_static_load_item(self):
"""load_stac_items from a single item must provide it"""
item = self.dag.load_stac_items(self.item, provider=self.stac_provider)
self.assertIsInstance(item, SearchResult)
self.assertEqual(len(item), 1)
self.assertEqual(item[0].provider, self.stac_provider)
# if no product_type is provided, product_type should be guessed from properties
self.assertEqual(item[0].product_type, "S2_MSI_L1C")
def test_search_stac_static_load_item_updated_provider(self):
"""load_stac_items from a single item using updated provider"""
item = self.dag.load_stac_items(self.item, provider=self.stac_provider)
self.assertEqual(item[0].properties["license"], "proprietary")
self.assertEqual(item[0].properties["platform"], "S2ST")
self.assertEqual(item[0].properties["orbitDirection"], "descending")
self.assertNotIn("foo", item[0].properties)
# fake provider with mixed metadata_mapping
self.dag.update_providers_config(
"""
fake_provider:
search:
type: StacSearch
api_endpoint: 'https://fake-endpoint'
metadata_mapping:
license: '{platform}'
foo: '{orbitDirection}'
products:
GENERIC_PRODUCT_TYPE:
productType: '{productType}'
download:
type: HTTPDownload
base_uri: 'https://fake-uri'
"""
)
item = self.dag.load_stac_items(
self.item, provider="fake_provider", raise_errors=True
)
self.assertEqual(item[0].properties["platform"], "S2ST")
self.assertEqual(item[0].properties["license"], "S2ST")
self.assertEqual(item[0].properties["orbitDirection"], "descending")
self.assertIn("foo", item[0].properties)
self.assertEqual(item[0].properties["foo"], "descending")
@unittest.skip(
"skipped as single-file-stac has been removed and is being rethought"
)
def test_search_stac_static_load_singlefile_catalog(self):
"""load_stac_items from child catalog must provide items"""
items = self.dag.load_stac_items(
self.singlefile_cat, provider=self.stac_provider
)
self.assertIsInstance(items, SearchResult)
self.assertEqual(len(items), self.singlefile_cat_len)
self.assertEqual(items[0].provider, self.stac_provider)
# if no product_type is provided, product_type is None
self.assertIsNone(items[0].product_type)
def test_search_stac_static_crunch_filter_date(self):
"""load_stac_items from root and filter by date"""
items = self.dag.load_stac_items(
self.root_cat,
recursive=True,
provider=self.stac_provider,
productType=self.product_type,
)
filtered_items = items.crunch(
FilterDate({"start": "2018-01-01", "end": "2019-01-01"})
)
self.assertEqual(len(filtered_items), self.child_cat_len)
for item in filtered_items:
self.assertIn("2018", item.properties["startTimeFromAscendingNode"])
def test_search_stac_static_by_date(self):
"""Use StaticStacSearch plugin to search by date"""
filtered_items, nb = self.dag.search(start="2018-01-01", end="2019-01-01")
self.assertEqual(len(filtered_items), self.child_cat_len)
self.assertEqual(nb, self.child_cat_len)
for item in filtered_items:
self.assertIn("2018", item.properties["startTimeFromAscendingNode"])
def test_search_stac_static_crunch_filter_overlap(self):
"""load_stac_items from root and filter by overlap"""
# tests over extent_big search geometry
items = self.dag.load_stac_items(
self.root_cat,
recursive=True,
provider=self.stac_provider,
productType=self.product_type,
geom=self.extent_big,
)
self.assertEqual(len(items), self.root_cat_len)
filtered_items = items.crunch(
FilterOverlap({"minimum_overlap": 10}), geometry=self.extent_big
)
self.assertEqual(len(filtered_items), 3)
filtered_items = items.crunch(
FilterOverlap({"minimum_overlap": 100}), geometry=self.extent_big
)
self.assertEqual(len(filtered_items), 1)
filtered_items = items.crunch(
FilterOverlap({"within": True}), geometry=self.extent_big
)
self.assertEqual(len(filtered_items), 1)
filtered_items = items.crunch(
FilterOverlap({"contains": True}), geometry=self.extent_big
)
self.assertEqual(len(filtered_items), 0)
filtered_items = items.crunch(
FilterOverlap({"intersects": True}), geometry=self.extent_big
)
self.assertEqual(len(filtered_items), 3)
# tests over extent_small search geometry
items = self.dag.load_stac_items(
self.root_cat,
recursive=True,
provider=self.stac_provider,
productType=self.product_type,
geom=self.extent_small,
)
self.assertEqual(len(items), self.root_cat_len)
filtered_items = items.crunch(
FilterOverlap({"contains": True}), geometry=self.extent_small
)
self.assertEqual(len(filtered_items), 1)
def test_search_stac_static_by_geom(self):
"""Use StaticStacSearch plugin to search by geometry"""
items, nb = self.dag.search(
geom=self.extent_big,
)
self.assertEqual(len(items), 3)
self.assertEqual(nb, 3)
def test_search_stac_static_crunch_filter_property(self):
"""load_stac_items from root and filter by property"""
items = self.dag.load_stac_items(
self.root_cat,
recursive=True,
provider=self.stac_provider,
productType=self.product_type,
)
self.assertEqual(len(items), self.root_cat_len)
filtered_items = items.crunch(FilterProperty({"orbitNumber": 110}))
self.assertEqual(len(filtered_items), 3)
filtered_items = items.crunch(
FilterProperty({"platformSerialIdentifier": "S2A", "operator": "eq"})
)
self.assertEqual(len(filtered_items), 4)
filtered_items = items.crunch(
FilterProperty({"cloudCover": 10, "operator": "lt"})
)
self.assertEqual(len(filtered_items), 1)
def test_search_stac_static_by_property(self):
"""Use StaticStacSearch plugin to search by property"""
items, nb = self.dag.search(orbitNumber=110)
self.assertEqual(len(items), 3)
self.assertEqual(nb, 3)
def test_search_stac_static_by_cloudcover(self):
"""Use StaticStacSearch plugin to search by cloud cover"""
items, nb = self.dag.search(cloudCover=10)
self.assertEqual(len(items), 1)
self.assertEqual(nb, 1)
    def test_search_stac_static_crunch_filter_latest_by_name(self):
"""load_stac_items from root and filter by name"""
items = self.dag.load_stac_items(
self.root_cat,
recursive=True,
provider=self.stac_provider,
productType=self.product_type,
)
self.assertEqual(len(items), self.root_cat_len)
filtered_items = items.crunch(
FilterLatestByName(
{"name_pattern": r"S2[AB]_MSIL1C_20(?P<tileid>\d{6}).*T21NY.*"}
)
)
self.assertEqual(len(filtered_items), 2)
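# A minimal usage sketch of the API exercised above (assumptions: `eodag` is
# installed, "catalog.json" is a local static STAC catalog, and the filter
# plugin import path is as shown; file names are illustrative):
#
#   from eodag import EODataAccessGateway
#   from eodag.plugins.crunch.filter_date import FilterDate
#
#   dag = EODataAccessGateway()
#   items = dag.load_stac_items("catalog.json", recursive=True)
#   recent = items.crunch(FilterDate({"start": "2018-01-01", "end": "2019-01-01"}))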
|
from datetime import datetime
from dependency_injector.wiring import inject, Provide
from fastapi import Depends, FastAPI, Request
from .configuration import WebhookGatewayConfig
from .request import WebhookRequest
from .routes.service import RouteService
api = FastAPI()
start_time = datetime.now()
@api.post("/route/{route_name}")
@inject
async def dispatch(
route_name: str,
req: Request,
routes: RouteService = Depends(Provide[WebhookGatewayConfig.routes_service]),
):
wrapper_req = WebhookRequest(req)
await wrapper_req.init() # Awaits request body
call_results = routes.dispatch(route_name, wrapper_req)
return {"route": route_name, "called_rules": [r.__dict__ for r in call_results]}
@api.get("/status")
@inject
async def status(
routes: RouteService = Depends(Provide[WebhookGatewayConfig.routes_service]),
):
delta = datetime.now() - start_time
delta_str_without_micros = str(delta).split(".")[0]
return {
"status": "available",
"route_count": routes.route_count,
"uptime": delta_str_without_micros,
}
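# A minimal wiring sketch (assumption: WebhookGatewayConfig is a
# dependency_injector DeclarativeContainer exposing a `routes_service`
# provider; RouteService construction details are illustrative):
#
#   from dependency_injector import containers, providers
#
#   class WebhookGatewayConfig(containers.DeclarativeContainer):
#       routes_service = providers.Singleton(RouteService)
#
#   container = WebhookGatewayConfig()
#   container.wire(modules=[__name__])  # enables the @inject decorators above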
|
#!/usr/bin/env python
from __future__ import print_function
import sys
from collections import defaultdict
import itertools
from operator import itemgetter
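# A-Priori frequent-itemset mining over a newline-delimited basket file:
# count singletons (C1), keep those meeting the support threshold (L1),
# build candidate pairs from L1 (C2), count and filter them (L2), then do
# the same for triples (C3 -> L3). Monotonicity guarantees every frequent
# itemset is built only from frequent subsets, which keeps candidate sets small.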
s = 0.005
outputLine = 30
C1 = defaultdict(int)
L1 = set()
C2 = defaultdict(int)
L2 = set()
C3 = defaultdict(int)
L3 = defaultdict(int)
fr = open('baskets', 'r')
# Calculate C1
basketCount = 0
for line in fr:
basket = line.strip()
items = basket.split()
    for item in items:
        C1[item] += 1  # defaultdict(int) makes an explicit membership check unnecessary
basketCount += 1
# Define support
support = s * basketCount
# Calculate L1
for item in C1:
if C1[item] >= support:
L1.add(item)
# Calculate C2
for pair in itertools.combinations(sorted(L1), 2):
C2[pair] = 0
fr.seek(0)
for line in fr:
basket = line.strip()
items = basket.split()
for pair in itertools.combinations(sorted(items), 2):
if pair in C2:
C2[pair] += 1
# Calculate L2
for pair in C2:
if C2[pair] >= support:
if pair[0] in L1 and pair[1] in L1:
L2.add(pair)
# Calculate C3
decomposed_L2 = set()
for pair in L2:
decomposed_L2.add(pair[0])
decomposed_L2.add(pair[1])
for triple in itertools.combinations(sorted(decomposed_L2), 3):
C3[triple] = 0
fr.seek(0)
for line in fr:
basket = line.strip()
items = basket.split()
for triple in itertools.combinations(sorted(items), 3):
if triple in C3:
C3[triple] += 1
# Calculate L3
for triple in C3:
if C3[triple] >= support:
L3[triple] = C3[triple]
for triple in sorted(L3.items(), key=itemgetter(1), reverse=True):
    print(triple[0][0], triple[0][1], triple[0][2])
outputLine -= 1
if not outputLine:
break
|
#===- enumerations.py - Python Enumerations ------------------*- python -*--===#
#
# The LLVM37 Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
"""
Clang Enumerations
==================
This module provides static definitions of enumerations that exist in libclang.
Enumerations are typically defined as a list of tuples. The exported values are
typically munged into other types or classes at module load time.
All enumerations are centrally defined in this file so they are all grouped
together and easier to audit. And, maybe even one day this file will be
automatically generated by scanning the libclang headers!
"""
# Maps to CXTokenKind. Note that libclang maintains a separate set of token
# enumerations from the C++ API.
TokenKinds = [
('PUNCTUATION', 0),
('KEYWORD', 1),
('IDENTIFIER', 2),
('LITERAL', 3),
('COMMENT', 4),
]
__all__ = ['TokenKinds']
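# An illustrative sketch (not from libclang itself) of the "munging" the
# docstring mentions -- turning the (name, value) tuples into attributes on
# a registry class at import time:
#
#   class TokenKind(object):
#       def __init__(self, value, name):
#           self.value, self.name = value, name
#
#   for name, value in TokenKinds:
#       setattr(TokenKind, name, TokenKind(value, name))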
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from wtforms import BooleanField, StringField
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired, Email
from indico.util.i18n import _
from indico.web.forms.base import IndicoForm
from indico.web.forms.validators import UsedIfChecked
from indico.web.forms.widgets import SwitchWidget
class CephalopodForm(IndicoForm):
joined = BooleanField('Join the community', widget=SwitchWidget())
contact_name = StringField('Contact Name', [UsedIfChecked('joined'), DataRequired()],
description=_('Name of the person responsible for your Indico server.'))
contact_email = EmailField('Contact Email',
[UsedIfChecked('joined'), DataRequired(), Email()],
description=_('Email address of the person responsible for your Indico server.'))
|
import rospy
import subprocess
import threading
import re
from lg_common.msg import WindowGeometry
import awesome
class ManagedWindow(object):
def __init__(self, w_name=None, w_class=None, w_instance=None,
geometry=None, visible=True, chrome_kiosk_workaround=False):
self.w_name = w_name
self.w_class = w_class
self.w_instance = w_instance
self.geometry = geometry
self.is_visible = visible
self.chrome_kiosk_workaround = chrome_kiosk_workaround
self.lock = threading.RLock()
self.proc = None
rospy.on_shutdown(self._cleanup_proc)
@staticmethod
def parse_geometry(geometry):
"""
Parses Xorg window geometry in the form WxH[+-]X[+-]Y
Raises ValueError if the geometry string is invalid.
"""
m = re.match(r'^(\d+)x(\d+)([+-]\d+)([+-]\d+)$', geometry)
if m is None:
raise ValueError(
'Invalid window geometry: {}'.format(geometry))
        dims = [int(d) for d in m.groups()]  # a list, so it stays indexable on Python 3
        return WindowGeometry(width=dims[0], height=dims[1],
                              x=dims[2], y=dims[3])
@staticmethod
def lookup_viewport_geometry(viewport_key):
"""
Looks up geometry for the given viewport name.
Raises KeyError if the viewport is not configured.
"""
param_name = '/viewport/{}'.format(viewport_key)
if not rospy.has_param(param_name):
raise KeyError(
'Viewport parameter not set: {}'.format(param_name))
viewport_value = rospy.get_param(param_name)
return ManagedWindow.parse_geometry(viewport_value)
@staticmethod
def get_viewport_geometry():
"""
Returns WindowGeometry if the private '~viewport' param is set.
Returns None if the private '~viewport' param is not set.
"""
if rospy.has_param('~viewport'):
viewport = rospy.get_param('~viewport')
geometry = ManagedWindow.lookup_viewport_geometry(viewport)
else:
geometry = None
return geometry
def _get_command(self):
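        # Pipe a generated Lua snippet into awesome-client so the awesome WM
        # applies this window's geometry/visibility; built as a single shell
        # string because the pipeline requires shell=True in converge().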
with self.lock:
cmd = []
cmd.append('echo "{}" | /usr/bin/awesome-client'.format(
awesome.get_script(self, chrome_kiosk_workaround=self.chrome_kiosk_workaround)
))
return cmd
def _cleanup_proc(self):
with self.lock:
if self.proc is not None:
self.proc.kill()
def set_visibility(self, visible):
with self.lock:
self.is_visible = visible
def set_geometry(self, geometry):
with self.lock:
self.geometry = geometry
def converge(self):
with self.lock:
cmd = self._get_command()
self._cleanup_proc()
cmd_str = ' '.join(cmd)
rospy.logdebug(cmd_str)
try:
awesome.setup_environ()
except Exception as e:
                rospy.logerr(
                    'failed to setup awesome environment: {}'.format(e)
                )
try:
self.proc = subprocess.Popen(cmd, close_fds=True, shell=True)
self.proc.wait()
self.proc = None
except OSError:
rospy.logerr('failed to run {}'.format(cmd_str))
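# A minimal usage sketch (assumes a running ROS node and a configured
# '/viewport/<name>' param; the viewport name 'center' is illustrative):
#
#   rospy.init_node('example_window_manager')
#   geometry = ManagedWindow.lookup_viewport_geometry('center')
#   win = ManagedWindow(w_class='Chromium', geometry=geometry)
#   win.set_visibility(True)
#   win.converge()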
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
from pydantic import BaseModel, Field
from .value_objects import UUID
from .mixins import BusinessRuleValidationMixin
class Entity(BusinessRuleValidationMixin, BaseModel):
id: UUID = Field(default_factory=UUID.v4)
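# A minimal usage sketch (assumes UUID.v4 is a factory on the UUID value
# object, as the default_factory above implies; Order is illustrative):
#
#   class Order(Entity):
#       total: float = 0.0
#
#   order = Order()
#   print(order.id)  # freshly generated v4 UUID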
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Wen Guan, <wen.guan@cern.ch>, 2019
import datetime
from traceback import format_exc
from flask import Blueprint
from idds.common import exceptions
from idds.common.constants import HTTP_STATUS_CODE
from idds.core.requests import get_requests
from idds.rest.v1.controller import IDDSController
class Monitor(IDDSController):
""" Monitor """
def get_month_list(self, start, end):
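        """Return 'YYYY-MM' labels for every month from start to end, inclusive."""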
mlist = []
total_months = lambda dt: dt.month + 12 * dt.year
for tot_m in range(total_months(start) - 1, total_months(end)):
y, m = divmod(tot_m, 12)
mlist.append(datetime.datetime(y, m + 1, 1).strftime("%Y-%m"))
return mlist
def get_requests(self, request_id, workload_id, with_request=False, with_transform=False, with_processing=False):
if with_request:
rets, ret_reqs = [], {}
reqs = get_requests(request_id=request_id, workload_id=workload_id, with_detail=True, with_processing=False, with_metadata=False)
for req in reqs:
if req['request_id'] not in ret_reqs:
ret_reqs[req['request_id']] = {'request_id': req['request_id'],
'workload_id': req['workload_id'],
'status': req['status'].name if req['status'] else req['status'],
'created_at': req['created_at'],
'updated_at': req['updated_at'],
'transforms': {},
'input_total_files': 0,
'input_coll_bytes': 0,
'input_processed_files': 0,
'input_processing_files': 0,
'output_total_files': 0,
'output_coll_bytes': 0,
'output_processed_files': 0,
'output_processing_files': 0
}
if req['transform_status']:
if req['transform_status'].name not in ret_reqs[req['request_id']]['transforms']:
ret_reqs[req['request_id']]['transforms'][req['transform_status'].name] = 0
ret_reqs[req['request_id']]['transforms'][req['transform_status'].name] += 1
if req['input_total_files']:
ret_reqs[req['request_id']]['input_total_files'] += req['input_total_files']
if req['input_coll_bytes']:
ret_reqs[req['request_id']]['input_coll_bytes'] += req['input_coll_bytes']
if req['input_processed_files']:
ret_reqs[req['request_id']]['input_processed_files'] += req['input_processed_files']
if req['input_processing_files']:
ret_reqs[req['request_id']]['input_processing_files'] += req['input_processing_files']
if req['output_total_files']:
ret_reqs[req['request_id']]['output_total_files'] += req['output_total_files']
if req['output_coll_bytes']:
ret_reqs[req['request_id']]['output_coll_bytes'] += req['output_coll_bytes']
if req['output_processed_files']:
ret_reqs[req['request_id']]['output_processed_files'] += req['output_processed_files']
if req['output_processing_files']:
ret_reqs[req['request_id']]['output_processing_files'] += req['output_processing_files']
for req_id in ret_reqs:
rets.append(ret_reqs[req_id])
return rets
elif with_transform:
rets = []
reqs = get_requests(request_id=request_id, workload_id=workload_id, with_detail=True, with_processing=False, with_metadata=False)
for req in reqs:
ret = {'request_id': req['request_id'],
'transform_id': req['transform_id'],
'workload_id': req['workload_id'],
'transform_workload_id': req['transform_workload_id'],
'transform_type': req['transform_type'].name if req['transform_type'] else req['transform_type'],
'output_coll_scope': req['output_coll_scope'],
'output_coll_name': req['output_coll_name'],
'transform_status': req['transform_status'].name if req['transform_status'] else req['transform_status'],
'transform_created_at': req['transform_created_at'],
'transform_updated_at': req['transform_updated_at'],
'transform_finished_at': req['transform_finished_at'],
'input_total_files': req['input_total_files'] if req['input_total_files'] else 0,
'input_coll_bytes': req['input_coll_bytes'] if req['input_coll_bytes'] else 0,
'input_processed_files': req['input_processed_files'] if req['input_processed_files'] else 0,
'input_processing_files': req['input_processing_files'] if req['input_processing_files'] else 0,
'output_total_files': req['output_total_files'] if req['output_total_files'] else 0,
'output_coll_bytes': req['output_coll_bytes'] if req['output_coll_bytes'] else 0,
'output_processed_files': req['output_processed_files'] if req['output_processed_files'] else 0,
'output_processing_files': req['output_processing_files'] if req['output_processing_files'] else 0,
'errors': req['errors']
}
rets.append(ret)
return rets
elif with_processing:
rets = []
reqs = get_requests(request_id=request_id, workload_id=workload_id, with_detail=False, with_processing=True, with_metadata=False)
for req in reqs:
ret = {'request_id': req['request_id'],
'workload_id': req['workload_id'],
'processing_id': req['processing_id'],
'processing_status': req['processing_status'].name if req['processing_status'] else req['processing_status'],
'processing_created_at': req['processing_created_at'],
'processing_updated_at': req['processing_updated_at'],
'processing_finished_at': req['processing_finished_at']
}
rets.append(ret)
return rets
else:
rets = []
reqs = get_requests(request_id=request_id, workload_id=workload_id, with_detail=False, with_processing=False, with_metadata=False)
for req in reqs:
ret = {'request_id': req['request_id'],
'workload_id': req['workload_id'],
'status': req['status'].name if req['status'] else req['status'],
'created_at': req['created_at'],
'updated_at': req['updated_at']
}
rets.append(ret)
return rets
def get(self, request_id, workload_id, with_request='false', with_transform='false', with_processing='false'):
""" Get details about a specific Request with given id.
HTTP Success:
200 OK
HTTP Error:
404 Not Found
500 InternalError
        :returns: dictionary of a request.
"""
try:
if request_id == 'null':
request_id = None
if workload_id == 'null':
workload_id = None
if with_request and with_request.lower() in ['true']:
with_request = True
else:
with_request = False
if with_transform and with_transform.lower() in ['true']:
with_transform = True
else:
with_transform = False
if with_processing and with_processing.lower() in ['true']:
with_processing = True
else:
with_processing = False
rets = self.get_requests(request_id=request_id, workload_id=workload_id,
with_request=with_request,
with_transform=with_transform,
with_processing=with_processing)
except exceptions.NoObject as error:
return self.generate_http_response(HTTP_STATUS_CODE.NotFound, exc_cls=error.__class__.__name__, exc_msg=error)
except exceptions.IDDSException as error:
return self.generate_http_response(HTTP_STATUS_CODE.InternalError, exc_cls=error.__class__.__name__, exc_msg=error)
except Exception as error:
print(error)
print(format_exc())
return self.generate_http_response(HTTP_STATUS_CODE.InternalError, exc_cls=exceptions.CoreException.__name__, exc_msg=error)
return self.generate_http_response(HTTP_STATUS_CODE.OK, data=rets)
def post_test(self):
import pprint
pprint.pprint(self.get_request())
pprint.pprint(self.get_request().endpoint)
pprint.pprint(self.get_request().url_rule)
class MonitorRequest(Monitor):
""" Monitor Request """
def get(self, request_id, workload_id):
""" Get details about a specific Request with given id.
HTTP Success:
200 OK
HTTP Error:
404 Not Found
500 InternalError
        :returns: dictionary of a request.
"""
try:
if request_id == 'null':
request_id = None
if workload_id == 'null':
workload_id = None
rets = self.get_requests(request_id=request_id, workload_id=workload_id,
with_request=False, with_transform=False,
with_processing=False)
status_dict = {'Total': {}}
min_time, max_time = None, None
for ret in rets:
if ret['status'] not in status_dict:
status_dict[ret['status']] = {}
if min_time is None or ret['updated_at'] < min_time:
min_time = ret['updated_at']
if max_time is None or ret['updated_at'] > max_time:
max_time = ret['updated_at']
month_list = self.get_month_list(min_time, max_time)
for key in status_dict:
for m in month_list:
status_dict[key][m] = 0
for ret in rets:
m_time = ret['updated_at'].strftime(r"%Y-%m")
status_dict['Total'][m_time] += 1
status_dict[ret['status']][m_time] += 1
status_dict_acc = {}
for key in status_dict:
status_dict_acc[key] = {}
for i in range(len(month_list)):
if i == 0:
status_dict_acc[key][month_list[i]] = status_dict[key][month_list[i]]
else:
status_dict_acc[key][month_list[i]] = status_dict[key][month_list[i]] + status_dict_acc[key][month_list[i - 1]]
ret_status = {'total': len(rets), 'month_status': status_dict, 'month_acc_status': status_dict_acc}
except exceptions.NoObject as error:
return self.generate_http_response(HTTP_STATUS_CODE.NotFound, exc_cls=error.__class__.__name__, exc_msg=error)
except exceptions.IDDSException as error:
return self.generate_http_response(HTTP_STATUS_CODE.InternalError, exc_cls=error.__class__.__name__, exc_msg=error)
except Exception as error:
print(error)
print(format_exc())
return self.generate_http_response(HTTP_STATUS_CODE.InternalError, exc_cls=exceptions.CoreException.__name__, exc_msg=error)
return self.generate_http_response(HTTP_STATUS_CODE.OK, data=ret_status)
class MonitorTransform(Monitor):
""" Monitor Transform """
def get(self, request_id, workload_id):
""" Get details about a specific Request with given id.
HTTP Success:
200 OK
HTTP Error:
404 Not Found
500 InternalError
        :returns: dictionary of a request.
"""
try:
if request_id == 'null':
request_id = None
if workload_id == 'null':
workload_id = None
rets = self.get_requests(request_id=request_id, workload_id=workload_id,
with_request=False, with_transform=True, with_processing=False)
status_dict = {'Total': {}}
status_dict_by_type = {}
processed_files, processed_bytes = {}, {}
processed_files_by_type, processed_bytes_by_type = {}, {}
min_time, max_time = None, None
total_files, total_bytes = 0, 0
for ret in rets:
if ret['transform_status'] and ret['transform_status'] not in status_dict:
status_dict[ret['transform_status']] = {}
if ret['transform_type'] and ret['transform_type'] not in status_dict_by_type:
status_dict_by_type[ret['transform_type']] = {}
processed_files_by_type[ret['transform_type']] = {}
processed_bytes_by_type[ret['transform_type']] = {}
if ret['transform_updated_at'] and (min_time is None or ret['transform_updated_at'] < min_time):
min_time = ret['transform_updated_at']
if ret['transform_updated_at'] and (max_time is None or ret['transform_updated_at'] > max_time):
max_time = ret['transform_updated_at']
month_list = self.get_month_list(min_time, max_time)
for key in status_dict:
processed_files[key] = {}
processed_bytes[key] = {}
for t_type in status_dict_by_type:
status_dict_by_type[t_type][key] = {}
processed_files_by_type[t_type][key] = {}
processed_bytes_by_type[t_type][key] = {}
for m in month_list:
status_dict[key][m] = 0
processed_files[key][m] = 0
processed_bytes[key][m] = 0
for t_type in status_dict_by_type:
status_dict_by_type[t_type][key][m] = 0
processed_files_by_type[t_type][key][m] = 0
processed_bytes_by_type[t_type][key][m] = 0
for ret in rets:
if not ret['transform_updated_at']:
continue
m_time = ret['transform_updated_at'].strftime(r"%Y-%m")
status_dict['Total'][m_time] += 1
status_dict[ret['transform_status']][m_time] += 1
processed_files[ret['transform_status']][m_time] += ret['output_processed_files']
# processed_bytes[ret['transform_status']][m_time] += ret['output_coll_bytes']
# output_coll_bytes is not filled, need to be fixed on the server
processed_bytes[ret['transform_status']][m_time] += ret['input_coll_bytes']
processed_files['Total'][m_time] += ret['output_processed_files']
# processed_bytes['Total'][m_time] += ret['output_coll_bytes']
processed_bytes['Total'][m_time] += ret['input_coll_bytes']
total_files += ret['output_processed_files']
total_bytes += ret['output_coll_bytes']
total_bytes += ret['input_coll_bytes']
t_type = ret['transform_type']
status_dict_by_type[t_type][ret['transform_status']][m_time] += 1
processed_files_by_type[t_type][ret['transform_status']][m_time] += ret['output_processed_files']
# processed_bytes_by_type[t_type][ret['transform_status']][m_time] += ret['output_coll_bytes']
processed_bytes_by_type[t_type][ret['transform_status']][m_time] += ret['input_coll_bytes']
status_dict_by_type[t_type]['Total'][m_time] += 1
processed_files_by_type[t_type]['Total'][m_time] += ret['output_processed_files']
# processed_bytes_by_type[t_type]['Total'][m_time] += ret['output_coll_bytes']
processed_bytes_by_type[t_type]['Total'][m_time] += ret['input_coll_bytes']
status_dict_acc = {}
processed_files_acc, processed_bytes_acc = {}, {}
status_dict_by_type_acc = {}
processed_files_by_type_acc = {}
processed_bytes_by_type_acc = {}
for t_type in status_dict_by_type:
status_dict_by_type_acc[t_type] = {}
processed_files_by_type_acc[t_type] = {}
processed_bytes_by_type_acc[t_type] = {}
for key in status_dict:
status_dict_acc[key] = {}
processed_files_acc[key] = {}
processed_bytes_acc[key] = {}
for t_type in status_dict_by_type:
status_dict_by_type_acc[t_type][key] = {}
processed_files_by_type_acc[t_type][key] = {}
processed_bytes_by_type_acc[t_type][key] = {}
for i in range(len(month_list)):
if i == 0:
status_dict_acc[key][month_list[i]] = status_dict[key][month_list[i]]
processed_files_acc[key][month_list[i]] = processed_files[key][month_list[i]]
processed_bytes_acc[key][month_list[i]] = processed_bytes[key][month_list[i]]
for t_type in status_dict_by_type_acc:
status_dict_by_type_acc[t_type][key][month_list[i]] = status_dict_by_type[t_type][key][month_list[i]]
processed_files_by_type_acc[t_type][key][month_list[i]] = processed_files_by_type[t_type][key][month_list[i]]
processed_bytes_by_type_acc[t_type][key][month_list[i]] = processed_bytes_by_type[t_type][key][month_list[i]]
else:
status_dict_acc[key][month_list[i]] = status_dict[key][month_list[i]] + status_dict_acc[key][month_list[i - 1]]
processed_files_acc[key][month_list[i]] = processed_files[key][month_list[i]] + processed_files_acc[key][month_list[i - 1]]
processed_bytes_acc[key][month_list[i]] = processed_bytes[key][month_list[i]] + processed_bytes_acc[key][month_list[i - 1]]
for t_type in status_dict_by_type_acc:
status_dict_by_type_acc[t_type][key][month_list[i]] = status_dict_by_type[t_type][key][month_list[i]] + status_dict_by_type_acc[t_type][key][month_list[i - 1]]
processed_files_by_type_acc[t_type][key][month_list[i]] = processed_files_by_type[t_type][key][month_list[i]] + processed_files_by_type_acc[t_type][key][month_list[i - 1]]
processed_bytes_by_type_acc[t_type][key][month_list[i]] = processed_bytes_by_type[t_type][key][month_list[i]] + processed_bytes_by_type_acc[t_type][key][month_list[i - 1]]
ret_status = {'total': len(rets),
'total_files': total_files,
'total_bytes': total_bytes,
'month_status': status_dict,
'month_acc_status': status_dict_acc,
'month_processed_files': processed_files,
'month_acc_processed_files': processed_files_acc,
'month_processed_bytes': processed_bytes,
'month_acc_processed_bytes': processed_bytes_acc,
'month_status_dict_by_type': status_dict_by_type,
'month_acc_status_dict_by_type': status_dict_by_type_acc,
'month_processed_files_by_type': processed_files_by_type,
'month_acc_processed_files_by_type': processed_files_by_type_acc,
'month_processed_bytes_by_type': processed_bytes_by_type,
'month_acc_processed_bytes_by_type': processed_bytes_by_type_acc
}
except exceptions.NoObject as error:
return self.generate_http_response(HTTP_STATUS_CODE.NotFound, exc_cls=error.__class__.__name__, exc_msg=error)
except exceptions.IDDSException as error:
return self.generate_http_response(HTTP_STATUS_CODE.InternalError, exc_cls=error.__class__.__name__, exc_msg=error)
except Exception as error:
print(error)
print(format_exc())
return self.generate_http_response(HTTP_STATUS_CODE.InternalError, exc_cls=exceptions.CoreException.__name__, exc_msg=error)
return self.generate_http_response(HTTP_STATUS_CODE.OK, data=ret_status)
class MonitorProcessing(Monitor):
""" Monitor Processing """
def get(self, request_id, workload_id):
""" Get details about a specific Request with given id.
HTTP Success:
200 OK
HTTP Error:
404 Not Found
500 InternalError
        :returns: dictionary of a request.
"""
try:
if request_id == 'null':
request_id = None
if workload_id == 'null':
workload_id = None
rets = self.get_requests(request_id=request_id, workload_id=workload_id,
with_request=False, with_transform=False, with_processing=True)
status_dict = {'Total': {}}
min_time, max_time = None, None
for ret in rets:
if ret['processing_status'] and ret['processing_status'] not in status_dict:
status_dict[ret['processing_status']] = {}
if ret['processing_updated_at'] and (min_time is None or ret['processing_updated_at'] < min_time):
min_time = ret['processing_updated_at']
if ret['processing_updated_at'] and (max_time is None or ret['processing_updated_at'] > max_time):
max_time = ret['processing_updated_at']
month_list = self.get_month_list(min_time, max_time)
for key in status_dict:
for m in month_list:
status_dict[key][m] = 0
for ret in rets:
if ret['processing_updated_at']:
m_time = ret['processing_updated_at'].strftime(r"%Y-%m")
status_dict['Total'][m_time] += 1
status_dict[ret['processing_status']][m_time] += 1
status_dict_acc = {}
for key in status_dict:
status_dict_acc[key] = {}
for i in range(len(month_list)):
if i == 0:
status_dict_acc[key][month_list[i]] = status_dict[key][month_list[i]]
else:
status_dict_acc[key][month_list[i]] = status_dict[key][month_list[i]] + status_dict_acc[key][month_list[i - 1]]
ret_status = {'total': len(rets), 'month_status': status_dict, 'month_acc_status': status_dict_acc}
except exceptions.NoObject as error:
return self.generate_http_response(HTTP_STATUS_CODE.NotFound, exc_cls=error.__class__.__name__, exc_msg=error)
except exceptions.IDDSException as error:
return self.generate_http_response(HTTP_STATUS_CODE.InternalError, exc_cls=error.__class__.__name__, exc_msg=error)
except Exception as error:
print(error)
print(format_exc())
return self.generate_http_response(HTTP_STATUS_CODE.InternalError, exc_cls=exceptions.CoreException.__name__, exc_msg=error)
return self.generate_http_response(HTTP_STATUS_CODE.OK, data=ret_status)
"""----------------------
Web service url maps
----------------------"""
def get_blueprint():
bp = Blueprint('monitor', __name__)
monitor_view = Monitor.as_view('monitor')
bp.add_url_rule('/monitor/<request_id>/<workload_id>/<with_request>/<with_transform>/<with_processing>', view_func=monitor_view, methods=['get', ])
monitor_request_view = MonitorRequest.as_view('monitor_request')
bp.add_url_rule('/monitor_request/<request_id>/<workload_id>', view_func=monitor_request_view, methods=['get', ])
monitor_transform_view = MonitorTransform.as_view('monitor_transform')
bp.add_url_rule('/monitor_transform/<request_id>/<workload_id>', view_func=monitor_transform_view, methods=['get', ])
monitor_processing_view = MonitorProcessing.as_view('monitor_processing')
bp.add_url_rule('/monitor_processing/<request_id>/<workload_id>', view_func=monitor_processing_view, methods=['get', ])
return bp
|
import sys
import json
import shutil
from urllib.parse import urlparse
from contextlib import redirect_stdout
import sh
import humanize
from loguru import logger
from .config import config
from .utils import get_process_memory, get_process_cpu, get_current_time
from .download_handlers import DEFAULT_PROVIDER, get_provider_dict
STATUS_FILENAME = '.plyder.status'
LOG_FILENAME = '.download.log'
PROVIDER_DICT = get_provider_dict(config)
@logger.catch
def download_url(url: str, output_dir: str) -> bool:
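    """Download a single URL into output_dir via the provider matched by
    hostname, falling back to the default (wget) provider; returns True on success."""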
o = urlparse(url)
provider = PROVIDER_DICT.get(o.netloc)
if provider is None:
logger.warning(f'No provider for "{url}" found, using wget fallback')
provider = DEFAULT_PROVIDER
logger.info(f'[{provider["name"]}] Downloading "{url}" to "{output_dir}"')
try:
func = provider['function']
if isinstance(func, sh.Command):
func(url, output_dir, _out=sys.stdout, _err_to_out=True)
else:
func(url, output_dir)
except Exception as e:
logger.exception(e)
return False
return True
def download_package(job: 'JobSubmission') -> None:
# prepare environment
output_dir = config['download_directory'] / job.package_name
output_dir.mkdir(parents=True, exist_ok=True)
with (output_dir / STATUS_FILENAME).open('w') as fd:
json.dump({'status': 'running', 'start_time': get_current_time()}, fd)
# download
logger.info(f'Processing "{job.package_name}"')
with (output_dir / LOG_FILENAME).open('w') as fd:
with redirect_stdout(fd):
any_url_failed = False
for url in job.url_field:
success = download_url(url, output_dir)
any_url_failed |= not success
logger.info(f'Finished "{job.package_name}"')
# update final status
with (output_dir / STATUS_FILENAME).open() as fd:
status_data = json.load(fd)
status_data['status'] = 'failed' if any_url_failed else 'done'
status_data['end_time'] = get_current_time()
with (output_dir / STATUS_FILENAME).open('w') as fd:
json.dump(status_data, fd)
def clean_packages() -> None:
if not config['download_directory'].exists():
logger.warning(
f'Download directory ({config["download_directory"]}) does not exist.'
)
return
for entry in config['download_directory'].iterdir():
if not entry.is_dir():
continue
status_file = entry / STATUS_FILENAME
if not status_file.exists():
continue
with status_file.open() as fd:
info = json.load(fd)
if info['status'] not in ('done', 'failed'):
logger.warning(
f'Package "{entry.name}" in inconsistent state, setting to failed'
)
with status_file.open('w') as fd:
json.dump({'status': 'failed'}, fd)
def list_packages():
if not config['download_directory'].exists():
logger.warning(
f'Download directory ({config["download_directory"]}) does not exist.'
)
return []
res = []
for entry in config['download_directory'].iterdir():
if not entry.is_dir():
continue
# read log
log_file = entry / LOG_FILENAME
if log_file.exists():
with log_file.open() as fd:
log_text = fd.read()[-10000:] # truncate
else:
log_text = ''
# assemble information
status_file = entry / STATUS_FILENAME
try:
with status_file.open() as fd:
info = json.load(fd)
except (FileNotFoundError, json.decoder.JSONDecodeError):
# due to race conditions, the file may not contain valid JSON
# even if it exists
info = {'status': 'unknown'}
res.append({'name': entry.name, 'info': info, 'log': log_text})
return res
def get_server_info():
if config['download_directory'].exists():
total, used, free = shutil.disk_usage(config['download_directory'])
else:
total, used, free = -1, -1, -1
return {
'download_directory': str(config['download_directory']),
'disk_usage': {
'total': humanize.naturalsize(total),
'used': humanize.naturalsize(used),
'free': humanize.naturalsize(free),
},
'process': {
'memory': round(get_process_memory(), 2),
'cpu': round(get_process_cpu(), 2),
},
}
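# A minimal usage sketch (assumes a configured `download_directory` and a
# JobSubmission-like object exposing `package_name` and `url_field`; the
# class below is illustrative):
#
#   class Job:
#       package_name = 'example'
#       url_field = ['https://example.com/file.bin']
#
#   download_package(Job())
#   print(get_server_info())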
|
import json
import urllib
import pymysql
def videoList():
    url = ''  # placeholder; the original Korean comment said: "write the address here"
    jsonData = urllib.urlopen(url)
    data = json.load(jsonData)  # parse straight from the response object
    videoId = data["videoId"]
    videoTitle = data["title"]
    videoId = data["id"]  # NOTE: overwrites the "videoId" value read two lines above
def messages():
    url = ''
    jsonData = urllib.urlopen(url)
    data = json.load(jsonData)
def youtube():
    url = ''
    jsonData = urllib.urlopen(url)
    data = json.load(jsonData)
    videoStop = data["videoStop"]
    videoEnd = data["videoEnd"]
    videoSec = data["Number"]
    # NOTE: the original used an undefined `cursor`; a pymysql connection along
    # these lines (placeholder credentials) is assumed:
    conn = pymysql.connect(host='localhost', user='user', password='password', db='db')
    cursor = conn.cursor()
    sql = "SELECT * FROM `naver-db`"  # backticks, not single quotes, for MySQL identifiers
    cursor.execute(sql)
    result = cursor.fetchall()
|
import win32con
import win32clipboard
print('This program mimics Malay\'s habit of flooding the screen with a name.\n')
inputString = input("Enter the string to repeat:\n")
if inputString == "":
    print("Using the default string.\n")
    inputString = "malayniubi"
inputTimes = abs(int(input("Enter the number of repetitions:\n")))
if inputTimes == 0:
    print("Must repeat at least once.\nUsing the default repeat count of 100.\n")
    inputTimes = 100
print("Your Malay sequence is:\n")
MalayString = (inputString.lower() + inputString.upper()) * inputTimes
print(MalayString)
win32clipboard.OpenClipboard()
win32clipboard.EmptyClipboard()
win32clipboard.SetClipboardData(win32con.CF_UNICODETEXT, MalayString)
win32clipboard.CloseClipboard()
print("The result has been copied to the clipboard.\nPress any key to exit.\n")
input()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Interest rate is a bank service that periodically deposits an interest
amount to each account in the database, based on some fixed rate.
"""
from time import time
from threading import Thread, Event, Lock
class InterestRateManager( Thread ):
#DEFAULT VALUES
PERIOD = 10*60 # in Seconds
RATE = 0.05
def __init__( self, accounts, depositInterest, rate = 0.05 ):
self.RATE = rate
Thread.__init__( self )
self.__accounts = accounts
self.__handler = depositInterest
self.__schedule = []
self.__lock = Lock()
self.__terminateFlag = False
self.__event = Event()
self.start()
def ScheduleDeposit( self, Aid ):
if not self.__schedule:
flag = True
else:
flag = False
        beginning = self.__accounts[ str(Aid) ][ 'AdateOfBegin' ]
        self.__lock.acquire()
        self.__schedule.append( [ Aid, beginning, 0 ] )
self.__lock.release()
if flag:
self.__event.set()
def Close( self ):
self.__terminateFlag = True
self.__event.set()
def __CalcTimeToNext( self ):
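        # Seconds until the head account's next deposit: n full periods have
        # elapsed since the account was opened; if that exceeds the number of
        # deposits already made (schedule[0][2]), a deposit is overdue (0),
        # otherwise wait out the remainder of the current period.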
timeToWait = time()-self.__schedule[0][1]
n = int( timeToWait / self.PERIOD )
if n > self.__schedule[0][2]:
return 0
else:
timeToWait = timeToWait%self.PERIOD
timeToWait = self.PERIOD-timeToWait
return timeToWait
def run( self ):
while True:
#Thread waits for event or timeout
if self.__schedule:
timeToWait = self.__CalcTimeToNext()
self.__flag = True
self.__event.clear()
self.__event.wait( timeToWait )
else:
self.__flag = False
self.__event.clear()
self.__event.wait()
if self.__event.is_set(): #Close, First ScheduleDeposit
if self.__terminateFlag:
break
else: #timeout
if self.__accounts.get( str( self.__schedule[0][0] ), None ) is None:
self.__deleteInterest()
else:
self.__applyRotationInterest()
def __applyRotationInterest( self ):
self.__lock.acquire()
event = self.__schedule.pop(0)
self.__handler( str( event[0] ), self.RATE )
event[2] += 1
self.__schedule.append( event )
self.__lock.release()
def __deleteInterest( self ):
self.__lock.acquire()
self.__schedule.pop( 0 )
self.__lock.release()
|
import argparse
import multiprocessing as mp
import os
from functools import partial
import numpy as np
import pandas as pd
from qpputils import dataparser as dp
from RBO import rbo_dict
from Timer import Timer
parser = argparse.ArgumentParser(description='Features for UQV query variations Generator',
usage='python3 features.py -q queries.txt -c CORPUS -r QL.res ',
epilog='Unless --generate is given, will try loading the file')
parser.add_argument('-c', '--corpus', default='ROBUST', type=str, help='corpus (index) to work with',
choices=['ROBUST', 'ClueWeb12B'])
parser.add_argument('-g', '--group', help='group of queries to predict',
choices=['top', 'low', 'medh', 'medl', 'title'])
parser.add_argument('--quantile', help='quantile of query variants to use for prediction', default=None,
choices=['all', 'low', 'low-0', 'high', 'cref'])
parser.add_argument('-l', '--load', default=None, type=str, help='features file to load')
parser.add_argument('--generate', help="generate new features file", action="store_true")
parser.add_argument('--predict', help="generate new predictions", action="store_true")
parser.add_argument('--graphs', default=None, help="generate new features for graphs", choices=['asce', 'desc'])
parser.add_argument('-v', '--vars', default=None, type=int, help="number of variations, valid with graphs")
NUMBER_OF_DOCS = (5, 10, 25, 50, 100, 250, 500)
def jaccard_coefficient(st1: str, st2: str):
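    """Jaccard similarity of two whitespace-tokenized strings: |intersection| / |union|."""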
st1_set = set(st1.split())
st2_set = set(st2.split())
union = st1_set.union(st2_set)
intersect = st1_set.intersection(st2_set)
return float(len(intersect) / len(union))
def list_overlap(x, y):
x_set = set(x)
intersection = x_set.intersection(y)
return len(intersection)
# TODO: Implement the class QueryFeatureFactory to be meta class that can be used by other classes
class QueryFeatureFactory:
"""TODO: At the moment this will save for each combination a separate pickle file, should change it to a pickle file
that consists of all the calculations and then filter the relevant query variations from it"""
def __init__(self, corpus, queries_group, vars_quantile, **kwargs):
self.top_docs_overlap = kwargs.get('top_docs_overlap', 10)
self.rbo_top = kwargs.get('rbo_top', 100)
self.corpus = corpus
self.queries_group = queries_group
graphs = kwargs.get('graphs', None)
if graphs:
n = kwargs.get('n', None)
assert n, 'Missing number of vars'
self.__set_graph_paths(corpus, queries_group, graphs, n)
else:
self.__set_paths(corpus, queries_group, vars_quantile)
_raw_res_data = dp.ResultsReader(self.results_file, 'trec')
if queries_group == 'title':
_title_res_data = dp.ResultsReader(self.title_res_file, 'trec')
self.prediction_queries_res_data = _title_res_data
else:
self.prediction_queries_res_data = _raw_res_data
self.queries_data = dp.QueriesTextParser(self.queries_full_file, 'uqv')
self.topics_data = dp.QueriesTextParser(self.queries_topic_file)
# Uncomment the next lines if you want to write the basic results of the topic queries.
# write_basic_results(self.prediction_queries_res_data.data_df.loc[self.topics_data.queries_df['qid']], corpus,
# queries_group)
# exit()
# These 2 DF used for the filtering method
self.variations_data = dp.QueriesTextParser(self.queries_variations_file, 'uqv')
self.quantile_variations_data = dp.QueriesTextParser(self.queries_quantile_vars, 'uqv')
# _var_scores_df.loc[_var_scores_df['qid'].isin(_vars_list)]
self.raw_res_data = _raw_res_data
self.fused_data = dp.ResultsReader(self.fused_results_file, 'trec')
self.query_vars = self.queries_data.query_vars
@classmethod
def __set_paths(cls, corpus, qgroup, vars_quantile):
"""This method sets the default paths of the files and the working directories, it assumes the standard naming
convention of the project"""
# cls.predictor = predictor
_corpus_res_dir = dp.ensure_dir(f'~/QppUqvProj/Results/{corpus}')
_corpus_dat_dir = dp.ensure_dir(f'~/QppUqvProj/data/{corpus}')
_results_file = f'{_corpus_res_dir}/test/raw/QL.res'
cls.results_file = os.path.normpath(_results_file)
dp.ensure_file(cls.results_file)
_title_results_file = f'{_corpus_res_dir}/test/basic/QL.res'
cls.title_res_file = os.path.normpath(_title_results_file)
dp.ensure_file(cls.title_res_file)
cls.queries_full_file = dp.ensure_file(f'{_corpus_dat_dir}/queries_{corpus}_UQV_full.stemmed.txt')
# The variations file is used in the filter function - it consists of all the vars w/o the query at hand
_queries_variations_file = f'{_corpus_dat_dir}/queries_{corpus}_UQV_wo_{qgroup}.txt'
cls.queries_variations_file = dp.ensure_file(_queries_variations_file)
# The vars quantile file is used in the filter function - it consists of the relevant vars quantile
if vars_quantile == 'all':
_queries_quantile_file = f'{_corpus_dat_dir}/queries_{corpus}_UQV_full.txt'
else:
_queries_quantile_file = f'{_corpus_dat_dir}/queries_{corpus}_UQV_{vars_quantile}_variants.txt'
cls.queries_quantile_vars = dp.ensure_file(_queries_quantile_file)
_queries_topic_file = f'{_corpus_dat_dir}/queries_{corpus}_{qgroup}.stemmed.txt'
cls.queries_topic_file = dp.ensure_file(_queries_topic_file)
_fused_results_file = f'{_corpus_res_dir}/test/fusion/QL.res'
cls.fused_results_file = dp.ensure_file(_fused_results_file)
# cls.output_dir = dp.ensure_dir(f'{_corpus_res_dir}/test/raw/')
_predictions_out = f'{_corpus_res_dir}/uqvPredictions/referenceLists/{qgroup}/{vars_quantile}_vars/sim_as_pred/'
cls.predictions_output_dir = dp.ensure_dir(_predictions_out)
cls.pkl_dir = dp.ensure_dir(f'{_corpus_res_dir}/test/ref/pkl_files/')
@classmethod
def __set_graph_paths(cls, corpus, qgroup, direct, n):
"""This method sets the default paths of the files and the working directories, it assumes the standard naming
convention of the project"""
# cls.predictor = predictor
_corpus_res_dir = dp.ensure_dir(f'~/QppUqvProj/Results/{corpus}')
_corpus_dat_dir = dp.ensure_dir(f'~/QppUqvProj/data/{corpus}')
_graphs_base_dir = dp.ensure_dir(f'~/QppUqvProj/Graphs/{corpus}')
_graphs_res_dir = dp.ensure_dir(f'{_graphs_base_dir}/referenceLists/{qgroup}/{direct}/{n}_vars')
_graphs_dat_dir = dp.ensure_dir(f'{_graphs_base_dir}/data')
cls.number_of_vars = n
_results_file = f'{_corpus_res_dir}/test/raw/QL.res'
cls.results_file = os.path.normpath(_results_file)
dp.ensure_file(cls.results_file)
_title_results_file = f'{_corpus_res_dir}/test/basic/QL.res'
cls.title_res_file = os.path.normpath(_title_results_file)
dp.ensure_file(cls.title_res_file)
_queries_full_file = f'{_corpus_dat_dir}/queries_{corpus}_UQV_full.stemmed.txt'
cls.queries_full_file = dp.ensure_file(_queries_full_file)
# The variations file is used in the filter function - it consists of all the vars w/o the query at hand
_queries_variations_file = f'{_graphs_dat_dir}/{direct}/queries/queries_wo_{qgroup}_{n}_vars.txt'
cls.queries_variations_file = dp.ensure_file(_queries_variations_file)
cls.queries_quantile_vars = cls.queries_variations_file
_queries_topic_file = f'{_corpus_dat_dir}/queries_{corpus}_{qgroup}.stemmed.txt'
cls.queries_topic_file = dp.ensure_file(_queries_topic_file)
_fused_results_file = f'{_corpus_res_dir}/test/fusion/QL.res'
# _fused_results_file = f'{_corpus_res_dir}/test/fusion/all_wo_{qgroup}_fused_QL.res'
cls.fused_results_file = dp.ensure_file(_fused_results_file)
# cls.output_dir = dp.ensure_dir(f'{_graphs_res_dir}/test/raw/')
cls.predictions_output_dir = dp.ensure_dir(f'{_graphs_res_dir}/sim_as_pred/')
cls.pkl_dir = dp.ensure_dir(f'{_graphs_dat_dir}/pkl_files/features')
def _calc_features(self):
"""This method calculates the similarity features for all the variations with the 'query at hand' i.e. the query
that being predicted, including the query itself (if it's among the variations)"""
_dict = {'topic': [], 'qid': [], 'Jac_coefficient': [], f'Top_{self.top_docs_overlap}_Docs_overlap': [],
f'RBO_EXT_{self.rbo_top}': [], f'RBO_FUSED_EXT_{self.rbo_top}': []}
for topic in self.topics_data.queries_dict.keys():
_topic = topic.split('-')[0]
q_vars = self.query_vars.get(_topic)
_dict['topic'] += [topic] * len(q_vars)
res_dict = self.fused_data.get_res_dict_by_qid(_topic, top=self.rbo_top)
topic_txt = self.topics_data.get_qid_txt(topic)
topics_top_list = self.prediction_queries_res_data.get_docs_by_qid(topic, self.top_docs_overlap)
# topics_top_list = self.title_res_data.get_docs_by_qid(topic, 25)
topic_results_list = self.prediction_queries_res_data.get_res_dict_by_qid(topic, top=self.rbo_top)
for var in q_vars:
var_txt = self.queries_data.get_qid_txt(var)
jc = jaccard_coefficient(topic_txt, var_txt)
var_top_list = self.raw_res_data.get_docs_by_qid(var, self.top_docs_overlap)
# var_top_list = self.raw_res_data.get_docs_by_qid(var, 25)
docs_overlap = list_overlap(topics_top_list, var_top_list)
# All RBO values are rounded to 10 decimal digits, to avoid float overflow
var_results_list = self.raw_res_data.get_res_dict_by_qid(var, top=self.rbo_top)
_rbo_scores_dict = rbo_dict(topic_results_list, var_results_list, p=0.95)
rbo_ext_score = np.around(_rbo_scores_dict['ext'], 10)
_fused_rbo_scores_dict = rbo_dict(res_dict, var_results_list, p=0.95)
_rbo_fused_ext_score = np.around(_fused_rbo_scores_dict['ext'], 10)
_dict['qid'] += [var]
_dict['Jac_coefficient'] += [jc]
_dict[f'Top_{self.top_docs_overlap}_Docs_overlap'] += [docs_overlap]
_dict[f'RBO_EXT_{self.rbo_top}'] += [rbo_ext_score]
_dict[f'RBO_FUSED_EXT_{self.rbo_top}'] += [_rbo_fused_ext_score]
_df = pd.DataFrame.from_dict(_dict)
# _df.set_index(['topic', 'qid'], inplace=True)
return _df
def _filter_queries(self, df):
if 'qid' in df.index.names:
df.reset_index(inplace=True)
# Remove the topic queries
_df = df.loc[df['qid'].isin(self.variations_data.queries_df['qid'])]
# Filter only the relevant quantile variations
_df = _df.loc[_df['qid'].isin(self.quantile_variations_data.queries_df['qid'])]
return _df
    def _soft_max_scores(self, df):
        _df = self._filter_queries(df)
        _df = df  # NOTE: the filtered frame is discarded here; the unfiltered df is used below
_df.set_index(['topic', 'qid'], inplace=True)
_exp_df = _df.apply(np.exp)
# For debugging purposes
z_e = _exp_df.groupby(['topic']).sum()
softmax_df = (_exp_df.groupby(['topic', 'qid']).sum() / z_e)
# _temp = softmax_df.dropna()
# For debugging purposes
return softmax_df
def _average_scores(self, df):
_df = self._filter_queries(df)
# _df = df
_df.set_index(['topic', 'qid'], inplace=True)
# _exp_df = _df.apply(np.exp)
# For debugging purposes
avg_df = _df.groupby(['topic']).mean()
# avg_df = (_df.groupby(['topic', 'qid']).mean())
# _temp = softmax_df.dropna()
# For debugging purposes
return avg_df
def _max_norm_scores(self, df):
# _df = self._filter_queries(df)
_df = df
_df.set_index(['topic', 'qid'], inplace=True)
# For debugging purposes
z_m = _df.groupby(['topic']).max()
z_m.drop('qid', axis='columns', inplace=True)
max_norm_df = (_df.groupby(['topic', 'qid']).sum() / z_m).fillna(0)
# _temp = softmax_df.dropna()
# For debugging purposes
return max_norm_df
def _sum_scores(self, df):
_df = df
# filter only variations different from original query
# _df = self._filter_queries(df)
z_n = _df.groupby(['topic']).sum()
z_n.drop('qid', axis='columns', inplace=True)
# All nan values will be filled with 0
norm_df = (_df.groupby(['topic', 'qid']).sum() / z_n).fillna(0)
return norm_df
def divide_by_size(self, df):
# _df = df
# filter only variations different from original query
_df = self._filter_queries(df)
z_n = _df.groupby(['topic']).count()
z_n.drop('qid', axis='columns', inplace=True)
# All nan values will be filled with 0
# norm_df = (_df.groupby(['topic', 'qid']) / z_n).fillna('!@#!@#!@#!')
_df.set_index(['topic', 'qid'], inplace=True)
norm_df = _df / z_n
return norm_df
def __load_features_df(self, _file_name):
"""The method will try to load the features DF from a pkl file, if it fails it will generate a new df
and save it"""
try:
# Will try loading a DF, if fails will generate and save a new one
file_to_load = dp.ensure_file(_file_name)
_df = pd.read_pickle(file_to_load)
except AssertionError:
print(f'\nFailed to load {_file_name}')
print(f'Will generate {self.pkl_dir.rsplit("/")[-1]} vars {self.queries_group}_query_features '
f'features and save')
_df = self._calc_features()
_df.to_pickle(_file_name)
n = self.top_docs_overlap
_df[f'Top_{n}_Docs_overlap'] = _df[f'Top_{n}_Docs_overlap'] / n
return _df
def __get_pkl_file_name(self):
_file = '{}/{}_queries_{}_RBO_{}_TopDocs_{}.pkl'.format(self.pkl_dir, self.queries_group, self.corpus,
self.rbo_top, self.top_docs_overlap)
return _file
def generate_features(self, load_from_pkl=True):
"""If `load_from_pkl` is True the method will try to load the features DF from a pkl file, otherwise
it will generate a new df and save it"""
_file = self.__get_pkl_file_name()
if load_from_pkl:
_df = self.__load_features_df(_file)
else:
_df = self._calc_features()
_df.to_pickle(_file)
n = self.top_docs_overlap
_df[f'Top_{n}_Docs_overlap'] = _df[f'Top_{n}_Docs_overlap'] / n
return self.divide_by_size(_df)
# return _df
# return self._soft_max_scores(_df)
# return self._sum_scores(_df)
# return self._average_scores(_df)
# return self._max_norm_scores(_df)
def save_predictions(self, df: pd.DataFrame):
_df = self._filter_queries(df)
_df = _df.groupby('topic').mean()
_df = dp.convert_vid_to_qid(_df)
_rboP_dir = dp.ensure_dir(f'{self.predictions_output_dir}/rboP/predictions')
_FrboP_dir = dp.ensure_dir(f'{self.predictions_output_dir}/FrboP/predictions')
_topDocsP_dir = dp.ensure_dir(f'{self.predictions_output_dir}/topDocsP/predictions')
_jcP_dir = dp.ensure_dir(f'{self.predictions_output_dir}/jcP/predictions')
_df[f'RBO_EXT_{self.rbo_top}'].to_csv(f'{_rboP_dir}/predictions-{self.rbo_top}', sep=' ')
_df[f'RBO_FUSED_EXT_{self.rbo_top}'].to_csv(f'{_FrboP_dir}/predictions-{self.rbo_top}', sep=' ')
_df[f'Top_{self.top_docs_overlap}_Docs_overlap'].to_csv(f'{_topDocsP_dir}/predictions-{self.top_docs_overlap}',
sep=' ')
_df['Jac_coefficient'].to_csv(f'{_jcP_dir}/predictions-{self.rbo_top}', sep=' ')
def generate_predictions(self, load_from_pkl=True):
_file = self.__get_pkl_file_name()
if load_from_pkl:
_df = self.__load_features_df(_file)
else:
_df = self._calc_features()
_df.to_pickle(_file)
self.save_predictions(_df)
class RefQueryFeatureFactory(QueryFeatureFactory):
"""TODO: At the moment this will save for each combination a separate pickle file, should change it to a pickle file
that consists of all the calculations and then filter the relevant query variations from it"""
def __init__(self, corpus, queries_group, vars_quantile, **kwargs):
super().__init__(corpus, queries_group, vars_quantile, **kwargs)
self.top_docs_overlap = kwargs.get('top_docs_overlap', 10)
self.rbo_top = kwargs.get('rbo_top', 100)
self.corpus = corpus
self.queries_group = queries_group
graphs = kwargs.get('graphs', None)
if graphs:
n = kwargs.get('n', None)
assert n, 'Missing number of vars'
self.__set_graph_paths(corpus, queries_group, graphs, n)
else:
self.__set_paths(corpus, queries_group, vars_quantile)
_raw_res_data = dp.ResultsReader(self.results_file, 'trec')
if queries_group == 'title':
_title_res_data = dp.ResultsReader(self.title_res_file, 'trec')
self.prediction_queries_res_data = _title_res_data
else:
self.prediction_queries_res_data = _raw_res_data
self.queries_data = dp.QueriesTextParser(self.queries_full_file, 'uqv')
self.topics_data = dp.QueriesTextParser(self.queries_topic_file)
# Uncomment the next lines if you want to write the basic results of the topic queries.
# write_basic_results(self.prediction_queries_res_data.data_df.loc[self.topics_data.queries_df['qid']], corpus,
# queries_group)
# exit()
# These 2 DF used for the filtering method
self.variations_data = dp.QueriesTextParser(self.queries_variations_file, 'uqv')
self.quantile_variations_data = dp.QueriesTextParser(self.queries_quantile_vars, 'uqv')
# _var_scores_df.loc[_var_scores_df['qid'].isin(_vars_list)]
self.raw_res_data = _raw_res_data
self.fused_data = dp.ResultsReader(self.fused_results_file, 'trec')
self.query_vars = self.queries_data.query_vars
@classmethod
def __set_paths(cls, corpus, qgroup, vars_quantile):
"""This method sets the default paths of the files and the working directories, it assumes the standard naming
convention of the project"""
# cls.predictor = predictor
_corpus_res_dir = dp.ensure_dir(f'~/QppUqvProj/Results/{corpus}')
_corpus_dat_dir = dp.ensure_dir(f'~/QppUqvProj/data/{corpus}')
_results_file = f'{_corpus_res_dir}/test/raw/QL.res'
cls.results_file = os.path.normpath(_results_file)
dp.ensure_file(cls.results_file)
_title_results_file = f'{_corpus_res_dir}/test/basic/QL.res'
cls.title_res_file = os.path.normpath(_title_results_file)
dp.ensure_file(cls.title_res_file)
cls.queries_full_file = dp.ensure_file(f'{_corpus_dat_dir}/queries_{corpus}_UQV_full.stemmed.txt')
# The variations file is used in the filter function - it consists of all the vars w/o the query at hand
_queries_variations_file = f'{_corpus_dat_dir}/queries_{corpus}_UQV_wo_{qgroup}.txt'
cls.queries_variations_file = dp.ensure_file(_queries_variations_file)
# The vars quantile file is used in the filter function - it consists of the relevant vars quantile
if vars_quantile == 'all':
_queries_quantile_file = f'{_corpus_dat_dir}/queries_{corpus}_UQV_full.txt'
else:
_queries_quantile_file = f'{_corpus_dat_dir}/queries_{corpus}_UQV_{vars_quantile}_variants.txt'
cls.queries_quantile_vars = dp.ensure_file(_queries_quantile_file)
_queries_topic_file = f'{_corpus_dat_dir}/queries_{corpus}_{qgroup}.stemmed.txt'
cls.queries_topic_file = dp.ensure_file(_queries_topic_file)
_fused_results_file = f'{_corpus_res_dir}/test/fusion/QL.res'
cls.fused_results_file = dp.ensure_file(_fused_results_file)
# cls.output_dir = dp.ensure_dir(f'{_corpus_res_dir}/test/raw/')
_predictions_out = f'{_corpus_res_dir}/uqvPredictions/referenceLists/{qgroup}/{vars_quantile}_vars/sim_as_pred/'
cls.predictions_output_dir = dp.ensure_dir(_predictions_out)
cls.pkl_dir = dp.ensure_dir(f'{_corpus_res_dir}/test/ref/pkl_files/')
@classmethod
def __set_graph_paths(cls, corpus, qgroup, direct, n):
"""This method sets the default paths of the files and the working directories, it assumes the standard naming
convention of the project"""
# cls.predictor = predictor
_corpus_res_dir = dp.ensure_dir(f'~/QppUqvProj/Results/{corpus}')
_corpus_dat_dir = dp.ensure_dir(f'~/QppUqvProj/data/{corpus}')
_graphs_base_dir = dp.ensure_dir(f'~/QppUqvProj/Graphs/{corpus}')
_graphs_res_dir = dp.ensure_dir(f'{_graphs_base_dir}/referenceLists/{qgroup}/{direct}/{n}_vars')
_graphs_dat_dir = dp.ensure_dir(f'{_graphs_base_dir}/data')
cls.number_of_vars = n
_results_file = f'{_corpus_res_dir}/test/raw/QL.res'
cls.results_file = os.path.normpath(_results_file)
dp.ensure_file(cls.results_file)
_title_results_file = f'{_corpus_res_dir}/test/basic/QL.res'
cls.title_res_file = os.path.normpath(_title_results_file)
dp.ensure_file(cls.title_res_file)
_queries_full_file = f'{_corpus_dat_dir}/queries_{corpus}_UQV_full.stemmed.txt'
cls.queries_full_file = dp.ensure_file(_queries_full_file)
# The variations file is used in the filter function - it consists of all the vars w/o the query at hand
_queries_variations_file = f'{_graphs_dat_dir}/{direct}/queries/queries_wo_{qgroup}_{n}_vars.txt'
cls.queries_variations_file = dp.ensure_file(_queries_variations_file)
cls.queries_quantile_vars = cls.queries_variations_file
_queries_topic_file = f'{_corpus_dat_dir}/queries_{corpus}_{qgroup}.stemmed.txt'
cls.queries_topic_file = dp.ensure_file(_queries_topic_file)
_fused_results_file = f'{_corpus_res_dir}/test/fusion/QL.res'
# _fused_results_file = f'{_corpus_res_dir}/test/fusion/all_wo_{qgroup}_fused_QL.res'
cls.fused_results_file = dp.ensure_file(_fused_results_file)
# cls.output_dir = dp.ensure_dir(f'{_graphs_res_dir}/test/raw/')
cls.predictions_output_dir = dp.ensure_dir(f'{_graphs_res_dir}/sim_as_pred/')
cls.pkl_dir = dp.ensure_dir(f'{_graphs_dat_dir}/pkl_files/features')
def _calc_features(self):
"""This method calculates the similarity features for all the variations with the 'query at hand' i.e. the query
that being predicted, including the query itself (if it's among the variations)"""
_dict = {'topic': [], 'qid': [], 'Jac_coefficient': [], f'Top_{self.top_docs_overlap}_Docs_overlap': [],
f'RBO_EXT_{self.rbo_top}': [], f'RBO_FUSED_EXT_{self.rbo_top}': []}
for topic in self.topics_data.queries_dict.keys():
_topic = topic.split('-')[0]
q_vars = self.query_vars.get(_topic)
_dict['topic'] += [topic] * len(q_vars)
res_dict = self.fused_data.get_res_dict_by_qid(_topic, top=self.rbo_top)
topic_txt = self.topics_data.get_qid_txt(topic)
topics_top_list = self.prediction_queries_res_data.get_docs_by_qid(topic, self.top_docs_overlap)
# topics_top_list = self.title_res_data.get_docs_by_qid(topic, 25)
topic_results_list = self.prediction_queries_res_data.get_res_dict_by_qid(topic, top=self.rbo_top)
for var in q_vars:
var_txt = self.queries_data.get_qid_txt(var)
jc = jaccard_coefficient(topic_txt, var_txt)
var_top_list = self.raw_res_data.get_docs_by_qid(var, self.top_docs_overlap)
# var_top_list = self.raw_res_data.get_docs_by_qid(var, 25)
docs_overlap = list_overlap(topics_top_list, var_top_list)
# All RBO values are rounded to 10 decimal digits, to avoid float overflow
var_results_list = self.raw_res_data.get_res_dict_by_qid(var, top=self.rbo_top)
_rbo_scores_dict = rbo_dict(topic_results_list, var_results_list, p=0.95)
rbo_ext_score = np.around(_rbo_scores_dict['ext'], 10)
_fused_rbo_scores_dict = rbo_dict(res_dict, var_results_list, p=0.95)
_rbo_fused_ext_score = np.around(_fused_rbo_scores_dict['ext'], 10)
_dict['qid'] += [var]
_dict['Jac_coefficient'] += [jc]
_dict[f'Top_{self.top_docs_overlap}_Docs_overlap'] += [docs_overlap]
_dict[f'RBO_EXT_{self.rbo_top}'] += [rbo_ext_score]
_dict[f'RBO_FUSED_EXT_{self.rbo_top}'] += [_rbo_fused_ext_score]
_df = pd.DataFrame.from_dict(_dict)
# _df.set_index(['topic', 'qid'], inplace=True)
return _df
def _filter_queries(self, df):
if 'qid' in df.index.names:
df.reset_index(inplace=True)
# Remove the topic queries
_df = df.loc[df['qid'].isin(self.variations_data.queries_df['qid'])]
# Filter only the relevant quantile variations
_df = _df.loc[_df['qid'].isin(self.quantile_variations_data.queries_df['qid'])]
return _df
def _soft_max_scores(self, df):
        _df = self._filter_queries(df)
        # NOTE: the filtered result is discarded below, but the call above is
        # kept because _filter_queries also resets df's index in place when needed
        _df = df
_df.set_index(['topic', 'qid'], inplace=True)
_exp_df = _df.apply(np.exp)
# For debugging purposes
z_e = _exp_df.groupby(['topic']).sum()
softmax_df = (_exp_df.groupby(['topic', 'qid']).sum() / z_e)
# _temp = softmax_df.dropna()
# For debugging purposes
return softmax_df
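    # A worked example of the normalization above (illustrative numbers, not
    # project data): for one topic with two variants scoring 0 and ln(3),
    # exp() yields 1 and 3, the per-topic sum z_e is 4, and the softmax
    # weights come out to 0.25 and 0.75.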
def _average_scores(self, df):
_df = self._filter_queries(df)
# _df = df
_df.set_index(['topic', 'qid'], inplace=True)
# _exp_df = _df.apply(np.exp)
# For debugging purposes
avg_df = _df.groupby(['topic']).mean()
# avg_df = (_df.groupby(['topic', 'qid']).mean())
# _temp = softmax_df.dropna()
# For debugging purposes
return avg_df
def _max_norm_scores(self, df):
# _df = self._filter_queries(df)
_df = df
_df.set_index(['topic', 'qid'], inplace=True)
# For debugging purposes
z_m = _df.groupby(['topic']).max()
z_m.drop('qid', axis='columns', inplace=True)
max_norm_df = (_df.groupby(['topic', 'qid']).sum() / z_m).fillna(0)
# _temp = softmax_df.dropna()
# For debugging purposes
return max_norm_df
def _sum_scores(self, df):
_df = df
# filter only variations different from original query
# _df = self._filter_queries(df)
z_n = _df.groupby(['topic']).sum()
z_n.drop('qid', axis='columns', inplace=True)
# All nan values will be filled with 0
norm_df = (_df.groupby(['topic', 'qid']).sum() / z_n).fillna(0)
return norm_df
def divide_by_size(self, df):
# _df = df
# filter only variations different from original query
_df = self._filter_queries(df)
z_n = _df.groupby(['topic']).count()
z_n.drop('qid', axis='columns', inplace=True)
# All nan values will be filled with 0
# norm_df = (_df.groupby(['topic', 'qid']) / z_n).fillna('!@#!@#!@#!')
_df.set_index(['topic', 'qid'], inplace=True)
norm_df = _df / z_n
return norm_df
def __load_features_df(self, _file_name):
"""The method will try to load the features DF from a pkl file, if it fails it will generate a new df
and save it"""
try:
# Will try loading a DF, if fails will generate and save a new one
file_to_load = dp.ensure_file(_file_name)
_df = pd.read_pickle(file_to_load)
except AssertionError:
print(f'\nFailed to load {_file_name}')
            print(f'Will generate the {self.queries_group} query features and save them in {self.pkl_dir}')
_df = self._calc_features()
_df.to_pickle(_file_name)
n = self.top_docs_overlap
_df[f'Top_{n}_Docs_overlap'] = _df[f'Top_{n}_Docs_overlap'] / n
return _df
def __get_pkl_file_name(self):
_file = '{}/{}_queries_{}_RBO_{}_TopDocs_{}.pkl'.format(self.pkl_dir, self.queries_group, self.corpus,
self.rbo_top, self.top_docs_overlap)
return _file
def generate_features(self, load_from_pkl=True):
"""If `load_from_pkl` is True the method will try to load the features DF from a pkl file, otherwise
it will generate a new df and save it"""
_file = self.__get_pkl_file_name()
if load_from_pkl:
_df = self.__load_features_df(_file)
else:
_df = self._calc_features()
_df.to_pickle(_file)
n = self.top_docs_overlap
_df[f'Top_{n}_Docs_overlap'] = _df[f'Top_{n}_Docs_overlap'] / n
return self.divide_by_size(_df)
# return _df
# return self._soft_max_scores(_df)
# return self._sum_scores(_df)
# return self._average_scores(_df)
# return self._max_norm_scores(_df)
def save_predictions(self, df: pd.DataFrame):
_df = self._filter_queries(df)
_df = _df.groupby('topic').mean()
_df = dp.convert_vid_to_qid(_df)
_rboP_dir = dp.ensure_dir(f'{self.predictions_output_dir}/rboP/predictions')
_FrboP_dir = dp.ensure_dir(f'{self.predictions_output_dir}/FrboP/predictions')
_topDocsP_dir = dp.ensure_dir(f'{self.predictions_output_dir}/topDocsP/predictions')
_jcP_dir = dp.ensure_dir(f'{self.predictions_output_dir}/jcP/predictions')
_df[f'RBO_EXT_{self.rbo_top}'].to_csv(f'{_rboP_dir}/predictions-{self.rbo_top}', sep=' ')
_df[f'RBO_FUSED_EXT_{self.rbo_top}'].to_csv(f'{_FrboP_dir}/predictions-{self.rbo_top}', sep=' ')
_df[f'Top_{self.top_docs_overlap}_Docs_overlap'].to_csv(f'{_topDocsP_dir}/predictions-{self.top_docs_overlap}',
sep=' ')
_df['Jac_coefficient'].to_csv(f'{_jcP_dir}/predictions-{self.rbo_top}', sep=' ')
def generate_predictions(self, load_from_pkl=True):
_file = self.__get_pkl_file_name()
if load_from_pkl:
_df = self.__load_features_df(_file)
else:
_df = self._calc_features()
_df.to_pickle(_file)
self.save_predictions(_df)
def features_loader(file_to_load, corpus):
if file_to_load is None:
file = dp.ensure_file('features_{}_uqv.JSON'.format(corpus))
else:
file = dp.ensure_file(file_to_load)
features_df = pd.read_json(file, dtype={'topic': str, 'qid': str})
features_df.reset_index(drop=True, inplace=True)
features_df.set_index(['topic', 'qid'], inplace=True)
features_df.rename(index=lambda x: x.split('-')[0], level=0, inplace=True)
features_df.sort_values(['topic', 'qid'], axis=0, inplace=True)
return features_df
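# A minimal usage sketch for features_loader (the file name below follows the
# default pattern built when file_to_load is None; the file itself only exists
# after a prior generate run):
#
#     features_df = features_loader('features_ROBUST_uqv.JSON', 'ROBUST')
#     print(features_df.head())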
def write_basic_results(df: pd.DataFrame, corpus, qgroup):
"""The function is used to save basic predictions of a given queries set"""
_df = dp.convert_vid_to_qid(df)
_df.insert(loc=0, column='trec_Q0', value='Q0')
_df.insert(loc=4, column='trec_indri', value='indri')
_file_path = f'~/QppUqvProj/Results/{corpus}/test/ref/QL_{qgroup}.res'
# dp.ensure_dir(os.path.normpath(os.path.expanduser(_file_path)))
_df.to_csv(_file_path, sep=" ", header=False, index=True)
def run_predictions_process(n, corpus, queries_group, quantile):
sim_ref_pred = RefQueryFeatureFactory(corpus, queries_group, quantile, rbo_top=n, top_docs_overlap=n)
sim_ref_pred.generate_predictions()
return sim_ref_pred
def run_features_process(n, corpus, queries_group, quantile):
sim_ref_pred = RefQueryFeatureFactory(corpus, queries_group, quantile, rbo_top=n, top_docs_overlap=n)
df = sim_ref_pred.generate_features()
return df.drop('Jac_coefficient', axis=1)
def load_full_features_df(**kwargs):
"""
:param kwargs: corpus, queries_group, quantile or features_factory_obj: QueryFeatureFactory() object
:return: pd.DataFrame that contains all the features values
"""
corpus = kwargs.get('corpus', None)
queries_group = kwargs.get('queries_group', None)
quantile = kwargs.get('quantile', None)
features_factory_obj = kwargs.get('features_factory_obj', None)
if features_factory_obj:
features_obj = features_factory_obj
corpus = features_obj.corpus
queries_group = features_obj.queries_group
else:
assert corpus and queries_group and quantile, f"Can't create a factory object from Corpus={corpus}, " \
f"Queries group={queries_group}, Variations Quantile={quantile}"
features_obj = RefQueryFeatureFactory(corpus, queries_group, quantile)
pkl_dir = dp.ensure_dir(f'~/QppUqvProj/Results/{corpus}/test/ref/pkl_files/')
_list = []
last_df = pd.DataFrame()
for n in NUMBER_OF_DOCS:
_file = f'{pkl_dir}/{queries_group}_queries_{corpus}_RBO_{n}_TopDocs_{n}.pkl'
try:
dp.ensure_file(_file)
_df = pd.read_pickle(_file).set_index(['topic', 'qid'])
_df[f'Top_{n}_Docs_overlap'] = _df[f'Top_{n}_Docs_overlap'] / n
_list.append(_df.drop('Jac_coefficient', axis=1))
last_df = _df['Jac_coefficient']
except AssertionError:
print(f'!! Warning !! The file {_file} is missing')
df = pd.concat(_list + [last_df], axis=1)
return features_obj.divide_by_size(df)
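# A hedged usage sketch for load_full_features_df (argument values are
# illustrative; main() below calls it the same way in its default branch):
#
#     full_df = load_full_features_df(corpus='ROBUST', queries_group='title', quantile='all')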
def main(args):
corpus = args.corpus
generate = args.generate
predict = args.predict
queries_group = args.group
file_to_load = args.load
quantile = args.quantile
graphs = args.graphs
number_of_vars = args.vars
# Debugging
# corpus = 'ClueWeb12B'
# corpus = 'ROBUST'
# print('\n------+++^+++------ Debugging !! ------+++^+++------\n')
# queries_group = 'title'
# quantile = 'all'
# testing_feat = QueryFeatureFactory('ROBUST', 'title', 'all')
# norm_features_df = testing_feat.generate_features()
# norm_features_df.reset_index().to_json('query_features_{}_uqv.JSON'.format(corpus))
# return
cores = mp.cpu_count() - 1
if generate:
n = NUMBER_OF_DOCS[0]
sim_ref_pred = RefQueryFeatureFactory(corpus, queries_group, quantile, rbo_top=n, top_docs_overlap=n)
df = sim_ref_pred.generate_features()
with mp.Pool(processes=cores) as pool:
norm_features_list = pool.map(
partial(run_features_process, corpus=corpus, queries_group=queries_group, quantile=quantile),
NUMBER_OF_DOCS[1:])
norm_features_df = pd.concat(norm_features_list + [df], axis=1)
_path = f'~/QppUqvProj/Results/{corpus}/test/ref'
_path = dp.ensure_dir(_path)
norm_features_df.reset_index().to_json(
f'{_path}/{queries_group}_query_{quantile}_variations_features_{corpus}_uqv.JSON')
elif predict:
with mp.Pool(processes=cores) as pool:
sim_ref_pred = pool.map(
partial(run_predictions_process, corpus=corpus, queries_group=queries_group, quantile=quantile),
NUMBER_OF_DOCS)
elif graphs:
assert number_of_vars, 'Missing number of variations'
testing_feat = RefQueryFeatureFactory(corpus, queries_group, quantile, graphs=graphs)
norm_features_df = testing_feat.generate_features()
_path = f'~/QppUqvProj/Graphs/{corpus}/data/ref/'
_path = dp.ensure_dir(_path)
norm_features_df.reset_index().to_json(
f'{_path}/{queries_group}_query_{quantile}_variations_features_{corpus}_uqv.JSON')
elif file_to_load:
features_df = features_loader(file_to_load, corpus)
print(features_df)
else:
_path = f'~/QppUqvProj/Results/{corpus}/test/ref'
_path = dp.ensure_dir(_path)
df = load_full_features_df(corpus=corpus, queries_group=queries_group, quantile=quantile)
df.reset_index().to_json(f'{_path}/{queries_group}_query_{quantile}_variations_features_{corpus}_uqv.JSON')
if __name__ == '__main__':
args = parser.parse_args()
overall_timer = Timer('Total runtime')
main(args)
overall_timer.stop()
|
from django import template
from ..conf import settings
from ..forms import SubscriberForm
register = template.Library()
@register.inclusion_tag('easy_subscription/subscription_form.html')
def subscription_form():
return {
'title': settings.EASY_SUBSCRIPTION_FORM_TITLE,
'subtitle': settings.EASY_SUBSCRIPTION_FORM_SUBTITLE,
'footer_message': settings.EASY_SUBSCRIPTION_FORM_FOTTER_MESSAGE,
'form': SubscriberForm(),
'form_border_color': settings.EASY_SUBSCRIPTION_FORM_BORDER_COLOR,
'form_button_color': settings.EASY_SUBSCRIPTION_FORM_BUTTON_COLOR,
}
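# A minimal template usage sketch, assuming this module lives in a
# templatetags package and is loadable under its (unshown) module name:
#
#     {% load <templatetag_module_name> %}
#     {% subscription_form %}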
|
import collections
import pathlib
from typing import DefaultDict, Iterator
from unittest import mock
import pytest
from pysen import mypy
from pysen.mypy import _get_differences_from_base
from pysen.process_utils import add_python_executable
from pysen.reporter import Reporter
from pysen.runner_options import PathContext, RunOptions
from pysen.setting import SettingFile
BASE_DIR = pathlib.Path(__file__).resolve().parent
@pytest.fixture
def reporter() -> Iterator[Reporter]:
r = Reporter("")
with r:
yield r
def test__get_differences_from_base() -> None:
A = {"A": "a", "B": "b", "C": "c", "X": ["1", "2", "3"], "Y": ["a", "b"]}
B = {"A": "a", "C": "c2", "D": "d", "X": ["1", "2", "3"], "Y": ["a", "c"]}
assert _get_differences_from_base(A, B) == {"B": "b", "C": "c", "Y": ["a", "b"]}
assert _get_differences_from_base(B, A) == {"C": "c2", "D": "d", "Y": ["a", "c"]}
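# The semantics implied by the assertions above: keep every (key, value) pair
# of the first mapping that the second mapping does not already hold with an
# equal value. A minimal sketch (not necessarily pysen's implementation):
#
#     def diff_from_base(target, base):
#         return {k: v for k, v in target.items() if base.get(k) != v}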
def test_mypy_setting() -> None:
s = mypy.MypySetting.very_strict()
assert s == mypy.MypySetting.very_strict()
assert s != mypy.MypySetting.strict()
section, settings = s.export(BASE_DIR)
assert section == ["mypy"]
assert settings["check_untyped_defs"]
assert "target_module" not in settings
s.check_untyped_defs = False
section, settings = s.export(BASE_DIR)
assert not settings["check_untyped_defs"]
section, settings = s.export(BASE_DIR, target_module="hoge.fuga.*")
assert section == ["mypy-hoge.fuga.*"]
s.mypy_path = [
"/opt/pysen/stubs",
"stubs2",
pathlib.Path("/usr/pysen/stubs3"),
pathlib.Path("stub4"),
]
section, settings = s.export(pathlib.Path("/opt/pysen/package/python"))
assert settings["mypy_path"] == [
"/opt/pysen/stubs",
"stubs2",
"/usr/pysen/stubs3",
"stub4",
]
# This option is set by pyproject loader
s._pysen_convert_abspath = True
section, settings = s.export(pathlib.Path("/opt/pysen/package/python"))
assert settings["mypy_path"] == [
"../../stubs",
"stubs2",
"../../../../usr/pysen/stubs3",
"stub4",
]
def test_settings() -> None:
m = mypy.Mypy(
setting=mypy.MypySetting.very_strict(),
module_settings={"hoge.fuga": mypy.MypySetting.strict()},
)
assert m.setting == mypy.MypySetting.very_strict()
assert m.module_settings == {"hoge.fuga": mypy.MypySetting.strict()}
m = mypy.Mypy()
assert m.setting == mypy.MypySetting()
assert m.module_settings == {}
def test_commands(reporter: Reporter) -> None:
m = mypy.Mypy(
mypy_targets=[mypy.MypyTarget([pathlib.Path("/bar"), pathlib.Path("baz")])]
)
expected_cmds = add_python_executable(
"mypy",
"--show-absolute-path",
"--no-color-output",
"--show-column-numbers",
"--no-error-summary",
"--config-file",
"/setting/setup.cfg",
"/bar",
"/foo/baz",
)
cmd = m.create_command(
"lint",
PathContext(pathlib.Path("/foo"), pathlib.Path("/setting")),
RunOptions(),
)
with mock.patch("os.chdir", return_value=None):
with mock.patch("pysen.process_utils.run", return_value=(0, "", "")) as patch:
assert cmd(reporter=reporter) == 0
patch.assert_called_with(expected_cmds, reporter)
def test_export_settings() -> None:
m = mypy.Mypy(
setting=mypy.MypySetting(
mypy_path=["hoge"],
plugins=[mypy.MypyPlugin(script=BASE_DIR / pathlib.Path("foo/bar"))],
disallow_any_decorated=False,
ignore_missing_imports=False,
warn_redundant_casts=True,
follow_imports=mypy.MypyFollowImports.ERROR,
_pysen_convert_abspath=True,
),
module_settings={
"foo.*": mypy.MypySetting(disallow_any_decorated=True), # duplicated
"bar.baz": mypy.MypySetting(
ignore_missing_imports=True, # duplicated
disallow_any_decorated=False, # same (not emitted in exported settings)
disallow_any_unimported=False, # new
),
},
)
files: DefaultDict[str, SettingFile] = collections.defaultdict(SettingFile)
m.export_settings(PathContext(BASE_DIR, BASE_DIR), files)
assert files.keys() == {"setup.cfg"}
setting_file = files["setup.cfg"]
expected = {
"mypy": {
"disallow_any_decorated": False,
"follow_imports": "error",
"ignore_missing_imports": False,
"mypy_path": ["hoge"],
"warn_redundant_casts": True,
"plugins": ["foo/bar"],
},
"mypy-foo.*": {"disallow_any_decorated": True},
"mypy-bar.baz": {
"disallow_any_unimported": False,
"ignore_missing_imports": True,
},
}
assert setting_file.as_dict() == expected
|
#!/usr/bin/env python
import os, re
def getFileName(allLogsName='AllCode'):
    # Finds the lowest-numbered filename of the form <allLogsName><n>.txt that
    # doesn't already exist and returns it as the new filename (returns None
    # if <allLogsName>1.txt .. <allLogsName>99.txt all already exist)
filepath = os.path.join('.', allLogsName)
for n in range(1,100):
newFile = filepath + '{0}.txt'.format(n)
if os.path.exists(newFile):
pass
else:
return newFile
def textCombiner(rootDir='.', filetype=".sas", outname="AllCode.txt"):
    '''Walks through the directory tree starting at [rootDir], opening any text
    files with extension [filetype] (default ".sas"), and writes the combined
    files to a text doc [outname] in the rootDir (default "AllCode.txt").
    As the text files are read they are scanned for any %INCLUDE statements
    that refer to code files outside of the directory tree; these are then
    appended to the compiled code file. (Dynamically generated filepaths,
    e.g. those containing macro variables, will not be found, but will be
    listed at the end, along with any files that could not be found.)'''
outpath = os.path.join(rootDir, outname)
extras = [] #initiate list to contain files paths taken from %include statements
#creates and opens output file.
with open(outpath, "w") as outfile:
#walks down directory tree from rootDir
for dirName, subdirList, fileList in os.walk(rootDir):
#for each file found
for fname in fileList:
#create file path from directory and filename
filepath = "\\".join([dirName, fname])
#filepath = os.path.join(dirName, fname)
#check file is of type specified by parameter [filetype]
if fname.endswith(filetype):
#compile regex to find %include statements
inc = re.compile(r'%include\s+[\'\"]([^;]+)[\'\"];', re.IGNORECASE)
#open code file
with open(filepath, "r") as f:
#write code file details to output
outfile.write('\nSTART FILE\n\n')
theDir = "DIRECTORY --> {}\n".format(dirName.upper())
theFile = "FILE -------> {}".format(fname.upper())
outfile.write(theDir)
outfile.write(theFile)
outfile.write('\n\n')
#print(f.read())
for line in f.readlines():
outfile.write(line)
#scan file for %include statements and append any to the extras
matches = re.findall(inc, line)
if matches:
for match in matches:
if match not in extras:
extras.append(match)
else: pass
else: pass
outfile.write('\n\nEND OF SAS FILE\n')
outfile.write('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n')
#outfile.write('\n\n\nSTART FILE\n\n')
else:
pass
macropath = [] #list of filepaths from %include statements that contain macro variables
notfound = [] #list of filepaths from %include statements that could not be found
#if any %include statements found
if extras:
outfile.write("\n\nTHE FOLLOWING CODE FILES WERE FOUND IN %INCLUDE STATEMENTS AND REFER \nTO FILES OUTSIDE OF THE DIRECTORY TREE DEFINED BY THE ROOT DIRECTORY\n\n")
for e in extras:
#if filepath already included within the directory tree ignore it here
if e.startswith(rootDir):
print("Already included: {}".format(e))
pass
#if filepath includes macro statement add it to the macropath list
elif '&' in e:
print("MACRO in: {}".format(e))
macropath.append(e)
#attempt to open file and append details to the output file
else:
print("LEGIT path: {}".format(e))
try:
with open(e, "r") as ef:
outfile.write('\n\n\nSTART FILE\n\n')
theDir = "DIRECTORY --> {}\n".format("\\".join(e.split('\\')[:-1]).upper())
theFile = "FILE -------> {}\n".format(e.split('\\')[-1].upper())
outfile.write(theDir)
outfile.write(theFile)
outfile.write('\n\n')
for line in ef.readlines():
outfile.write(line)
outfile.write('\n\nEND OF SAS FILE\n')
outfile.write('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n')
#outfile.write('\n\n\nSTART FILE\n\n')
except FileNotFoundError:
print("File not found: {}".format(e))
notfound.append(e)
                    except Exception as ex:
                        print("Unknown error ({}) with file: {}".format(ex, e))
if macropath:
outfile.write("The following file paths include dynamically generated file paths with macro variables\nand it was not possible to add them to the compiled code file:\n\n")
for m in macropath:
outfile.write('\n\t')
outfile.write(m)
else: pass
if notfound:
outfile.write("\n\nThe following files were not found. Check that the file has not been moved \nand that the network drive is currently available:\n")
for nf in notfound:
outfile.write('\n\t')
outfile.write(nf)
else: pass
print('Done')
os.popen(outpath)
return outpath
#if __name__ == "__main__":
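# A minimal sketch of an entry point (assumed defaults; the original guard
# above was left commented out):
#
#     if __name__ == "__main__":
#         textCombiner(rootDir='.', filetype='.sas', outname='AllCode.txt')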
|
"""
prefix.py
Created by Diego Garcia del Rio on 2015-03-12.
Copyright (c) 2015 Alcatel-Lucent. All rights reserved.
Based on work by Thomas Morin on mac.py
Copyright (c) 2014-2017 Orange. All rights reserved.
Copyright (c) 2014-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
from exabgp.protocol.ip import IP
from exabgp.util import character
from exabgp.util import ordinal
from exabgp.util import concat_bytes
from exabgp.bgp.message.update.nlri.qualifier import RouteDistinguisher
from exabgp.bgp.message.update.nlri.qualifier import Labels
from exabgp.bgp.message.update.nlri.qualifier import ESI
from exabgp.bgp.message.update.nlri.qualifier import EthernetTag
from exabgp.bgp.message.update.nlri import NLRI
from exabgp.bgp.message.update.nlri.evpn.nlri import EVPN
from exabgp.bgp.message.notification import Notify
# ------------ EVPN Prefix Advertisement NLRI ------------
# As described here:
# https://tools.ietf.org/html/draft-ietf-bess-evpn-prefix-advertisement-01
# +---------------------------------------+
# | RD (8 octets) |
# +---------------------------------------+
# |Ethernet Segment Identifier (10 octets)|
# +---------------------------------------+
# | Ethernet Tag ID (4 octets) |
# +---------------------------------------+
# | IP Prefix Length (1 octet) |
# +---------------------------------------+
# | IP Prefix (4 or 16 octets) |
# +---------------------------------------+
# | GW IP Address (4 or 16 octets) |
# +---------------------------------------+
# | MPLS Label (3 octets) |
# +---------------------------------------+
# total NLRI length is 34 bytes for IPv4 or 58 bytes for IPv6
# ======================================================================= Prefix
# https://tools.ietf.org/html/draft-rabadan-l2vpn-evpn-prefix-advertisement-03
@EVPN.register
class Prefix (EVPN):
CODE = 5
NAME = "IP Prefix Advertisement"
SHORT_NAME = "PrfxAdv"
    def __init__ (self, rd, esi, etag, label, ip, iplen, gwip, packed=None, nexthop=None, action=None, addpath=None):
        '''
        rd: a RouteDistinguisher
        esi: an EthernetSegmentIdentifier
        etag: an EthernetTag
        label: a LabelStackEntry
        ip: an IP address (dotted quad string notation)
        iplen: prefix length for ip
        gwip: an IP address (dotted quad string notation)
        '''
EVPN.__init__(self,action,addpath)
self.nexthop = nexthop
self.rd = rd
self.esi = esi
self.etag = etag
self.ip = ip
self.iplen = iplen
self.gwip = gwip
        self.label = label if label else Labels.NOLABEL
self._pack(packed)
def __eq__ (self, other):
return \
NLRI.__eq__(self,other) and \
self.CODE == other.CODE and \
self.rd == other.rd and \
self.etag == other.etag and \
self.ip == other.ip and \
self.iplen == other.iplen
# esi, label and gwip must not be compared
def __ne__ (self, other):
return not self.__eq__(other)
def __str__ (self):
return "%s:%s:%s:%s:%s%s:%s:%s" % (
self._prefix(),
self.rd._str(),
self.esi,
self.etag,
self.ip,
"/%d" % self.iplen,
self.gwip,
self.label
)
def __hash__ (self):
# esi, and label, gwip must *not* be part of the hash
return hash("%s:%s:%s:%s" % (self.rd,self.etag,self.ip,self.iplen))
def _pack (self, packed=None):
if self._packed:
return self._packed
if packed:
self._packed = packed
return packed
self._packed = concat_bytes(
self.rd.pack(),
self.esi.pack(),
self.etag.pack(),
character(self.iplen),
self.ip.pack(),
self.gwip.pack(),
self.label.pack(),
)
return self._packed
@classmethod
def unpack (cls, exdata):
data = exdata
# Get the data length to understand if addresses are IPv4 or IPv6
datalen = len(data)
rd = RouteDistinguisher.unpack(data[:8])
data = data[8:]
esi = ESI.unpack(data[:10])
data = data[10:]
etag = EthernetTag.unpack(data[:4])
data = data[4:]
iplen = ordinal(data[0])
data = data[1:]
if datalen == (26 + 8): # Using IPv4 addresses
ip = IP.unpack(data[:4])
data = data[4:]
gwip = IP.unpack(data[:4])
data = data[4:]
elif datalen == (26 + 32): # Using IPv6 addresses
ip = IP.unpack(data[:16])
data = data[16:]
gwip = IP.unpack(data[:16])
data = data[16:]
else:
raise Notify(3,5,"Data field length is given as %d, but EVPN route currently support only IPv4 or IPv6(34 or 58)" % datalen)
label = Labels.unpack(data[:3])
return cls(rd,esi,etag,label,ip,iplen,gwip,exdata)
def json (self, compact=None):
content = ' "code": %d, ' % self.CODE
content += '"parsed": true, '
content += '"raw": "%s", ' % self._raw()
content += '"name": "%s", ' % self.NAME
content += '%s, ' % self.rd.json()
content += '%s, ' % self.esi.json()
content += '%s, ' % self.etag.json()
content += '%s, ' % self.label.json()
content += '"ip": "%s", ' % str(self.ip)
content += '"iplen": %d, ' % self.iplen
content += '"gateway": "%s" ' % str(self.gwip)
return '{%s}' % content
|
from django.conf.urls.defaults import *
urlpatterns = patterns('',
(r'example/', 'simple.localsite.views.example', {}),
)
|
class TestDataBridge:
async def test_version(self, client_databridge):
resp = await client_databridge.get('api')
assert resp.status == 200
data = await resp.json()
assert 'api_version' in data
async def test_ping(self, client_databridge):
resp = await client_databridge.get('api/ping')
assert resp.status == 200
data = await resp.json()
assert data['text'] == 'pong'
|
from time import sleep
from kafka import KafkaProducer
from alpha_vantage.timeseries import TimeSeries
import random
import json
import sys
def dataGrabber():
try:
ticker = 'GOOGL'
lines = open('key.txt').read().splitlines()
keys = random.choice(lines)
        ts = TimeSeries(key=keys, output_format='json')
        data, metadata = ts.get_intraday(symbol=ticker, interval='1min', outputsize='full')
return data
except Exception:
print("Invalid Key!")
sys.exit(1)
def messagePublisher(producerKey, key, data_key):
    keyBytes = bytes(key, encoding='utf-8')
    # publish the payload passed in by the caller, not the module-level global
    producerKey.send("GoogleStock", json.dumps(data_key).encode('utf-8'), keyBytes)
print("Message Published!")
def kafkaProducerConnect():
try:
producer = KafkaProducer(bootstrap_servers=['localhost:9092'])
return producer
    except Exception:
        print("Connection Refused!")
        sys.exit(1)
if __name__ == "__main__":
data = dataGrabber()
if len(data) > 0:
kafkaProducer = kafkaProducerConnect()
for key in sorted(data):
messagePublisher(kafkaProducer, key, data[key])
sleep(3)
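# A hedged sketch of the consuming side (assumes kafka-python and the same
# local broker; not part of this producer script):
#
#     from kafka import KafkaConsumer
#     consumer = KafkaConsumer('GoogleStock', bootstrap_servers=['localhost:9092'])
#     for msg in consumer:
#         print(msg.key, json.loads(msg.value))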
|
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
############################################################################
#
# This file contains common collectd plugin constructs and utilities
#
############################################################################
import collectd
import json
import uuid
import httplib2
import socket
import os
from oslo_concurrency import processutils
from fm_api import constants as fm_constants
import tsconfig.tsconfig as tsc
# http request constants
PLUGIN_TIMEOUT = 10
PLUGIN_HTTP_HEADERS = {'Accept': 'application/json', 'Connection': 'close'}
MIN_AUDITS_B4_FIRST_QUERY = 2
class PluginObject(object):
def __init__(self, plugin, url):
# static variables set in init_func
self.plugin = plugin # the name of this plugin
self.hostname = '' # the name of this host
self.port = 0 # the port number for this plugin
self.base_eid = '' # the base entity id host=<hostname>
self.controller = False # set true if node is controller
# dynamic gate variables
self.virtual = False # set to True if host is virtual
self.config_complete = False # set to True once config is complete
self.config_done = False # set true if config_func completed ok
self.init_done = False # set true if init_func completed ok
self.fm_connectivity = False # set true when fm connectivity ok
self.alarm_type = fm_constants.FM_ALARM_TYPE_7 # OPERATIONAL
self.cause = fm_constants.ALARM_PROBABLE_CAUSE_50 # THRESHOLD CROSS
self.suppression = True
self.service_affecting = False
# dynamic variables set in read_func
self.usage = float(0) # last usage value recorded as float
self.value = float(0) # last read value
self.audits = 0 # number of audit since init
self.enabled = False # tracks a plugin's enabled state
self.alarmed = False # tracks the current alarmed state
self.mode = '' # mode specific to plugin
# http and json specific variables
self.url = url # target url
self.jresp = None # used to store the json response
self.resp = ''
self.objects = [] # list of plugin specific objects
self.cmd = '' # plugin specific command string
# Log controls
self.config_logged = False # used to log once the plugin config
self.error_logged = False # used to prevent log flooding
self.log_throttle_count = 0 # used to count throttle logs
self.INIT_LOG_THROTTLE = 10 # the init log throttle threshold
self.phase = 0 # tracks current phase; init, sampling
collectd.debug("%s Common PluginObject constructor [%s]" %
(plugin, url))
###########################################################################
#
# Name : init_ready
#
# Description: Test for init ready condition
#
# Parameters : plugin name
#
# Returns : False if initial config complete is not done
# True if initial config complete is done
#
###########################################################################
def init_ready(self):
"""Test for system init ready state"""
        if not os.path.exists(tsc.INITIAL_CONFIG_COMPLETE_FLAG):
self.log_throttle_count += 1
if self.log_throttle_count > self.INIT_LOG_THROTTLE:
collectd.info("%s initialization needs retry" % self.plugin)
self.log_throttle_count = 0
return False
else:
self.log_throttle_count = 0
return True
###########################################################################
#
# Name : gethostname
#
# Description: load the hostname
#
# Parameters : plugin name
#
# Returns : Success - hostname
# Failure - None
#
# Updates : obj.hostname
#
###########################################################################
def gethostname(self):
"""Fetch the hostname"""
# get current hostname
try:
hostname = socket.gethostname()
if hostname:
return hostname
        except Exception:
collectd.error("%s failed to get hostname" % self.plugin)
return None
###########################################################################
#
# Name : is_virtual
#
# Description: Execute facter command with output filter on 'is_virtual'
#
# Parameters : None
#
# Returns : True if current host is virtual.
# False if current host is NOT virtual
#
###########################################################################
def is_virtual(self):
"""Check for virtual host"""
try:
cmd = '/usr/bin/facter is_virtual'
res, err = processutils.execute(cmd, shell=True)
if err:
return False
elif res:
# remove the trailing '\n' with strip()
if res.strip() == 'true':
collectd.info("%s %s is virtual" %
(self.plugin, self.hostname))
return True
        except Exception as ex:
            collectd.info("%s failed to execute '/usr/bin/facter' ; %s" %
                          (self.plugin, ex))
return False
###########################################################################
#
# Name : check_for_fit
#
# Description: load FIT data if it is present
#
# Fit Format : unit data -> 0 89
# - instance 0 value 89
#
# Parameters : plugin name
# object to update with fit
# name in fit file
# unit
#
# Returns : Did a failure occur ?
# False = no
# True = yes
#
# Updates : self.usage with FIT value if FIT conditions are present
# and apply
#
###########################################################################
def check_for_fit(self, name, unit):
"""Load FIT data into usage if it exists"""
fit_file = '/var/run/fit/' + name + '_data'
if os.path.exists(fit_file):
valid = False
with open(fit_file, 'r') as infile:
for line in infile:
try:
inst, val = line.split(' ')
if int(unit) == int(inst):
self.usage = float(val)
valid = True
                except Exception:
                    try:
                        val = float(line)
                        self.usage = float(val)
                        valid = True
                    except Exception:
                        collectd.error("%s bad FIT data; ignoring" %
                                       self.plugin)
        if valid is True:
            collectd.info("%s %.2f usage (unit %d) (FIT)" %
                          (self.plugin, self.usage, unit))
return False
return True
###########################################################################
#
# Name : make_http_request
#
# Description: Issue an http request to the specified URL.
# Load and return the response
# Handling execution errors
#
# Parameters : self as current context.
#
# Optional:
#
# url - override the default self url with http address to
# issue the get request to.
# to - timeout override
# hdrs - override use of the default header list
#
# Updates : self.jresp with the json string response from the request.
#
# Returns : Error indication (True/False)
# True on error
# False on success
#
###########################################################################
def make_http_request(self, url=None, to=None, hdrs=None):
"""Make a blocking HTTP Request and return result"""
try:
# handle timeout override
if to is None:
to = PLUGIN_TIMEOUT
# handle url override
if url is None:
url = self.url
# handle header override
if hdrs is None:
hdrs = PLUGIN_HTTP_HEADERS
http = httplib2.Http(timeout=to)
resp = http.request(url, headers=hdrs)
except Exception as ex:
collectd.info("%s http request failure (%s)" %
(self.plugin, str(ex)))
return True
try:
collectd.debug("%s Resp: %s" %
(self.plugin, resp[1]))
self.resp = resp[1]
self.jresp = json.loads(resp[1])
except Exception as ex:
collectd.info("%s http request parse failure (%s) (%s)" %
(self.plugin, str(ex), resp))
return True
return False
def is_uuid_like(val):
"""Returns validation of a value as a UUID
For our purposes, a UUID is a canonical form string:
aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
"""
try:
return str(uuid.UUID(val)) == val
except (TypeError, ValueError, AttributeError):
return False
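# Illustrative checks of is_uuid_like (canonical form only):
#
#     is_uuid_like('12345678-1234-5678-1234-567812345678')  # True
#     is_uuid_like('12345678123456781234567812345678')      # False (parses, but not canonical)
#     is_uuid_like(None)                                    # False (TypeError is caught)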
def get_severity_str(severity):
"""get string that represents the specified severity"""
if severity == fm_constants.FM_ALARM_SEVERITY_CLEAR:
return "clear"
elif severity == fm_constants.FM_ALARM_SEVERITY_CRITICAL:
return "critical"
elif severity == fm_constants.FM_ALARM_SEVERITY_MAJOR:
return "major"
elif severity == fm_constants.FM_ALARM_SEVERITY_MINOR:
return "minor"
else:
return "unknown"
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Tests for the Monorail home page."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
from third_party import ezt
import settings
from framework import permissions
from proto import site_pb2
from services import service_manager
from sitewide import hostinghome
from sitewide import projectsearch
from testing import fake
from testing import testing_helpers
class MockProjectSearchPipeline(object):
def __init__(self, _mr, services):
self.visible_results = services.mock_visible_results
self.pagination = None
def SearchForIDs(self):
pass
def GetProjectsAndPaginate(self, cnxn, list_page_url):
pass
class HostingHomeTest(unittest.TestCase):
def setUp(self):
self.services = service_manager.Services(
project=fake.ProjectService(),
project_star=fake.ProjectStarService())
self.services.mock_visible_results = []
self.project_a = self.services.project.TestAddProject('a', project_id=1)
self.project_b = self.services.project.TestAddProject('b', project_id=2)
self.servlet = hostinghome.HostingHome('req', 'res', services=self.services)
self.mr = testing_helpers.MakeMonorailRequest(user_info={'user_id': 111})
self.orig_pipeline_class = projectsearch.ProjectSearchPipeline
projectsearch.ProjectSearchPipeline = MockProjectSearchPipeline
def tearDown(self):
projectsearch.ProjectSearchPipeline = self.orig_pipeline_class
def testSearch_ZeroResults(self):
self.services.mock_visible_results = []
page_data = self.servlet.GatherPageData(self.mr)
self.assertEqual([], page_data['projects'])
def testSearch_NonzeroResults(self):
self.services.mock_visible_results = [self.project_a, self.project_b]
page_data = self.servlet.GatherPageData(self.mr)
self.assertEqual(['a', 'b'],
[pv.project_name for pv in page_data['projects']])
def testStarCounts(self):
"""Test the display of star counts on each displayed project."""
self.services.mock_visible_results = [self.project_a, self.project_b]
# We go straight to the services layer because this is a test set up
# rather than an actual user request.
self.services.project_star.SetStar('fake cnxn', 1, 111, True)
self.services.project_star.SetStar('fake cnxn', 1, 222, True)
page_data = self.servlet.GatherPageData(self.mr)
project_view_a, project_view_b = page_data['projects']
self.assertEqual(2, project_view_a.num_stars)
self.assertEqual(0, project_view_b.num_stars)
def testStarredProjects(self):
self.services.mock_visible_results = [self.project_a, self.project_b]
self.services.project_star.SetStar('fake cnxn', 1, 111, True)
page_data = self.servlet.GatherPageData(self.mr)
project_view_a, project_view_b = page_data['projects']
self.assertTrue(project_view_a.starred)
self.assertFalse(project_view_b.starred)
def testGatherPageData(self):
mr = testing_helpers.MakeMonorailRequest()
page_data = self.servlet.GatherPageData(mr)
self.assertEqual(settings.learn_more_link, page_data['learn_more_link'])
def testGatherPageData_CanCreateProject(self):
mr = testing_helpers.MakeMonorailRequest()
mr.perms = permissions.PermissionSet([permissions.CREATE_PROJECT])
page_data = self.servlet.GatherPageData(mr)
self.assertEqual(
ezt.boolean(settings.project_creation_restriction ==
site_pb2.UserTypeRestriction.ANYONE),
page_data['can_create_project'])
mr.perms = permissions.PermissionSet([])
page_data = self.servlet.GatherPageData(mr)
self.assertEqual(ezt.boolean(False), page_data['can_create_project'])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import web
import time
from bson.objectid import ObjectId
from config import setting
import helper
db = setting.db_web
url = ('/report/voice')
# - User feedback -----------
class handler: #class PosReport:
def GET(self):
if helper.logged(helper.PRIV_USER,'REPORT_VOICE'):
render = helper.create_render()
#user_data=web.input(start_date='', shop='__ALL__', ret_type='table')
            # Show the last 30 days
            start_date = helper.time_str(time.time()-3600*24*30, format=1)
            # Start/end time window
begin_date = '%s 00:00:00' % start_date
#end_date = '%s 23:59:59' % start_date
#print begin_date, end_date
#
db_voice = db.customer_voice.find({'time' : {'$gt' : begin_date}},{'_id':0}).sort([('_id',-1)])
return render.report_voice(helper.get_session_uname(), helper.get_privilege_name(), db_voice)
else:
raise web.seeother('/')
|
from rezgui.qt import QtGui
from rezgui.widgets.BrowsePackageWidget import BrowsePackageWidget
from rezgui.widgets.ContextSettingsWidget import ContextSettingsWidget
from rezgui.mixins.ContextViewMixin import ContextViewMixin
from rezgui.util import get_icon
class BrowsePackagePane(QtGui.QTabWidget, ContextViewMixin):
"""A widget for browsing rez packages.
Unlike `BrowsePackageWidget`, this class has its own settings tab, so that
packages path can be changed. In contrast, `BrowsePackageWidget` does not,
because it is intended to allow browsing of packages within an existing
context.
"""
def __init__(self, context_model=None, parent=None):
super(BrowsePackagePane, self).__init__(parent)
ContextViewMixin.__init__(self, context_model)
self.browse = BrowsePackageWidget(self.context_model)
self.settings = ContextSettingsWidget(self.context_model,
attributes=("packages_path",))
icon = get_icon("package", as_qicon=True)
self.addTab(self.browse, icon, "packages")
icon = get_icon("cog", as_qicon=True)
self.addTab(self.settings, icon, "settings")
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
|
"""
This modules defines the board of the game based on the configuration the user has requested
"""
import numpy as np
import pawn
import math
##DIRECTIONS##
NORTHWEST = "northwest"
NORTHEAST = "northeast"
SOUTHWEST = "southwest"
SOUTHEAST = "southeast"
# Constant for Obstacle in the game, 21 because max pawn_id in our game is 20
OBSTACLE = 21
class Board:
# Initialize the board based on the config the user requested
def __init__(self, numOfSquares=8, num_of_pawns = 12):
self.board = np.zeros((numOfSquares, numOfSquares))
self.p1_pawns = {}
self.p2_pawns = {}
self.num_of_pawns = num_of_pawns
if numOfSquares == 10:
self.num_of_pawns = 20
elif numOfSquares == 6:
self.num_of_pawns = 6
num_of_rows = self.num_of_pawns / (numOfSquares / 2)
self.initialize_players(0, 1, self.num_of_pawns)
if numOfSquares == 8 and num_of_pawns != 6:
self.initialize_players(int(numOfSquares - num_of_rows), 0, self.num_of_pawns, False)
else:
self.initialize_players(int(numOfSquares - num_of_rows), 1, self.num_of_pawns, False)
self.total_moves = 0
self.moves_since_last_capture = 0
# Initialize player pawns and populate the board with their positions
def initialize_players(self, start_row, start_index, num_of_pawns, p1=True):
rows, cols = self.board.shape
num_rows_to_fill = math.ceil(num_of_pawns / (cols / 2))
pawn_id = 1
for row in range(start_row, start_row + num_rows_to_fill):
for col in range(start_index, cols, 2):
if pawn_id > num_of_pawns:
break
if (p1):
self.board[row, col] = pawn_id
self.p1_pawns[pawn_id] = pawn.Pawn(pawn_id, row, col, start_row)
pawn_id += 1
else:
self.board[row, col] = -pawn_id
self.p2_pawns[-pawn_id] = pawn.Pawn(-pawn_id, row, col, start_row)
pawn_id += 1
if start_index == 0:
start_index = 1
else:
start_index = 0
    # Updates the board and pawn object according to the new coordinates.
    # NOTE: this definition is shadowed by the second update_board_pawn defined
    # later in the class; Python keeps the last definition, so this swap-based
    # version is never actually used.
def update_board_pawn(self, new_x, new_y, pawn, p1=True):
old_x, old_y = pawn.coordinates
temp = self.board[new_x][new_y]
self.board[new_x][new_y] = pawn.id
self.board[old_x][old_y] = temp
if p1:
self.p1_pawns[pawn.id].coordinates = (new_x, new_y)
else:
self.p2_pawns[pawn.id].coordinates = (new_x, new_y)
    # Returns all the pawns that are available to move for a given player
    # TODO: Rename the parameters for consistency
def get_available_pawns(self, player_pawns_list, p2_list, dir):
temp_dict = player_pawns_list
player_available_pawns = []
for p in temp_dict:
if not temp_dict[int(p)].is_king:
x, y = self.get_new_coordinates(dir[0], temp_dict[p])
a, b = self.get_new_coordinates(dir[1], temp_dict[p])
if self.check_boundry(x, y) and p not in player_available_pawns:
if self.board[x][y] == 0:
player_available_pawns.append(p)
elif self.board[x][y] in p2_list:
x1, y1 = self.get_new_coordinates(dir[0], p2_list[self.board[x][y]])
if self.check_boundry(x1, y1) and self.board[x1, y1] == 0:
player_available_pawns.append(p)
if self.check_boundry(a, b) and p not in player_available_pawns:
if self.board[a][b] == 0:
player_available_pawns.append(p)
elif self.board[a][b] in p2_list:
a1, b1 = self.get_new_coordinates(dir[1], p2_list[self.board[a][b]])
if self.check_boundry(a1, b1) and self.board[a1, b1] == 0:
player_available_pawns.append(p)
else:
temp_list = self.get_kings_move(temp_dict[p])
if len(temp_list) > 0:
player_available_pawns.append(p)
return player_available_pawns
# Checks if given point (x, y) is within the board
def check_boundry(self, x, y):
rows, cols = self.board.shape
        return (0 <= x < rows) and (0 <= y < cols)
# This method is used to search for all the movable pawns of the players
def check_available_pawns_to_move(self, p1=False):
"""
:param p1 boolean
:return array array of pawns that can move forward/backward
Available pawns to move
"""
        if p1:
return self.get_available_pawns(self.p1_pawns, self.p2_pawns, [SOUTHWEST, SOUTHEAST])
else:
return self.get_available_pawns(self.p2_pawns, self.p1_pawns, [NORTHWEST, NORTHEAST])
# Given direction, return the corresponding pawn
def get_new_coordinates(self, dir, pawn):
"""
Returns the coordinates one square in a different direction to (x,y).
"""
x, y = (pawn.coordinates)
if dir == NORTHWEST:
return x - 1, y - 1
elif dir == SOUTHWEST:
return x + 1, y - 1
elif dir == NORTHEAST:
return x - 1, y + 1
elif dir == SOUTHEAST:
return x + 1, y + 1
else:
return 0
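    # For example, from a pawn at (4, 4): NORTHWEST -> (3, 3),
    # NORTHEAST -> (3, 5), SOUTHWEST -> (5, 3), SOUTHEAST -> (5, 5).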
# Given pawn, return all the coordinates the pawn can move for player 1
def get_player1_moves(self, dir1, dir2, pawn):
get_pawn_moves = []
sw_x, sw_y = self.get_new_coordinates(dir1, pawn)
se_x, se_y = self.get_new_coordinates(dir2, pawn)
if self.check_boundry(sw_x, sw_y) and self.board[sw_x][sw_y] < 0 and self.board[sw_x][sw_y] != OBSTACLE:
sw_sw_x, sw_sw_y = self.get_new_coordinates(dir1, self.p2_pawns[self.board[sw_x][sw_y]])
if self.check_boundry(sw_sw_x, sw_sw_y) and self.board[sw_sw_x][sw_sw_y] == 0:
get_pawn_moves.append((sw_sw_x, sw_sw_y))
        if self.check_boundry(se_x, se_y) and self.board[se_x][se_y] < 0 and self.board[se_x][se_y] != OBSTACLE:
se_se_x, se_se_y = self.get_new_coordinates(dir2, self.p2_pawns[self.board[se_x][se_y]])
if self.check_boundry(se_se_x, se_se_y) and self.board[se_se_x][se_se_y] == 0:
get_pawn_moves.append((se_se_x, se_se_y))
if self.check_boundry(sw_x, sw_y) and self.board[sw_x][sw_y] == 0:
get_pawn_moves.append((sw_x, sw_y))
if self.check_boundry(se_x, se_y) and self.board[se_x][se_y] == 0:
get_pawn_moves.append((se_x, se_y))
return get_pawn_moves
# Given pawn, return all the coordinates the pawn can move for player 1
# TODO: combine this and above method to one single method
def get_player2_moves(self, dir1, dir2, pawn):
get_pawn_moves = []
nw_x, nw_y = self.get_new_coordinates(dir1, pawn)
ne_x, ne_y = self.get_new_coordinates(dir2, pawn)
if self.check_boundry(nw_x, nw_y) and self.board[nw_x][nw_y] > 0 and self.board[nw_x][nw_y] != OBSTACLE:
nw_nw_x, nw_nw_y = self.get_new_coordinates(dir1, self.p1_pawns[self.board[nw_x][nw_y]])
if self.check_boundry(nw_nw_x, nw_nw_y) and self.board[nw_nw_x][nw_nw_y] == 0:
get_pawn_moves.append((nw_nw_x, nw_nw_y))
if self.check_boundry(ne_x, ne_y) and self.board[ne_x][ne_y] > 0 and self.board[ne_x][ne_y] != OBSTACLE:
ne_ne_x, ne_ne_y = self.get_new_coordinates(dir2, self.p1_pawns[self.board[ne_x][ne_y]])
if self.check_boundry(ne_ne_x, ne_ne_y) and self.board[ne_ne_x][ne_ne_y] == 0:
get_pawn_moves.append((ne_ne_x, ne_ne_y))
if self.check_boundry(nw_x, nw_y) and self.board[nw_x][nw_y] == 0:
get_pawn_moves.append((nw_x, nw_y))
if self.check_boundry(ne_x, ne_y) and self.board[ne_x][ne_y] == 0:
get_pawn_moves.append((ne_x, ne_y))
return get_pawn_moves
# This method is used to check the possible coordinates that the pawn can move to
def get_moves(self, pawn):
"""
:param pawn Pawn object
:return array array of coordinates the pawn can move to
Returns a list of legal move locations from a set of coordinates (x,y) on the board.
If that location is empty, then get_moves() return an empty list.
"""
x, y = (pawn.coordinates)
pawn_id = self.board[x][y]
if pawn_id != 0:
if pawn_id < 0 and pawn.is_king is False:
get_pawn_moves = self.get_player2_moves(NORTHWEST, NORTHEAST, pawn)
elif pawn_id > 0 and pawn.is_king is False:
get_pawn_moves = self.get_player1_moves(SOUTHWEST, SOUTHEAST, pawn)
else:
get_pawn_moves = self.get_kings_move(pawn)
else:
get_pawn_moves = []
return get_pawn_moves
# Given a King pawn, get all the possible coordinates that the pawn can move to
def get_kings_move(self, pawn):
x, y = (pawn.coordinates)
get_pawn_moves = []
pawn_id = self.board[x][y]
if pawn_id != 0:
if pawn_id < 0:
get_pawn_moves.extend(self.get_player2_moves(NORTHWEST, NORTHEAST, pawn))
get_pawn_moves.extend(self.get_player2_moves(SOUTHWEST, SOUTHEAST, pawn))
elif pawn_id > 0:
get_pawn_moves.extend(self.get_player1_moves(NORTHWEST, NORTHEAST, pawn))
get_pawn_moves.extend(self.get_player1_moves(SOUTHWEST, SOUTHEAST, pawn))
return get_pawn_moves
# This method is used to analyze the move when the pawn is selected
def check_move_type(self, pawn, direction):
"""
:param pawn Pawn object
:return int 0 for simple move, 1 capturing move and -1 if the move cannot be made
"""
new_x, new_y = self.get_new_coordinates(direction, pawn)
new_id = self.board[new_x, new_y]
        if new_id == 0:
            return 0
        elif new_id == OBSTACLE or (new_id > 0 and pawn.id > 0) or (new_id < 0 and pawn.id < 0):
            # an obstacle or a friendly pawn blocks the move
            return -1
        else:
            return 1
# This method controls the pawn's movement
def move_pawn(self, pawn, coordinate):
"""
This method handle the pawn movement inside the board
:param pawn_id int
Changes the position of the pawn selected and state of board
:return list if the move is of type capture and can be chained
"""
direction = self.get_direction_from_coordinates(pawn, coordinate)
self.total_moves += 1
self.moves_since_last_capture += 1
chain_capture_coordinates = []
if self.check_move_type(pawn, direction) == 0:
self.simple_move(pawn, direction)
elif self.check_move_type(pawn, direction) == 1:
self.move_capture_pawn(pawn, direction)
# Check if the move can be chained by another capture
chain_capture_coordinates = self.get_chain_capture_coordinates(pawn)
if (pawn.id > 0 and pawn.coordinates[0] == self.board.shape[0] - 1) or (
pawn.id < 0 and pawn.coordinates[0] == 0):
pawn.is_king = True
return chain_capture_coordinates
def get_chain_capture_coordinates(self, pawn):
chain_capture_coordinates = []
moves_list = self.get_moves(pawn)
for coordinate in moves_list:
move_type = self.check_move_type(pawn, self.get_direction_from_coordinates(pawn, coordinate))
if move_type == 1:
chain_capture_coordinates.append(coordinate)
return chain_capture_coordinates
# This method is used when the move type is a capturing move
def move_capture_pawn(self, pawn, direction):
"""
:param pawn Pawn object
"""
pawn_id = pawn.id
pawn_coordinates = pawn.coordinates
rival_pawn_coordinates = self.get_new_coordinates(direction, pawn)
rival_pawn = self.p1_pawns[self.board[rival_pawn_coordinates]] if self.board[rival_pawn_coordinates] > 0 else \
self.p2_pawns[self.board[rival_pawn_coordinates]]
new_x, new_y = self.get_new_coordinates(direction, rival_pawn)
self.remove_pawn(rival_pawn, rival_pawn.id > 0)
self.update_board_pawn(new_x, new_y, pawn, pawn.id > 0)
# This method is used when the move type is simple, move the pawn diagonally, change the state of board and coordinate of given pawn
def simple_move(self, pawn, direction):
"""
:param pawn Pawn object
"""
new_x, new_y = self.get_new_coordinates(direction, pawn)
self.update_board_pawn(new_x, new_y, pawn, pawn.id > 0)
# This method is used to update the state of the board and pawn
def update_board_pawn(self, new_x, new_y, pawn, p1=True):
self.board[new_x, new_y] = pawn.id
self.board[pawn.coordinates] = 0
if (p1):
self.p1_pawns[pawn.id].coordinates = (new_x, new_y)
else:
self.p2_pawns[pawn.id].coordinates = (new_x, new_y)
# This method is used to remove pawn from players' dictionary and updates the state of board
def remove_pawn(self, pawn, p1=True):
self.moves_since_last_capture = 0
pawn_id = pawn.id
pawn_coordinates = pawn.coordinates
if p1:
self.p1_pawns.pop(pawn_id, None)
else:
self.p2_pawns.pop(pawn_id, None)
self.board[pawn_coordinates] = 0
# This method checks if the game is over or not.
def check_game_status(self):
"""
This method checks the status of the game
Returns true if the game is over and false if the game is still active in progress
"""
if self.moves_since_last_capture > 40 or len(self.p1_pawns) == 0 or len(self.p2_pawns) == 0:
return True
return False
# This method is used to declare winner
def declare_winner(self):
"""
This method declares the winner of the game
Returns 1 | 0 | -1, 1 if player1 is the winner, -1 if player2 is the winner and 0 if its a tie
"""
if len(self.p1_pawns) == 0:
return -1
elif len(self.p2_pawns) == 0:
return 1
else:
return 1 if len(self.p1_pawns) > len(self.p2_pawns) else -1
# This method gives the direction from the given pawn and new coordinate
def get_direction_from_coordinates(self, pawn, new_coordinate):
x, y = (pawn.coordinates)
new_x, new_y = new_coordinate
if x > new_x and y > new_y:
return NORTHWEST
elif x < new_x and y > new_y:
return SOUTHWEST
elif x > new_x and y < new_y:
return NORTHEAST
elif x < new_x and y < new_y:
return SOUTHEAST
# Returns the number of kings in the given pawn list
def total_kings(self, pawns):
count = 0
for pawn in pawns.values():
if pawn.is_king:
count += 1
return count
# Evaluate score (simpler version)
def game_score(self):
return len(self.p1_pawns) - len(self.p2_pawns) + \
(self.total_kings(self.p1_pawns) * 0.5 - self.total_kings(self.p2_pawns) * 0.5)
# Computes the score of the state according to pawn coordinate position and is_king status
def compute_score(self):
score = 0
# if player1's turn
if self.total_moves % 2 == 0:
for i in range(self.board[0].size):
for j in range(self.board[0].size):
pawn = self.board[i][j]
if pawn in self.p1_pawns.keys() or pawn in self.p2_pawns.keys():
if pawn in self.p1_pawns.keys() and self.p1_pawns[pawn].is_king:
score += 10
elif pawn in self.p2_pawns.keys() and self.p2_pawns[pawn].is_king:
score -= 10
elif pawn in self.p1_pawns.keys() and i < 4:
score += 5
elif pawn in self.p2_pawns.keys() and i < 4:
score -= 7
elif pawn in self.p1_pawns.keys() and i >= 4:
score += 7
elif pawn in self.p2_pawns.keys() and i >= 4:
score -= 5
# if player2's turn
else:
for i in range(self.board[0].size):
for j in range(self.board[0].size):
pawn = self.board[i][j]
if pawn in self.p1_pawns.keys() or pawn in self.p2_pawns.keys():
if pawn in self.p1_pawns.keys() and self.p1_pawns[pawn].is_king:
score += 10
elif pawn in self.p2_pawns.keys() and self.p2_pawns[pawn].is_king:
score -= 10
elif pawn in self.p1_pawns.keys() and i < 4:
score += 7
elif pawn in self.p2_pawns.keys() and i < 4:
score -= 5
elif pawn in self.p1_pawns.keys() and i >= 4:
score += 7
elif pawn in self.p2_pawns.keys() and i >= 4:
score -= 5
#print(f"score: {score / (len(self.p1_pawns) + (len(self.p2_pawns)))}")
return score / (len(self.p1_pawns) + (len(self.p2_pawns)))
# This method adds obstacles to the board
def set_obstacles(self, num_of_obstacles=0):
obstacles = []
rows = self.board.shape[0]
while num_of_obstacles > 0:
x = np.random.randint(rows)
y = np.random.randint(rows)
if self.board[x, y] == 0:
self.board[x, y] = OBSTACLE
obstacles.append((x, y))
num_of_obstacles -= 1
return obstacles
# String representation of the Board object
def __str__(self):
return f"Board: \n{self.board}\n"
if __name__ == "__main__":
board = Board()
obs = board.set_obstacles(3)
print(board)
print(obs)
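    # A minimal follow-on sketch (the default 8x8 layout puts pawn 9 at (2, 1);
    # its options may differ if an obstacle landed in its path):
    #
    #     p = board.p1_pawns[9]
    #     moves = board.get_moves(p)  # typically [(3, 0), (3, 2)]
    #     if moves:
    #         board.move_pawn(p, moves[0])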
|
import pandas as pd
import numpy as np
#import matplotlib.pyplot as plt
import time
from pymongo import *
import re
from TopicModeling import *  # LDA algorithm
dbName = "tw_arg_usa"
client = MongoClient('localhost', 27017)
db = client['twitter_db']
collection = db[dbName]
class Document():
def __init__(self,col):
self.documentText=""
self.collection=col
def obtenerPedazoDocumento(self,row):
texto=self.obtenerTexto(row['id'])
texto = re.sub('[.@/:!?"#$\n]', '', texto)
self.documentText=self.documentText+texto
return 1
def obtenerTexto(self,idTuit):
tuits = self.collection.find({"id": idTuit})
if(tuits.count()==0):
print("No existe ese tuit")
return None
t = tuits.next()
texto = t["text"]
return texto
documents=[]
df = pd.read_csv("tw_arg_usa.csv")
df['hour']=df.apply(lambda x:time.strftime('%H', time.strptime(x['created_at'],'%a %b %d %H:%M:%S +0000 %Y')),axis=1)
df['minute']=df.apply(lambda x:int(time.strftime('%M', time.strptime(x['created_at'],'%a %b %d %H:%M:%S +0000 %Y')))//10*10,axis=1)
#df['time']=df.apply(lambda x:str(x['hour'])+":"+str(x['minute']),axis=1)
grouped_df=df.groupby(['hour','minute'])
for key, item in grouped_df:
aux_df= grouped_df.get_group(key)
document=Document(collection)
aux_df.apply(document.obtenerPedazoDocumento,axis=1)
documents.append(document.documentText)
# TopicModelingLDA wraps the LDA computation;
# "term_score" is the method that selects the most relevant words
lda_wrapper = TopicModelingLDA(documents, "term_score")
# number of topics: set to the number of documents
# number of iterations: set to 10
lda_wrapper.fit(len(documents), 10)
# get the 10 most important words of each topic
top10 = lda_wrapper.get_highest_scores()
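# Minimal usage sketch (hedged: TopicModeling is a local module, so the exact
# return shape of get_highest_scores() is an assumption -- one entry of top
# words per topic):
# for topic_idx, words in enumerate(top10):
#     print("topic", topic_idx, ":", words)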
"""
for i, doc in enumerate(documents):
    print("****************** DOCUMENT NUMBER:", i, "********************")
    print(doc)
"""
|
import os
import math
template_dir = "new-template"
outers = ["one","two","three","four","five"]
inners = ["two","three","four","five"]
adds = ["const","lin","quad","cub","quar","quin"]
def writeLoop(outfile, degree, typ):
template = open(template_dir + "/" + adds[degree] + typ + ".tmp")
for line in template:
outfile.write(line)
outfile.write("\n")
template.close()
def createFile(target, a, b, degree, typ):
outfile = open(target, "w+")
outfile.write("int __cost = 0;\n\n")
if (degree != 0 and typ != "") :
writeLoop(outfile, degree, typ)
outfile.write("void recursive(int n) {\n")
outfile.write(" __VERIFIER_assume (n >= 0);\n")
terminating_cond = " || ".join(["n == " + str(i) for i in range(b, 2*b)])
outfile.write(" if (" + terminating_cond + ") {\n")
if (degree != 0):
outfile.write(" __cost += " + str(2*b-1) + ";\n")
else :
outfile.write(" __cost++;\n")
outfile.write(" return;\n")
outfile.write(" }\n")
outfile.write(" int m = n/" + str(b) + ";\n")
for i in range(0, a):
outfile.write(" recursive(m);\n")
if (typ == ""):
if (degree == 0):
outfile.write(" __cost++;\n")
elif (degree == 1):
outfile.write(" __cost+=m;\n")
elif (degree == 2):
outfile.write(" __cost+=m*m;\n")
elif (degree == 3):
outfile.write(" __cost+=m*m*m;\n")
elif (degree == 4):
outfile.write(" __cost+=m*m*m*m;\n")
else :
outfile.write(" __cost+=m*m*m*m*m;\n")
else :
outfile.write(" loop(m);\n")
outfile.write(" return;\n")
outfile.write("}\n\n")
outfile.write("void main(int n) {\n")
outfile.write(" recursive(n);\n")
outfile.write("}\n")
outfile.close()
for outerval, outer in enumerate(outers):
for innerval, inner in enumerate(inners):
for degree, add in enumerate(adds):
case = "case"
if (outerval == 0):
if (degree == 0):
case = case + "2"
else:
continue
elif (math.log(outerval+1,innerval+2) > degree):
case = case + "1"
elif (math.log(outerval+1,innerval+2) == degree):
case = case + "2"
elif (math.pow(innerval+2,degree)>(outerval+1)):
case = case + "3"
else:
continue
            # create the nested output directory in one call (replaces four
            # chained mkdir subprocess calls)
            os.makedirs(case + "/a-" + outer + "/b-" + inner + "/add-" + add,
                        exist_ok=True)
if (add == "const"):
target = case + "/a-"+outer+"/b-" + inner + "/add-" +add+"/a_"+outer+"_b_"+inner+"_add_"+add+".c"
createFile(target, outerval+1, innerval+2, degree, "")
else :
types = ["","_rec","_loop"]
for typ in types:
target = case + "/a-"+outer+"/b-" + inner + "/add-" +add+"/a_"+outer+"_b_"+inner+"_add_"+add+typ+".c"
createFile(target, outerval+1, innerval+2, degree, typ)
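# Sketch (illustrative, not used by the generator above): the case selection
# mirrors the master theorem for T(n) = a*T(n/b) + n^degree, comparing
# log_b(a) against the degree of the additive term.
def master_theorem_case(a, b, degree):
    critical = math.log(a, b)  # log_b(a)
    if critical > degree:
        return 1   # recursion dominates ("case1" above)
    if critical == degree:
        return 2   # balanced, extra log factor ("case2")
    return 3       # additive term dominates ("case3")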
|
# To fit an HSMM, we want the probability of the state given the length of
# time since the last word.
# This only applies to the non-zero transitions in the HMM.
import numpy as np
import time
from collections import defaultdict
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
# from sklearn.naive_bayes import GaussianNB
# from sklearn.tree import DecisionTreeClassifier
# from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
import cPickle
# import sys
# sys.path.append("../")
from deep_disfluency.feature_extraction.feature_utils import \
load_data_from_corpus_file
from deep_disfluency.load.load import load_tags
def load_timing_data(dialogues, labels2idx, simple=False):
    # get the relevant label dictionaries
if simple:
# overall_simple_trp_label2idx = {0: "<cc/>",
# 1: "<ct/>",
# 2: "<tc/>",
# 3: "<tt/>"}
simple_trp_label2idx = {0: "<c",
1: "<t"}
# simple_trp_label2idx2 = {0: "c/>",
# 1: "t/>"}
else:
simple_trp_label2idx = labels2idx
# a map from the label index to every duration of that label
timing_dict = defaultdict(list)
for dialogue in dialogues:
_, dialogue_speaker_data = dialogue
_, lex_data, _, _, labels = dialogue_speaker_data
timings = [float(l[2]) for l in lex_data] # just word end timings
prev = 0
prev_t = 0
prev_prev_t = 0 # effecting a trigram of timings
for i in range(0, len(timings)):
t = timings[i] - prev
complex_tag = labels[i]
# print complex_tag
found = False
tag = labels[i]
if simple:
for k, v in simple_trp_label2idx.items():
if v in complex_tag:
tag = k
found = True
break
if not found:
if "<laughter" in complex_tag:
continue
raw_input("warning: " + complex_tag + " " + tag)
if t < 0:
print "below zero"
t = np.average([x[0] for x in timing_dict[tag]])
timings[i-1] = timings[i] - t
# turn to milliseconds
timing_dict[tag].append((prev_prev_t, prev_t,
t))
# now the second one
prev = timings[i]
prev_prev_t = prev_t
prev_t = t
# compile all the timings information together
X = []
y = []
for i in sorted(timing_dict.keys()):
print simple_trp_label2idx[i]
print np.average([time[0] for time in timing_dict[i]]),
print np.std([time[0] for time in timing_dict[i]])
for tup in timing_dict[i]:
X.append(list(tup))
y.append(i)
X = np.asarray(X)
y = np.asarray(y)
print X.shape
print y.shape
return X, y
def train(X, y):
scaler = StandardScaler()
X = scaler.fit_transform(X)
# quite a sparsity problem
model = LogisticRegression(class_weight='balanced',
multi_class='ovr')
model.fit(X, y)
print(model)
return model, scaler
def test_simple(model, scaler, data, y):
# make predictions
X = scaler.transform(data)
expected = y
print expected
predicted = model.predict(X)
print model.predict_proba(X)
print metrics.classification_report(expected, predicted)
print metrics.confusion_matrix(expected, predicted)
def test(model, scaler, X, y):
# make predictions
def convert_to_overall_dict(a, b):
if a == 0:
if b == 0:
return 0
elif b == 1:
return 1
elif a == 1:
if b == 0:
return 2
elif b == 1:
return 3
print a, b, "wrong!"
return None
def convert_to_two_singles(a):
if a == 0:
return 0, 0
if a == 1:
return 0, 1
if a == 2:
return 1, 0
if a == 3:
return 1, 1
test1 = []
for x in list(y):
a, _ = convert_to_two_singles(x)
test1.append(a)
X1 = scaler.transform(X)
predicted = model.predict(X1)
print metrics.classification_report(np.asarray(test1), predicted)
print metrics.confusion_matrix(np.asarray(test1), predicted)
if __name__ == '__main__':
disf_dir = "../data/disfluency_detection/switchboard"
training_file = disf_dir + "/swbd_disf_train_1_2_partial_data_timings.csv"
heldout_file = disf_dir + "/swbd_disf_heldout_partial_data_timings.csv"
tag_dir = "../data/tag_representations"
labels2idx = load_tags(tag_dir + "/swbd_uttseg_tags.csv")
dialogues = load_data_from_corpus_file(training_file)
X, y = load_timing_data(dialogues, labels2idx, simple=True)
model, scaler = train(X, y)
dialogues = load_data_from_corpus_file(heldout_file)
X, y = load_timing_data(dialogues, labels2idx, simple=True)
test_simple(model, scaler, X, y)
# save the classifier
with open('timing_models/' +
'LogReg_balanced_timing_classifier.pkl', 'wb') as fid:
cPickle.dump(model, fid)
with open('timing_models/' +
'LogReg_balanced_timing_scaler.pkl', 'wb') as fid:
cPickle.dump(scaler, fid)
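    # Sketch: to reuse the saved classifier and scaler later (same paths):
    # with open('timing_models/LogReg_balanced_timing_classifier.pkl', 'rb') as fid:
    #     model = cPickle.load(fid)
    # with open('timing_models/LogReg_balanced_timing_scaler.pkl', 'rb') as fid:
    #     scaler = cPickle.load(fid)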
|
moveSet1 = 'R1009,D117,L888,D799,L611,U766,L832,U859,L892,D79,R645,U191,L681,D787,R447,D429,L988,U536,L486,D832,R221,D619,R268,D545,L706,U234,L528,D453,R493,D24,L688,U658,L74,D281,R910,D849,L5,U16,R935,D399,L417,U609,R22,D782,L432,D83,L357,D982,L902,U294,L338,U102,R342,D621,R106,U979,L238,U158,R930,D948,L700,D808,R445,U897,R980,U227,L466,D416,R244,U396,R576,U157,R548,U795,R709,U550,R137,U212,L977,U786,L423,D792,R391,D974,R390,U771,R270,D409,L917,D9,R412,D699,L170,D276,L912,U710,R814,U656,R4,D800,R596,U970,L194,U315,L845,D490,L303,U514,L675,D737,L880,D86,L253,D525,R861,D5,R424,D113,L764,D900,R485,D421,R125,U684,R53,U96,L871,U260,R456,U378,L448,D450,L903,D482,R750,U961,R264,D501,R605,D367,R550,U642,R228,U164,L343,U868,R595,D318,R452,U845,L571,D281,R49,D889,L481,U963,R182,U358,R454,U267,L790,D252,R455,D188,L73,U256,L835,D816,R503,U895,L259,U418,R642,U818,L187,U355,R772,U466,R21,U91,R707,D349,L200,U305,R931,D982,L334,D416,L247,D935,L326,U449,L398,D914,R602,U10,R762,D944,L639,D141,L457,U579,L198,U527,R750,U167,R816,D753,R850,D281,L712,D583,L172,D254,L544,D456,R966,U839,R673,D479,R730,D912,R992,D969,R766,U205,R477,D719,R172,D735,R998,D687,R698,D407,R172,U945,R199,U348,L256,D876,R580,U770,L483,D437,R353,D214,R619,U541,R234,D962,R842,U639,R520,D354,L279,D15,R42,U138,L321,D376,L628,D893,L670,D574,L339,U298,L321,D120,L370,U408,L333,D353,L263,D79,R535,D487,R113,D638,R623,D59,L508,D866,R315,U166,L534,U927,L401,D626,L19,D994,L778,D317,L936,U207,L768,U948,R452,U165,R864,D283,L874'
moveSet2 = 'L995,D93,L293,U447,L793,D605,R497,D155,L542,D570,R113,D779,L510,U367,L71,D980,R237,U290,L983,U49,R745,U182,L922,D174,L189,D629,R315,D203,R533,U72,L981,D848,L616,U654,R445,D864,R526,D668,L678,U378,L740,D840,L202,D429,R136,D998,L116,D554,L893,U759,R617,U942,R999,U582,L220,U447,R895,D13,R217,U743,L865,U950,R91,D381,R662,D518,L798,D637,L213,D93,L231,D185,R704,U581,L268,U773,R405,U862,R796,U73,L891,U553,L952,U450,R778,D868,R329,D669,L182,U378,L933,D83,R574,U807,R785,D278,R139,D362,R8,U546,R651,U241,L462,D309,L261,D307,L85,U701,L913,U271,R814,U723,L777,D256,R417,U814,L461,U652,R198,D747,R914,U520,R806,U956,L771,D229,R984,U685,R663,D812,R650,U214,R839,U574,L10,U66,R644,D371,L917,D819,L73,D236,R277,U611,R390,U723,L129,D496,L552,D451,R584,U105,L805,U165,R179,D372,L405,D702,R14,U332,L893,D419,R342,D146,R907,D672,L316,U257,L903,U919,L942,U771,R879,U624,L280,U150,L320,U220,R590,D242,R744,U291,R562,U418,L898,U66,L564,U495,R837,D555,L739,D780,R409,D122,L426,D857,R937,D600,R428,D592,R727,U917,R256,D680,L422,U630,L14,U240,R617,D664,L961,D554,L302,U925,L376,D187,L700,D31,L762,U397,L554,D217,R679,D683,R680,D572,R54,D164,L940,D523,R140,U52,L506,D638,R331,D415,R389,D884,R410,D62,R691,U665,R889,U864,L663,D690,R487,U811,L190,U780,L758,U267,R155,D344,L133,D137,R93,D229,L729,U878,L889,D603,R288,U890,R251,U531,L249,D995,R863,D257,R655,D311,R874,U356,L833,U151,L741,U246,R694,D899,L48,U915,L900,U757,L861,U402,R971,U537,R460,D844,R54,U956,L151,U74,R892,U248,R677,D881,R99,D931,R427'
def move_calculator(move_string: str):
move_set = move_string.split(",")
x_pos = 0
y_pos = 0
move_list = []
for move in move_set:
for step in range(0, int(move[1:])):
if move[0] == 'U':
y_pos += 1
elif move[0] == 'D':
y_pos -= 1
elif move[0] == 'L':
x_pos -= 1
elif move[0] == 'R':
x_pos += 1
move_list.append([x_pos, y_pos])
return move_list
def x_reference(move_list1: list, move_list2: list):
    # set intersection of visited coordinates; much faster than nested list scans
    return set(map(tuple, move_list1)) & set(map(tuple, move_list2))
move_output1 = move_calculator(moveSet1)
move_output2 = move_calculator(moveSet2)
intersections = x_reference(move_output1, move_output2)
lowest_distance = float('inf')
for intersect in intersections:
    # Manhattan distance from the central port (origin) to the crossing
    distance = abs(intersect[0]) + abs(intersect[1])
    if distance < lowest_distance:
        lowest_distance = distance
print(lowest_distance)
|
from django.core.management import BaseCommand
from django.db import transaction
from voter.models import ChangeTracker, NCVoter
BULK_CREATE_AMOUNT = 3000
@transaction.atomic
def remove_changes(fileID):
print("Rebuiding voter table", flush=True)
processed_ncids = set()
rebuilt_records = []
for ncid in ChangeTracker.objects.filter(file_tracker_id=fileID).values_list('ncid', flat=True):
if ncid not in processed_ncids:
data = dict()
for change in ChangeTracker.objects.filter(ncid=ncid, file_tracker_id__lt=fileID).order_by('snapshot_dt'):
data.update(change.data)
rebuilt_records.append((ncid, data))
processed_ncids.add(ncid)
if len(rebuilt_records) > BULK_CREATE_AMOUNT:
for i in rebuilt_records:
NCVoter.objects.filter(ncid=i[0]).update(**(i[1]))
rebuilt_records.clear()
if len(rebuilt_records) > 0:
for i in rebuilt_records:
NCVoter.objects.filter(ncid=i[0]).update(**(i[1]))
rebuilt_records.clear()
print("Removing change trackers", flush=True)
ChangeTracker.objects.filter(file_tracker_id=fileID).delete()
class Command(BaseCommand):
help = "Rebuild the voter table until file ID"
def add_arguments(self, parser):
parser.add_argument(
'--fileid',
type=int,
help='Rebuild the voter table until file ID')
def handle(self, *args, **options):
fileID = options['fileid']
print('Removing all changes from file {}'.format(fileID), flush=True)
remove_changes(fileID)
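# Usage sketch (hedged: Django names the management command after this file,
# which is not shown here):
#   python manage.py <this_command> --fileid 42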
|
# coding: utf-8
"""
Tool Name: Exploratory Regression
Source Name: ExploratoryRegression.py
Version: ArcGIS 10.0
Author: Environmental Systems Research Institute Inc.
"""
################ Imports ####################
import sys as SYS
import copy as COPY
import os as OS
import collections as COLL
import operator as OP
import locale as LOCALE
import numpy as NUM
import math as MATH
import numpy.linalg as LA
import arcpy as ARCPY
import arcpy.management as DM
import arcpy.da as DA
import ErrorUtils as ERROR
import SSDataObject as SSDO
import SSUtilities as UTILS
import Stats as STATS
import MoransI_Step as GI
import WeightsUtilities as WU
import gapy as GAPY
import itertools as ITER
import locale as LOCALE
LOCALE.setlocale(LOCALE.LC_ALL, '')
################ Output Field Names #################
erFieldNames = ["RunID", "AdjR2", "AICc", "JB",
"K_BP", "MaxVIF", "SA", "NumVars"]
############## Helper Functions ##############
masterJustify = ["right"] * 6 + ["left"]
def returnPerc(numer, denom):
if numer == 0:
return 0.0
else:
return ( (numer * 1.0) / denom) * 100.
def runMoransI(ssdo, residuals, weightsMatrix, weightsType = "SWM",
silent = True):
mi = GI.GlobalI_Step(ssdo, residuals, weightsMatrix,
weightsType = weightsType,
silent = silent)
return mi
def nChooseK(n, k):
top = MATH.factorial(n)
left = MATH.factorial(k)
right = MATH.factorial(n - k)
return (top * 1.0) / (left * right)
def inSameCombo(n, k):
top = MATH.factorial(n - 2)
left = MATH.factorial(k - 2)
right = MATH.factorial(n - k)
return (top * 1.0) / (left * right)
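# Example: nChooseK(5, 2) == 10.0, and inSameCombo(5, 3) == 3.0, i.e. the
# number of size-3 subsets of 5 variables that contain one fixed pair.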
################ Interfaces ##################
def runExploratoryRegression():
"""Retrieves the parameters from the User Interface and executes the
appropriate commands."""
#### Get User Provided Inputs ####
ARCPY.env.overwriteOutput = True
inputFC = ARCPY.GetParameterAsText(0)
dependentVar = ARCPY.GetParameterAsText(1).upper()
independentVarsReg = ARCPY.GetParameterAsText(2)
independentVars = independentVarsReg.upper().split(";")
weightsFile = UTILS.getTextParameter(3)
#### Derived Output ####
outputReportFile = OS.path.join(ARCPY.env.scratchFolder, "ModelSelectionOLS.txt")
#### Search Criterion ####
maxIndVars = UTILS.getNumericParameter(5)
minIndVars = UTILS.getNumericParameter(6)
minR2 = UTILS.getNumericParameter(7)
maxCoef = UTILS.getNumericParameter(8)
maxVIF = UTILS.getNumericParameter(9)
minJB = UTILS.getNumericParameter(10)
minMI = UTILS.getNumericParameter(11)
#### Create a Spatial Stats Data Object (SSDO) ####
ssdo = SSDO.SSDataObject(inputFC)
#### Set Unique ID Field ####
masterField = UTILS.setUniqueIDField(ssdo, weightsFile = weightsFile)
#### MasterField Can Not Be The Dependent Variable ####
if masterField == dependentVar:
ARCPY.AddIDMessage("ERROR", 945, masterField,
ARCPY.GetIDMessage(84112))
raise SystemExit()
#### Remove the MasterField from Independent Vars ####
if masterField in independentVars:
independentVars.remove(masterField)
ARCPY.AddIDMessage("WARNING", 736, masterField)
#### Remove the Dependent Variable from Independent Vars ####
if dependentVar in independentVars:
independentVars.remove(dependentVar)
ARCPY.AddIDMessage("WARNING", 850, dependentVar)
#### Raise Error If No Independent Vars ####
if not len(independentVars):
ARCPY.AddIDMessage("ERROR", 737)
raise SystemExit()
#### Obtain Data ####
allVars = [dependentVar] + independentVars
#### Populate SSDO with Data ####
if not weightsFile:
ssdo.obtainDataGA(masterField, allVars, minNumObs = 5,
warnNumObs = 30)
else:
ssdo.obtainData(masterField, allVars, minNumObs = 5,
warnNumObs = 30)
ExploratoryRegression(ssdo, dependentVar,
independentVars,
weightsFile = weightsFile,
outputReportFile = outputReportFile,
maxIndVars = maxIndVars,
minIndVars = minIndVars,
minR2 = minR2, maxCoef = maxCoef,
maxVIF = maxVIF, minJB = minJB,
minMI = minMI)
#### Send Derived Output back to the tool ####
ARCPY.SetParameterAsText(4, outputReportFile)
################## Classes ###################
class ResultHandler(object):
"""Handles result information for Exploratory Regression."""
def __init__(self, allVarNames, numChoose, ssdo,
weightMatrix, weightsType = "SWM",
minR2 = .5, maxCoef = .01, maxVIF = 5.0,
minJB = .1, minMI = .1, silent = False):
#### Set Initial Attributes ####
UTILS.assignClassAttr(self, locals())
#### Set Label ####
self.numVars = len(self.allVarNames)
self.label = ARCPY.GetIDMessage(84283).format(numChoose, self.numVars)
if numChoose <= 2:
self.eachAppears = 1
else:
self.eachAppears = nChooseK(self.numVars - 2, numChoose - 2)
#### Set Result Structures ####
self.varSignDict = {}
self.signDict = {}
self.vifDict = {}
for varName in self.allVarNames:
self.varSignDict[varName] = [0, 0]
self.signDict[varName] = [0, 0]
self.vifDict[varName] = [0, []]
self.olsResults = {}
self.bestR2Vals = []
self.bestR2Res = []
self.passTable = []
self.passBools = []
self.r2Residuals = NUM.empty((self.ssdo.numObs, 3), dtype = float)
self.allJBPass = UTILS.compareFloat(0.0, self.minJB, rTol = .00000001)
self.allMIPass = UTILS.compareFloat(0.0, self.minMI, rTol = .00000001)
self.miVals = []
def returnSilentBool(self):
"""Returns whether SWM neighbor warnings should be printed."""
if not self.silent and not len(self.miVals):
#### Only Return Neighbor Warnings Once ####
self.silent = True
return False
else:
return True
def runR2Moran(self):
"""Runs Moran's I for highest R2 Models."""
resultList = []
for ind, olsID in enumerate(self.bestR2Res):
olsRes = self.olsResults[olsID]
if olsRes.miPVal is None:
silentBool = self.returnSilentBool()
residuals = self.r2Residuals[:,ind].flatten()
if not self.allMIPass:
mi = runMoransI(self.ssdo, residuals,
self.weightMatrix,
weightsType = self.weightsType,
silent = silentBool)
miPVal = mi.pVal
else:
miPVal = 1.0
olsRes.setMoransI(miPVal)
self.miVals.append(miPVal)
#### Allows the Update of Output Table ####
resultList.append( (olsID, olsRes.miPVal) )
return resultList
def evaluateResult(self, olsResult, residuals, keep = False):
"""Evaluates an OLS result in the context of search criteria."""
#### Evaluate R2 ####
r2Value = olsResult.r2
lenR2 = len(self.bestR2Vals)
inR2 = False
if lenR2 < 3:
self.bestR2Vals.append(r2Value)
self.bestR2Res.append(olsResult.id)
self.r2Residuals[:,lenR2] = residuals
inR2 = True
else:
minIndex = NUM.argsort(self.bestR2Vals)[0]
minValue = self.bestR2Vals[minIndex]
if r2Value > minValue:
self.bestR2Vals[minIndex] = r2Value
self.bestR2Res[minIndex] = olsResult.id
self.r2Residuals[:,minIndex] = residuals
inR2 = True
#### Add to Master List of OLS Results ####
keepBool = (keep or inR2)
if keepBool:
self.olsResults[olsResult.id] = olsResult
#### Evaluate p-values ####
pValVars = olsResult.evaluatePVals(maxCoef = self.maxCoef)
#### Evaluate VIF ####
vifVars = olsResult.evaluateVIF(maxVIF = self.maxVIF)
#### Populate Result Structures ####
for ind, varName in enumerate(olsResult.varNames):
self.signDict[varName][0] += 1
if olsResult.coef[ind] < 0.0:
self.varSignDict[varName][0] += 1
else:
self.varSignDict[varName][1] += 1
for varName in pValVars:
self.signDict[varName][1] += 1
for varName in vifVars:
self.vifDict[varName][0] += 1
self.vifDict[varName][1] += list(vifVars)
#### Obtain Bools ####
pvBool = len(pValVars) == self.numChoose
vifBool = len(vifVars) == 0
r2Bool = olsResult.r2 >= self.minR2
if not self.allJBPass:
jbBool = olsResult.jb > self.minJB
else:
jbBool = True
#### Decision Based on Bools ####
tableBool = pvBool and vifBool
if tableBool:
self.passTable.append(olsResult.id)
allBool = pvBool and vifBool and r2Bool and jbBool
miBool = False
if allBool:
silentBool = self.returnSilentBool()
if not self.allMIPass:
mi = runMoransI(self.ssdo, residuals, self.weightMatrix,
weightsType = self.weightsType,
silent = silentBool)
miPVal = mi.pVal
else:
miPVal = 1.0
olsResult.setMoransI(miPVal)
self.miVals.append(miPVal)
if miPVal > self.minMI:
self.passBools.append(olsResult.id)
self.olsResults[olsResult.id] = olsResult
miBool = True
return r2Bool, pvBool, vifBool, jbBool, miBool, keepBool
def report(self):
"""Reports the results from exploratory regression analysis."""
#### Set Title ####
title = self.label
#### Column Labels ####
labs = [ARCPY.GetIDMessage(84021), ARCPY.GetIDMessage(84249),
ARCPY.GetIDMessage(84042), ARCPY.GetIDMessage(84036),
ARCPY.GetIDMessage(84284), ARCPY.GetIDMessage(84292),
ARCPY.GetIDMessage(84286)]
r2Info = [ labs ]
#### Adjusted R2, Sorted Highest to Lowest with ID Tie Breaks ####
header = ARCPY.GetIDMessage(84287)
numRes = UTILS.ssRange(len(self.bestR2Res))
r2Data = []
for i in numRes:
r2Val = self.bestR2Vals[i]
idVal = int(self.bestR2Res[i].split(":")[-1])
r2Data.append((r2Val, idVal))
r2Data = NUM.array(r2Data, dtype = [('r2', float), ('ids', int)])
r2SortedInds = r2Data.argsort(order = ('r2', 'ids'))
sortIndex = reversed(r2SortedInds)
for ind in sortIndex:
olsID = self.bestR2Res[ind]
olsRes = self.olsResults[olsID]
olsOut = olsRes.report(formatStr = "%0.2f")
r2Info.append(olsOut)
r2Report = UTILS.outputTextTable(r2Info, header = header,
justify = masterJustify)
#### Passing Models ####
header = ARCPY.GetIDMessage(84288)
passList = [ labs ]
r2Values = []
olsIDs = []
for olsID in self.passBools:
olsRes = self.olsResults[olsID]
r2Values.append(olsRes.r2)
olsIDs.append(olsID)
sortIndex = NUM.argsort(r2Values).tolist()
sortIndex.reverse()
for ind in sortIndex:
olsID = olsIDs[ind]
olsRes = self.olsResults[olsID]
olsOut = olsRes.report(formatStr = "%0.6f")
passList.append(olsOut)
passingReport = UTILS.outputTextTable(passList, header = header)
#### Print Report ####
starMess = ARCPY.GetIDMessage(84289) * 78
finalReport = [starMess, title, r2Report, passingReport]
finalReport = "\n".join(finalReport)
finalReport = finalReport + "\n"
ARCPY.AddMessage(finalReport)
return finalReport
class OLSResult(object):
"""Holds OLS Result Info for Exploratory Regression."""
def __init__(self, id, varNames, coef, pVals, vifVals,
r2, aic, jb, bp, allMIPass = False):
#### Set Initial Attributes ####
UTILS.assignClassAttr(self, locals())
self.pVals = NUM.array(pVals)
self.varNameArray = NUM.array(self.varNames)
self.miPVal = None
self.k = len(varNames)
#### Create Model to Print ####
self.createModel()
def evaluateVIF(self, maxVIF = 5.0):
"""Evaluates VIF values."""
if self.k >= 2:
self.maxVIFValue = self.vifVals.max()
overIndices = NUM.where(self.vifVals >= maxVIF)
return self.varNameArray[overIndices]
else:
self.maxVIFValue = 1.0
return NUM.array([])
def evaluatePVals(self, maxCoef = .01):
"""Evaluates coefficient p-values."""
overIndices = NUM.where(self.pVals <= maxCoef)
return self.varNameArray[overIndices]
def createModel(self):
model = []
for ind, varName in enumerate(self.varNames):
pVal = self.pVals[ind]
coefVal = self.coef[ind]
#### Determine Variable Sign ####
if coefVal < 0:
vRes = " -"
else:
vRes = " +"
#### Determine Significance Level ####
if pVal <= .1 and pVal > .05:
vRes += varName + "*"
elif pVal <= .05 and pVal > .01:
vRes += varName + "**"
elif pVal <= .01:
vRes += varName + "***"
else:
vRes += varName
#### Add to Model ####
model.append(vRes)
#### Set Attribute ####
self.model = " ".join(model)
def setMoransI(self, value):
self.miPVal = value
def report(self, orderType = 0, formatStr = "%0.6f", addModel = True):
"""Reports the results of the OLS run.
INPUTS:
orderType {int, 0}: Sort by - 0:R2, 1:Jarque-Bera, 2:Moran
formatStr (str): format string, E.g. "%0.6f"
addModel {bool, True}: Add model to report?
"""
#### Set Output Moran's I p-value ####
if self.allMIPass:
#### Make p-value NA ####
miVal = ARCPY.GetIDMessage(84499)
else:
if self.miPVal is None:
miVal = ""
else:
miVal = self.miPVal
vifInd = -2
if orderType == 0:
resultList = [ self.r2, self.aic, self.jb, self.bp,
self.maxVIFValue, miVal ]
elif orderType == 1:
resultList = [ self.jb, self.r2, self.aic, self.bp,
self.maxVIFValue, miVal ]
else:
resultList = [ miVal, self.r2, self.aic, self.jb, self.bp,
self.maxVIFValue ]
vifInd = -1
resultListVals = []
for val in resultList:
try:
outValue = UTILS.formatValue(val, formatStr)
except:
outValue = val
resultListVals.append(outValue)
if self.maxVIFValue >= 1000:
resultListVals[vifInd] = ">" + LOCALE.format("%0.2f", 1000.)
if addModel:
resultListVals.append(self.model)
return resultListVals
class ExploratoryRegression(object):
"""Computes linear regression via Ordinary Least Squares,
    Pseudo-Step-Wise.
"""
def __init__(self, ssdo, dependentVar, independentVars, weightsFile,
outputReportFile = None, maxIndVars = 5, minIndVars = 1, minR2 = .5,
maxCoef = .01, maxVIF = 5.0, minJB = .1, minMI = .1):
ARCPY.env.overwriteOutput = True
#### Set Initial Attributes ####
UTILS.assignClassAttr(self, locals())
self.masterField = self.ssdo.masterField
self.warnedTProb = False
#### Set Boolean For Passing All Moran's I ####
self.allMIPass = UTILS.compareFloat(0.0, self.minMI, rTol = .00000001)
#### Assess Whether SWM File Being Used ####
if weightsFile:
weightSuffix = weightsFile.split(".")[-1].lower()
if weightSuffix == "swm":
self.weightsType = "SWM"
self.weightsMatrix = self.weightsFile
else:
self.weightsType = "GWT"
self.weightsMatrix = WU.buildTextWeightDict(weightsFile,
self.ssdo.master2Order)
else:
#### If No Weightsfile Provided, Use 8 Nearest Neighbors ####
if ssdo.numObs <= 9:
nn = ssdo.numObs - 2
ARCPY.AddIDMessage("WARNING", 1500, 8, nn)
else:
nn = 8
self.weightsType = "GA"
gaSearch = GAPY.ga_nsearch(self.ssdo.gaTable)
gaSearch.init_nearest(0.0, nn, "euclidean")
self.weightsMatrix = gaSearch
#### Initialize Data ####
self.runModels()
def runModels(self):
"""Performs additional validation and populates the
SSDataObject."""
#### Shorthand Attributes ####
ssdo = self.ssdo
#### Create Dependent Variable ####
self.y = ssdo.fields[self.dependentVar].returnDouble()
self.n = ssdo.numObs
self.y.shape = (self.n, 1)
#### Assure that Variance is Larger than Zero ####
yVar = NUM.var(self.y)
if NUM.isnan(yVar) or yVar <= 0.0:
ARCPY.AddIDMessage("Error", 906)
raise SystemExit()
#### Validate Chosen Number of Combos ####
k = len(ssdo.fields)
if self.maxIndVars > (k - 1):
ARCPY.AddIDMessage("WARNING", 1171, self.maxIndVars)
self.maxIndVars = k - 1
ARCPY.AddIDMessage("WARNING", 1172, self.maxIndVars)
#### Assure Degrees of Freedom ####
withIntercept = self.maxIndVars + 1
dof = self.n - withIntercept
if dof <= 2:
ARCPY.AddIDMessage("WARNING", 1128, 2)
dofLimit = self.n - 4
ARCPY.AddIDMessage("WARNING", 1419, dofLimit)
self.maxIndVars = dofLimit
if self.maxIndVars < 1:
ARCPY.AddIDMessage("WARNING", 1173)
#### Assure Min Vars is less than or equal to Max Vars ####
if self.maxIndVars < self.minIndVars:
ARCPY.AddIDMessage("WARNING", 1174)
ARCPY.AddIDMessage("WARNING", 1175)
self.minIndVars = self.maxIndVars
#### Gen Range Combos ####
rangeVars = range(1, k)
rangeCombos = NUM.arange(self.minIndVars, self.maxIndVars+1)
#### Create Base Design Matrix ####
self.x = NUM.ones((self.n, k), dtype = float)
for column, variable in enumerate(self.independentVars):
self.x[:,column + 1] = ssdo.fields[variable].data
#### Calculate Global VIF ####
self.globalVifVals = COLL.defaultdict(float)
if k > 2:
            #### Values Less Than One Were Forced by Pseudo-Inverse ####
self.printVIF = True
else:
self.printVIF = False
#### Create Output Report File ####
fo = UTILS.openFile(self.outputReportFile, "w")
#### Hold Results for Every Choose Combo ####
self.resultDict = {}
self.vifVarCount = COLL.defaultdict(int)
self.model2Table = {}
self.sumRuns = 0
self.sumGI = 0
self.boolGI = 0
self.boolResults = NUM.zeros(4, dtype = int)
self.jbModels = []
self.jbValues = []
self.jbResiduals = NUM.empty((self.n, 3), dtype = float)
self.perfectMultiWarnBool = False
self.neighborWarn = False
for choose in rangeCombos:
#### Generate Index Combos ####
comboGenerator = ITER.combinations(rangeVars, choose)
#### Set Progressor ####
message = ARCPY.GetIDMessage(84293).format(k-1, choose)
ARCPY.SetProgressor("default", message)
#### Set Result Structure ####
rh = ResultHandler(self.independentVars, choose,
self.ssdo, self.weightsMatrix,
weightsType = self.weightsType,
minR2 = self.minR2, maxCoef = self.maxCoef,
maxVIF = self.maxVIF, minJB = self.minJB,
minMI = self.minMI, silent = self.neighborWarn)
#### Loop Through All Combinations ####
modelCount = 0
emptyTabValues = [""] * ( self.maxIndVars - choose )
perfectMultiModels = []
for combo in comboGenerator:
#### Create Design Matrix for Given Combination ####
columns = [0] + list(combo)
comboX = self.x[0:,columns]
#### Get Model Info for given Combination ####
N, K = comboX.shape
varNameList = [ self.independentVars[j-1] for j in combo ]
varNameListInt = ["Intercept"] + varNameList
modelAll = self.dependentVar + " ~ "
modelAll += " + ".join(varNameListInt)
modelID = str(K) + ":" + str(modelCount)
#### Run Linear Regression ####
runModel = self.calculate(comboX)
#### Set Near/Perfect Multicoll Bool ####
nearPerfectBool = False
if K > 2 and runModel:
nearPerfectBool = NUM.any(abs(self.vifVal) >= 1000)
if (not runModel) or nearPerfectBool:
#### Perfect Multicollinearity ####
#### Unable to Invert the Matrix ####
perfectMultiModels.append(modelAll)
else:
#### Keep Track of Total Number of Models Ran ####
modelCount += 1
self.sumRuns += 1
residuals = self.residuals.flatten()
#### Evaluate p-values ####
if self.BPProb < .1:
#### Use Robust Coefficients ####
pValsOut = self.pValsRob[1:]
else:
pValsOut = self.pVals[1:]
coefOut = self.coef[1:]
#### Process Largest VIF Values ####
if K > 2:
for ind, varName in enumerate(varNameList):
vif = self.vifVal[ind]
previousVIF = self.globalVifVals[varName]
if vif > previousVIF:
self.globalVifVals[varName] = vif
#### Set OLS Result ####
res = OLSResult(modelID, varNameList, coefOut, pValsOut,
self.vifVal, self.r2Adj, self.aicc,
self.JBProb, self.BPProb,
allMIPass = self.allMIPass)
#### Evaluate Jarque-Bera Stat ####
keep = self.pushPopJB(res, self.residuals.flatten())
boolReport = rh.evaluateResult(res, residuals, keep = keep)
r2Bool, pvBool, vifBool, jbBool, giBool, keepBool = boolReport
#### Add Booleans for End Total Summary ####
boolResult = [r2Bool, pvBool, vifBool, jbBool]
self.boolResults += boolResult
#### Delete OLS Instance if Not Necessary For Summary ####
if not keepBool:
del res
#### Run Moran's I for Highest Adj. R2 ####
r2ResultList = rh.runR2Moran()
self.neighborWarn = True
#### Add Results to Report File ####
result2Print = rh.report()
UTILS.writeText(fo, result2Print)
if len(perfectMultiModels):
self.perfectMultiWarnBool = True
ARCPY.AddIDMessage("WARNING", 1304)
for modelStr in perfectMultiModels:
ARCPY.AddIDMessage("WARNING", 1176, modelStr)
#### Add Choose Run to Result Dictionary ####
self.resultDict[choose] = rh
#### Run Moran's I on Best Jarque-Bera ####
self.createJBReport()
#### Final Moran Stats ####
self.getMoranStats()
#### Ending Summary ####
self.endSummary()
UTILS.writeText(fo, self.fullReport)
fo.close()
def getMoranStats(self):
self.sumMoranRuns = 0
self.sumMoranPass = 0
miValues = []
miModels = []
miSorted = NUM.array([(0.0, 0.0), (0.0, 0.0), (0.0, 0.0)],
dtype = [('mi', float), ('r2', float)])
for resKey, resHandler in UTILS.iteritems(self.resultDict):
for olsKey, olsRes in UTILS.iteritems(resHandler.olsResults):
miValue = olsRes.miPVal
if miValue is not None:
numRes = len(miValues)
if numRes < 3:
miValues.append(miValue)
miModels.append(olsRes)
else:
if numRes == 3:
miSorted['mi'] = miValues
miSorted['r2'] = [ ols.r2 for ols in miModels ]
minIndex = miSorted.argsort(order = ('mi', 'r2'))[0]
minValue = miValues[minIndex]
valueSame = UTILS.compareFloat(minValue, miValue)
update = False
if valueSame:
if olsRes.r2 > miModels[minIndex].r2:
update = True
else:
if minValue < miValue:
update = True
if update:
miValues[minIndex] = miValue
miModels[minIndex] = olsRes
miSorted['mi'][minIndex] = miValue
miSorted['r2'][minIndex] = olsRes.r2
self.sumMoranRuns += 1
if miValue > self.minMI:
self.sumMoranPass += 1
miOrder = list(miSorted.argsort(order = ('mi', 'r2')))
#miOrder = list(NUM.argsort(miValues))
miOrder.reverse()
self.miReportRows = []
for miIndex in miOrder:
try:
miResult = miModels[miIndex]
self.miReportRows.append(miResult.report(orderType = 2))
except:
pass
def createJBReport(self):
self.jbReportRows = []
sortIndex = NUM.argsort(self.jbValues).tolist()
sortIndex.reverse()
for ind in sortIndex:
olsRes = self.jbModels[ind]
if olsRes.miPVal is None:
if not self.allMIPass:
residuals = self.jbResiduals[:,ind].flatten()
mi = runMoransI(self.ssdo, residuals, self.weightsMatrix,
weightsType = self.weightsType)
miPVal = mi.pVal
else:
miPVal = 1.0
olsRes.setMoransI(miPVal)
olsOut = olsRes.report(orderType = 1)
self.jbReportRows.append(olsOut)
def pushPopJB(self, olsRes, residuals):
"""Keeps track of the best (highest) Jarque-Bera p-values."""
lenRes = len(self.jbValues)
keep = False
if lenRes < 3:
self.jbValues.append(self.JBProb)
self.jbModels.append(olsRes)
self.jbResiduals[:,lenRes] = residuals
keep = True
else:
minIndex = NUM.argsort(self.jbValues)[0]
minValue = self.jbValues[minIndex]
if minValue < self.JBProb:
self.jbValues[minIndex] = self.JBProb
self.jbModels[minIndex] = olsRes
self.jbResiduals[:,minIndex] = residuals
keep = True
return keep
def endSummary(self):
"""Creates End Summary for Report File."""
#### Passing Model Global Summary ####
passHeader = ARCPY.GetIDMessage(84294)
emptyValue = ARCPY.GetIDMessage(84092)
perfectMultiStr = ARCPY.GetIDMessage(84368)
perfectInterStr = ARCPY.GetIDMessage(84369)
perfectInterStr += " (%s)" % LOCALE.format("%0.2f", 100)
passResults = [ [ARCPY.GetIDMessage(84295),
ARCPY.GetIDMessage(84296),
ARCPY.GetIDMessage(84297),
ARCPY.GetIDMessage(84298),
ARCPY.GetIDMessage(84299)] ]
cutoffList = [ "> " + UTILS.formatValue(self.minR2, "%0.2f"),
"< " + UTILS.formatValue(self.maxCoef, "%0.2f"),
"< " + UTILS.formatValue(self.maxVIF, "%0.2f"),
"> " + UTILS.formatValue(self.minJB, "%0.2f"),
"> " + UTILS.formatValue(self.minMI, "%0.2f") ]
categories = [ARCPY.GetIDMessage(84300), ARCPY.GetIDMessage(84301),
ARCPY.GetIDMessage(84302), ARCPY.GetIDMessage(84303),
ARCPY.GetIDMessage(84304)]
boolPerc = [ returnPerc(i, self.sumRuns) for i in self.boolResults ]
boolPerc.append( returnPerc(self.sumMoranPass, self.sumMoranRuns) )
boolOut = list(self.boolResults) + [self.sumMoranPass]
sumOut = [ self.sumRuns for i in self.boolResults ]
sumOut += [self.sumMoranRuns]
for ind, category in enumerate(categories):
outValue = LOCALE.format("%0.2f", boolPerc[ind])
outCutoff = cutoffList[ind]
outTrial = sumOut[ind]
outCount = boolOut[ind]
passResults.append( [category, outCutoff, outTrial,
outCount, outValue] )
self.passReport = UTILS.outputTextTable(passResults,
header = passHeader,
pad = 1, justify = "right")
##### Variable Significance and VIF Reports ####
##### Create Table Headers ####
signHeader = ARCPY.GetIDMessage(84305)
vifHeader = ARCPY.GetIDMessage(84306)
#### Create Column Labels and Result Lists ####
signColInfo = [ [ARCPY.GetIDMessage(84068), ARCPY.GetIDMessage(84307),
ARCPY.GetIDMessage(84366), ARCPY.GetIDMessage(84367)] ]
signResults = []
vifResults = [ [ARCPY.GetIDMessage(84068), ARCPY.GetIDMessage(84284),
ARCPY.GetIDMessage(84308), ARCPY.GetIDMessage(84309)] ]
##### Get Covariate Total ####
percVarRes = []
totalTogether = 0
for resultKey, result in UTILS.iteritems(self.resultDict):
totalTogether += result.eachAppears
##### Populate Result Lists ####
for ind, varName in enumerate(self.independentVars):
totalNeg = 0
totalPos = 0
totalSign = 0
totalRan = 0
totalViolations = 0
totalCovariates = COLL.defaultdict(int)
for resultKey, result in UTILS.iteritems(self.resultDict):
#### Significance Results ####
numRan, numSign = result.signDict[varName]
totalSign += numSign
totalRan += numRan
numNeg, numPos = result.varSignDict[varName]
totalNeg += numNeg
totalPos += numPos
#### VIF Results ####
numViolate, covariates = result.vifDict[varName]
totalViolations += numViolate
for covariate in covariates:
if covariate != varName:
totalCovariates[covariate] += 1
#### Add Perfect Multicollinearity Results * ####
successfulRun = totalRan > 0
#### Complete Significance Row ####
if successfulRun:
percentSign = ((totalSign * 1.0) / totalRan) * 100.0
percentNeg = ((totalNeg * 1.0) / totalRan) * 100.0
percentPos = ((totalPos * 1.0) / totalRan) * 100.0
rowRes = [varName, LOCALE.format("%0.2f", percentSign),
LOCALE.format("%0.2f", percentNeg),
LOCALE.format("%0.2f", percentPos)]
else:
percentSign = -1.0
rowRes = [varName, emptyValue, emptyValue, emptyValue]
ind2Insert = None
if len(percVarRes):
for ind, percVal in enumerate(percVarRes):
if percVal < percentSign:
ind2Insert = ind
break
if ind2Insert is None:
percVarRes.append(percentSign)
signResults.append(rowRes)
else:
percVarRes.insert(ind2Insert, percentSign)
signResults.insert(ind2Insert, rowRes)
#### Complete VIF Row ####
if successfulRun:
if self.printVIF:
globalVIF = self.globalVifVals[varName]
if abs(globalVIF) >= 1000:
globalVIFOut = "> " + LOCALE.format("%0.2f", 1000.)
else:
globalVIFOut = LOCALE.format("%0.2f", globalVIF)
else:
globalVIFOut = emptyValue
coString = []
sortedCovariates = sorted(UTILS.iteritems(totalCovariates),
key=OP.itemgetter(1),
reverse=True)
for covariate, totalTimes in sortedCovariates:
tRatio = (totalTimes/totalTogether) * 100
tRatio = LOCALE.format("%0.2f", tRatio)
coString.append(covariate + " (" + str(tRatio) + ")")
if len(coString):
coString = ", ".join(coString)
else:
coString = emptyValue
vifRes = [varName, globalVIFOut, "%i" % totalViolations, coString]
else:
vifRes = [varName, emptyValue, perfectMultiStr, perfectInterStr]
vifResults.append(vifRes)
#### Create Report Tables ####
signResults = signColInfo + signResults
self.signReport = UTILS.outputTextTable(signResults,
header = signHeader,
justify = ["left", "right",
"right", "right"],
pad = 1)
if self.perfectMultiWarnBool:
vifHeader += ARCPY.GetIDMessage(84111)
self.vifReport = UTILS.outputTextTable(vifResults, header = vifHeader,
justify = ["left", "right",
"center", "left"],
pad = 1)
#### Add Perfect Multi Warning ####
if self.perfectMultiWarnBool:
msg = ARCPY.GetIDMessage(84409) + "\n" + ARCPY.GetIDMessage(84410)
self.vifReport += msg
##### Residual Normality Summary ####
jbHeader = ARCPY.GetIDMessage(84310)
jbResults = [ [ARCPY.GetIDMessage(84042), ARCPY.GetIDMessage(84021),
ARCPY.GetIDMessage(84249), ARCPY.GetIDMessage(84036),
ARCPY.GetIDMessage(84284), ARCPY.GetIDMessage(84292),
ARCPY.GetIDMessage(84286)] ]
jbResults += self.jbReportRows
self.jbReport = UTILS.outputTextTable(jbResults, header = jbHeader,
pad = 1, justify = masterJustify)
##### Residual Autocorrelation ####
if not self.allMIPass:
miHeader = ARCPY.GetIDMessage(84311)
miResults = [ [ARCPY.GetIDMessage(84292), ARCPY.GetIDMessage(84021),
ARCPY.GetIDMessage(84249), ARCPY.GetIDMessage(84042),
ARCPY.GetIDMessage(84036), ARCPY.GetIDMessage(84284),
ARCPY.GetIDMessage(84286)] ]
miResults += self.miReportRows
self.miReport = UTILS.outputTextTable(miResults, header = miHeader,
pad = 1, justify = masterJustify)
else:
self.miReport = "\n" + ARCPY.GetIDMessage(84311)
self.miReport += " (" + ARCPY.GetIDMessage(84500) + ")\n"
#### Significance Locale String ####
decimalSep = UTILS.returnDecimalChar()
modelString2 = ARCPY.GetIDMessage(84314)
if decimalSep == ".":
numSep = ","
else:
numSep = ";"
modelString2 = modelString2.format(LOCALE.format("%0.2f", .1), numSep,
LOCALE.format("%0.2f", .05),
LOCALE.format("%0.2f", .01))
##### Abbreviation Table ####
modelStrip = ARCPY.GetIDMessage(84286).strip()
abbHeader = ARCPY.GetIDMessage(84312)
modelString1 = ARCPY.GetIDMessage(84313)
abbResults = [ [ARCPY.GetIDMessage(84021),ARCPY.GetIDMessage(84315)],
[ARCPY.GetIDMessage(84249),ARCPY.GetIDMessage(84316)],
[ARCPY.GetIDMessage(84042),ARCPY.GetIDMessage(84317)],
[ARCPY.GetIDMessage(84036),ARCPY.GetIDMessage(84318)],
[ARCPY.GetIDMessage(84284),ARCPY.GetIDMessage(84319)],
[ARCPY.GetIDMessage(84292),ARCPY.GetIDMessage(84320)],
[modelStrip, modelString1],
[modelStrip, modelString2]]
self.abbReport = UTILS.outputTextTable(abbResults)
self.abbReport = "\n" + abbHeader + "\n" + self.abbReport + "\n"
##### Display Tables ####
starMess = "*" * 78
dashMess = "-" * 78
ARCPY.AddMessage(starMess)
globalHeader = ARCPY.GetIDMessage(84321)
globalHeader = globalHeader.format(self.dependentVar)
globalHeader = globalHeader.center(78, "*")
ARCPY.AddMessage(globalHeader)
ARCPY.AddMessage(self.passReport)
ARCPY.AddMessage(dashMess)
ARCPY.AddMessage(self.signReport)
ARCPY.AddMessage(dashMess)
ARCPY.AddMessage(self.vifReport)
ARCPY.AddMessage(dashMess)
ARCPY.AddMessage(self.jbReport)
ARCPY.AddMessage(dashMess)
ARCPY.AddMessage(self.miReport)
ARCPY.AddMessage(dashMess)
ARCPY.AddMessage(self.abbReport)
ARCPY.AddMessage(dashMess)
self.fullReport = [starMess, globalHeader, self.passReport,
dashMess, self.signReport,
dashMess, self.vifReport,
dashMess, self.jbReport,
dashMess, self.miReport,
dashMess, self.abbReport,
dashMess]
self.fullReport = "\n".join(self.fullReport)
def calculate(self, comboX):
"""Performs OLS and related diagnostics."""
#### Shorthand Attributes ####
ssdo = self.ssdo
x = comboX
n, k = NUM.shape(comboX)
y = self.y
#### General Information ####
fn = n * 1.0
dof = n - k
fdof = dof * 1.0
xt = x.T
yt = y.T
xx = NUM.dot(xt, x)
#### Check for Perfect Multicollinearity ####
U, s, V = LA.svd(xx)
if UTILS.compareFloat(0.0, s[-1]):
return False
#### Attempt to Invert Design Matrix ####
try:
xxi = LA.inv(xx)
except:
#### Perfect multicollinearity, cannot proceed ####
return False
#### Bad Probabilities - Near Multicollinearity ####
badProbs = False
#### Compute Coefficients ####
xy = NUM.dot(xt, y)
coef = NUM.dot(xxi, xy)
#### Residuals, Sum Of Squares, R2, Etc. ####
yHat = NUM.dot(x, coef)
yBar = (y.sum())/fn
e = y - yHat
ess = ( NUM.dot(e.T, e) )[0][0]
s2 = (ess / fdof)
s2mle = (ess / fn)
seResiduals = NUM.sqrt(s2)
ss = y - yBar
tss = ( NUM.dot(ss.T, ss) )[0][0]
r2 = 1.0 - (ess/tss)
r2Adj = 1.0 - ( (ess / (fdof)) / (tss / (fn-1)) )
u2 = e * e
#### Variance-Covariance for Coefficients ####
varBeta = xxi * s2
#### Standard Errors / t-Statistics ####
seBeta = NUM.sqrt(varBeta.diagonal())
tStat = (coef.T / seBeta).flatten()
#### White's Robust Standard Errors ####
dofScale = (int( n / (n - k) )) * 1.0
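        # NOTE: with integer operands this division truncates to 1.0 whenever
        # k < n/2, so these are effectively HC0 (uncorrected) robust errors;
        # the HC1 small-sample correction would be fn / fdof.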
sHat = NUM.dot((u2 * x).T, x) * dofScale
varBetaRob = NUM.dot(NUM.dot(xxi, sHat), xxi)
seBetaRob = NUM.sqrt(varBetaRob.diagonal())
tStatRob = (coef.T / seBetaRob).flatten()
#### DOF Warning Once for t-Stats ####
silentVector = [ True for i in range(k) ]
if (2 <= dof <= 4) and not self.warnedTProb:
silentVector[0] = False
self.warnedTProb = True
#### Coefficient t-Tests ####
pVals = []
pValsRob = []
for varInd in UTILS.ssRange(k):
#### General ####
try:
p = STATS.tProb(tStat[varInd], dof, type = 2,
silent = silentVector[varInd])
except:
p = NUM.nan
badProbs = True
pVals.append(p)
#### Robust ####
try:
p = STATS.tProb(tStatRob[varInd], dof, type = 2,
silent = True)
except:
p = NUM.nan
badProbs = True
pValsRob.append(p)
#### Jarque-Bera Test For Normality of the Residuals ####
muE = (e.sum()) / fn
devE = e - muE
u3 = (devE**3.0).sum() / fn
u4 = (devE**4.0).sum() / fn
denomS = s2mle**1.5
denomK = s2mle**2.0
skew = u3 / denomS
kurt = u4 / denomK
self.JB = (n/6.) * ( skew**2. + ( (kurt - 3.)**2. / 4. ))
if self.JB >= 0.0:
self.JBProb = STATS.chiProb(self.JB, 2, type = 1)
else:
self.JBProb = NUM.nan
badProbs = True
#### Breusch-Pagan Test for Heteroskedasticity ####
u2y = NUM.dot(xt, u2)
bpCoef = NUM.dot(xxi, u2y)
u2Hat = NUM.dot(x, bpCoef)
eU = u2 - u2Hat
essU = NUM.dot(eU.T, eU)
u2Bar = (u2.sum()) / fn
ssU = u2 - u2Bar
tssU = NUM.dot(ssU.T, ssU)
r2U = 1.0 - (essU/tssU)
self.BP = (fn * r2U)[0][0]
if self.BP >= 0.0:
self.BPProb = STATS.chiProb(self.BP, (k-1), type = 1)
else:
self.BPProb = NUM.nan
badProbs = True
#### Classic Joint-Hypothesis F-Test ####
q = k - 1
fq = q * 1.0
self.fStat = (r2/fq) / ((1 - r2) / (fn - k))
try:
self.fProb = abs(STATS.fProb(self.fStat, q,
(n-k), type = 1))
except:
self.fProb = NUM.nan
badProbs = True
#### Wald Robust Joint Hypothesis Test ####
R = NUM.zeros((q,k))
R[0:,1:] = NUM.eye(q)
Rb = NUM.dot(R, coef)
try:
invRbR = LA.inv( NUM.dot(NUM.dot(R, varBetaRob), R.T) )
except:
#### Perfect multicollinearity, cannot proceed ####
return False
self.waldStat = ( NUM.dot(NUM.dot(Rb.T, invRbR), Rb) )[0][0]
if self.waldStat >= 0.0:
self.waldProb = STATS.chiProb(self.waldStat, q, type = 1)
else:
self.waldProb = NUM.nan
badProbs = True
#### Log-Likelihood ####
self.logLik = -(n / 2.) * (1. + NUM.log(2. * NUM.pi)) - \
(n / 2.) * NUM.log(s2mle)
#### AIC/AICc ####
k1 = k + 1
self.aic = -2. * self.logLik + 2. * k1
self.aicc = -2. * self.logLik + 2. * k1 * (fn / (fn - k1 - 1))
#### Calculate the Variance Inflation Factor ####
if k <= 2:
self.vifVal = ARCPY.GetIDMessage(84090)
self.vif = False
else:
xTemp = xt[1:]
corX = NUM.corrcoef(xTemp)
try:
ic = LA.inv(corX)
self.vifVal = abs(ic.diagonal())
self.vifVal[self.vifVal >= 1000] = 1000
self.vif = True
except:
#### Perfect multicollinearity, cannot proceed ####
return False
#### Set Attributes ####
self.dof = dof
self.coef = coef
self.yHat = yHat
self.yBar = yBar
self.residuals = e
self.seResiduals = seResiduals
        self.stdResiduals = e / self.seResiduals
self.ess = ess
self.tss = tss
self.varCoef = varBeta
self.seCoef = seBeta
self.tStats = tStat
self.pVals = pVals
self.varCoefRob = varBetaRob
self.seCoefRob = seBetaRob
self.tStatsRob = tStatRob
self.pValsRob = pValsRob
self.r2 = r2
self.r2Adj = r2Adj
self.s2 = s2
self.s2mle = s2mle
self.q = q
self.badProbs = badProbs
self.varLabels = [ARCPY.GetIDMessage(84064)] + self.independentVars
return True
if __name__ == '__main__':
er = runExploratoryRegression()
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class OpenpmdApi(CMakePackage):
"""API for easy reading and writing of openPMD files"""
homepage = "http://www.openPMD.org"
git = "https://github.com/openPMD/openPMD-api.git"
maintainers = ['ax3l']
version('dev', branch='dev')
version('0.12.0', tag='0.12.0-alpha')
version('0.11.1', tag='0.11.1-alpha')
version('0.11.0', tag='0.11.0-alpha')
version('0.10.3', tag='0.10.3-alpha')
version('0.10.2', tag='0.10.2-alpha')
version('0.10.1', tag='0.10.1-alpha')
version('0.10.0', tag='0.10.0-alpha')
variant('shared', default=True,
description='Build a shared version of the library')
variant('mpi', default=True,
description='Enable parallel I/O')
variant('hdf5', default=True,
description='Enable HDF5 support')
variant('adios1', default=False,
description='Enable ADIOS1 support')
variant('adios2', default=True,
description='Enable ADIOS2 support')
variant('python', default=False,
description='Enable Python bindings')
depends_on('cmake@3.12.0:', type='build')
depends_on('mpark-variant@1.4.0:')
depends_on('catch2@2.6.1:', type='test')
depends_on('mpi@2.3:', when='+mpi') # might become MPI 3.0+
depends_on('hdf5@1.8.13:', when='+hdf5')
depends_on('hdf5@1.8.13: ~mpi', when='~mpi +hdf5')
depends_on('hdf5@1.8.13: +mpi', when='+mpi +hdf5')
depends_on('adios@1.13.1: ~sz', when='+adios1')
depends_on('adios@1.13.1: ~mpi ~sz', when='~mpi +adios1')
depends_on('adios@1.13.1: +mpi ~sz', when='+mpi +adios1')
depends_on('adios2@2.5.0:', when='+adios2')
depends_on('adios2@2.6.0:', when='+adios2 @0.12.0:')
depends_on('adios2@2.5.0: ~mpi', when='~mpi +adios2')
depends_on('adios2@2.5.0: +mpi', when='+mpi +adios2')
depends_on('nlohmann-json@3.7.0:')
depends_on('py-pybind11@2.3.0:', when='+python', type='link')
depends_on('py-numpy@1.15.1:', when='+python', type=['test', 'run'])
depends_on('py-mpi4py@2.1.0:', when='+python +mpi', type=['test', 'run'])
depends_on('python@3.5:', when='+python', type=['link', 'test', 'run'])
extends('python', when='+python')
# Fix breaking HDF5 1.12.0 API
# https://github.com/openPMD/openPMD-api/pull/696
patch('hdf5-1.12.0.patch', when='@:0.11.0 +hdf5')
def cmake_args(self):
spec = self.spec
args = [
'-DBUILD_SHARED_LIBS:BOOL={0}'.format(
'ON' if '+shared' in spec else 'OFF'),
# variants
'-DopenPMD_USE_MPI:BOOL={0}'.format(
'ON' if '+mpi' in spec else 'OFF'),
'-DopenPMD_USE_HDF5:BOOL={0}'.format(
'ON' if '+hdf5' in spec else 'OFF'),
'-DopenPMD_USE_ADIOS1:BOOL={0}'.format(
'ON' if '+adios1' in spec else 'OFF'),
'-DopenPMD_USE_ADIOS2:BOOL={0}'.format(
'ON' if '+adios2' in spec else 'OFF'),
'-DopenPMD_USE_PYTHON:BOOL={0}'.format(
'ON' if '+python' in spec else 'OFF'),
# tests and examples
'-DBUILD_TESTING:BOOL={0}'.format(
'ON' if self.run_tests else 'OFF'),
'-DBUILD_EXAMPLES:BOOL={0}'.format(
'ON' if self.run_tests else 'OFF'),
]
# switch internally shipped third-party libraries for spack
if spec.satisfies('+python'):
args.append('-DopenPMD_USE_INTERNAL_PYBIND11:BOOL=OFF')
args.append('-DPYTHON_EXECUTABLE:FILEPATH={0}'.format(
self.spec['python'].command.path))
args.extend([
'-DopenPMD_USE_INTERNAL_JSON:BOOL=OFF',
'-DopenPMD_USE_INTERNAL_VARIANT:BOOL=OFF'
])
if self.run_tests:
args.append('-DopenPMD_USE_INTERNAL_CATCH:BOOL=OFF')
return args
def setup_run_environment(self, env):
spec = self.spec
# pre-load dependent CMake-PUBLIC header-only libs
env.prepend_path('CMAKE_PREFIX_PATH', spec['mpark-variant'].prefix)
env.prepend_path('CPATH', spec['mpark-variant'].prefix.include)
# more deps searched in openPMDConfig.cmake
if spec.satisfies("+mpi"):
env.prepend_path('CMAKE_PREFIX_PATH', spec['mpi'].prefix)
if spec.satisfies("+adios1"):
env.prepend_path('CMAKE_PREFIX_PATH', spec['adios'].prefix)
env.prepend_path('PATH', spec['adios'].prefix.bin) # adios-config
if spec.satisfies("+adios2"):
env.prepend_path('CMAKE_PREFIX_PATH', spec['adios2'].prefix)
if spec.satisfies("+hdf5"):
env.prepend_path('CMAKE_PREFIX_PATH', spec['hdf5'].prefix)
def setup_dependent_build_environment(self, env, dependent_spec):
# pre-load dependent CMake-PUBLIC header-only libs
env.prepend_path('CMAKE_PREFIX_PATH',
self.spec['mpark-variant'].prefix)
env.prepend_path('CPATH', self.spec['mpark-variant'].prefix.include)
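# Usage sketch: Spack derives the package name openpmd-api from the class
# above, so a typical workflow is:
#   spack install openpmd-api +python
#   spack load openpmd-api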
|
import rospy
# time evaluation
import time
from sensor_msgs.msg import JointState, PointCloud
from geometry_msgs.msg import Polygon, Point32, PolygonStamped
from jsk_recognition_msgs.msg import PolygonArray
from std_msgs.msg import Header,Float64
from visualization_msgs.msg import Marker
import tf
import numpy as np
def create_vertex_msg(force_vertex, pose, frame, scaling_factor = 500):
    pointcloud_message = PointCloud()
    for i in range(force_vertex.shape[1]):
        point = Point32()
        point.x = force_vertex[0,i]/scaling_factor + pose[0]
        point.y = force_vertex[1,i]/scaling_factor + pose[1]
        point.z = force_vertex[2,i]/scaling_factor + pose[2]
        pointcloud_message.points.append(point)
    # stamped point cloud message
    pointcloud_message.header = Header()
    pointcloud_message.header.frame_id = frame
    pointcloud_message.header.stamp = rospy.Time.now()
    return pointcloud_message
def create_polytopes_msg(force_polytopes, pose, frame, scaling_factor = 500):
polygonarray_message = PolygonArray()
polygonarray_message.header = Header()
polygonarray_message.header.frame_id = frame
polygonarray_message.header.stamp = rospy.Time.now()
for face_polygon in force_polytopes:
        polygon_message = Polygon()
        for i in range(face_polygon.shape[1]):
            point = Point32()
            point.x = face_polygon[0,i]/scaling_factor + pose[0]
            point.y = face_polygon[1,i]/scaling_factor + pose[1]
            point.z = face_polygon[2,i]/scaling_factor + pose[2]
            polygon_message.points.append(point)
        # polytope stamped message
        polygon_stamped = PolygonStamped()
        polygon_stamped.polygon = polygon_message
polygon_stamped.header = Header()
polygon_stamped.header.frame_id = frame
polygon_stamped.header.stamp = rospy.Time.now()
polygonarray_message.polygons.append(polygon_stamped)
polygonarray_message.likelihood.append(1.0)
return polygonarray_message
def create_ellipsoid_msg(S, U, pose, frame, scaling_factor = 500):
# calculate rotation matrix
Rot_f = np.identity(4)
Rot_f[0:3,0:3] = U
marker = Marker()
marker.header.frame_id = frame
marker.pose.position.x = pose[0]
marker.pose.position.y = pose[1]
marker.pose.position.z = pose[2]
quaternion = tf.transformations.quaternion_from_matrix( Rot_f )
#type(pose) = geometry_msgs.msg.Pose
marker.pose.orientation.x = quaternion[0]
marker.pose.orientation.y = quaternion[1]
marker.pose.orientation.z = quaternion[2]
marker.pose.orientation.w = quaternion[3]
marker.type = marker.SPHERE
marker.color.g = 0.7
marker.color.r = 1.0
marker.color.a = 0.5
marker.scale.x = 2*S[0]/scaling_factor
marker.scale.y = 2*S[1]/scaling_factor
marker.scale.z = 2*S[2]/scaling_factor
return marker
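# Sketch (assumed inputs; nothing in this module calls it this way): S and U
# would typically come from an SVD of a 3x3 manipulability matrix, e.g.
#   U, S, _ = np.linalg.svd(J.dot(J.T))
# so marker.scale above holds the ellipsoid diameter along each principal axis.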
# NOTE: this module only defines ROS message helpers; the original __main__
# guard called an undefined capacity_visual_utils(), so it is left as a no-op.
if __name__ == '__main__':
    pass
|
"""
14. 最长公共前缀
https://leetcode-cn.com/problems/longest-common-prefix/
"""
def longestCommonPrefix(strs):
if not strs:
return ""
def findPrefix(p, s):
length, i = min(len(p), len(s)), 0
while i < length:
if p[i] == s[i]:
i += 1
else:
break
return p[:i]
prefix = strs[0]
for v in strs[1:]:
prefix = findPrefix(prefix, v)
if not prefix:
return ""
return prefix
print(longestCommonPrefix(["flower","flow","flight"]))
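# Equivalent standard-library one-liner: os.path.commonprefix performs a
# character-wise common prefix despite living in os.path.
# import os
# print(os.path.commonprefix(["flower", "flow", "flight"]))  # -> "fl"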
|
import mysql.connector
#connect to the database
#get a cursor
#create the table
#stored procedure
#close the database
# mydb = mysql.connector.connect(host="localhost",user="root",password="gziscas",database="mydatabase",charset='utf8')
# mycursor = mydb.cursor()
# # If the table already exists, drop it with execute().
# #mycursor.execute("DROP TABLE IF EXISTS EMPLOYEE")
# # SQL statement to create the table
def createDatabase():
mydb = mysql.connector.connect(host="localhost",user="root",password="password",database="oneplum",charset="utf8")
mycursor = mydb.cursor()
sql = """CREATE TABLE test_one(
id int (11) PRIMARY KEY AUTO_INCREMENT,
date_time char(20) NOT NULL,
rank_code char(20) NOT NULL)
ENGINE=InnoDB DEFAULT CHARSET=utf8"""
mycursor.execute(sql)
    mycursor.close()
    mydb.close()
#connect to the database
#get a cursor
#insert the data
#commit the data
#close the connection
def addDatabase(value_date, value_rank):
    mydb = mysql.connector.connect(host="localhost",user="root",password="password",database="oneplum",charset="utf8")
    mycursor = mydb.cursor()
    # parameterized query: safer than interpolating values into the string
    sql = "insert into test_one(date_time, rank_code) values (%s, %s)"
    try:
        # execute the SQL statement
        mycursor.execute(sql, (value_date, value_rank))
        # commit to the database
        mydb.commit()
        print("success")
    except:
        mydb.rollback()
        print("error")
    mydb.close()
#connect to the database
#get a cursor
#write what the statement should do
#try it
#execute via the cursor
#close the database
#delete rows from the table
def deleteDatabase():
mydb = mysql.connector.connect(host="192.168.229.136",user="root",password="password",database="oneplum",charset="utf8")
mycursor = mydb.cursor()
sql = "delete from test_one where id = 1 "
    try:
        # execute the SQL statement
        mycursor.execute(sql)
        # commit to the database
        mydb.commit()
        print("success")
except:
mydb.rollback()
print("error")
#mycursor.execute(sql)
mydb.close()
#deleteDatabase()
#connect to the database through a connection variable
#get a database cursor
#write the command to execute
#try
#run the command with the cursor
#close the database
def updateDatabase(value3, value4, ids):
    mydb = mysql.connector.connect(host="localhost",user="root",password="password",database="oneplum",charset="utf8")
    mycursor = mydb.cursor()
    # parameterized query; the original interpolated (and quoted) the integer id
    sql = "update test_one set date_time = %s, rank_code = %s where id = %s"
    try:
        # execute the SQL statement
        mycursor.execute(sql, (value3, value4, ids))
        # commit to the database
        mydb.commit()
        print("success!")
    except:
        mydb.rollback()
        print("error")
    mydb.close()
value1 = "sadf23"
value2="dshgj"
#addDatabase(value1,value2)
value3="xiaoming"
value4="xiaomei"
id=4
#updateDatabase(value3,value4,id)
#connect to the database
#get a database cursor
#write the query
#try
#close the database
def selectDatabase():
mydb = mysql.connector.connect(host="localhost",user="root",password="password",database="oneplum",charset="utf8")
mycursor = mydb.cursor()
sql = "select * from test_one"
    try:
        # execute the SQL statement
        mycursor.execute(sql)
        # fetch all rows
        results = mycursor.fetchall()
        # print each row
        for row in results:
            id = row[0]
            date_time = row[1]
            rank_code = row[2]
            #print("id=%d,date_time=%s,rank_code=%s" % (id, date_time, rank_code)) # prettier formatting
            print(id, date_time, rank_code)
        print("success!!!!!!!")
except:
mydb.rollback()
print("error")
#mycursor.execute(sql)
mydb.close()
selectDatabase()
ide ="id"
values5 = "date_time"
values6 = "rank_code"
print(ide,values5,values6)
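# A minimal sketch of the safer, idiomatic alternative to the string-formatted
# SQL above: mysql.connector supports parameterized queries with %s
# placeholders, which avoids quoting bugs and SQL injection. The connection
# settings are the same hypothetical localhost credentials used above.
def addDatabaseParameterized(value_date, value_rank):
    mydb = mysql.connector.connect(host="localhost", user="root", password="password", database="oneplum", charset="utf8")
    mycursor = mydb.cursor()
    try:
        # the driver escapes the values; never interpolate them into the string
        mycursor.execute("insert into test_one(date_time, rank_code) values (%s, %s)", (value_date, value_rank))
        mydb.commit()
        print("success")
    except Exception:
        mydb.rollback()
        print("error")
    finally:
        mydb.close()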
|
import sys
def getWays(squares, d, m):
# Complete this function
cumSum = [0]
for val in squares:
cumSum.append(cumSum[-1] + val)
#print(cumSum)
count = 0
for i in range(1, len(cumSum) - m + 1):
sumVal = cumSum[i + m - 1] - cumSum[i -1]
#print(sumVal)
if(sumVal == d):
count +=1
return count
n = int(input().strip())
s = list(map(int, input().strip().split(' ')))
d,m = input().strip().split(' ')
d,m = [int(d),int(m)]
result = getWays(s, d, m)
print(result)
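# A hand-worked check of the prefix-sum trick (made-up input, not a judge
# case): for squares [1, 2, 1, 3, 2] with d=3 and m=2, the length-2 windows
# sum to 3, 3, 4, 5, so exactly two segments match:
# assert getWays([1, 2, 1, 3, 2], 3, 2) == 2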
|
""" metric_tree.py
This file uses sklearn trees generally used for KNN calculation as an
approximate metric tree for wasserstein distance. Further extensions are
quadtree, and one based on hierarchical clustering. The idea is to use the
tree with edge lengths as the (L2) distance between means. The distance
between any two points embedded in this tree is then the geodesic distance
along the tree. Note that this is an offline algorithm, we do not support
adding points after the initial construction.
"""
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_X_y, check_is_fitted
from sklearn.neighbors import KDTree, BallTree, DistanceMetric
from sklearn.cluster import MiniBatchKMeans
from scipy.sparse import coo_matrix
class QuadTree(object):
"""
This quadtree could be sped up, but is an easy implementation
"""
def __init__(self, X, n_levels=25, noise=1.0, *args, **kwargs):
assert np.all(np.min(X, axis=0) >= 0)
assert np.all(np.max(X, axis=0) <= 1)
assert n_levels >= 1
self.kwargs = kwargs
self.X = X
self.noise = noise
# self.X = self.X + np.random.randn(*self.X.shape) * noise
self.dims = X.shape[1]
self.n_clusters = 2 ** self.dims
self.n_levels = n_levels
center = np.random.rand(self.dims) * noise
self.tree, self.indices, self.centers, self.dists = self._cluster(
center, np.arange(X.shape[0]), n_levels=self.n_levels - 1, start=0
)
self.tree = [(0, self.X.shape[0], n_levels, 0), *self.tree]
self.dists = np.array([0, *self.dists])
self.centers = [center, *self.centers]
self.centers = np.array(self.centers)
def _cluster(self, center, index, n_levels, start):
"""
Parameters
----------
center:
    [D] array giving the center point about which this cluster is split
"""
if n_levels == 0 or len(index) == 0:
return None
labels = np.ones_like(index) * -1
dim_masks = np.array([self.X[index, d] > center[d] for d in range(self.dims)])
import itertools
bin_masks = np.array(list(itertools.product([False, True], repeat=self.dims)))
label_masks = np.all(bin_masks[..., None] == dim_masks[None, ...], axis=1)
for i, mask in enumerate(label_masks):
labels[mask] = i
assert np.all(labels > -1)
shift = 2 ** -(self.n_levels - n_levels + 2)
shifts = np.array(list(itertools.product([-shift, shift], repeat=self.dims)))
cluster_centers = shifts + center
sorted_index = []
children = []
ccenters = []
cdists = []
is_leaf = [0] * self.n_clusters
unique, ucounts = np.unique(labels, return_counts=True)
counts = np.zeros(self.n_clusters, dtype=np.int32)
for u, c in zip(unique, ucounts):
counts[u] = c
cstart = 0
for i, count, ccenter in zip(unique, counts, cluster_centers):
ret = self._cluster(
ccenter, index[labels == i], n_levels - 1, start + cstart
)
if ret is None:
sorted_index.extend(index[labels == i])
is_leaf[i] = 1
continue
sorted_index.extend(ret[1])
children.extend(ret[0])
ccenters.extend(ret[2])
cdists.extend(ret[3])
cstart += count
to_return = list(
zip(
*[
np.array([0, *np.cumsum(counts)]) + start,
np.cumsum(counts) + start,
[n_levels] * self.n_clusters,
is_leaf,
]
)
)
dists = np.linalg.norm(cluster_centers - center[None, :], axis=1)
return (
[*to_return, *children],
sorted_index,
[*cluster_centers, *ccenters],
[*dists, *cdists],
)
def get_arrays(self):
return None, self.indices, self.tree, self.centers, self.dists
class ClusterTree(object):
def __init__(self, X, n_clusters=10, n_levels=5, *args, **kwargs):
self.X = X
self.n_clusters = n_clusters
self.n_levels = n_levels
center = self.X.mean(axis=0)
self.tree, self.indices, self.centers, self.dists = self._cluster(
center, np.arange(X.shape[0]), n_levels=self.n_levels - 1, start=0
)
self.tree = [(0, self.X.shape[0], n_levels, n_levels == 1), *self.tree]
self.centers = [center, *self.centers]
self.dists = np.array([0, *self.dists])
self.centers = np.array(self.centers)
def _cluster(self, center, index, n_levels, start):
"""
Returns a list of tuples corresponding to each subnode of the tree
(center, level, start, end, is_leaf), sorted_index
center is the cluster center
level is the level of the node counting the root as the zeroth level
sorted_index is the list of point indices reordered into tree order
"""
if n_levels == 0 or len(index) < self.n_clusters:
return None
cl = MiniBatchKMeans(n_clusters=self.n_clusters)
cl.fit(self.X[index])
sorted_index = []
children = []
ccenters = []
cdists = []
is_leaf = [0] * self.n_clusters
unique, ucounts = np.unique(cl.labels_, return_counts=True)
counts = np.zeros(self.n_clusters, dtype=np.int32)
for u, c in zip(unique, ucounts):
counts[u] = c
cstart = 0
for i, count in zip(unique, counts):
ret = self._cluster(
cl.cluster_centers_[i],
index[cl.labels_ == i],
n_levels - 1,
start + cstart,
)
if ret is None:
sorted_index.extend(index[cl.labels_ == i])
is_leaf[i] = 1
continue
sorted_index.extend(ret[1])
children.extend(ret[0])
ccenters.extend(ret[2])
cdists.extend(ret[3])
cstart += count
to_return = list(
zip(
*[
np.array([0, *np.cumsum(counts)]) + start,
np.cumsum(counts) + start,
[n_levels] * self.n_clusters,
is_leaf,
]
)
)
dists = np.linalg.norm(cl.cluster_centers_ - center[None, :], axis=1)
return (
[*to_return, *children],
sorted_index,
[*cl.cluster_centers_, *ccenters],
[*dists, *cdists],
)
def get_arrays(self):
return None, self.indices, self.tree, self.centers, self.dists
class MetricTree(BaseEstimator):
def __init__(self, tree_type="ball", leaf_size=40, metric="euclidean", **kwargs):
self.tree_type = tree_type
if tree_type == "ball":
self.tree_cls = BallTree
elif tree_type == "kd":
self.tree_cls = KDTree
elif tree_type == "cluster":
self.tree_cls = ClusterTree
elif tree_type == "quad":
self.tree_cls = QuadTree
else:
raise NotImplementedError("Unknown tree type")
self.kwargs = kwargs
self.leaf_size = leaf_size
self.metric = metric
self.dist_fn = DistanceMetric.get_metric(metric)
def get_node_weights(self):
""" Takes the middle of the bounds as the node center for each node
TODO (alex): This could be improved or at least experimented with
"""
node_weights = self.tree.get_arrays()[-1]
if self.tree_type == "ball":
centers = node_weights[0]
n = centers.shape[0]
# Subtracts the child from the parent relying on the order of nodes in the tree
lengths = np.linalg.norm(
centers[np.insert(np.arange(n - 1) // 2, 0, 0)] - centers[np.arange(n)],
axis=1,
)
return lengths
elif self.tree_type == "kd":
# Averages the two boundaries of the KD box
centers = node_weights.mean(axis=0)
n = centers.shape[0]
# Subtracts the child from the parent relying on the order of nodes in the tree
lengths = np.linalg.norm(
centers[np.insert(np.arange(n - 1) // 2, 0, 0)] - centers[np.arange(n)],
axis=1,
)
return lengths
elif self.tree_type == "cluster":
return node_weights
elif self.tree_type == "quad":
return node_weights
else:
raise NotImplementedError("Unknown tree type")
def fit_transform(self, X, y):
"""
X is data array (np array)
y is one-hot encoded distribution index (np array of size # points x #
distributions.
"""
X, y = check_X_y(X, y, accept_sparse=True, multi_output=True)
self.classes_ = y.shape[1] # unique_labels(y)
self.X_ = X
self.y_ = y
self.tree = self.tree_cls(
X, leaf_size=self.leaf_size, metric=self.metric, **self.kwargs
)
tree_indices = self.tree.get_arrays()[1]
node_data = self.tree.get_arrays()[2]
y_indices = y[tree_indices] # reorders point labels by tree order.
self.edge_weights = self.get_node_weights()
counts = np.empty((len(node_data), y.shape[1]))
for node_idx in reversed(range(len(node_data))):
start, end, is_leaf, radius = node_data[node_idx]
# Find the number of points present in this range from each distribution
counts[node_idx] = np.sum(
y_indices[start:end], axis=0
) # as y is a one-hot encoding, we just need to sum over the relevant bits.
if np.issubdtype(y.dtype, np.floating):
# if is floating then don't worry about the logic below
self.counts_mtx = coo_matrix(counts).T
return self.counts_mtx, self.edge_weights
# convert to COO format
dim = (self.classes_, len(node_data))
dist_list = np.arange(1, self.classes_ + 1)
self.counts_mtx = coo_matrix(dim, dtype=np.int32)
for i, count in enumerate(counts):
if np.sum(count) == 0: # if no classes have signals in this region
continue
# get the signals with nonzero representation in the region
# count is a list of the representation per distribution.
# count_copy is used to eliminate distributions without representation
count_copy = count.copy()
count_copy[count_copy > 0] = 1
dists_represented = np.multiply(dist_list, count_copy)
j_list = (
dists_represented[dists_represented != 0] - 1
) # we added 1 to the distribution numbers to do the zero trick.
val_list = count[count != 0]
i_list = [i] * len(j_list)
self.counts_mtx += coo_matrix(
(val_list, (j_list, i_list)), shape=dim, dtype=np.int32
)
return self.counts_mtx, self.edge_weights
def transform(self, X):
""" Transforms datasets y to (L1) vector space.
Returns vectors representing edge weights and weights over vector.
"""
check_is_fitted(self, "X_")
if not np.array_equal(X, self.X_):
    raise ValueError("X transformed must equal fitted X")
if __name__ == "__main__":
mt = MetricTree(tree_type="cluster")
gt = np.repeat(np.arange(10), 100)
gt = (
(np.repeat(np.arange(max(gt) + 1)[:, None], len(gt), axis=1) == gt)
.astype(int)
.T
)
counts, edge_weights = mt.fit_transform(X=np.random.random_sample((1000, 3)), y=gt)
print(counts, edge_weights)
print(counts.toarray()[:50])
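# A hedged usage sketch for the quadtree variant (parameters are
# illustrative): QuadTree asserts its input lies in [0, 1]^D, so the sample
# data is drawn uniformly from the unit square.
# mt_quad = MetricTree(tree_type="quad", n_levels=5)
# X = np.random.random_sample((200, 2))
# y = (np.arange(4)[:, None] == np.repeat(np.arange(4), 50)).astype(int).T
# counts, edge_weights = mt_quad.fit_transform(X, y)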
|
from django.core.mail import EmailMultiAlternatives
from django.http import JsonResponse
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import CreateView
from management.models import Feedback
from sky_storage.settings import HOST, EMAIL_HOST_USER
@method_decorator(csrf_exempt, name='dispatch')
class AjaxFeedbackCreateView(CreateView):
model = Feedback
success_url = '/'
fields = ('email', 'phone', 'name', 'message')
def form_invalid(self, form):
super().form_invalid(form)
return JsonResponse(form.errors, status=400)
def form_valid(self, form):
super().form_valid(form)
# todo use send email
self.send_email()
return JsonResponse({
'pk': self.object.pk,
})
def send_email(self):
# todo write and use
# send email using the self.cleaned_data dictionary
form_kwargs = self.get_form_kwargs()["data"]
context = {
'host': HOST,
'name': form_kwargs.get("name"),
'email': form_kwargs.get("email"),
'message': form_kwargs.get("message"),
'phone': form_kwargs.get("phone"),
}
text_content = f'Feedback from: {form_kwargs.get("name")}, with email: {form_kwargs.get("email")},' \
               f' message: {form_kwargs.get("message")}, and phone number:' \
               f' {form_kwargs.get("phone")}'
html_content = render_to_string('emails/feedback.html', context)
msg = EmailMultiAlternatives('Feedback', text_content, f'{form_kwargs.get("name")}', [EMAIL_HOST_USER])
# msg.attach_alternative(html_content, "text/html")
msg.send()
|
from bson import ObjectId
class PymongoExecutor:
def __init__(self, db):
self.db = db
def execute(self, expression):
return eval(expression)
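# A hedged usage sketch (client, database, collection, and id are
# hypothetical). execute() eval()s the expression with `self` in scope, so
# callers pass pymongo-style expressions as strings; the ObjectId import
# above keeps id-based lookups eval-able:
# executor = PymongoExecutor(pymongo.MongoClient().mydb)
# doc = executor.execute("self.db.users.find_one({'_id': ObjectId('...')})")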
|
from .steps import AppsFlyerEventsStep
from .steps import DestinationType
THIRD_PARTY_STEPS = [AppsFlyerEventsStep]
|
import FWCore.ParameterSet.Config as cms
from DQMOffline.Trigger.SiPixel_OfflineMonitoring_Cluster_cff import *
from DQMOffline.Trigger.SiPixel_OfflineMonitoring_TrackCluster_cff import *
from DQM.HLTEvF.dqmCorrelationClient_cfi import *
pixelClusterVsLumi = dqmCorrelationClient.clone(
me = cms.PSet(
folder = cms.string("HLT/Pixel/"),
name = cms.string("num_clusters_per_instLumi"),
doXaxis = cms.bool( True ),
nbinsX = cms.int32( 40 ),
xminX = cms.double( 0.),
xmaxX = cms.double(20000.),
# doYaxis = cms.bool( False ),
doYaxis = cms.bool( True ),
nbinsY = cms.int32 ( 400 ),
xminY = cms.double( 0.),
xmaxY = cms.double( 400000.),
),
me1 = cms.PSet(
folder = cms.string("HLT/LumiMonitoring"),
name = cms.string("lumiVsLS"),
profileX = cms.bool(True)
),
me2 = cms.PSet(
folder = cms.string("HLT/Pixel"),
name = cms.string("num_clusters_per_Lumisection_PXBarrel"),
profileX = cms.bool(True)
),
)
pixelClusterVsLumiPXBarrel = pixelClusterVsLumi.clone()
pixelClusterVsLumiPXBarrel.me.name = "num_clusters_per_instLumi_PXBarrel"
pixelClusterVsLumiPXBarrel.me2.name = "num_clusters_per_Lumisection_PXBarrel"
pixelClusterVsLumiPXForward = pixelClusterVsLumi.clone()
pixelClusterVsLumiPXForward.me.name = "num_clusters_per_instLumi_PXForward"
pixelClusterVsLumiPXForward.me2.name = "num_clusters_per_Lumisection_PXForward"
pixelTrackClusterVsLumiPXBarrel = pixelClusterVsLumi.clone()
pixelTrackClusterVsLumiPXBarrel.me.folder = "HLT/Pixel/TrackClusters/"
pixelTrackClusterVsLumiPXBarrel.me.name = "num_clusters_ontrack_per_instLumi_PXBarrel"
pixelTrackClusterVsLumiPXBarrel.me2.folder = "HLT/Pixel/TrackClusters"
pixelTrackClusterVsLumiPXBarrel.me2.name = "num_clusters_ontrack_per_Lumisection_PXBarrel"
pixelTrackClusterVsLumiPXForward = pixelClusterVsLumi.clone()
pixelTrackClusterVsLumiPXForward.me.folder = "HLT/Pixel/TrackClusters/"
pixelTrackClusterVsLumiPXForward.me.name = "num_clusters_ontrack_per_instLumi_PXForward"
pixelTrackClusterVsLumiPXForward.me2.folder = "HLT/Pixel/TrackClusters"
pixelTrackClusterVsLumiPXForward.me2.name = "num_clusters_ontrack_per_Lumisection_PXForward"
sipixelHarvesterHLTsequence = cms.Sequence(
# hltSiPixelPhase1ClustersHarvester
# + hltSiPixelPhase1TrackClustersHarvester
pixelClusterVsLumiPXBarrel
+ pixelClusterVsLumiPXForward
# + pixelTrackClusterVsLumiPXBarrel
# + pixelTrackClusterVsLumiPXForward
)
|
from django.apps import AppConfig
class ChainsConfig(AppConfig):
name = 'cajas.chains'
verbose_name = 'Cadenas'
def ready(self):
try:
import chains.signals # noqa F401
except ImportError:
pass
|
from random import randint
from data_structures.LinkedList import LinkedList
from data_structures.Queue import Queue
from data_structures.Stack import Stack
def run_linked_list_demo():
print("\nRandomly generated linked list:")
l_list = LinkedList()
for i in range(10):
num = randint(1, 100)
print(f"Add {num}")
l_list.add(num)
def run_stack_demo():
print("\nRandomly generated stack")
stack = Stack()
for i in range(10):
stack.push_random_number()
popped = 1
while popped:
popped = stack.pop()
print(f"Popped {popped}")
def run_queue_demo():
print("\nRandomly generated queue")
queue = Queue()
for i in range(10):
num = randint(1, 100)
print(f"Enqueue {num}")
queue.Enqueue(num)
deq = 1
while deq:
deq = queue.Dequeue()
print(f"Dequeued {deq}")
def run():
run_linked_list_demo()
run_stack_demo()
run_queue_demo()
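# A conventional entry-point guard (an assumption; the original module only
# defines run() without calling it) so the demos execute when run directly.
if __name__ == "__main__":
    run()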
|
import pandas as pd
from app.app.routing.graph import Graph
def get_test_graph():
node_dict = {
'id': ['N1', 'N2', 'N3', 'N4'],
'lat': [52.3, 52.4, 52.4, 52.3],
'lon': [13.4, 13.4, 13.3, 13.3]
}
edge_dict = {
'node1': ['N1', 'N2', 'N3', 'N4'],
'node2': ['N2', 'N3', 'N4', 'N1'],
'distance': [30, 20, 20, 25]
}
nodes = pd.DataFrame.from_dict(node_dict)
nodes.set_index('id', inplace=True)
edges = pd.DataFrame(edge_dict)
return Graph(nodes, edges)
|
from shared_utils import *
from BaseModel import BaseModel
from config import *
import pymc3 as pm
import pickle as pkl
import pandas as pd
import os
import sys
from config_window import start as startixs
start = int(os.environ["SGE_DATE_ID"])
number_of_weeks = 3  # int(sys.argv[4])
model_i = 35
start_date = pd.Timestamp("2020-01-28") + pd.Timedelta(days=start)  # the SGE date id is a day offset; a bare int would be nanoseconds
#NOTE: for jureca, extend to the number of available cores (chains and cores!)
num_samples = 250
num_chains = 4
num_cores = num_chains
# whether to sample the parameters or load them
SAMPLE_PARAMS = True
# whether to sample predictions on training, test or both
# SAMPLE_PREDS = "both" # can be "train", "test" or "both"
disease = "covid19"
prediction_region = "germany"
# model 15 selected by WAICS
# model 35 without report delay and with trend order 1
# model 47 with trend order 4
use_ia, use_report_delay, use_demographics, trend_order, periodic_order = combinations[model_i]
# print model properties
print("Model {} - IA: {} - RD: {} - DEMO: {} - Trend: {} - Per: {}".format(
model_i, use_ia, use_report_delay, use_demographics, trend_order, periodic_order
))
# use_interactions, use_report_delay = combinations_ia_report[model_complexity]
filename_params = "../data/mcmc_samples_backup/parameters_{}_{}".format(disease,start)
filename_pred = "../data/mcmc_samples_backup/predictions_{}_{}.pkl".format(disease, start)
#filename_pred_nowcast = "../data/mcmc_samples_backup/predictions_nowcast_{}_model_{}_window_{}_{}.pkl".format(disease, model_i, start, number_of_weeks)
filename_pred_trend = "../data/mcmc_samples_backup/predictions_trend_{}_{}.pkl".format(disease, start )
filename_model = "../data/mcmc_samples_backup/model_{}_{}.pkl".format(disease, start)
import os
print(os.getcwd())
print('../data/counties/counties.pkl')
# Load data
with open('../data/counties/counties.pkl', "rb") as f:
county_info = pkl.load(f)
# pad = days to look into the future
#days_into_future = 5
#data = load_daily_data(disease, prediction_region, county_info, pad=days_into_future)
days_into_future = 5
data = load_daily_data_n_weeks(start, number_of_weeks, disease, prediction_region, county_info, pad=days_into_future)
first_day = data.index.min()
last_day = data.index.max()
data_train, target_train, data_test, target_test = split_data(
data,
train_start=first_day,
test_start=last_day - pd.Timedelta(days=days_into_future+4),
post_test=last_day + pd.Timedelta(days=1)
)
tspan = (target_train.index[0], target_train.index[-1])
print("training for {} in {} with final model from {} to {}\nWill create files {}, {} and {}".format(
disease, prediction_region, *tspan, filename_params, filename_pred, filename_model))
print(os.getcwd())
print('../data/iaeffect')
year = str(start_date)[:4]
month = str(start_date)[5:7]
day = str(start_date)[8:10]
model = BaseModel(tspan,
county_info,
["../data/ia_effect_samples/{}_{}.pkl".format(disease, i) for i in range(100)],
include_ia=use_ia,
include_report_delay=use_report_delay,
include_demographics=use_demographics,
trend_poly_order=trend_order,
periodic_poly_order=periodic_order)
if SAMPLE_PARAMS:
print("Sampling parameters on the training set.")
trace = model.sample_parameters(
target_train,
samples=num_samples,
tune=100,
target_accept=0.95,
max_treedepth=15,
chains=num_chains,
cores=num_cores,
window=True)
with open(filename_model, "wb") as f:
pkl.dump(model.model, f)
with model.model:
pm.save_trace(trace, filename_params, overwrite=True)
else:
print("Load parameters.")
trace = load_trace_window(disease,model_i, start, number_of_weeks )
print("Sampling predictions on the training and test set.")
pred = model.sample_predictions(target_train.index,
target_train.columns,
trace,
target_test.index,
average_periodic_feature=False,
average_all=False,
window=True)
pred_trend = model.sample_predictions(target_train.index,
target_train.columns,
trace,
target_test.index,
average_periodic_feature=False,
average_all=True,
window=True)
with open(filename_pred, 'wb') as f:
pkl.dump(pred, f)
with open(filename_pred_trend, "wb") as f:
pkl.dump(pred_trend, f)
|
from functools import reduce  # reduce is not a builtin in Python 3
class Solution:
def singleNumber(self, A):
extraNumber = reduce(lambda x, y: x ^ y, A)
return extraNumber
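# e.g. Solution().singleNumber([1, 2, 2, 3, 1]) returns 3: XOR cancels the
# paired values and leaves the element that appears exactly once.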
|
import os
'''
Class : sourceControl
Description :
A class processing file IO on the source received.
'''
class sourceControl:
preCode = None
body = None
postCode = None
'''
Function : __init__(...)
Description :
A function that initializes the class
Parameters :
preCode : A source executed prior to the body source
body : A source executed in the middle. User input from the dashboard is required.
postCode : A source that wraps up the whole process.
BE ADVISED :
Unless there is a reason to modify both the preCode and postCode,
do not put anything on both the preCode and postCode parameter.
'''
def __init__(self, preCode = None, body = None, postCode = None):
self.preCode = preCode
self.body = body
self.postCode = postCode
self.storeSources()
'''
Function : storeSources()
Description :
A function that stores the sources received.
'''
def storeSources(self):
'''
if not self.preCode or not self.body or self.postCode:
print("At least one of the source is empty\n")
'''
fileDir = os.path.dirname(os.path.realpath(__file__))
pathPreCode = os.path.join(fileDir, 'enow/jython/pythonSrc/preCode.py')
pathbodyCode = os.path.join(fileDir, 'enow/jython/pythonSrc/body.py')
pathPostCode = os.path.join(fileDir, 'enow/jython/pythonSrc/postCode.py')
# fPreCode = open(pathPreCode, "wb")
fBodyCode = open(pathbodyCode, "wb")
# fPostCode = open(pathPostCode, "wb")
# fPreCode.seek(0)
fBodyCode.seek(0)
# fPostCode.seek(0)
fBodyCode.truncate()
fBodyCode.seek(0)
# if self.preCode:
# fPreCode.writelines(self.preCode)
fBodyCode.write(self.body)
# if self.postCode:
# fPostCode.writelines(self.postCode)
# fPreCode.close()
fBodyCode.close()
# fPostCode.close()
|
import json
from hypernets.experiment import CompeteExperiment
from hypernets.experiment import Experiment
from hypernets.tabular import dask_ex as dex
from hypernets.tests.model.plain_model_test import create_plain_model
from hypernets.tests.tabular.dask_transofromer_test import setup_dask
from hypernets.tabular.datasets import dsutils
import numpy as np
import pandas as pd
from dask import dataframe as dd
from dask.distributed import LocalCluster, Client
from sklearn.preprocessing import LabelEncoder
class Test_Get_character:
@classmethod
def setup_class(cls):
setup_dask(cls)
cls.boston = dd.from_pandas(dsutils.load_boston(), npartitions=1)
cls.blood = dd.from_pandas(dsutils.load_blood(), npartitions=1)
cls.bike_sharing = dd.from_pandas(dsutils.load_Bike_Sharing(), npartitions=1)
# A test for multiclass task
def experiment_with_bike_sharing(self, init_kwargs, run_kwargs, row_count=3000, with_dask=False):
if with_dask:
X = self.bike_sharing.copy()
y = X.pop('count')
y = y.astype('str')
else:
X = dsutils.load_Bike_Sharing()
if row_count is not None:
X = X.head(row_count)
X['count'] = LabelEncoder().fit_transform(X['count'])
y = X.pop('count')
hyper_model = create_plain_model(with_encoder=True)
X_train, X_test, y_train, y_test = \
dex.train_test_split(X, y, test_size=0.3, random_state=9527)
X_train, X_eval, y_train, y_eval = \
dex.train_test_split(X_train, y_train, test_size=0.3, random_state=9527)
init_kwargs = {
'X_eval': X_eval, 'y_eval': y_eval, 'X_test': X_test,
**init_kwargs
}
compete_experiment = CompeteExperiment(hyper_model, X_train, y_train, **init_kwargs)
base_experiment = Experiment(hyper_model, X_train, y_train, **init_kwargs)
mydict_compete = compete_experiment.get_data_character()
mydict_base = base_experiment.get_data_character()
assert mydict_base
assert mydict_compete
assert mydict_base['experimentType'] == 'base'
assert mydict_compete['experimentType'] == 'compete'
assert mydict_base['target']['taskType'] == 'multiclass'
assert mydict_base['target']['freq'] is None
assert mydict_base['target']['unique']
assert mydict_base['target']['mean'] is None
assert mydict_base['target']['max'] is None
assert mydict_base['target']['min'] is None
assert mydict_base['target']['stdev'] is None
assert mydict_base['target']['dataType']
assert len(mydict_base['targetDistribution']) <= 10
assert mydict_base['datasetShape']['X_train']
assert mydict_base['datasetShape']['y_train']
assert mydict_base['datasetShape']['X_eval']
assert mydict_base['datasetShape']['y_eval']
assert mydict_base['datasetShape']['X_test']
assert mydict_compete['featureDistribution']
# A test for binary task
def experiment_with_blood(self, init_kwargs, run_kwargs, row_count=3000, with_dask=False):
if with_dask:
X = self.blood.copy()
y = X.pop('Class')
else:
X = dsutils.load_blood()
if row_count is not None:
X = X.head(row_count)
X['Class'] = LabelEncoder().fit_transform(X['Class'])
y = X.pop('Class')
hyper_model = create_plain_model(with_encoder=True)
X_train, X_test, y_train, y_test = \
dex.train_test_split(X, y, test_size=0.3, random_state=9527)
X_train, X_eval, y_train, y_eval = \
dex.train_test_split(X_train, y_train, test_size=0.3, random_state=9527)
init_kwargs = {
'X_eval': X_eval, 'y_eval': y_eval, 'X_test': X_test,
**init_kwargs
}
compete_experiment = CompeteExperiment(hyper_model, X_train, y_train, **init_kwargs)
base_experiment = Experiment(hyper_model, X_train, y_train, **init_kwargs)
mydict_compete = compete_experiment.get_data_character()
mydict_base = base_experiment.get_data_character()
assert mydict_base
assert mydict_compete
assert mydict_base['experimentType'] == 'base'
assert mydict_compete['experimentType'] == 'compete'
assert mydict_base['target']['taskType'] == 'binary'
assert mydict_base['target']['freq'] is not None
assert mydict_base['target']['unique'] == 2
assert mydict_base['target']['mean'] is None
assert mydict_base['target']['max'] is None
assert mydict_base['target']['min'] is None
assert mydict_base['target']['stdev'] is None
assert mydict_base['target']['dataType']
assert len(mydict_base['targetDistribution']) <= 10
assert mydict_base['datasetShape']['X_train']
assert mydict_base['datasetShape']['y_train']
assert mydict_base['datasetShape']['X_eval']
assert mydict_base['datasetShape']['y_eval']
assert mydict_base['datasetShape']['X_test']
assert mydict_compete['featureDistribution']
# A test for regression task
def experiment_with_boston(self, init_kwargs, run_kwargs, row_count=3000, with_dask=False):
if with_dask:
X = self.boston
y = X.pop('target')
else:
X = dsutils.load_boston()
if row_count is not None:
X = X.head(row_count)
X['target'] = LabelEncoder().fit_transform(X['target'])
y = X.pop('target')
y = y.astype('float64')
hyper_model = create_plain_model(with_encoder=True)
X_train, X_test, y_train, y_test = \
dex.train_test_split(X, y, test_size=0.3, random_state=9527)
X_train, X_eval, y_train, y_eval = \
dex.train_test_split(X_train, y_train, test_size=0.3, random_state=9527)
init_kwargs = {
'X_eval': X_eval, 'y_eval': y_eval, 'X_test': X_test,
**init_kwargs
}
compete_experiment = CompeteExperiment(hyper_model, X_train, y_train, **init_kwargs)
base_experiment = Experiment(hyper_model, X_train, y_train, **init_kwargs)
mydict_compete = compete_experiment.get_data_character()
mydict_base = base_experiment.get_data_character()
assert mydict_base
assert mydict_compete
assert mydict_base['experimentType'] == 'base'
assert mydict_compete['experimentType'] == 'compete'
assert mydict_base['target']['taskType'] == 'regression'
assert mydict_base['target']['freq'] is None
assert mydict_base['target']['unique']
assert mydict_base['target']['mean'] is not None
assert mydict_base['target']['max'] is not None
assert mydict_base['target']['min'] is not None
assert mydict_base['target']['stdev'] is not None
assert mydict_base['target']['dataType'] == 'float'
assert len(mydict_base['targetDistribution']) <= 10
assert mydict_base['datasetShape']['X_train']
assert mydict_base['datasetShape']['y_train']
assert mydict_base['datasetShape']['X_eval']
assert mydict_base['datasetShape']['y_eval']
assert mydict_base['datasetShape']['X_test']
assert mydict_compete['featureDistribution']
def test_multiclass_with_bike_sharing(self):
self.experiment_with_bike_sharing({}, {})
self.experiment_with_bike_sharing({}, {}, with_dask = True)
def test_binary_with_blood(self):
self.experiment_with_blood({}, {})
self.experiment_with_blood({}, {}, with_dask = True)
def test_regression_with_boston(self):
self.experiment_with_boston({}, {})
self.experiment_with_boston({}, {}, with_dask = True)
|
import sst
from sst.merlin.base import *
from sst.merlin.endpoint import *
from sst.merlin.topology import *
from sst.merlin.interface import *
from sst.merlin.router import *
platdef = PlatformDefinition("platform_dragon")
PlatformDefinition.registerPlatformDefinition(platdef)
##------------------------------------------------
routing_algorithm = 'q-adaptive' # VALn ugal-g ugal-n par minimal q-adaptive
num_msgs = 5000
msg_size = '128B'
msg_interval = '64ns'
pattern = 'Uniform' # Uniform, Tornado, Stencil_3D, FFT3D_all2all, RandomNeighbors
dynamicload = False
##------------------------------------------------
#1056-node system
hpr = 4
rpg = 8
inter_link = 1
num_group = 33
size3dx = 4
size3dy = 8
size3dz = 33
#2550-node system:
# hpr = 5
# rpg = 10
# inter_link = 1
# num_group = 51
# size3dx = 5
# size3dy = 10
# size3dz = 51
platdef.addParamSet("topology",{
"hosts_per_router" : hpr,
"routers_per_group" : rpg,
"intergroup_links" : inter_link,
"num_groups" : num_group,
})
platdef.addParamSet("topology",{
"algorithm" : routing_alogrithm,
'link_lat_host' : '10ns',
'link_lat_local' : '30ns',
'link_lat_global' : '300ns',
})
platdef.addParamSet("topology",{
"adaptive_threshold": 2,
})
platdef.addParamSet("topology",{
"learning_rate": 0.2,
"learning_rate2": 0.04,
"epsilon": 0.001,
"q_threshold1": 0.2,
"q_threshold2": 0.35,
"save_qtable": 'yes',
"save_qtable_time": 1000, ##us
})
platdef.addClassType("topology","sst.merlin.topology.topoDragonFly")
platdef.addParamSet("router",{
"link_bw" : '4GB/s',
"link_bw:host" : '4GB/s',
"xbar_bw" : '40GB/s',
"flit_size" : '128B',
"input_buf_size" : '2560B',
"output_buf_size" : '2560B',
"input_latency" : "10ns",
"output_latency" : "10ns",
"input_buf_size:host" : '2560B',
"output_buf_size:host" : '2560B',
"num_vns" : 1,
"xbar_arb" : "merlin.xbar_arb_lru",
})
platdef.addClassType("router","sst.merlin.base.hr_router")
platdef.addParamSet("network_interface",{
"link_bw" : '4GB/s',
"input_buf_size" : '2560B',
"output_buf_size" : '2560B',
})
#platdef.addClassType("network_interface","sst.merlin.base.ReorderLinkControl")
platdef.addClassType("network_interface","sst.merlin.interface.LinkControl")
PlatformDefinition.setCurrentPlatform("platform_dragon")
# Allocate the system. This will create the topology since it's
# set up in the platform file
system = System()
### set up the endpoint
#----------------------------------------
syssize = system.topology.getNumNodes()
ep = TrafficGenJob(0, syssize)
ep.packets_to_send = num_msgs
ep.packet_size = msg_size
ep.delay_between_packets = msg_interval
ep.message_rate = '1GHz'
ep.extargs["PacketDest:pattern"] = pattern
if dynamicload:
ep.extargs["PacketDelay:pattern"] = "Step"
ep.extargs["PacketDelay:packet_delay_list"] = ['160ns', '80ns']
ep.extargs["PacketDelay:packet_num_list"] = [1000,1000]
if pattern == 'Tornado':
ep.topology = 'dragonfly'
ep.extargs["dragonfly:hosts_per_router"] = hpr
ep.extargs["dragonfly:routers_per_group"] = rpg
ep.extargs["dragonfly:num_groups"] = num_group
ep.extargs["Tornado:shift"] = 1
if pattern == 'Stencil_3D':
ep.extargs["PacketDest:Stencil_3D:3DSize"] = "{} {} {}".format(size3dx, size3dy, size3dz)
if pattern == 'FFT3D_all2all':
ep.extargs["PacketDest:FFT3D_all2all:3DSize"] = "{} {} {}".format(size3dx, size3dy, size3dz)
if pattern == 'RandomNeighbors':
ep.extargs["PacketDest:RandomNeighbors:range_min"] = 6
ep.extargs["PacketDest:RandomNeighbors:range_max"] = 21
system.allocateNodes(ep,"linear")
system.build()
sst.setStatisticLoadLevel(9)
sst.setStatisticOutput("sst.statOutputCSV")
sst.setStatisticOutputOptions({
"filepath" : 'stats.csv',
"separator" : ", "
})
sst.enableAllStatisticsForAllComponents()
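# To run this configuration (assuming an SST core build with the merlin
# element library installed): `sst <this_script>.py`; the statistics enabled
# above are written to stats.csv.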
|
import sqlite3
from tqdm import tqdm
def is_number(val):
try:
float(val)
return True
except Exception as e:
return False
def init_autocomplete(schema, db_path, redis, debug=False):
if debug:
print('Flushing autocomplete...', end='')
redis.delete(schema.db_id)
if debug:
print('Done')
conn = sqlite3.connect(db_path)
conn.text_factory = bytes
phrases = {}
if debug:
print('Retrieving phrases from columns...')
iterator = schema.columns
if debug:
iterator = tqdm(iterator)
for col in iterator:
if col.type != 'text' or col.syn_name == '*':
continue
cur = conn.cursor()
cur.execute(f'SELECT DISTINCT "{col.syn_name}" FROM "{col.table.syn_name}"')
for phrase in cur.fetchall():
if phrase[0] and not is_number(phrase[0]):
try:
val = phrase[0].decode()
cleaned = val.strip().lower()
phrase_key = f'{cleaned}\t{val}'
if phrase_key not in phrases:
phrases[phrase_key] = []
phrases[phrase_key].append(str(col.id))
except Exception as e:
continue
if debug:
print('Storing phrases in autocomplete...')
phrases = [f'{k}\t{",".join(v)}' for k, v in phrases.items()]
batch_size = 20000
iterator = range(0, len(phrases), batch_size)
if debug:
iterator = tqdm(iterator)
for start in iterator:
end = start + batch_size
redis.zadd(schema.db_id, dict.fromkeys(phrases[start:end], 0), nx=True)
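# A hedged lookup sketch (same redis client and schema as above): members
# are stored with score 0 as "cleaned\toriginal\tcol_ids", so prefix matches
# can be fetched lexicographically:
# matches = redis.zrangebylex(schema.db_id, '[par', '[par\xff', start=0, num=10)
# for m in matches:
#     cleaned, original, col_ids = m.decode().split('\t')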
|
from django.urls import path,include
from .views import CustomUserCreate
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
path('create-user/', CustomUserCreate.as_view(), name="create_user"),
path('auth/', include('dj_rest_auth.urls')),
path('teacher/', include('api.teacher.urls')),
path('class/', include('api.class.urls')),
]
|
#!/usr/bin/python
#
# Copyright 2016 British Broadcasting Corporation and Contributors(1)
#
# (1) Contributors are listed in the AUTHORS file (please extend AUTHORS,
# not this header)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import sys
print "Content-type: application/json"
print
raw_stdin = sys.stdin.read()
myjson = json.loads(raw_stdin)
file_tag = str(myjson["repr"]["id"])
z = open("../upload/%s.json" % file_tag)
rj = z.read()
j = json.loads(rj)
z.close()
record = j["myjson"]
record = json.loads(record)
program_xml = record["repr"]
print json.dumps(program_xml)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# date: 2018/4/15
# author: he.zhiming
#
from __future__ import unicode_literals, absolute_import
import logging
import logging.config
class LogFactory:
_LOG_CONFIG_DICT = {
    # dictConfig requires at least the schema version; an empty dict raises ValueError
    "version": 1,
    "disable_existing_loggers": False,
}
logging.config.dictConfig(_LOG_CONFIG_DICT)
@classmethod
def get_logger(cls, logger_name) -> logging.Logger:
return logging.getLogger(logger_name)
DEBUGGER = LogFactory.get_logger('debugger')
CONSOLE_LOGGER = LogFactory.get_logger('console_logger')
|
"""Test ISY994 system health."""
import asyncio
from unittest.mock import Mock
from aiohttp import ClientError
from openpeerpower.components.isy994.const import DOMAIN, ISY994_ISY, ISY_URL_POSTFIX
from openpeerpower.const import CONF_HOST
from openpeerpower.setup import async_setup_component
from .test_config_flow import MOCK_HOSTNAME, MOCK_UUID
from tests.common import MockConfigEntry, get_system_health_info
MOCK_ENTRY_ID = "cad4af20b811990e757588519917d6af"
MOCK_CONNECTED = "connected"
MOCK_HEARTBEAT = "2021-05-01T00:00:00.000000"
async def test_system_health(opp, aioclient_mock):
"""Test system health."""
aioclient_mock.get(f"http://{MOCK_HOSTNAME}{ISY_URL_POSTFIX}", text="")
opp.config.components.add(DOMAIN)
assert await async_setup_component(opp, "system_health", {})
MockConfigEntry(
domain=DOMAIN,
entry_id=MOCK_ENTRY_ID,
data={CONF_HOST: f"http://{MOCK_HOSTNAME}"},
unique_id=MOCK_UUID,
).add_to_opp(opp)
opp.data[DOMAIN] = {}
opp.data[DOMAIN][MOCK_ENTRY_ID] = {}
opp.data[DOMAIN][MOCK_ENTRY_ID][ISY994_ISY] = Mock(
connected=True,
websocket=Mock(
last_heartbeat=MOCK_HEARTBEAT,
status=MOCK_CONNECTED,
),
)
info = await get_system_health_info(opp, DOMAIN)
for key, val in info.items():
if asyncio.iscoroutine(val):
info[key] = await val
assert info["host_reachable"] == "ok"
assert info["device_connected"]
assert info["last_heartbeat"] == MOCK_HEARTBEAT
assert info["websocket_status"] == MOCK_CONNECTED
async def test_system_health_failed_connect(opp, aioclient_mock):
"""Test system health."""
aioclient_mock.get(f"http://{MOCK_HOSTNAME}{ISY_URL_POSTFIX}", exc=ClientError)
opp.config.components.add(DOMAIN)
assert await async_setup_component(opp, "system_health", {})
MockConfigEntry(
domain=DOMAIN,
entry_id=MOCK_ENTRY_ID,
data={CONF_HOST: f"http://{MOCK_HOSTNAME}"},
unique_id=MOCK_UUID,
).add_to_opp(opp)
opp.data[DOMAIN] = {}
opp.data[DOMAIN][MOCK_ENTRY_ID] = {}
opp.data[DOMAIN][MOCK_ENTRY_ID][ISY994_ISY] = Mock(
connected=True,
websocket=Mock(
last_heartbeat=MOCK_HEARTBEAT,
status=MOCK_CONNECTED,
),
)
info = await get_system_health_info(opp, DOMAIN)
for key, val in info.items():
if asyncio.iscoroutine(val):
info[key] = await val
assert info["host_reachable"] == {"error": "unreachable", "type": "failed"}
|
import struct
# particles format version 0
PARS_FMT_VER_0 = 0
# particles format support versions
PARS_FMT_SUPP = (PARS_FMT_VER_0, )
# particle attributes
# position
POS = 0
# velocity
VEL = 1
# color
COL = 2
# material id
MAT = 3
def write_pars_v0(par_data):
data = bytearray()
# particles format version
data.extend(struct.pack('I', 0))
# particles count
pars_cnt = len(par_data[POS])
data.extend(struct.pack('I', pars_cnt))
print('Particles count:', pars_cnt)
# par_i - particles index
for par_i in range(pars_cnt):
data.extend(struct.pack('3f', *par_data[POS][par_i]))
data.extend(struct.pack('3f', *par_data[VEL][par_i]))
data.extend(struct.pack('I', par_data[COL][par_i]))
data.extend(struct.pack('I', par_data[MAT][par_i]))
return data
def read_pars_v0(data, caches, offs, folder):
# particles positions
pos = []
# particles velocities
vel = []
# particles colors
col = []
# particles materials
mat = []
# particles count
count = struct.unpack('I', data[offs : offs + 4])[0]
offs += 4
for index in range(count):
# particle position
p_pos = struct.unpack('3f', data[offs : offs + 12])
offs += 12
pos.append(p_pos)
# particle velocity
p_vel = struct.unpack('3f', data[offs : offs + 12])
offs += 12
vel.append(p_vel)
# particle color
p_col = struct.unpack('I', data[offs : offs + 4])[0]
offs += 4
col.append(p_col)
# particle material
p_mat = struct.unpack('I', data[offs : offs + 4])[0]
offs += 4
mat.append(p_mat)
caches[folder] = {POS: pos, VEL: vel, COL: col, MAT: mat}
# read particles
def read_pars(data, caches, folder):
# read offset in file
offs = 0
# particles format version
ver = struct.unpack('I', data[offs : offs + 4])[0]
offs += 4
if not ver in PARS_FMT_SUPP:
msg = 'Unsupported particles format version: {0}'.format(ver)
raise BaseException(msg)
if ver == PARS_FMT_VER_0:
read_pars_v0(data, caches, offs, folder)
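# A minimal round-trip sketch with made-up data: serialize one particle with
# write_pars_v0, then parse it back through read_pars into a cache dict.
par_data = {POS: [(0.0, 1.0, 2.0)], VEL: [(0.1, 0.2, 0.3)], COL: [255], MAT: [1]}
caches = {}
read_pars(write_pars_v0(par_data), caches, 'demo')
assert caches['demo'][MAT] == [1]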
|
class Solution:
def XXX(self, root: TreeNode) -> bool:
def depth(root):
if(not root):return 0
left=depth(root.left)
if(left==-1):return -1
right=depth(root.right)
if(right==-1):return -1
return max(left,right)+1 if abs(left-right)<2 else -1
return depth(root)!=-1
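# A hedged usage sketch (TreeNode is the usual LeetCode node shape, assumed
# here because the snippet does not define it):
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val, self.left, self.right = val, left, right
# root = TreeNode(1, TreeNode(2), TreeNode(3))
# assert Solution().XXX(root)  # both subtrees have depth 1, so it is balanced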
|
# coding=utf-8
# Runner for daemon
import os
from pyramid.paster import get_appsettings
from pyramid.paster import setup_logging
from tracim_backend.config import CFG
from tracim_backend.lib.mail_notifier.daemon import MailSenderDaemon
config_uri = os.environ["TRACIM_CONF_PATH"]
setup_logging(config_uri)
settings = get_appsettings(config_uri)
settings.update(settings.global_conf)
app_config = CFG(settings)
app_config.configure_filedepot()
daemon = MailSenderDaemon(app_config, burst=False)
daemon.run()
|
#!/usr/bin/python3
""" Calculates the size of the hypotenuse of a right triangle,
using the Pythagorean Theorem.
a**2 + b**2 = c**2
Basically, the theorem says:
- the sum of the squares of the legs, is equal
to the square of the hypotenuse
"""
from math import sqrt
__author__ = "@ivanleoncz"
def hypotenuse_calc():
""" Calculates the hypotenuse. """
leg_a = int(input("\nSize of leg A: "))
leg_b = int(input("Size of leg B: "))
squared_a = leg_a ** 2
squared_b = leg_b ** 2
hypotenuse_c = squared_a + squared_b
return "Hypotenuse: %s " % int(sqrt(hypotenuse_c))
print(hypotenuse_calc(),"\n")
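# Worked example: legs 3 and 4 give 3**2 + 4**2 = 25 and sqrt(25) = 5, so the
# function reports "Hypotenuse: 5" for that input.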
|
from torch.utils import data
import torch
import numpy as np
from model_training.utils import loadmat, CustomTensorDataset, load_weights, load_labels, resample, slide_and_cut, load_challenge_data
from utils.denoising import filter_and_detrend
import neurokit2 as nk
import os
# from process.extract_peak_targets import get_target_peaks as get_target_peak_bak
class CustomDataset(data.Dataset):
"""
PyTorch Dataset generator class
Ref: https://stanford.edu/~shervine/blog/pytorch-how-to-generate-data-parallel
"""
def __init__(self, label_files, labels_onehot, label_dir, leads_index, name_list_full=[], transform=None, sample_rate=500, to_get_feature=False):
"""Initialization"""
self.file_names_list = label_files
self.label_dir = label_dir
self.labels_onehot = labels_onehot
self.class_weights = self.get_class_weights()
self.leads_index = leads_index
self.transform = transform
self.to_get_feature = to_get_feature
self.sample_rate = sample_rate
# self.normalization = TNormalize()
def __len__(self):
"""Return total number of data samples"""
return len(self.file_names_list)
def __getitem__(self, idx):
"""Generate data sample"""
sample_file_name = self.file_names_list[idx]
# header_file_name = self.file_names_list[idx][:-3] + "hea"
label = self.labels_onehot[idx]
recording, header, name = load_challenge_data(sample_file_name, self.label_dir)
# get class_weight by name
class_weight, data_source = self.get_class_weight_and_source_by_name(name)
# divide ADC_gain and resample
recording = resample(recording, header, resample_Fs=self.sample_rate)
for lead in recording:
assert np.isnan(lead).any() == False
# if lead.sum() == 0:
# print(idx)
# to extract features
# recording = self.normalization(recording)
feature = np.zeros((50,))
if self.to_get_feature:
feature = self.get_features(recording)
feature = torch.tensor(feature)
if self.transform is not None:
recording = self.transform(recording)
recording = filter_and_detrend(recording)
# recording = nk.ecg_clean(recording, method='biosppy')
# # slide and cut
# recording = slide_and_cut(recording, n_segment=1, window_size=4992, sampling_rate=500)
# recording = recording[0]
# to filter and detrend samples
recording = recording[self.leads_index, :]
recording = torch.tensor(recording)
label = torch.tensor(label)
class_weight = torch.tensor(class_weight)
data_source = torch.tensor(data_source)
if self.to_get_feature:
return recording, label, class_weight, data_source, feature
return recording, label, class_weight
def get_class_weights(self):
classes = "164889003,164890007,6374002,426627000,733534002,713427006,270492004,713426002,39732003,445118002,164947007,251146004,111975006,698252002,426783006,63593006,10370003,365413008,427172004,164917005,47665007,427393009,426177001,427084000,164934002,59931005"
### equivalent SNOMED CT codes merged, noted as the larger one
classes = classes.split(',')
CPSC_classes = ['270492004', '164889003', '733534002', '63593006', '426783006',
'713427006'] # "59118001" = "713427006"
CPSC_class_weight = np.zeros((26,))
for cla in CPSC_classes:
CPSC_class_weight[classes.index(cla)] = 1
# CPSC_extra
CPSC_extra_excluded_classes = ['6374002', '39732003', '445118002', '251146004', '365413008',
'164947007', '365413008', '164947007', '698252002', '426783006',
'10370003', '111975006', '164917005', '47665007', '427393009',
'426177001', '164934002', '59931005']
CPSC_extra_class_weight = np.ones((26,))
for cla in CPSC_extra_excluded_classes:
CPSC_extra_class_weight[classes.index(cla)] = 0
# PTB-XL
PTB_XL_excluded_classes = ['6374002', '426627000', '365413008', '427172004'] # , '17338001'
PTB_XL_class_weight = np.ones((26,))
for cla in PTB_XL_excluded_classes:
PTB_XL_class_weight[classes.index(cla)] = 0
# PTB_XL_class_weight[classes.index('426783006')] = 0.1
# G12ECG
G12ECG_excluded_classes = ['10370003', '365413008', '164947007']
G12ECG_class_weight = np.ones((26,))
for cla in G12ECG_excluded_classes:
G12ECG_class_weight[classes.index(cla)] = 0
# Chapman Shaoxing
Chapman_excluded_classes = ['6374002', '426627000', '713426002', '445118002', '10370003', '365413008',
'427172004', '427393009', '63593006']
Chapman_class_weight = np.ones((26,))
for cla in Chapman_excluded_classes:
Chapman_class_weight[classes.index(cla)] = 0
# Ningbo
Ningbo_excluded_classes = ['164889003', '164890007', '426627000']
Ningbo_class_weight = np.ones((26,))
for cla in Ningbo_excluded_classes:
Ningbo_class_weight[classes.index(cla)] = 0
# per-source class weights: CPSC, CPSC-extra, PTB-XL, G12ECG, Chapman, Ningbo
return [CPSC_class_weight, CPSC_extra_class_weight, PTB_XL_class_weight, G12ECG_class_weight, Chapman_class_weight, Ningbo_class_weight]
def get_class_weight_and_source_by_name(self, name):
if name[0] == 'A': # CPSC
class_weight = self.class_weights[0]
data_source_class = 0
elif name[0] == 'Q': # CPSC-extra
class_weight = self.class_weights[1]
data_source_class = 2
elif name[0] == 'H': # PTB-XL
class_weight = self.class_weights[2]
data_source_class = 0
elif name[0] == 'E': # G12ECG
class_weight = self.class_weights[3]
data_source_class = 1
elif name[0] == 'J' and int(name[2:]) <= 10646: # Chapman
class_weight = self.class_weights[4]
data_source_class = 2
elif name[0] == 'J' and int(name[2:]) > 10646: # Ningbo
class_weight = self.class_weights[5]
data_source_class = 2
elif name[0] == 'S' or name[0] == 'I': # other sources, zero-weighted
class_weight = np.zeros((26,))
data_source_class = 2
return class_weight, data_source_class
class CustomDataset4PeakDetection(CustomDataset):
"""
PyTorch Dataset generator class
Ref: https://stanford.edu/~shervine/blog/pytorch-how-to-generate-data-parallel
"""
def __init__(self, label_files, labels_onehot, label_dir, leads_index, name_list_full=[], sample_rate=500, transform=None, to_get_feature=False):
super().__init__(label_files, labels_onehot, label_dir, leads_index, name_list_full=name_list_full, sample_rate=sample_rate, transform=transform, to_get_feature=to_get_feature)
name_list_full_path = './name_list_all.npy'
self.name_list_full = np.load(name_list_full_path)
# load peak targets
if not os.path.exists('./peak_targets_full_v2.npy'):
os.system('gunzip ./peak_targets_full_v2.npy.gz')
self.peak_targets_full = np.load('./peak_targets_full_v2.npy')
print("get peak target")
def __getitem__(self, idx):
"""Generate data sample"""
sample_file_name = self.file_names_list[idx]
# header_file_name = self.file_names_list[idx][:-3] + "hea"
label = self.labels_onehot[idx]
recording, header, name = load_challenge_data(sample_file_name, self.label_dir)
# get class_weight by name
class_weight, data_source = self.get_class_weight_and_source_by_name(name)
# divide ADC_gain and resample
recording = resample(recording, header, resample_Fs=self.sample_rate)
for lead in recording:
assert not np.isnan(lead).any()
# if lead.sum() == 0:
# print(idx)
# to extract features
# recording = self.normalization(recording)
if self.transform is not None:
recording = self.transform(recording)
assert len(recording) <= 4992
recording = filter_and_detrend(recording)
# recording = nk.ecg_clean(recording, method='biosppy')
# target_peaks, _ = get_target_peak_bak(recording, type_num=5)
# target_peaks = self.get_target_peaks(recording)
target_peaks = self.peak_targets_full[np.where(self.name_list_full==name)]
recording = recording[self.leads_index, :]
recording = torch.tensor(recording)
label = torch.tensor(label)
# class_weight = torch.tensor([0])
# data_source = torch.tensor([0])
target_peaks = torch.tensor(target_peaks[0])
# print(recording.size(), label.size(), class_weight.size(), target_peaks.size())
return recording, label, class_weight, target_peaks
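# A hedged usage sketch (paths, label arrays, and lead selection are
# hypothetical; the dataset expects challenge-format records on disk):
# dataset = CustomDataset(label_files, labels_onehot, label_dir='./training_data',
#                         leads_index=list(range(12)), sample_rate=500)
# loader = data.DataLoader(dataset, batch_size=32, shuffle=True)
# for recording, label, class_weight in loader:
#     ...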
|
import string
import emoji
main_special_characters = string.punctuation + string.digits + string.whitespace
other_special_characters = (
" ’“”–ー一▬…✦�£•€«»°·═"
"×士^˘⇓↓↑←→()§″′´¿−±∈¢ø‚„½¼¾¹²³―⁃,ˌ¸‹›ʺˈʻ¦‐⠀‰
‑≤≥‖"
"◆●■►▼▲▴∆▻¡★☆✱ːº。¯˜¥ɪ≈†上ン:∼⁄・♡✓⊕․.⋅÷1‟;،、¨ाাी्े◦˚"
"゜ʼ≖ʼ¤ッツシ℃√!【】‿∞➤~πه۩☛₨➩☻๑٪♥ıॽ《‘©﴿٬?▷Г♫∟™ª₪®「—❖"
"」﴾》"
)
emoji_list = list(emoji.UNICODE_EMOJI["en"].keys())  # avoid shadowing the emoji module
special_characters_default = set(main_special_characters + other_special_characters)
special_characters_default.update(emoji_list)
parameters_filtering_default = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": False,
"length_word_max_cutoff": 50,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.4,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": False,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.70,
"cond_check_perplexity": False,
"perplexity_max_cutoff": 3000000,
}
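# A minimal sketch (a hypothetical helper, not part of the original filtering
# code) of how one of these parameter sets can gate a document, using the
# special-character condition as an example:
def passes_special_characters_check(text, params):
    """Return True when the special-character ratio is within the cutoff."""
    if not params["cond_check_special_characters"]:
        return True
    if not text:
        return False
    n_special = sum(1 for ch in text if ch in params["special_characters"])
    return n_special / len(text) <= params["special_characters_max_cutoff"]
# e.g. passes_special_characters_check("plain prose.", parameters_filtering_default) -> True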
parameters_filtering_af = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 25,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.3,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.6,
"cond_check_perplexity": True,
"perplexity_max_cutoff": 3000000,
}
parameters_filtering_ar = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 25,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.45,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": True,
"perplexity_max_cutoff": 1000000,
}
parameters_filtering_arz = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 25,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.5,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": False,
"perplexity_max_cutoff": 3000000,
}
parameters_filtering_as = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 25,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.25,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": False,
"perplexity_max_cutoff": 3000000,
}
parameters_filtering_bn = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 30,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.275,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0.05,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": False,
"perplexity_max_cutoff": 575000,
}
parameters_filtering_ca = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 30,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.35,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": True,
"perplexity_max_cutoff": 1750000,
}
parameters_filtering_en = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": True,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 25,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 20,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.4,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0.3,
"cond_check_flagged_words": True,
"flagged_words_max_cutoff": 0.045,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.80,
"cond_check_perplexity": True,
"perplexity_max_cutoff": 2500,
}
parameters_filtering_es = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 30,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.3,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0.2,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": True,
"perplexity_max_cutoff": 2500000,
}
parameters_filtering_eu = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 35,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.3,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": False,
"perplexity_max_cutoff": 3000000,
}
parameters_filtering_fr = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 30,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.35,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0.15,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": True,
"perplexity_max_cutoff": 3000000,
}
parameters_filtering_gu = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 30,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.3,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": True,
"perplexity_max_cutoff": 250000,
}
parameters_filtering_hi = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 25,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.35,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": True,
"perplexity_max_cutoff": 600000,
}
parameters_filtering_id = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 30,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.25,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0.25,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": True,
"perplexity_max_cutoff": 2500000,
}
parameters_filtering_kn = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 50,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.25,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": True,
"perplexity_max_cutoff": 400000,
}
parameters_filtering_ml = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 50,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.2,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": True,
"perplexity_max_cutoff": 1600000,
}
parameters_filtering_mr = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 30,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.25,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": True,
"perplexity_max_cutoff": 425000,
}
parameters_filtering_pt = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 30,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.3,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0.15,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": True,
"perplexity_max_cutoff": 3000000,
}
parameters_filtering_so = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": False,
"length_word_max_cutoff": 1000,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.3,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": False,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": False,
"perplexity_max_cutoff": 3000000,
}
parameters_filtering_sw = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 30,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.275,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": False,
"perplexity_max_cutoff": 3000000,
}
parameters_filtering_ta = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 50,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.25,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": False,
"perplexity_max_cutoff": 3000000,
}
parameters_filtering_te = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 35,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.25,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": False,
"perplexity_max_cutoff": 3000000,
}
parameters_filtering_ur = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 30,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.4,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": False,
"perplexity_max_cutoff": 3000000,
}
parameters_filtering_vi = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 30,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.35,
"cond_words_augmentation": True,
"words_augmentation_group_sizes": [2, 3],
"words_augmentation_join_char": " ",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": False,
"perplexity_max_cutoff": 3000000,
}
parameters_filtering_yo = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": True,
"length_word_max_cutoff": 30,
"cond_check_number_words": True,
"tokenization": False,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.3,
"cond_words_augmentation": False,
"words_augmentation_group_sizes": [],
"words_augmentation_join_char": "",
"cond_check_stopwords": True,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": False,
"perplexity_max_cutoff": 3000000,
}
parameters_filtering_zh = {
"cond_uniform_whitespace": True,
"cond_replace_unicode_punctuation": False,
"cond_remove_words_with_incorrect_substrings": False,
"incorrect_word_substrings": ["http", "www", ".com", "href", "//"],
"cond_remove_long_words": False,
"length_word_max_cutoff": 1000,
"cond_check_number_words": True,
"tokenization": True,
"strip_characters": special_characters_default,
"number_words_min_cutoff": 1,
"number_words_max_cutoff": 100000,
"cond_check_character_repetition_removal": True,
"character_repetition_length": 10,
"character_repetition_max_cutoff": 0.106,
"cond_check_word_repetition_removal": True,
"word_repetition_length": 5,
"word_repetition_max_cutoff": 0.19,
"cond_check_special_characters": True,
"special_characters": special_characters_default,
"special_characters_max_cutoff": 0.4,
"cond_words_augmentation": True,
"words_augmentation_group_sizes": [2, 3],
"words_augmentation_join_char": "",
"cond_check_stopwords": False,
"stopwords_min_cutoff": 0,
"cond_check_flagged_words": False,
"flagged_words_max_cutoff": 0.2,
"cond_check_lang_id": True,
"lang_id_min_cutoff": 0.75,
"cond_check_perplexity": False,
"perplexity_max_cutoff": 3000000,
}
parameters_filtering = {
"default": parameters_filtering_default,
"af": parameters_filtering_af,
"ar": parameters_filtering_ar,
"arz": parameters_filtering_arz,
"as": parameters_filtering_as,
"bn": parameters_filtering_bn,
"ca": parameters_filtering_ca,
"en": parameters_filtering_en,
"es": parameters_filtering_es,
"eu": parameters_filtering_eu,
"fr": parameters_filtering_fr,
"gu": parameters_filtering_gu,
"hi": parameters_filtering_hi,
"id": parameters_filtering_id,
"kn": parameters_filtering_kn,
"ml": parameters_filtering_ml,
"mr": parameters_filtering_mr,
"pt": parameters_filtering_pt,
"so": parameters_filtering_so,
"sw": parameters_filtering_sw,
"ta": parameters_filtering_ta,
"te": parameters_filtering_te,
"ur": parameters_filtering_ur,
"vi": parameters_filtering_vi,
"yo": parameters_filtering_yo,
"zh": parameters_filtering_zh,
}
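# A minimal lookup helper, offered as a hedged usage sketch (not part of the
# original registry above): fetch the filtering parameters for a language
# code, falling back to the defaults when no dedicated entry exists.
def get_parameters_filtering(lang_dataset_id):
    # "default" is guaranteed to exist in the registry above.
    return parameters_filtering.get(lang_dataset_id, parameters_filtering["default"])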
|
# Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import pytest
import numpy.testing as npt
import tensorflow as tf
from trieste.acquisition.function import NegativeLowerConfidenceBound
from trieste.acquisition.rule import (
EfficientGlobalOptimization,
ThompsonSampling,
TrustRegion,
OBJECTIVE,
)
from trieste.data import Dataset
from trieste.models import ProbabilisticModel
from trieste.space import SearchSpace, DiscreteSearchSpace, Box
from tests.util.misc import one_dimensional_range, zero_dataset
from tests.util.model import QuadraticWithUnitVariance
@pytest.mark.parametrize('datasets', [{}, {"foo": zero_dataset()}])
@pytest.mark.parametrize(
'models', [{}, {"foo": QuadraticWithUnitVariance()}, {OBJECTIVE: QuadraticWithUnitVariance()}]
)
def test_trust_region_raises_for_missing_datasets_key(
datasets: Dict[str, Dataset],
models: Dict[str, ProbabilisticModel]
) -> None:
search_space = one_dimensional_range(-1, 1)
rule = TrustRegion()
with pytest.raises(KeyError):
rule.acquire(search_space, datasets, models, None)
@pytest.mark.parametrize('models', [
{},
{"foo": QuadraticWithUnitVariance()},
{"foo": QuadraticWithUnitVariance(), OBJECTIVE: QuadraticWithUnitVariance()}
])
@pytest.mark.parametrize('datasets', [{}, {OBJECTIVE: zero_dataset()}])
def test_thompson_sampling_raises_for_invalid_models_keys(
datasets: Dict[str, Dataset], models: Dict[str, ProbabilisticModel]
) -> None:
search_space = one_dimensional_range(-1, 1)
rule = ThompsonSampling(100, 10)
with pytest.raises(ValueError):
rule.acquire(search_space, datasets, models)
@pytest.mark.parametrize('search_space, expected_minimum', [
(
DiscreteSearchSpace(tf.constant([[-2.2, -1.0], [0.1, -0.1], [1.3, 3.3]])),
tf.constant([[0.1, -0.1]])
),
(Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3])), tf.constant([[0.0, 0.0]])),
])
def test_ego(search_space: SearchSpace, expected_minimum: tf.Tensor) -> None:
ego = EfficientGlobalOptimization(NegativeLowerConfidenceBound(0).using(OBJECTIVE))
dataset = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))
query_point, _ = ego.acquire(
search_space, {OBJECTIVE: dataset}, {OBJECTIVE: QuadraticWithUnitVariance()}
)
npt.assert_array_almost_equal(query_point, expected_minimum, decimal=5)
def test_trust_region_for_default_state() -> None:
tr = TrustRegion(NegativeLowerConfidenceBound(0).using(OBJECTIVE))
dataset = Dataset(tf.constant([[0.1, 0.2]]), tf.constant([[0.012]]))
lower_bound = tf.constant([-2.2, -1.0])
upper_bound = tf.constant([1.3, 3.3])
search_space = Box(lower_bound, upper_bound)
query_point, state = tr.acquire(
search_space, {OBJECTIVE: dataset}, {OBJECTIVE: QuadraticWithUnitVariance()}, None
)
npt.assert_array_almost_equal(query_point, tf.constant([[0.0, 0.0]]), 5)
npt.assert_array_almost_equal(state.acquisition_space.lower, lower_bound)
npt.assert_array_almost_equal(state.acquisition_space.upper, upper_bound)
npt.assert_array_almost_equal(state.y_min, [0.012])
assert state.is_global
def test_trust_region_successful_global_to_global_trust_region_unchanged() -> None:
tr = TrustRegion(NegativeLowerConfidenceBound(0).using(OBJECTIVE))
dataset = Dataset(tf.constant([[0.1, 0.2], [-0.1, -0.2]]), tf.constant([[0.4], [0.3]]))
lower_bound = tf.constant([-2.2, -1.0])
upper_bound = tf.constant([1.3, 3.3])
search_space = Box(lower_bound, upper_bound)
eps = 0.5 * (search_space.upper - search_space.lower) / 10
previous_y_min = dataset.observations[0]
is_global = True
previous_state = TrustRegion.State(search_space, eps, previous_y_min, is_global)
query_point, current_state = tr.acquire(
search_space, {OBJECTIVE: dataset}, {OBJECTIVE: QuadraticWithUnitVariance()}, previous_state
)
npt.assert_array_almost_equal(current_state.eps, previous_state.eps)
assert current_state.is_global
npt.assert_array_almost_equal(query_point, tf.constant([[0.0, 0.0]]), 5)
npt.assert_array_almost_equal(current_state.acquisition_space.lower, lower_bound)
npt.assert_array_almost_equal(current_state.acquisition_space.upper, upper_bound)
def test_trust_region_for_unsuccessful_global_to_local_trust_region_unchanged() -> None:
tr = TrustRegion(NegativeLowerConfidenceBound(0).using(OBJECTIVE))
dataset = Dataset(tf.constant([[0.1, 0.2], [-0.1, -0.2]]), tf.constant([[0.4], [0.5]]))
lower_bound = tf.constant([-2.2, -1.0])
upper_bound = tf.constant([1.3, 3.3])
search_space = Box(lower_bound, upper_bound)
eps = 0.5 * (search_space.upper - search_space.lower) / 10
previous_y_min = dataset.observations[0]
is_global = True
acquisition_space = search_space
previous_state = TrustRegion.State(acquisition_space, eps, previous_y_min, is_global)
query_point, current_state = tr.acquire(
search_space, {OBJECTIVE: dataset}, {OBJECTIVE: QuadraticWithUnitVariance()}, previous_state
)
npt.assert_array_almost_equal(current_state.eps, previous_state.eps)
assert not current_state.is_global
npt.assert_array_less(lower_bound, current_state.acquisition_space.lower)
npt.assert_array_less(current_state.acquisition_space.upper, upper_bound)
assert query_point[0] in current_state.acquisition_space
def test_trust_region_for_successful_local_to_global_trust_region_increased() -> None:
tr = TrustRegion(NegativeLowerConfidenceBound(0).using(OBJECTIVE))
dataset = Dataset(tf.constant([[0.1, 0.2], [-0.1, -0.2]]), tf.constant([[0.4], [0.3]]))
lower_bound = tf.constant([-2.2, -1.0])
upper_bound = tf.constant([1.3, 3.3])
search_space = Box(lower_bound, upper_bound)
eps = 0.5 * (search_space.upper - search_space.lower) / 10
previous_y_min = dataset.observations[0]
is_global = False
acquisition_space = Box(dataset.query_points - eps, dataset.query_points + eps)
previous_state = TrustRegion.State(acquisition_space, eps, previous_y_min, is_global)
query_point, current_state = tr.acquire(
search_space, {OBJECTIVE: dataset}, {OBJECTIVE: QuadraticWithUnitVariance()}, previous_state
)
npt.assert_array_less(previous_state.eps, current_state.eps) # current TR larger than previous
assert current_state.is_global
npt.assert_array_almost_equal(current_state.acquisition_space.lower, lower_bound)
npt.assert_array_almost_equal(current_state.acquisition_space.upper, upper_bound)
def test_trust_region_for_unsuccessful_local_to_global_trust_region_reduced() -> None:
tr = TrustRegion(NegativeLowerConfidenceBound(0).using(OBJECTIVE))
dataset = Dataset(tf.constant([[0.1, 0.2], [-0.1, -0.2]]), tf.constant([[0.4], [0.5]]))
lower_bound = tf.constant([-2.2, -1.0])
upper_bound = tf.constant([1.3, 3.3])
search_space = Box(lower_bound, upper_bound)
eps = 0.5 * (search_space.upper - search_space.lower) / 10
previous_y_min = dataset.observations[0]
is_global = False
acquisition_space = Box(dataset.query_points - eps, dataset.query_points + eps)
previous_state = TrustRegion.State(acquisition_space, eps, previous_y_min, is_global)
query_point, current_state = tr.acquire(
search_space, {OBJECTIVE: dataset}, {OBJECTIVE: QuadraticWithUnitVariance()}, previous_state
)
npt.assert_array_less(current_state.eps, previous_state.eps) # current TR smaller than previous
assert current_state.is_global
npt.assert_array_almost_equal(current_state.acquisition_space.lower, lower_bound)
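# Taken together, these tests pin down the TrustRegion state machine: a
# successful global step keeps the full search space, an unsuccessful global
# step drops to a local region around the best point, and local steps return
# to the global space with eps increased on success and decreased on failure.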
|
import matplotlib.pyplot as plt
import numpy as np
import copy
import time
def brute_force_find_max(array):
max_profit = 0
max_left = 0
max_right = 0
for i in range(len(array)):
profit = 0
        for j in range(i, len(array)):  # consider every subarray array[i..j]
            profit = profit + array[j]
            if profit > max_profit:
                max_left = i
                max_right = j
                max_profit = profit
return max_left, max_right, max_profit
def find_max_crossing_subarray(array, low, mid, high):
    left_sum = float("-inf")  # sentinel lower than any possible sum
sum_subarray = 0
max_left = 0
max_right = 0
for i in range(mid, low-1,-1):
sum_subarray = sum_subarray + array[i]
if sum_subarray > left_sum:
left_sum = sum_subarray
max_left = i
    right_sum = float("-inf")  # sentinel lower than any possible sum
sum_subarray = 0
for i in range(mid+1, high+1):
sum_subarray = sum_subarray + array[i]
if sum_subarray > right_sum:
right_sum = sum_subarray
max_right = i
return max_left, max_right, (left_sum + right_sum)
def find_maximum_subarray(array, low, high):
    if low == high:
        # Base case: a single element is its own maximum subarray.
        return low, high, array[low]
else:
mid = (low+high)//2
left_low, left_high, left_sum = find_maximum_subarray(array, low, mid)
right_low, right_high, right_sum = find_maximum_subarray(array, mid+1, high)
cross_low, cross_high, cross_sum = find_max_crossing_subarray(array, low, mid, high)
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
else:
return cross_low, cross_high, cross_sum
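# Worked example (hedged, hand-checked against the base case above): for
# array = [13, -3, -25, 20], find_maximum_subarray(array, 0, 3) splits at
# mid = 1; the left half yields (0, 0, 13), the right half (3, 3, 20), and
# the crossing candidate (0, 3, 5), so the call returns (3, 3, 20).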
def display_plots( size, brute_time, recursive_time):
incrementer = brute_time / size
array_of_time_brute = [incrementer * x for x in range(size)]
n = np.arange(0, size)
plt.plot(n, array_of_time_brute,label='$O(n^2)$')
incrementer = recursive_time / size
array_of_time_recursive = [incrementer * x for x in range(size)]
n = np.arange(0, size)
plt.plot(n, array_of_time_recursive, label='$O(nlgn)$')
temp = np.argwhere(np.diff(np.sign(np.array(array_of_time_brute) - np.array(array_of_time_recursive)))).flatten()
print("the intercept is at times: ", temp, " seconds.")
plt.xlabel('n')
plt.ylabel('seconds')
plt.title('Brute Force vs Recursive')
plt.grid('on')
plt.legend()
plt.show()
def main():
size_of_array = -1
#makes sure input is positive int
while size_of_array < 1:
size_of_array = int(input("Enter the size of the array to be filled: "))
if size_of_array < 1:
print("please enter a positive number!")
# display info to screen
print("your number was: ", size_of_array)
random_array_of_ints = np.random.randint(-999, 1001, size_of_array)
copy_of_array = copy.deepcopy(random_array_of_ints)
#print("Array: ", random_array_of_ints)
# here we print sorted array
#max subarray brute force
start_brute = time.time()
max_left, max_right, max_profit = brute_force_find_max(random_array_of_ints)
end_brute = time.time()
print("BRUTE FORCE: max left index: ", max_left, "max right index", max_right, "max profit value: ",max_profit)
random_array_of_ints = copy_of_array
#max subarray recursive
start_recursive = time.time()
max_left, max_right, max_profit = find_maximum_subarray(random_array_of_ints, 0, size_of_array-1)
end_recursive = time.time()
print("RECURSIVE: max left index: ", max_left, "max right index", max_right, "max profit value: ", max_profit)
display_plots(size_of_array, (end_brute-start_brute), (end_recursive-start_recursive))
if __name__ == "__main__":
main()
|
"""Utility functions for Dyson Python library."""
import base64
import hashlib
import re
import time
from typing import Tuple
from .const import DEVICE_TYPE_360_EYE
from .exceptions import DysonFailedToParseWifiInfo
# For some devices, the model in WiFi SSID is not the same as the model for MQTT.
# The model on Dyson Cloud always matches the one used for MQTT.
_DEVICE_TYPE_MAP = {
"455A": "455",
}
def mqtt_time():
"""Return current time string for mqtt messages."""
return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
def get_credential_from_wifi_password(wifi_password: str) -> str:
"""Calculate MQTT credential from WiFi password."""
hash_ = hashlib.sha512()
hash_.update(wifi_password.encode("utf-8"))
return base64.b64encode(hash_.digest()).decode("utf-8")
def get_mqtt_info_from_wifi_info(
wifi_ssid: str, wifi_password: str
) -> Tuple[str, str, str]:
"""Get MQTT information from WiFi information."""
result = re.match(r"^[0-9A-Z]{3}-[A-Z]{2}-[0-9A-Z]{8}$", wifi_ssid)
if result is not None:
serial = wifi_ssid
device_type = DEVICE_TYPE_360_EYE
else:
result = re.match(
r"^DYSON-([0-9A-Z]{3}-[A-Z]{2}-[0-9A-Z]{8})-([0-9]{3}[A-Z]?)$", wifi_ssid
)
if result is not None:
serial = result.group(1)
device_type = result.group(2)
device_type = _DEVICE_TYPE_MAP.get(device_type, device_type)
else:
raise DysonFailedToParseWifiInfo
credential = get_credential_from_wifi_password(wifi_password)
return serial, credential, device_type
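# A hedged usage sketch (the SSID below is made up to match the regex above,
# not a real device):
#   serial, credential, device_type = get_mqtt_info_from_wifi_info(
#       "DYSON-ABC-DE-HJK12345-475", "example-wifi-password"
#   )
#   # serial == "ABC-DE-HJK12345", device_type == "475"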
|
from http_parser.parser import HttpParser
import aiohttp
class EmulatedClient(object):
""" Class for emulating the client to the server.
Notes:
To accomplish a proper man-in-the-middle attack with TLS capability,
the man-in-the-middle must be the one sending the original request to
the server. With the emulated client we are changing the typical structure:
client <-> server
To one that looks like so:
client <-> mitm (server) <-> mitm (emulated client) <-> server
Where we then reply back to the client with the response the emulated client
retrieved from the server on behalf of the client.
"""
def __init__(self, using_ssl):
# Creates our HttpParser object.
self.http_parser = HttpParser()
# Sets flag to whether or not we are using SSL.
self.using_ssl = using_ssl
async def connect(self, data):
# Parses the data coming in.
self.http_parser.execute(data, len(data))
host = self.http_parser.get_wsgi_environ()["HTTP_HOST"]
uri = self.http_parser.get_wsgi_environ()["RAW_URI"]
# Sets the proper URL client is trying to reach.
if self.using_ssl:
url = f"https://{host}:{uri}"
else:
url = uri
# Retrieves the destination server data.
async with aiohttp.ClientSession() as session:
async with session.get(url, ssl=False) as response:
status = response.status
reason = response.reason
headers = response.headers
response = await response.read()
# Re-creates the servers response.
resp = f"HTTP/1.1 {status} {reason}\r\n".encode("latin-1")
for header in headers:
resp += f"{header}: {headers[header]}\r\n".encode("latin-1")
resp += b"\r\n" + response
# Returns the data.
return resp
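# A hedged usage sketch (not part of the original file): feed the emulated
# client a raw absolute-form HTTP request, as a plaintext proxy would see it.
if __name__ == "__main__":
    import asyncio
    raw_request = b"GET http://example.com/ HTTP/1.1\r\nHost: example.com\r\n\r\n"
    reply = asyncio.run(EmulatedClient(using_ssl=False).connect(raw_request))
    print(reply.split(b"\r\n", 1)[0])  # e.g. b'HTTP/1.1 200 OK'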
|
#!/bin/python
# -*- coding: utf-8 -*-
# @brief PovRay Planetary Grid representation
# @date 2016/11/28
# @author B.Seignovert (univ-reims@seignovert.fr)
# @version 1.0
#----------------------------------------------------
import vapory as pov
import numpy as np
DIST_SUN = {'TITAN': 1.5e9 } # Distance to the Sun
BODY_RADIUS = {'TITAN': 2575 } # Planetary body radius[km]
INST_FOV = {'ISSNA': .352, 'ISSWA': 3.52 } # Instrument field of view [deg]
class PLANET_GRID:
def __init__(self,planet,inst,verbose=True):
self.target = planet
self.inst = inst
self.scene = None
self.verbose = verbose
return
def __repr__(self):
        return 'PovRay Planetary Grid representation of %s seen by %s.' % (self.target, self.inst)
# Convert lon/lat in Cartesian coordinates
def XYZ(self,lon,lat):
return np.array([ np.cos(np.radians(-lon))*np.cos(np.radians(lat)),np.sin(np.radians(lat)),np.sin(np.radians(-lon))*np.cos(np.radians(lat)) ])
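    # Sanity check: XYZ(0, 0) = [1, 0, 0], i.e. lon = 0, lat = 0 lies on the +x axis.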
# Set the observation geometry (Planet + Instrument positions)
def setGeo(self,SS,SC,dist,North):
SS_xyz = self.XYZ( SS['lon'], SS['lat'] ) ; SC_xyz = self.XYZ( SC['lon'], SC['lat'] )
declares = [
'SS_lon = %f; // Subsolar longitude [deg_W]' % SS['lon'],
'SS_lat = %f; // Subsolar latitude [deg_N]' % SS['lat'],
'SC_lon = %f; // Subspacecraft longitude [deg_W]' % SC['lon'],
'SC_lat = %f; // Subspacecraft latitude [deg_N]' % SC['lat'],
'R_Body = %f; // Planetary body radius [km]' % BODY_RADIUS[self.target]
]
camera = pov.Camera('angle', INST_FOV[self.inst], 'location', SC_xyz * dist, 'look_at', [0,0,0],'Axis_Rotate_Trans(',SC_xyz,',',-North,')')
light = pov.LightSource(SS_xyz * DIST_SUN[self.target], 'color','White')
obj = pov.Object('Grid')
self.scene = pov.Scene(declares=declares, camera=camera, objects=[light,obj], included=['colors.inc','transforms.inc','Planet_grid.inc'])
return self
# Render with PovRay script
def render(self,filename=None,width=None,height=None):
if filename is None: filename = '%s-%s.png' % (self.target, self.inst)
if self.scene is None: raise ValueError('Planet geo is not set. Run self.setGeo(SS,SC,dist,North)')
if self.verbose: print '>> Rendering PovRay Grid Globe...'
self.scene.render(filename , width=width, height=height, antialiasing=0.01)
return
if __name__ == '__main__':
# ISS image of Titan (N1827821295_1)
width = 1024
height = 1024
inst = 'ISSNA'
target = 'TITAN'
SS = {'lat': 25.6, 'lon': 129.4}
SC = {'lat': 0.4, 'lon': 277.0}
dist = 1.4e6
North = -178.7
filename = None
import sys
    if len(sys.argv) > 1 and sys.argv[1] in ['-h', '--help', 'usage']:
print 'USAGE: python grid.py filename SC_lat SC_lon SS_lat SS_lon dist north [NAC|WAC]'
sys.exit()
if len(sys.argv) > 1:
filename = sys.argv[1]
SC['lat'] = float(sys.argv[2])
SC['lon'] = float(sys.argv[3])
SS['lat'] = float(sys.argv[4])
SS['lon'] = float(sys.argv[5])
dist = float(sys.argv[6])
North = float(sys.argv[7])
if len(sys.argv) > 8:
if 'NA' in sys.argv[8]:
inst = 'ISSNA'
elif 'WA' in sys.argv[8]:
inst = 'ISSWA'
else:
raise ValueError('Instrument unknown')
PLANET_GRID(target,inst).setGeo(SS,SC,dist,North).render(filename=filename,width=width,height=height)
|
from django.urls import path
from . import views
urlpatterns = [
path('home/',views.terminal_home, name='index'),
path('latest/',views.terminal_news,name='terminal_news'),
path('sign-in/',views.sign_in,name='sign-in')
]
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Base value for integer blank. can be used as an unspecified blank
DCGM_INT32_BLANK = 0x7ffffff0
DCGM_INT64_BLANK = 0x7ffffffffffffff0
# Base value for double blank. 2 ** 47. FP 64 has 52 bits of mantissa,
#so 47 bits can still increment by 1 and represent each value from 0-15
DCGM_FP64_BLANK = 140737488355328.0
DCGM_STR_BLANK = "<<<NULL>>>"
# Represents an error where data was not found
DCGM_INT32_NOT_FOUND = (DCGM_INT32_BLANK+1)
DCGM_INT64_NOT_FOUND = (DCGM_INT64_BLANK+1)
DCGM_FP64_NOT_FOUND = (DCGM_FP64_BLANK+1.0)
DCGM_STR_NOT_FOUND = "<<<NOT_FOUND>>>"
# Represents an error where fetching the value is not supported
DCGM_INT32_NOT_SUPPORTED = (DCGM_INT32_BLANK+2)
DCGM_INT64_NOT_SUPPORTED = (DCGM_INT64_BLANK+2)
DCGM_FP64_NOT_SUPPORTED = (DCGM_FP64_BLANK+2.0)
DCGM_STR_NOT_SUPPORTED = "<<<NOT_SUPPORTED>>>"
# Represents and error where fetching the value is not allowed with our current credentials
DCGM_INT32_NOT_PERMISSIONED = (DCGM_INT32_BLANK+3)
DCGM_INT64_NOT_PERMISSIONED = (DCGM_INT64_BLANK+3)
DCGM_FP64_NOT_PERMISSIONED = (DCGM_FP64_BLANK+3.0)
DCGM_STR_NOT_PERMISSIONED = "<<<NOT_PERM>>>"
###############################################################################
# Functions to check if a value is blank or not
def DCGM_INT32_IS_BLANK(val):
if val >= DCGM_INT32_BLANK:
return True
else:
return False
def DCGM_INT64_IS_BLANK(val):
if val >= DCGM_INT64_BLANK:
return True
else:
return False
def DCGM_FP64_IS_BLANK(val):
if val >= DCGM_FP64_BLANK:
return True
else:
return False
#Looks for <<< at first position and >>> inside string
def DCGM_STR_IS_BLANK(val):
if 0 != val.find("<<<"):
return False
elif 0 > val.find(">>>"):
return False
return True
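# For example: DCGM_STR_IS_BLANK("<<<NULL>>>") -> True,
# DCGM_STR_IS_BLANK("0.75") -> False.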
###############################################################################
class DcgmValue:
def __init__(self, value):
self.value = value #Contains either an integer (int64), string, or double of the actual value
###########################################################################
def SetFromInt32(self, i32Value):
'''
Handle the special case where our source data was an int32 but is currently
stored in a python int (int64), dealing with blanks
'''
value = int(i32Value)
if not DCGM_INT32_IS_BLANK(i32Value):
self.value = value
return
if value == DCGM_INT32_NOT_FOUND:
self.value = DCGM_INT64_NOT_FOUND
elif value == DCGM_INT32_NOT_SUPPORTED:
self.value = DCGM_INT64_NOT_SUPPORTED
elif value == DCGM_INT32_NOT_PERMISSIONED:
self.value = DCGM_INT64_NOT_PERMISSIONED
else:
self.value = DCGM_INT64_BLANK
###########################################################################
def IsBlank(self):
'''
Returns True if the currently-stored value is a blank value. False if not
'''
if self.value is None:
return True
elif type(self.value) == int or type(self.value) == long:
return DCGM_INT64_IS_BLANK(self.value)
elif type(self.value) == float:
return DCGM_FP64_IS_BLANK(self.value)
elif type(self.value) == str:
return DCGM_STR_IS_BLANK(self.value)
else:
            raise Exception("Unknown type: %s" % str(type(self.value)))
###########################################################################
def __str__(self):
return str(self.value)
###########################################################################
###############################################################################
def self_test():
v = DcgmValue(1.0)
assert(not v.IsBlank())
assert(v.value == 1.0)
v = DcgmValue(100)
assert(not v.IsBlank())
assert(v.value == 100)
v = DcgmValue(DCGM_INT64_NOT_FOUND)
assert(v.IsBlank())
v = DcgmValue(DCGM_FP64_NOT_FOUND)
assert(v.IsBlank())
v.SetFromInt32(DCGM_INT32_NOT_SUPPORTED)
assert(v.IsBlank())
assert(v.value == DCGM_INT64_NOT_SUPPORTED)
print "Tests passed"
return
###############################################################################
if __name__ == "__main__":
self_test()
###############################################################################
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
from azure.identity import DefaultAzureCredential
from azure.mgmt.resource import ApplicationClient, ResourceManagementClient
def main():
SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
GROUP_NAME = "testgroupx"
GROUP_NAME_2 = "testgroupx2"
APP_DEF_NAME = "applicationdefinition"
APPLICATION_NAME = "applicationtest"
# Create client
# For other authentication approaches, please see: https://pypi.org/project/azure-identity/
app_client = ApplicationClient(
credential=DefaultAzureCredential(),
subscription_id=SUBSCRIPTION_ID
)
resource_client = ResourceManagementClient(
credential=DefaultAzureCredential(),
subscription_id=SUBSCRIPTION_ID
)
# Create resource group
resource_client.resource_groups.create_or_update(
GROUP_NAME,
{"location": "eastus"}
)
resource_client.resource_groups.create_or_update(
GROUP_NAME_2,
{"location": "eastus"}
)
# Create application definition
app_definition = app_client.application_definitions.begin_create_or_update(
GROUP_NAME,
APP_DEF_NAME,
{
"lock_level": "None",
"display_name": "myManagedApplicationDef",
"description": "myManagedApplicationDef description",
"authorizations": [],
"package_file_uri": "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-managed-application/artifacts/ManagedAppZip/pkg.zip",
"location": "East US"
}
).result()
print("Create application definition: {}".format(app_definition))
# Get application definition
app_definition = app_client.application_definitions.get(
GROUP_NAME,
APP_DEF_NAME
)
print("Get application definition: {}".format(app_definition))
# Create application
app = app_client.applications.begin_create_or_update(
GROUP_NAME,
APPLICATION_NAME,
{
"application_definition_id": app_definition.id,
"managed_resource_group_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/myManagedRG" + GROUP_NAME_2,
"location": "East US",
"kind": "ServiceCatalog"
}
)
# ).result()
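    # Note: begin_create_or_update returns a poller; chaining .result() (as in
    # the commented variant above) blocks until provisioning completes.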
print("Create application: {}".format(app))
# Get application
app = app_client.applications.get(
GROUP_NAME,
APPLICATION_NAME
)
print("Get application: {}".format(app))
# Update application
app = app_client.applications.update(
GROUP_NAME,
APPLICATION_NAME,
{
"managed_resource_group_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/myManagedRG" + GROUP_NAME_2,
"kind": "ServiceCatalog"
}
)
print("Update application: {}".format(app))
# Delete application
app_client.applications.begin_delete(
GROUP_NAME,
APPLICATION_NAME
).result()
print("Delete application.")
# Delete application definition
app_client.application_definitions.begin_delete(
GROUP_NAME,
APP_DEF_NAME
).result()
print("Delete appliation definition.")
if __name__ == "__main__":
main()
|
import numpy as np
import platform
import matplotlib as mpl
if platform.system() == 'Darwin': # avoid bugs in some versions of matplotlib with macOS catalina
mpl.use('Qt5Agg')
import matplotlib.pyplot as plt
import networkx as nx
from typing import Any, Callable, List, Union
from .agents import BaseAgent, SingleIntegratorAgent
from .event_trigger import EventTrigger
from .common import length, calculate_convex_optimal
class MultiagentControl():
def __init__(self, num_agents: int, id: int = None, graph: nx.Graph = None, agents: List[BaseAgent] = None, agent_type: BaseAgent = SingleIntegratorAgent, state_dim: int = 1, init_state: List[List[float]] = None, step_size: float = 1.0):
"""Initialisation for the multi-agent system
Args:
num_agents (int): Number of agents in the network
id (int, optional): Identifier of the multi-agent system. Defaults to None.
graph (nx.Graph, optional): Graph representation of the communication network. Defaults to a random regular graph.
agents (List[BaseAgent], optional): List of the agents. Defaults to None.
agent_type (BaseAgent, optional): Type of the agents. Defaults to SingleIntegratorAgent.
state_dim (int, optional): Dimension of the agents' internal state. Defaults to 1.
init_state (List[List[float]], optional): Initial states for all agents. Defaults to None.
step_size (float, optional): Step size for time discretisation. Defaults to 1.0.
"""
self.num_agents = num_agents
self.state_dim = state_dim
self.step_size = step_size
self.id = id
if graph is not None:
self.graph = graph
else:
self.graph = nx.random_regular_graph(2, num_agents)
        self.laplacian = nx.laplacian_matrix(self.graph)
if init_state is None:
self.agents = agents if agents is not None else [agent_type(state_dim=state_dim, step_size=step_size) for _ in range(self.num_agents)]
else:
self.agents = agents if agents is not None else [agent_type(state_dim=state_dim, step_size=step_size, init_state=init_state[i]) for i in range(self.num_agents)]
for i in range(self.num_agents):
for j in range(self.num_agents):
Lij = self.laplacian[i,j]
if Lij < 0:
self.agents[i].add_neighbours(self.agents[j], -Lij)
def step(self):
"""Proceed one step for all agents
"""
for agent in self.agents:
agent.step()
def reset(self):
"""Reset the multi-agent system
"""
# TODO: implement the reset function
pass
def set_triggering_law(self, trigger: 'EventTrigger'):
"""Set the event-triggering law for all agents
Args:
trigger (EventTrigger): Event-triggering law to be set
"""
for agent in self.agents:
agent.set_triggering_law(trigger)
def set_objective_functions(self, objective_functions: List[Callable]):
"""Set the objective functions for each agent
Args:
objective_functions (List[Callable]): List of objective functions for each agent, according to the agent id
"""
self.global_obj = lambda x: np.sum([f(x) for f in objective_functions])
for i in range(self.num_agents):
self.agents[i].set_objective_function(objective_functions[i])
def set_dynamics_functions(self, dynamics_functions: List[Callable]):
"""Set the dynamics functions for each agent
Args:
dynamics_functions (List[Callable]): [description]
"""
for i in range(self.num_agents):
self.agents[i].set_dynamics_function(dynamics_functions[i])
def get_id(self) -> Union[int, None]:
"""Get the object identifier if it exists
Returns:
Union[int, None]: the object identifier
"""
return self.id
def get_num_agents(self) -> int:
"""Get the number of agents in the multi-agent system
Returns:
int: number of agents in the system
"""
return self.num_agents
    def get_time(self) -> np.array:
        """Get the vector of recorded time instants of the multi-agent system
        Returns:
            np.array: recorded time instants
        """
        _t = np.array(self.agents[0].stat['t'])
        return _t
def get_step_size(self) -> float:
"""Get the step size of the multi-agent system
Returns:
float: step size
"""
return self.step_size
def get_states(self) -> List[np.array]:
"""Get the states of all agents
Returns:
List[np.array]: List of states of agents
"""
return [agent.stat['x'] for agent in self.agents]
def get_mean_states(self) -> np.array:
"""Get the mean state of all agents
Returns:
np.array: mean state of all agents
"""
return np.mean([agent.get_state() for agent in self.agents], axis=0)
def get_current_consensus_error(self) -> float:
"""Get the latest consensus error
Returns:
float: latest consensus error
"""
return self.get_consensus_error()[-1]
def get_consensus_error(self) -> np.array:
"""Get the consensus error for all time instances
Returns:
np.array: consensus error
"""
        _err = 0
        # Mean trajectory over all agents.
        _e = 0
        for agent in self.agents:
            _e += np.transpose(agent.stat['x'])
        _e = _e / self.num_agents
for agent in self.agents:
_err += (np.transpose(agent.stat['x']) - _e) ** 2
_err = np.sum(_err, axis=0)
return _err
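    # In symbols, the error computed above is epsilon(t) = sum_i ||x_i(t) - x_bar(t)||^2,
    # where x_bar(t) is the mean trajectory over all agents.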
def get_communication_rate(self) -> float:
"""Get the average communication rate for all time instances
Returns:
float: average communication rate
"""
_Gamma = 0
_gammas = 0
_t = self.get_time()
for agent in self.agents:
_gammas = _gammas + np.array(agent.stat['gamma'])
_gammas[0] = 0
_t[0] = 1e-5
_Gamma = np.cumsum(_gammas) / (self.num_agents * _t)
_Gamma[0] = 0
return _Gamma
def get_state_dim(self) -> int:
"""Get the dimension of agent states
Returns:
int: state dimension
"""
return self.state_dim
def plot_states(self, index: int = 1, grid: bool = True, xlim: tuple = None, ylim: tuple = None, zoom: bool = True):
"""Plot the agent states
Args:
index (int, optional): figure index. Defaults to 1.
            grid (bool, optional): whether or not to display grid. Defaults to True.
            xlim (tuple, optional): display limit on x axis. Defaults to None.
            ylim (tuple, optional): display limit on y axis. Defaults to None.
            zoom (bool, optional): whether or not to create zoomed view. Defaults to True.
"""
for k in range(self.num_agents):
agent = self.agents[k]
_t = agent.stat['t']
_x = np.transpose(agent.stat['x'])
for n in range(self.state_dim):
fig = plt.figure(n+index)
_xn = _x[n]
plt.plot(_t, _xn, label='Agent ' + str(k+1))
plt.xlabel('$t$', fontsize=14)
plt.ylabel('$x_i^' + str(n+1) + '(t)$', fontsize=14)
try:
_obj = self.global_obj
_opt_sol = calculate_convex_optimal(_obj, self.state_dim)
if self.state_dim == 1:
_v = np.ones(length(_t)) * _opt_sol
plt.plot(_t, _v, label='Optimal Value', linestyle='dashdot')
else:
for n in range(self.state_dim):
plt.figure(n+index)
_v = np.ones(length(_t)) * _opt_sol[n]
plt.plot(_t, _v, label='Optimal Value', linestyle='dashdot')
except AttributeError:
pass
for n in range(self.state_dim):
plt.figure(n+index)
if self.num_agents < 10:
plt.legend()
if grid:
plt.grid(True)
if xlim != None:
plt.xlim(xlim)
if ylim != None:
plt.ylim(ylim)
if zoom:
for n in range(self.state_dim):
fig = plt.figure(n+index)
ax_new = fig.add_axes([0.2, 0.175, 0.35, 0.225])
for k in range(self.num_agents):
agent = self.agents[k]
_t = agent.stat['t']
_x = np.transpose(agent.stat['x'])
_xn = _x[n]
plt.plot(_t[0:160], _xn[0:160])
ax_new.set_xlim([0, 0.4])
# ax_new.set_ylim(ylim)
    def plot_consensus_error(self, index: int = None, loglog: bool = False, semilogy: bool = True, grid: bool = True, label: str = '', color: str = None, xlim: tuple = None, ylim: tuple = None, zoom: bool = True, fig = None, ax1 = None, ax_new = None, zoom_xlim: tuple = None, zoom_ylim: tuple = None, zoom_pos: List[float] = [0.325, 0.65, 0.35, 0.225]):
"""Plot the consensus error
Args:
index (int, optional): figure index. Defaults to None.
loglog (bool, optional): log-log scale. Defaults to False.
semilogy (bool, optional): semilog-y scale. Defaults to True.
grid (bool, optional): display grid. Defaults to True.
            label (str, optional): figure label. Defaults to ''.
color (str, optional): plot colour. Defaults to None.
xlim (tuple, optional): display limit on x axis. Defaults to None.
ylim (tuple, optional): display limit on y axis. Defaults to None.
zoom (bool, optional): display zoomed view. Defaults to True.
fig ([type], optional): figure to superpose on. Defaults to None.
ax1 ([type], optional): axis to superpose on. Defaults to None.
ax_new ([type], optional): new axis. Defaults to None.
zoom_xlim (tuple, optional): range limit on x axis of the zoomed view. Defaults to None.
zoom_ylim (tuple, optional): range limit on y axis of the zoomed view. Defaults to None.
zoom_pos (List[float], optional): zoomed view position. Defaults to [0.325, 0.65, 0.35, 0.225].
Returns:
Any: figure, ax1, ax_new
"""
if index is None:
index = self.state_dim + 1
_t = self.get_time()
_err = self.get_consensus_error()
if fig is None and ax1 is None:
fig = plt.figure(index)
ax1 = plt.subplot(1,1,1, label='ce')
if loglog:
plot = ax1.loglog
elif semilogy:
plot = ax1.semilogy
else:
plot = ax1.plot
if color != None:
plot(_t, _err, label=label, color=color)
else:
plot(_t, _err, label=label)
ax1.set_xlabel(r'$t$', fontsize=14)
ax1.set_ylabel(r'$\varepsilon(t)$', fontsize=14)
if grid:
ax1.grid(True)
        if label != '' and label is not None:
            ax1.legend()
if xlim != None:
ax1.set_xlim(xlim)
if ylim != None:
ax1.set_ylim(ylim)
if zoom:
if ax_new is None:
ax_new = fig.add_axes(zoom_pos)
if loglog:
plot = ax_new.loglog
elif semilogy:
plot = ax_new.semilogy
else:
plot = ax_new.plot
            if color is not None:
plot(_t, _err, color=color)
else:
plot(_t, _err)
            if zoom_xlim is not None:
                ax_new.set_xlim(zoom_xlim)
            else:
                ax_new.set_xlim([0, 2])
            if zoom_ylim is not None:
                ax_new.set_ylim(zoom_ylim)
return fig, ax1, ax_new
def plot_communication_rate(self, index: int = None, grid: bool = True, label: str = '', color: str = None, xlim: tuple = None, ylim: tuple = None, legend_loc: int = 1, zoom: bool = True, fig = None, ax1 = None, ax_new = None, zoom_xlim: tuple = None, zoom_ylim: tuple = None, zoom_pos: List[float] = [0.17, 0.65, 0.35, 0.225]):
"""Plot the average communication rate
Args:
index (int, optional): figure index. Defaults to None.
grid (bool, optional): display grid. Defaults to True.
label (str, optional): figure label. Defaults to ''.
color (str, optional): plot colour. Defaults to None.
xlim (tuple, optional): display limit on x axis. Defaults to None.
ylim (tuple, optional): display limit on y axis. Defaults to None.
legend_loc (int, optional): legend location. Defaults to 1.
zoom (bool, optional): display zoomed view. Defaults to True.
fig ([type], optional): figure to superpose on. Defaults to None.
ax1 ([type], optional): axis to superpose on. Defaults to None.
ax_new ([type], optional): new axis. Defaults to None.
zoom_xlim (tuple, optional): range limit on x axis of the zoomed view. Defaults to None.
zoom_ylim (tuple, optional): range limit on y axis of the zoomed view. Defaults to None.
zoom_pos (List[float], optional): zoomed view position. Defaults to [0.17, 0.65, 0.35, 0.225].
Returns:
Any: figure, ax1, ax_new
"""
if index is None:
index = self.state_dim + 2
_t = self.get_time()
_Gamma = self.get_communication_rate()
if fig is None and ax1 is None:
fig = plt.figure(index)
ax1 = plt.subplot(1,1,1, label='ce')
        if color is not None:
ax1.plot(_t, _Gamma, label=label, color=color)
else:
ax1.plot(_t, _Gamma, label=label)
ax1.set_xlabel(r'$t$', fontsize=14)
ax1.set_ylabel(r'$\Gamma(t)$', fontsize=14)
if grid:
ax1.grid(True)
        if label != '' and label is not None:
            ax1.legend(loc=legend_loc)
        if xlim is not None:
            ax1.set_xlim(xlim)
        if ylim is not None:
            ax1.set_ylim(ylim)
if zoom:
if ax_new is None:
ax_new = fig.add_axes(zoom_pos)
            if color is not None:
                ax_new.plot(_t, _Gamma, color=color)
            else:
                ax_new.plot(_t, _Gamma)
            if zoom_xlim is not None:
                ax_new.set_xlim(zoom_xlim)
            else:
                ax_new.set_xlim([0, 2])
            if zoom_ylim is not None:
                ax_new.set_ylim(zoom_ylim)
return fig, ax1, ax_new
def plot_interevent_time(self, index = 100, grid = True, label = '', color = None, xlim = None, ylim = None, legend_loc = 1):
# TODO: plot the interevent interval
pass
def draw_graph(self, index: int = None):
"""Display the underlying communication graph of the multi-agent system
Args:
            index (int, optional): figure index. Defaults to None.
"""
plt.figure(index)
nx.draw(self.graph)
def show_plots(self):
"""Alias function for plt.show() from matplotlib
"""
plt.show()
|
''' Unit tests for module utility_functions '''
import unittest
from utility_functions import calculate_distance, delete_indexes
from class_definitions import ThreeDimensionalPoint
class TestDistance(unittest.TestCase):
'''Test return value and test type error '''
# Check if calculation is done correctly
def test_distance(self):
''' Check if calculation is done correctly '''
        # Check that the distance from (1, 1, 1) to (1, 1, 2) is 1
self.assertAlmostEqual(calculate_distance(ThreeDimensionalPoint(1, 1, 1),
ThreeDimensionalPoint(1, 1, 2)), 1)
# Check if the distance from a point to itself is 0
self.assertAlmostEqual(calculate_distance(ThreeDimensionalPoint(1, 2, 3),
ThreeDimensionalPoint(1, 2, 3)), 0)
        # Check if the distance is calculated correctly (should be 6.928203230275509)
self.assertAlmostEqual(calculate_distance(ThreeDimensionalPoint(-1, -2, -3),
ThreeDimensionalPoint(-5, -6, -7)), 6.928203230275509)
def test_inputs(self):
''' Check if type errors are raised correctly '''
# 1. Test response to wrong type
# 1.1 arg1 of correct type, arg2 of wrong type
self.assertRaises(TypeError, calculate_distance, ThreeDimensionalPoint(-5, -6, -7), 5)
# 1.2 arg1 of wrong type, arg2 of correct type
self.assertRaises(TypeError, calculate_distance, 5, ThreeDimensionalPoint(-5, -6, -7))
class TestDeleteIndexes(unittest.TestCase):
''' Test type errors and value errors '''
# Check to see if type errors and value errors are raised correctly
def test_inputs(self):
''' Test for errors with wrong type and empty list '''
# 1. Test response to wrong type
# 1.1 arg1 of correct type, arg2 of wrong type
self.assertRaises(TypeError, delete_indexes, [1, 2, 3, 4, 5], 5)
# 1.2 arg1 of wrong type, arg2 of correct type
self.assertRaises(TypeError, delete_indexes, 5, [1, 2, 3, 4, 5])
# 2. Test response to empty list
# 2.1 arg1 of correct type, arg2 empty list
self.assertRaises(ValueError, delete_indexes, [0, 1, 2], [])
# 2.2 arg1 empty list, arg2 of correct type
self.assertRaises(ValueError, delete_indexes, [], [0, 1, 2])
# 3. Test if code is correctly working
data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
deletion = [0, 3, 6, 9]
correct_result = [1, 2, 4, 5, 7, 8]
self.assertEqual(delete_indexes(data, deletion), correct_result)
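
# Added so the test file can also be executed directly:
if __name__ == '__main__':
    unittest.main()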
|
#!/opt/conda_envs/collection-2018-1.0/bin/python
import sys
import os
fname1 = sys.argv[1]
fname2 = sys.argv[2]
pinAlignDir = os.environ['PINALIGNDIR']
baseDirectory = os.environ["PWD"]
beamline = os.environ["BEAMLINE_ID"]
runningDir = baseDirectory + "/pinAlign"
os.chdir(runningDir)
scriptName = os.path.join(pinAlignDir, f'pin_align_{beamline}.sh')
comm_s = f'{scriptName} {fname1} {fname2}'
lines = os.popen(comm_s).readlines()
tilted = False
##### TODO add check for high values
for outputline in lines:
if (outputline.find("TILTED") != -1 or outputline.find('MISSING') != -1 or outputline.find('VIOLATION') != -1):
print(outputline)
tilted = True
sys.exit()
if not tilted:
for outputline in lines:
try:
if (outputline.find("OVERALL X,Y,Z OFFSETS TO CENTER") != -1):
index_s = outputline.find("[")
substring = outputline[index_s + 1:len(outputline) - 3]
offsetTokens = substring.split(',')
print(offsetTokens[0] + " " +
offsetTokens[1] + " " + offsetTokens[2])
sys.exit()
except Exception:
print('Top-view error, pin could be out of view, manual centering required')
|
import unittest
class UnionFind:
def __init__( self ):
self.numberOfDisjointSets = 0
self.elementToRepresentativeDict = dict()
def add( self, element ):
assert element not in self.elementToRepresentativeDict
self.elementToRepresentativeDict[ element ] = element
self.numberOfDisjointSets += 1
def merge( self, elementA, elementB ):
assert elementA in self.elementToRepresentativeDict and elementB in self.elementToRepresentativeDict
_representativeA = self.representative( elementA )
_representativeB = self.representative( elementB )
if _representativeA != _representativeB:
self.elementToRepresentativeDict[ _representativeA ] = _representativeB
self.numberOfDisjointSets -= 1
def representative( self, element ):
assert element in self.elementToRepresentativeDict
_representative = self.elementToRepresentativeDict[ element ]
if _representative == element:
return _representative
_representative = self.elementToRepresentativeDict[ element ] = self.representative( _representative )
return _representative
def getDisjointSets( self ):
representativeToElementsDict = dict()
for element in self.elementToRepresentativeDict.keys():
representative = self.representative( element )
if representative not in representativeToElementsDict:
representativeToElementsDict[ representative ] = list()
representativeToElementsDict[ representative ].append( element )
return list( representativeToElementsDict.values() )
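
# Added for illustration (not part of the original file): a quick exercise of
# the UnionFind above on four elements merged into two disjoint sets.
def _demoUnionFind():
    uf = UnionFind()
    for name in ('a', 'b', 'c', 'd'):
        uf.add( name )
    uf.merge( 'a', 'b' )
    uf.merge( 'c', 'd' )
    assert uf.numberOfDisjointSets == 2
    assert sorted( map( sorted, uf.getDisjointSets() ) ) == [ ['a', 'b'], ['c', 'd'] ]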
class CallingCirclesComparator:
@staticmethod
def compare( testObject, circleList1, circleList2 ):
circleSet1 = set()
circleSet2 = set()
for circleElementList in circleList1:
circleSet1.add( '#'.join( sorted( circleElementList ) ) )
for circleElementList in circleList2:
circleSet2.add( '#'.join( sorted( circleElementList ) ) )
testObject.assertEqual( circleSet1, circleSet2 )
class CallingCircles:
def __init__( self, callLogList ):
self.callLogDict = dict()
for caller, callee in callLogList:
for customer in (caller, callee):
if customer not in self.callLogDict:
self.callLogDict[ customer ] = list()
self.callLogDict[ caller ].append( callee )
self.uf = UnionFind()
for caller in self.callLogDict:
self.uf.add( caller )
def circles( self ):
for caller in self.callLogDict.keys():
self._search( caller, set(), set(), list() )
return self.uf.getDisjointSets()
def _search( self, caller, visited, pathTaken, pathTakenStack ):
if caller in visited:
return
if caller in pathTaken:
index = -1
while pathTakenStack[ index ] != caller:
self.uf.merge( caller, pathTakenStack[ index ] )
index = index - 1
return
pathTaken.add( caller )
pathTakenStack.append( caller )
for callee in self.callLogDict[ caller ]:
self._search( callee, visited, pathTaken, pathTakenStack )
pathTakenStack.pop()
pathTaken.remove( caller )
visited.add( caller )
class CallingCirclesTest( unittest.TestCase ):
def test_callingCircles( self ):
for datafile in ('example', 'sample', 'callingcircles'):
self._verify( datafile )
def _verify( self, datafile ):
callingCirclesObjectList = list()
testcaseCount = 0
with open( 'tests/{}.in'.format( datafile ) ) as inputFile:
while True:
m, n = map( int, inputFile.readline().strip().split() )
if m == 0 and n == 0:
break
testcaseCount += 1
print( 'datafile = {} testcase = {} CallLog Size = {}'.format( datafile, testcaseCount, n ) )
callLogList = list()
for _ in range( n ):
callLogList.append( tuple( inputFile.readline().strip().split() ) )
callingCirclesObjectList.append( CallingCircles( callLogList ) )
index = 0
currentCallingCircle = list()
with open( 'tests/{}.ans'.format( datafile ) ) as solutionFile:
for line in solutionFile.readlines():
line = line.strip()
if 'Calling circles' in line:
continue
if len( line ) == 0:
CallingCirclesComparator.compare( self, currentCallingCircle, callingCirclesObjectList[ index ].circles() )
index += 1
currentCallingCircle = list()
else:
currentCallingCircle.append( list( map( lambda name : name.strip(), line.split( ',' ) ) ) )
if len( currentCallingCircle ) > 0:
CallingCirclesComparator.compare( self, currentCallingCircle, callingCirclesObjectList[ index ].circles() )
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8
def create_auth_token(auth_token_store, user):
return auth_token_store.create(user)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 28 09:51:36 2018
@author: user
"""
from ynltk import Langvowel, OmnibusStem
txt = "Dit is een boek."
l=Langvowel()
s=OmnibusStem()
print(l.langvowel(txt))
print(s.compStemmer("haus","huis"))
|
import re
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
prefix = 'rails'
private_var_names = [
'deploy_user',
'env',
'env_vars',
'gemset',
'group',
'root_dir',
'ruby',
'rvm',
'service',
'user',
]
path_re = re.compile(r'^/[-_./a-z0-9]*$')
common_name_re = re.compile(r'^[a-z_][a-z0-9_-]{0,30}(\$|[a-z0-9_-])?$')
env_var_name_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
ruby_version_re = re.compile(r'^ruby-[0-9]+\.[0-9]+\.[0-9]+$')
domain_name_re = re.compile(
r'^(?:[a-z0-9]' # First character of the domain
r'(?:[a-z0-9-_]{0,61}[a-z0-9])?\.)' # Sub domain + hostname
r'+[a-z0-9][a-z0-9-_]{0,61}' # First 61 characters of the gTLD
r'[a-z0-9]$' # Last character of the gTLD
)
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
result['changed'] = False
result['failed'] = False
for var_name in self.private_var_names:
full_var_name = '%s__%s' % (self.prefix, var_name)
validator = getattr(self, 'validate_' + var_name)
value = task_vars[full_var_name]
msg_part = validator(value)
if msg_part is not None:
value = str(value)
if len(value) <= 30:
partial_value = value
else:
partial_value = '%s...' % value[0:30]
full_msg = "Invalid var '%s' with value '%s': %s" % (
full_var_name,
partial_value,
msg_part,
)
result['failed'] = True
result['msg'] = full_msg
return result
return result
def validate_deploy_user(self, value):
if not isinstance(value, str):
return 'is not str'
if not self.common_name_re.fullmatch(value):
return 'has invalid format'
def validate_env(self, value):
if not isinstance(value, str):
return 'is not str'
if value not in ['production', 'staging']:
return 'is invalid'
def validate_env_vars(self, obj):
if not isinstance(obj, dict):
return 'is not dict'
if not all(isinstance(key, str) for key in obj.keys()):
return 'key is not str'
if not all(self.env_var_name_re.fullmatch(key) for key in obj.keys()):
return 'key has invalid format'
def validate_gemset(self, value):
if not isinstance(value, str):
return 'is not str'
if not self.common_name_re.fullmatch(value):
return 'has invalid format'
def validate_group(self, value):
if not isinstance(value, str):
return 'is not str'
if not self.common_name_re.fullmatch(value):
return 'has invalid format'
def validate_root_dir(self, value):
if not isinstance(value, str):
return 'is not str'
if not self.path_re.fullmatch(value):
return 'has invalid format'
def validate_ruby(self, value):
if not isinstance(value, str):
return 'is not str'
if not self.ruby_version_re.fullmatch(value):
return 'has invalid format'
def validate_rvm(self, value):
if not isinstance(value, str):
return 'is not str'
if not self.path_re.fullmatch(value):
return 'has invalid format'
def validate_service(self, value):
if not isinstance(value, str):
return 'is not str'
if not self.common_name_re.fullmatch(value):
return 'has invalid format'
def validate_user(self, value):
if not isinstance(value, str):
return 'is not str'
if not self.common_name_re.fullmatch(value):
return 'has invalid format'
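
# Added for illustration only (not part of the plugin): the validators boil
# down to plain regex checks against the class-level patterns, e.g.:
def _demo_name_validation():
    common = ActionModule.common_name_re
    assert common.fullmatch('deploy_user')
    assert common.fullmatch('www-data')
    assert not common.fullmatch('9bad')  # must not start with a digit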
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import path
import smtplib
import quopri
from jinja2 import Template
from PyMassMailer.ConfigData import ConfigData
from PyMassMailer.SenderMail import SenderMail
from PyMassMailer.DataParser import DataParser
from PyMassMailer.TemplateEngine import TemplateEngine
if __name__ == "__main__":
conf = ConfigData()
data_parser = DataParser(conf.get_section('data'))
address = data_parser.getData()
engine = TemplateEngine(Template)
sender = SenderMail(conf, smtplib, engine)
view = open(
path.normpath('templates/test.html'),
'r',
encoding='UTF-8'
).read()
# print(view)
sender.set_address(address).send_all(
view,
{},
'test'
)
|
from newton.trade.tools import last_price
from newton.rules.exit.base import Exit
class BuyLowerExit(Exit):
def __init__(self, exit_price, name=None):
super().__init__(name=name)
self.exit_price = exit_price
def can_exit(self, trade):
return self.exit_price > last_price(trade.currency_pair)
|
# ImageNet-CoG Benchmark
# Copyright 2021-present NAVER Corp.
# 3-Clause BSD License
import logging
from typing import List
import numpy as np
import torch as th
logger = logging.getLogger()
class FeatureSet():
"""
Store (image feature, label) pairs.
"""
def __init__(self, x, y, name):
assert x.shape[0] == y.shape[0]
self.name = name
self.x = x
self.y = y
def print_info(self):
logger.info("FeatureSet:{} | x:{}, y:{} | x.norm:{:.3f} +- {:.3f}, x.non_zero:{:.3f} | unique(y):{}".format(
self.name,
list(self.x.shape),
list(self.y.shape),
self.x.norm(dim=1, p=2).mean(),
self.x.norm(dim=1, p=2).std(),
(self.x != 0).float().sum(dim=1).mean(),
th.unique(self.y).shape[0]
))
def to_gpu(self):
"""
Move the tensors into GPU.
"""
self.x = self.x.to("cuda")
self.y = self.y.to("cuda")
def move_data_to_cuda(data_sets: List[FeatureSet]):
"""
Moves the data to GPU if we have enough GPU memory.
"""
mem_1gb = 1024 * 1024 * 1024
    mem_required = sum(np.prod(fset.x.shape) for fset in data_sets) * 4 + 2 * mem_1gb  # a float is 4 bytes
mem_available = th.cuda.get_device_properties(0).total_memory
mem_available -= th.cuda.memory_cached(0) + th.cuda.memory_allocated(0)
def in_gb(x):
return x / mem_1gb
logger.info("{:.1f}GB GPU memory available, {:.1f}GB required.".format(in_gb(mem_available), in_gb(mem_required)))
if mem_available > mem_required:
logger.info("Moving all the data to GPU.")
        for fset in data_sets:
            fset.to_gpu()
else:
logger.info("Not enough space in GPU. Data will stay in CPU memory.")
def load_feature_set(path, name="", normalize=False):
"""
Loads features from the file (path).
The file is expected to be saved by torch.save and contain torch.tensors named X and Y.
normalize: whether to apply l2 normalization or not.
"""
# load features
pkl = th.load(path, "cpu")
X = pkl["X"]
Y = pkl["Y"]
name = pkl.get("name", name)
assert X.shape[0] == Y.shape[0]
logger.info(f"Features of {name} are loaded.")
if normalize:
logger.info("Applying l2-normalization to the features.")
X = normalize_features(X)
fset = FeatureSet(X, Y, name)
return fset
def normalize_features(X):
"""
L2-Normalizes the features.
"""
if isinstance(X, np.ndarray):
norm = np.linalg.norm(X, axis=1)
X = X / (norm + 1e-5)[:, np.newaxis]
elif isinstance(X, th.Tensor):
X = th.nn.functional.normalize(X, p=2, dim=1)
else:
raise NotImplementedError("Unknown type:{}".format(type(X)))
return X
def split_trainset(trainset, p_val=0.2):
"""
    Randomly split the training set into train and val.
Args:
p_val: percentage of the validation set.
"""
train_inds = []
val_inds = []
for cix in th.unique(trainset.y):
# samples belonging to this class cix
inds = th.where(trainset.y == cix)[0]
# random ordering of these samples
order = th.randperm(inds.shape[0])
inds = inds[order]
        # number of validation samples
        n_val = int(inds.shape[0] * p_val)
        # split the indices into train and val; note that inds[:-n_val] would
        # be empty when n_val == 0, so slice from the front instead
        train_inds.append(inds[:inds.shape[0] - n_val])
        val_inds.append(inds[inds.shape[0] - n_val:])
train_inds = th.cat(train_inds, dim=0)
val_inds = th.cat(val_inds, dim=0)
assert len(trainset.y) == len(train_inds) + len(val_inds), \
"While splitting the training set into (train, val), some samples are ignored."
assert len(np.intersect1d(train_inds.numpy(), val_inds.numpy())) == 0, \
"Training and validation sets overlap!"
x_train = trainset.x[train_inds]
y_train = trainset.y[train_inds]
x_val = trainset.x[val_inds]
y_val = trainset.y[val_inds]
return (
FeatureSet(x_train, y_train, "train"),
FeatureSet(x_val, y_val, "val"),
)
def make_fewshot_dataset(feature_set, n_shot):
"""
Randomly select n_shot sample per class from the feature set.
"""
assert n_shot > 0, f"n_shot ({n_shot}) must be > 0"
x = []
y = []
for cix in th.unique(feature_set.y):
# samples whose label is cix
inds = th.where(feature_set.y == cix)[0]
# random order of samples
inds = inds[th.randperm(inds.shape[0])]
# split the validation set
x.append(feature_set.x[inds[:n_shot]])
y.append(feature_set.y[inds[:n_shot]])
x = th.cat(x, dim=0)
y = th.cat(y, dim=0)
assert len(x) == len(y), f"Interesting, the length of x ({x.shape}) and y ({y.shape}) do not match."
assert len(x) == n_shot * len(th.unique(feature_set.y)), "It seems that we didn't sample the same number of samples per class."
return FeatureSet(x, y, feature_set.name + "_{}-shot".format(n_shot))
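
# Added illustration (not part of the original file): how split_trainset and
# make_fewshot_dataset compose on synthetic features.
def _demo_splits():
    x = th.randn(100, 8)
    y = th.arange(10).repeat_interleave(10)  # 10 classes, 10 samples each
    train, val = split_trainset(FeatureSet(x, y, "synthetic"), p_val=0.2)
    fewshot = make_fewshot_dataset(train, n_shot=3)
    for s in (train, val, fewshot):
        s.print_info()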
|
# 1530. Number of Good Leaf Nodes Pairs
# Given the root of a binary tree and an integer distance.
# A pair of two different leaf nodes of a binary tree is said to be good
# if the length of the shortest path between them is less than or equal to distance.
# Return the number of good leaf node pairs in the tree.
# Example 1:
# Input: root = [1,2,3,null,4], distance = 3
# Output: 1
# Explanation: The leaf nodes of the tree are 3 and 4
# and the length of the shortest path between them is 3. This is the only good pair.
# Example 2:
# Input: root = [1,2,3,4,5,6,7], distance = 3
# Output: 2
# Explanation: The good pairs are [4,5] and [6,7] with shortest path = 2.
# The pair [4,6] is not good because the length of the shortest path between them is 4.
# Example 3:
# Input: root = [7,1,4,6,null,5,3,null,null,null,null,null,2], distance = 3
# Output: 1
# Explanation: The only good pair is [2,5].
# Example 4:
# Input: root = [100], distance = 1
# Output: 0
# Example 5:
# Input: root = [1,1,1], distance = 2
# Output: 1
# Constraints:
# The number of nodes in the tree is in the range [1, 2^10].
# Each node's value is between [1, 100].
# 1 <= distance <= 10
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    def countPairs(self, root, distance):
        """
        :type root: TreeNode
        :type distance: int
        :rtype: int
        """
        # Post-order DFS: each call returns (pairs counted so far, distances
        # from this node down to every leaf in its subtree).
        def dfs(node):
            if node is None:
                return 0, []
            if node.left is None and node.right is None:
                return 0, [1]
            lc, left = dfs(node.left)
            rc, right = dfs(node.right)
            pairs = lc + rc + sum(1 for l in left for r in right if l + r <= distance)
            return pairs, [d + 1 for d in left + right if d + 1 <= distance]
        return dfs(root)[0]
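
# Minimal local check (added; TreeNode is defined here only for illustration,
# mirroring the commented definition above):
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, val=0, left=None, right=None):
            self.val, self.left, self.right = val, left, right
    root = TreeNode(1, TreeNode(2, None, TreeNode(4)), TreeNode(3))
    print(Solution().countPairs(root, 3))  # Example 1 -> expected 1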
|
from json import loads
from os.path import splitext
import boto3
from botocore.exceptions import ClientError
import pandas as pd
import pickle
from smart_open import open as s_open
from vespid import setup_logger
logger = setup_logger(__name__)
def yield_object_keys(bucket, prefix, limit=float('inf'),
skip_until_suffix=None):
"""yield object keys matching s3://bucket/prefix up to limit, skipping until suffix given"""
logger.debug(f"yielding {limit} object keys from {bucket} / {prefix}")
s3_client = boto3.client('s3')
paginator = s3_client.get_paginator('list_objects_v2')
pages = paginator.paginate(Bucket=bucket, Prefix=prefix)
count = 0
skipping = False
if skip_until_suffix:
skipping = True
for page in pages:
for obj in page['Contents']:
if count >= limit:
return
logger.debug(f"{count} key found: {obj}")
if skip_until_suffix:
key = obj['Key']
basename = splitext(key)[0]
if basename.endswith(skip_until_suffix):
skipping = False
logger.debug(f"stop skipping {obj} vs {skip_until_suffix}")
else:
logger.debug(f"keep skipping! {obj} vs {skip_until_suffix}")
if skipping:
logger.debug(f"{count} key skipped: {obj}")
continue
logger.debug(f"{count} yielding: {obj}")
yield obj['Key']
count += 1
def yield_json_from_s3_obj(key, s3_bucket, encoding='utf-8', limit=float('inf')):
"""yield lines of loaded JSON from s3://bucket/key up to limit,
assuming open access to the S3 bucket"""
logger.info(f"yielding {limit} json rows from {s3_bucket} / {key} with encoding {encoding}")
with s_open(f"s3://{s3_bucket}/{key}", encoding=encoding) as thing:
count = 0
for line in thing:
if count >= limit:
break
line = line.strip()
if not line:
continue
json_paper = loads(line)
logger.debug(f"{count} json found: {json_paper}")
yield json_paper
count += 1
def get_s3_file(bucket, key, resource=None):
'''
Pulls the contents of a given object in S3 into memory.
Parameters
----------
bucket : str
Name of the S3 bucket containing the object
key : str
Object prefix + name. E.g. if the bucket is called "vespid",
and the file of interest is s3://vespid/data/a.json, the key
would be "data/a.json"
resource : boto3.resource object, optional
Result of calling `boto3.resource('s3')`, by default None
Returns
-------
bytestring
The file contents
'''
if resource is None:
s3 = boto3.resource('s3')
else:
s3 = resource
return s3.Bucket(bucket).Object(key).get()['Body'].read()
def count_s3_objects(bucket, prefix):
'''
Given a bucket name and file prefix to match, counts how many qualifying
objects there are.
Parameters
----------
bucket : str
Name of S3 bucket to inspect
prefix : str
Prefix of matching objects (e.g. 'data/processed/')
Returns
-------
int
The number of matching objects found
'''
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucket)
num_s3_objects = sum(
1 for _ in bucket.objects.filter(
Prefix=prefix
)
)
return num_s3_objects
def upload_file(file_name, bucket, object_prefix=None):
"""Upload a file to an S3 bucket
:param file_name: File to upload
:param bucket: Bucket to upload to
:param object_prefix: prefix to add to ``file_name``, e.g. '<data/external/>filename'
:return: True if file was uploaded, else False
"""
# If S3 object_name was not specified, use file_name
if object_prefix is None:
object_name = file_name
else:
object_name = object_prefix + file_name
# Upload the file
s3_client = boto3.client('s3')
try:
        s3_client.upload_file(file_name, bucket, object_name)
except ClientError as e:
logger.error(e)
return False
return True
def upload_object(
object,
path,
access_key_path=None,
bucket='vespid',
session=None,
auto_pickle=False
):
"""
Upload an arbitrary object to an S3 bucket, such as
a pickled DataFrame.
    Note that, if you send an object as a bytestring (e.g. via pickle.dumps()),
    you will want to read the resulting file back via

        with open('path/to/file.pkl', 'rb') as f:
            obj = pickle.load(f)

    or, for bytes already read into memory, via pickle.loads(data).
Parameters
----------
object: object to upload, usually in a StringIO or ByteStringIO format
path: str. URI to use for object when in the bucket (e.g. <bucket>/<path>).
access_key_path: str. Indicates filepath to AWS access credentials CSV.
If None, will assume it's operating in a pre-approved security environment
(e.g. an AWS instance with proper role permissions)
bucket: str. Target S3 bucket.
session: boto3.Session object. If None, will attempt to upload via
boto3.resource
auto_pickle: bool. If True, and `object` is determined to not already be
a bytestring, will pickle `object` before sending to S3 via
`pickle.dumps(object)'
Returns
-------
Nothing.
"""
if path[-1] == '/':
raise ValueError("object name should not end with '/'. "
"Please ensure a valid object name has been provided.")
if access_key_path is not None:
aws_access_key, secret_access_key = pd.read_csv(access_key_path).loc[0]
else:
aws_access_key, secret_access_key = None, None
# Upload the file
if session is None:
s3_resource = boto3.resource(
's3',
aws_access_key_id=aws_access_key,
aws_secret_access_key=secret_access_key
)
else:
s3_resource = session.resource('s3')
# Detect if object is not already in proper format, transform accordingly
if not isinstance(object, bytes) and auto_pickle:
logger.debug(f"Detected that object is of type {type(object)}; "
"pickling it and sending to S3 as a byte string...")
to_upload = pickle.dumps(object)
else:
to_upload = object
    s3_resource.Object(bucket, path).put(Body=to_upload)
def test_s3_access(bucket):
'''
Tests S3 ingress and egress. Useful for code run in different environments
(e.g. AWS Batch) to run this before doing any heavy lifting to make sure
access credentialing, etc. is good to go.
Parameters
----------
bucket : str
Name of S3 bucket to test
'''
# Test uploading
test_object = pd.DataFrame({'a': [1,2,3], 'b': [4,5,6]})
logger.debug("Testing upload of a pickled DataFrame to S3...")
upload_object(
test_object,
'upload_test.pkl',
auto_pickle=True,
bucket=bucket
)
# Testing downloading
assert get_s3_file(bucket, 'upload_test.pkl') is not None, \
"S3 file download failed"
logger.info("S3 upload test successful!")
|
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from .model_base import ModelBase
from h2o.utils.shared_utils import can_use_pandas
class H2OAnomalyDetectionModel(ModelBase):
def varsplits(self, use_pandas=False):
"""
Retrieve per-variable split information for a given Isolation Forest model. Output will include:
- count - The number of times a variable was used to make a split.
- aggregated_split_ratios - The split ratio is defined as "abs(#left_observations - #right_observations) / #before_split".
Even splits (#left_observations approx the same as #right_observations) contribute
less to the total aggregated split ratio value for the given feature;
            highly imbalanced splits (e.g. #left_observations >> #right_observations) contribute more.
- aggregated_split_depths - The sum of all depths of a variable used to make a split. (If a variable is used
on level N of a tree, then it contributes with N to the total aggregate.)
:param use_pandas: If True, then the variable splits will be returned as a Pandas data frame.
:returns: A list or Pandas DataFrame.
"""
model = self._model_json["output"]
if "variable_splits" in list(model.keys()) and model["variable_splits"]:
vals = model["variable_splits"].cell_values
header = model["variable_splits"].col_header
if use_pandas and can_use_pandas():
import pandas
return pandas.DataFrame(vals, columns=header)
else:
return vals
else:
print("Warning: This model doesn't provide variable split information")
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import pytest
import dawg
class TestPrediction(object):
DATA = ['ЁЖИК', 'ЁЖИКЕ', 'ЁЖ', 'ДЕРЕВНЯ', 'ДЕРЁВНЯ', 'ЕМ', 'ОЗЕРА', 'ОЗЁРА', 'ОЗЕРО']
LENGTH_DATA = list(zip(DATA, ((len(w),) for w in DATA)))
REPLACES = dawg.DAWG.compile_replaces({'Е': 'Ё'})
SUITE = [
('УЖ', []),
('ЕМ', ['ЕМ']),
('ЁМ', []),
('ЁЖ', ['ЁЖ']),
('ЕЖ', ['ЁЖ']),
('ЁЖИК', ['ЁЖИК']),
('ЕЖИКЕ', ['ЁЖИКЕ']),
('ДЕРЕВНЯ', ['ДЕРЕВНЯ', 'ДЕРЁВНЯ']),
('ДЕРЁВНЯ', ['ДЕРЁВНЯ']),
('ОЗЕРА', ['ОЗЕРА', 'ОЗЁРА']),
('ОЗЕРО', ['ОЗЕРО']),
]
SUITE_ITEMS = [
(
it[0], # key
[
(w, [(len(w),)]) # item, value pair
for w in it[1]
]
)
for it in SUITE
]
SUITE_VALUES = [
(
it[0], # key
[[(len(w),)] for w in it[1]]
)
for it in SUITE
]
@pytest.mark.parametrize(("word", "prediction"), SUITE)
def test_dawg_prediction(self, word, prediction):
d = dawg.DAWG(self.DATA)
assert d.similar_keys(word, self.REPLACES) == prediction
@pytest.mark.parametrize(("word", "prediction"), SUITE)
def test_record_dawg_prediction(self, word, prediction):
d = dawg.RecordDAWG(str("=H"), self.LENGTH_DATA)
assert d.similar_keys(word, self.REPLACES) == prediction
@pytest.mark.parametrize(("word", "prediction"), SUITE_ITEMS)
def test_record_dawg_items(self, word, prediction):
d = dawg.RecordDAWG(str("=H"), self.LENGTH_DATA)
assert d.similar_items(word, self.REPLACES) == prediction
@pytest.mark.parametrize(("word", "prediction"), SUITE_VALUES)
def test_record_dawg_items_values(self, word, prediction):
d = dawg.RecordDAWG(str("=H"), self.LENGTH_DATA)
assert d.similar_item_values(word, self.REPLACES) == prediction
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'chengzhi'
from tqsdk import TqApi
# Create an API instance.
api = TqApi()
# Get a quote reference for SHFE cu2001; fields in `quote` update whenever the market data changes
quote = api.get_quote("SHFE.cu2001")
# Print the latest quote time and last price of cu2001
print(quote.datetime, quote.last_price)
# Close the api and release resources
api.close()
|
"""
Command line actions to manage workenv
"""
import os
import subprocess
from pathlib import Path
from . import bash
from .config import Command, Project
from .constants import COMMAND_NAME
from .io import echo, error
registry = {}
def action(fn):
    """Register fn in the action registry under its function name."""
    registry[fn.__name__] = fn
    return fn
@action
def install(config, actions, args):
"""
Install workenv into your bashrc
"""
command_name = COMMAND_NAME
if len(args) == 1:
command_name = args[0]
elif len(args) > 1:
error("Usage: workenv --install [<as>]")
return
bash.install(command_name)
echo(f"Installed as {command_name}, open a new shell to use")
@action
def edit(config, actions, args):
"""
Open the yaml source in the shell editor
"""
    if len(args) > 0:
        error("Usage: workenv --edit")
        return
editor = os.environ.get("EDITOR") or "vim"
subprocess.call([editor, config.file])
@action
def add(config, actions, args):
"""
Register a new project or command using the current path
"""
cwd = Path.cwd()
if len(args) == 0:
error("Must specify a project name to add it")
return
project_name, command_name = (args + [None])[0:2]
# Get or create project
if not command_name and project_name in config.projects:
error(f"Project {project_name} already exists")
return
if project_name not in config.projects:
config.projects[project_name] = Project(
config=config,
name=project_name,
path=cwd,
source=[],
env=[],
run=[],
parent=None,
)
project = config.projects[project_name]
if not command_name:
config.save()
echo(f"Added project {project_name}")
return
if command_name in project.commands:
error(f"Command {command_name} already exists in project {project_name}")
return
project.commands[command_name] = Command(
name=command_name, path=cwd, source=[], env=[], run=[], parent=project,
)
config.save()
echo(f"Added command {command_name} to project {project_name}")
|
from oscar.apps.customer import config
class CustomerConfig(config.CustomerConfig):
name = 'oscar_apps.customer'
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-11-02 03:21
from __future__ import unicode_literals
from tqdm import tqdm
from django.db import migrations
from django.db.models import F
def extract_external_id_from_url(apps, schema_editor):
AttachmentFile = apps.get_model('data', 'AttachmentFile')
non_documentcloud_files = AttachmentFile.objects.exclude(source_type='DOCUMENTCLOUD')
for file in tqdm(non_documentcloud_files, desc='Get external_id from original_url '):
url = file.original_url
file.external_id = url[url.rindex('/') + 1:] if '/' in url else url
file.save()
def use_url_for_external_id(apps, schema_editor):
AttachmentFile = apps.get_model('data', 'AttachmentFile')
AttachmentFile.objects.exclude(source_type='DOCUMENTCLOUD').update(external_id=F('original_url'))
class Migration(migrations.Migration):
dependencies = [
('data', '0093_attachment_request_add_airtable_record_id'),
]
operations = [
migrations.RunPython(
extract_external_id_from_url,
reverse_code=use_url_for_external_id,
elidable=True),
]
|
from owlready2 import *
import csv
import sys
import os
def aro_query(start_id, depth):
# Load the ontology
os.chdir('..')
aro_path = os.path.abspath(os.curdir)
    onto = get_ontology(os.path.join(aro_path, 'aro.owl')).load()
namespace = onto.get_namespace("http://purl.obolibrary.org/obo/")
#onto.search(label = 'microbial susceptibility test')
# Grab the relevant subclasses, starting from start_id and progressing down through the hierarchy <depth> # of times
# If the depth is 1, only the subclasses of the initial element are pulled.
subclass_list = [start_id]
done = []
for d in range(0,int(depth)):
for sc in range(len(subclass_list)):
if subclass_list[sc] not in done:
checking = subclass_list[sc]
# Pull the subclasses and add them to the list
curr_query_string = 'list(namespace.%s.subclasses())' % (checking)
subclass_list.extend(eval(curr_query_string))
# Format the ARO IDs: remove '.obo' prefix, convert to string
subclass_list = [str(aro).split('.')[1] if 'obo.' in str(aro) else aro for aro in subclass_list]
# Prevent the current ARO element from having its subclasses pulled again by marking it as done
done.append(checking)
# Extract the parent term of "child" term in the subclass list, and extract the labels for each term in the subclass list
hierarchy_dict = dict.fromkeys(subclass_list)
label_list = []
for term in hierarchy_dict:
# Extract the labels
query_label_string = 'namespace.%s.label' % (term)
label_list.append(eval(query_label_string)[0])
# Search for terms that are subclasses ("children") of the current <term> (the "parent")
search_string = 'onto.search(is_a = namespace.%s)' % (term)
children = eval(search_string)
children = [str(aro).split('.')[1] for aro in children]
# Map the current parent to any of its children that are part of the hierarchy dictionary
for c in range(len(children)):
child = children[c]
# Since the onto.search method includes the search term or parent in the returned results, skip the parent (otherwise the parent of this term will be recorded as itself).
# Also, skip any children that are not present in the hierarchy dictionary.
if child != term and child in subclass_list:
hierarchy_dict[child] = term
parent_terms = list(hierarchy_dict.values())
# Combine the lists in preparation for writing to a csv
rows = zip(subclass_list, label_list, parent_terms)
# Write to csv
    with open('aro-query.csv', 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(['ARO_id', 'label', 'parent_ARO_id'])
for row in rows:
writer.writerow(row)
# Allow function to be called from the command line
if __name__ == '__main__':
aro_query(sys.argv[1], sys.argv[2])
|
"""
Tests for bitarray
Author: Ilan Schnell
"""
import os
import sys
import unittest
import tempfile
import shutil
from random import randint
is_py3k = bool(sys.version_info[0] == 3)
if is_py3k:
from io import StringIO
else:
from cStringIO import StringIO
if __name__ == '__main__':
from __init__ import bitarray, bits2bytes
repr_type = "<class '__init__.bitarray'>"
else:
from bitarray import bitarray, bits2bytes
repr_type = "<class 'bitarray.bitarray'>"
tests = []
if sys.version_info[:2] < (2, 6):
def next(x):
return x.next()
def to_bytes(s):
if is_py3k:
return bytes(s.encode('latin1'))
elif sys.version_info[:2] >= (2, 6):
return bytes(s)
else:
return s
class Util(object):
def randombitarrays(self):
for n in list(range(25)) + [randint(1000, 2000)]:
yield bitarray([randint(0, 1) for d in range(n)],
endian='big' if randint(0, 1) else 'little')
def randomlists(self):
for n in list(range(25)) + [randint(1000, 2000)]:
yield [bool(randint(0, 1)) for d in range(n)]
def rndsliceidx(self, length):
return randint(-2*length, 2*length-1) if randint(0, 1) == 1 else None
def slicelen(self, r, length):
return getIndicesEx(r, length)[-1]
def check_obj(self, a):
self.assertEqual(repr(type(a)), repr_type)
unused = 8 * a.buffer_info()[1] - len(a)
self.assert_(0 <= unused < 8)
self.assertEqual(unused, a.buffer_info()[3])
def assertEQUAL(self, a, b):
self.assertEqual(a, b)
self.assertEqual(a.endian(), b.endian())
self.check_obj(a)
self.check_obj(b)
def getIndicesEx(r, length):
if not isinstance(r, slice):
raise TypeError("slice object expected")
start = r.start
stop = r.stop
step = r.step
if r.step is None:
step = 1
else:
if step == 0:
raise ValueError("slice step cannot be zero")
defstart = length-1 if step < 0 else 0
defstop = -1 if step < 0 else length
if r.start is None:
start = defstart
else:
if start < 0: start += length
if start < 0: start = -1 if step < 0 else 0
if start >= length: start = length-1 if step < 0 else length
if r.stop is None:
stop = defstop
else:
if stop < 0: stop += length
if stop < 0: stop = -1
if stop > length: stop = length
if (step < 0 and stop >= length) or (step > 0 and start >= stop):
slicelength = 0
elif step < 0:
        slicelength = (stop - start + 1) // step + 1
else:
        slicelength = (stop - start - 1) // step + 1
if slicelength < 0:
slicelength = 0
return start, stop, step, slicelength
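
# Added note: getIndicesEx mirrors CPython's PySlice_GetIndicesEx, so its first
# three return values should agree with the builtin slice.indices, e.g.:
#
#     s = slice(None, None, -2)
#     assert getIndicesEx(s, 10)[:3] == s.indices(10)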
# ---------------------------------------------------------------------------
class TestsModuleFunctions(unittest.TestCase, Util):
def test_bits2bytes(self):
for arg in ['foo', [], None, {}]:
self.assertRaises(TypeError, bits2bytes, arg)
self.assertRaises(TypeError, bits2bytes)
self.assertRaises(TypeError, bits2bytes, 1, 2)
self.assertRaises(ValueError, bits2bytes, -1)
self.assertRaises(ValueError, bits2bytes, -924)
for n in range(1000):
            self.assertEqual(bits2bytes(n),
                             0 if n == 0 else ((n - 1) // 8 + 1))
for n, m in [(0, 0), (1, 1), (2, 1), (7, 1), (8, 1), (9, 2),
(10, 2), (15, 2), (16, 2), (64, 8), (65, 9),
(0, 0), (1, 1), (65, 9), (2**29, 2**26),
(2**31, 2**28), (2**32, 2**29), (2**34, 2**31),
(2**34+793, 2**31+100), (2**35-8, 2**32-1),
(2**62, 2**59), (2**63-8, 2**60-1)]:
self.assertEqual(bits2bytes(n), m)
tests.append(TestsModuleFunctions)
# ---------------------------------------------------------------------------
class CreateObjectTests(unittest.TestCase, Util):
def test_noInitializer(self):
a = bitarray()
self.assertEqual(len(a), 0)
self.assertEqual(a.tolist(), [])
self.check_obj(a)
def test_endian(self):
a = bitarray(endian='little')
a.fromstring('A')
self.assertEqual(a.endian(), 'little')
self.check_obj(a)
b = bitarray(endian='big')
b.fromstring('A')
self.assertEqual(b.endian(), 'big')
self.check_obj(b)
self.assertEqual(a.tostring(), b.tostring())
a = bitarray(endian='little')
a.fromstring(' ')
self.assertEqual(a.endian(), 'little')
self.check_obj(a)
b = bitarray(endian='big')
b.fromstring(' ')
self.assertEqual(b.endian(), 'big')
self.check_obj(b)
self.assertEqual(a.tostring(), b.tostring())
self.assertRaises(TypeError, bitarray.__new__, bitarray, endian=0)
self.assertRaises(ValueError, bitarray.__new__, bitarray, endian='')
def test_integers(self):
for n in range(50):
a = bitarray(n)
self.assertEqual(len(a), n)
self.check_obj(a)
a = bitarray(int(n))
self.assertEqual(len(a), n)
self.check_obj(a)
self.assertRaises(ValueError, bitarray.__new__, bitarray, -1)
self.assertRaises(ValueError, bitarray.__new__, bitarray, -924)
def test_list(self):
lst = ['foo', None, [1], {}]
a = bitarray(lst)
self.assertEqual(a.tolist(), [True, False, True, False])
self.check_obj(a)
for n in range(50):
lst = [bool(randint(0, 1)) for d in range(n)]
a = bitarray(lst)
self.assertEqual(a.tolist(), lst)
self.check_obj(a)
def test_tuple(self):
tup = ('', True, [], {1:2})
a = bitarray(tup)
self.assertEqual(a.tolist(), [False, True, False, True])
self.check_obj(a)
for n in range(50):
lst = [bool(randint(0, 1)) for d in range(n)]
a = bitarray(tuple(lst))
self.assertEqual(a.tolist(), lst)
self.check_obj(a)
def test_iter(self):
for n in range(50):
lst = [bool(randint(0, 1)) for d in range(n)]
a = bitarray(iter(lst))
self.assertEqual(a.tolist(), lst)
self.check_obj(a)
def test_iter2(self):
for lst in self.randomlists():
def foo():
for x in lst:
yield x
a = bitarray(foo())
self.assertEqual(a, bitarray(lst))
self.check_obj(a)
def test_01(self):
a = bitarray('0010111')
self.assertEqual(a.tolist(), [0, 0, 1, 0, 1, 1, 1])
self.check_obj(a)
for n in range(50):
lst = [bool(randint(0, 1)) for d in range(n)]
s = ''.join('1' if x else '0' for x in lst)
a = bitarray(s)
self.assertEqual(a.tolist(), lst)
self.check_obj(a)
self.assertRaises(ValueError, bitarray.__new__, bitarray, '01012100')
def test_bitarray(self):
for n in range(50):
a = bitarray(n)
b = bitarray(a)
self.assert_(a is not b)
self.assertEQUAL(a, b)
for end in ('little', 'big'):
a = bitarray(endian=end)
c = bitarray(a)
self.assertEqual(c.endian(), end)
c = bitarray(a, endian='little')
self.assertEqual(c.endian(), 'little')
c = bitarray(a, endian='big')
self.assertEqual(c.endian(), 'big')
def test_None(self):
self.assertEQUAL(bitarray(), bitarray(0))
self.assertEQUAL(bitarray(), bitarray(None))
def test_WrongArgs(self):
self.assertRaises(TypeError, bitarray.__new__, bitarray, 'A', 42, 69)
self.assertRaises(TypeError, bitarray.__new__, bitarray, Ellipsis)
self.assertRaises(TypeError, bitarray.__new__, bitarray, slice(0))
self.assertRaises(TypeError, bitarray.__new__, bitarray, 2.345)
self.assertRaises(TypeError, bitarray.__new__, bitarray, 4+3j)
self.assertRaises(TypeError, bitarray.__new__, bitarray, '', 0, 42)
self.assertRaises(ValueError, bitarray.__new__, bitarray, 0, 'foo')
tests.append(CreateObjectTests)
# ---------------------------------------------------------------------------
class MetaDataTests(unittest.TestCase):
def test_buffer_info(self):
a = bitarray('0000111100001', endian='little')
self.assertEqual(a.buffer_info()[1:4], (2, 'little', 3))
a = bitarray()
self.assertRaises(TypeError, a.buffer_info, 42)
bi = a.buffer_info()
self.assert_(isinstance(bi, tuple))
self.assertEqual(len(bi), 5)
self.assert_(isinstance(bi[0], int))
if is_py3k:
self.assert_(isinstance(bi[1], int))
self.assert_(isinstance(bi[2], str))
self.assert_(isinstance(bi[3], int))
if is_py3k:
self.assert_(isinstance(bi[4], int))
for n in range(50):
bi = bitarray(n).buffer_info()
self.assertEqual(bi[1], bits2bytes(n))
self.assertEqual(bi[3] + n, 8 * bi[1])
self.assert_(bi[4] >= bi[1])
a = bitarray(endian='little')
self.assertEqual(a.buffer_info()[2], 'little')
a = bitarray(endian='big')
self.assertEqual(a.buffer_info()[2], 'big')
def test_endian(self):
a = bitarray(endian='little')
self.assertEqual(a.endian(), 'little')
a = bitarray(endian='big')
self.assertEqual(a.endian(), 'big')
def test_length(self):
for n in range(1000):
a = bitarray(n)
self.assertEqual(len(a), n)
self.assertEqual(a.length(), n)
tests.append(MetaDataTests)
# ---------------------------------------------------------------------------
class SliceTests(unittest.TestCase, Util):
def test_getitem(self):
a = bitarray()
self.assertRaises(IndexError, a.__getitem__, 0)
a.append(True)
self.assertEqual(a[0], True)
self.assertRaises(IndexError, a.__getitem__, 1)
self.assertRaises(IndexError, a.__getitem__, -2)
a.append(False)
self.assertEqual(a[1], False)
self.assertRaises(IndexError, a.__getitem__, 2)
self.assertRaises(IndexError, a.__getitem__, -3)
a = bitarray('1100010')
for i, b in enumerate([True, True, False, False, False, True, False]):
self.assertEqual(a[i], b)
self.assertEqual(a[i-7], b)
self.assertRaises(IndexError, a.__getitem__, 7)
self.assertRaises(IndexError, a.__getitem__, -8)
a = bitarray('0100000100001')
self.assertEQUAL(a[:], a)
self.assert_(a[:] is not a)
aa = a.tolist()
self.assertEQUAL(a[11:2:-3], bitarray(aa[11:2:-3]))
self.check_obj(a[:])
self.assertRaises(ValueError, a.__getitem__, slice(None, None, 0))
self.assertRaises(TypeError, a.__getitem__, (1, 2))
for a in self.randombitarrays():
aa = a.tolist()
la = len(a)
if la == 0: continue
for dum in range(10):
step = self.rndsliceidx(la)
if step == 0: step = None
s = slice(self.rndsliceidx(la),
self.rndsliceidx(la), step)
self.assertEQUAL(a[s], bitarray(aa[s], endian=a.endian()))
def test_setitem(self):
a = bitarray([False])
a[0] = 1
self.assertEqual(a.tolist(), [True])
a = bitarray(2)
a[0] = 0
a[1] = 1
self.assertEqual(a.tolist(), [False, True])
a[-1] = 0
a[-2] = 1
self.assertEqual(a.tolist(), [True, False])
self.assertRaises(IndexError, a.__setitem__, 2, True)
self.assertRaises(IndexError, a.__setitem__, -3, False)
for a in self.randombitarrays():
la = len(a)
if la == 0:
continue
i = randint(0, la-1)
aa = a.tolist()
ida = id(a)
val = bool(randint(0, 1))
a[i] = val
aa[i] = val
self.assertEqual(a.tolist(), aa)
self.assertEqual(id(a), ida)
self.check_obj(a)
b = bitarray(la)
b[0:la] = bitarray(a)
self.assertEqual(a, b)
self.assertNotEqual(id(a), id(b))
b = bitarray(la)
b[:] = bitarray(a)
self.assertEqual(a, b)
self.assertNotEqual(id(a), id(b))
b = bitarray(la)
b[::-1] = bitarray(a)
self.assertEqual(a.tolist()[::-1], b.tolist())
a = bitarray(5*[False])
a[0] = 1
a[-2] = 1
self.assertEqual(a, bitarray('10010'))
self.assertRaises(IndexError, a.__setitem__, 5, 'foo')
self.assertRaises(IndexError, a.__setitem__, -6, 'bar')
for a in self.randombitarrays():
la = len(a)
if la == 0: continue
for dum in range(3):
step = self.rndsliceidx(la)
if step == 0: step = None
s = slice(self.rndsliceidx(la),
self.rndsliceidx(la), step)
for b in self.randombitarrays():
if len(b) == self.slicelen(s, len(a)) or step is None:
c = bitarray(a)
d = c
c[s] = b
self.assert_(c is d)
self.check_obj(c)
cc = a.tolist()
cc[s] = b.tolist()
self.assertEqual(c, bitarray(cc))
def test_setslice_to_bool(self):
a = bitarray('11111111')
a[::2] = False
self.assertEqual(a, bitarray('01010101'))
a[4::] = True
self.assertEqual(a, bitarray('01011111'))
a[-2:] = False
self.assertEqual(a, bitarray('01011100'))
a[:2:] = True
self.assertEqual(a, bitarray('11011100'))
a[:] = True
self.assertEqual(a, bitarray('11111111'))
def test_delitem(self):
a = bitarray('100110')
del a[1]
self.assertEqual(len(a), 5)
del a[3]
del a[-2]
self.assertEqual(a, bitarray('100'))
self.assertRaises(IndexError, a.__delitem__, 3)
self.assertRaises(IndexError, a.__delitem__, -4)
for a in self.randombitarrays():
la = len(a)
if la == 0: continue
for dum in range(10):
step = self.rndsliceidx(la)
if step == 0: step = None
s = slice(self.rndsliceidx(la),
self.rndsliceidx(la), step)
c = bitarray(a)
d = c
del c[s]
self.assert_(c is d)
self.check_obj(c)
cc = a.tolist()
del cc[s]
self.assertEQUAL(c, bitarray(cc, endian=c.endian()))
tests.append(SliceTests)
# ---------------------------------------------------------------------------
class MiscTests(unittest.TestCase, Util):
def test_booleanness(self):
self.assertEqual(bool(bitarray('')), False)
self.assertEqual(bool(bitarray('0')), True)
self.assertEqual(bool(bitarray('1')), True)
def test_iterate(self):
for lst in self.randomlists():
acc = []
for b in bitarray(lst):
acc.append(b)
self.assertEqual(acc, lst)
def test_iterable(self):
a = iter(bitarray('011'))
self.assertEqual(next(a), False)
self.assertEqual(next(a), True)
self.assertEqual(next(a), True)
if is_py3k:
self.assertRaises(StopIteration, a.__next__)
else:
self.assertRaises(StopIteration, a.next)
def test_assignment(self):
a = bitarray('00110111001')
a[1:3] = a[7:9]
a[-1:] = a[:1]
b = bitarray('01010111000')
self.assertEqual(a, b)
def test_compare(self):
for a in self.randombitarrays():
aa = a.tolist()
for b in self.randombitarrays():
bb = b.tolist()
self.assertEqual(a == b, aa == bb)
self.assertEqual(a != b, aa != bb)
self.assertEqual(a <= b, aa <= bb)
self.assertEqual(a < b, aa < bb)
self.assertEqual(a >= b, aa >= bb)
self.assertEqual(a > b, aa > bb)
def test_subclassing(self):
class ExaggeratingBitarray(bitarray):
def __new__(cls, data, offset):
return bitarray.__new__(cls, data)
def __init__(self, data, offset):
self.offset = offset
def __getitem__(self, i):
return bitarray.__getitem__(self, i - self.offset)
for a in self.randombitarrays():
if len(a) == 0:
continue
b = ExaggeratingBitarray(a, 1234)
for i in range(len(a)):
self.assertEqual(a[i], b[i+1234])
def test_endianness(self):
a = bitarray(endian='little')
a.frombytes(to_bytes('\x01'))
self.assertEqual(a.to01(), '10000000')
b = bitarray(endian='little')
b.frombytes(to_bytes('\x80'))
self.assertEqual(b.to01(), '00000001')
c = bitarray(endian='big')
c.frombytes(to_bytes('\x80'))
self.assertEqual(c.to01(), '10000000')
d = bitarray(endian='big')
d.frombytes(to_bytes('\x01'))
self.assertEqual(d.to01(), '00000001')
self.assertEqual(a, c)
self.assertEqual(b, d)
a = bitarray(8, endian='little')
a.setall(False)
a[0] = True
self.assertEqual(a.tobytes(), to_bytes('\x01'))
a[1] = True
self.assertEqual(a.tobytes(), to_bytes('\x03'))
a.frombytes(to_bytes(' '))
self.assertEqual(a.tobytes(), to_bytes('\x03 '))
self.assertEqual(a.to01(), '1100000000000100')
a = bitarray(8, endian='big')
a.setall(False)
a[7] = True
self.assertEqual(a.tobytes(), to_bytes('\x01'))
a[6] = True
self.assertEqual(a.tobytes(), to_bytes('\x03'))
a.frombytes(to_bytes(' '))
self.assertEqual(a.tobytes(), to_bytes('\x03 '))
self.assertEqual(a.to01(), '0000001100100000')
a = bitarray('00100000', endian='big')
self.assertEqual(a.tobytes(), to_bytes(' '))
b = bitarray('00000100', endian='little')
self.assertEqual(b.tobytes(), to_bytes(' '))
self.assertNotEqual(a, b)
a = bitarray('11100000', endian='little')
b = bitarray(a, endian='big')
self.assertNotEqual(a, b)
self.assertEqual(a.tobytes(), b.tobytes())
def test_pickle(self):
from pickle import loads, dumps
for a in self.randombitarrays():
b = loads(dumps(a))
self.assert_(b is not a)
self.assertEQUAL(a, b)
def test_cPickle(self):
from pickle import loads, dumps
for a in self.randombitarrays():
b = loads(dumps(a))
self.assert_(b is not a)
self.assertEQUAL(a, b)
def test_overflow(self):
from platform import architecture
if architecture()[0] == '64bit':
return
self.assertRaises(OverflowError, bitarray.__new__,
bitarray, 2**34 + 1)
a = bitarray(10**6)
self.assertRaises(OverflowError, a.__imul__, 17180)
tests.append(MiscTests)
# ---------------------------------------------------------------------------
class SpecialMethodTests(unittest.TestCase, Util):
def test_all(self):
a = bitarray()
self.assertTrue(a.all())
for a in self.randombitarrays():
self.assertEqual(all(a), a.all())
self.assertEqual(all(a.tolist()), a.all())
def test_any(self):
a = bitarray()
self.assertFalse(a.any())
for a in self.randombitarrays():
self.assertEqual(any(a), a.any())
self.assertEqual(any(a.tolist()), a.any())
def test_repr(self):
a = bitarray()
self.assertEqual(repr(a), "bitarray()")
a = bitarray('10111')
self.assertEqual(repr(a), "bitarray('10111')")
for a in self.randombitarrays():
b = eval(repr(a))
self.assert_(b is not a)
self.assertEqual(a, b)
self.check_obj(b)
def test_copy(self):
import copy
for a in self.randombitarrays():
b = a.copy()
self.assert_(b is not a)
self.assertEQUAL(a, b)
b = copy.copy(a)
self.assert_(b is not a)
self.assertEQUAL(a, b)
b = copy.deepcopy(a)
self.assert_(b is not a)
self.assertEQUAL(a, b)
def assertReallyEqual(self, a, b):
# assertEqual first, because it will have a good message if the
# assertion fails.
self.assertEqual(a, b)
self.assertEqual(b, a)
self.assertTrue(a == b)
self.assertTrue(b == a)
self.assertFalse(a != b)
self.assertFalse(b != a)
if not is_py3k:
self.assertEqual(0, cmp(a, b))
self.assertEqual(0, cmp(b, a))
def assertReallyNotEqual(self, a, b):
# assertNotEqual first, because it will have a good message if the
# assertion fails.
self.assertNotEqual(a, b)
self.assertNotEqual(b, a)
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
if not is_py3k:
self.assertNotEqual(0, cmp(a, b))
self.assertNotEqual(0, cmp(b, a))
def test_equality(self):
self.assertReallyEqual(bitarray(''), bitarray(''))
self.assertReallyEqual(bitarray('0'), bitarray('0'))
self.assertReallyEqual(bitarray('1'), bitarray('1'))
def test_not_equality(self):
self.assertReallyNotEqual(bitarray(''), bitarray('1'))
self.assertReallyNotEqual(bitarray(''), bitarray('0'))
self.assertReallyNotEqual(bitarray('0'), bitarray('1'))
tests.append(SpecialMethodTests)
# ---------------------------------------------------------------------------
class NumberTests(unittest.TestCase, Util):
def test_add(self):
c = bitarray('001') + bitarray('110')
self.assertEQUAL(c, bitarray('001110'))
for a in self.randombitarrays():
aa = a.copy()
for b in self.randombitarrays():
bb = b.copy()
c = a + b
self.assertEqual(c, bitarray(a.tolist() + b.tolist()))
self.assertEqual(c.endian(), a.endian())
self.check_obj(c)
self.assertEQUAL(a, aa)
self.assertEQUAL(b, bb)
a = bitarray()
self.assertRaises(TypeError, a.__add__, 42)
def test_iadd(self):
c = bitarray('001')
c += bitarray('110')
self.assertEQUAL(c, bitarray('001110'))
for a in self.randombitarrays():
for b in self.randombitarrays():
c = bitarray(a)
d = c
d += b
self.assertEqual(d, a + b)
self.assert_(c is d)
self.assertEQUAL(c, d)
self.assertEqual(d.endian(), a.endian())
self.check_obj(d)
a = bitarray()
self.assertRaises(TypeError, a.__iadd__, 42)
def test_mul(self):
c = 0 * bitarray('1001111')
self.assertEQUAL(c, bitarray())
c = 3 * bitarray('001')
self.assertEQUAL(c, bitarray('001001001'))
c = bitarray('110') * 3
self.assertEQUAL(c, bitarray('110110110'))
for a in self.randombitarrays():
b = a.copy()
for n in range(-10, 20):
c = a * n
self.assertEQUAL(c, bitarray(n * a.tolist(),
endian=a.endian()))
c = n * a
self.assertEqual(c, bitarray(n * a.tolist(),
endian=a.endian()))
self.assertEQUAL(a, b)
a = bitarray()
self.assertRaises(TypeError, a.__mul__, None)
def test_imul(self):
c = bitarray('1101110011')
idc = id(c)
c *= 0
self.assertEQUAL(c, bitarray())
self.assertEqual(idc, id(c))
c = bitarray('110')
c *= 3
self.assertEQUAL(c, bitarray('110110110'))
for a in self.randombitarrays():
for n in range(-10, 10):
b = a.copy()
idb = id(b)
b *= n
self.assertEQUAL(b, bitarray(n * a.tolist(),
endian=a.endian()))
self.assertEqual(idb, id(b))
a = bitarray()
self.assertRaises(TypeError, a.__imul__, None)
tests.append(NumberTests)
# ---------------------------------------------------------------------------
class BitwiseTests(unittest.TestCase, Util):
def test_misc(self):
for a in self.randombitarrays():
b = ~a
c = a & b
self.assertEqual(c.any(), False)
self.assertEqual(a, a ^ c)
d = a ^ b
self.assertEqual(d.all(), True)
b &= d
self.assertEqual(~b, a)
def test_and(self):
a = bitarray('11001')
b = bitarray('10011')
self.assertEQUAL(a & b, bitarray('10001'))
b = bitarray('1001')
self.assertRaises(ValueError, a.__and__, b) # not same length
self.assertRaises(TypeError, a.__and__, 42)
def test_iand(self):
a = bitarray('110010110')
ida = id(a)
a &= bitarray('100110011')
self.assertEQUAL(a, bitarray('100010010'))
self.assertEqual(ida, id(a))
def test_or(self):
a = bitarray('11001')
b = bitarray('10011')
self.assertEQUAL(a | b, bitarray('11011'))
    def test_ior(self):
a = bitarray('110010110')
a |= bitarray('100110011')
self.assertEQUAL(a, bitarray('110110111'))
def test_xor(self):
a = bitarray('11001')
b = bitarray('10011')
self.assertEQUAL(a ^ b, bitarray('01010'))
def test_ixor(self):
a = bitarray('110010110')
a ^= bitarray('100110011')
self.assertEQUAL(a, bitarray('010100101'))
def test_invert(self):
a = bitarray()
a.invert()
self.assertEQUAL(a, bitarray())
a = bitarray('11011')
a.invert()
self.assertEQUAL(a, bitarray('00100'))
a = bitarray('11011')
b = ~a
self.assertEQUAL(b, bitarray('00100'))
self.assertEQUAL(a, bitarray('11011'))
self.assert_(a is not b)
for a in self.randombitarrays():
aa = a.tolist()
b = bitarray(a)
b.invert()
for i in range(len(a)):
self.assertEqual(b[i], not aa[i])
self.check_obj(b)
c = ~a
self.assert_(c is not a)
self.assertEQUAL(a, bitarray(aa, endian=a.endian()))
for i in range(len(a)):
self.assertEqual(c[i], not aa[i])
self.check_obj(b)
tests.append(BitwiseTests)
# ---------------------------------------------------------------------------
class SequenceTests(unittest.TestCase, Util):
def test_contains(self):
a = bitarray()
self.assert_(False not in a)
self.assert_(True not in a)
a.append(True)
self.assert_(True in a)
self.assert_(False not in a)
a = bitarray([False])
self.assert_(False in a)
self.assert_(True not in a)
a.append(True)
self.assert_(0 in a)
self.assert_(1 in a)
for n in range(2, 100):
a = bitarray(n)
a.setall(0)
self.assert_(False in a)
self.assert_(True not in a)
a[randint(0, n-1)] = 1
self.assert_(True in a)
self.assert_(False in a)
a.setall(1)
self.assert_(True in a)
self.assert_(False not in a)
a[randint(0, n-1)] = 0
self.assert_(True in a)
self.assert_(False in a)
a = bitarray('011010000001')
self.assert_('1' in a)
self.assert_('11' in a)
self.assert_('111' not in a)
self.assert_(bitarray('00') in a)
self.assert_([0, 0, 0, 1] in a)
self.assert_((0, 0, 0, 1, 1) not in a)
self.assert_((0, 0, 0, 0, 2) in a)
tests.append(SequenceTests)
# ---------------------------------------------------------------------------
class ExtendTests(unittest.TestCase, Util):
def test_wrongArgs(self):
a = bitarray()
self.assertRaises(TypeError, a.extend)
self.assertRaises(TypeError, a.extend, None)
self.assertRaises(TypeError, a.extend, True)
self.assertRaises(TypeError, a.extend, 24)
self.assertRaises(ValueError, a.extend, '0011201')
def test_bitarray(self):
a = bitarray()
a.extend(bitarray())
self.assertEqual(a, bitarray())
a.extend(bitarray('110'))
self.assertEqual(a, bitarray('110'))
a.extend(bitarray('1110'))
self.assertEqual(a, bitarray('1101110'))
a = bitarray('00001111', endian='little')
a.extend(bitarray('00111100', endian='big'))
self.assertEqual(a, bitarray('0000111100111100'))
for a in self.randombitarrays():
for b in self.randombitarrays():
c = bitarray(a)
idc = id(c)
c.extend(b)
self.assertEqual(id(c), idc)
self.assertEqual(c, a + b)
def test_list(self):
a = bitarray()
a.extend([0, 1, 3, None, {}])
self.assertEqual(a, bitarray('01100'))
a.extend([True, False])
self.assertEqual(a, bitarray('0110010'))
for a in self.randomlists():
for b in self.randomlists():
c = bitarray(a)
idc = id(c)
c.extend(b)
self.assertEqual(id(c), idc)
self.assertEqual(c.tolist(), a + b)
self.check_obj(c)
def test_iterable(self):
def bar():
for x in ('', '1', None, True, []):
yield x
a = bitarray()
a.extend(bar())
self.assertEqual(a, bitarray('01010'))
for a in self.randomlists():
for b in self.randomlists():
def foo():
for e in b:
yield e
c = bitarray(a)
idc = id(c)
c.extend(foo())
self.assertEqual(id(c), idc)
self.assertEqual(c.tolist(), a + b)
self.check_obj(c)
def test_tuple(self):
a = bitarray()
a.extend((0, 1, 2, 0, 3))
self.assertEqual(a, bitarray('01101'))
for a in self.randomlists():
for b in self.randomlists():
c = bitarray(a)
idc = id(c)
c.extend(tuple(b))
self.assertEqual(id(c), idc)
self.assertEqual(c.tolist(), a + b)
self.check_obj(c)
def test_iter(self):
a = bitarray()
a.extend(iter([3, 9, 0, 1, -2]))
self.assertEqual(a, bitarray('11011'))
for a in self.randomlists():
for b in self.randomlists():
c = bitarray(a)
idc = id(c)
c.extend(iter(b))
self.assertEqual(id(c), idc)
self.assertEqual(c.tolist(), a + b)
self.check_obj(c)
def test_string01(self):
a = bitarray()
a.extend('0110111')
self.assertEqual(a, bitarray('0110111'))
for a in self.randomlists():
for b in self.randomlists():
c = bitarray(a)
idc = id(c)
c.extend(''.join(('1' if x else '0') for x in b))
self.assertEqual(id(c), idc)
self.assertEqual(c.tolist(), a + b)
self.check_obj(c)
tests.append(ExtendTests)
# ---------------------------------------------------------------------------
class MethodTests(unittest.TestCase, Util):
def test_append(self):
a = bitarray()
a.append(True)
a.append(False)
a.append(False)
self.assertEQUAL(a, bitarray('100'))
for a in self.randombitarrays():
aa = a.tolist()
b = a
b.append(1)
self.assert_(a is b)
self.check_obj(b)
self.assertEQUAL(b, bitarray(aa+[1], endian=a.endian()))
b.append('')
self.assertEQUAL(b, bitarray(aa+[1, 0], endian=a.endian()))
def test_insert(self):
a = bitarray()
b = a
a.insert(0, True)
self.assert_(a is b)
self.assertEqual(a, bitarray('1'))
self.assertRaises(TypeError, a.insert)
self.assertRaises(TypeError, a.insert, None)
for a in self.randombitarrays():
aa = a.tolist()
item = bool(randint(0, 1))
pos = randint(-len(a), len(a))
a.insert(pos, item)
aa.insert(pos, item)
self.assertEqual(a.tolist(), aa)
self.check_obj(a)
def test_index(self):
a = bitarray()
for i in (True, False, 1, 0):
self.assertRaises(ValueError, a.index, i)
a = bitarray(100*[False])
self.assertRaises(ValueError, a.index, True)
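# bitarray coerces items by truth value, so assigning any nonzero number
# sets the bit to True, and index() accepts the same kinds of values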
a[20] = a[27] = 54
self.assertEqual(a.index(42), 20)
self.assertEqual(a.index(0), 0)
a = bitarray(200*[True])
self.assertRaises(ValueError, a.index, False)
a[173] = a[187] = 0
self.assertEqual(a.index(False), 173)
self.assertEqual(a.index(True), 0)
for n in range(50):
for m in range(n):
a = bitarray(n)
a.setall(0)
self.assertRaises(ValueError, a.index, 1)
a[m] = 1
self.assertEqual(a.index(1), m)
a.setall(1)
self.assertRaises(ValueError, a.index, 0)
a[m] = 0
self.assertEqual(a.index(0), m)
def test_count(self):
a = bitarray('10011')
self.assertEqual(a.count(), 3)
self.assertEqual(a.count(True), 3)
self.assertEqual(a.count(False), 2)
self.assertEqual(a.count(1), 3)
self.assertEqual(a.count(0), 2)
self.assertRaises(TypeError, a.count, 'A')
for a in self.randombitarrays():
self.assertEqual(a.count(), a.count(1))
self.assertEqual(a.count(1), a.to01().count('1'))
self.assertEqual(a.count(0), a.to01().count('0'))
def test_search(self):
a = bitarray('')
self.assertEqual(a.search(bitarray('0')), [])
self.assertEqual(a.search(bitarray('1')), [])
a = bitarray('1')
self.assertEqual(a.search(bitarray('0')), [])
self.assertEqual(a.search(bitarray('1')), [0])
self.assertEqual(a.search(bitarray('11')), [])
a = bitarray(100*'1')
self.assertEqual(a.search(bitarray('0')), [])
self.assertEqual(a.search(bitarray('1')), list(range(100)))
a = bitarray('10011')
for s, res in [('0', [1, 2]), ('1', [0, 3, 4]),
('01', [2]), ('11', [3]),
('000', []), ('1001', [0]),
('011', [2]), ('0011', [1]),
('10011', [0]), ('100111', [])]:
self.assertEqual(a.search(s), res)
b = bitarray(s)
self.assertEqual(a.search(b), res)
self.assertEqual(a.search(list(b)), res)
self.assertEqual(a.search(tuple(b)), res)
a = bitarray('10010101110011111001011')
for limit in range(10):
self.assertEqual(a.search('011', limit),
[6, 11, 20][:limit])
# search is overlapping
self.assertEqual(a.search('111'), [7, 12, 13, 14])
def test_fill(self):
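# fill() pads the buffer with zeros up to the next byte boundary and returns
# the number of bits added (0 when the length is already a multiple of 8)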
a = bitarray('')
self.assertEqual(a.fill(), 0)
self.assertEqual(len(a), 0)
a = bitarray('101')
self.assertEqual(a.fill(), 5)
self.assertEQUAL(a, bitarray('10100000'))
self.assertEqual(a.fill(), 0)
self.assertEQUAL(a, bitarray('10100000'))
for a in self.randombitarrays():
aa = a.tolist()
la = len(a)
b = a
self.assert_(0 <= b.fill() < 8)
self.assertEqual(b.endian(), a.endian())
bb = b.tolist()
lb = len(b)
self.assert_(a is b)
self.check_obj(b)
if la % 8 == 0:
self.assertEqual(bb, aa)
self.assertEqual(lb, la)
else:
self.assert_(lb % 8 == 0)
self.assertNotEqual(bb, aa)
self.assertEqual(bb[:la], aa)
self.assertEqual(b[la:], (lb-la)*bitarray('0'))
self.assert_(0 < lb-la < 8)
def test_sort(self):
a = bitarray('1101000')
a.sort()
self.assertEqual(a, bitarray('0000111'))
a = bitarray('1101000')
a.sort(reverse=True)
self.assertEqual(a, bitarray('1110000'))
a = bitarray('1101000')
a.sort(True)
self.assertEqual(a, bitarray('1110000'))
self.assertRaises(TypeError, a.sort, 'A')
for a in self.randombitarrays():
ida = id(a)
rev = randint(0, 1)
a.sort(rev)
self.assertEqual(a, bitarray(sorted(a.tolist(), reverse=rev)))
self.assertEqual(id(a), ida)
def test_reverse(self):
self.assertRaises(TypeError, bitarray().reverse, 42)
a = bitarray()
a.reverse()
self.assertEQUAL(a, bitarray())
a = bitarray('1001111')
a.reverse()
self.assertEQUAL(a, bitarray('1111001'))
a = bitarray('11111000011')
a.reverse()
self.assertEQUAL(a, bitarray('11000011111'))
for a in self.randombitarrays():
aa = a.tolist()
ida = id(a)
a.reverse()
self.assertEqual(ida, id(a))
self.assertEQUAL(a, bitarray(aa[::-1], endian=a.endian()))
def test_tolist(self):
a = bitarray()
self.assertEqual(a.tolist(), [])
a = bitarray('110')
self.assertEqual(a.tolist(), [True, True, False])
for lst in self.randomlists():
a = bitarray(lst)
self.assertEqual(a.tolist(), lst)
def test_remove(self):
a = bitarray()
for i in (True, False, 1, 0):
self.assertRaises(ValueError, a.remove, i)
a = bitarray(21)
a.setall(0)
self.assertRaises(ValueError, a.remove, 1)
a.setall(1)
self.assertRaises(ValueError, a.remove, 0)
a = bitarray('1010110')
a.remove(False); self.assertEqual(a, bitarray('110110'))
a.remove(True); self.assertEqual(a, bitarray('10110'))
a.remove(1); self.assertEqual(a, bitarray('0110'))
a.remove(0); self.assertEqual(a, bitarray('110'))
a = bitarray('0010011')
b = a
b.remove('1')
self.assert_(b is a)
self.assertEQUAL(b, bitarray('000011'))
def test_pop(self):
a = bitarray()
self.assertRaises(IndexError, a.pop)
for a in self.randombitarrays():
self.assertRaises(IndexError, a.pop, len(a))
self.assertRaises(IndexError, a.pop, -len(a)-1)
if len(a) == 0:
continue
aa = a.tolist()
enda = a.endian()
self.assertEqual(a.pop(), aa[-1])
self.check_obj(a)
self.assertEqual(a.endian(), enda)
for a in self.randombitarrays():
if len(a) == 0:
continue
n = randint(-len(a), len(a)-1)
aa = a.tolist()
self.assertEqual(a.pop(n), aa[n])
self.check_obj(a)
def test_setall(self):
a = bitarray(5)
a.setall(True)
self.assertEQUAL(a, bitarray('11111'))
for a in self.randombitarrays():
val = randint(0, 1)
b = a
b.setall(val)
self.assertEqual(b, bitarray(len(b)*[val]))
self.assert_(a is b)
self.check_obj(b)
def test_bytereverse(self):
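# bytereverse() reverses the bit order within each byte in place; a partial
# last byte is reversed over the full 8 bits, so padding bits shift in
# (hence '1011' -> '0000' below)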
a = bitarray()
a.bytereverse()
self.assertEqual(a, bitarray())
a = bitarray('1011')
a.bytereverse()
self.assertEqual(a, bitarray('0000'))
a = bitarray('111011')
a.bytereverse()
self.assertEqual(a, bitarray('001101'))
a = bitarray('11101101')
a.bytereverse()
self.assertEqual(a, bitarray('10110111'))
for i in range(256):
a = bitarray()
a.frombytes(to_bytes(chr(i)))
aa = a.tolist()
b = a
b.bytereverse()
self.assertEqual(b, bitarray(aa[::-1]))
self.assert_(a is b)
self.check_obj(b)
tests.append(MethodTests)
# ---------------------------------------------------------------------------
class StringTests(unittest.TestCase, Util):
def randombytes(self):
for n in range(1, 20):
yield to_bytes(''.join(chr(randint(0, 255))
for x in range(n)))
def test_frombytes(self):
a = bitarray(endian='big')
a.frombytes(to_bytes('A'))
self.assertEqual(a, bitarray('01000001'))
b = a
b.frombytes(to_bytes('BC'))
self.assertEQUAL(b, bitarray('01000001' '01000010' '01000011'))
self.assert_(b is a)
for b in self.randombitarrays():
c = b.__copy__()
b.frombytes(to_bytes(''))
self.assertEQUAL(b, c)
for b in self.randombitarrays():
for s in self.randombytes():
a = bitarray(endian=b.endian())
a.frombytes(s)
c = b.__copy__()
b.frombytes(s)
self.assertEQUAL(b[-len(a):], a)
self.assertEQUAL(b[:-len(a)], c)
self.assertEQUAL(c + a, b)
def test_tobytes(self):
a = bitarray()
self.assertEqual(a.tobytes(), to_bytes(''))
for end in ('big', 'little'):
a = bitarray(endian=end)
a.frombytes(to_bytes('foo'))
self.assertEqual(a.tobytes(), to_bytes('foo'))
for s in self.randombytes():
a = bitarray(endian=end)
a.frombytes(s)
self.assertEqual(a.tobytes(), s)
for n, s in [(1, '\x01'), (2, '\x03'), (3, '\x07'), (4, '\x0f'),
(5, '\x1f'), (6, '\x3f'), (7, '\x7f'), (8, '\xff'),
(12, '\xff\x0f'), (15, '\xff\x7f'), (16, '\xff\xff'),
(17, '\xff\xff\x01'), (24, '\xff\xff\xff')]:
a = bitarray(n, endian='little')
a.setall(1)
self.assertEqual(a.tobytes(), to_bytes(s))
def test_unpack(self):
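# unpack() maps every bit to one byte: zero -> '\x00' and one -> '\xff'
# by default, overridable via the zero/one arguments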
a = bitarray('01')
self.assertEqual(a.unpack(), to_bytes('\x00\xff'))
self.assertEqual(a.unpack(to_bytes('A')), to_bytes('A\xff'))
self.assertEqual(a.unpack(to_bytes('0'), to_bytes('1')),
to_bytes('01'))
self.assertEqual(a.unpack(one=to_bytes('\x01')),
to_bytes('\x00\x01'))
self.assertEqual(a.unpack(zero=to_bytes('A')),
to_bytes('A\xff'))
self.assertEqual(a.unpack(one=to_bytes('t'), zero=to_bytes('f')),
to_bytes('ft'))
self.assertRaises(TypeError, a.unpack,
to_bytes('a'), zero=to_bytes('b'))
self.assertRaises(TypeError, a.unpack, foo=to_bytes('b'))
for a in self.randombitarrays():
self.assertEqual(a.unpack(to_bytes('0'), to_bytes('1')),
to_bytes(a.to01()))
b = bitarray()
b.pack(a.unpack())
self.assertEqual(b, a)
b = bitarray()
b.pack(a.unpack(to_bytes('\x01'), to_bytes('\x00')))
b.invert()
self.assertEqual(b, a)
def test_pack(self):
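# pack() is the inverse of unpack(): it appends one bit per input byte,
# treating '\x00' as 0 and any other byte value as 1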
a = bitarray()
a.pack(to_bytes('\x00'))
self.assertEqual(a, bitarray('0'))
a.pack(to_bytes('\xff'))
self.assertEqual(a, bitarray('01'))
a.pack(to_bytes('\x01\x00\x7a'))
self.assertEqual(a, bitarray('01101'))
a = bitarray()
for n in range(256):
a.pack(to_bytes(chr(n)))
self.assertEqual(a, bitarray('0' + 255 * '1'))
self.assertRaises(TypeError, a.pack, 0)
if is_py3k:
self.assertRaises(TypeError, a.pack, '1')
self.assertRaises(TypeError, a.pack, [1, 3])
self.assertRaises(TypeError, a.pack, bitarray())
tests.append(StringTests)
# ---------------------------------------------------------------------------
class FileTests(unittest.TestCase, Util):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.tmpfname = os.path.join(self.tmpdir, 'testfile')
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_cPickle(self):
from pickle import load, dump
for a in self.randombitarrays():
fo = open(self.tmpfname, 'wb')
dump(a, fo)
fo.close()
b = load(open(self.tmpfname, 'rb'))
self.assert_(b is not a)
self.assertEQUAL(a, b)
def test_shelve(self):
import shelve, hashlib
d = shelve.open(self.tmpfname)
stored = []
for a in self.randombitarrays():
key = hashlib.md5(repr(a).encode() +
a.endian().encode()).hexdigest()
d[key] = a
stored.append((key, a))
d.close()
del d
d = shelve.open(self.tmpfname)
for k, v in stored:
self.assertEQUAL(d[k], v)
d.close()
def test_fromfile_wrong_args(self):
b = bitarray()
self.assertRaises(TypeError, b.fromfile)
self.assertRaises(TypeError, b.fromfile, StringIO()) # file not open
self.assertRaises(TypeError, b.fromfile, 42)
self.assertRaises(TypeError, b.fromfile, 'bar')
def test_from_empty_file(self):
fo = open(self.tmpfname, 'wb')
fo.close()
a = bitarray()
a.fromfile(open(self.tmpfname, 'rb'))
self.assertEqual(a, bitarray())
def test_from_large_file(self):
N = 100000
fo = open(self.tmpfname, 'wb')
fo.write(N * to_bytes('X'))
fo.close()
a = bitarray()
a.fromfile(open(self.tmpfname, 'rb'))
self.assertEqual(len(a), 8 * N)
self.assertEqual(a.buffer_info()[1], N)
# make sure there is no over-allocation
self.assertEqual(a.buffer_info()[4], N)
def test_fromfile_Foo(self):
fo = open(self.tmpfname, 'wb')
fo.write(to_bytes('Foo\n'))
fo.close()
a = bitarray(endian='big')
a.fromfile(open(self.tmpfname, 'rb'))
self.assertEqual(a, bitarray('01000110011011110110111100001010'))
a = bitarray(endian='little')
a.fromfile(open(self.tmpfname, 'rb'))
self.assertEqual(a, bitarray('01100010111101101111011001010000'))
a = bitarray('1', endian='little')
a.fromfile(open(self.tmpfname, 'rb'))
self.assertEqual(a, bitarray('101100010111101101111011001010000'))
for n in range(20):
a = bitarray(n, endian='little')
a.setall(1)
a.fromfile(open(self.tmpfname, 'rb'))
self.assertEqual(a,
n*bitarray('1') +
bitarray('01100010111101101111011001010000'))
def test_fromfile_n(self):
a = bitarray()
a.fromstring('ABCDEFGHIJ')
fo = open(self.tmpfname, 'wb')
a.tofile(fo)
fo.close()
b = bitarray()
f = open(self.tmpfname, 'rb')
b.fromfile(f, 1); self.assertEqual(b.tostring(), 'A')
f.read(1)
b = bitarray()
b.fromfile(f, 2); self.assertEqual(b.tostring(), 'CD')
b.fromfile(f, 1); self.assertEqual(b.tostring(), 'CDE')
b.fromfile(f, 0); self.assertEqual(b.tostring(), 'CDE')
b.fromfile(f); self.assertEqual(b.tostring(), 'CDEFGHIJ')
b.fromfile(f); self.assertEqual(b.tostring(), 'CDEFGHIJ')
f.close()
b = bitarray()
f = open(self.tmpfname, 'rb')
f.read(1)
self.assertRaises(EOFError, b.fromfile, f, 10)
f.close()
self.assertEqual(b.tostring(), 'BCDEFGHIJ')
b = bitarray()
f = open(self.tmpfname, 'rb')
b.fromfile(f)
self.assertEqual(b.tostring(), 'ABCDEFGHIJ')
self.assertRaises(EOFError, b.fromfile, f, 1)
f.close()
def test_tofile(self):
a = bitarray()
f = open(self.tmpfname, 'wb')
a.tofile(f)
f.close()
fi = open(self.tmpfname, 'rb')
self.assertEqual(fi.read(), to_bytes(''))
fi.close()
a = bitarray('01000110011011110110111100001010', endian='big')
f = open(self.tmpfname, 'wb')
a.tofile(f)
f.close()
fi = open(self.tmpfname, 'rb')
self.assertEqual(fi.read(), to_bytes('Foo\n'))
fi.close()
for a in self.randombitarrays():
b = bitarray(a, endian='big')
fo = open(self.tmpfname, 'wb')
b.tofile(fo)
fo.close()
s = open(self.tmpfname, 'rb').read()
self.assertEqual(len(s), a.buffer_info()[1])
for n in range(3):
a.fromstring(n * 'A')
self.assertRaises(TypeError, a.tofile)
self.assertRaises(TypeError, a.tofile, StringIO())
f = open(self.tmpfname, 'wb')
a.tofile(f)
f.close()
self.assertRaises(TypeError, a.tofile, f)
for n in range(20):
a = n * bitarray('1', endian='little')
fo = open(self.tmpfname, 'wb')
a.tofile(fo)
fo.close()
s = open(self.tmpfname, 'rb').read()
self.assertEqual(len(s), a.buffer_info()[1])
b = a.__copy__()
b.fill()
c = bitarray(endian='little')
c.frombytes(s)
self.assertEqual(c, b)
tests.append(FileTests)
# ---------------------------------------------------------------------------
class PrefixCodeTests(unittest.TestCase):
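# encode()/decode() translate between symbols and bit patterns via a code
# dict; decoding requires the code to be unambiguous (prefix-free)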
def test_encode_check_codedict(self):
a = bitarray()
self.assertRaises(TypeError, a.encode, 0, '')
self.assertRaises(ValueError, a.encode, {}, '')
self.assertRaises(TypeError, a.encode, {'a':42}, '')
self.assertRaises(ValueError, a.encode, {'a':bitarray()}, '')
# 42 not iterable
self.assertRaises(TypeError, a.encode, {'a':bitarray('0')}, 42)
self.assertEqual(len(a), 0)
def test_encode_string(self):
a = bitarray()
d = {'a':bitarray('0')}
a.encode(d, '')
self.assertEqual(a, bitarray())
a.encode(d, 'a')
self.assertEqual(a, bitarray('0'))
self.assertEqual(d, {'a':bitarray('0')})
def test_encode_list(self):
a = bitarray()
d = {'a':bitarray('0')}
a.encode(d, [])
self.assertEqual(a, bitarray())
a.encode(d, ['a'])
self.assertEqual(a, bitarray('0'))
self.assertEqual(d, {'a':bitarray('0')})
def test_encode_iter(self):
a = bitarray()
d = {'a':bitarray('0'), 'b':bitarray('1')}
a.encode(d, iter('abba'))
self.assertEqual(a, bitarray('0110'))
def foo():
for c in 'bbaabb':
yield c
a.encode(d, foo())
self.assertEqual(a, bitarray('0110110011'))
self.assertEqual(d, {'a':bitarray('0'), 'b':bitarray('1')})
def test_encode(self):
d = {'I':bitarray('1'),
'l':bitarray('01'),
'a':bitarray('001'),
'n':bitarray('000')}
a = bitarray()
a.encode(d, 'Ilan')
self.assertEqual(a, bitarray('101001000'))
a.encode(d, 'a')
self.assertEqual(a, bitarray('101001000001'))
self.assertEqual(d, {'I':bitarray('1'), 'l':bitarray('01'),
'a':bitarray('001'), 'n':bitarray('000')})
self.assertRaises(ValueError, a.encode, d, 'arvin')
def test_decode_check_codedict(self):
a = bitarray()
self.assertRaises(TypeError, a.decode, 0)
self.assertRaises(ValueError, a.decode, {})
# 42 not iterable
self.assertRaises(TypeError, a.decode, {'a':42})
self.assertRaises(ValueError, a.decode, {'a':bitarray()})
def test_decode_simple(self):
d = {'I':bitarray('1'),
'l':bitarray('01'),
'a':bitarray('001'),
'n':bitarray('000')}
a = bitarray('101001000')
self.assertEqual(a.decode(d), ['I', 'l', 'a', 'n'])
self.assertEqual(d, {'I':bitarray('1'), 'l':bitarray('01'),
'a':bitarray('001'), 'n':bitarray('000')})
self.assertEqual(a, bitarray('101001000'))
def test_decode_empty(self):
d = {'a':bitarray('1')}
a = bitarray()
self.assertEqual(a.decode(d), [])
self.assertEqual(d, {'a':bitarray('1')})
def test_decode_buggybitarray(self):
d = {'a':bitarray('0')}
a = bitarray('1')
self.assertRaises(ValueError, a.decode, d)
self.assertEqual(a, bitarray('1'))
self.assertEqual(d, {'a':bitarray('0')})
def test_decode_buggybitarray2(self):
d = {'a':bitarray('00'), 'b':bitarray('01')}
a = bitarray('1')
self.assertRaises(ValueError, a.decode, d)
self.assertEqual(a, bitarray('1'))
def test_decode_ambiguous_code(self):
d = {'a':bitarray('0'), 'b':bitarray('0'), 'c':bitarray('1')}
a = bitarray()
self.assertRaises(ValueError, a.decode, d)
def test_decode_ambiguous2(self):
d = {'a':bitarray('01'), 'b':bitarray('01'), 'c':bitarray('1')}
a = bitarray()
self.assertRaises(ValueError, a.decode, d)
def test_miscitems(self):
d = {None :bitarray('00'),
0 :bitarray('110'),
1 :bitarray('111'),
'' :bitarray('010'),
2 :bitarray('011')}
a = bitarray()
a.encode(d, [None, 0, 1, '', 2])
self.assertEqual(a, bitarray('00110111010011'))
self.assertEqual(a.decode(d), [None, 0, 1, '', 2])
def test_real_example(self):
code = {' ' : bitarray('001'),
'.' : bitarray('0101010'),
'a' : bitarray('0110'),
'b' : bitarray('0001100'),
'c' : bitarray('000011'),
'd' : bitarray('01011'),
'e' : bitarray('111'),
'f' : bitarray('010100'),
'g' : bitarray('101000'),
'h' : bitarray('00000'),
'i' : bitarray('1011'),
'j' : bitarray('0111101111'),
'k' : bitarray('00011010'),
'l' : bitarray('01110'),
'm' : bitarray('000111'),
'n' : bitarray('1001'),
'o' : bitarray('1000'),
'p' : bitarray('101001'),
'q' : bitarray('00001001101'),
'r' : bitarray('1101'),
's' : bitarray('1100'),
't' : bitarray('0100'),
'u' : bitarray('000100'),
'v' : bitarray('0111100'),
'w' : bitarray('011111'),
'x' : bitarray('0000100011'),
'y' : bitarray('101010'),
'z' : bitarray('00011011110')}
a = bitarray()
a.encode(code, 'the quick brown fox jumps over the lazy dog.')
self.assertEqual(a, bitarray('01000000011100100001001101000100101100'
'00110001101000100011001101100001111110010010101001000000010001100'
'10111101111000100000111101001110000110000111100111110100101000000'
'0111001011100110000110111101010100010101110001010000101010'))
self.assertEqual(''.join(a.decode(code)),
'the quick brown fox jumps over the lazy dog.')
tests.append(PrefixCodeTests)
# ---------------------------------------------------------------------------
def pages():
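# total program size in pages, read from /proc/<pid>/statm (Linux only)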
if not sys.platform.startswith('linux'):  # 'linux2' on Python 2, 'linux' on 3
return 0
dat = open('/proc/%i/statm' % os.getpid()).read()
return int(dat.split()[0])
def check_memory_leaks(verbosity):
suite = unittest.TestSuite()
for cls in tests:
suite.addTest(unittest.makeSuite(cls))
logfile = 'pages.log'
if os.path.isfile(logfile):
os.unlink(logfile)
i = 0
runner = unittest.TextTestRunner(verbosity=verbosity)
while True:
print('Run', i)
r = runner.run(suite)
if i % 1 == 0:
fo = open(logfile, 'a')
fo.write('%10i %r %10i\n' % (i, r.wasSuccessful(), pages()))
fo.close()
i += 1
def run(verbosity, chk_mem_leaks=False):
suite = unittest.TestSuite()
for cls in tests:
suite.addTest(unittest.makeSuite(cls))
runner = unittest.TextTestRunner(verbosity=verbosity)
return runner.run(suite)
if __name__ == '__main__':
verbosity = 2 if 'v' in sys.argv else 1
if 'm' in sys.argv:
check_memory_leaks(verbosity)
else:
run(verbosity)
else:
from bitarray import __version__
print('bitarray is installed in:', os.path.dirname(__file__))
print('bitarray version:', __version__)
print(sys.version)
|
import argparse
import os
import pickle
from typing import Callable
import pandas as pd
import torch
from torch.optim import Adagrad, Adadelta, Adam
from common.data_iterator import DataLoader, NegativeSampler, TestIterator
from config import CONFIG
from model.callbacks import ModelCheckPoint, MlflowLogger
from model.hrnn4recom import HRNN
from model.loss_functions import TOP1Loss
from model.metrics import nDCG, RecallAtK
def args():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--model_version', required=True, help='model version', type=str)
parser.add_argument('-d', '--dataset', default='10M', choices=['10M', '1M', 'BRUNCH'], help='dataset', type=str)
parser.add_argument('-k', '--eval_k', default=25, help='cut-off k for the ranking metrics', type=int)
parser.add_argument('-lr', '--learning_rate', default=0.1, help='learning rate', type=float)
parser.add_argument('-o', '--dropout', default=0.2, help='dropout', type=float)
parser.add_argument('-bs', '--batch_size', default=50, help='batch size', type=int)
parser.add_argument('-ns', '--negative_sample', default=0, help='negative sample size', type=int)
return parser.parse_args()
def get_optimizer(model, name: str, lr: float) -> torch.optim.Optimizer:
""" Return a configured optimizer for the given model.
Args:
model: model whose parameters will be optimized
name: optimizer name ('Adagrad', 'Adadelta' or 'Adam')
lr: learning rate
Returns: pytorch optimizer instance
"""
# build lazily so only the requested optimizer is instantiated
functions = {
'Adagrad': lambda: Adagrad(model.parameters(), lr=lr, eps=0.00001, weight_decay=0.0),
'Adadelta': lambda: Adadelta(model.parameters(), lr=lr, eps=1e-06, weight_decay=0.0),
'Adam': lambda: Adam(model.parameters(), lr=lr, betas=(0.9, 0.999), eps=1e-06, weight_decay=0.0, amsgrad=False)
}
try:
return functions[name]()
except KeyError:
raise ValueError(f'optimizer [{name}] does not exist; available optimizers: {list(functions.keys())}')
if __name__ == '__main__':
argument = args()
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model_params = {
'hiddenUnits': 100, # 500
'negativeSampleSize': argument.negative_sample,
'learningRate': argument.learning_rate,
'loss': 'TOP1Loss',
'optimizer': 'Adagrad',
'k': argument.eval_k, 'dropout': argument.dropout,
'batchSize': argument.batch_size
}
print('loading data...')
data_dir = os.path.join(CONFIG.DATA, argument.dataset)
with open(os.path.join(data_dir, 'train.pkl'), 'rb') as f:
train_dataset = pickle.load(f)
with open(os.path.join(data_dir, 'valid.pkl'), 'rb') as f:
context_dataset = pickle.load(f)
item_meta = pd.read_csv(os.path.join(data_dir, 'item_meta.tsv'), sep='\t', low_memory=False)
item_size = int(item_meta.item_id.max() + 1)
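# assumes zero-based item ids; max id + 1 then bounds the item-vocabulary size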
model_params['itemSize'] = item_size
n_sampler = NegativeSampler(item_meta, sample_size=model_params['negativeSampleSize'])
train_dataloader = DataLoader(
train_dataset, batch_size=model_params['batchSize'], device=device,
negative_sampler=n_sampler if model_params['negativeSampleSize'] > 0 else None
)
test_iterator = TestIterator(
os.path.join(data_dir, 'negative_test.dat'), context_dataset, device=device
)
hrnn = HRNN(
model_params['hiddenUnits'], item_size, device=device, k=model_params['k'],
dropout=model_params['dropout']
)
loss_func = TOP1Loss()
optimizer = get_optimizer(hrnn, model_params['optimizer'], lr=model_params['learningRate'])
metrics = [nDCG(), RecallAtK()]
model_name = f'hrnn_v{argument.model_version}'
callbacks = [
ModelCheckPoint(
os.path.join(
'.', 'result', argument.dataset,
model_name + '-e{epoch:02d}-loss{val_loss:1.4f}-nDCG{val_nDCG:1.3f}.zip'),
monitor='val_nDCG', mode='max'
)
,
MlflowLogger(argument.dataset, model_params, run_name=model_name, log_model=False)
]
print(hrnn)
print(f"device : {device}")
hrnn.fit(
50, train_dataloader, test_iterator, loss_func=loss_func, optimizer=optimizer,
metrics=metrics, callback=callbacks, sample=1.
)
|
import csv
import datetime
import math
import os
import random
import sys
import time
import numpy as np
import scipy.io
import torch
import torch.distributed as dist
import torch.utils.data
import torchvision
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from PIL import Image
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from torchvision import utils as torch_utils
from torchvision.utils import save_image
from tqdm import tqdm
import presets
import transforms as T
import utils
from coco_utils import get_coco
from models.segmentation.segmentation import _load_model
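# fix every RNG source for reproducibility; cudnn.deterministic trades speed for repeatable results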
seed=565
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
g = torch.Generator()
g.manual_seed(seed)
evaluate_step = 0
train_step = 0
def get_dataset(dir_path, name, image_set, transform):
def sbd(*args, **kwargs):
return torchvision.datasets.SBDataset(*args, mode='segmentation', **kwargs)
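# each entry maps a dataset name to (root dir, dataset constructor, number of classes)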
paths = {
"coco": (dir_path, get_coco, 21),
"voc": (dir_path, torchvision.datasets.VOCSegmentation, 21),
"voc_aug": (dir_path, sbd, 21)
}
p, ds_fn, num_classes = paths[name]
if name == "voc":
ds = ds_fn(p, year="2012", image_set=image_set, transforms=transform, download=False)
elif name == "voc_aug":
ds = ds_fn(p, image_set=image_set, transforms=transform, download=False)
else:
ds = ds_fn(p, image_set=image_set, transforms=transform, download=False)
return ds, num_classes
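# note: the eval preset below reads args.contrast and args.grid_size from the module-level args parsed in __main__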
def get_transform(train):
base_size = 520
crop_size = 480
if train:
return presets.SegmentationPresetTrain(base_size, crop_size)
return presets.SegmentationPresetEval(base_size, contrast=args.contrast, grid_size=args.grid_size)
def main(args):
utils.init_distributed_mode(args)
print(args)
iterator = utils.Iterator()
device = torch.device(args.device)
dataset, num_classes = get_dataset(args.data_path, args.dataset, "train", get_transform(train=True))
dataset_test, _ = get_dataset(args.data_path, args.dataset, "val", get_transform(train=False))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test)
else:
train_sampler = torch.utils.data.RandomSampler(dataset)
test_sampler = torch.utils.data.SequentialSampler(dataset_test)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=args.batch_size,
sampler=train_sampler, num_workers=args.workers,
collate_fn=utils.collate_fn, drop_last=True)
data_loader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=1,
sampler=test_sampler, num_workers=args.workers,
collate_fn=utils.collate_fn)
torch.save(data_loader_test, '/home/AD/rraina/segmentation_benchmark/semseg/dataloaders/dataloader_test_contrast(1.0,{:.1f}).pth'.format(args.contrast))
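# main() stops after serializing the test loader; this script only builds data, no training loop runs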
def get_args_parser(add_help=True):
import argparse
parser = argparse.ArgumentParser(description='PyTorch Segmentation Training', add_help=add_help)
parser.add_argument('--data-path', default='/home/AD/rraina/segmentation_benchmark/', help='dataset path')
parser.add_argument('--seed', default=429, type=int, help='seed (currently unused; the module-level seed above takes effect)')
parser.add_argument('--dataset', default='coco', help='dataset name')
parser.add_argument('--model', default='deeplabv3', help='model')
parser.add_argument('--backbone', default='resnet101', help='backbone')
parser.add_argument('--aux-loss', action='store_true', help='auxiliar loss')
parser.add_argument('--device', default='cuda', help='device')
parser.add_argument('-b', '--batch-size', default=8, type=int)
parser.add_argument('--epochs', default=30, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
help='number of data loading workers (default: 16)')
parser.add_argument('--lr', default=0.01, type=float, help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('--contrast', default=1.0, type=float)
parser.add_argument('--grid-size', default=20, type=int)
parser.add_argument('--print-freq', default=10, type=int, help='print frequency')
parser.add_argument('--output', default='./deeplabv3resnet50', help='path where to save')
parser.add_argument('--use-tensorboard', dest='use_tensorboard', help='Flag to use tensorboard', action='store_true')
parser.add_argument('--tensorboard-dir', default='runs', help='path where to save tensorboard')
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument(
"--test-only",
dest="test_only",
help="Only test the model",
action="store_true",
)
parser.add_argument(
"--pretrained",
dest="pretrained",
help="Use pre-trained models from the modelzoo",
action="store_true",
)
# distributed training parameters
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
return parser
if __name__ == "__main__":
args = get_args_parser().parse_args()
if args.use_tensorboard:
writer = SummaryWriter(str(args.tensorboard_dir))
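# note: the SummaryWriter is created here but never passed into main()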
main(args)
|