content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python
import os
import sys
import dbus
import time
import urllib2
import subprocess
import ConfigParser
from bs4 import BeautifulSoup
from os.path import expanduser
# DBus identifiers for the freedesktop.org desktop-notification service.
item = "org.freedesktop.Notifications"
path = "/org/freedesktop/Notifications"
interface = "org.freedesktop.Notifications"
# Parameters reused for every Notify() call.
app_name = "Gmail Plugin"
id_num_to_replace = 0  # 0 = let the server allocate a fresh notification id
icon = os.path.join(sys.path[0], 'gmail.png')  # icon shipped next to this script
title = "Gmail"
actions_list = ''  # no actions / hints attached to the popup
hint = ''
# NOTE(review): connects to the session bus at import time — this script
# will fail to start outside a desktop session. Presumably intentional.
bus = dbus.SessionBus()
notif = bus.get_object(item, path)
notify = dbus.Interface(notif, interface)
# Google's legacy unread-mail atom feed (requires HTTP basic auth).
FEED_URL = 'https://mail.google.com/mail/feed/atom'
def ConfigSectionMap(section):
    """Return {option: value} for *section* of the module-global ``Config``.

    A single unreadable option is reported and mapped to None rather than
    aborting the whole section.

    Note: the original code compared the value to -1 and called an undefined
    ``DebugPrint`` — Config.get() only returns strings, so that branch was
    dead, and the NameError it would raise was swallowed by a bare except.
    Both have been removed.
    """
    values = {}
    for option in Config.options(section):
        try:
            values[option] = Config.get(section, option)
        except ConfigParser.Error:
            # Keep going: one bad option should not lose the rest.
            print("exception on %s!" % option)
            values[option] = None
    return values
def internet_on():
    """Return the exit status of a single ping to google.com (0 = reachable).

    Output is discarded so the ping does not spam the terminal, and -W gives
    the probe a 2-second deadline instead of blocking indefinitely.
    """
    with open(os.devnull, 'wb') as devnull:
        return subprocess.call(['/bin/ping', '-c1', '-W2', 'google.com'],
                               stdout=devnull, stderr=devnull)
def updatecredentials():
    """Copy credentials from ~/.gmailnotf.ini into the script-local config.ini.

    Silently does nothing when ~/.gmailnotf.ini does not exist.
    """
    home = expanduser("~")
    homefile = os.path.join(home, '.gmailnotf.ini')
    if os.path.exists(homefile):
        updateconfig = ConfigParser.RawConfigParser()
        updateconfig.read(homefile)
        # NOTE(review): reads the first section's first two options by
        # position — assumes the user file lists username then password in
        # that order. Fragile; verify against the expected ini layout.
        username = updateconfig.get(updateconfig.sections()[0], updateconfig.options(updateconfig.sections()[0])[0])
        password = updateconfig.get(updateconfig.sections()[0], updateconfig.options(updateconfig.sections()[0])[1])
        changeconfig = ConfigParser.RawConfigParser()
        # config.ini lives next to this script, not in $HOME.
        conpath = sys.path[0]
        configpath = os.path.join(conpath, 'config.ini')
        changeconfig.read(configpath)
        changeconfig.sections()
        changeconfig.set('SectionOne', 'username', username)
        changeconfig.set('SectionOne', 'password', password)
        with open(configpath, 'wb') as configfile:
            changeconfig.write(configfile)
    else:
        return
class Gmailnotification:
    """Fetch the Gmail atom feed, compare the unread count with the previous
    run, show a desktop notification when it changed, and persist the new
    count back to config.ini.

    All work happens in __init__ via a chain of method calls; the public
    method names and signatures are unchanged from the original.
    """

    def __init__(self, user, passwd, previousnumber):
        self.getnumberofmessage(user, passwd, previousnumber)

    def getnumberofmessage(self, user, passwd, previousnumber):
        """Authenticate against the Gmail feed URL and pass it to the parser."""
        auth_handler = urllib2.HTTPBasicAuthHandler()
        auth_handler.add_password(
            realm='New mail feed',
            uri='https://mail.google.com',
            user='{user}@gmail.com'.format(user=user),
            passwd=passwd
        )
        opener = urllib2.build_opener(auth_handler)
        urllib2.install_opener(opener)
        feed = urllib2.urlopen(FEED_URL)
        self.parsingfullcount(feed, previousnumber)

    def parsingfullcount(self, feed, previousnumber):
        """Extract the <fullcount> unread total from the atom feed."""
        soup = BeautifulSoup(feed.read())
        number = int(soup.fullcount.string)
        unreadmessages = "You have %d unread mails" % number
        self.sendmessage(unreadmessages, number, previousnumber)

    def sendmessage(self, message, number, previousnumber):
        """Notify only when the unread count differs from the previous check.

        previousnumber may arrive as a string (it is read from config.ini),
        hence the int() coercion on both sides.
        """
        if int(number) == int(previousnumber):
            self.dontshowpopup(message, number, previousnumber)
        else:
            self.showpopup(number, message)

    def dontshowpopup(self, message, number, previousnumber):
        # Count unchanged: remember it, no popup, no config rewrite.
        self.value = number

    def showpopup(self, number, message):
        """Show the desktop notification and persist the new count.

        The original had two byte-identical branches differing only in the
        text; collapsed here into a single Notify call.
        """
        text = "No unread mails" if number == 0 else message
        timeout = 5000  # display time in ms ('timeout' avoids shadowing the time module)
        notify.Notify(app_name, id_num_to_replace, icon, title, text,
                      actions_list, hint, timeout)
        self.updateconfig(number)

    def updateconfig(self, number):
        """Write *number* back as 'previousnumber' in the script-local config.ini."""
        self.cwd = sys.path[0]
        self.basefile = os.path.join(self.cwd, 'config.ini')
        self.editconfig = ConfigParser.RawConfigParser()
        self.editconfig.read(self.basefile)
        self.editconfig.set('SectionOne', 'previousnumber', number)
        with open(self.basefile, 'wb') as configfile:
            self.editconfig.write(configfile)
        return
if __name__ == "__main__":
updatecredentials()
while True:
if internet_on() == 0:
cwd = sys.path[0]
basefile = os.path.join(cwd, 'config.ini')
Config = ConfigParser.ConfigParser()
Config.read(basefile)
user = ConfigSectionMap("SectionOne")['username']
passwd = ConfigSectionMap("SectionOne")['password']
previousnumber = ConfigSectionMap("SectionOne")['previousnumber']
d = Gmailnotification(user, passwd, previousnumber)
time.sleep(300)
else:
time.sleep(30)
|
class SuperError(Exception):
    """Base class of this module's exception hierarchy.

    Stores the message on ``self.msg`` and renders as ``repr(msg)``,
    matching the original behavior.
    """

    def __init__(self, msg):
        # Forward to Exception so e.args is populated and the exception
        # pickles/repr's correctly (the original skipped this).
        Exception.__init__(self, msg)
        self.msg = msg

    def __str__(self):
        return repr(self.msg)
class DownloadError(SuperError):
    """Raised when fetching a remote resource fails."""
class ParseError(SuperError):
    """Raised when downloaded content cannot be parsed."""
import json
import scipy.stats
import matplotlib.pyplot as plt
import numpy as np
def open_file(nameFile):
    """Load and return the JSON content of ``<nameFile>.json``.

    Returns 0 when the file is missing, unreadable, or not valid JSON —
    the falsy sentinel existing callers test for. The original leaked the
    file handle when json.loads raised; ``with`` guarantees closure.
    """
    try:
        with open(nameFile + ".json", "r") as f:
            return json.loads(f.read())
    except (IOError, OSError, ValueError):
        # ValueError covers malformed JSON (JSONDecodeError subclasses it).
        return 0
def mean_confidence_interval(data, confidence=0.90):
    """Return (mean, half_width) of the *confidence* interval for *data*,
    using the Student t distribution with n-1 degrees of freedom.
    """
    sample = 1.0 * np.array(data)
    size = len(sample)
    mean = np.mean(sample)
    std_err = scipy.stats.sem(sample)
    t_crit = scipy.stats.t.ppf((1 + confidence) / 2., size - 1)
    return mean, std_err * t_crit
"""
files = ['../data/results/m43.957018117658315_m19.931545102455843_m43.931890481507786_m19.907162672548026_0_coords_distance_heuristic_dijkstra_nn']
files_s = ['../data/results/m43.96267779776494_m19.944747838679202_m43.929659815391865_m19.905049264605925_0_coords_distance_heuristic_SPFA_nn']
files_a = ['../data/results/m43.96267779776494_m19.944747838679202_m43.929659815391865_m19.905049264605925_0_coords_distance_heuristic_astar_nn']
files_ci = ['../data/results/m43.957018117658315_m19.931545102455843_m43.931890481507786_m19.907162672548026_0_coords_distance_heuristic_dijkstra_ci']
files_s_ci = ['../data/results/m43.96267779776494_m19.944747838679202_m43.929659815391865_m19.905049264605925_0_coords_distance_heuristic_SPFA_ci']
files_a_ci = ['../data/results/m43.96267779776494_m19.944747838679202_m43.929659815391865_m19.905049264605925_0_coords_distance_heuristic_astar_ci']
files_fi = ['../data/results/m43.96267779776494_m19.944747838679202_m43.929659815391865_m19.905049264605925_0_coords_distance_heuristic_dijkstra_fi']
files_s_fi = ['../data/results/m43.96267779776494_m19.944747838679202_m43.929659815391865_m19.905049264605925_0_coords_distance_heuristic_SPFA_fi']
files_a_fi = ['../data/results/m43.96267779776494_m19.944747838679202_m43.929659815391865_m19.905049264605925_0_coords_distance_heuristic_astar_fi']
"""
# All five result-file lists share the same 31 run coordinates and differ
# only in the path-finding algorithm (dijkstra/SPFA/astar) and the tour
# heuristic (nearest_neighbor/closest_insertion). Generating them from one
# coordinate table removes ~150 duplicated literals; the produced values
# are identical to the original hand-written lists.
_RUN_COORDS = [
    'm38.49905230272549_m12.960541036813272_m38.47398437502447_m12.935229804750517',
    'm38.499613596916454_m12.961216812913838_m38.47548425277925_m12.934070088770925',
    'm38.50194412971296_m12.9624676749896_m38.472997875909336_m12.93487294586209',
    'm38.498581450235484_m12.9619559499298_m38.475389747728904_m12.934784985867735',
    'm38.49996781691653_m12.95986050660711_m38.474784788561664_m12.933876269107426',
    'm38.50102106363388_m12.960490752611433_m38.47530338641699_m12.935070144844953',
    'm38.50130467830106_m12.961824509508324_m38.47401790429914_m12.931900743216616',
    'm38.49922134252434_m12.959719860966981_m38.47230805005746_m12.932265326057136',
    'm38.499023327452186_m12.96043952416794_m38.47288011285585_m12.935194971832598',
    'm38.50157434253033_m12.960963430607745_m38.47367938539426_m12.934943284635198',
    'm38.50173708534096_m12.961142864695704_m38.472735872376994_m12.934002867600155',
    'm38.501770300615625_m12.962923879056133_m38.47456776187294_m12.933458582758297',
    'm38.500350682125635_m12.962082099834404_m38.474252489838484_m12.933159784666088',
    'm38.499541957062846_m12.961275066314741_m38.47543065870227_m12.933077757489697',
    'm38.50096584572687_m12.960054889071188_m38.47537633515103_m12.93494576442133',
    'm38.49992604759511_m12.96137329471482_m38.474439318456355_m12.934385438592946',
    'm38.501118552381065_m12.96079542837906_m38.47527163205215_m12.934807266431482',
    'm38.5002628268697_m12.96291845683024_m38.474969528890774_m12.935323121601408',
    'm38.50148403583942_m12.959860721735883_m38.473738459371354_m12.932454395581454',
    'm38.50449792282273_m12.960749843857812_m38.47312892278054_m12.934855166198494',
    'm38.501890924160584_m12.961519343957082_m38.474698888311465_m12.933784238917099',
    'm38.5007597052321_m12.959937832694857_m38.4746987632653_m12.934062022103753',
    'm38.50140543268961_m12.962059262780658_m38.47465373021255_m12.933888947418161',
    'm38.502953641658266_m12.963948797656334_m38.473898022861405_m12.935715998602321',
    'm38.50187151458964_m12.960561184183135_m38.47481398113283_m12.934620537016835',
    'm38.501402782516365_m12.96039753671852_m38.47361068224981_m12.93529938288262',
    'm38.49976769604373_m12.961496062259055_m38.474156963136124_m12.934301693944008',
    'm38.500486678608006_m12.959835378598493_m38.474758327361364_m12.936386705101464',
    'm38.50066682622218_m12.960812050907476_m38.47216531424985_m12.934908879722355',
    'm38.50234447884447_m12.962113594988761_m38.47520010149299_m12.935206277553998',
    'm38.501602450236554_m12.962836591132051_m38.47539177946605_m12.934754563384551',
]


def _result_paths(algorithm, tour):
    """Build the result-file path for every run coordinate.

    ``algorithm`` is the shortest-path algorithm name (dijkstra/SPFA/astar)
    and ``tour`` the TSP construction heuristic (nearest_neighbor or
    closest_insertion); both are embedded in the stored file names.
    """
    return ['../data/results/%s_%d_15_weight_heuristic_%s_%s_coords_weight_heuristic_%s'
            % (coords, run, algorithm, tour, algorithm)
            for run, coords in enumerate(_RUN_COORDS)]


files = _result_paths('dijkstra', 'nearest_neighbor')
files_s = _result_paths('SPFA', 'nearest_neighbor')
files_a = _result_paths('astar', 'nearest_neighbor')
files_ci = _result_paths('dijkstra', 'closest_insertion')
files_s_ci = _result_paths('SPFA', 'closest_insertion')
files_a_ci = [#'../../data/results/m43.957018117658315_m19.931545102455843_m43.931890481507786_m19.907162672548026_0_distance_heuristic_dijkstra_nn'
'../data/results/m38.49905230272549_m12.960541036813272_m38.47398437502447_m12.935229804750517_0_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.499613596916454_m12.961216812913838_m38.47548425277925_m12.934070088770925_1_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.50194412971296_m12.9624676749896_m38.472997875909336_m12.93487294586209_2_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.498581450235484_m12.9619559499298_m38.475389747728904_m12.934784985867735_3_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.49996781691653_m12.95986050660711_m38.474784788561664_m12.933876269107426_4_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.50102106363388_m12.960490752611433_m38.47530338641699_m12.935070144844953_5_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.50130467830106_m12.961824509508324_m38.47401790429914_m12.931900743216616_6_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.49922134252434_m12.959719860966981_m38.47230805005746_m12.932265326057136_7_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.499023327452186_m12.96043952416794_m38.47288011285585_m12.935194971832598_8_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.50157434253033_m12.960963430607745_m38.47367938539426_m12.934943284635198_9_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.50173708534096_m12.961142864695704_m38.472735872376994_m12.934002867600155_10_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.501770300615625_m12.962923879056133_m38.47456776187294_m12.933458582758297_11_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.500350682125635_m12.962082099834404_m38.474252489838484_m12.933159784666088_12_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.499541957062846_m12.961275066314741_m38.47543065870227_m12.933077757489697_13_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.50096584572687_m12.960054889071188_m38.47537633515103_m12.93494576442133_14_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.49992604759511_m12.96137329471482_m38.474439318456355_m12.934385438592946_15_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.501118552381065_m12.96079542837906_m38.47527163205215_m12.934807266431482_16_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.5002628268697_m12.96291845683024_m38.474969528890774_m12.935323121601408_17_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.50148403583942_m12.959860721735883_m38.473738459371354_m12.932454395581454_18_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.50449792282273_m12.960749843857812_m38.47312892278054_m12.934855166198494_19_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.501890924160584_m12.961519343957082_m38.474698888311465_m12.933784238917099_20_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.5007597052321_m12.959937832694857_m38.4746987632653_m12.934062022103753_21_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.50140543268961_m12.962059262780658_m38.47465373021255_m12.933888947418161_22_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.502953641658266_m12.963948797656334_m38.473898022861405_m12.935715998602321_23_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.50187151458964_m12.960561184183135_m38.47481398113283_m12.934620537016835_24_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.501402782516365_m12.96039753671852_m38.47361068224981_m12.93529938288262_25_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.49976769604373_m12.961496062259055_m38.474156963136124_m12.934301693944008_26_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.500486678608006_m12.959835378598493_m38.474758327361364_m12.936386705101464_27_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.50066682622218_m12.960812050907476_m38.47216531424985_m12.934908879722355_28_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.50234447884447_m12.962113594988761_m38.47520010149299_m12.935206277553998_29_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar',
'../data/results/m38.501602450236554_m12.962836591132051_m38.47539177946605_m12.934754563384551_30_15_weight_heuristic_astar_closest_insertion_coords_weight_heuristic_astar'
]
# Shared start/end coordinate pairs (lat/lon, with 'm' marking a minus sign)
# for the 31 benchmark runs.  Every result-file list below reuses these same
# coordinates in the same order; only the shortest-path algorithm and the
# tour-construction heuristic in the filename differ.
_RUN_COORDS = [
    'm38.49905230272549_m12.960541036813272_m38.47398437502447_m12.935229804750517',
    'm38.499613596916454_m12.961216812913838_m38.47548425277925_m12.934070088770925',
    'm38.50194412971296_m12.9624676749896_m38.472997875909336_m12.93487294586209',
    'm38.498581450235484_m12.9619559499298_m38.475389747728904_m12.934784985867735',
    'm38.49996781691653_m12.95986050660711_m38.474784788561664_m12.933876269107426',
    'm38.50102106363388_m12.960490752611433_m38.47530338641699_m12.935070144844953',
    'm38.50130467830106_m12.961824509508324_m38.47401790429914_m12.931900743216616',
    'm38.49922134252434_m12.959719860966981_m38.47230805005746_m12.932265326057136',
    'm38.499023327452186_m12.96043952416794_m38.47288011285585_m12.935194971832598',
    'm38.50157434253033_m12.960963430607745_m38.47367938539426_m12.934943284635198',
    'm38.50173708534096_m12.961142864695704_m38.472735872376994_m12.934002867600155',
    'm38.501770300615625_m12.962923879056133_m38.47456776187294_m12.933458582758297',
    'm38.500350682125635_m12.962082099834404_m38.474252489838484_m12.933159784666088',
    'm38.499541957062846_m12.961275066314741_m38.47543065870227_m12.933077757489697',
    'm38.50096584572687_m12.960054889071188_m38.47537633515103_m12.93494576442133',
    'm38.49992604759511_m12.96137329471482_m38.474439318456355_m12.934385438592946',
    'm38.501118552381065_m12.96079542837906_m38.47527163205215_m12.934807266431482',
    'm38.5002628268697_m12.96291845683024_m38.474969528890774_m12.935323121601408',
    'm38.50148403583942_m12.959860721735883_m38.473738459371354_m12.932454395581454',
    'm38.50449792282273_m12.960749843857812_m38.47312892278054_m12.934855166198494',
    'm38.501890924160584_m12.961519343957082_m38.474698888311465_m12.933784238917099',
    'm38.5007597052321_m12.959937832694857_m38.4746987632653_m12.934062022103753',
    'm38.50140543268961_m12.962059262780658_m38.47465373021255_m12.933888947418161',
    'm38.502953641658266_m12.963948797656334_m38.473898022861405_m12.935715998602321',
    'm38.50187151458964_m12.960561184183135_m38.47481398113283_m12.934620537016835',
    'm38.501402782516365_m12.96039753671852_m38.47361068224981_m12.93529938288262',
    'm38.49976769604373_m12.961496062259055_m38.474156963136124_m12.934301693944008',
    'm38.500486678608006_m12.959835378598493_m38.474758327361364_m12.936386705101464',
    'm38.50066682622218_m12.960812050907476_m38.47216531424985_m12.934908879722355',
    'm38.50234447884447_m12.962113594988761_m38.47520010149299_m12.935206277553998',
    'm38.501602450236554_m12.962836591132051_m38.47539177946605_m12.934754563384551',
]


def _result_paths(algorithm, insertion):
    """Build the per-run result-file paths for one algorithm/heuristic pair.

    algorithm: shortest-path solver name embedded in the filename
               ('dijkstra', 'SPFA' or 'astar').
    insertion: tour-construction heuristic prefix ('closest' or 'further').
    Returns a list with one path per entry in _RUN_COORDS, in run order.
    """
    template = ('../data/results/{coords}_{run}_15_weight_heuristic_{alg}_'
                '{ins}_insertion_coords_weight_heuristic_{alg}')
    return [template.format(coords=coords, run=run, alg=algorithm, ins=insertion)
            for run, coords in enumerate(_RUN_COORDS)]


# Further-insertion result files, one list per shortest-path algorithm.
files_fi = _result_paths('dijkstra', 'further')
files_s_fi = _result_paths('SPFA', 'further')
files_a_fi = _result_paths('astar', 'further')
# Aggregate each run's solver time (stored in seconds, plotted in ms) for every
# combination of tour-construction heuristic (nearest neighbour, closest
# insertion, further insertion) and shortest-path algorithm (bidirectional
# Dijkstra, SPFA, A*), then draw grouped bars with 95% confidence intervals.
#
# NOTE(review): the original code read each group into locals named dados_nn /
# dados_ci / dados_fi even though e.g. `files_s` holds SPFA results — the
# nested loop below replaces that nine-fold copy/paste.
heuristic_groups = [
    (files, files_s, files_a),             # nearest neighbour
    (files_ci, files_s_ci, files_a_ci),    # closest insertion
    (files_fi, files_s_fi, files_a_fi),    # further insertion
]
samples = [[[] for _ in group] for group in heuristic_groups]
for run in range(len(files)):
    for g, group in enumerate(heuristic_groups):
        for s, file_list in enumerate(group):
            dados = dict(open_file(file_list[run]))
            # total_time is in seconds; convert to milliseconds for the plot.
            samples[g][s].append(float(dados.get('total_time')) * 1000)

# Per heuristic, compute (mean, CI half-width) for each solver's sample set.
summary = []
for group in samples:
    stats = [mean_confidence_interval(times, 0.95) for times in group]
    summary.append(([m for m, _ in stats], [h for _, h in stats]))
(medias, erros), (medias_ci, erros_ci), (medias_fi, erros_fi) = summary

labels = ['Bidirectional Dijkstra', 'SPFA', 'A-star']
x = np.arange(len(labels))  # the label locations
width = 0.45  # the width of the bars
print(medias, medias_ci, medias_fi)
fig, ax = plt.subplots()
ax.bar(x - width/3, medias, width/3, yerr=erros, label='Nearest Neighbor', zorder=10)
ax.bar(x, medias_ci, width/3, yerr=erros_ci, label='Closest Insertion', zorder=10)
ax.bar(x + width/3, medias_fi, width/3, yerr=erros_fi, label='Further Insertion', zorder=10)
plt.yscale('log')
plt.ylabel('Time [ms]', fontweight="bold", fontsize=11)
plt.ylim(10**(3), 10**(5))
plt.grid(True, which="both", ls="-", linewidth=0.1, color='0.10', zorder=0.5)
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend(numpoints=1, loc="upper right", ncol=3, prop={'size': 9})
fig.tight_layout()
plt.show()
from .activations import *
from ..layers.convolutions import Convolutional, Cond_Convolutional
import math
import numpy as np
class Shuffle_new(nn.Module):
    """ShuffleNet-v2 style unit.

    The input channels are split: the left part is passed through untouched,
    the right part goes through a depthwise conv followed by a 1x1 pointwise
    conv; both are concatenated and interleaved by a channel shuffle.
    """

    def __init__(self, filters_in, filters_out, kernel_size=3 ,c_tag=0.5, groups=3, dila=1):
        super(Shuffle_new, self).__init__()
        # c_tag controls the split ratio between the identity and conv paths.
        self.left_part = round(c_tag * filters_in)
        self.right_part = filters_out - self.left_part
        # Depthwise conv (groups == channels); pad keeps spatial size fixed.
        self.__dw = Convolutional(filters_in=self.right_part, filters_out=self.right_part,
                                  kernel_size=kernel_size, stride=1, pad=(kernel_size - 1) // 2,
                                  groups=self.right_part, dila=dila, norm="bn")
        # 1x1 pointwise conv mixing the depthwise output across channels.
        self.__pw1 = Convolutional(filters_in=self.right_part, filters_out=self.right_part,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="leaky")
        self.groups = groups

    def channel_shuffle(self, features):
        """Interleave channels across `self.groups` groups (channel shuffle)."""
        n, c, h, w = features.data.size()
        assert (c % self.groups == 0)
        per_group = c // self.groups
        shuffled = features.view(n, self.groups, per_group, h, w)
        shuffled = torch.transpose(shuffled, 1, 2).contiguous()
        return shuffled.view(n, -1, h, w)

    def forward(self, x):
        identity = x[:, :self.left_part, :, :].contiguous()
        branch = x[:, self.left_part:, :, :].contiguous()
        branch = self.__pw1(self.__dw(branch))
        return self.channel_shuffle(torch.cat((identity, branch), 1))
class Shuffle_Cond_RFA(nn.Module):
    """Two-branch receptive-field aggregation block.

    Both channel splits pass through a conditionally-parameterized dilated
    depthwise conv plus a 1x1 pointwise conv, each branch using a different
    dilation rate; the branch outputs are summed element-wise.

    NOTE(review): both branches are sized with `self.right_part` channels, so
    this appears to assume left_part == right_part (i.e. filters_in ==
    filters_out with c_tag=0.5) — confirm for other configurations.
    """

    def __init__(self, filters_in, filters_out, c_tag=0.5, groups=3, dila_r=4, dila_l=6):
        super(Shuffle_Cond_RFA, self).__init__()
        self.left_part = round(c_tag * filters_in)
        self.right_part = filters_out - self.left_part
        # pad == dilation keeps the dilated 3x3 depthwise conv size-preserving.
        self.__dw_right = Cond_Convolutional(filters_in=self.right_part, filters_out=self.right_part,
                                             kernel_size=3, stride=1, pad=dila_r,
                                             groups=self.right_part, dila=dila_r, bias=True, norm="bn")
        self.__pw_right = Convolutional(filters_in=self.right_part, filters_out=self.right_part,
                                        kernel_size=1, stride=1, pad=0, norm="bn", activate="leaky")
        self.__dw_left = Cond_Convolutional(filters_in=self.right_part, filters_out=self.right_part,
                                            kernel_size=3, stride=1, pad=dila_l,
                                            groups=self.right_part, dila=dila_l, bias=True, norm="bn")
        self.__pw1_left = Convolutional(filters_in=self.right_part, filters_out=self.right_part,
                                        kernel_size=1, stride=1, pad=0, norm="bn", activate="leaky")

    def forward(self, x):
        branch_l = x[:, :self.left_part, :, :].contiguous()
        branch_r = x[:, self.left_part:, :, :].contiguous()
        branch_l = self.__pw1_left(self.__dw_left(branch_l))
        branch_r = self.__pw_right(self.__dw_right(branch_r))
        return branch_l + branch_r
class Shuffle_new_s(nn.Module):
    """Dense variant of the shuffle unit.

    The whole input passes through a depthwise + pointwise conv pair and is
    concatenated with the identity path (doubling the channel count) before
    the channel shuffle. `filters_out` and `c_tag` are accepted for interface
    parity with Shuffle_new but are not used here.
    """

    def __init__(self, filters_in, filters_out, kernel_size=3 ,c_tag=0.5, groups=3, dila=1):
        super(Shuffle_new_s, self).__init__()
        # Depthwise conv over all input channels; pad keeps spatial size fixed.
        self.__dw = Convolutional(filters_in=filters_in, filters_out=filters_in,
                                  kernel_size=kernel_size, stride=1, pad=(kernel_size - 1) // 2,
                                  groups=filters_in, dila=dila, norm="bn")
        self.__pw1 = Convolutional(filters_in=filters_in, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="leaky")
        self.groups = groups

    def channel_shuffle(self, features):
        """Interleave channels across `self.groups` groups (channel shuffle)."""
        n, c, h, w = features.data.size()
        assert (c % self.groups == 0)
        per_group = c // self.groups
        out = features.view(n, self.groups, per_group, h, w)
        out = torch.transpose(out, 1, 2).contiguous()
        return out.view(n, -1, h, w)

    def forward(self, x):
        branch = self.__pw1(self.__dw(x))
        return self.channel_shuffle(torch.cat((x, branch), 1))
class Shuffle_RFA(nn.Module):
    """Receptive-field aggregation shuffle block.

    Each channel split passes through its own dilated depthwise conv plus a
    1x1 pointwise conv (different dilation per branch); the results are
    concatenated and channel-shuffled.

    NOTE(review): both branches are sized with `self.right_part` channels, so
    this appears to assume left_part == right_part — confirm for other
    configurations.
    """

    def __init__(self, filters_in, filters_out, c_tag=0.5, groups=3, dila_r=4, dila_l=6):
        super(Shuffle_RFA, self).__init__()
        self.left_part = round(c_tag * filters_in)
        self.right_part = filters_out - self.left_part
        # pad == dilation keeps the dilated 3x3 depthwise conv size-preserving.
        self.__dw_right = Convolutional(filters_in=self.right_part, filters_out=self.right_part,
                                        kernel_size=3, stride=1, pad=dila_r,
                                        groups=self.right_part, dila=dila_r, norm="bn")
        self.__pw_right = Convolutional(filters_in=self.right_part, filters_out=self.right_part,
                                        kernel_size=1, stride=1, pad=0, norm="bn", activate="relu")
        self.__dw_left = Convolutional(filters_in=self.right_part, filters_out=self.right_part,
                                       kernel_size=3, stride=1, pad=dila_l,
                                       groups=self.right_part, dila=dila_l, norm="bn")
        self.__pw1_left = Convolutional(filters_in=self.right_part, filters_out=self.right_part,
                                        kernel_size=1, stride=1, pad=0, norm="bn", activate="relu")
        self.groups = groups

    def channel_shuffle(self, features):
        """Interleave channels across `self.groups` groups (channel shuffle)."""
        n, c, h, w = features.data.size()
        assert (c % self.groups == 0)
        per_group = c // self.groups
        out = features.view(n, self.groups, per_group, h, w)
        out = torch.transpose(out, 1, 2).contiguous()
        return out.view(n, -1, h, w)

    def forward(self, x):
        branch_l = x[:, :self.left_part, :, :].contiguous()
        branch_r = x[:, self.left_part:, :, :].contiguous()
        branch_l = self.__pw1_left(self.__dw_left(branch_l))
        branch_r = self.__pw_right(self.__dw_right(branch_r))
        return self.channel_shuffle(torch.cat((branch_l, branch_r), 1))
class DRF3(nn.Module):
    """Dynamic receptive field block with a 3x3 depthwise kernel.

    The right channel split is convolved with a depthwise 3x3 kernel whose
    dilation `rfa` is chosen at runtime by a gating branch (global pool over
    the first right channel -> 1x1 conv -> sigmoid), so the receptive field
    adapts to the input; the left split is passed through unchanged, then
    both are concatenated and channel-shuffled.
    """

    def __init__(self, filters_in, filters_out, c_tag=0.5, groups=3):
        super(DRF3, self).__init__()
        self.left_part = round(c_tag * filters_in)
        self.right_part = filters_out - self.left_part
        # Depthwise 3x3 weight applied manually in forward() so the dilation
        # can vary per call.  Fix: the original wrapped `torch.Tensor(...)`,
        # which is UNINITIALIZED memory (may contain NaN/garbage); initialize
        # explicitly instead.
        weight = torch.empty(self.right_part, 1, 3, 3)
        nn.init.kaiming_normal_(weight, mode='fan_out', nonlinearity='leaky_relu')
        self.__right_weight = nn.Parameter(weight, requires_grad=True)
        self.__bn = nn.BatchNorm2d(self.right_part, affine=True)
        self.__pw_right = Convolutional(filters_in=self.right_part, filters_out=self.right_part,
                                        kernel_size=1, stride=1, pad=0, norm="bn", activate="leaky")
        self.__globpool = nn.AdaptiveAvgPool2d(1)
        # Gating branch: 1-channel 1x1 conv on the pooled statistic.
        self.__fc = Convolutional(1, 1, 1, 1, 0, norm='bn', activate="leaky")
        self.groups = groups

    def channel_shuffle(self, features):
        """Interleave channels across `self.groups` groups (channel shuffle)."""
        batchsize, num_channels, height, width = features.data.size()
        assert (num_channels % self.groups == 0)
        channels_per_group = num_channels // self.groups
        features = features.view(batchsize, self.groups, channels_per_group, height, width)
        features = torch.transpose(features, 1, 2).contiguous()
        return features.view(batchsize, -1, height, width)

    def forward(self, x):
        left = x[:, :self.left_part, :, :].contiguous()
        right = x[:, self.left_part:, :, :].contiguous()
        fc = self.__fc(self.__globpool(right[:, 0:1, :, :]))
        # Detach: the integer dilation choice is not differentiable anyway.
        fcc = fc.detach().cpu()
        # sigmoid() is in (0, 1), so rfa = round(s*2 + 1) is in {1, 2, 3}.
        rfa = round(torch.sigmoid(torch.sum(fcc)).item() * 2 + 1)
        # padding == dilation keeps the 3x3 depthwise conv size-preserving.
        right = self.__bn(F.conv2d(right, self.__right_weight, stride=1,
                                   padding=rfa, dilation=rfa, groups=self.right_part))
        right = self.__pw_right(right)
        return self.channel_shuffle(torch.cat((left, right), 1))
class DRF5(nn.Module):
    """Dynamic receptive field block with a 5x5 depthwise kernel.

    Same scheme as DRF3 but with a 5x5 kernel: a gating branch (global pool
    over the first right channel -> 1x1 conv -> sigmoid) selects the dilation
    `rfa` at runtime; the left split passes through unchanged, then both are
    concatenated and channel-shuffled.
    """

    def __init__(self, filters_in, filters_out, c_tag=0.5, groups=3):
        super(DRF5, self).__init__()
        self.left_part = round(c_tag * filters_in)
        self.right_part = filters_out - self.left_part
        # Depthwise 5x5 weight applied manually in forward() so the dilation
        # can vary per call.  Fix: the original wrapped `torch.Tensor(...)`,
        # which is UNINITIALIZED memory (may contain NaN/garbage); initialize
        # explicitly instead.
        weight = torch.empty(self.right_part, 1, 5, 5)
        nn.init.kaiming_normal_(weight, mode='fan_out', nonlinearity='leaky_relu')
        self.__right_weight = nn.Parameter(weight, requires_grad=True)
        self.__bn = nn.BatchNorm2d(self.right_part, affine=True)
        self.__pw_right = Convolutional(filters_in=self.right_part, filters_out=self.right_part,
                                        kernel_size=1, stride=1, pad=0, norm="bn", activate="leaky")
        self.__globpool = nn.AdaptiveAvgPool2d(1)
        # Gating branch: 1-channel 1x1 conv on the pooled statistic.
        self.__fc = Convolutional(1, 1, 1, 1, 0, norm='bn', activate="leaky")
        self.groups = groups

    def channel_shuffle(self, features):
        """Interleave channels across `self.groups` groups (channel shuffle)."""
        batchsize, num_channels, height, width = features.data.size()
        assert (num_channels % self.groups == 0)
        channels_per_group = num_channels // self.groups
        features = features.view(batchsize, self.groups, channels_per_group, height, width)
        features = torch.transpose(features, 1, 2).contiguous()
        return features.view(batchsize, -1, height, width)

    def forward(self, x):
        left = x[:, :self.left_part, :, :].contiguous()
        right = x[:, self.left_part:, :, :].contiguous()
        fc = self.__fc(self.__globpool(right[:, 0:1, :, :]))
        # Detach: the integer dilation choice is not differentiable anyway.
        fcc = fc.detach().cpu()
        # sigmoid() is in (0, 1), so rfa = round(s*2 + 1) is in {1, 2, 3}.
        rfa = round(torch.sigmoid(torch.sum(fcc)).item() * 2 + 1)
        # padding == 2*dilation keeps the 5x5 depthwise conv size-preserving.
        right = self.__bn(F.conv2d(right, self.__right_weight, stride=1,
                                   padding=2 * rfa, dilation=rfa, groups=self.right_part))
        right = self.__pw_right(right)
        return self.channel_shuffle(torch.cat((left, right), 1))
|
#Import necessary package
import requests
import re
from bs4 import BeautifulSoup
import json
import html
import pandas as pd
import numpy as np
import datetime as dt
import configparser
import os
# Configuration: read scraping parameters from config.ini next to this script.
#   [general] mall          - mall identifier stamped onto every output row
#   [url]     shoplisturl   - listing page scraped for 'Shopping' categories
#   [url]     fnblisturl    - listing page scraped for 'Dining' categories
#   [url]     shopdetailurl - base URL for individual shop detail pages
#   [api]     shoplistapi   - JSON API endpoint returning the full shop list
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), 'config.ini'))
mall = config['general']['mall']
shoplisturl = config['url']['shoplisturl']
fnblisturl = config['url']['fnblisturl']
shopdetailurl = config['url']['shopdetailurl']
shoplistapi = config['api']['shoplistapi']
def getShopCategory():
    """Scrape the mall's Shopping and Dining pages for shop categories.

    Returns a DataFrame with columns
    ['mall', 'type', 'shop_category_id', 'shop_category_name', 'update_date'];
    rows for the 'ALL' catch-all filter are dropped.

    Fixes: `DataFrame.append` (removed in pandas 2.0) replaced by building a
    row list and constructing the frame once; builtin `type` no longer
    shadowed; bare `except:` narrowed.
    """
    rows = []
    for shop_type, url in zip(['Shopping', 'Dining'], [shoplisturl, fnblisturl]):
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser', from_encoding='iso-8859-1')
        for cat_list in soup.find_all('div', class_='category-list-items'):
            for cat_list_cat in cat_list.find_all('div', class_='category-list-category'):
                for cat in cat_list_cat.find_all('li'):
                    # NOTE(review): id and name are scraped from the same text
                    # node; the page apparently exposes no separate category
                    # id — confirm against the site markup.
                    try:
                        label = cat.text.strip()
                    except Exception:
                        label = np.nan
                    rows.append({
                        'type': shop_type,
                        'shop_category_id': label,
                        'shop_category_name': label,
                    })
    shopcategory = pd.DataFrame(rows, columns=['type', 'shop_category_id', 'shop_category_name'])
    shopcategory['update_date'] = dt.date.today()
    shopcategory['mall'] = mall
    # 'ALL' is the page's catch-all filter entry, not a real category.
    shopcategory.drop(shopcategory[shopcategory.shop_category_id == 'ALL'].index, inplace=True)
    shopcategory = shopcategory.loc[:, ['mall', 'type', 'shop_category_id', 'shop_category_name', 'update_date']]
    return shopcategory
#Get shop master data and export into csv
def getShopMaster():
    """Build the shop master table for the configured mall.

    Combines three sources:
      1. ``getShopCategory()`` -- category reference (name -> id lookup),
      2. the mall's shop-list JSON API -- one row per shop,
      3. each shop's detail page -- categories, floor, phone, opening hours.

    Returns
    -------
    pandas.DataFrame
        One row per shop with the export column order. Missing individual
        fields degrade to NaN. Requires network access.
    """
    shopcategory = getShopCategory()
    # --- 1) Shop list from the JSON API -----------------------------------
    shoplist_rows = []
    shoplistresponse = json.loads(requests.get(shoplistapi).content)
    for shop in shoplistresponse['search']:
        try:
            shop_id = shop['url']
        except Exception:
            shop_id = np.nan
        try:
            # NOTE(review): shop_number reuses the 'url' key, same as
            # shop_id -- confirm this is intended.
            shop_number = shop['url']
        except Exception:
            shop_number = np.nan
        try:
            shop_name = shop['label_en']
        except Exception:
            shop_name = np.nan
        try:
            shop_name_zh = shop['label_tc']
        except Exception:
            shop_name_zh = np.nan
        try:
            shop_type = shop['category']
        except Exception:
            shop_type = np.nan
        shoplist_rows.append({
            'type': shop_type,
            'shop_id': str(shop_id),
            'shop_number': shop_number,
            'shop_name_en': shop_name,
            'shop_name_tc': shop_name_zh
        })
    # Accumulate rows and build once: DataFrame.append was removed in
    # pandas 2.0.
    shoplist = pd.DataFrame(shoplist_rows)
    # --- 2) Per-shop details scraped from each detail page ----------------
    shopdetail_rows = []
    for shop_id in shoplist['shop_id'].unique():
        combine_url = shopdetailurl + str(shop_id).replace('&', '%26')
        page = requests.get(combine_url)
        soup = BeautifulSoup(page.content, 'html.parser', from_encoding='iso-8859-1')
        # Reset defaults per shop so a page with no matching sections yields
        # NaN instead of leaking values from the previous shop (bug in the
        # original version, which only assigned inside the loops below).
        shop_category_id = np.nan
        shop_category_name = np.nan
        phone = np.nan
        opening_hours = np.nan
        shop_floor = np.nan
        for shop_info in soup.find_all('div', class_='shop-info-col'):
            try:
                shop_category_name = ';'.join(
                    [cat.text for cat in shop_info.find_all('div', class_='shop-category')])
            except Exception:
                shop_category_name = np.nan
            try:
                # Map each scraped category name back to its id via the
                # reference table built by getShopCategory().
                shop_category_id = ';'.join(
                    [shopcategory.loc[shopcategory['shop_category_name'] == cat,
                                      'shop_category_id'].values[0]
                     for cat in shop_category_name.split(';')])
            except Exception:
                shop_category_id = np.nan
            try:
                phone = shop_info.find('p', class_='shop-phone').text.replace('Telephone: ', '').replace(' ', '')
            except Exception:
                phone = np.nan
            try:
                opening_hours = shop_info.find('p', class_='shop-time').text.replace('Opening Hours: ', '').strip()
            except Exception:
                opening_hours = np.nan
        for shop_map in soup.find_all('section', class_='vc_row shop-map'):
            try:
                shop_floor = shop_map.find('div', class_='shop-map-floor').text.replace('/', '').replace('\n', '').replace('\r', '').replace('\t', '')
            except Exception:
                shop_floor = np.nan
        shopdetail_rows.append({
            'shop_id': shop_id,
            'shop_floor': shop_floor,
            'shop_category_id': shop_category_id,
            'shop_category_name': shop_category_name,
            'phone': phone,
            'opening_hours': opening_hours
        })
    shopdetail = pd.DataFrame(shopdetail_rows)
    # --- 3) Merge shop list and shop detail into shop master --------------
    shopmaster = pd.merge(shoplist, shopdetail, on='shop_id')
    shopmaster['update_date'] = dt.date.today()
    shopmaster['mall'] = mall
    # Placeholder columns filled by downstream processes.
    shopmaster['loyalty_offer'] = np.nan
    shopmaster['voucher_acceptance'] = np.nan
    shopmaster['tag'] = np.nan
    shopmaster = shopmaster.loc[:, ['mall', 'type', 'shop_id', 'shop_name_en', 'shop_name_tc', 'shop_number', 'shop_floor', 'phone', 'opening_hours', 'loyalty_offer', 'voucher_acceptance', 'shop_category_id', 'shop_category_name', 'tag', 'update_date']]
    return shopmaster
from dataclasses import dataclass, field
from enum import auto, Enum
from typing import Dict, Optional
from cu_pass.dpa_calculator.cbsd.cbsd import CbsdCategories
from cu_pass.dpa_calculator.number_of_aps.number_of_aps_calculator import NumberOfCbsdsCalculatorOptions
# Mapping from CBSD category to a distance in kilometers.
NEIGHBORHOOD_DISTANCES_TYPE = Dict[CbsdCategories, int]
# Default simulation radii (km) per CBSD category.
SIMULATION_DISTANCES_DEFAULT: NEIGHBORHOOD_DISTANCES_TYPE = {CbsdCategories.A: 250, CbsdCategories.B: 500}
class PopulationRetrieverTypes(Enum):
    """Selector for which population-retrieval strategy to use."""
    census = auto()
    region_type = auto()
@dataclass
class CbsdDeploymentOptions:
    """Options controlling a CBSD deployment simulation.

    All defaults are created per instance via ``default_factory`` so that
    mutating one instance's options can never affect another instance or the
    module-level defaults.
    """
    # Fixed population to use instead of a retrieved value (None = retrieve).
    population_override: Optional[int] = None
    # Copy the module-level default dict: the original
    # ``lambda: SIMULATION_DISTANCES_DEFAULT`` handed every instance the
    # same shared dict, so mutations leaked across instances.
    simulation_distances_in_kilometers: NEIGHBORHOOD_DISTANCES_TYPE = field(
        default_factory=lambda: dict(SIMULATION_DISTANCES_DEFAULT))
    population_retriever_type: PopulationRetrieverTypes = PopulationRetrieverTypes.census
    # Use default_factory instead of a single shared
    # NumberOfCbsdsCalculatorOptions() instance evaluated at class
    # definition time (shared mutable default).
    number_of_cbsds_calculator_options: NumberOfCbsdsCalculatorOptions = field(
        default_factory=NumberOfCbsdsCalculatorOptions)
|
"""
Sponge Knowledge Base
Removing scheduled events
"""
from java.util.concurrent.atomic import AtomicInteger
def onInit():
    """Initialize globals and publish them as Sponge engine variables."""
    global eventEntry, eventCounter
    eventEntry = None  # handle of the scheduled "e1" event; assigned in onStartup
    eventCounter = AtomicInteger(0)  # thread-safe count of received "e1" events
    sponge.setVariable("eventCounter", eventCounter)
    sponge.setVariable("allowNumber", 2)  # number of "e1" events allowed before removal
class Trigger1(Trigger):
    """Counts every received "e1" event."""
    def onConfigure(self):
        # React to the periodically scheduled "e1" event.
        self.withEvent("e1")
    def onRun(self, event):
        global eventCounter
        eventCounter.incrementAndGet()
        self.logger.debug("Received event {}, counter: {}", event.name, eventCounter)
        # Trigger2 removes the scheduled "e1" after allowNumber events, so
        # the counter should never exceed allowNumber and this branch should
        # never execute.
        if eventCounter.get() > sponge.getVariable("allowNumber"):
            self.logger.debug("This line should not be displayed!")
class Trigger2(Trigger):
    """On "e2", cancels the scheduled "e1" event created in onStartup."""
    def onConfigure(self):
        self.withEvent("e2")
    def onRun(self, event):
        self.logger.debug("Removing entry")
        global eventEntry
        # Unschedule the periodic "e1" event so no further instances fire.
        sponge.removeEvent(eventEntry)
def onStartup():
    """Schedule the periodic "e1" event and a one-shot "e2" that cancels it."""
    global eventEntry
    start = 500      # ms before the first "e1" is sent
    interval = 1000  # ms between subsequent "e1" events
    eventEntry = sponge.event("e1").sendAfter(start, interval)
    # "e2" arrives after allowNumber intervals and triggers removal of "e1".
    sponge.event("e2").sendAfter(interval * sponge.getVariable("allowNumber"))
|
import unittest
from conans.util.misc import make_tuple
class MakeTupleTestCase(unittest.TestCase):
    """Unit tests for conans.util.misc.make_tuple."""

    def test_corner_cases(self):
        # None passes through unchanged; a bare string becomes a 1-tuple.
        self.assertIsNone(make_tuple(None))
        self.assertTupleEqual(make_tuple("one"), ("one",))

    def test_iterable(self):
        # Lists, tuples and dict views convert element by element.
        sample = {1: "a", 2: "b", 3: "c"}
        self.assertTupleEqual(make_tuple([1, 2, 3]), (1, 2, 3))
        self.assertTupleEqual(make_tuple(("one", "two")), ("one", "two"))
        self.assertTupleEqual(make_tuple(sample.keys()), (1, 2, 3))
        self.assertTupleEqual(make_tuple(sample.values()), ("a", "b", "c"))

    def test_generator(self):
        # Generators are consumed into a tuple.
        def numbers():
            yield from [1, 2, 3]

        self.assertTupleEqual(make_tuple(numbers()), (1, 2, 3))
|
from django.test import TestCase, Client
from django.contrib.auth.models import User
from ..models import (
DomainProvider,
Contact,
)
class TestSetup(TestCase):
    """
    Set up users, providers, tlds, etc. for testing.
    """

    # Fixtures are loaded in declaration order; later fixtures reference
    # earlier ones (e.g. tld_providers depends on providers and tlds).
    fixtures = ["test_auth.json",
                "contact_types.json",
                "providers.json",
                "tlds.json",
                "tld_providers.json",
                "test_account_details.json",
                "default_account_contacts.json",
                "test_contacts.json",
                "test_registrants.json",
                "test_nameservers.json",
                "test_domain_contacts.json",
                "test_registered_domains.json",
                ]

    def setUp(self):
        """
        Set up test suite: a test client plus commonly-used fixture objects
        (admin user, customer user with contact/registrant details, the
        CentralNic test provider and a generic admin contact).
        """
        super().setUp()
        self.client = Client()
        self.user = User.objects.get(username='testadmin')
        self.test_customer_user = User.objects.get(username='testcustomer')
        # Personal-detail and registrant records belonging to the customer,
        # both keyed by the same fixture email address.
        self.joe_user = self.test_customer_user.personal_details.filter(
            email='joeuser@test.com'
        ).first()
        self.joe_user_registrant = self.test_customer_user.registrants.filter(
            email='joeuser@test.com'
        ).first()
        self.centralnic_test = DomainProvider.objects.get(
            slug="centralnic-test"
        )
        self.generic_admin_contact = Contact.objects.get(
            registry_id="contact-123"
        )

    def api_login(self,
                  username="testcustomer",
                  password="imacust1"):
        """
        Log client in using api-token-auth endpoint

        :returns: str JSON Web token (prefixed with "JWT " for the
            Authorization header)
        """
        credentials = {
            "username": username,
            "password": password
        }
        response = self.client.post('/api-token-auth',
                                    secure=True,
                                    data=credentials)
        data = response.data
        return 'JWT ' + data["token"]

    def login_client(self):
        """
        Log user in to API.

        :returns: logged in session
        """
        self.client.login(username="testcustomer", password="imacust1")
|
#!/usr/bin/env pytest
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test Zarr driver
# Author: Even Rouault <even.rouault@spatialys.com>
#
###############################################################################
# Copyright (c) 2021, Even Rouault <even.rouault@spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import array
import base64
import json
import math
import struct
import sys
from osgeo import gdal
from osgeo import osr
import gdaltest
import pytest
@pytest.mark.parametrize("dtype,structtype,gdaltype,fill_value,nodata_value",
                         [["!b1", 'B', gdal.GDT_Byte, None, None],
                          ["!i1", 'b', gdal.GDT_Int16, None, None],
                          ["!i1", 'b', gdal.GDT_Int16, -1, -1],
                          ["!u1", 'B', gdal.GDT_Byte, None, None],
                          ["!u1", 'B', gdal.GDT_Byte, "1", 1],  # not really legit to have the fill_value as a str
                          ["<i2", 'h', gdal.GDT_Int16, None, None],
                          [">i2", 'h', gdal.GDT_Int16, None, None],
                          ["<i4", 'i', gdal.GDT_Int32, None, None],
                          [">i4", 'i', gdal.GDT_Int32, None, None],
                          ["<i8", 'q', gdal.GDT_Int64, None, None],
                          ["<i8", 'q', gdal.GDT_Int64, -(1 << 63), -(1 << 63)],
                          ["<i8", 'q', gdal.GDT_Int64, str(-(1 << 63)), -(1 << 63)],  # not really legit to have the fill_value as a str
                          [">i8", 'q', gdal.GDT_Int64, None, None],
                          ["<u2", 'H', gdal.GDT_UInt16, None, None],
                          [">u2", 'H', gdal.GDT_UInt16, None, None],
                          ["<u4", 'I', gdal.GDT_UInt32, None, None],
                          [">u4", 'I', gdal.GDT_UInt32, None, None],
                          ["<u4", 'I', gdal.GDT_UInt32, 4000000000, 4000000000],
                          ["<u8", 'Q', gdal.GDT_UInt64, str((1 << 64) - 1), (1 << 64) - 1],  # not really legit to have the fill_value as a str, but libjson-c can't support numeric values in int64::max(), uint64::max() range.
                          [">u8", 'Q', gdal.GDT_UInt64, None, None],
                          ["<f4", 'f', gdal.GDT_Float32, None, None],
                          [">f4", 'f', gdal.GDT_Float32, None, None],
                          ["<f4", 'f', gdal.GDT_Float32, 1.5, 1.5],
                          ["<f4", 'f', gdal.GDT_Float32, "NaN", float('nan')],
                          ["<f4", 'f', gdal.GDT_Float32,
                           "Infinity", float('infinity')],
                          ["<f4", 'f', gdal.GDT_Float32,
                           "-Infinity", float('-infinity')],
                          ["<f8", 'd', gdal.GDT_Float64, None, None],
                          [">f8", 'd', gdal.GDT_Float64, None, None],
                          ["<f8", 'd', gdal.GDT_Float64, "NaN", float('nan')],
                          ["<f8", 'd', gdal.GDT_Float64,
                           "Infinity", float('infinity')],
                          ["<f8", 'd', gdal.GDT_Float64,
                           "-Infinity", float('-infinity')],
                          ["<c8", 'f', gdal.GDT_CFloat32, None, None],
                          [">c8", 'f', gdal.GDT_CFloat32, None, None],
                          ["<c16", 'd', gdal.GDT_CFloat64, None, None],
                          [">c16", 'd', gdal.GDT_CFloat64, None, None]])
@pytest.mark.parametrize("use_optimized_code_paths", [True, False])
def test_zarr_basic(dtype, structtype, gdaltype, fill_value, nodata_value, use_optimized_code_paths):
    """Create a minimal 5x4 Zarr array (2x3 chunks) of each dtype in /vsimem
    and verify metadata, nodata, and a range of read patterns (single value,
    whole blocks, missing block, full raster, negative/large steps)."""
    j = {
        "chunks": [
            2,
            3
        ],
        "compressor": None,
        "dtype": dtype,
        "fill_value": fill_value,
        "filters": None,
        "order": "C",
        "shape": [
            5,
            4
        ],
        "zarr_format": 2
    }
    try:
        gdal.Mkdir('/vsimem/test.zarr', 0)
        gdal.FileFromMemBuffer('/vsimem/test.zarr/.zarray', json.dumps(j))
        # Only tiles (0,0) and (0,1) are written; the rest of the raster is
        # made of missing blocks that must read back as the nodata value.
        # Complex types interleave (real, imag) pairs, hence 12 packed values.
        if gdaltype not in (gdal.GDT_CFloat32, gdal.GDT_CFloat64):
            tile_0_0_data = struct.pack(
                dtype[0] + (structtype * 6), 1, 2, 3, 5, 6, 7)
            tile_0_1_data = struct.pack(
                dtype[0] + (structtype * 6), 4, 0, 0, 8, 0, 0)
        else:
            tile_0_0_data = struct.pack(
                dtype[0] + (structtype * 12), 1, 11, 2, 0, 3, 0, 5, 0, 6, 0, 7, 0)
            tile_0_1_data = struct.pack(
                dtype[0] + (structtype * 12), 4, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0)
        gdal.FileFromMemBuffer('/vsimem/test.zarr/0.0', tile_0_0_data)
        gdal.FileFromMemBuffer('/vsimem/test.zarr/0.1', tile_0_1_data)
        with gdaltest.config_option('GDAL_ZARR_USE_OPTIMIZED_CODE_PATHS',
                                    'YES' if use_optimized_code_paths else 'NO'):
            ds = gdal.OpenEx('/vsimem/test.zarr', gdal.OF_MULTIDIM_RASTER)
            assert ds
            rg = ds.GetRootGroup()
            assert rg
            ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
        assert ar
        assert ar.GetDimensionCount() == 2
        assert [ar.GetDimensions()[i].GetSize() for i in range(2)] == [5, 4]
        assert ar.GetBlockSize() == [2, 3]
        if nodata_value is not None and math.isnan(nodata_value):
            assert math.isnan(ar.GetNoDataValue())
        else:
            assert ar.GetNoDataValue() == nodata_value
        assert ar.GetOffset() is None
        assert ar.GetScale() is None
        assert ar.GetUnit() == ''
        # Check reading one single value
        assert ar[1, 2].Read(buffer_datatype=gdal.ExtendedDataType.Create(gdal.GDT_Float64)) == \
            struct.pack('d' * 1, 7)
        # '!i1' (int8) is exposed as GDT_Int16, so native reads come back as
        # 16-bit values.
        if structtype == 'b':
            structtype_read = 'h'
        else:
            structtype_read = structtype
        # Read block 0,0
        if gdaltype not in (gdal.GDT_CFloat32, gdal.GDT_CFloat64):
            assert ar[0:2, 0:3].Read(buffer_datatype=gdal.ExtendedDataType.Create(gdal.GDT_Float64)) == \
                struct.pack('d' * 6, 1, 2, 3, 5, 6, 7)
            assert struct.unpack(
                structtype_read * 6, ar[0:2, 0:3].Read()) == (1, 2, 3, 5, 6, 7)
        else:
            assert ar[0:2, 0:3].Read(buffer_datatype=gdal.ExtendedDataType.Create(gdal.GDT_CFloat64)) == \
                struct.pack('d' * 12, 1, 11, 2, 0, 3, 0, 5, 0, 6, 0, 7, 0)
            assert struct.unpack(
                structtype * 12, ar[0:2, 0:3].Read()) == (1, 11, 2, 0, 3, 0, 5, 0, 6, 0, 7, 0)
        # Read block 0,1
        assert ar[0:2, 3:4].Read(buffer_datatype=gdal.ExtendedDataType.Create(gdal.GDT_Float64)) == \
            struct.pack('d' * 2, 4, 8)
        # Read block 1,1 (missing)
        nv = nodata_value if nodata_value else 0
        assert ar[2:4, 3:4].Read(buffer_datatype=gdal.ExtendedDataType.Create(gdal.GDT_Float64)) == \
            struct.pack('d' * 2, nv, nv)
        # Read whole raster
        assert ar.Read(buffer_datatype=gdal.ExtendedDataType.Create(gdal.GDT_Float64)) == \
            struct.pack('d' * 20,
                        1, 2, 3, 4,
                        5, 6, 7, 8,
                        nv, nv, nv, nv,
                        nv, nv, nv, nv,
                        nv, nv, nv, nv)
        if gdaltype not in (gdal.GDT_CFloat32, gdal.GDT_CFloat64):
            assert ar.Read() == array.array(structtype_read, [1, 2, 3, 4,
                                                              5, 6, 7, 8,
                                                              nv, nv, nv, nv,
                                                              nv, nv, nv, nv,
                                                              nv, nv, nv, nv])
        else:
            assert ar.Read() == array.array(structtype, [1, 11, 2, 0, 3, 0, 4, 0,
                                                         5, 0, 6, 0, 7, 0, 8, 0,
                                                         nv, 0, nv, 0, nv, 0, nv, 0,
                                                         nv, 0, nv, 0, nv, 0, nv, 0,
                                                         nv, 0, nv, 0, nv, 0, nv, 0])
        # Read with negative steps
        assert ar.Read(array_start_idx=[2, 1],
                       count=[2, 2],
                       array_step=[-1, -1],
                       buffer_datatype=gdal.ExtendedDataType.Create(gdal.GDT_Float64)) == \
            struct.pack('d' * 4, nv, nv, 6, 5)
        # array_step > 2
        assert ar.Read(array_start_idx=[0, 0],
                       count=[1, 2],
                       array_step=[0, 2],
                       buffer_datatype=gdal.ExtendedDataType.Create(gdal.GDT_Float64)) == \
            struct.pack('d' * 2, 1, 3)
        assert ar.Read(array_start_idx=[0, 0],
                       count=[3, 1],
                       array_step=[2, 0],
                       buffer_datatype=gdal.ExtendedDataType.Create(gdal.GDT_Float64)) == \
            struct.pack('d' * 3, 1, nv, nv)
        assert ar.Read(array_start_idx=[0, 1],
                       count=[1, 2],
                       array_step=[0, 2],
                       buffer_datatype=gdal.ExtendedDataType.Create(gdal.GDT_Float64)) == \
            struct.pack('d' * 2, 2, 4)
        assert ar.Read(array_start_idx=[0, 0],
                       count=[1, 2],
                       array_step=[0, 3],
                       buffer_datatype=gdal.ExtendedDataType.Create(gdal.GDT_Float64)) == \
            struct.pack('d' * 2, 1, 4)
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
@pytest.mark.parametrize("fill_value,expected_read_data", [[base64.b64encode(b'xyz').decode('utf-8'), ['abc', 'xyz']],
                                                           [None, ['abc', None]]])
def test_zarr_string(fill_value, expected_read_data):
    """A fixed-size string array (|S3): the present chunk reads back as its
    text, and the missing chunk falls back to fill_value (or None)."""
    zarray = {
        "chunks": [
            1
        ],
        "compressor": None,
        "dtype": '|S3',
        "fill_value": fill_value,
        "filters": [],
        "order": "C",
        "shape": [
            2
        ],
        "zarr_format": 2
    }
    try:
        gdal.Mkdir('/vsimem/test.zarr', 0)
        gdal.FileFromMemBuffer('/vsimem/test.zarr/.zarray', json.dumps(zarray))
        # Only chunk 0 exists; chunk 1 is deliberately missing.
        gdal.FileFromMemBuffer('/vsimem/test.zarr/0', b'abc')
        dataset = gdal.OpenEx('/vsimem/test.zarr', gdal.OF_MULTIDIM_RASTER)
        assert dataset
        root = dataset.GetRootGroup()
        assert root
        md_array = root.OpenMDArray(root.GetMDArrayNames()[0])
        assert md_array
        assert md_array.Read() == expected_read_data
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
# Check that all required elements are present in .zarray
@pytest.mark.parametrize("member",
                         [None, 'zarr_format', 'chunks', 'compressor', 'dtype',
                          'filters', 'order', 'shape', 'fill_value'])
def test_zarr_invalid_json_remove_member(member):
    """Opening must fail when a required .zarray member is missing.

    Exceptions: member=None is the unmodified control case (must open), and
    a missing 'fill_value' only emits a warning but still opens.
    """
    j = {
        "chunks": [
            2,
            3
        ],
        "compressor": None,
        "dtype": '!b1',
        "fill_value": None,
        "filters": None,
        "order": "C",
        "shape": [
            5,
            4
        ],
        "zarr_format": 2
    }
    if member:
        del j[member]
    try:
        gdal.Mkdir('/vsimem/test.zarr', 0)
        gdal.FileFromMemBuffer('/vsimem/test.zarr/.zarray', json.dumps(j))
        with gdaltest.error_handler():
            ds = gdal.OpenEx('/vsimem/test.zarr', gdal.OF_MULTIDIM_RASTER)
        if member == 'fill_value':
            # Missing fill_value: dataset opens but a warning is emitted.
            assert ds is not None
            assert gdal.GetLastErrorMsg() != ''
        elif member is None:
            assert ds
        else:
            assert ds is None
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
# Check bad values of members in .zarray
@pytest.mark.parametrize("dict_update", [{"chunks": None},
                                         {"chunks": "invalid"},
                                         {"chunks": [2]},
                                         {"chunks": [2, 0]},
                                         {"shape": None},
                                         {"shape": "invalid"},
                                         {"shape": [5]},
                                         {"shape": [5, 0]},
                                         {"chunks": [1 << 40, 1 << 40],
                                          "shape": [1 << 40, 1 << 40]},
                                         {"shape": [1 << 30, 1 << 30, 1 << 30],
                                          "chunks": [1, 1, 1]},
                                         {"dtype": None},
                                         {"dtype": 1},
                                         {"dtype": ""},
                                         {"dtype": "!"},
                                         {"dtype": "!b"},
                                         {"dtype": "<u16"},
                                         {"dtype": "<u0"},
                                         {"dtype": "<u10000"},
                                         {"fill_value": []},
                                         {"fill_value": "x"},
                                         {"fill_value": "NaN"},
                                         {"dtype": "!S1", "fill_value": 0},
                                         {"order": None},
                                         {"order": "invalid"},
                                         {"compressor": "invalid"},
                                         {"compressor": {}},
                                         {"compressor": {"id": "invalid"}},
                                         {"filters": "invalid"},
                                         {"filters": {}},
                                         {"filters": [{"missing_id": True}]},
                                         {"zarr_format": None},
                                         {"zarr_format": 1},
                                         ])
def test_zarr_invalid_json_wrong_values(dict_update):
    """Start from a valid .zarray, corrupt one (or two) members with the
    parametrized bad value, and verify the dataset refuses to open."""
    j = {
        "chunks": [
            2,
            3
        ],
        "compressor": None,
        "dtype": '!b1',
        "fill_value": None,
        "filters": None,
        "order": "C",
        "shape": [
            5,
            4
        ],
        "zarr_format": 2
    }
    j.update(dict_update)
    try:
        gdal.Mkdir('/vsimem/test.zarr', 0)
        gdal.FileFromMemBuffer('/vsimem/test.zarr/.zarray', json.dumps(j))
        with gdaltest.error_handler():
            ds = gdal.OpenEx('/vsimem/test.zarr', gdal.OF_MULTIDIM_RASTER)
        assert ds is None
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
# Check reading different compression methods
@pytest.mark.parametrize("datasetname,compressor", [('blosc.zarr', 'blosc'),
                                                    ('gzip.zarr', 'gzip'),
                                                    ('lz4.zarr', 'lz4'),
                                                    ('lzma.zarr', 'lzma'),
                                                    ('lzma_with_filters.zarr',
                                                     'lzma'),
                                                    ('zlib.zarr', 'zlib'),
                                                    ('zstd.zarr', 'zstd'),
                                                    ])
def test_zarr_read_compression_methods(datasetname, compressor):
    """Datasets compressed with a method compiled into the driver open and
    read correctly; methods absent from the driver's COMPRESSORS list must
    fail to open with an error."""
    compressors = gdal.GetDriverByName('Zarr').GetMetadataItem('COMPRESSORS')
    filename = 'data/zarr/' + datasetname
    if compressor not in compressors:
        with gdaltest.error_handler():
            ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER)
            assert ds is None
    else:
        ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER)
        rg = ds.GetRootGroup()
        assert rg
        ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
        assert ar
        assert ar.Read() == array.array('b', [1, 2])
@pytest.mark.parametrize("name", ["u1", "u2", "u4", "u8"])
def test_zarr_read_fortran_order(name):
    """Fortran-ordered arrays of each unsigned width read back in C order."""
    dataset_path = 'data/zarr/order_f_' + name + '.zarr'
    dataset = gdal.OpenEx(dataset_path, gdal.OF_MULTIDIM_RASTER)
    root = dataset.GetRootGroup()
    assert root
    md_array = root.OpenMDArray(root.GetMDArrayNames()[0])
    assert md_array
    expected = array.array('b', list(range(16)))
    got = md_array.Read(buffer_datatype=gdal.ExtendedDataType.Create(gdal.GDT_Byte))
    assert got == expected
def test_zarr_read_fortran_order_string():
    """A Fortran-ordered string array reads back in row-major order."""
    dataset = gdal.OpenEx('data/zarr/order_f_s3.zarr', gdal.OF_MULTIDIM_RASTER)
    root = dataset.GetRootGroup()
    assert root
    md_array = root.OpenMDArray(root.GetMDArrayNames()[0])
    assert md_array
    # Expected values are '000', '111', ..., '999', 'AAA', ..., 'FFF'.
    expected = [digit * 3 for digit in '0123456789ABCDEF']
    assert md_array.Read() == expected
def test_zarr_read_fortran_order_3d():
    """A 2x3x4 Fortran-ordered array reads back as sequential C-order bytes."""
    dataset = gdal.OpenEx('data/zarr/order_f_u1_3d.zarr', gdal.OF_MULTIDIM_RASTER)
    root = dataset.GetRootGroup()
    assert root
    md_array = root.OpenMDArray(root.GetMDArrayNames()[0])
    assert md_array
    expected = array.array('b', list(range(2 * 3 * 4)))
    got = md_array.Read(buffer_datatype=gdal.ExtendedDataType.Create(gdal.GDT_Byte))
    assert got == expected
def test_zarr_read_compound_well_aligned():
    """Read a compound dtype of two uint16 members with no padding: check the
    component layout (names, offsets, types), per-field reads, and the
    detailed MultiDimInfo values."""
    filename = 'data/zarr/compound_well_aligned.zarr'
    ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER)
    assert ds is not None
    rg = ds.GetRootGroup()
    assert rg
    ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
    assert ar
    dt = ar.GetDataType()
    assert dt.GetSize() == 4
    comps = dt.GetComponents()
    assert len(comps) == 2
    assert comps[0].GetName() == 'a'
    assert comps[0].GetOffset() == 0
    assert comps[0].GetType().GetNumericDataType() == gdal.GDT_UInt16
    assert comps[1].GetName() == 'b'
    assert comps[1].GetOffset() == 2
    assert comps[1].GetType().GetNumericDataType() == gdal.GDT_UInt16
    # Per-field access; the third element comes from a missing chunk (zeros).
    assert ar['a'].Read() == array.array('H', [1000, 4000, 0])
    assert ar['b'].Read() == array.array('H', [3000, 5000, 0])
    j = gdal.MultiDimInfo(ds, detailed=True)
    assert j['arrays']['compound_well_aligned']['values'] == [
        {"a": 1000, "b": 3000},
        {"a": 4000, "b": 5000},
        {"a": 0, "b": 0}]
def test_zarr_read_compound_not_aligned():
    """Read a compound dtype whose members need alignment padding (uint16,
    uint8, uint16): offsets reflect the padded layout (size 6, 'c' at
    offset 4), and per-field reads and MultiDimInfo values are correct."""
    filename = 'data/zarr/compound_not_aligned.zarr'
    ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER)
    assert ds is not None
    rg = ds.GetRootGroup()
    assert rg
    ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
    assert ar
    dt = ar.GetDataType()
    assert dt.GetSize() == 6
    comps = dt.GetComponents()
    assert len(comps) == 3
    assert comps[0].GetName() == 'a'
    assert comps[0].GetOffset() == 0
    assert comps[0].GetType().GetNumericDataType() == gdal.GDT_UInt16
    assert comps[1].GetName() == 'b'
    assert comps[1].GetOffset() == 2
    assert comps[1].GetType().GetNumericDataType() == gdal.GDT_Byte
    # 'c' is aligned to offset 4 (one padding byte after 'b').
    assert comps[2].GetName() == 'c'
    assert comps[2].GetOffset() == 4
    assert comps[2].GetType().GetNumericDataType() == gdal.GDT_UInt16
    assert ar['a'].Read() == array.array('H', [1000, 4000, 0])
    assert ar['b'].Read() == array.array('B', [2, 4, 0])
    assert ar['c'].Read() == array.array('H', [3000, 5000, 0])
    j = gdal.MultiDimInfo(ds, detailed=True)
    assert j['arrays']['compound_not_aligned']['values'] == [
        {"a": 1000, "b": 2, "c": 3000},
        {"a": 4000, "b": 4, "c": 5000},
        {"a": 0, "b": 0, "c": 0}]
def test_zarr_read_compound_complex():
    """Read a compound dtype with a nested compound, a string member and
    platform-dependent padding; verify layout and MultiDimInfo values."""
    filename = 'data/zarr/compound_complex.zarr'
    ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER)
    assert ds is not None
    rg = ds.GetRootGroup()
    assert rg
    ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
    assert ar
    is_64bit = sys.maxsize > 2**32
    dt = ar.GetDataType()
    # Bug fix: the former "assert dt.GetSize() == 24 if is_64bit else 16"
    # parsed as "(dt.GetSize() == 24) if is_64bit else 16" due to conditional
    # expression precedence, so on 32-bit builds it merely asserted the
    # truthy constant 16. Parenthesize the conditional.
    assert dt.GetSize() == (24 if is_64bit else 16)
    comps = dt.GetComponents()
    assert len(comps) == 4
    assert comps[0].GetName() == 'a'
    assert comps[0].GetOffset() == 0
    assert comps[0].GetType().GetNumericDataType() == gdal.GDT_Byte
    assert comps[1].GetName() == 'b'
    assert comps[1].GetOffset() == 2
    assert comps[1].GetType().GetClass() == gdal.GEDTC_COMPOUND
    assert comps[1].GetType().GetSize() == 1 + 1 + 2 + \
        1 + 1  # last one is padding
    subcomps = comps[1].GetType().GetComponents()
    assert len(subcomps) == 4
    assert comps[2].GetName() == 'c'
    assert comps[2].GetOffset() == 8
    assert comps[2].GetType().GetClass() == gdal.GEDTC_STRING
    assert comps[3].GetName() == 'd'
    # Same precedence fix as above: the string pointer in 'c' makes the
    # offset of 'd' depend on the pointer size.
    assert comps[3].GetOffset() == (16 if is_64bit else 12)
    assert comps[3].GetType().GetNumericDataType() == gdal.GDT_Int16
    j = gdal.MultiDimInfo(ds, detailed=True)
    assert j['arrays']['compound_complex']['values'] == [
        {"a": 1, "b": {"b1": 2, "b2": 3, "b3": 1000, "b5": 4}, "c": "AAA", "d": -1},
        {"a": 2, "b": {"b1": 255, "b2": 254, "b3": 65534, "b5": 253}, "c": "ZZ", "d": -2}]
def test_zarr_read_array_attributes():
    """Array-level .zattrs of every JSON type (bool, numbers, int64, arrays,
    mixed arrays, null, object, strings) round-trip through MultiDimInfo."""
    filename = 'data/zarr/array_attrs.zarr'
    ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER)
    assert ds is not None
    j = gdal.MultiDimInfo(ds)
    assert j['arrays']['array_attrs']['attributes'] == {
        "bool": True,
        "double": 1.5,
        "doublearray": [1.5, 2.5],
        "int": 1,
        "int64": 1234567890123,
        "int64array": [1234567890123, -1234567890123],
        "intarray": [1, 2],
        "intdoublearray": [1, 2.5],
        "mixedstrintarray": ["foo", 1],
        "null": "",
        "obj": {},
        "str": "foo",
        "strarray": ["foo", "bar"]
    }
@pytest.mark.parametrize("crs_member", ["projjson", "wkt", "url"])
def test_zarr_read_crs(crs_member):
    """The _CRS attribute can carry the CRS as PROJJSON, WKT2 or an OGC URL;
    each representation alone must yield EPSG:4326, be consumed (not exposed
    as a generic attribute), and projjson requires PROJ >= 6.2."""
    zarray = {
        "chunks": [
            2,
            3
        ],
        "compressor": None,
        "dtype": '!b1',
        "fill_value": None,
        "filters": None,
        "order": "C",
        "shape": [
            5,
            4
        ],
        "zarr_format": 2
    }
    # Reference _CRS attribute holding all three equivalent representations
    # of EPSG:4326; each parametrization keeps only one of them.
    zattrs_all = {
        "_CRS": {
            "projjson": {
                "$schema": "https://proj.org/schemas/v0.2/projjson.schema.json",
                "type": "GeographicCRS",
                "name": "WGS 84",
                "datum_ensemble": {
                    "name": "World Geodetic System 1984 ensemble",
                    "members": [
                        {
                            "name": "World Geodetic System 1984 (Transit)",
                            "id": {
                                "authority": "EPSG",
                                "code": 1166
                            }
                        },
                        {
                            "name": "World Geodetic System 1984 (G730)",
                            "id": {
                                "authority": "EPSG",
                                "code": 1152
                            }
                        },
                        {
                            "name": "World Geodetic System 1984 (G873)",
                            "id": {
                                "authority": "EPSG",
                                "code": 1153
                            }
                        },
                        {
                            "name": "World Geodetic System 1984 (G1150)",
                            "id": {
                                "authority": "EPSG",
                                "code": 1154
                            }
                        },
                        {
                            "name": "World Geodetic System 1984 (G1674)",
                            "id": {
                                "authority": "EPSG",
                                "code": 1155
                            }
                        },
                        {
                            "name": "World Geodetic System 1984 (G1762)",
                            "id": {
                                "authority": "EPSG",
                                "code": 1156
                            }
                        }
                    ],
                    "ellipsoid": {
                        "name": "WGS 84",
                        "semi_major_axis": 6378137,
                        "inverse_flattening": 298.257223563
                    },
                    "accuracy": "2.0",
                    "id": {
                        "authority": "EPSG",
                        "code": 6326
                    }
                },
                "coordinate_system": {
                    "subtype": "ellipsoidal",
                    "axis": [
                        {
                            "name": "Geodetic latitude",
                            "abbreviation": "Lat",
                            "direction": "north",
                            "unit": "degree"
                        },
                        {
                            "name": "Geodetic longitude",
                            "abbreviation": "Lon",
                            "direction": "east",
                            "unit": "degree"
                        }
                    ]
                },
                "scope": "Horizontal component of 3D system.",
                "area": "World.",
                "bbox": {
                    "south_latitude": -90,
                    "west_longitude": -180,
                    "north_latitude": 90,
                    "east_longitude": 180
                },
                "id": {
                    "authority": "EPSG",
                    "code": 4326
                }
            },
            "wkt": 'GEOGCRS["WGS 84",ENSEMBLE["World Geodetic System 1984 ensemble",MEMBER["World Geodetic System 1984 (Transit)"],MEMBER["World Geodetic System 1984 (G730)"],MEMBER["World Geodetic System 1984 (G873)"],MEMBER["World Geodetic System 1984 (G1150)"],MEMBER["World Geodetic System 1984 (G1674)"],MEMBER["World Geodetic System 1984 (G1762)"],ELLIPSOID["WGS 84",6378137,298.257223563,LENGTHUNIT["metre",1]],ENSEMBLEACCURACY[2.0]],PRIMEM["Greenwich",0,ANGLEUNIT["degree",0.0174532925199433]],CS[ellipsoidal,2],AXIS["geodetic latitude (Lat)",north,ORDER[1],ANGLEUNIT["degree",0.0174532925199433]],AXIS["geodetic longitude (Lon)",east,ORDER[2],ANGLEUNIT["degree",0.0174532925199433]],USAGE[SCOPE["Horizontal component of 3D system."],AREA["World."],BBOX[-90,-180,90,180]],ID["EPSG",4326]]',
            "url": "http://www.opengis.net/def/crs/EPSG/0/4326"
        }
    }
    zattrs = {"_CRS": {crs_member: zattrs_all["_CRS"][crs_member]}}
    try:
        gdal.Mkdir('/vsimem/test.zarr', 0)
        gdal.FileFromMemBuffer('/vsimem/test.zarr/.zarray', json.dumps(zarray))
        gdal.FileFromMemBuffer('/vsimem/test.zarr/.zattrs', json.dumps(zattrs))
        ds = gdal.OpenEx('/vsimem/test.zarr', gdal.OF_MULTIDIM_RASTER)
        rg = ds.GetRootGroup()
        assert rg
        ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
        srs = ar.GetSpatialRef()
        # PROJJSON is only understood by PROJ >= 6.2.
        if not(osr.GetPROJVersionMajor() > 6 or osr.GetPROJVersionMinor() >= 2) and crs_member == 'projjson':
            assert srs is None
        else:
            assert srs is not None
            assert srs.GetAuthorityCode(None) == '4326'
        # _CRS must be consumed by the driver, not reported as an attribute.
        assert len(ar.GetAttributes()) == 0
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
@pytest.mark.parametrize("use_get_names", [True, False])
def test_zarr_read_group(use_get_names):
    """Navigate a nested group hierarchy (/foo/bar/baz); with
    use_get_names=False, children are opened directly without first listing
    them, exercising the lazy discovery path."""
    filename = 'data/zarr/group.zarr'
    ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER)
    assert ds is not None
    rg = ds.GetRootGroup()
    assert rg.GetName() == '/'
    assert rg.GetFullName() == '/'
    if use_get_names:
        assert rg.GetGroupNames() == ['foo']
    assert len(rg.GetAttributes()) == 1
    assert rg.GetAttribute('key') is not None
    subgroup = rg.OpenGroup('foo')
    assert subgroup is not None
    assert rg.OpenGroup('not_existing') is None
    assert subgroup.GetName() == 'foo'
    assert subgroup.GetFullName() == '/foo'
    # The root group holds no arrays of its own.
    assert rg.GetMDArrayNames() is None
    if use_get_names:
        assert subgroup.GetGroupNames() == ['bar']
    assert subgroup.GetAttributes() == []
    subsubgroup = subgroup.OpenGroup('bar')
    assert subsubgroup.GetName() == 'bar'
    assert subsubgroup.GetFullName() == '/foo/bar'
    if use_get_names:
        assert subsubgroup.GetMDArrayNames() == ['baz']
    ar = subsubgroup.OpenMDArray('baz')
    assert ar is not None
    assert ar.Read() == array.array('i', [1])
    assert subsubgroup.OpenMDArray('not_existing') is None
def test_zarr_read_group_with_zmetadata():
    """Same nested hierarchy as test_zarr_read_group, but read through the
    consolidated .zmetadata file, which also exposes group/array attributes."""
    filename = 'data/zarr/group_with_zmetadata.zarr'
    ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER)
    assert ds is not None
    rg = ds.GetRootGroup()
    assert rg.GetName() == '/'
    assert rg.GetFullName() == '/'
    assert rg.GetGroupNames() == ['foo']
    assert len(rg.GetAttributes()) == 1
    assert rg.GetAttribute('key') is not None
    subgroup = rg.OpenGroup('foo')
    assert subgroup is not None
    assert rg.OpenGroup('not_existing') is None
    assert subgroup.GetName() == 'foo'
    assert subgroup.GetFullName() == '/foo'
    assert rg.GetMDArrayNames() is None
    assert subgroup.GetGroupNames() == ['bar']
    assert subgroup.GetAttributes() == []
    subsubgroup = subgroup.OpenGroup('bar')
    assert subsubgroup.GetName() == 'bar'
    assert subsubgroup.GetFullName() == '/foo/bar'
    assert subsubgroup.GetMDArrayNames() == ['baz']
    # Attributes come from the consolidated metadata.
    assert subsubgroup.GetAttribute('foo') is not None
    ar = subsubgroup.OpenMDArray('baz')
    assert ar is not None
    assert ar.Read() == array.array('i', [1])
    assert ar.GetAttribute('bar') is not None
    assert subsubgroup.OpenMDArray('not_existing') is None
@pytest.mark.parametrize("use_zmetadata, filename",
                         [(True, 'data/zarr/array_dimensions.zarr'),
                          (False, 'data/zarr/array_dimensions.zarr'),
                          (True, 'data/zarr/array_dimensions_upper_level.zarr'),
                          (False, 'data/zarr/array_dimensions_upper_level.zarr'),
                          (False, 'data/zarr/array_dimensions_upper_level.zarr/subgroup/var')])
def test_zarr_read_ARRAY_DIMENSIONS(use_zmetadata, filename):
    """The xarray _ARRAY_DIMENSIONS attribute names an array's dimensions and
    links them to their indexing variables (lat/lon with Y/X types and
    NORTH/EAST directions), with and without consolidated metadata.

    The dataset is reopened between sections because opening an array caches
    dimension objects; each reopen checks discovery from a different entry
    point ('var', then 'lat', then the root group alone)."""
    ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER, open_options=[
        'USE_ZMETADATA=' + str(use_zmetadata)])
    assert ds is not None
    rg = ds.GetRootGroup()
    if filename != 'data/zarr/array_dimensions_upper_level.zarr':
        ar = rg.OpenMDArray('var')
    else:
        ar = rg.OpenGroup('subgroup').OpenMDArray('var')
    assert ar
    dims = ar.GetDimensions()
    assert len(dims) == 2
    assert dims[0].GetName() == 'lat'
    assert dims[0].GetIndexingVariable() is not None
    assert dims[0].GetIndexingVariable().GetName() == 'lat'
    assert dims[0].GetType() == gdal.DIM_TYPE_HORIZONTAL_Y
    assert dims[0].GetDirection() == 'NORTH'
    assert dims[1].GetName() == 'lon'
    assert dims[1].GetIndexingVariable() is not None
    assert dims[1].GetIndexingVariable().GetName() == 'lon'
    assert dims[1].GetType() == gdal.DIM_TYPE_HORIZONTAL_X
    assert dims[1].GetDirection() == 'EAST'
    assert len(rg.GetDimensions()) == 2
    # Fresh open: start from the 'lat' indexing variable itself.
    ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER, open_options=[
        'USE_ZMETADATA=' + str(use_zmetadata)])
    assert ds is not None
    rg = ds.GetRootGroup()
    ar = rg.OpenMDArray('lat')
    assert ar
    dims = ar.GetDimensions()
    assert len(dims) == 1
    assert dims[0].GetName() == 'lat'
    assert dims[0].GetIndexingVariable() is not None
    assert dims[0].GetIndexingVariable().GetName() == 'lat'
    assert dims[0].GetType() == gdal.DIM_TYPE_HORIZONTAL_Y
    assert len(rg.GetDimensions()) == 2
    # Fresh open: dimensions must be discoverable from the root group alone.
    ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER, open_options=[
        'USE_ZMETADATA=' + str(use_zmetadata)])
    assert ds is not None
    rg = ds.GetRootGroup()
    assert len(rg.GetDimensions()) == 2
@pytest.mark.parametrize("use_get_names", [True, False])
def test_zarr_read_v3(use_get_names):
    """Read a Zarr V3 hierarchy (/marvin/paranoid plus arrays at several
    levels); with use_get_names=False children are opened without listing
    first. Note the root array named '/'."""
    filename = 'data/zarr/v3/test.zr3'
    ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER)
    assert ds is not None
    rg = ds.GetRootGroup()
    assert rg.GetName() == '/'
    assert rg.GetFullName() == '/'
    if use_get_names:
        assert rg.GetGroupNames() == ['marvin']
    assert len(rg.GetAttributes()) == 1
    assert rg.GetAttribute('root_foo') is not None
    subgroup = rg.OpenGroup('marvin')
    assert subgroup is not None
    assert rg.OpenGroup('not_existing') is None
    assert subgroup.GetName() == 'marvin'
    assert subgroup.GetFullName() == '/marvin'
    if use_get_names:
        assert rg.GetMDArrayNames() == ['/', 'ar']
    ar = rg.OpenMDArray('/')
    assert ar
    assert ar.Read() == array.array('i', [2] + ([1] * (5 * 10 - 1)))
    ar = rg.OpenMDArray('ar')
    assert ar
    assert ar.Read() == array.array('b', [1, 2])
    if use_get_names:
        assert subgroup.GetGroupNames() == ['paranoid']
    assert len(subgroup.GetAttributes()) == 1
    subsubgroup = subgroup.OpenGroup('paranoid')
    assert subsubgroup.GetName() == 'paranoid'
    assert subsubgroup.GetFullName() == '/marvin/paranoid'
    if use_get_names:
        assert subgroup.GetMDArrayNames() == ['android']
    ar = subgroup.OpenMDArray('android')
    assert ar is not None
    assert ar.Read() == array.array('b', [1] * 4 * 5)
    assert subgroup.OpenMDArray('not_existing') is None
@pytest.mark.parametrize("endianness", ['le', 'be'])
def test_zarr_read_half_float(endianness):
    """Half-float (f2) data in either byte order is exposed as float32."""
    dataset_path = 'data/zarr/f2_' + endianness + '.zarr'
    dataset = gdal.OpenEx(dataset_path, gdal.OF_MULTIDIM_RASTER)
    assert dataset is not None
    root = dataset.GetRootGroup()
    md_array = root.OpenMDArray(root.GetMDArrayNames()[0])
    assert md_array.Read() == array.array('f', [1.5, float('nan')])
def test_zarr_read_classic():
    """Open Zarr datasets through the classic (2D raster) API.

    Covers plain directory opening, the ZARR: connection-string syntax,
    error cases for bad connection strings, and subdataset enumeration for
    3D arrays (both V2 and V3 formats).
    """
    ds = gdal.Open('data/zarr/zlib.zarr')
    assert ds
    assert not ds.GetSubDatasets()
    assert ds.ReadRaster() == array.array('b', [1, 2])
    ds = gdal.Open('ZARR:data/zarr/zlib.zarr')
    assert ds
    assert not ds.GetSubDatasets()
    assert ds.ReadRaster() == array.array('b', [1, 2])
    # Invalid connection strings must fail cleanly
    with gdaltest.error_handler():
        assert gdal.Open('ZARR:"data/zarr/not_existing.zarr"') is None
        assert gdal.Open('ZARR:"data/zarr/zlib.zarr":/not_existing') is None
        assert gdal.Open('ZARR:"data/zarr/zlib.zarr":/zlib:0') is None
    ds = gdal.Open('ZARR:"data/zarr/zlib.zarr":/zlib')
    assert ds
    assert not ds.GetSubDatasets()
    assert ds.ReadRaster() == array.array('b', [1, 2])
    # A 3D array is exposed as one subdataset per slice along the first axis
    ds = gdal.Open('data/zarr/order_f_u1_3d.zarr')
    assert ds
    subds = ds.GetSubDatasets()
    assert len(subds) == 2
    ds = gdal.Open(subds[0][0])
    assert ds
    assert ds.ReadRaster() == array.array('b', [i for i in range(12)])
    ds = gdal.Open(subds[1][0])
    assert ds
    assert ds.ReadRaster() == array.array('b', [12 + i for i in range(12)])
    with gdaltest.error_handler():
        assert gdal.Open(
            'ZARR:data/zarr/order_f_u1_3d.zarr:/order_f_u1_3d') is None
        assert gdal.Open(
            'ZARR:data/zarr/order_f_u1_3d.zarr:/order_f_u1_3d:2') is None
        assert gdal.Open(subds[0][0] + ':0') is None
    ds = gdal.Open('data/zarr/v3/test.zr3')
    assert ds
    subds = ds.GetSubDatasets()
    assert len(subds) == 2
    ds = gdal.Open(subds[0][0])
    assert ds
    assert ds.ReadRaster() == array.array('i', [2] + ([1] * (10 * 5 - 1)))
    ds = gdal.Open(subds[1][0])
    assert ds
    assert ds.ReadRaster() == array.array('b', [1, 2])
def test_zarr_read_classic_too_many_samples_3d():
    """A 3D array with too many potential 2D slices must emit an error and
    expose no subdatasets (first dimension of 65536 would mean 65536 slices)."""
    j = {
        "chunks": [
            65536, 2, 1
        ],
        "compressor": None,
        "dtype": '!u1',
        "fill_value": None,
        "filters": None,
        "order": "C",
        "shape": [
            65536, 2, 1
        ],
        "zarr_format": 2
    }
    try:
        gdal.Mkdir('/vsimem/test.zarr', 0)
        gdal.FileFromMemBuffer('/vsimem/test.zarr/.zarray', json.dumps(j))
        gdal.ErrorReset()
        with gdaltest.error_handler():
            ds = gdal.Open('/vsimem/test.zarr')
        assert gdal.GetLastErrorMsg() != ''
        assert len(ds.GetSubDatasets()) == 0
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def test_zarr_read_classic_4d():
    """A small 4D array yields one openable subdataset per 2D slice
    (3 x 2 = 6 combinations of the two leading dimensions)."""
    j = {
        "chunks": [
            3, 2, 1, 1
        ],
        "compressor": None,
        "dtype": '!u1',
        "fill_value": None,
        "filters": None,
        "order": "C",
        "shape": [
            3, 2, 1, 1
        ],
        "zarr_format": 2
    }
    try:
        gdal.Mkdir('/vsimem/test.zarr', 0)
        gdal.FileFromMemBuffer('/vsimem/test.zarr/.zarray', json.dumps(j))
        ds = gdal.Open('/vsimem/test.zarr')
        subds = ds.GetSubDatasets()
        assert len(subds) == 6
        for i in range(len(subds)):
            assert gdal.Open(subds[i][0]) is not None
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def test_zarr_read_classic_too_many_samples_4d():
    """A 4D array whose leading dimensions imply too many 2D slices
    (256 x 256) must emit an error and expose no subdatasets."""
    j = {
        "chunks": [
            256, 256, 1, 1
        ],
        "compressor": None,
        "dtype": '!u1',
        "fill_value": None,
        "filters": None,
        "order": "C",
        "shape": [
            256, 256, 1, 1
        ],
        "zarr_format": 2
    }
    try:
        gdal.Mkdir('/vsimem/test.zarr', 0)
        gdal.FileFromMemBuffer('/vsimem/test.zarr/.zarray', json.dumps(j))
        gdal.ErrorReset()
        with gdaltest.error_handler():
            ds = gdal.Open('/vsimem/test.zarr')
        assert gdal.GetLastErrorMsg() != ''
        assert len(ds.GetSubDatasets()) == 0
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def test_zarr_read_empty_shape():
    """A zero-dimensional (scalar) Zarr array is readable and returns its
    single value."""
    dataset = gdal.OpenEx('data/zarr/empty.zarr', gdal.OF_MULTIDIM_RASTER)
    assert dataset
    root = dataset.GetRootGroup()
    assert root
    first_name = root.GetMDArrayNames()[0]
    scalar_array = root.OpenMDArray(first_name)
    assert scalar_array
    assert scalar_array.Read() == array.array('b', [120])
def test_zarr_read_BLOSC_COMPRESSORS():
    """When blosc is compiled in, 'lz4' must be listed among its
    sub-compressors in the driver metadata."""
    driver = gdal.GetDriverByName('Zarr')
    if 'blosc' not in driver.GetMetadataItem('COMPRESSORS'):
        pytest.skip('blosc not available')
    blosc_compressors = driver.GetMetadataItem('BLOSC_COMPRESSORS')
    assert 'lz4' in blosc_compressors
@pytest.mark.parametrize("format,create_z_metadata", [('ZARR_V2', 'YES'),
                                                      ('ZARR_V2', 'NO'),
                                                      ('ZARR_V3', 'NO')])
def test_zarr_create_group(format,create_z_metadata):
    """Create a Zarr hierarchy with groups and attributes of various types,
    reopen it in update mode to modify it, then reopen read-only and verify
    everything round-trips (including the consolidated .zmetadata when
    requested)."""
    filename = 'tmp/test.zarr'
    try:
        def create():
            # Build the initial hierarchy and write attributes of each
            # supported data type.
            ds = gdal.GetDriverByName(
                'ZARR').CreateMultiDimensional(filename, options=['FORMAT='+format, 'CREATE_ZMETADATA='+create_z_metadata])
            assert ds is not None
            rg = ds.GetRootGroup()
            assert rg
            assert rg.GetName() == '/'
            attr = rg.CreateAttribute(
                'str_attr', [], gdal.ExtendedDataType.CreateString())
            assert attr
            assert attr.Write('my_string') == gdal.CE_None
            attr = rg.CreateAttribute(
                'json_attr', [], gdal.ExtendedDataType.CreateString(0, gdal.GEDTST_JSON))
            assert attr
            assert attr.Write({"foo":"bar"}) == gdal.CE_None
            attr = rg.CreateAttribute(
                'str_array_attr', [2], gdal.ExtendedDataType.CreateString())
            assert attr
            assert attr.Write(
                ['first_string', 'second_string']) == gdal.CE_None
            # 2D attributes are not supported
            with gdaltest.error_handler():
                attr = rg.CreateAttribute('dim_2_not_supported', [
                                          2, 2], gdal.ExtendedDataType.CreateString())
                assert attr is None
            attr = rg.CreateAttribute(
                'int_attr', [], gdal.ExtendedDataType.Create(gdal.GDT_Int32))
            assert attr
            assert attr.Write(12345678) == gdal.CE_None
            attr = rg.CreateAttribute(
                'uint_attr', [], gdal.ExtendedDataType.Create(gdal.GDT_UInt32))
            assert attr
            assert attr.Write(4000000000) == gdal.CE_None
            attr = rg.CreateAttribute(
                'int_array_attr', [2], gdal.ExtendedDataType.Create(gdal.GDT_Int32))
            assert attr
            assert attr.Write([12345678, -12345678]) == gdal.CE_None
            attr = rg.CreateAttribute(
                'double_attr', [], gdal.ExtendedDataType.Create(gdal.GDT_Float64))
            assert attr
            assert attr.Write(12345678.5) == gdal.CE_None
            attr = rg.CreateAttribute('double_array_attr', [
                                      2], gdal.ExtendedDataType.Create(gdal.GDT_Float64))
            assert attr
            assert attr.Write([12345678.5, -12345678.5]) == gdal.CE_None
            subgroup = rg.CreateGroup('foo')
            assert subgroup
            assert subgroup.GetName() == 'foo'
            assert subgroup.GetFullName() == '/foo'
            assert rg.GetGroupNames() == ['foo']
            subgroup = rg.OpenGroup('foo')
            assert subgroup
        create()
        if create_z_metadata == 'YES':
            # Check the consolidated metadata file references the subgroup
            f = gdal.VSIFOpenL(filename + '/.zmetadata', 'rb')
            assert f
            data = gdal.VSIFReadL(1, 10000, f)
            gdal.VSIFCloseL(f)
            j = json.loads(data)
            assert 'foo/.zgroup' in j['metadata']
        def update():
            # Reopen in update mode: modify an attribute and add groups.
            ds = gdal.OpenEx(filename,
                             gdal.OF_MULTIDIM_RASTER | gdal.OF_UPDATE)
            assert ds
            rg = ds.GetRootGroup()
            assert rg
            assert rg.GetGroupNames() == ['foo']
            attr = rg.GetAttribute('str_attr')
            assert attr
            assert attr.Read() == 'my_string'
            assert attr.Write('my_string_modified') == gdal.CE_None
            subgroup = rg.OpenGroup('foo')
            assert subgroup
            subgroup = rg.CreateGroup('bar')
            assert subgroup
            assert set(rg.GetGroupNames()) == set(['foo', 'bar'])
            subgroup = rg.OpenGroup('foo')
            assert subgroup
            subsubgroup = subgroup.CreateGroup('baz')
            assert subsubgroup
            ds = None
        update()
        # Final read-only pass: verify everything persisted correctly.
        ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER)
        assert ds
        rg = ds.GetRootGroup()
        assert rg
        attr = rg.GetAttribute('str_attr')
        assert attr
        assert attr.Read() == 'my_string_modified'
        attr = rg.GetAttribute('json_attr')
        assert attr
        assert attr.GetDataType().GetSubType() == gdal.GEDTST_JSON
        assert attr.Read() == { "foo": "bar" }
        attr = rg.GetAttribute('str_array_attr')
        assert attr
        assert attr.Read() == ['first_string', 'second_string']
        attr = rg.GetAttribute('int_attr')
        assert attr
        assert attr.GetDataType().GetNumericDataType() == gdal.GDT_Int32
        assert attr.ReadAsDouble() == 12345678
        attr = rg.GetAttribute('uint_attr')
        assert attr
        # UInt32 attributes come back as Float64 (JSON has no unsigned ints)
        assert attr.GetDataType().GetNumericDataType() == gdal.GDT_Float64
        assert attr.ReadAsDouble() == 4000000000
        attr = rg.GetAttribute('int_array_attr')
        assert attr
        assert attr.GetDataType().GetNumericDataType() == gdal.GDT_Int32
        assert attr.ReadAsIntArray() == (12345678, -12345678)
        attr = rg.GetAttribute('double_attr')
        assert attr
        assert attr.GetDataType().GetNumericDataType() == gdal.GDT_Float64
        assert attr.ReadAsDouble() == 12345678.5
        attr = rg.GetAttribute('double_array_attr')
        assert attr
        assert attr.GetDataType().GetNumericDataType() == gdal.GDT_Float64
        assert attr.Read() == (12345678.5, -12345678.5)
        assert set(rg.GetGroupNames()) == set(['foo', 'bar'])
        # Mutating operations must fail on a read-only dataset
        with gdaltest.error_handler():
            assert rg.CreateGroup('not_opened_in_update_mode') is None
            assert rg.CreateAttribute(
                'not_opened_in_update_mode', [], gdal.ExtendedDataType.CreateString()) is None
        subgroup = rg.OpenGroup('foo')
        assert subgroup
        subsubgroup = subgroup.OpenGroup('baz')
        assert subsubgroup
        ds = None
    finally:
        gdal.RmdirRecursive(filename)
@pytest.mark.parametrize("group_name", ["foo",  # already existing
                                        "directory_with_that_name",
                                        "",
                                        ".",
                                        "..",
                                        "a/b",
                                        "a\\n",
                                        "a:b",
                                        ".zarray",
                                        ])
@pytest.mark.parametrize("format", ['ZARR_V2', 'ZARR_V3'])
def test_zarr_create_group_errors(group_name, format):
    """CreateGroup() must reject invalid or conflicting group names
    (existing group, existing directory, empty/reserved/unsafe names)."""
    try:
        ds = gdal.GetDriverByName(
            'ZARR').CreateMultiDimensional('/vsimem/test.zarr', options=['FORMAT='+format])
        assert ds is not None
        rg = ds.GetRootGroup()
        assert rg
        subgroup = rg.CreateGroup('foo')
        assert subgroup
        # Create a plain directory that clashes with one of the tested names
        if format == 'ZARR_V2':
            gdal.Mkdir('/vsimem/test.zarr/directory_with_that_name', 0)
        else:
            gdal.Mkdir(
                '/vsimem/test.zarr/meta/root/directory_with_that_name', 0)
        with gdaltest.error_handler():
            assert rg.CreateGroup(group_name) is None
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def getCompoundDT():
    """Build an 8-byte compound data type with an int16 'x' component at
    offset 0 and a nested single-member compound 'y' (int32) at offset 4."""
    comp_x = gdal.EDTComponent.Create(
        'x', 0, gdal.ExtendedDataType.Create(gdal.GDT_Int16))
    comp_y = gdal.EDTComponent.Create(
        'y', 0, gdal.ExtendedDataType.Create(gdal.GDT_Int32))
    inner = gdal.ExtendedDataType.CreateCompound("", 4, [comp_y])
    nested = gdal.EDTComponent.Create('y', 4, inner)
    return gdal.ExtendedDataType.CreateCompound("", 8, [comp_x, nested])
@pytest.mark.parametrize("datatype,nodata", [
    [gdal.ExtendedDataType.Create(gdal.GDT_Byte), None],
    [gdal.ExtendedDataType.Create(gdal.GDT_Byte), 1],
    [gdal.ExtendedDataType.Create(
        gdal.GDT_UInt16), None],
    [gdal.ExtendedDataType.Create(
        gdal.GDT_Int16), None],
    [gdal.ExtendedDataType.Create(
        gdal.GDT_UInt32), None],
    [gdal.ExtendedDataType.Create(
        gdal.GDT_Int32), None],
    [gdal.ExtendedDataType.Create(
        gdal.GDT_Float32), None],
    [gdal.ExtendedDataType.Create(
        gdal.GDT_Float64), None],
    [gdal.ExtendedDataType.Create(
        gdal.GDT_Float64), 1.5],
    [gdal.ExtendedDataType.Create(
        gdal.GDT_Float64), float('nan')],
    [gdal.ExtendedDataType.Create(
        gdal.GDT_Float64), float('infinity')],
    [gdal.ExtendedDataType.Create(
        gdal.GDT_Float64), float('-infinity')],
    [gdal.ExtendedDataType.Create(
        gdal.GDT_CInt16), None],
    [gdal.ExtendedDataType.Create(
        gdal.GDT_CInt32), None],
    [gdal.ExtendedDataType.Create(
        gdal.GDT_CFloat32), None],
    [gdal.ExtendedDataType.Create(
        gdal.GDT_CFloat64), None],
    [gdal.ExtendedDataType.CreateString(10), None],
    [gdal.ExtendedDataType.CreateString(10), "ab"],
    [getCompoundDT(), None],
    [getCompoundDT(), bytes(array.array('h', [12])) +
     bytes(array.array('h', [0])) +  # padding
     bytes(array.array('i', [2345678]))],
])
@pytest.mark.parametrize("format", ['ZARR_V2', 'ZARR_V3'])
def test_zarr_create_array(datatype, nodata, format):
    """Create a 2D array of each supported data type, optionally with a
    nodata value, reopen the dataset and verify data type, dimensions and
    nodata round-trip. ZARR_V3 is expected to reject non-numeric and
    complex types."""
    error_expected = False
    if format == 'ZARR_V3':
        if datatype.GetClass() != gdal.GEDTC_NUMERIC or \
                gdal.DataTypeIsComplex(datatype.GetNumericDataType()):
            error_expected = True
        elif datatype.GetNumericDataType() in (gdal.GDT_CInt16, gdal.GDT_CInt32):
            error_expected = True
    try:
        def create():
            # Returns True when the array was created and should be verified
            ds = gdal.GetDriverByName(
                'ZARR').CreateMultiDimensional('/vsimem/test.zarr',
                                               options=['FORMAT='+format])
            assert ds is not None
            rg = ds.GetRootGroup()
            assert rg
            assert rg.GetName() == '/'
            dim0 = rg.CreateDimension("dim0", None, None, 2)
            dim1 = rg.CreateDimension("dim1", None, None, 3)
            if error_expected:
                with gdaltest.error_handler():
                    ar = rg.CreateMDArray("my_ar", [dim0, dim1], datatype)
                assert ar is None
                return False
            else:
                ar = rg.CreateMDArray("my_ar", [dim0, dim1], datatype)
                assert ar
                if nodata:
                    # Each data type class has its own nodata setter
                    if datatype.GetClass() == gdal.GEDTC_STRING:
                        assert ar.SetNoDataValueString(nodata) == gdal.CE_None
                    elif datatype.GetClass() == gdal.GEDTC_NUMERIC:
                        assert ar.SetNoDataValueDouble(nodata) == gdal.CE_None
                    else:
                        assert ar.SetNoDataValueRaw(nodata) == gdal.CE_None
                return True
        if create():
            ds = gdal.OpenEx('/vsimem/test.zarr', gdal.OF_MULTIDIM_RASTER)
            assert ds
            rg = ds.GetRootGroup()
            assert rg
            ar = rg.OpenMDArray('my_ar')
            assert ar
            got_dt = ar.GetDataType()
            if got_dt.GetClass() == gdal.GEDTC_COMPOUND:
                # Rebuild the compound type with anonymous names so it can be
                # compared for equality with the original datatype
                comps = got_dt.GetComponents()
                assert len(comps) == 2
                assert comps[1].GetType().GetClass() == gdal.GEDTC_COMPOUND
                comps[1] = gdal.EDTComponent.Create(
                    comps[1].GetName(), comps[1].GetType().GetSize(),
                    gdal.ExtendedDataType.CreateCompound(
                        "", comps[1].GetType().GetSize(),
                        comps[1].GetType().GetComponents()))
                got_dt = gdal.ExtendedDataType.CreateCompound(
                    "", got_dt.GetSize(), comps)
            assert got_dt == datatype
            assert len(ar.GetDimensions()) == 2
            assert [ar.GetDimensions()[i].GetSize()
                    for i in range(2)] == [2, 3]
            if nodata:
                if datatype.GetClass() == gdal.GEDTC_STRING:
                    got_nodata = ar.GetNoDataValueAsString()
                    assert got_nodata == nodata
                elif datatype.GetClass() == gdal.GEDTC_NUMERIC:
                    got_nodata = ar.GetNoDataValueAsDouble()
                    if math.isnan(nodata):
                        assert math.isnan(got_nodata)
                    else:
                        assert got_nodata == nodata
                else:
                    got_nodata = ar.GetNoDataValueAsRaw()
                    assert got_nodata == nodata
            else:
                assert ar.GetNoDataValueAsRaw() is None
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
@pytest.mark.parametrize("array_name", ["foo",  # already existing
                                        "directory_with_that_name",
                                        "",
                                        ".",
                                        "..",
                                        "a/b",
                                        "a\\n",
                                        "a:b",
                                        ".zarray",
                                        ])
def test_zarr_create_array_errors(array_name):
    """CreateMDArray() must reject invalid or conflicting array names
    (existing array, existing directory, empty/reserved/unsafe names)."""
    try:
        ds = gdal.GetDriverByName(
            'ZARR').CreateMultiDimensional('/vsimem/test.zarr')
        assert ds is not None
        rg = ds.GetRootGroup()
        assert rg
        assert rg.CreateMDArray(
            'foo', [], gdal.ExtendedDataType.Create(gdal.GDT_Byte)) is not None
        # Create a plain directory that clashes with one of the tested names
        gdal.Mkdir('/vsimem/test.zarr/directory_with_that_name', 0)
        with gdaltest.error_handler():
            assert rg.CreateMDArray(
                array_name, [], gdal.ExtendedDataType.Create(gdal.GDT_Byte)) is None
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
@pytest.mark.parametrize("compressor,options,expected_json", [
    ["NONE", [], None],
    ["zlib", [], {'id': 'zlib', 'level': 6}],
    ["zlib", ["ZLIB_LEVEL=1"], {'id': 'zlib', 'level': 1}],
    ["blosc", [], {'blocksize': 0,
                   'clevel': 5,
                   'cname': 'lz4',
                   'id': 'blosc',
                   'shuffle': 1}]])
def test_zarr_create_array_compressor(compressor, options, expected_json):
    """Creating a V2 array with a COMPRESS option must serialize the
    expected 'compressor' object in the .zarray metadata."""
    compressors = gdal.GetDriverByName('Zarr').GetMetadataItem('COMPRESSORS')
    if compressor != 'NONE' and compressor not in compressors:
        pytest.skip('compressor %s not available' % compressor)
    try:
        def create():
            ds = gdal.GetDriverByName(
                'ZARR').CreateMultiDimensional('/vsimem/test.zarr')
            assert ds is not None
            rg = ds.GetRootGroup()
            assert rg
            assert rg.CreateMDArray(
                "test", [], gdal.ExtendedDataType.Create(gdal.GDT_Byte),
                ['COMPRESS=' + compressor] + options) is not None
        create()
        # Inspect the raw .zarray JSON written by the driver
        f = gdal.VSIFOpenL('/vsimem/test.zarr/test/.zarray', 'rb')
        assert f
        data = gdal.VSIFReadL(1, 1000, f)
        gdal.VSIFCloseL(f)
        j = json.loads(data)
        assert j['compressor'] == expected_json
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
@pytest.mark.parametrize("compressor,options,expected_json", [
    ["NONE", [], None],
    ["gzip", [], {'codec': 'https://purl.org/zarr/spec/codec/gzip/1.0',
                  'configuration': {'level': 6}}]])
def test_zarr_create_array_compressor_v3(compressor, options, expected_json):
    """Creating a V3 array with a COMPRESS option must serialize the
    expected 'compressor' object in the array.json metadata (or omit the
    key entirely when no compression is used)."""
    compressors = gdal.GetDriverByName('Zarr').GetMetadataItem('COMPRESSORS')
    if compressor != 'NONE' and compressor not in compressors:
        pytest.skip('compressor %s not available' % compressor)
    try:
        def create():
            ds = gdal.GetDriverByName(
                'ZARR').CreateMultiDimensional('/vsimem/test.zarr',
                                               options=['FORMAT=ZARR_V3'])
            assert ds is not None
            rg = ds.GetRootGroup()
            assert rg
            assert rg.CreateMDArray(
                "test", [], gdal.ExtendedDataType.Create(gdal.GDT_Byte),
                ['COMPRESS=' + compressor] + options) is not None
        create()
        # V3 stores array metadata under meta/root/<name>.array.json
        f = gdal.VSIFOpenL('/vsimem/test.zarr/meta/root/test.array.json', 'rb')
        assert f
        data = gdal.VSIFReadL(1, 1000, f)
        gdal.VSIFCloseL(f)
        j = json.loads(data)
        if expected_json is None:
            assert 'compressor' not in j
        else:
            assert j['compressor'] == expected_json
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
@pytest.mark.parametrize("format", ['ZARR_V2', 'ZARR_V3'])
def test_zarr_create_array_bad_compressor(format):
    """An unknown COMPRESS value must make CreateMDArray() fail."""
    try:
        dataset = gdal.GetDriverByName(
            'ZARR').CreateMultiDimensional('/vsimem/test.zarr',
                                           options=['FORMAT='+format])
        assert dataset is not None
        root = dataset.GetRootGroup()
        assert root
        with gdaltest.error_handler():
            result = root.CreateMDArray(
                "test", [], gdal.ExtendedDataType.Create(gdal.GDT_Byte),
                ['COMPRESS=invalid'])
        assert result is None
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
@pytest.mark.parametrize("format", ['ZARR_V2', 'ZARR_V3'])
def test_zarr_create_array_attributes(format):
    """Create an array with a string attribute, update the attribute in a
    second session, then verify it read-only and check that writes fail on
    a read-only dataset."""
    try:
        def create():
            ds = gdal.GetDriverByName(
                'ZARR').CreateMultiDimensional('/vsimem/test.zarr',
                                               options=['FORMAT='+format])
            assert ds is not None
            rg = ds.GetRootGroup()
            assert rg
            ar = rg.CreateMDArray(
                "test", [], gdal.ExtendedDataType.Create(gdal.GDT_Byte))
            assert ar
            attr = ar.CreateAttribute(
                'str_attr', [], gdal.ExtendedDataType.CreateString())
            assert attr
            assert attr.Write('my_string') == gdal.CE_None
            # 2D attributes are not supported
            with gdaltest.error_handler():
                assert ar.CreateAttribute(
                    'invalid_2d', [2, 3], gdal.ExtendedDataType.CreateString()) is None
        create()
        def update():
            ds = gdal.OpenEx('/vsimem/test.zarr',
                             gdal.OF_MULTIDIM_RASTER | gdal.OF_UPDATE)
            assert ds
            rg = ds.GetRootGroup()
            assert rg
            ar = rg.OpenMDArray('test')
            assert ar
            attr = ar.GetAttribute('str_attr')
            assert attr
            assert attr.Read() == 'my_string'
            assert attr.Write('my_string_modified') == gdal.CE_None
        update()
        # Read-only pass: value persisted, mutation attempts must fail
        ds = gdal.OpenEx('/vsimem/test.zarr', gdal.OF_MULTIDIM_RASTER)
        assert ds
        rg = ds.GetRootGroup()
        assert rg
        ar = rg.OpenMDArray('test')
        assert ar
        attr = ar.GetAttribute('str_attr')
        assert attr
        assert attr.Read() == 'my_string_modified'
        with gdaltest.error_handler():
            assert attr.Write('foo') == gdal.CE_Failure
        with gdaltest.error_handler():
            assert ar.CreateAttribute(
                'another_attr', [], gdal.ExtendedDataType.CreateString()) is None
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def test_zarr_create_array_set_crs():
    """SetSpatialRef() on an array must serialize a '_CRS' attribute with
    'wkt' and 'url' members (and a valid 'projjson' member when available)
    in the .zattrs file."""
    try:
        def create():
            ds = gdal.GetDriverByName(
                'ZARR').CreateMultiDimensional('/vsimem/test.zarr')
            assert ds is not None
            rg = ds.GetRootGroup()
            assert rg
            ar = rg.CreateMDArray(
                "test", [], gdal.ExtendedDataType.Create(gdal.GDT_Byte))
            assert ar
            crs = osr.SpatialReference()
            crs.ImportFromEPSG(4326)
            assert ar.SetSpatialRef(crs) == gdal.CE_None
        create()
        # Inspect the raw .zattrs JSON written by the driver
        f = gdal.VSIFOpenL('/vsimem/test.zarr/test/.zattrs', 'rb')
        assert f
        data = gdal.VSIFReadL(1, 10000, f)
        gdal.VSIFCloseL(f)
        j = json.loads(data)
        assert '_CRS' in j
        crs = j['_CRS']
        assert 'wkt' in crs
        assert 'url' in crs
        # 'projjson' is only present when GDAL is built with PROJ >= 6.2
        if 'projjson' in crs:
            assert crs['projjson']['type'] == 'GeographicCRS'
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def test_zarr_create_array_set_dimension_name():
    """A dimension with an indexing variable must be serialized in the
    xarray-compatible '_ARRAY_DIMENSIONS' attribute of .zattrs."""
    try:
        def create():
            ds = gdal.GetDriverByName(
                'ZARR').CreateMultiDimensional('/vsimem/test.zarr')
            assert ds is not None
            rg = ds.GetRootGroup()
            assert rg
            dim0 = rg.CreateDimension("dim0", None, None, 2)
            # Create an array with the same name as the dimension and make
            # it the dimension's indexing variable
            dim0_ar = rg.CreateMDArray(
                "dim0", [dim0], gdal.ExtendedDataType.Create(gdal.GDT_Byte))
            dim0.SetIndexingVariable(dim0_ar)
            rg.CreateMDArray(
                "test", [dim0], gdal.ExtendedDataType.Create(gdal.GDT_Byte))
        create()
        f = gdal.VSIFOpenL('/vsimem/test.zarr/test/.zattrs', 'rb')
        assert f
        data = gdal.VSIFReadL(1, 10000, f)
        gdal.VSIFCloseL(f)
        j = json.loads(data)
        assert '_ARRAY_DIMENSIONS' in j
        assert j['_ARRAY_DIMENSIONS'] == ['dim0']
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
@pytest.mark.parametrize("dtype,structtype,gdaltype,fill_value,nodata_value",
                         [["!b1", 'B', gdal.GDT_Byte, None, None],
                          ["!i1", 'b', gdal.GDT_Int16, None, None],
                          ["!i1", 'b', gdal.GDT_Int16, -1, -1],
                          ["!u1", 'B', gdal.GDT_Byte, None, None],
                          ["!u1", 'B', gdal.GDT_Byte, "1", 1],
                          ["<i2", 'h', gdal.GDT_Int16, None, None],
                          [">i2", 'h', gdal.GDT_Int16, None, None],
                          ["<i4", 'i', gdal.GDT_Int32, None, None],
                          [">i4", 'i', gdal.GDT_Int32, None, None],
                          ["<i8", 'q', gdal.GDT_Float64, None, None],
                          [">i8", 'q', gdal.GDT_Float64, None, None],
                          ["<u2", 'H', gdal.GDT_UInt16, None, None],
                          [">u2", 'H', gdal.GDT_UInt16, None, None],
                          ["<u4", 'I', gdal.GDT_UInt32, None, None],
                          [">u4", 'I', gdal.GDT_UInt32, None, None],
                          ["<u4", 'I', gdal.GDT_UInt32, 4000000000, 4000000000],
                          ["<u8", 'Q', gdal.GDT_Float64, 4000000000, 4000000000],
                          [">u8", 'Q', gdal.GDT_Float64, None, None],
                          ["<f4", 'f', gdal.GDT_Float32, None, None],
                          [">f4", 'f', gdal.GDT_Float32, None, None],
                          ["<f4", 'f', gdal.GDT_Float32, 1.5, 1.5],
                          ["<f4", 'f', gdal.GDT_Float32, "NaN", float('nan')],
                          ["<f4", 'f', gdal.GDT_Float32,
                           "Infinity", float('infinity')],
                          ["<f4", 'f', gdal.GDT_Float32,
                           "-Infinity", float('-infinity')],
                          ["<f8", 'd', gdal.GDT_Float64, None, None],
                          [">f8", 'd', gdal.GDT_Float64, None, None],
                          ["<f8", 'd', gdal.GDT_Float64, "NaN", float('nan')],
                          ["<f8", 'd', gdal.GDT_Float64,
                           "Infinity", float('infinity')],
                          ["<f8", 'd', gdal.GDT_Float64,
                           "-Infinity", float('-infinity')],
                          ["<c8", 'f', gdal.GDT_CFloat32, None, None],
                          [">c8", 'f', gdal.GDT_CFloat32, None, None],
                          ["<c16", 'd', gdal.GDT_CFloat64, None, None],
                          [">c16", 'd', gdal.GDT_CFloat64, None, None]])
@pytest.mark.parametrize("use_optimized_code_paths", [True, False])
def test_zarr_write_array_content(dtype, structtype, gdaltype, fill_value, nodata_value, use_optimized_code_paths):
    """Write into a hand-crafted 5x4 Zarr array (2x3 chunks) for every
    supported dtype/endianness, with and without the optimized read/write
    code paths, and verify read-back, tile removal on all-nodata writes,
    and negative array_step access."""
    j = {
        "chunks": [
            2,
            3
        ],
        "compressor": None,
        "dtype": dtype,
        "fill_value": fill_value,
        "filters": None,
        "order": "C",
        "shape": [
            5,
            4
        ],
        "zarr_format": 2
    }
    # One temp directory per dtype so parametrized runs cannot collide
    filename = '/vsimem/test' + \
        dtype.replace('<', 'lt').replace('>', 'gt').replace(
            '!', 'not') + structtype + '.zarr'
    try:
        gdal.Mkdir(filename, 0o755)
        f = gdal.VSIFOpenL(filename + '/.zarray', 'wb')
        assert f
        data = json.dumps(j)
        gdal.VSIFWriteL(data, 1, len(data), f)
        gdal.VSIFCloseL(f)
        # Complex types interleave (real, imag) pairs, hence twice the values
        if gdaltype not in (gdal.GDT_CFloat32, gdal.GDT_CFloat64):
            tile_0_0_data = struct.pack(
                dtype[0] + (structtype * 6), 1, 2, 3, 5, 6, 7)
            tile_0_1_data = struct.pack(
                dtype[0] + (structtype * 6), 4, 0, 0, 8, 0, 0)
        else:
            tile_0_0_data = struct.pack(
                dtype[0] + (structtype * 12), 1, 11, 2, 0, 3, 0, 5, 0, 6, 0, 7, 0)
            tile_0_1_data = struct.pack(
                dtype[0] + (structtype * 12), 4, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0)
        gdal.FileFromMemBuffer(filename + '/0.0', tile_0_0_data)
        gdal.FileFromMemBuffer(filename + '/0.1', tile_0_1_data)
        with gdaltest.config_option('GDAL_ZARR_USE_OPTIMIZED_CODE_PATHS',
                                    'YES' if use_optimized_code_paths else 'NO'):
            ds = gdal.OpenEx(
                filename, gdal.OF_MULTIDIM_RASTER | gdal.OF_UPDATE)
            assert ds
            rg = ds.GetRootGroup()
            assert rg
            ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
        assert ar
        dt = gdal.ExtendedDataType.Create(gdal.GDT_CFloat64 if gdaltype in (
            gdal.GDT_CFloat32, gdal.GDT_CFloat64) else gdal.GDT_Float64)
        # Write all nodata. That should cause tiles to be removed.
        nv = nodata_value if nodata_value else 0
        buf_nodata = array.array(
            'd', [nv] * (5 * 4 * (2 if gdaltype in (gdal.GDT_CFloat32, gdal.GDT_CFloat64) else 1)))
        assert ar.Write(buf_nodata, buffer_datatype=dt) == gdal.CE_None
        assert ar.Read(buffer_datatype=dt) == bytearray(buf_nodata)
        if fill_value is None or fill_value == 0 or not gdal.DataTypeIsComplex(gdaltype):
            assert gdal.VSIStatL(filename + '/0.0') is None
        # Overwrite with a constant buffer (all zeros, despite the
        # variable's name) and check read-back
        ones = array.array('d', [
                           0] * (5 * 4 * (2 if gdaltype in (gdal.GDT_CFloat32, gdal.GDT_CFloat64) else 1)))
        assert ar.Write(ones, buffer_datatype=dt) == gdal.CE_None
        assert ar.Read(buffer_datatype=dt) == bytearray(ones)
        # Write with odd array_step
        assert ar.Write(struct.pack('d' * 4, nv, nv, 6, 5),
                        array_start_idx=[2, 1],
                        count=[2, 2],
                        array_step=[-1, -1],
                        buffer_datatype=gdal.ExtendedDataType.Create(gdal.GDT_Float64)) == gdal.CE_None
        # Check back
        assert ar.Read(array_start_idx=[2, 1],
                       count=[2, 2],
                       array_step=[-1, -1],
                       buffer_datatype=gdal.ExtendedDataType.Create(gdal.GDT_Float64)) == struct.pack('d' * 4, nv, nv, 6, 5)
        # Force dirty block eviction
        ar.Read(buffer_datatype=dt)
        # Check back again
        assert ar.Read(array_start_idx=[2, 1],
                       count=[2, 2],
                       array_step=[-1, -1],
                       buffer_datatype=gdal.ExtendedDataType.Create(gdal.GDT_Float64)) == struct.pack('d' * 4, nv, nv, 6, 5)
    finally:
        gdal.RmdirRecursive(filename)
@pytest.mark.parametrize("string_format,input_str,output_str",
                         [('ASCII', '0123456789truncated', '0123456789'),
                          ('UNICODE','\u00E9' + '123456789truncated', '\u00E9' + '123456789')],
                         ids=('ASCII', 'UNICODE'))
def test_zarr_create_array_string(string_format, input_str, output_str):
    """Write strings into a fixed-width (10) string array in both ASCII and
    UNICODE storage formats; over-long inputs must be truncated to the
    declared width."""
    try:
        def create():
            ds = gdal.GetDriverByName(
                'ZARR').CreateMultiDimensional('/vsimem/test.zarr')
            assert ds is not None
            rg = ds.GetRootGroup()
            assert rg
            dim0 = rg.CreateDimension("dim0", None, None, 2)
            ar = rg.CreateMDArray(
                "test", [dim0], gdal.ExtendedDataType.CreateString(10),
                ['STRING_FORMAT='+string_format, 'COMPRESS=ZLIB'])
            assert ar.Write(['ab', input_str]) == gdal.CE_None
        create()
        ds = gdal.OpenEx(
            '/vsimem/test.zarr', gdal.OF_MULTIDIM_RASTER | gdal.OF_UPDATE)
        assert ds
        rg = ds.GetRootGroup()
        assert rg
        ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
        assert ar.Read() == ['ab', output_str]
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
@pytest.mark.parametrize("srcfilename", ["data/zarr/unicode_le.zarr",
                                         "data/zarr/unicode_be.zarr"])
def test_zarr_update_array_string(srcfilename):
    """Update a unicode string array (both endiannesses) in place and verify
    the new value persists across a reopen."""
    filename = '/vsimem/test.zarr'
    try:
        # Copy the fixture into /vsimem so it can be modified
        gdal.Mkdir(filename, 0)
        gdal.FileFromMemBuffer(filename + '/.zarray', open(srcfilename + '/.zarray', 'rb').read())
        gdal.FileFromMemBuffer(filename + '/0', open(srcfilename + '/0', 'rb').read())
        eta = '\u03B7'
        def update():
            ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER | gdal.OF_UPDATE)
            rg = ds.GetRootGroup()
            ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
            assert ar.Read() == ['\u00E9']
            assert ar.Write([eta]) == gdal.CE_None
            assert gdal.GetLastErrorMsg() == ''
        update()
        def check():
            ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER)
            rg = ds.GetRootGroup()
            ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
            assert ar.Read() == [eta]
        check()
    finally:
        gdal.RmdirRecursive(filename)
@pytest.mark.parametrize("format", ['ZARR_V2', 'ZARR_V3'])
def test_zarr_create_fortran_order_3d_and_compression_and_dim_separator(format):
    """Create a 3D array with Fortran chunk layout, zlib compression and '/'
    dimension separator; verify the layout is recorded in the metadata and
    the data round-trips."""
    try:
        def create():
            ds = gdal.GetDriverByName(
                'ZARR').CreateMultiDimensional('/vsimem/test.zarr',
                                               options=['FORMAT='+format])
            assert ds is not None
            rg = ds.GetRootGroup()
            assert rg
            dim0 = rg.CreateDimension("dim0", None, None, 2)
            dim1 = rg.CreateDimension("dim1", None, None, 3)
            dim2 = rg.CreateDimension("dim2", None, None, 4)
            ar = rg.CreateMDArray(
                "test", [dim0, dim1, dim2],
                gdal.ExtendedDataType.Create(gdal.GDT_Byte),
                ['CHUNK_MEMORY_LAYOUT=F', 'COMPRESS=zlib', 'DIM_SEPARATOR=/'])
            assert ar.Write(array.array(
                'b', [i for i in range(2 * 3 * 4)])) == gdal.CE_None
        create()
        # The metadata file location and key differ between V2 and V3
        if format == 'ZARR_V2':
            f = gdal.VSIFOpenL('/vsimem/test.zarr/test/.zarray', 'rb')
        else:
            f = gdal.VSIFOpenL(
                '/vsimem/test.zarr/meta/root/test.array.json', 'rb')
        assert f
        data = gdal.VSIFReadL(1, 10000, f)
        gdal.VSIFCloseL(f)
        j = json.loads(data)
        if format == 'ZARR_V2':
            assert 'order' in j
            assert j['order'] == 'F'
        else:
            assert 'chunk_memory_layout' in j
            assert j['chunk_memory_layout'] == 'F'
        ds = gdal.OpenEx(
            '/vsimem/test.zarr', gdal.OF_MULTIDIM_RASTER | gdal.OF_UPDATE)
        assert ds
        rg = ds.GetRootGroup()
        assert rg
        ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
        assert ar.Read() == \
            array.array('b', [i for i in range(2 * 3 * 4)])
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def test_zarr_create_unit_offset_scale():
    """SetOffset/SetScale/SetUnit must be serialized as the CF-style
    'add_offset'/'scale_factor'/'units' attributes and round-trip on
    reopen."""
    try:
        def create():
            ds = gdal.GetDriverByName(
                'ZARR').CreateMultiDimensional('/vsimem/test.zarr')
            assert ds is not None
            rg = ds.GetRootGroup()
            assert rg
            ar = rg.CreateMDArray(
                "test", [], gdal.ExtendedDataType.Create(gdal.GDT_Byte))
            assert ar.SetOffset(1.5) == gdal.CE_None
            assert ar.SetScale(2.5) == gdal.CE_None
            assert ar.SetUnit("my unit") == gdal.CE_None
        create()
        # Inspect the raw .zattrs JSON written by the driver
        f = gdal.VSIFOpenL('/vsimem/test.zarr/test/.zattrs', 'rb')
        assert f
        data = gdal.VSIFReadL(1, 10000, f)
        gdal.VSIFCloseL(f)
        j = json.loads(data)
        assert 'add_offset' in j
        assert j['add_offset'] == 1.5
        assert 'scale_factor' in j
        assert j['scale_factor'] == 2.5
        assert 'units' in j
        assert j['units'] == 'my unit'
        ds = gdal.OpenEx(
            '/vsimem/test.zarr', gdal.OF_MULTIDIM_RASTER | gdal.OF_UPDATE)
        assert ds
        rg = ds.GetRootGroup()
        assert rg
        ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
        assert ar.GetOffset() == 1.5
        assert ar.GetScale() == 2.5
        assert ar.GetUnit() == 'my unit'
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def test_zarr_getcoordinatevariables():
    """Coordinate variables ('coordinates' attribute) from a translated
    netCDF dataset must be exposed through GetCoordinateVariables()."""
    src_ds = gdal.OpenEx(
        'data/netcdf/expanded_form_of_grid_mapping.nc', gdal.OF_MULTIDIM_RASTER)
    if src_ds is None:
        # netCDF driver not available in this build
        pytest.skip()
    try:
        def create(src_ds):
            ds = gdal.MultiDimTranslate(
                '/vsimem/test.zarr', src_ds, format='Zarr')
            src_ds = None
            assert ds
            rg = ds.GetRootGroup()
            ar = rg.OpenMDArray('temp')
            coordinate_vars = ar.GetCoordinateVariables()
            assert len(coordinate_vars) == 2
            assert coordinate_vars[0].GetName() == 'lat'
            assert coordinate_vars[1].GetName() == 'lon'
            # Coordinate variables themselves have no coordinate variables
            assert len(coordinate_vars[0].GetCoordinateVariables()) == 0
        create(src_ds)
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def test_zarr_create_copy():
    """Standard GDALTest CreateCopy check against a uint16 reference image."""
    harness = gdaltest.GDALTest('Zarr', '../../gcore/data/uint16.tif', 1, 4672)
    try:
        result = harness.testCreate(vsimem=1, new_filename='/vsimem/test.zarr')
        return result
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
@pytest.mark.parametrize("format", ['ZARR_V2', 'ZARR_V3'])
def test_zarr_create(format):
    """Create a Zarr dataset through the classic Create() API, set per-band
    metadata (nodata, offset, scale, unit) and dataset metadata, then
    reopen one band subdataset and verify persistence."""
    try:
        ds = gdal.GetDriverByName('Zarr').Create('/vsimem/test.zarr', 1, 1, 3,
                                                 options=['ARRAY_NAME=foo',
                                                          'FORMAT=' + format])
        # Freshly created dataset has no georeferencing or band metadata yet
        assert ds.GetGeoTransform(can_return_null=True) is None
        assert ds.GetSpatialRef() is None
        assert ds.GetRasterBand(1).GetNoDataValue() is None
        assert ds.GetRasterBand(1).SetNoDataValue(10) == gdal.CE_None
        assert ds.GetRasterBand(1).GetOffset() is None
        assert ds.GetRasterBand(1).SetOffset(1.5) == gdal.CE_None
        assert ds.GetRasterBand(1).GetScale() is None
        assert ds.GetRasterBand(1).SetScale(2.5) == gdal.CE_None
        assert ds.GetRasterBand(1).GetUnitType() == ''
        assert ds.GetRasterBand(1).SetUnitType("my_unit") == gdal.CE_None
        assert ds.SetMetadata({"FOO": "BAR"}) == gdal.CE_None
        ds = None
        # Multi-band classic datasets expose one array per band
        ds = gdal.Open('ZARR:/vsimem/test.zarr:/foo_band1')
        assert ds
        assert ds.GetMetadata() == {"FOO": "BAR"}
        assert ds.GetRasterBand(1).GetNoDataValue() == 10.0
        assert ds.GetRasterBand(1).GetOffset() == 1.5
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def test_zarr_create_append_subdataset():
    """APPEND_SUBDATASET=YES must reuse the existing X/Y dimensions when the
    new array has the same extent/geotransform, and create prefixed
    dimensions (baz_X/baz_Y) when it does not."""
    try:
        def create():
            ds = gdal.GetDriverByName('Zarr').Create('/vsimem/test.zarr', 3, 2, 1,
                                                     options=['ARRAY_NAME=foo'])
            assert ds
            ds.SetGeoTransform([2, 1, 0, 49, 0, -1])
            ds = None
            # Same dimensions. Will reuse the ones of foo
            ds = gdal.GetDriverByName('Zarr').Create('/vsimem/test.zarr', 3, 2, 1,
                                                     options=['APPEND_SUBDATASET=YES',
                                                              'ARRAY_NAME=bar'])
            assert ds
            ds.SetGeoTransform([2, 1, 0, 49, 0, -1])
            ds = None
            # Different dimensions.
            ds = gdal.GetDriverByName('Zarr').Create('/vsimem/test.zarr', 30, 20, 1,
                                                     options=['APPEND_SUBDATASET=YES',
                                                              'ARRAY_NAME=baz'])
            assert ds
            ds.SetGeoTransform([2, .1, 0, 49, 0, -.1])
            ds = None
        create()
        def check():
            ds = gdal.OpenEx('/vsimem/test.zarr', gdal.OF_MULTIDIM_RASTER)
            rg = ds.GetRootGroup()
            foo = rg.OpenMDArray('foo')
            assert foo
            assert foo.GetDimensions()[0].GetName() == 'Y'
            assert foo.GetDimensions()[1].GetName() == 'X'
            bar = rg.OpenMDArray('bar')
            assert bar
            assert bar.GetDimensions()[0].GetName() == 'Y'
            assert bar.GetDimensions()[1].GetName() == 'X'
            baz = rg.OpenMDArray('baz')
            assert baz
            assert baz.GetDimensions()[0].GetName() == 'baz_Y'
            assert baz.GetDimensions()[1].GetName() == 'baz_X'
        check()
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
@pytest.mark.parametrize("blocksize", ['1,2',
                                       '2,2,0',
                                       '4000000000,4000000000,4000000000'])
def test_zarr_create_array_invalid_blocksize(blocksize):
    """CreateMDArray() must reject BLOCKSIZE values with the wrong number of
    elements, a zero element, or sizes whose product overflows."""
    try:
        def create():
            ds = gdal.GetDriverByName(
                'ZARR').CreateMultiDimensional('/vsimem/test.zarr')
            assert ds is not None
            rg = ds.GetRootGroup()
            assert rg
            dim0 = rg.CreateDimension("dim0", None, None, 2)
            dim1 = rg.CreateDimension("dim1", None, None, 2)
            dim2 = rg.CreateDimension("dim2", None, None, 2)
            with gdaltest.error_handler():
                ar = rg.CreateMDArray(
                    "test", [dim0, dim1, dim2],
                    gdal.ExtendedDataType.Create(gdal.GDT_Byte),
                    ['BLOCKSIZE=' + blocksize])
                assert ar is None
        create()
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def test_zarr_read_filters():
    # Read a Zarr array that uses the "delta" codec in its "filters" chain.
    filename = 'data/zarr/delta_filter_i4.zarr'
    ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER)
    rg = ds.GetRootGroup()
    assert rg
    ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
    assert ar
    # Reference dataset contains the int32 values 0..9.
    assert ar.Read() == array.array('i', [i for i in range(10)])
def test_zarr_update_with_filters():
    # Writing through a "delta" filter must round-trip: copy the reference
    # dataset into /vsimem, overwrite it, and re-read the new values.
    try:
        gdal.Mkdir('/vsimem/test.zarr', 0)
        gdal.FileFromMemBuffer('/vsimem/test.zarr/.zarray',
                               open('data/zarr/delta_filter_i4.zarr/.zarray', 'rb').read())
        gdal.FileFromMemBuffer('/vsimem/test.zarr/0',
                               open('data/zarr/delta_filter_i4.zarr/0', 'rb').read())

        def update():
            ds = gdal.OpenEx('/vsimem/test.zarr',
                             gdal.OF_MULTIDIM_RASTER | gdal.OF_UPDATE)
            assert ds
            rg = ds.GetRootGroup()
            assert rg
            ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
            assert ar
            assert ar.Read() == array.array('i', [i for i in range(10)])
            # Overwrite with the reversed sequence 10..1.
            assert ar.Write(array.array(
                'i', [10-i for i in range(10)])) == gdal.CE_None

        update()

        # Re-open and check that the written values survived the filter.
        ds = gdal.OpenEx('/vsimem/test.zarr',
                         gdal.OF_MULTIDIM_RASTER | gdal.OF_UPDATE)
        assert ds
        rg = ds.GetRootGroup()
        assert rg
        ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
        assert ar
        assert ar.Read() == array.array('i', [10 - i for i in range(10)])
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def test_zarr_create_with_filter():
    # Creating a dataset with FILTER=delta must record the filter (with the
    # source dtype) in the generated .zarray metadata.
    tst = gdaltest.GDALTest('Zarr', '../../gcore/data/uint16.tif', 1, 4672,
                            options=['FILTER=delta'])
    try:
        ret = tst.testCreate(vsimem=1, new_filename='/vsimem/test.zarr')

        f = gdal.VSIFOpenL('/vsimem/test.zarr/test/.zarray', 'rb')
        assert f
        data = gdal.VSIFReadL(1, 10000, f)
        gdal.VSIFCloseL(f)
        j = json.loads(data)
        assert 'filters' in j
        assert j['filters'] == [{'id': 'delta', 'dtype': '<u2'}]
        return ret
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def test_zarr_pam_spatial_ref():
    # An SRS set on a read-only dataset must be stored in the PAM side-car
    # file (pam.aux.xml), round-trip through it, and foreign XML content in
    # that file must be preserved across rewrites.
    try:
        def create():
            ds = gdal.GetDriverByName(
                'ZARR').CreateMultiDimensional('/vsimem/test.zarr')
            assert ds is not None
            rg = ds.GetRootGroup()
            assert rg
            dim0 = rg.CreateDimension("dim0", None, None, 2)
            dim1 = rg.CreateDimension("dim1", None, None, 2)
            rg.CreateMDArray("test", [dim0, dim1],
                             gdal.ExtendedDataType.Create(gdal.GDT_Byte))

        create()
        # No PAM file before any SRS is set.
        assert gdal.VSIStatL('/vsimem/test.zarr/pam.aux.xml') is None

        def check_crs_before():
            ds = gdal.OpenEx('/vsimem/test.zarr',
                             gdal.OF_MULTIDIM_RASTER)
            assert ds
            rg = ds.GetRootGroup()
            assert rg
            ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
            assert ar
            crs = ar.GetSpatialRef()
            assert crs is None

        check_crs_before()
        assert gdal.VSIStatL('/vsimem/test.zarr/pam.aux.xml') is None

        def set_crs():
            # Open in read-only
            ds = gdal.OpenEx('/vsimem/test.zarr',
                             gdal.OF_MULTIDIM_RASTER)
            assert ds
            rg = ds.GetRootGroup()
            assert rg
            ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
            assert ar
            crs = osr.SpatialReference()
            crs.ImportFromEPSG(4326)
            crs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
            crs.SetCoordinateEpoch(2021.2)
            # Works even in read-only mode because it is stored in PAM.
            assert ar.SetSpatialRef(crs) == gdal.CE_None

        set_crs()
        assert gdal.VSIStatL('/vsimem/test.zarr/pam.aux.xml') is not None

        # Inject a foreign element into the PAM file; GDAL must keep it when
        # it rewrites the file later.
        f = gdal.VSIFOpenL('/vsimem/test.zarr/pam.aux.xml', 'rb+')
        assert f
        data = gdal.VSIFReadL(1, 1000, f).decode('utf-8')
        assert data.endswith('</PAMDataset>\n')
        data = data[0:-len('</PAMDataset>\n')] + '<Other/>' + '</PAMDataset>\n'
        gdal.VSIFSeekL(f, 0, 0)
        gdal.VSIFWriteL(data, 1, len(data), f)
        gdal.VSIFCloseL(f)

        def check_crs():
            ds = gdal.OpenEx('/vsimem/test.zarr',
                             gdal.OF_MULTIDIM_RASTER)
            assert ds
            rg = ds.GetRootGroup()
            assert rg
            ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
            assert ar
            crs = ar.GetSpatialRef()
            assert crs is not None
            assert crs.GetAuthorityCode(None) == '4326'
            assert crs.GetDataAxisToSRSAxisMapping() == [2, 1]
            assert crs.GetCoordinateEpoch() == 2021.2

        check_crs()

        def check_crs_classic_dataset():
            # The PAM SRS is also visible through the classic 2D raster API.
            ds = gdal.Open('/vsimem/test.zarr')
            crs = ds.GetSpatialRef()
            assert crs is not None

        check_crs_classic_dataset()

        def unset_crs():
            # Open in read-only
            ds = gdal.OpenEx('/vsimem/test.zarr',
                             gdal.OF_MULTIDIM_RASTER)
            assert ds
            rg = ds.GetRootGroup()
            assert rg
            ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
            assert ar
            assert ar.SetSpatialRef(None) == gdal.CE_None

        unset_crs()

        # The injected foreign element must have survived the rewrite.
        f = gdal.VSIFOpenL('/vsimem/test.zarr/pam.aux.xml', 'rb')
        assert f
        data = gdal.VSIFReadL(1, 1000, f).decode('utf-8')
        gdal.VSIFCloseL(f)
        assert '<Other />' in data

        def check_unset_crs():
            ds = gdal.OpenEx('/vsimem/test.zarr',
                             gdal.OF_MULTIDIM_RASTER)
            assert ds
            rg = ds.GetRootGroup()
            assert rg
            ar = rg.OpenMDArray(rg.GetMDArrayNames()[0])
            assert ar
            crs = ar.GetSpatialRef()
            assert crs is None

        check_unset_crs()
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def test_zarr_read_too_large_tile_size():
    # A chunk size far larger than the array shape (and too large to
    # allocate) must make Read() fail cleanly instead of crashing.
    j = {
        "chunks": [
            1000000,
            2000
        ],
        "compressor": None,
        "dtype": '!b1',
        "fill_value": None,
        "filters": None,
        "order": "C",
        "shape": [
            5,
            4
        ],
        "zarr_format": 2
    }

    try:
        gdal.Mkdir('/vsimem/test.zarr', 0)
        gdal.FileFromMemBuffer('/vsimem/test.zarr/.zarray', json.dumps(j))
        ds = gdal.OpenEx('/vsimem/test.zarr', gdal.OF_MULTIDIM_RASTER)
        assert ds is not None
        with gdaltest.error_handler():
            assert ds.GetRootGroup().OpenMDArray('test').Read() is None
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def test_zarr_read_recursive_array_loading():
    # Two arrays that reference each other as dimensions must not trigger
    # infinite recursion; the driver must detect and report the cycle.
    try:
        gdal.Mkdir('/vsimem/test.zarr', 0)

        j = {"zarr_format": 2}
        gdal.FileFromMemBuffer('/vsimem/test.zarr/.zgroup', json.dumps(j))

        j = {"chunks": [1],
             "compressor": None,
             "dtype": '!b1',
             "fill_value": None,
             "filters": None,
             "order": "C",
             "shape": [1],
             "zarr_format": 2
             }
        gdal.FileFromMemBuffer('/vsimem/test.zarr/a/.zarray', json.dumps(j))
        gdal.FileFromMemBuffer('/vsimem/test.zarr/b/.zarray', json.dumps(j))

        # a's dimension points at b, and b's dimension points back at a.
        j = {"_ARRAY_DIMENSIONS": ["b"]}
        gdal.FileFromMemBuffer('/vsimem/test.zarr/a/.zattrs', json.dumps(j))

        j = {"_ARRAY_DIMENSIONS": ["a"]}
        gdal.FileFromMemBuffer('/vsimem/test.zarr/b/.zattrs', json.dumps(j))

        ds = gdal.OpenEx('/vsimem/test.zarr', gdal.OF_MULTIDIM_RASTER)
        assert ds is not None
        with gdaltest.error_handler():
            ar = ds.GetRootGroup().OpenMDArray('a')
            assert ar
        assert gdal.GetLastErrorMsg() == 'Attempt at recursively loading /vsimem/test.zarr/a/.zarray'
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def test_zarr_read_too_deep_array_loading():
    # A long chain of arrays, each naming the next one as its dimension,
    # must be cut off once the loading call stack becomes too deep.
    try:
        gdal.Mkdir('/vsimem/test.zarr', 0)

        j = {"zarr_format": 2}
        gdal.FileFromMemBuffer('/vsimem/test.zarr/.zgroup', json.dumps(j))

        j = {"chunks": [1],
             "compressor": None,
             "dtype": '!b1',
             "fill_value": None,
             "filters": None,
             "order": "C",
             "shape": [1],
             "zarr_format": 2
             }
        # 33 chained arrays exceed the driver's recursion limit.
        N = 33
        for i in range(N):
            gdal.FileFromMemBuffer('/vsimem/test.zarr/%d/.zarray' % i, json.dumps(j))
        # Array i declares array i+1 as its dimension, forming a deep chain.
        for i in range(N-1):
            j = {"_ARRAY_DIMENSIONS": ["%d" % (i+1)]}
            gdal.FileFromMemBuffer('/vsimem/test.zarr/%d/.zattrs' % i, json.dumps(j))

        ds = gdal.OpenEx('/vsimem/test.zarr', gdal.OF_MULTIDIM_RASTER)
        assert ds is not None
        with gdaltest.error_handler():
            ar = ds.GetRootGroup().OpenMDArray('0')
            assert ar
        assert gdal.GetLastErrorMsg() == 'Too deep call stack in LoadArray()'
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
@pytest.mark.parametrize("filename,path",
                         [('data/zarr/nczarr_v2.zarr', '/MyGroup/Group_A'),
                          ('data/zarr/nczarr_v2.zarr/MyGroup', '/Group_A'),
                          ('data/zarr/nczarr_v2.zarr/MyGroup/Group_A', ''),
                          ('data/zarr/nczarr_v2.zarr/MyGroup/Group_A/dset2', None)])
def test_zarr_read_nczarr_v2(filename, path):
    # Open an NCZarr v2 dataset at several entry points and check that
    # dimensions are resolved from the NCZarr metadata of parent groups.
    # NCZarr datasets are read-only: update mode must be refused.
    with gdaltest.error_handler():
        assert gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER | gdal.OF_UPDATE) is None

    ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER)
    assert ds is not None
    rg = ds.GetRootGroup()
    ar = rg.OpenMDArrayFromFullname((path if path else '') + '/dset2')
    assert ar
    dims = ar.GetDimensions()
    assert len(dims) == 2

    # dset2 uses lat/lon dimensions inherited from /MyGroup, with indexing
    # variables and geographic dimension types/directions.
    assert dims[0].GetSize() == 3
    assert dims[0].GetName() == 'lat'
    assert dims[0].GetFullName() == '/MyGroup/lat'
    assert dims[0].GetIndexingVariable() is not None
    assert dims[0].GetIndexingVariable().GetName() == 'lat'
    assert dims[0].GetType() == gdal.DIM_TYPE_HORIZONTAL_Y
    assert dims[0].GetDirection() == 'NORTH'

    assert dims[1].GetSize() == 3
    assert dims[1].GetName() == 'lon'
    assert dims[1].GetFullName() == '/MyGroup/lon'
    assert dims[1].GetIndexingVariable() is not None
    assert dims[1].GetIndexingVariable().GetName() == 'lon'
    assert dims[1].GetType() == gdal.DIM_TYPE_HORIZONTAL_X
    assert dims[1].GetDirection() == 'EAST'

    if path:
        # dset3 has its own lat/lon defined directly in /MyGroup/Group_A.
        ar = rg.OpenMDArrayFromFullname(path + '/dset3')
        assert ar
        dims = ar.GetDimensions()
        assert len(dims) == 2
        assert dims[0].GetSize() == 2
        assert dims[0].GetName() == 'lat'
        assert dims[0].GetFullName() == '/MyGroup/Group_A/lat'
        assert dims[1].GetSize() == 2
        assert dims[1].GetName() == 'lon'
        assert dims[1].GetFullName() == '/MyGroup/Group_A/lon'

    if filename == 'data/zarr/nczarr_v2.zarr':
        mygroup = rg.OpenGroup('MyGroup')
        assert mygroup.GetMDArrayNames() == ['lon', 'lat', 'dset1']
@pytest.mark.parametrize("format", ['ZARR_V2', 'ZARR_V3'])
def test_zarr_cache_tile_presence(format):
    # CACHE_TILE_PRESENCE=YES builds a .gmac side-car array that records
    # which tiles exist on disk; reads must honour that cache, even when it
    # disagrees with the actual files.
    if gdal.GetDriverByName('netCDF') is None:
        pytest.skip('netCDF driver missing')

    filename = 'tmp/test.zarr'
    try:
        # Create a Zarr array with sparse tiles
        def create():
            ds = gdal.GetDriverByName(
                'ZARR').CreateMultiDimensional(filename, options=['FORMAT='+format])
            assert ds is not None
            rg = ds.GetRootGroup()
            assert rg
            dim0 = rg.CreateDimension("dim0", None, None, 2)
            dim1 = rg.CreateDimension("dim1", None, None, 5)
            ar = rg.CreateMDArray("test", [dim0, dim1],
                                  gdal.ExtendedDataType.Create(gdal.GDT_Byte),
                                  ['BLOCKSIZE=1,2'])
            assert ar
            # Only two of the 2x3 tiles receive data.
            assert ar.Write(struct.pack('B' * 1, 10),
                            array_start_idx=[0, 0],
                            count=[1, 1]) == gdal.CE_None
            assert ar.Write(struct.pack('B' * 1, 100),
                            array_start_idx=[1, 3],
                            count=[1, 1]) == gdal.CE_None

        create()

        # Create the tile presence cache
        def open_with_cache_tile_presence_option():
            ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER,
                             open_options=['CACHE_TILE_PRESENCE=YES'])
            assert ds is not None
            rg = ds.GetRootGroup()
            assert rg.OpenMDArray('test') is not None

        open_with_cache_tile_presence_option()

        # Check that the cache exists
        if format == 'ZARR_V2':
            cache_filename = filename + '/test/.zarray.gmac'
        else:
            cache_filename = filename + '/meta/root/test.array.json.gmac'
        assert gdal.VSIStatL(cache_filename) is not None

        # Read content of the array
        def read_content():
            ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER)
            assert ds is not None
            rg = ds.GetRootGroup()
            ar = rg.OpenMDArray('test')
            assert ar is not None
            assert struct.unpack('B' * 2 * 5, ar.Read()) == (10, 0, 0, 0, 0,
                                                             0, 0, 0, 100, 0)

        read_content()

        # again
        open_with_cache_tile_presence_option()
        read_content()

        # Now alter the cache to mark a present tile as missing
        def alter_cache():
            ds = gdal.OpenEx(cache_filename,
                             gdal.OF_MULTIDIM_RASTER | gdal.OF_UPDATE)
            assert ds is not None
            rg = ds.GetRootGroup()
            assert rg.GetMDArrayNames() == ['_test_tile_presence']
            ar = rg.OpenMDArray('_test_tile_presence')
            assert struct.unpack('B' * 2 * 3, ar.Read()) == (1, 0, 0,
                                                             0, 1, 0)
            assert ar.Write(struct.pack('B' * 1, 0),
                            array_start_idx=[1, 1],
                            count=[1, 1]) == gdal.CE_None

        alter_cache()

        # Check that reading the array reflects the above modification
        def read_content_altered():
            ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER)
            assert ds is not None
            rg = ds.GetRootGroup()
            ar = rg.OpenMDArray('test')
            assert ar is not None
            # The tile holding 100 is now reported missing, so it reads as 0.
            assert struct.unpack('B' * 2 * 5, ar.Read()) == (10, 0, 0, 0, 0,
                                                             0, 0, 0, 0, 0)

        read_content_altered()
    finally:
        gdal.RmdirRecursive(filename)
@pytest.mark.parametrize("compression", ["NONE", "ZLIB"])
def test_zarr_advise_read(compression):
    # AdviseRead() pre-caches a window of tiles; reads inside and beyond the
    # advised window must return the same data as reads without advising.
    filename = 'tmp/test.zarr'
    try:
        dim0_size = 1230
        dim1_size = 2570
        dim0_blocksize = 20
        dim1_blocksize = 30
        data_ar = [(i % 256) for i in range(dim0_size * dim1_size)]

        # Create empty block
        y_offset = dim0_blocksize
        x_offset = dim1_blocksize
        for y in range(dim0_blocksize):
            for x in range(dim1_blocksize):
                data_ar[dim1_size * (y + y_offset) + x + x_offset] = 0

        data = array.array('B', data_ar)

        def create():
            ds = gdal.GetDriverByName(
                'ZARR').CreateMultiDimensional(filename)
            assert ds is not None
            rg = ds.GetRootGroup()
            assert rg
            dim0 = rg.CreateDimension("dim0", None, None, dim0_size)
            dim1 = rg.CreateDimension("dim1", None, None, dim1_size)
            ar = rg.CreateMDArray("test", [dim0, dim1],
                                  gdal.ExtendedDataType.Create(gdal.GDT_Byte),
                                  ['COMPRESS=' + compression,
                                   'BLOCKSIZE=%d,%d' % (dim0_blocksize, dim1_blocksize)])
            assert ar
            ar.SetNoDataValueDouble(0)
            assert ar.Write(data) == gdal.CE_None

        create()

        def read():
            ds = gdal.OpenEx(filename, gdal.OF_MULTIDIM_RASTER)
            assert ds is not None
            rg = ds.GetRootGroup()
            ar = rg.OpenMDArray('test')
            # A cache too small for the requested window must fail.
            with gdaltest.error_handler():
                assert ar.AdviseRead(options=['CACHE_SIZE=1']) == gdal.CE_Failure

            got_data_before_advise_read = ar.Read(array_start_idx=[40, 51],
                                                  count=[2 * dim0_blocksize, 2 * dim1_blocksize])

            assert ar.AdviseRead() == gdal.CE_None
            assert ar.Read() == data

            assert ar.AdviseRead(array_start_idx=[40, 51],
                                 count=[2 * dim0_blocksize, dim1_blocksize]) == gdal.CE_None
            # Read more than AdviseRead() window
            got_data = ar.Read(array_start_idx=[40, 51],
                               count=[2 * dim0_blocksize, 2 * dim1_blocksize])
            assert got_data == got_data_before_advise_read

        read()
    finally:
        gdal.RmdirRecursive(filename)
def test_zarr_read_invalid_nczarr_dim():
    # Inconsistent NCZarr dimension metadata (declared sizes that do not
    # match the arrays, invalid dimension names) must be reported but must
    # not prevent the referencing array from opening.
    try:
        gdal.Mkdir('/vsimem/test.zarr', 0)

        j = {"chunks": [1, 1],
             "compressor": None,
             "dtype": '!b1',
             "fill_value": None,
             "filters": None,
             "order": "C",
             "shape": [1, 1],
             "zarr_format": 2,
             "_NCZARR_ARRAY": {"dimrefs": ["/MyGroup/lon", "/OtherGroup/lat"]}
             }
        gdal.FileFromMemBuffer('/vsimem/test.zarr/.zarray', json.dumps(j))

        j = {"chunks": [1],
             "compressor": None,
             "dtype": '!b1',
             "fill_value": None,
             "filters": None,
             "order": "C",
             "shape": [1],
             "zarr_format": 2
             }
        gdal.FileFromMemBuffer('/vsimem/test.zarr/MyGroup/lon/.zarray', json.dumps(j))

        # "lon" declared with size 0, inconsistent with the array above.
        j = {"_NCZARR_GROUP": {"dims": {"lon": 0}}}
        gdal.FileFromMemBuffer('/vsimem/test.zarr/MyGroup/.zgroup', json.dumps(j))

        j = {"chunks": [2],
             "compressor": None,
             "dtype": '!b1',
             "fill_value": None,
             "filters": None,
             "order": "C",
             "shape": [2],
             "zarr_format": 2
             }
        gdal.FileFromMemBuffer('/vsimem/test.zarr/OtherGroup/lat/.zarray', json.dumps(j))

        # "lat" has size 2 while the referencing array expects 1, and one
        # dimension has a name that is not valid.
        j = {"_NCZARR_GROUP": {"dims": {"lat": 2, "invalid.name": 2}}}
        gdal.FileFromMemBuffer('/vsimem/test.zarr/OtherGroup/.zgroup', json.dumps(j))

        with gdaltest.error_handler():
            ds = gdal.OpenEx('/vsimem/test.zarr', gdal.OF_MULTIDIM_RASTER)
            assert ds
            rg = ds.GetRootGroup()
            ar = rg.OpenMDArray('test')
            assert ar
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def test_zarr_read_nczar_repeated_array_names():
    # Duplicated names in the NCZarr "vars" and "groups" lists must be
    # de-duplicated when listing arrays and subgroups.
    try:
        gdal.Mkdir('/vsimem/test.zarr', 0)

        j = {"_NCZARR_GROUP": {"dims": {"lon": 1}, "vars": ["a", "a", "lon", "lon"], "groups": ["g", "g"]}}
        gdal.FileFromMemBuffer('/vsimem/test.zarr/.zgroup', json.dumps(j))

        j = {"chunks": [1, 1],
             "compressor": None,
             "dtype": '!b1',
             "fill_value": None,
             "filters": None,
             "order": "C",
             "shape": [1, 1],
             "zarr_format": 2
             }
        gdal.FileFromMemBuffer('/vsimem/test.zarr/a/.zarray', json.dumps(j))

        j = {"chunks": [1],
             "compressor": None,
             "dtype": '!b1',
             "fill_value": None,
             "filters": None,
             "order": "C",
             "shape": [1],
             "zarr_format": 2
             }
        gdal.FileFromMemBuffer('/vsimem/test.zarr/lon/.zarray', json.dumps(j))

        with gdaltest.error_handler():
            ds = gdal.OpenEx('/vsimem/test.zarr', gdal.OF_MULTIDIM_RASTER)
            assert ds
            rg = ds.GetRootGroup()
            # Each name appears only once despite the duplicated metadata.
            assert rg.GetMDArrayNames() == ['lon', 'a']
            ar = rg.OpenMDArray('a')
            assert ar
            assert rg.GetGroupNames() == ["g"]
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def test_zarr_read_test_overflow_in_AllocateWorkingBuffers_due_to_fortran():
    # Huge chunk dimensions combined with Fortran order must not overflow
    # the working-buffer size computation; Read() must fail cleanly.
    if sys.maxsize < (1 << 32):
        pytest.skip()

    try:
        gdal.Mkdir('/vsimem/test.zarr', 0)

        j = {"chunks": [(1 << 32) - 1, (1 << 32) - 1],
             "compressor": None,
             "dtype": '!b1',
             "fill_value": None,
             "filters": None,
             "order": "F",
             "shape": [1, 1],
             "zarr_format": 2
             }
        gdal.FileFromMemBuffer('/vsimem/test.zarr/.zarray', json.dumps(j))
        ds = gdal.OpenEx('/vsimem/test.zarr', gdal.OF_MULTIDIM_RASTER)
        assert ds
        rg = ds.GetRootGroup()
        ar = rg.OpenMDArray('test')
        with gdaltest.error_handler():
            assert ar.Read(count=[1, 1]) is None
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
def test_zarr_read_test_overflow_in_AllocateWorkingBuffers_due_to_type_change():
    # Huge chunk dimensions with an 8-byte dtype must not overflow the
    # working-buffer size computation; Read() must fail cleanly.
    if sys.maxsize < (1 << 32):
        pytest.skip()

    try:
        gdal.Mkdir('/vsimem/test.zarr', 0)

        j = {"chunks": [(1 << 32) - 1, ((1 << 32) - 1) / 8],
             "compressor": None,
             "dtype": '<u8',
             "fill_value": None,
             "filters": None,
             "order": "C",
             "shape": [1, 1],
             "zarr_format": 2
             }
        gdal.FileFromMemBuffer('/vsimem/test.zarr/.zarray', json.dumps(j))
        ds = gdal.OpenEx('/vsimem/test.zarr', gdal.OF_MULTIDIM_RASTER)
        assert ds
        rg = ds.GetRootGroup()
        ar = rg.OpenMDArray('test')
        with gdaltest.error_handler():
            assert ar.Read(count=[1, 1]) is None
    finally:
        gdal.RmdirRecursive('/vsimem/test.zarr')
|
'''
Created on 18-sep-2020
@author: david
'''
import sys

# Make user applications stored on the board's flash importable before
# pulling in the uvacbot modules below.
sys.path.append("/flash/userapp")

from uvacbot.sensor.mpu6050 import Mpu6050
from math import radians
from micropython import alloc_emergency_exception_buf
from pyb import Pin, Switch
from uasyncio import run as uasyncio_run, sleep_ms
from uvacbot.engine.driver import Driver
from uvacbot.engine.motion import MotionController
from uvacbot.engine.motor import Motor

# Reserve memory so exceptions raised inside interrupt handlers can still
# report a traceback (standard MicroPython practice).
alloc_emergency_exception_buf(100)
async def mainTask(motion):
    """Wait for the user switch, then run the turn test: turn 30 degrees
    counter-clockwise, pause, then 60 degrees clockwise.

    :param motion: MotionController driving the robot
    """
    print("Press user switch to start.")
    userSwitch = Switch()
    # Poll the switch instead of blocking, so other tasks can run.
    while not userSwitch.value():
        await sleep_ms(200)
    print("Starting")
    await sleep_ms(1000)
    print("turning counter clockwise")
    await motion.turn(radians(-30))
    await sleep_ms(1000)
    print("turning clockwise")
    await motion.turn(radians(60))
    print("finished")
def main():
    """Initialize the MPU, motors and motion controller, run the turn test,
    and release all hardware resources on exit."""
    print("Turn test")

    # 20210618 DPM: The code of this example is configured for the
    # NUCLEO-L746RG board. Please, adapt according to the actual
    # configuration.
    PID_KP = 250.0
    PID_KI = 0.0
    PID_KD = 0.0

    print("initializing MPU")
    mpu = Mpu6050(1)
    mpu.start()
    print("MPU initialized")

    # NOTE(review): Motor's positional args look like (pwm_pin, timer,
    # channel, direction_pin) -- confirm against the Motor implementation.
    motorLeft = Motor(Pin.board.D10, 4, 1, Pin.board.D11)
    motorRight = Motor(Pin.board.D9, 8, 2, Pin.board.D8)
    motorDriver = Driver(motorLeft, motorRight)
    motion = MotionController(mpu, motorDriver, PID_KP, PID_KI, PID_KD)
    try:
        uasyncio_run(mainTask(motion))
    finally:
        # Always release the hardware, even if the task fails or is aborted.
        motion.stop()
        mpu.cleanup()
        motorDriver.cleanup()


if __name__ == '__main__':
    main()
|
from os import path
from collections import namedtuple
import click
import graphviz
from functional import seq
# Graphviz layout engines selectable via --engine. Fixed the misspelled
# 'patchword': the actual graphviz engine is named 'patchwork'.
ENGINES = ['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage']
Edge = namedtuple('Edge', 'left right label')
def split_edge(edge):
edge_label = None
if ':' in edge:
edge, edge_label = edge.split(':')
if '-' in edge:
left, right = edge.split('-')
if right == '':
right = None
else:
left, right = edge
return Edge(left, right, edge_label)
@click.command()
@click.option('--engine', '-e', default='dot', type=click.Choice(ENGINES),
              help="Choose layout engine to use")
@click.option('--undirected/--directed', '-u/-d', default=True,
              help="Specify undirected or directed edges")
@click.option('--format', default='png', type=str, help='Image format')
@click.option('--name', '-n', default=None, type=str, help='Name of graph in image')
@click.option('--dot', is_flag=True, help='Preserve the source dot file')
@click.option('--no-vertex-labels', is_flag=True, help="Don't label vertex labels")
@click.argument('file', type=click.Path(writable=True))
@click.argument('edges', nargs=-1, required=True)
def main(engine, undirected, format, name, dot, file, edges, no_vertex_labels):
    """Render EDGES (e.g. "a-b" "b-c:label") as a graph image at FILE."""
    # Graph vs Digraph depends on the --undirected/--directed flag.
    if undirected:
        graph = graphviz.Graph(engine=engine, format=format)
    else:
        graph = graphviz.Digraph(engine=engine, format=format)
    if name:
        graph.body.append(r'label = "{0}"'.format(name))
    edges = seq(edges).map(split_edge)
    # Declare every distinct vertex first (optionally without labels).
    if no_vertex_labels:
        edges.map(lambda e: (e.left, e.right)).flatten().distinct()\
            .filter_not(lambda n: n is None).for_each(lambda n: graph.node(n, label=''))
    else:
        edges.map(lambda e: (e.left, e.right)).flatten().distinct() \
            .filter_not(lambda n: n is None).for_each(lambda n: graph.node(n))
    # Only fully specified pairs become edges; lone vertices were handled above.
    edges.filter(lambda e: e.right is not None) \
        .for_each(lambda e: graph.edge(e.left, e.right, label=e.label))
    filepath, filename = path.split(file)
    filepath = filepath if filepath != '' else None
    # cleanup=not dot: delete the intermediate .dot source unless --dot.
    graph.render(filename=filename, directory=filepath, cleanup=not dot)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
class Default:
    """
    It is used to replace `None` or `object()` as a sentinel
    that represents a default value. Sometimes we want to set
    a value to `None` so we cannot use `None` to represent the
    default value, and `object()` is hard to be typed.
    """

    # No instance state is ever needed for a sentinel.
    __slots__ = ()

    def __repr__(self):
        # Make the sentinel recognizable in debug output and logs.
        return "<DefaultValue>"


# Shared sentinel instance; compare against it with `is`.
DefaultValue = Default()
|
import serial
import deepstream

# Number of values the Arduino writes to Serial per "START"/"END" frame.
NoOfValues = 14
# Serial device and speed of the attached Arduino.
ArduinoPort = '/dev/ttyACM0'
BaudRate = 57600
def getArduinoValues():
    """Read one complete frame of sensor readings from the Arduino.

    The Arduino brackets each frame with "START" and "END" marker lines;
    every line in between is one sensor reading, returned as a string.

    :returns: list of sensor value strings in transmission order
    """
    in_frame = False
    sensorValues = []
    # Use the port as a context manager so the handle is closed on every
    # exit path (the original opened a new port per call and leaked it).
    with serial.Serial(ArduinoPort, BaudRate) as ser:
        while True:
            if ser.inWaiting() > 0:
                readstr = bytes.decode(ser.readline()).rstrip()
                if "START" in readstr:
                    in_frame = True
                    continue
                elif "END" in readstr and in_frame:
                    return sensorValues
                if in_frame:
                    sensorValues.append(readstr)
def ListToDict(values):
    """Convert the raw string readings from the Arduino into a labelled
    dictionary, scaling fixed-point integers back to real units.

    :param values: list of at least 14 sensor value strings, in the order
        the Arduino transmits them
    :returns: {sensor_name: converted_value} dictionary
    """
    def hundredth(raw):
        # Values transmitted as integers scaled by 100.
        return float(raw) / 100

    # (key, converter) pairs in transmission order; None keeps the raw string.
    conversions = [
        ('MPL3115A2_Pressure(Pa)', hundredth),
        ('MPL3115A2_Temp_inF', hundredth),
        ('UV_SensorValue', int),
        ('UV_SensorVoltage', hundredth),
        ('UV_Index', hundredth),
        ('Ambient_Temp_inC', hundredth),
        ('Object_Temp_inC', hundredth),
        ('Ambient_Temp_inF', hundredth),
        ('Object_Temp_inF', hundredth),
        ('Electrical_Conductivity', None),
        ('Volumetric_Water_Content', None),
        ('5TE_Temperature', None),
        ('DHT11_TemperatureInC', hundredth),
        ('DHT11_HumidityInPerc', hundredth),
        # Disabled sensors, kept for reference:
        # ('K30_CO2_ValueInPPM', int),
        # ('Anemometer_WindSpeed', hundredth),
    ]
    return {
        key: (raw if convert is None else convert(raw))
        for (key, convert), raw in zip(conversions, values)
    }
def PublishEvent(dictData, event, dictName):
    """Publish sensor data to a Deepstream event, best-effort.

    :param dictData: full {sensor_name: value} dictionary
    :param event: Deepstream event (topic) name
    :param dictName: key of the single value to publish, or '' to publish
        the whole dictionary
    """
    try:
        if dictName == '':
            mesg = deepstream.publish(event, dictData)
        else:
            mesg = deepstream.publish(event, dictData[dictName])
        if mesg.upper() == "SUCCESS":
            print("Event {0} published to Deepstream".format(event))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; publishing remains best-effort.
        print("An error occured while publishing {0} to Deepstream".format(event))
def SaveData(dictData):
    """Publish the complete sensor dictionary plus one event per sensor.

    :param dictData: {sensor_name: value} dictionary from ListToDict()
    """
    # Publish all data
    PublishEvent(dictData, "sensors/All", '')
    # Publish individual sensor data
    PublishEvent(dictData, "sensors/MPL3115A2", 'MPL3115A2_Pressure(Pa)')
    PublishEvent(dictData, "sensors/GUVAS12SD", 'UV_Index')
    PublishEvent(dictData, "sensors/MLX90614_Ambient", 'Ambient_Temp_inC')
    PublishEvent(dictData, "sensors/MLX90614_Object", 'Object_Temp_inC')
    PublishEvent(dictData, "sensors/Decagon5TE_EC", 'Electrical_Conductivity')
    PublishEvent(dictData, "sensors/Decagon5TE_VWC", 'Volumetric_Water_Content')
    PublishEvent(dictData, "sensors/DHT11", 'DHT11_HumidityInPerc')
    # Disabled sensors, kept for reference:
    # PublishEvent(dictData,"sensors/K30",'K30_CO2_ValueInPPM')
    # PublishEvent(dictData,"sensors/Anemometer",'Anemometer_WindSpeed')
# Main acquisition loop: keep reading frames until a complete one with the
# expected number of values arrives, convert it, then publish.
proceed = True
while (proceed):
    listLength = 0
    values = []
    # Partial frames (e.g. joined mid-transmission) are discarded and re-read.
    while (listLength != NoOfValues):
        values = getArduinoValues()
        listLength = len(values)
    DictSensorValues = ListToDict(values)
    print(DictSensorValues)
    SaveData(DictSensorValues)
|
"""
Two-Mention or two-Entity constraints.
The first argument must always be the candidate antecedent, while the second
argument must be the Mention/Entity occurring later.
Not all constraints are necessarily available at both the Mention _and_ the
Entity level. Moreover, some constraints may take an Entity as antecedent and a
Mention that occurs later, or vice versa.
"""
def check_entity_head_match(antecedent, entity, offset2string):
    """
    Entity head match: True iff the head word of _any_ mention in `entity`
    exactly matches the head word of _any_ mention in the `antecedent`
    entity.

    :param antecedent: candidate antecedent Entity
    :param entity: Entity under consideration
    :param offset2string: {offset: surface_string} dictionary
    """
    def head_words(ent):
        # Surface strings of the head words of all of `ent`'s mentions.
        return {
            offset2string[offset]
            for offset in ent.mention_attr('head_offset')
        }

    # A non-empty intersection means at least one shared head word.
    return bool(head_words(entity) & head_words(antecedent))
def check_word_inclusion(antecedent, entity, offset2string):
    """
    Entity-level "word inclusion": True iff every non-stop word of `entity`
    also occurs among the non-stop words of the `antecedent` entity.

    :param antecedent: candidate antecedent Entity
    :param entity: Entity under consideration
    :param offset2string: {offset: surface_string} dictionary
    """
    def non_stop_strings(ent):
        # .get keeps the original tolerance for offsets missing from the
        # dictionary (they map to None instead of raising).
        return {
            offset2string.get(offset)
            for offset in ent.flat_mention_attr('non_stopwords')
        }

    return non_stop_strings(entity) <= non_stop_strings(antecedent)
def check_compatible_modifiers_only(
        antecedent_mention, mention, offset2string):
    """
    Compatible modifiers only: True iff every main-modifier token of
    `mention` also appears as a main-modifier token of `antecedent_mention`.

    Following Lee et al. (2013), only modifiers that are nouns or adjectives
    are used (hence `main_modifiers` rather than `modifiers`), and the Lee
    et al. description is interpreted on the token level: all the *tokens*
    appearing as modifiers of `mention` must also appear as modifiers of
    `antecedent_mention` (not whole modifying constituents).
    """
    def modifier_tokens(m):
        # Flatten the per-constituent offset lists into one set of strings.
        return {
            offset2string[offset]
            for constituent in m.main_modifiers
            for offset in constituent
        }

    return modifier_tokens(mention) <= modifier_tokens(antecedent_mention)
def check_not_i_within_i(antecedent_mention, mention):
    """
    "Not i-within-i": True iff neither mention fully contains the other.

    The two mentions must not be in an i-within-i construct, that is, one
    cannot be a child NP in the other's NP constituent; here this is
    interpreted as "one mention does not fully contain the other".

    Written directly as::

        not (contained(mention, antecedent) or contained(antecedent, mention))

    which is logically equivalent to the disjunctive form used previously
    (the cross terms are contradictions). This constraint is symmetric.

    :param antecedent_mention: candidate antecedent Mention
    :param mention: Mention under consideration
    """
    begin1, end1 = antecedent_mention.begin_offset, antecedent_mention.end_offset
    begin2, end2 = mention.begin_offset, mention.end_offset
    antecedent_contains_mention = begin1 <= begin2 and end2 <= end1
    mention_contains_antecedent = begin2 <= begin1 and end1 <= end2
    return not (antecedent_contains_mention or mention_contains_antecedent)
|
#!/usr/bin/env python3
from functools import reduce
import operator
import pygame
# Control points of the demo curve (some deliberately lie far outside the
# 800x800 window, so only part of the curve is visible).
test_points = [[50, 50], [500, 1500], [500, -800], [750, 750]]
background_color = (100, 100, 100)
game_clock = 60        # frames-per-second cap for the animation loop
quit_flag = False
window_size = (800, 800)
def memoize(f):
    """Return a wrapper around `f` that caches results per argument tuple.

    :param f: function taking hashable positional arguments
    :returns: memoized version of `f`
    """
    memory = {}

    def memoized(*args):
        if args not in memory:
            memory[args] = f(*args)
        return memory[args]
    return memoized


@memoize
def _factorial(n):
    """Return n! (module-level and memoized, so it is shared across calls)."""
    # BUG FIX: the original used range(1, n), which computes (n-1)! and
    # therefore produced wrong binomial coefficients (e.g. C(2,1) == 1).
    return reduce(operator.mul, range(1, n + 1), 1)


def get_basis(i, n, t):
    """Evaluate the i-th Bernstein basis polynomial of degree n at t.

    B_{i,n}(t) = C(n, i) * t**i * (1 - t)**(n - i), with the binomial
    coefficient C(n, i) = n! / (i! * (n - i)!). For fixed n, the basis
    values over i = 0..n sum to 1 for every t in [0, 1].

    :param i: basis index, 0 <= i <= n
    :param n: polynomial degree (number of control points minus one)
    :param t: curve parameter in [0, 1]
    :returns: basis value as a float
    """
    return (_factorial(n) / (_factorial(i) * _factorial(n - i))) \
        * (t ** i) * (1 - t) ** (n - i)
def get_curve(arr, step=0.01, need_to_round=False):
    """Sample the Bezier curve defined by control points `arr`.

    :param arr: sequence of (x, y) control points
    :param step: increment of the curve parameter t between samples
    :param need_to_round: round sampled coordinates to integers (pixels)
    :returns: list of [x, y] points along the curve for t in [0, 1]
    """
    points = []
    t = 0
    # March t from 0 past 1 by `step`; the overshooting sample is clamped to
    # exactly 1 so the curve always ends on the last control point.
    while t < 1 + step:
        if t > 1:
            t = 1
        x = 0
        y = 0
        for index, (cx, cy) in enumerate(arr):
            weight = get_basis(index, len(arr) - 1, t)
            x += cx * weight
            y += cy * weight
        if need_to_round:
            x = round(x)
            y = round(y)
        points.append([x, y])
        t += step
    return points
if __name__ == '__main__':
    # Demo: draw the curve and animate a marker travelling back and forth
    # along it until the window is closed or Escape/Q is pressed.
    curve = get_curve(test_points, need_to_round=True)
    b_pos = 0                  # current index of the marker on the curve
    m_pos = len(curve) - 1     # last valid index
    direction = -1             # flipped to +1 on the first loop iteration
    pygame.init()
    screen = pygame.display.set_mode(window_size)
    pygame.display.set_caption('Bezier')
    clock = pygame.time.Clock()
    surface = pygame.Surface(window_size)
    while not quit_flag:
        surface.fill(background_color)
        pygame.draw.lines(surface, (255, 255, 255), False, curve)
        xp, yp = curve[b_pos]
        pygame.draw.circle(surface, (255, 0, 0), (xp, yp), 10)
        screen.blit(surface, (0, 0))
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quit_flag = True
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE or event.key == pygame.K_q:
                    quit_flag = True
        # Bounce: reverse direction at either end of the curve.
        if b_pos == m_pos or b_pos == 0:
            direction = -direction
        b_pos += direction
        clock.tick(game_clock)
|
# -*- encoding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 Tobias Koch <tobias.koch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
import re
import stat
import shlex
import subprocess
import textwrap
import hashlib
import functools
import locale
from tempfile import TemporaryDirectory, NamedTemporaryFile
import boltlinux.ffi.libarchive as libarchive
from boltlinux.ffi.libarchive import (
ArchiveFileReader, ArchiveFileWriter, ArchiveEntry
)
from boltlinux.error import NotFound, BoltSyntaxError, BoltError
from boltlinux.package.boltpack.xpkg import BaseXpkg
from boltlinux.package.boltpack.debianpackagemetadata \
import DebianPackageMetaData
class RepoIndexer:
    def __init__(self, repo_dir, force_full=False, sign_with=None):
        """
        :param repo_dir: directory containing the package repository to index
        :param force_full: rebuild the index from scratch instead of
            updating the existing Packages.gz incrementally
        :param sign_with: key to sign the generated index with, or None to
            skip signing
        :raises NotFound: if repo_dir does not exist or is not a directory
        """
        if not os.path.isdir(repo_dir):
            raise NotFound("path '%s' does not exists or is not a directory."
                           % repo_dir)

        self._force_full = force_full
        self._repo_dir = repo_dir
        self._sign_with = sign_with
    #end function
    def update_package_index(self):
        """
        Scan the repository for packages and regenerate the package index.

        Unless force_full was set, the existing index is loaded first and
        updated incrementally; entries whose files disappeared are pruned
        before the index is stored.
        """
        if self._force_full:
            index, digest = {}, ""
        else:
            index, digest = self.load_package_index()

        # NOTE(review): scan() is not visible here; presumably it yields the
        # metadata of packages found on disk -- confirm in its implementation.
        for meta_data in self.scan(index=index):
            name = meta_data["Package"]
            version = meta_data["Version"]

            # setdefault twice: keep an already-indexed entry for this
            # name/version rather than overwrite it.
            index\
                .setdefault(name, {})\
                .setdefault(version, meta_data)
        #end for

        if not self._force_full:
            self.prune_package_index(index)

        self.store_package_index(index, current_digest=digest)
    #end function
def load_package_index(self):
packages_file = os.path.join(self._repo_dir, "Packages.gz")
if not os.path.exists(packages_file):
return {}, ""
buf = ""
with ArchiveFileReader(packages_file, raw=True) as archive:
for entry in archive:
buf = archive.read_data()
h = hashlib.sha256()
h.update(buf)
text = buf.decode("utf-8")
index = {}
for entry in re.split(r"\n\n+", text, flags=re.MULTILINE):
meta_data = DebianPackageMetaData(entry)
try:
name = meta_data["Package"]
version = meta_data["Version"]
except KeyError:
continue
index.setdefault(name, {})[version] = meta_data
#end for
return index, h.hexdigest()
#end function
def prune_package_index(self, index):
for name in list(index.keys()):
for version, meta_data in list(index[name].items()):
abspath = os.path.join(self._repo_dir, meta_data["Filename"])
if not os.path.exists(abspath):
del index[name][version]
#end for
#end for
#end function
def store_package_index(self, index, current_digest=None):
    """Serialize the package index to Packages.gz plus signature files.

    *index* maps package name -> version -> DebianPackageMetaData.  When
    *current_digest* (SHA-256 hex of the existing index content) matches
    the newly generated content, the Packages.gz is not rewritten; the
    signed variants are still created if they are missing.  All output
    goes to temporary files first and is renamed into place.
    """
    meta_data_list = []
    # Flatten the index; versions ordered by Debian version comparison.
    for name in sorted(index.keys()):
        for version in sorted(index[name].keys(), key=functools.cmp_to_key(
                BaseXpkg.compare_versions)):
            meta_data_list.append(index[name][version])
        #end for
    #end for
    if not meta_data_list:
        return
    text_output = "\n".join([str(entry) for entry in meta_data_list])
    byte_output = text_output.encode("utf-8")
    signature = None
    signed_output = None
    if self._sign_with:
        # Detached usign/signify signature plus an inline signed document.
        signature = self._create_usign_signature(byte_output)
        signed_output = (
            """\
-----BEGIN SIGNIFY SIGNED MESSAGE-----
{output}\
-----BEGIN SIGNIFY SIGNATURE-----
{signature}\
-----END SIGNIFY SIGNATURE-----
"""
        ) \
        .format(
            output=text_output,
            signature=signature
        ) \
        .encode("utf-8")
    #end if
    # Avoid rewriting byte-identical content (keeps mtimes stable).
    changed = True
    if current_digest is not None:
        h = hashlib.sha256()
        h.update(byte_output)
        if h.hexdigest() == current_digest:
            changed = False
    packages_gz = os.path.join(self._repo_dir, "Packages.gz")
    tempfile_gz = None
    packages_sig = os.path.join(self._repo_dir, "Packages.sig")
    tempfile_sig = None
    packages_in = os.path.join(self._repo_dir, "InPackages.gz")
    tempfile_in = None
    # Suppress the gzip timestamp so output is reproducible.
    options = [("gzip", "timestamp", None)]
    try:
        if changed:
            # Create only the temp file name; libarchive reopens it itself.
            with NamedTemporaryFile(dir=self._repo_dir, delete=False) \
                    as tempfile_gz:
                pass
            with ArchiveFileWriter(
                    tempfile_gz.name,
                    libarchive.FORMAT_RAW,
                    libarchive.COMPRESSION_GZIP,
                    options=options) as archive:
                with ArchiveEntry() as archive_entry:
                    archive_entry.filetype = stat.S_IFREG
                    archive.write_entry(archive_entry)
                    archive.write_data(byte_output)
                #end with
            #end with
            # World-readable, owner-writable (0644).
            os.chmod(
                tempfile_gz.name,
                stat.S_IRUSR |
                stat.S_IWUSR |
                stat.S_IRGRP |
                stat.S_IROTH
            )
        #end if
        if signature and signed_output:
            if changed or not os.path.exists(packages_in):
                with NamedTemporaryFile(dir=self._repo_dir, delete=False) \
                        as tempfile_in:
                    pass
                with ArchiveFileWriter(
                        tempfile_in.name,
                        libarchive.FORMAT_RAW,
                        libarchive.COMPRESSION_GZIP,
                        options=options) as archive:
                    with ArchiveEntry() as archive_entry:
                        archive_entry.filetype = stat.S_IFREG
                        archive.write_entry(archive_entry)
                        archive.write_data(signed_output)
                    #end with
                #end with
                os.chmod(
                    tempfile_in.name,
                    stat.S_IRUSR |
                    stat.S_IWUSR |
                    stat.S_IRGRP |
                    stat.S_IROTH
                )
            #end if
            if changed or not os.path.exists(packages_sig):
                # The detached signature is plain text, not gzipped.
                with NamedTemporaryFile(dir=self._repo_dir, mode="w+",
                        delete=False, encoding="utf-8") as tempfile_sig:
                    tempfile_sig.write(signature)
                os.chmod(
                    tempfile_sig.name,
                    stat.S_IRUSR |
                    stat.S_IWUSR |
                    stat.S_IRGRP |
                    stat.S_IROTH
                )
            #end if
        #end if
        # Atomically move the finished files into place.
        if tempfile_gz:
            os.rename(tempfile_gz.name, packages_gz)
        if tempfile_sig:
            os.rename(tempfile_sig.name, packages_sig)
        if tempfile_in:
            os.rename(tempfile_in.name, packages_in)
    finally:
        # Remove any temp file that was not renamed (e.g. on error).
        if tempfile_gz and os.path.exists(tempfile_gz.name):
            os.unlink(tempfile_gz.name)
        if tempfile_sig and os.path.exists(tempfile_sig.name):
            os.unlink(tempfile_sig.name)
        if tempfile_in and os.path.exists(tempfile_in.name):
            os.unlink(tempfile_in.name)
    #end try
#end function
def scan(self, index=None):
    """Yield control data for every ``.bolt`` package under the repo dir.

    Files whose name does not parse as ``name_version_arch.bolt``, files
    already present in *index*, and files whose control data fails to
    parse are silently skipped.
    """
    if index is None:
        index = {}
    for dirpath, _dirs, filenames in os.walk(self._repo_dir, followlinks=True):
        for fname in filenames:
            if not fname.endswith(".bolt"):
                continue
            try:
                pkg_name, pkg_version, _arch = fname[:-5].rsplit("_")
            except ValueError:
                continue
            # Already indexed — nothing to extract.
            if index.get(pkg_name, {}).get(pkg_version) is not None:
                continue
            full_path = os.path.join(dirpath, fname)
            try:
                control = self.extract_control_data(full_path)
            except BoltSyntaxError:
                continue
            yield control
#end function
def extract_control_data(self, filename):
    """Extract Debian-style control metadata from a ``.bolt`` package.

    Copies the embedded ``control.tar.*`` member to a temporary
    directory, parses it via ``_extract_control_data()`` and annotates
    the result with repo-relative Filename, SHA256 and Size fields.
    """
    meta_data = None
    with TemporaryDirectory() as tmpdir:
        with ArchiveFileReader(filename) as archive:
            for entry in archive:
                if not entry.pathname.startswith("control.tar."):
                    continue
                data_file = os.path.join(tmpdir, entry.pathname)
                # Copy the member out in 4 KiB chunks.
                with open(data_file, "wb+") as outfile:
                    while True:
                        buf = archive.read_data(4096)
                        if not buf:
                            break
                        outfile.write(buf)
                    #end while
                #end with
                # Path relative to the repository root; stored as the
                # index "Filename" field.
                pool_path = re.sub(
                    r"^" + re.escape(self._repo_dir) + r"/*",
                    "",
                    filename
                )
                meta_data = DebianPackageMetaData(
                    self._extract_control_data(data_file))
                meta_data["Filename"] = pool_path
                break
            #end for
        #end with
    #end with
    # NOTE(review): when no control.tar.* member exists, meta_data is
    # still None here and the next line raises TypeError — confirm
    # whether raising BoltSyntaxError would be more appropriate.
    meta_data["SHA256"] = self._file_sha256_sum(filename)
    meta_data["Size"] = os.path.getsize(filename)
    return meta_data
#end function
# PRIVATE
def _extract_control_data(self, filename):
    """Return the stripped "control" file contents from a control archive.

    Continuation lines (lines beginning with whitespace) are removed so
    only top-level fields remain.  Returns None when the archive has no
    "control" member.
    """
    with ArchiveFileReader(filename) as archive:
        for entry in archive:
            if entry.pathname != "control":
                continue
            raw_text = archive.read_data().decode("utf-8")
            # Drop folded/continuation lines.
            cleaned = re.sub(r"^\s+.*?$\n?", "", raw_text, flags=re.MULTILINE)
            return cleaned.strip()
#end function
def _file_sha256_sum(self, filename):
h = hashlib.sha256()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
h.update(chunk)
return h.hexdigest()
#end function
def _create_usign_signature(self, data):
    """Sign *data* with the usign (signify) key in ``self._sign_with``.

    The payload is written to a temporary file inside the repository
    directory, signed with ``usign -S``, and the signature read from the
    tool's stdout is returned as a string.

    Raises BoltError when usign exits non-zero.
    """
    signature = None
    with NamedTemporaryFile(dir=self._repo_dir) as tempfile:
        tempfile.write(data)
        tempfile.flush()
        # Build the argument vector directly instead of formatting a
        # quoted shell string through shlex.split(): the previous
        # approach broke for paths containing spaces or single quotes.
        sign_cmd = [
            "usign", "-S",
            "-m", tempfile.name,
            "-s", self._sign_with,
            "-x", "-",
        ]
        try:
            proc = subprocess.run(
                sign_cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                check=True
            )
            signature = proc.stdout.decode("utf-8")
        except subprocess.CalledProcessError as e:
            raise BoltError(
                "failed to sign Packages file: {}".format(
                    e.stderr.decode(locale.getpreferredencoding()).strip()
                )
            )
    #end with
    return signature
#end function
#end class
|
#!/usr/bin/env python
from . import elk, elk_io
|
import math
from typing import Dict, Any, Tuple
def calculate_centroid(metadata: Dict[str, Any]) -> Tuple[float, float, int]:
    """Calculate a rough centroid for the metadata provided by rio-tiler.

    Args:
        metadata: the metadata generated by rio-tiler for a compliant image

    Returns:
        (longitude, latitude, minzoom) for the centre of the bounds box.
    """
    west, south, east, north = metadata["bounds"]["value"][:4]
    return (west + east) / 2, (south + north) / 2, metadata["minzoom"]
def build_tile_url(
    image_path: str, rgb: bool = False, url_root: str = "/", is_product: bool = False
) -> str:
    """Build a tile URL template for the given image path.

    Args:
        image_path: the path to the image to be processed
        rgb (optional): if True, address the DEM RGB tile set
        url_root (optional): root prefix prepended to the URL
        is_product (optional): if True, address the "products/" namespace

    Returns:
        The URL template for the specified tile set.
    """
    kind = "rgb" if rgb else "tile"
    namespace = "products/" if is_product else ""
    template = "{z}/{x}/{y}.png"
    return f"{url_root}{namespace}{kind}/{image_path}/{template}"
def snake(term: str) -> str:
    """Convert the given string to snake case.

    Args:
        term: the term/word/phrase to convert

    Returns:
        The snake-cased version of the provided identifier.
    """
    return "".join(
        "_" + char.lower() if pos and char.isupper() else char.lower()
        for pos, char in enumerate(term)
    )
def camel(term: str) -> str:
    """Convert the given string to camel case.

    Args:
        term: the term/word/phrase to convert

    Returns:
        The camel-cased version of the provided identifier.
    """
    head, *tail = term.split("_")
    return head + "".join(chunk.title() for chunk in tail)
def coord_to_tile(lon: float, lat: float, zoom: int) -> Tuple[int, int, int]:
    """Transform the provided coordinate to a slippy-map tile.

    More information available here:
    https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Coordinates_to_tile_numbers_2
    """
    # Keep the exact arithmetic order of the reference formula so tile
    # indices on boundaries do not shift due to float rounding.
    scale = 2.0 ** zoom
    x_index = int((lon + 180.0) / 360.0 * scale)
    mercator_y = math.asinh(math.tan(math.radians(lat)))
    y_index = int((1.0 - mercator_y / math.pi) / 2.0 * scale)
    return zoom, x_index, y_index
|
import collections
import collections.abc

import chainer
import numpy as np
import onnx

from typing import List, Mapping

from chainer_compiler.ch2o import env
from chainer_compiler.ch2o import utils
def _is_float_value(v):
# The latter is for numpy-like things.
return isinstance(v, float) or int(v) != v
class Value(object):
    """An object which holds either an ONNX value or a Python object.

    ``is_py`` tells which of the two is held; ``const_value`` remembers
    the original Python object after conversion to an ONNX value so
    constant folding (``to_int`` etc.) can still observe it.
    """

    def __init__(self, value):
        if isinstance(value, Value):
            # Copy-construct: share the constant and unwrap the payload.
            self.const_value = value.const_value
            value = value.value
        else:
            self.const_value = None
        self.value = value
        self.is_py = not isinstance(self.value, onnx.ValueInfoProto)
        if not self.is_py:
            assert self.is_tensor() or self.is_sequence()
            assert not (self.is_tensor() and self.is_sequence())

    def __repr__(self):
        if self.is_py:
            return 'Value(%s)' % str(self.value)
        else:
            return 'Value(%s)' % self.value.name

    def get_attribute(self, key: str, env: 'utils.Env') -> 'Value':
        """Return attribute *key* of the held Python object, wrapped.

        Plain numeric attributes are eagerly converted to ONNX values,
        written back onto the object, and recorded in ``env.read_attrs``.
        """
        if not self.is_py:
            raise TypeError('Unsupported attribute %s for an ONNX value' % key)
        value = Value(getattr(self.value, key))
        if (value.is_py and
            (value.value is None or
             not isinstance(value.value, type) and
             # TODO(hamaji): We probably need to create a ValueInfo
             # for Variable.
             not isinstance(value.value, chainer.Variable) and
             # `np.object` was removed in NumPy 1.24; the builtin
             # `object` is the supported spelling of this dtype check.
             np.array(value.value).dtype != object)):
            value.to_value_info(env.root())
            setattr(self.value, key, value)
        if not value.is_py:
            env.read_attrs.append((self, key, value))
        return value

    def is_none(self) -> bool:
        return self.is_py and self.value is None

    def is_tensor(self) -> bool:
        return not self.is_py and self.value.type.HasField('tensor_type')

    def is_sequence(self) -> bool:
        return not self.is_py and self.value.type.HasField('sequence_type')

    def copy(self, env: 'utils.Env', name=None) -> 'Value':
        """Return a fresh Value with a new name and the same ONNX type."""
        self.to_value_info(env)
        vi = self.value
        nvi = onnx.ValueInfoProto()
        if self.is_tensor():
            nvi.name = utils.gen_id(name, 'T')
        else:
            assert self.is_sequence(), self
            nvi.name = utils.gen_id(name, 'S')
        nvi.type.CopyFrom(vi.type)
        return Value(nvi)

    def identity(self, env: 'utils.Env', name=None) -> 'Value':
        """Emit an ONNX Identity node and return its output Value."""
        nv = self.copy(env, name=name)
        env.addnode('Identity',
                    inputs=[self.value.name], outputs=[nv.value.name])
        return nv

    def to_value_info(self, env: 'utils.Env') -> onnx.ValueInfoProto:
        """Convert to an ONNX value: sequence for iterables, else tensor."""
        if self.is_py:
            # `collections.Iterable` was removed in Python 3.10;
            # `collections.abc.Iterable` is the compatible spelling.
            if isinstance(self.value, collections.abc.Iterable):
                return self.to_sequence(env)
            else:
                return self.to_tensor(env)
        return self.value

    def to_tensor(self, env: 'utils.Env',
                  dtype: type = None) -> onnx.ValueInfoProto:
        """Convert to an ONNX tensor, optionally casting to *dtype*."""
        if self.is_py:
            self.const_value = Value(self.value)
            # TODO(hamaji): Rewrite `totensor` to convert a Python
            # list to a tensor.
            self.value = utils.totensor(self.value, env, dtype=dtype)
            self.is_py = False
        else:
            if self.is_sequence():
                self.value = env.calc('ConcatFromSequence',
                                      inputs=[self.value.name],
                                      axis=0,
                                      new_axis=True)
                self.is_py = False
            if dtype is not None:
                dt = utils.onnx_dtype(dtype)
                self.value = env.calc(
                    'Cast',
                    inputs=[self.value.name],
                    to=dt
                )
                self.value.type.tensor_type.elem_type = dt
        assert self.is_tensor()
        return self.value

    def to_sequence(self, env: 'utils.Env') -> onnx.ValueInfoProto:
        """Convert to an ONNX sequence (splitting a tensor if needed)."""
        if self.is_py:
            self.const_value = Value(self.value)
            if not isinstance(self.value, collections.abc.Iterable):
                raise TypeError('Expected a sequence: %s' % self.value)
            res = env.calc_seq(
                "SequenceConstruct",
                inputs=[],
            )
            for v in self.value:
                v = Value(v).to_tensor(env)
                res = env.calc_seq(
                    "SequenceInsert",
                    inputs=[res.name, v.name],
                )
            self.value = res
            self.is_py = False
        elif self.is_tensor():
            self.value = env.calc_seq(
                'SplitToSequence',
                inputs=[self.value.name],
                keepdims=False
            )
        assert self.is_sequence()
        return self.value

    def _const(self) -> 'Value':
        # Prefer the remembered Python constant once converted to ONNX.
        if not self.is_py and self.const_value is not None:
            return self.const_value
        return self

    @property
    def has_py_value(self):
        return self.is_py or self.const_value is not None

    def to_py_value(self):
        if self.is_py:
            return self
        if self.const_value is not None:
            return self.const_value
        assert False, self

    def to_float(self) -> float:
        value = self._const()
        if not value.is_py:
            raise TypeError('Expected a float scalar: %s' % value.value)
        return float(value.value)

    def to_int(self) -> int:
        value = self._const()
        # (A stray debug print of const_value was removed here.)
        if not value.is_py or _is_float_value(value.value):
            raise TypeError('Expected an int scalar: %s' % value.value)
        return int(value.value)

    def to_bool(self) -> bool:
        value = self._const()
        if not value.is_py or not isinstance(value.value, bool):
            raise TypeError('Expected a bool scalar: %s' % value.value)
        return bool(value.value)

    def to_int_list(self) -> List[int]:
        value = self._const()
        if not value.is_py or not isinstance(value.value, collections.abc.Iterable):
            raise TypeError('Expected an int list: %s' % value.value)
        ints = list(Value(v).value for v in value.value)
        if ints and _is_float_value(ints[0]):
            raise TypeError('Expected an int list: %s' % value.value)
        return ints
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class MessagesConfig(AppConfig):
    """App configuration for the django.contrib.messages framework app."""

    # Dotted module path this app config applies to.
    name = 'django.contrib.messages'
    # Human-readable, translatable name (e.g. shown in the admin).
    verbose_name = _("Messages")
|
# -*- coding: UTF-8 -*-
# @yasinkuyu

# Define Python imports
import os
import sys
import time
import config
import argparse
import threading
import sqlite3

# Define Custom imports
from BinanceAPI import *

# Define Custom import vars
# Module-wide API client and SQLite connection shared by all helpers below.
client = BinanceAPI(config.api_key, config.api_secret)
conn = sqlite3.connect('orders.db')

# Set parser
parser = argparse.ArgumentParser()
parser.add_argument("--quantity", type=int, help="Buy/Sell Quantity", default=200)
parser.add_argument("--symbol", type=str, help="Market Symbol (Ex: XVGBTC)", required=True)
parser.add_argument("--profit", type=float, help="Target Profit", default=1.3)
parser.add_argument("--stoploss", type=float, help="Target Stop-Loss % (If the price drops by 6%, sell market_price.)", default=0) # Not complated (Todo)
parser.add_argument("--increasing", type=float, help="Buy Price +Increasing (0.00000001)", default=0.00000001)
parser.add_argument("--decreasing", type=float, help="Sell Price -Decreasing (0.00000001)", default=0.00000001)

# Manually defined --orderid try to sell
parser.add_argument("--orderid", type=int, help="Target Order Id (use balance.py)", default=0)
parser.add_argument("--wait_time", type=int, help="Wait Time (seconds)", default=1)
# NOTE(review): argparse type=bool does not parse "False" — any non-empty
# string is truthy; confirm the intended CLI behavior of these flags.
parser.add_argument("--test_mode", type=bool, help="Test Mode True/False", default=False)
parser.add_argument("--prints", type=bool, help="Scanning Profit Screen Print True/False", default=True)
parser.add_argument("--debug", type=bool, help="Debug True/False", default=True)
parser.add_argument("--loop", type=int, help="Loop (0 unlimited)", default=0)

option = parser.parse_args()

# Set globals (Todo)
# NOTE(review): `global` at module level is a no-op; kept as documentation.
global DEBUG_MODE
global TEST_MODE

# Define parser vars
DEBUG_MODE = option.debug
TEST_MODE = option.test_mode
PROFIT = option.profit
ORDER_ID = option.orderid
QUANTITY = option.quantity
WAIT_TIME = option.wait_time  # seconds
STOP_LOSS = option.stoploss  # percent (When you drop 10%, sell panic.)

# Define static vars
WAIT_TIME_BUY_SELL = 5  # seconds
WAIT_TIME_STOP_LOSS = 20  # seconds
INVALID_ATTEMPTS_LIMIT = 40  # int
MAX_TRADE_SIZE = 10  # int
FEE = 0.0005
# Database
def write(data):
    """
    Save order

    data = rows of (orderid, symbol, amount, price, side, quantity, profit);
    accepts a numpy array (via .tolist()) or any iterable of row sequences.
    """
    # Support plain Python sequences as well as numpy arrays.
    rows = data.tolist() if hasattr(data, "tolist") else data
    cur = conn.cursor()
    cur.executemany('''INSERT INTO orders VALUES (?, ?, ?, ?, ?, ?, ?)''', map(tuple, rows))
    conn.commit()
    # NOTE: the module-wide connection is intentionally left open.  The
    # previous version called conn.close() here, which made every later
    # read()/write() fail on the closed connection.
def read(orderid):
    """Return the stored order row for *orderid*, or None when absent.

    :param orderid: the buy/sell order id
    :return: the matching row from the orders table
    """
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM orders WHERE orderid = ?", (orderid,))
    return cursor.fetchone()
def buy_limit(symbol, quantity, buyPrice):
    """Place a limit buy order; return its order id, or None on failure."""
    try:
        response = client.buy_limit(symbol, quantity, buyPrice)
        if 'msg' in response:
            # API error payload: message() prints it and exits.
            message(response['msg'])
        # Order created.
        new_order_id = response['orderId']
        # Database log
        #write([new_order_id, symbol, 0, buyPrice, "BUY", quantity, option.profit])
        print ('Order Id: %d' % new_order_id)
        return new_order_id
    except Exception as e:
        print (e)
        time.sleep(WAIT_TIME_BUY_SELL)
        return None
def sell_limit(symbol, quantity, orderId, sell_price, last_price):
    """
    The specified limit will try to sell until it reaches.
    If not successful, the order will be canceled.
    """
    invalidAttempts = 0
    while invalidAttempts < INVALID_ATTEMPTS_LIMIT:
        order = client.sell_limit(symbol, quantity, sell_price)
        if 'msg' in order:
            # API error payload: message() prints it and exits the process.
            message(order['msg'])
        print ("Profit: %%%s. Buy: %.8f Sell: %.8f" % (PROFIT, float(order["price"]), sell_price))
        sell_id = order['orderId']
        if sell_id != None:
            # Give the exchange time to (partially) fill the limit order.
            time.sleep(WAIT_TIME_BUY_SELL)
            """
            If all sales trials fail,
            the grievance is stop-loss.
            """
            if STOP_LOSS > 0:
                # Re-query the sell order and derive the stop-loss trigger
                # from its limit price.
                stop_order = get_order(symbol, sell_id)
                stopprice = calc(float(stop_order['price']))
                lossprice = stopprice - (stopprice * STOP_LOSS / 100)
                status = stop_order['status']
                # Order status
                if status == "NEW":
                    if cancel_order(symbol, sell_id) == True:
                        # Stop loss
                        if last_price <= lossprice:
                            sell = client.sell_market(symbol, quantity)
                            if 'msg' in sell:
                                message(sell['msg'])
                            print ("Stop-loss, sell market, %s" % (lossprice))
                            if sell == True:
                                break
                            else:
                                continue
                        # Wait a while after the sale to the loss.
                        time.sleep (WAIT_TIME_STOP_LOSS)
                    else:
                        break
                elif status == "FILLED":
                    print("Order filled")
                    break
                elif status == "PARTIALLY_FILLED":
                    print("Order partially filled")
                    break
                else:
                    continue
            invalidAttempts = 0
            break
        else:
            invalidAttempts += 1
            continue
    if invalidAttempts != 0:
        # All attempts failed: cancel the original order.
        cancel_order(symbol, orderId)
def check_buy(symbol, orderId, quantity):
    """Watch a buy order; fall back to a market buy if the limit never fills."""
    trading_size = 0
    time.sleep(WAIT_TIME_BUY_SELL)
    while trading_size < MAX_TRADE_SIZE:
        # Order info
        order = get_order(symbol, orderId)
        side = order['side']
        price = float(order['price'])
        # Todo: Sell partial qty
        orig_qty = float(order['origQty'])
        filled_qty = float(order['executedQty'])
        status = order['status']
        print ("Order(buy): %s id:%d, price: %.8f, orig_qty: %.8f" % (symbol, order['orderId'], price, orig_qty))
        if status == "NEW":
            # Limit order still open: cancel it and replace with a market buy.
            if cancel_order(symbol, orderId) == True:
                buy = client.buy_market(symbol, quantity)
                if 'msg' in buy:
                    message(buy['msg'])
                if buy == True:
                    break
                else:
                    trading_size += 1
                    continue
            else:
                break
        elif status == "FILLED":
            break
        elif status == "PARTIALLY_FILLED":
            break
        else:
            trading_size += 1
            continue
def cancel_order(symbol, orderId):
    """Cancel the given order; return True on success, False on failure."""
    try:
        result = client.cancel(symbol, orderId)
        if 'msg' in result:
            message(result['msg'])
        print ("Profit loss, called order, %s" % (orderId))
        return True
    except Exception as e:
        print (e)
        return False
def get_order_book(symbol):
    """Return (best_bid, best_ask) prices, or (None, None) on error."""
    try:
        book = client.get_orderbooks(symbol, 5)
        best_bid = float(book['bids'][0][0])  # last buy price (bid)
        best_ask = float(book['asks'][0][0])  # last sell price (ask)
        return best_bid, best_ask
    except Exception as e:
        print (e)
        return None, None
def get_order(symbol, orderId):
    """Query a single order; return the order dict, or False on error."""
    try:
        result = client.query_order(symbol, orderId)
        if 'msg' in result:
            message(result['msg'])
        return result
    except Exception as e:
        print (e)
        return False
def get_order_status(symbol, orderId):
    """Return the status string of an order, or None on error."""
    try:
        result = client.query_order(symbol, orderId)
        if 'msg' in result:
            message(result['msg'])
        return result['status']
    except Exception as e:
        print (e)
        return None
def get_ticker(symbol):
    """Return the last traded price for *symbol* (implicitly None on error)."""
    try:
        data = client.get_ticker(symbol)
        return float(data["lastPrice"])
    except Exception as e:
        print (e)
def analyze(symbol):
    # Todo: Analyze, best price position
    # NOTE(review): ticker payloads commonly expose "highPrice"/"lowPrice";
    # the "hight"/"low" keys below look suspect — confirm against the
    # BinanceAPI client before enabling this function.
    ticker = client.get_ticker(symbol)
    hight = float(ticker["hight"])
    low = float(ticker["low"])
    return False
def message(msg):
    """Print an API error message and abort the whole program."""
    print ("Error: " + msg)
    exit(1)
def calc(lastBid):
    """Return the sell price realising PROFIT percent over *lastBid*."""
    margin = lastBid * PROFIT / 100
    return lastBid + margin
def valid(price):
    """Validate that *price* parses as a float and return it bumped by the
    configured --increasing step.

    Exits the program when the value cannot be parsed.  The previous
    version computed the adjusted price but discarded it; returning it is
    backward-compatible (existing callers may simply ignore the result).
    """
    try:
        price = float(price) + option.increasing
    except ValueError:
        print ("Invalid symbol name, Please try again....")
        exit(1)
    return price
def action(symbol):
    """Run one buy/sell cycle for *symbol*.

    Fetches current prices; when the ask already exceeds the profitable
    selling price (or an explicit --orderid was supplied), places a limit
    buy and spawns sell/check threads for the resulting order.
    """
    # Order amount
    quantity = option.quantity
    # Fetches the ticker price
    lastPrice = get_ticker(symbol)
    # Order book prices
    lastBid, lastAsk = get_order_book(symbol)
    # Target buy price, add little increase
    buyPrice = lastBid + option.increasing
    # Target sell price, decrease little
    sellPrice = lastAsk - option.decreasing
    # Spread ( profit )
    profitableSellingPrice = calc(lastBid)
    earnTotal = profitableSellingPrice - buyPrice
    # Screen log
    if option.prints:
        print ('price:%.8f buyp:%.8f sellp:%.8f-bid:%.8f ask:%.8f' % (lastPrice, buyPrice, profitableSellingPrice, lastBid, lastAsk))
    #analyze = threading.Thread(target=analyze, args=(symbol,))
    #analyze.start()
    """
    Did profit get caught
    if ask price is greater than profit price,
    buy with my buy price,
    or --orderid greater than zero
    """
    if lastAsk >= profitableSellingPrice or option.orderid > 0:
        # Manually defined --orderid, try to sell ( use balance.py )
        if option.orderid > 0 :
            orderId = option.orderid
        else:
            orderId = buy_limit(symbol, quantity, buyPrice)
        # Order book prices (refreshed after the buy)
        newLastBid, newLastAsk = get_order_book(symbol)
        newSellPrice = newLastAsk - option.decreasing
        if orderId is not None:
            """
            If the order is complete,
            try to sell it.
            """
            #Perform buy action
            sellAction = threading.Thread(target=sell_limit, args=(symbol, quantity, orderId, newSellPrice, lastPrice,))
            #Perform check/sell action
            checkAction = threading.Thread(target=check_buy, args=(symbol, orderId, quantity,))
            sellAction.start()
            checkAction.start()
def main():
    """Main loop: spawn one action() thread per cycle, paced by WAIT_TIME."""
    cycle = 0
    actions = []
    symbol = option.symbol
    print ("@yasinkuyu, 2017")
    print ("Auto Trading for Binance.com. --symbol: %s" % symbol)
    print ("trader.py --symbol %s --quantity %s --profit %s --wait_time %s --orderid %s \n" % (symbol, option.quantity, option.profit, option.wait_time, option.orderid))
    print ("%%%s profit scanning for %s" % (PROFIT, symbol))
    print ("... \n")
    while (cycle <= option.loop):
        startTime = time.time()
        actionTrader = threading.Thread(target=action, args=(symbol,))
        actions.append(actionTrader)
        actionTrader.start()
        endTime = time.time()
        # Keep a steady cadence: sleep away whatever WAIT_TIME remains.
        if endTime - startTime < WAIT_TIME:
            time.sleep(WAIT_TIME - (endTime - startTime))
        # 0 = Unlimited loop
        if option.loop > 0:
            cycle = cycle + 1
# Run the trader only when executed as a script (not on import).
if __name__ == "__main__":
    main()
from sqlalchemy.orm import Query
'''
https://stackoverflow.com/questions/15936111/sqlalchemy-can-you-add-custom-methods-to-the-query-object
'''
class CustomQuery(Query):
    """Query subclass adding conditional filtering support."""

    def filter_if(self: Query, condition: bool, *criterion):
        """Apply ``filter(*criterion)`` only when *condition* is truthy;
        otherwise return this query unchanged."""
        return self.filter(*criterion) if condition else self
from rest_framework import serializers
from clients.models import Profile
class ProfileSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer exposing every field of the Profile model."""

    # Explicit read-only primary key (HyperlinkedModelSerializer omits
    # the id field by default).
    id = serializers.IntegerField(read_only=True)

    class Meta:
        model = Profile
        fields = '__all__'
|
# © MNELAB developers
#
# License: BSD (3-clause)
from PySide6.QtCore import Qt, Slot
from PySide6.QtWidgets import (
QDialog,
QDialogButtonBox,
QGridLayout,
QListWidget,
QRadioButton,
)
from .utils import select_all
class PickChannelsDialog(QDialog):
    """Dialog for picking channels either by name or by channel type."""

    def __init__(self, parent, channels, types):
        """
        Parameters
        ----------
        parent : QWidget
            Parent widget of the dialog.
        channels : iterable of str
            Available channel names.
        types : iterable of str
            Available channel type names.
        """
        super().__init__(parent)
        self.setWindowTitle("Pick channels")
        grid = QGridLayout(self)

        # "By name" selector (the default mode).
        self.by_name = QRadioButton("By name:")
        grid.addWidget(self.by_name, 0, 0, Qt.AlignTop)
        self.names = QListWidget()
        self.names.insertItems(0, channels)
        self.names.setSelectionMode(QListWidget.ExtendedSelection)
        select_all(self.names)
        grid.addWidget(self.names, 0, 1)
        self.by_name.setChecked(True)

        # "By type" selector.
        self.by_type = QRadioButton("By type:")
        grid.addWidget(self.by_type, 1, 0, Qt.AlignTop)
        self.types = QListWidget()
        self.types.insertItems(0, types)
        self.types.setSelectionMode(QListWidget.ExtendedSelection)
        # Qt expects an int here; passing the raw float product raises a
        # TypeError with current PySide6 bindings.
        self.types.setMaximumHeight(int(self.types.sizeHintForRow(0) * 5.5))
        select_all(self.types)
        grid.addWidget(self.types, 1, 1)

        self.buttonbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        grid.addWidget(self.buttonbox, 2, 0, 1, -1)
        self.buttonbox.accepted.connect(self.accept)
        self.buttonbox.rejected.connect(self.reject)
        self.types.itemSelectionChanged.connect(self.toggle_buttons)
        self.names.itemSelectionChanged.connect(self.toggle_buttons)
        self.by_name.toggled.connect(self.toggle_buttons)
        self.toggle_buttons()  # initialize OK button state
        self.by_name.toggled.connect(self.toggle_lists)
        self.by_type.toggled.connect(self.toggle_lists)
        self.toggle_lists()

    @Slot()
    def toggle_buttons(self):
        """Enable OK only when the active list has a selection."""
        self.buttonbox.button(QDialogButtonBox.Ok).setEnabled(False)
        if (self.by_name.isChecked() and self.names.selectedItems()
                or self.by_type.isChecked() and self.types.selectedItems()):
            self.buttonbox.button(QDialogButtonBox.Ok).setEnabled(True)

    @Slot()
    def toggle_lists(self):
        """Enable whichever list matches the checked radio button."""
        self.names.setEnabled(self.by_name.isChecked())
        self.types.setEnabled(not self.by_name.isChecked())
|
from pycassa.types import Column
__all__ = ['ColumnFamilyMap']
def create_instance(cls, **kwargs):
    """Instantiate *cls* and bulk-assign *kwargs* onto its __dict__."""
    obj = cls()
    obj.__dict__.update(kwargs)
    return obj
class ColumnFamilyMap(object):
    """Maps rows of a Cassandra ColumnFamily onto instances of a class.

    NOTE(review): this class relies on dict.iteritems(), i.e. it targets
    Python 2 — confirm the interpreter version before reuse or porting.
    """

    def __init__(self, cls, column_family, columns=None, raw_columns=False):
        """
        Construct an ObjectFamily

        Parameters
        ----------
        cls : class
            Instances of cls are generated on get*() requests
        column_family: ColumnFamily
            The ColumnFamily to tie with cls
        raw_columns: boolean
            Whether all columns should be fetched into the raw_columns field in
            requests
        """
        # NOTE(review): the `columns` parameter is accepted but never used.
        self.cls = cls
        self.column_family = column_family
        self.raw_columns = raw_columns
        self.columns = {}
        # Collect the Column descriptors declared on cls.
        for name, column in self.cls.__dict__.iteritems():
            if not isinstance(column, Column):
                continue
            self.columns[name] = column

    def combine_columns(self, columns):
        """Merge fetched values over declared defaults, unpacking each value."""
        combined_columns = {}
        if self.raw_columns:
            combined_columns['raw_columns'] = {}
        # Start from each declared column's default value.
        for column, type in self.columns.iteritems():
            combined_columns[column] = type.default
        for column, value in columns.iteritems():
            col_cls = self.columns.get(column, None)
            if col_cls is not None:
                combined_columns[column] = col_cls.unpack(value)
            if self.raw_columns:
                combined_columns['raw_columns'][column] = value
        return combined_columns

    def get(self, key, *args, **kwargs):
        """
        Fetch a key from a Cassandra server

        Parameters
        ----------
        key : str
            The key to fetch
        columns : [str]
            Limit the columns or super_columns fetched to the specified list
        column_start : str
            Only fetch when a column or super_column is >= column_start
        column_finish : str
            Only fetch when a column or super_column is <= column_finish
        column_reversed : bool
            Fetch the columns or super_columns in reverse order. This will do
            nothing unless you passed a dict_class to the constructor.
        column_count : int
            Limit the number of columns or super_columns fetched per key
        super_column : str
            Fetch only this super_column
        read_consistency_level : ConsistencyLevel
            Affects the guaranteed replication factor before returning from
            any read operation

        Returns
        -------
        Class instance
        """
        # Restrict the fetch to mapped columns unless the caller overrides.
        if 'columns' not in kwargs and not self.column_family.super and not self.raw_columns:
            kwargs['columns'] = self.columns.keys()

        columns = self.column_family.get(key, *args, **kwargs)

        if self.column_family.super:
            if 'super_column' not in kwargs:
                # One instance per super column.
                vals = {}
                for super_column, subcols in columns.iteritems():
                    combined = self.combine_columns(subcols)
                    vals[super_column] = create_instance(self.cls, key=key, super_column=super_column, **combined)
                return vals

            combined = self.combine_columns(columns)
            return create_instance(self.cls, key=key, super_column=kwargs['super_column'], **combined)

        combined = self.combine_columns(columns)
        return create_instance(self.cls, key=key, **combined)

    def multiget(self, *args, **kwargs):
        """
        Fetch multiple key from a Cassandra server

        Parameters
        ----------
        keys : [str]
            A list of keys to fetch
        columns : [str]
            Limit the columns or super_columns fetched to the specified list
        column_start : str
            Only fetch when a column or super_column is >= column_start
        column_finish : str
            Only fetch when a column or super_column is <= column_finish
        column_reversed : bool
            Fetch the columns or super_columns in reverse order. This will do
            nothing unless you passed a dict_class to the constructor.
        column_count : int
            Limit the number of columns or super_columns fetched per key
        super_column : str
            Fetch only this super_column
        read_consistency_level : ConsistencyLevel
            Affects the guaranteed replication factor before returning from
            any read operation

        Returns
        -------
        {'key': Class instance}
        """
        if 'columns' not in kwargs and not self.column_family.super and not self.raw_columns:
            kwargs['columns'] = self.columns.keys()

        kcmap = self.column_family.multiget(*args, **kwargs)

        ret = {}
        for key, columns in kcmap.iteritems():
            if self.column_family.super:
                if 'super_column' not in kwargs:
                    vals = {}
                    for super_column, subcols in columns.iteritems():
                        combined = self.combine_columns(subcols)
                        vals[super_column] = create_instance(self.cls, key=key, super_column=super_column, **combined)
                    ret[key] = vals
                else:
                    combined = self.combine_columns(columns)
                    ret[key] = create_instance(self.cls, key=key, super_column=kwargs['super_column'], **combined)
            else:
                combined = self.combine_columns(columns)
                ret[key] = create_instance(self.cls, key=key, **combined)
        return ret

    def get_count(self, *args, **kwargs):
        """
        Count the number of columns for a key

        Parameters
        ----------
        key : str
            The key with which to count columns

        Returns
        -------
        int Count of columns
        """
        return self.column_family.get_count(*args, **kwargs)

    def get_range(self, *args, **kwargs):
        """
        Get an iterator over keys in a specified range

        Parameters
        ----------
        start : str
            Start from this key (inclusive)
        finish : str
            End at this key (inclusive)
        columns : [str]
            Limit the columns or super_columns fetched to the specified list
        column_start : str
            Only fetch when a column or super_column is >= column_start
        column_finish : str
            Only fetch when a column or super_column is <= column_finish
        column_reversed : bool
            Fetch the columns or super_columns in reverse order. This will do
            nothing unless you passed a dict_class to the constructor.
        column_count : int
            Limit the number of columns or super_columns fetched per key
        row_count : int
            Limit the number of rows fetched
        super_column : str
            Fetch only this super_column
        read_consistency_level : ConsistencyLevel
            Affects the guaranteed replication factor before returning from
            any read operation

        Returns
        -------
        iterator over Class instance
        """
        if 'columns' not in kwargs and not self.column_family.super and not self.raw_columns:
            kwargs['columns'] = self.columns.keys()

        for key, columns in self.column_family.get_range(*args, **kwargs):
            if self.column_family.super:
                if 'super_column' not in kwargs:
                    vals = {}
                    for super_column, subcols in columns.iteritems():
                        combined = self.combine_columns(subcols)
                        vals[super_column] = create_instance(self.cls, key=key, super_column=super_column, **combined)
                    yield vals
                else:
                    combined = self.combine_columns(columns)
                    yield create_instance(self.cls, key=key, super_column=kwargs['super_column'], **combined)
            else:
                combined = self.combine_columns(columns)
                yield create_instance(self.cls, key=key, **combined)

    def insert(self, instance, columns=None):
        """
        Insert or update columns for a key

        Parameters
        ----------
        instance : Class instance
            The key to insert or update the columns at
        columns : ['column']
            Limit the columns inserted to this list

        Returns
        -------
        int timestamp
        """
        insert_dict = {}
        if columns is None:
            columns = self.columns.keys()
        # Pack each mapped attribute with its declared Column type.
        for column in columns:
            insert_dict[column] = self.columns[column].pack(instance.__dict__[column])
        if self.column_family.super:
            insert_dict = {instance.super_column: insert_dict}
        return self.column_family.insert(instance.key, insert_dict)

    def remove(self, instance, column=None):
        """
        Remove this instance

        Parameters
        ----------
        instance : Class instance
            Remove the instance where the key is instance.key
        column : str
            If set, remove only this Column. Doesn't do anything for SuperColumns

        Returns
        -------
        int timestamp
        """
        # Hmm, should we only remove the columns specified on construction?
        # It's slower, so we'll leave it out.
        if self.column_family.super:
            return self.column_family.remove(instance.key, column=instance.super_column)
        return self.column_family.remove(instance.key, column)
|
# print('文能提笔安天下')
# print('武能上马定乾坤')
#
# print('''文能提笔安天下
# 武能上马定乾坤''')
# wonderful = 66
#
# num = int(input('请输入一个数:'))
# if num > 66:
# print('猜大了')
# elif num < 66:
# print('猜小了')
# else:
# print('恭喜你.')
# content = input('请输入麻花藤:')
# if content == '麻花藤':
# print('真聪明')
# else:
# print('你是SB么?')
|
import json
import logging
import os
import pathlib
import shutil
import sys
import jsonschema
from players import PlayerLookup
def config_binary(json_content, binary):
    """Resolve *binary* to a path via the config's "executables" entries.

    Falls back to default_binary() when the config does not name it.
    """
    # The loop variable previously shadowed this function's own name;
    # renamed for clarity (behavior unchanged).
    for exe_entry in json_content.get('executables', {}):
        if exe_entry['name'] == binary:
            return pathlib.Path(exe_entry['path'])
    return default_binary(binary)
def default_binary(binary):
    """Locate *binary* on PATH, with a macOS fallback for VLC.

    Returns a pathlib.Path, or None when nothing suitable is found.
    """
    found = shutil.which(binary)
    if found:
        return pathlib.Path(found)
    # Special-case defaults?
    if binary == 'vlc' and sys.platform == 'darwin':
        candidate = pathlib.Path('/Applications/VLC.app/Contents/MacOS/VLC')
        if candidate.exists():
            return candidate
    return None
def default_player(ext):
    """Pick a fallback player binary name for the given file extension."""
    if ext == '.mp3':
        return first_available_player(['mpg123', 'mplayer', 'mpv'])
    if ext in ('.mp4', '.mkv'):
        return first_available_player(['omxplayer', 'mpv', 'vlc'])
    assert False, "Missing default player for " + ext
def default_config_path():
    """Return the default location of the user's .picaverc config file."""
    return pathlib.Path.home().joinpath('.picaverc')
def first_available_player(binaries):
    """Return the first binary name that resolves to an executable, else None."""
    return next((name for name in binaries if default_binary(name)), None)
class Config(object):
    """Picave configuration: executable paths, per-extension players,
    cache directories, FTP values and favourites.

    Loaded from a JSON file validated against config.schema.json, or built
    from platform defaults when no file is given.
    """

    def __init__(self, filename=None):
        """
        :param filename: optional path to a JSON config file; when omitted,
            platform defaults are used and ``save()`` targets ~/.picaverc.
        """
        self.executables = {}  # map from executable name to pathlib.Path (or None)
        self.players = {}  # map from '.ext' to Player instance (or None)
        self.warm_up_music_directory = None  # pathlib.Path or None
        self.video_cache_directory = None  # pathlib.Path
        self.ftp = None  # map from video id (or 'default') to number
        self.favourites = []  # list of video ids (str)
        self.show_favourites_only = False
        schema_filename = pathlib.Path(__file__).parent / 'config.schema.json'
        # FIX: close the schema file promptly instead of leaking the handle.
        with open(schema_filename) as schema_file:
            self.schema = json.load(schema_file)
        self.executable_names = (self.schema['definitions']['supported_players']['enum']
                                 + self.schema['definitions']['other_executables']['enum'])
        if filename:
            self._init_from_file(filename)
            self.filename = filename
        else:
            self._init_with_defaults()
            self.filename = default_config_path()

    def _init_from_file(self, filename):
        """Populate this Config from a JSON file, validating it first."""
        try:
            # `with` guarantees the handle is closed even if parsing fails.
            with open(filename) as handle:
                json_content = json.load(handle)
        except OSError:
            # TODO: Error handling
            raise
        jsonschema.validate(instance=json_content, schema=self.schema)  # throws on validation error
        for binary in self.executable_names:
            self.executables[binary] = config_binary(json_content, binary)
            logging.debug("Exe %s=%s" % (binary, self.executables[binary]))
        for player_config in json_content['filetypes']:
            ext = player_config['ext']
            player = player_config['player']
            cmd_args = player_config.get('options', None)
            player_parameters = player_config.get('parameters', {})
            player_class = PlayerLookup[player]
            self.players[ext] = player_class(exe=self.executables[player],
                                             default_args=cmd_args,
                                             player_parameters=player_parameters)
            logging.debug("player %s=%s" % (ext, self.players[ext]))
        self.video_cache_directory = pathlib.Path(json_content['video_cache_directory']).expanduser().resolve()
        warm_up_dir = json_content.get('warm_up_music_directory')
        self.warm_up_music_directory = (
            pathlib.Path(warm_up_dir).expanduser().resolve() if warm_up_dir else None)
        self.ftp = json_content['FTP']
        self.favourites = json_content.get('favourites', [])
        self.show_favourites_only = json_content.get('show_favourites_only', False)

    def _init_with_defaults(self):
        """Populate this Config from platform defaults (no config file)."""
        self.video_cache_directory = pathlib.Path('~/.picave_cache').expanduser()
        if not self.video_cache_directory.exists():
            self.video_cache_directory.mkdir()
        for binary in self.executable_names:
            self.executables[binary] = default_binary(binary)
            logging.debug("Exe %s=%s" % (binary, self.executables[binary]))
        for ext in self.schema['definitions']['player']['properties']['ext']['enum']:
            player_name = default_player(ext)
            # BUG FIX: the old code indexed PlayerLookup with player_name
            # before checking for None (the later `player is None` check was
            # unreachable), raising KeyError when no player was installed.
            if player_name is None:
                self.players[ext] = None
                logging.warning("No player found for %s files" % ext)
                continue
            player_class = PlayerLookup[player_name]
            self.players[ext] = player_class(exe=self.executables[player_name],
                                             default_args=None, player_parameters={})
            logging.debug("player %s=%s" % (ext, self.players[ext]))
        self.ftp = {
            'default': 200
        }
        self.favourites = []
        self.show_favourites_only = False

    def save(self):
        """Write the configuration back to ``self.filename``.

        Writes a temporary '.new' file first, keeps the previous file as a
        '.bak' backup, then renames the new file into place.
        """
        to_write = {
            'video_cache_directory': str(self.video_cache_directory),
            # Skip executables/players that could not be resolved: the old
            # code serialized them as the literal string "None".
            'executables': [{
                "name": name,
                "path": str(path)
            } for name, path in self.executables.items() if path is not None],
            'filetypes': [{
                "ext": ext,
                "player": player.name,
                "options": player.default_args,
                "parameters": player.player_parameters
            } for ext, player in self.players.items() if player is not None],
            'FTP': self.ftp,
            'favourites': self.favourites,
            'show_favourites_only': self.show_favourites_only,
        }
        # BUG FIX: str(None) == "None" used to be written for an unset
        # warm-up directory; only emit the key when a directory is set.
        if self.warm_up_music_directory is not None:
            to_write['warm_up_music_directory'] = str(self.warm_up_music_directory)
        temp_filename = self.filename.with_suffix('.new')
        if temp_filename.exists():
            os.remove(temp_filename)
        with open(temp_filename, 'w') as handle:
            json.dump(to_write, handle, indent=4)
        if self.filename.exists():
            backup_filename = self.filename.with_suffix('.bak')
            if backup_filename.exists():
                os.remove(backup_filename)
            os.rename(self.filename, backup_filename)
        os.rename(temp_filename, self.filename)
|
#!/usr/bin/python
from restkit.resource import Resource
import urllib2
import time
from StringIO import StringIO
import json
import logging
import sys
from urllib import urlencode
from optparse import OptionParser
from urlparse import urljoin
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")
def getDocTemplate():
    """Build a fresh Learning Registry resource_data envelope.

    A brand-new dict is returned on every call so callers can mutate it
    freely before submission.
    """
    template = {}
    template["doc_type"] = "resource_data"
    template["doc_version"] = "0.11.0"
    template["resource_data_type"] = "metadata"
    template["active"] = True
    template["submitter_type"] = "agent"
    template["submitter"] = "Nottingham Xpert"
    template["TOS"] = {"submission_TOS": "http://www.learningregistry.org/tos/cc-by-3-0/v0-5/"}
    # Placeholders filled in by the harvester for each record.
    template["resource_locator"] = None
    template["keys"] = []
    template["payload_placement"] = None
    template["payload_schema"] = []
    template["payload_schema_locator"] = None
    template["payload_locator"] = None
    template["resource_data"] = None
    return template
def cleanDoc(doc):
    """Strip keys whose value is None from ``doc`` in place.

    Iterates over a snapshot of the keys so the dict can be mutated safely
    while looping (required on Python 3, where .keys() is a live view).
    """
    for key in list(doc.keys()):
        if doc[key] is None:
            del doc[key]
def getKeywords():
    """Return the non-blank words of the system word list.

    BUG FIX: the old loop consumed one line in the ``while`` test and a
    second one in the body, silently dropping every other word; iterating
    the file directly reads each line exactly once.  The handle is now also
    closed even if reading fails.
    """
    keywords = []
    with open("/usr/share/dict/words") as f:
        for line in f:
            word = line.strip()
            if word != '':
                keywords.append(word)
    return keywords
def getData(sourceUrl, keyword=None):
    """Fetch the JSON record list from ``sourceUrl`` and wrap each record in
    a Learning Registry resource_data document.

    :param sourceUrl: URL returning a JSON list of resource records
    :param keyword: optional keyword recorded in each document's ``keys``
    :return: list of resource_data documents
    """
    handle = urllib2.urlopen(sourceUrl)
    raw = handle.read()
    handle.close()
    # Carriage returns in the feed upset the JSON parser; flatten them.
    records = json.loads(str(raw).replace("\r", " "))
    documents = []
    for record in records:
        doc = getDocTemplate()
        doc['resource_locator'] = record['link']
        doc["payload_placement"] = "inline"
        doc["resource_data"] = record
        doc['doc_ID'] = record['xpert_id']
        if keyword is not None:
            doc['keys'].append(keyword)
        cleanDoc(doc)
        documents.append(doc)
    return documents
def getByAllKeyWords(baseUrl):
    """Harvest documents for every dictionary keyword under ``baseUrl``.

    Deduplicates by doc_ID across keywords: a document found under several
    keywords accumulates all of them in its ``keys`` list.

    :param baseUrl: base URL each keyword is joined onto
    :return: list of unique resource_data documents
    """
    keywords = getKeywords()
    # Use a dictionary keyed on doc_ID to avoid duplicate data coming back
    # from the different keyword queries.
    dataDict = {}
    for k in keywords:
        url = urljoin(baseUrl, k)
        log.info("Keyword: %s" % (k,))
        data = []
        try:
            data = getData(url, k)
        except Exception as e:
            log.exception(e)
            continue
        log.info("\t %s: %d\n" % (k, len(data)))
        for doc in data:
            doc_id = doc['doc_ID']
            if doc_id in dataDict:
                # BUG FIX: previously indexed with the literal string
                # 'doc_ID', so extra keywords were never recorded.
                dataDict[doc_id]['keys'].append(k)
            else:
                dataDict[doc_id] = doc
    # BUG FIX: previously returned [dataDict.values] -- a one-element list
    # containing the bound method, not the documents themselves.
    return list(dataDict.values())
def bulkUpdate(resourceList, destinationUrl):
    '''
    Publish the collected documents to a Learning Registry node.

    :param resourceList: list of resource_data documents
    :param destinationUrl: base URL of the registry node
    '''
    # BUG FIX: `resourceList > 0` compared a list with an int -- always True
    # in Python 2 (even for an empty list) and a TypeError in Python 3.
    # Test emptiness instead.
    if resourceList:
        try:
            log.info("Learning Registry Node URL: '{0}'\n".format(destinationUrl))
            res = Resource(destinationUrl)
            body = {"documents": resourceList}
            log.info("request body: %s" % (json.dumps(body),))
            clientResponse = res.post(path="/publish", payload=json.dumps(body), headers={"Content-Type": "application/json"})
            log.info("status: {0} message: {1}".format(clientResponse.status_int, clientResponse.body_string()))
        except Exception:
            log.exception("Caught Exception When publishing to registry")
    else:
        log.info("Nothing is being updated.")
def parseCommand():
    """Parse command-line options, harvest the documents, then either
    publish them to the registry or dump them to stdout."""
    parser = OptionParser()
    parser.add_option('-u', '--url', dest="registryUrl", help='URL of the registry to push the data.', default="http://localhost")
    parser.add_option('-o', '--output', dest="output", help='Output file instead of publish', default=None)
    parser.add_option('-s', '--source-url', dest="sourceUrl", help="The source url where to pull the data from")
    parser.add_option('-b', '--base-source-url', dest="baseSourceUrl", default=None, help="Base source url, keywords will be append to it")
    options, _ = parser.parse_args()
    # Either crawl every dictionary keyword or a single source URL.
    if options.baseSourceUrl is not None:
        docList = getByAllKeyWords(options.baseSourceUrl)
    else:
        docList = getData(options.sourceUrl)
    print("Number of collected data: %d " % (len(docList)))
    if options.output is None:
        bulkUpdate(docList, options.registryUrl)
    else:
        for d in docList:
            sys.stdout.write("%s\n" % (d,))
# Script entry point: harvest the Xpert feed and publish it when run directly.
if __name__ == '__main__':
    log.info("Update Started.")
    parseCommand()
|
"""
Atribuindo dados em um DataFrame
"""
import pandas as pd
base = pd.read_csv("./index_select_assign/assigning_data_3/census2.csv")
print(base.loc[base.occupation == " ?"])
# OBS: não alterei o arquivo original, modifiquei a base
base.loc[base.occupation == " ?", "occupation"] = "Não informado"
print(base.loc[base.occupation == " ?"]) |
from lm_eval import tasks
from itertools import islice

# Print a few sample documents (context + target) for every registered task,
# preferring the validation split when one exists.
SAMPLES = 3

for task_name, task_class in tasks.TASK_REGISTRY.items():
    task = task_class()
    print('#', task_name)
    source = task.validation_docs() if task.has_validation_docs() else task.test_docs()
    docs = islice(source, SAMPLES)
    print()
    for _ in range(SAMPLES):
        print()
        doc = next(docs)
        print("**Context**:", "\n```\n" + task.doc_to_text(doc) + "\n```\n")
        print()
        print('**Target**:', "\n```\n" + task.doc_to_target(doc) + "\n```\n")
        print()
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.2
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''hsfm'': conda)'
# name: python3
# ---
# Trying to follow...
# https://ceholden.github.io/open-geo-tutorial/python/chapter_5_classification.html
from glob import glob
import numpy as np
from scipy.io import loadmat
import rasterio as rio
import scipy.io
import rioxarray as rix
import geopandas as gpd
import matplotlib.pyplot as plt
# Create a clipped section of a large orthomosaic
# !gdal_translate -projwin 582332 5406614 583674 5405775 \
# /data2/elilouis/generate_ee_dems_baker/mixed_timesift/individual_clouds/79_10.0/cluster0/1/orthomosaic_final.tif \
# /data2/elilouis/hsfm-geomorph/data/historical_land_cover_classification/orthomosaic.tif
# Create a clipped section of a large DoD
# !gdal_translate -projwin 582332 5406614 583674 5405775 \
# /data2/elilouis/generate_ee_dems_baker/mixed_timesift/individual_clouds/79_10.0/cluster0/1/pc_align/spoint2point_bareground-trans_source-DEM_dem_align/spoint2point_bareground-trans_source-DEM_reference_dem_clipped_nuth_x+0.78_y+1.20_z+1.31_align_diff.tif \
# /data2/elilouis/hsfm-geomorph/data/historical_land_cover_classification/dod.tif
# Create a clipped section of a large DEM
# !gdal_translate -projwin 582332 5406614 583674 5405775 \
# /data2/elilouis/generate_ee_dems_baker/mixed_timesift/individual_clouds/79_10.0/cluster0/1/pc_align/spoint2point_bareground-trans_source-DEM_dem_align/spoint2point_bareground-trans_source-DEM_reference_dem_clipped_nuth_x+0.78_y+1.20_z+1.31_align.tif \
# /data2/elilouis/hsfm-geomorph/data/historical_land_cover_classification/dem.tif
# Create a terrain ruggedness index tiff
# !gdaldem TRI \
# -compute_edges \
# /data2/elilouis/hsfm-geomorph/data/historical_land_cover_classification/dem.tif \
# /data2/elilouis/hsfm-geomorph/data/historical_land_cover_classification/tri.tif
# Paths of the four input layers used as classification features.
bands = [
    "/data2/elilouis/hsfm-geomorph/data/historical_land_cover_classification/orthomosaic.tif",
    "/data2/elilouis/hsfm-geomorph/data/historical_land_cover_classification/dod.tif",
    "/data2/elilouis/hsfm-geomorph/data/historical_land_cover_classification/dem.tif",
    "/data2/elilouis/hsfm-geomorph/data/historical_land_cover_classification/tri.tif"
]
# Open the 4 layers; DoD/DEM/TRI are reprojected onto the orthomosaic's grid
# so every raster shares the same shape and CRS.
ortho = rix.open_rasterio(bands[0])
dod = rix.open_rasterio(bands[1], masked=True).rio.reproject_match(ortho)[0]
dem = rix.open_rasterio(bands[2], masked=True).rio.reproject_match(ortho)[0]
tri = rix.open_rasterio(bands[3], masked=True).rio.reproject_match(ortho)[0]
# Combine the greyscale and alpha bands of the orthomosaic: keep a pixel only
# where alpha is fully opaque (255); everything else becomes NaN.
ortho_raster_values = ortho[0]
ortho_alpha_values = ortho[1]
ortho = ortho_raster_values.where(
    ortho_alpha_values == 255
)
type(ortho), type(dod), type(dem), type(tri)
ortho.values.shape, dod.values.shape, dem.values.shape, tri.values.shape
# Quick-look plots of the four layers, decimated to every 10th pixel.
fix, axes = plt.subplots(2, 2, figsize=(20,12), sharex=True, sharey=True)
axes[0][0].imshow(ortho.values[::10, ::10], cmap='gray')
axes[0][1].imshow(dod.values[::10, ::10], cmap='PuOr')
axes[1][0].imshow(dem.values[::10, ::10], cmap='terrain')
axes[1][1].imshow(tri.values[::10, ::10], cmap='viridis')
from sklearn.impute import SimpleImputer

# Fill NaNs with the per-column mean so the classifier receives a dense
# feature matrix.
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
# Impute values for all 4 bands
ortho.values = imp.fit_transform(ortho.values)
ortho.values.shape
# The imputed array can come back with fewer columns than the input (see the
# open question below); pad by duplicating the last column until the original
# width is restored.  NOTE(review): padding with a copied edge column
# fabricates data -- confirm this is acceptable.
dod_fitted = imp.fit_transform(dod.values)
for i in range(0, dod.values.shape[1] - dod_fitted.shape[1]):
    dod_fitted = np.column_stack((dod_fitted, dod_fitted[:, -1]))
dod.values = dod_fitted
dod.values.shape
# Why are these failing? Imputer appears to be changing the shape of the data?
tri_fitted = imp.fit_transform(tri.values)
n_missing_cols = tri.values.shape[1] - tri_fitted.shape[1]
print(f'Adding {n_missing_cols} columns')
for i in range(0, n_missing_cols):
    tri_fitted = np.column_stack((tri_fitted, tri_fitted[:, -1]))
tri.values = tri_fitted
tri.values.shape
# + jupyter={"outputs_hidden": true}
dem_fitted = imp.fit_transform(dem.values)
n_missing_cols = dem.values.shape[1] - dem_fitted.shape[1]
print(f'Adding {n_missing_cols} columns')
for i in range(0, ):
dem_fitted = np.column_stack((dem_fitted, dem_fitted[:, -1]))
dem.values = dem_fitted
dem.values.shape
# -
# Stack the four layers into a single (rows, cols, 4) feature cube.
all_bands = np.dstack([ortho.values, dod.values, tri.values, dem.values])
all_bands.shape
# Load training data (hand-digitised polygons with a class label each).
from geocube.api.core import make_geocube
training_data_df = gpd.read_file("/data2/elilouis/hsfm-geomorph/data/historical_land_cover_classification/training_data.geojson")
# Integer code for each land-cover class (0 is reserved for "unlabelled").
classes = {
    'water': 1,
    'forest': 2,
    'bareground': 3,
    'ice': 4,
}
training_data_df['key'] = training_data_df['id'].apply(classes.get)
training_data_df
# +
from geocube.api.core import make_geocube
# Rasterise the labelled polygons onto a 1m grid.
result = make_geocube(
    training_data_df,
    measurements=["key"],
    resolution=(1, -1),
)
# -
result.key.rio.to_raster(
    "/data2/elilouis/hsfm-geomorph/data/historical_land_cover_classification/training_data.tif"
)
# Reproject training data so our images are equal size and stuff
training_data = rix.open_rasterio(
    "/data2/elilouis/hsfm-geomorph/data/historical_land_cover_classification/training_data.tif"
).rio.reproject_match(ortho_raster_values)
plt.imshow(training_data.values[0])
training_data.plot()
# Classify
# replace nans in training data with 0
roi = training_data.values[0]
img = all_bands
roi.shape, img.shape
# NOTE(review): the second positional argument of np.nan_to_num is `copy`,
# not the fill value, so the 0 here means copy=False (in-place).  NaNs are
# still replaced by the default 0.0, but `np.nan_to_num(roi, nan=0)` would
# state the intent -- confirm.
roi = np.nan_to_num(roi, 0)
labels = np.unique(roi[roi > 0])
print('The training data include {n} classes: {classes}'.format(n=labels.size,
                                                                classes=labels))
# Keep only labelled pixels: X are their feature vectors, y their classes.
X = img[roi > 0, :]
y = roi[roi > 0]
X.shape, y.shape
# +
from sklearn.ensemble import RandomForestClassifier

# Train a random forest on the labelled pixels; oob_score gives a free
# out-of-bag accuracy estimate.
rf = RandomForestClassifier(n_estimators=500, oob_score=True)
rf = rf.fit(X, y)
# -
print('Our OOB prediction of accuracy is: {oob}%'.format(oob=rf.oob_score_ * 100))
rf.feature_importances_
# Look at crosstabulation to see class confusion
# +
import pandas as pd
# Setup a dataframe -- just like R
df = pd.DataFrame()
df['truth'] = y
df['predict'] = rf.predict(X)
# Cross-tabulate predictions (confusion matrix on the training pixels).
print(pd.crosstab(df['truth'], df['predict'], margins=True))
# -
# Predict the rest of the image
img.shape
# Flatten (rows, cols, bands) to (rows*cols, bands) for sklearn.
new_shape = (img.shape[0] * img.shape[1], img.shape[2])
img_as_array = img.reshape(new_shape)
print('Reshaped from {o} to {n}'.format(o=img.shape, n=img_as_array.shape))
# +
# Now predict the class of every pixel in the image.
class_prediction = rf.predict(img_as_array)
# Reshape our classification map back to the image's (rows, cols).
class_prediction = class_prediction.reshape(img[:, :, 0].shape)
# -
# Visualize
class_prediction
# Wrap the predictions in a raster with the orthomosaic's georeferencing.
prediction = ortho_raster_values.copy()
prediction.values = class_prediction
plt.imshow(ortho_raster_values.values, cmap='gray')
ortho_raster_values.plot(cmap='gray')
classes
# One colour per class: water, forest, bareground, ice.
flatui = ["#0000FF", "#008000", "#964B00", "#FFFFFF"]
prediction.plot(levels=[0.5, 1.5, 2.5, 3.5, 4.5], colors=flatui)
plt.imshow(prediction.values)
plt.colorbar()
# BUG FIX: plt.imshow() accepts neither `levels` nor `colors` (those are
# xarray/contour keywords), so the original call raised TypeError.  Build
# an equivalent discrete colormap + norm instead.
from matplotlib.colors import ListedColormap, BoundaryNorm
class_cmap = ListedColormap(flatui)
plt.imshow(prediction.values, cmap=class_cmap,
           norm=BoundaryNorm([0.5, 1.5, 2.5, 3.5, 4.5], class_cmap.N))
|
import requests
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
class GlobeePayment:
    """
    Globee Payment

    Thin wrapper around the Globee payment API.  Reads GLOBEE_AUTH_KEY
    (and the optional GLOBEE_TESTNET flag, default True) from Django
    settings and exposes the payment-request endpoints.
    """

    def __init__(self, payment_data: dict = None, payment_id: str = None):
        """
        Init Globee payment
        :param payment_data: dict with payment data
        :param payment_id: the payment id that identifies the payment request
        :raises ValidationError: if GLOBEE_AUTH_KEY is missing, empty or not a string
        """
        self.payment_data = payment_data or dict()
        self.payment_id = payment_id
        self.redirect_url = None
        self.auth_key = getattr(settings, 'GLOBEE_AUTH_KEY', None)
        if self.auth_key is None:
            raise ValidationError('GLOBEE_AUTH_KEY not found!')
        elif not isinstance(self.auth_key, str):
            raise ValidationError('GLOBEE_AUTH_KEY is not a string!')
        elif not self.auth_key:
            raise ValidationError('GLOBEE_AUTH_KEY is empty!')
        self.testnet = getattr(settings, 'GLOBEE_TESTNET', True)
        self.api_url = 'https://%sglobee.com/payment-api/v1' % ('test.' if self.testnet else '')
        self.headers = {
            'Accept': 'application/json',
            'X-AUTH-KEY': self.auth_key
        }

    def ping(self):
        """
        Sends a ping to verify that the integration and authentication is done correctly.
        :return: response with the merchant name and url
        """
        r = requests.get('%s/ping' % self.api_url, headers=self.headers)
        response = r.json()
        if r.status_code == 200 and response.get('success'):
            return response
        # FIX: response['message'] could itself raise KeyError on an error
        # payload without that key; fall back to the whole response body.
        raise ValidationError("status code %s: %s" % (r.status_code, response.get('message', response)))

    def check_required_fields(self):
        """
        Checks all required fields.
        :return: returns True if required fields are set
        :raises ValidationError: when a required field is missing or invalid
        """
        try:
            total = self.payment_data['total']
            email = self.payment_data['customer']['email']
            if not isinstance(total, (int, float)):
                raise ValidationError('total is not an int, nor float!')
        except KeyError as e:
            raise ValidationError("key %s not set" % e)
        validate_email(email)
        return True

    def create_request(self):
        """
        Creates a new payment request.
        :return: payment url
        """
        r = requests.post('%s/payment-request' % self.api_url, headers=self.headers, json=self.payment_data)
        response = r.json()
        if r.status_code == 200 and response.get('success'):
            # Remember both the redirect url and the assigned payment id.
            self.redirect_url = response['data']['redirect_url']
            self.payment_id = response['data']['id']
            return response['data']['redirect_url']
        raise ValidationError('status code: %s - %s' % (r.status_code, response))

    def get_payment_url(self):
        """
        gets the payment url
        :return: payment url (set by create_request), or None
        """
        return self.redirect_url

    def get_payment_by_id(self, payment_id: str = None):
        """
        Fetches a previously created payment request by payment_id.
        :param payment_id: the payment id that identifies the payment request
        :return: payment data
        """
        payment_id = payment_id or self.payment_id
        if not payment_id:
            raise ValidationError('payment_id is None/empty')
        r = requests.get('%s/payment-request/%s' % (self.api_url, payment_id), headers=self.headers)
        response = r.json()
        if r.status_code == 200 and response.get('success'):
            return response['data']
        raise ValidationError('status code: %s - %s' % (r.status_code, response))

    def update_payment_request(self, payment_id: str = None, payment_data: dict = None):
        """
        Updates an existing payment request.
        :param payment_id: the payment id that identifies the payment request
        :param payment_data: dict with payment data
        :return: response data
        """
        payment_id = payment_id or self.payment_id
        payment_data = payment_data or self.payment_data
        if not payment_id:
            raise ValidationError('payment_id is None/empty')
        elif not payment_data:
            raise ValidationError('payment_data is None/empty')
        try:
            # BUG FIX: previously read self.payment_data here while
            # validating/sending payment_data, so a missing email in an
            # explicitly passed payment_data escaped as a bare KeyError.
            email = payment_data['customer']['email']
        except KeyError as e:
            raise ValidationError("%s not set" % e)
        validate_email(email)
        r = requests.put('%s/payment-request/%s' % (self.api_url, payment_id), headers=self.headers, json=payment_data)
        response = r.json()
        if r.status_code == 200 and response.get('success'):
            return response['data']
        raise ValidationError('status code: %s - %s' % (r.status_code, response))

    def get_payment_details(self, payment_id: str = None):
        """
        Returns the accepted crypto-currencies and associated address information for the payment-request associated with the given id.
        :param payment_id: the payment id that identifies the payment request
        :return: return payment details like accepted crypto-currencies and associated address information
        """
        payment_id = payment_id or self.payment_id
        if not payment_id:
            raise ValidationError('payment_id is None/empty')
        r = requests.get('%s/payment-request/%s/payment-methods' % (self.api_url, payment_id), headers=self.headers)
        response = r.json()
        if r.status_code == 200 and response.get('success'):
            return response['data']
        raise ValidationError('status code: %s - %s' % (r.status_code, response))

    def get_payment_currency_details(self, currency_id: str, payment_id: str = None, address_id: str = None):
        """
        Generates and returns the payment details for a given payment request and payment currency.
        :param currency_id: one of the currency id's: BTC, XMR, LTC, DOGE, ETH, XRP etc.
        :param payment_id: the payment id that identifies the payment request
        :param address_id: the address id if it has been assigned. Examples: default, lightning_address
        :return: returns the payment details for a given payment request and payment currency
        """
        payment_id = payment_id or self.payment_id
        if not payment_id:
            raise ValidationError('payment_id is None/empty')
        url = '%s/payment-request/%s/addresses/%s' % (self.api_url, payment_id, currency_id)
        if address_id:
            url += '/%s' % address_id
        r = requests.get(url, headers=self.headers)
        response = r.json()
        if r.status_code == 200 and response.get('success'):
            return response['data']
        raise ValidationError('status code: %s - %s' % (r.status_code, response))

    def get_payment_methods(self):
        """
        This returns the merchant account's accepted crypto-currencies.
        :return: returns accepted crypto-currencies
        """
        r = requests.get('%s/account/payment-methods' % self.api_url, headers=self.headers)
        response = r.json()
        if r.status_code == 200 and response.get('success'):
            return response['data']
        raise ValidationError('status code: %s - %s' % (r.status_code, response))
|
# -*- coding: utf-8 -*-
import sys
sys.path.append('../')
import sklearn
import sklearn.preprocessing
import numpy as np
from datetime import datetime
import math
from utilx.constant import CONST, RedisKeyConstant
import traceback
from utilx.utils import Utils
import logging
from utilx.redix import RedisService
import time
from database.helper import DB_Session
from handlers.base import MetaHandler
import random
class VideoTabRankingWithWFAlgorithmHandler(MetaHandler):
    # This algorithm addresses how to measure time decay when ranking posts
    # in a community, e.g.:
    # - where on the front page should an old but popular post sit?
    # - where should a brand-new post sit?
    # - how do we tune the steepness of time decay so its effect on the
    #   ranking stays stable?
    #
    # Sketch ("let me tell you a story"):
    # Core idea: convert time into an equivalent amount of "wealth".
    # Since the genesis instant T0, the world as a whole produces on average
    # X likes, Y reads, Z collects and W comments per second.
    # A post A born at time T therefore inherits the wealth the world has
    # produced by then: F = (T - T0) * (X, Y, Z, W).
    # F is the halo granted by being young; if A later earns more wealth from
    # elsewhere it ranks ahead of its competitors in the total-wealth contest.
    # Posts born at the same instant are ranked by how efficiently they earn
    # external wealth afterwards; posts earning slower than the world's
    # average production rate are gradually pushed down by newer posts.
    #
    # In general mathematical form:
    #   F0 = (T - T0) * X,  X = (x1, ..., xn), xi = average production rate
    #        of wealth type i in world W
    #   F  = F0 + G,  G = (A1, ..., An), Ai = wealth of type i earned by A
    #        after birth
    #   S  = P . F,  P = (p1, ..., pn), pi = scoring weight of wealth type i
    # where F0 is A's starting wealth, G the wealth earned so far, F the total.
    #
    # In this world (the video tab: all video and short-video posts that take
    # part in the ranking) four wealth types are quantified:
    #   post_like_count, post_collection_count, post_read_count,
    #   post_comment_count.

    # X: average wealth production rates, in units per second.
    x1 = 100 # average likes per second
    x2 = 120 # average reads per second
    x3 = 50 # average collects per second
    x4 = 50 # average comments per second
    X = [x1, x2, x3, x4]
    # P: scoring weight of each wealth type.
    # base_score = 0.55 * post_like_count + 0.05 * post_read_count + 0.20 * post_x_count + 0.20 * post_comment_count
    p1 = 0.55 # like weight
    p2 = 0.05 # read weight
    p3 = 0.20 # collect weight
    p4 = 0.20 # comment weight
    P = [p1, p2, p3, p4]
    # Precompute PX_SCORE, the effective score produced per second, so
    # get_post_create_time_init_score only needs a single multiplication.
    PX_SCORE = sum(map(lambda x : x[0] * x[1] , zip(P, X)))
def get_post_create_time_init_score(self, post_info):
    """Score a post's "birthright wealth": seconds elapsed between the
    world's genesis instant and the post's creation, times PX_SCORE."""
    world_init_time = datetime(2014, 10, 1)  # genesis; set to the real instant before launch
    elapsed_seconds = (post_info.post_create_time - world_init_time).total_seconds()
    return elapsed_seconds * self.PX_SCORE
def get_post_interaction_score(self, post_info):
    """Score the wealth a post has earned since birth: likes, reads,
    collects and comments, weighted by P."""
    earned = [
        post_info.post_likes_count,
        post_info.post_read,
        post_info.post_collects_count,
        post_info.post_comments_count,
    ]
    return sum(weight * amount for weight, amount in zip(self.P, earned))
def get_post_ranking_score(self, post_info):
    """Total ranking score: birthright wealth plus earned interaction wealth."""
    return (self.get_post_create_time_init_score(post_info)
            + self.get_post_interaction_score(post_info))
def run(self):
    """Recompute the weighted-wealth ranking score for a fixed set of posts
    and push the scores into Redis for the video tab."""
    # target_post_ids = []
    # ONLINE DATA -- hard-coded post ids captured from production.
    target_post_ids = [6293207802437474349,
                       6293223747588271450,
                       6293223747589271926,
                       6293245510875455330,
                       6293258517831685919,
                       6293258517832793741,
                       6293274501954637184,
                       6293274501955747196,
                       6293274501955817616,
                       6293274501957423322,
                       6293274501957930643,
                       6293274501958662795,
                       6293274501958771681,
                       6293274501959009891,
                       6293274501959357870,
                       6293274501959709645,
                       6293274501959906351,
                       6293274501960390884,
                       6293274501960410621,
                       6293274501961878925,
                       6293274501962420314,
                       6293274501962843509,
                       6293274501963032884,
                       6293274501963500538,
                       6293274501964791878,
                       6293274501966863712,
                       6293274501970738916,
                       6293274501970751476,
                       6293274501971862325,
                       6293274501972020329,
                       6293274501973015220,
                       6293576039310555102,
                       6293576039311317638,
                       6293576039312078597,
                       6293576039312311034,
                       6293576039312621219,
                       6293576039317428725,
                       6293576039318372539,
                       6293615542238976065,
                       6293615542239584610,
                       6293615542239888325,
                       6293615542239894275,
                       6293615542241278385,
                       6293615542241311854,
                       6293615542242347244,
                       6293615542242912865,
                       6293615542243631249,
                       6293615542244419011,
                       6293615542244698937,
                       6293615542245856960,
                       6293615542246124940,
                       6293615542246661282,
                       6293615542246745094,
                       6293615542246829912,
                       6293615542249207432,
                       6293615542249432559,
                       6293615542249937830,
                       6293615542250353789,
                       6293615542252419998,
                       6293615542253640971,
                       6293615542253750470,
                       6293615542253968006,
                       6293615542254227626,
                       6293615542254255918,
                       6293615542254366065,
                       6293615542254382845,
                       6293615542255015161,
                       6293615542255082621,
                       6293615542255707017,
                       6293615542256486040,
                       6293615542256649870,
                       6293615542256748683,
                       6293615542257090963,
                       6293615542257137421,
                       6293615542257748998,
                       6293615542258554193,
                       6293615542258647584,
                       6293615542258697250,
                       6293615542259315153,
                       6293615542259380028,
                       6293615542259485757,
                       6293615542259520809,
                       6293615542259870201,
                       6293615542259882058,
                       6293615542259968425,
                       6293615542259973996,
                       6293615542260016954,
                       6293615542260167078,
                       6293615542260276262,
                       6293615542260549196,
                       6293615542260640870,
                       6293615542260781067,
                       6293615542260805346,
                       6293615542260880227,
                       6293615542261134646,
                       6293615542261150928,
                       6293615542261341072,
                       6293615542261502308,
                       6293615542262049954,
                       6293615542262276215,
                       6293615542262464125,
                       6293615542262722082,
                       6293615542262906724,
                       6293615542262934528,
                       6293615542263023616,
                       6293615542263119993,
                       6293615542263120625,
                       6293615542263201452,
                       6293615542263202106,
                       6293615542263277313,
                       6293615542263423054,
                       6293615542263925066,
                       6293615542264011782,
                       6293615542264734216,
                       6293615542265208816,
                       6293615542265217071,
                       6293615542265227390,
                       6293615542265411329,
                       6293615542267493151,
                       6293615542267591551,
                       6293615542268448670,
                       6293615542268688645,
                       6293615542268765410,
                       6293615542269252098,
                       6293615542269508415,
                       6293615542269532967,
                       6293615542269597589,
                       6293615542269930070,
                       6293615542270183766,
                       6293615542270196419,
                       6293615542270355141,
                       6293615542270434051,
                       6293615542271192094,
                       6293615542271224795,
                       6293615542272318544,
                       6293615542272447919,
                       6293615542272484428,
                       6293615542272593485,
                       6293615542272609570,
                       6293615542273211229,
                       6293615542273246035,
                       6293615542273258872,
                       6293615542273541272,
                       6293615542273562339,
                       6293615542273684623,
                       6293615542273689225,
                       6293615542273702567,
                       6293615542274030709,
                       6293615542274041027,
                       6293615542274711655,
                       6293615542275133181,
                       6293615542275369596,
                       6293615542275821255,
                       6293615542276034882,
                       6293615542276078621,
                       6293615542276141840,
                       6293615542276318548,
                       6293615542276562624,
                       6293615542276595031,
                       6293615542276610069,
                       6293615542277160072,
                       6293615542277282035,
                       6293615542277304915,
                       6293615542277317785,
                       6293615542277409558,
                       6293615542277668085,
                       6293615542278026899,
                       6293615542278350636,
                       6293615542278394105,
                       6293615542278781138,
                       6293615542279509069,
                       6293615542279859795,
                       6293615542280302529,
                       6293615542280765808,
                       6293615542280772084,
                       6293615542280815627,
                       6293615542280849544,
                       6293615542281251010,
                       6293615542281259932,
                       6293615542282036067,
                       6293615542282093219,
                       6293615542282632692,
                       6293615542282888711,
                       6293615542283031471,
                       6293615542283220377,
                       6293615542283558505,
                       6293615542283645565,
                       6293615542283800980,
                       6293615542283985653,
                       6293615542284456576,
                       6293615542284613868,
                       6293615542284788050,
                       6293615542284799202,
                       6293615542284801900,
                       6293615542284823957,
                       6293615542284831352,
                       6293615542284962233,
                       6293615542284980568,
                       6293615542284989016,
                       6293615542284996350,
                       6293615542285040259,
                       6293615542285248560,
                       6293615542285249530,
                       6293615542285429580,
                       6293615542285488059,
                       6293615542285560422,
                       6293615542286142330,
                       6293615542286317814,
                       6293615542286346543,
                       6293615542286439332,
                       6293615542286449576,
                       6293615542286452793,
                       6293615542286453778,
                       6293615542286503883,
                       6293615542286704433,
                       6293615542286715815,
                       6293615542286932262,
                       6293615542287018704,
                       6293615542287041081,
                       6293615542287483073,
                       6293615542288137127,
                       6293615542288316569,
                       6293615542288578832,
                       6293615542289107936,
                       6293615542289313165,
                       6293615542289464378,
                       6293615542289548531,
                       6293615542289946164,
                       6293615542290070413,
                       6293754345683204638,
                       6293754345684135970,
                       6293754345684223724,
                       6293754345684446602,
                       6293754345684615075,
                       6293754345684637585,
                       6293754345684703363,
                       6293754345684761824,
                       6293754345684966579,
                       6293754345685868609,
                       6293754345686211743,
                       6293754345688415579,
                       6293754345688895193,
                       6293754345689568023,
                       6293754345689763801,
                       6293754345689925452,
                       6293754345689963031,
                       6293754345690454119,
                       6293754345690684418,
                       6293754345690694173,
                       6293754345691872545,
                       6293754345691880307,
                       6293754345691881309,
                       6293754345691940130,
                       6293754345692299843,
                       6293754345692614160,
                       6293754345692857655,
                       6293754345693361711,
                       6293754345693384735,
                       6293754345694706028,
                       6293754345694971517,
                       6293754345695288642,
                       6293754345695938011,
                       6293754345696914505,
                       6293754345697037746,
                       6293754345697600529,
                       6293754345697810402,
                       6293754345697991878,
                       6293754345698533872,
                       6293754345698930601,
                       6293754345699523898,
                       6293754345700647579,
                       6293754345701308604,
                       6293754345702323998,
                       6293754345702655365,
                       6293754345703118740,
                       6293754345703332275,
                       6293754345705504189,
                       6293754345705664810,
                       6293754345706643701,
                       6293754345706975488,
                       6293754345707333866,
                       6293754345707500125,
                       6293754345708278171,
                       6293754345708311281,
                       6293754345708313228,
                       6293754345714566286,
                       6293754345714923830,
                       6293877835942165900,
                       6293877835942266098,
                       6293877835942556799,
                       6293877835943780738,
                       6293877835944482932,
                       6293877835946014554,
                       6293877835946238301,
                       6293877835946788404,
                       6293877835946923549,
                       6293877835946926635,
                       6293877835947610889,
                       6293877835948369526,
                       6293877835948687542,
                       6293877835948949885,
                       6293877835948978963,
                       6293877835949283588,
                       6293877835950255163,
                       6293877835950266170,
                       6293877835950321330,
                       6293877835950594065,
                       6293877835950606365,
                       6293877835955597683,
                       6293877835956349058,
                       6293877835956386575,
                       6293877835956793208,
                       6293877835956813625,
                       6293877835956889813,
                       6293877835957522193,
                       6293877835957753265,
                       6293877835958054489,
                       6293877835958182656,
                       6293877835958198046,
                       6293877835958739998,
                       6293877835959614021,
                       6293877835960012835,
                       6293877835960160330,
                       6293877835960249150,
                       6293877835960347663,
                       6293877835960511904,
                       6293877835960835671,
                       6293877835961126441,
                       6293877835961349703,
                       6293877835961583679,
                       6293877835961699564,
                       6293877835961746324,
                       6293877835961999839,
                       6293877835962555272,
                       6293877835962659817,
                       6293877835963142618,
                       6293877835963170900,
                       6293877835963279190,
                       6293877835963598658,
                       6293877835963604808,
                       6293877835963605544,
                       6293877835963633945,
                       6293877835963815302,
                       6293877835963858321,
                       6293877835963874098,
                       6293877835963982134,
                       6293877835964267289,
                       6293877835964282747,
                       6293877835964421955,
                       6293877835964422535,
                       6293877835964765775,
                       6293877835964923105,
                       6293877835964991421,
                       6293877835965035519,
                       6293877835965202086,
                       6293877835965610328,
                       6293877835965646309,
                       6293877835965717982,
                       6293877835965745122,
                       6293877835965764255,
                       6293877835965995073,
                       6293877835966041283,
                       6293877835967179321,
                       6293877835967233254,
                       6293877835967900010,
                       6293877835968040498,
                       6293877835968850888,
                       6293877835969357324,
                       6293877835969681225,
                       6293877835970233476,
                       6293877835970529022,
                       6293877835971073784,
                       6293877835971341887,
                       6293877835971471647,
                       6293877835972094950,
                       6293877835972097335,
                       6293877835972351667,
                       6293877835972365430,
                       6293877835972756526,
                       6293877835973301377,
                       6293877835973534511,
                       6293877835973729732,
                       6293877835974131428,
                       6293877835974255401,
                       6293877835975092045,
                       6293877835975234153,
                       6293877835975359571,
                       6293877835975682330,
                       6293877835976126210,
                       6293877835976259506,
                       6293877835976800654,
                       6293877835976900959,
                       6293877835977337916,
                       6293877835977963181,
                       6293877835978078505,
                       6293877835978273736,
                       6293877835978488673,
                       6293877835979433903,
                       6293877835980252254,
                       6293877835980664349,
                       6293877835980775267,
                       6293877835981025511,
                       6293877835981211222,
                       6293877835981472210,
                       6293877835982302685,
                       6293877835982691032,
                       6293877835982728999,
                       6293877835982761860,
                       6293877835983177794,
                       6293877835983193147,
                       6293877835983258504,
                       6293877835984530354,
                       6293877835984539408,
                       6293877835986958843,
                       6293877835987218779,
                       6293877835987292855,
                       6293877835987457585,
                       6293877835987721066,
                       6293877835988060933,
                       6293877835988338389,
                       6293877835988681023,
                       6293877835990254506,
                       6293877835990495825,
                       6293877835991275683,
                       6293877835991875083,
                       6293877835991918559,
                       6293877835992249839,
                       6293877835992274729,
                       6293877835996744033,
                       6293877835997188671,
                       6293877835997226421,
                       6293877835997530930,
                       6293877835998467645,
                       6293877835998897306,
                       6293877835999027799,
                       6293877835999394570,
                       6293877836000017289,
                       6293877836000676113,
                       6293877836000914356,
                       6294189047309457039,
                       6294189047310160402,
                       6294189047311313062,
                       6294189047312340940,
                       6294196636854508700,
                       ]
    # Fetch ranking-relevant counters for every target post (slave DB is OK
    # for this read-only batch).
    post_info_list = self._post_control.get_post_tab_ranking_info_by_post_ids(target_post_ids, with_slave=True)
    post_score_dict = {}
    for x in post_info_list:
        post_id = x.post_id
        post_score = self.get_post_ranking_score(post_info=x)
        # Redis zadd requires that the member key be a str.
        post_score_dict[str(post_id)] = post_score
    RedisService.init_video_tab_post_ranking_with_wfa_algorithm(post_score_dict)
if __name__ == "__main__":
try:
logging.getLogger().setLevel(logging.INFO)
handler = VideoTabRankingWithWFAlgorithmHandler()
handler.run()
handler.close_db_session()
except Exception as ex:
logging.info(traceback.format_exc())
method_end = True
|
# Demo: exercise the ReactSelect selenium wrapper against the public
# react-select example page (single-select then multi-select widgets).
from react_select.ReactSelect import ReactSelect
from selenium import webdriver
driver = webdriver.Chrome()
driver.get('https://jedwatson.github.io/react-select/')
# Single-select widget; the second ctor argument is presumably a
# wait/timeout value — TODO confirm against ReactSelect's signature.
react_select = ReactSelect(driver.find_element_by_class_name('Select'), 1)
react_select.open_menu()
# NOTE(review): _close_menu is private by convention; consider a public
# close_menu() on ReactSelect instead of reaching into internals.
react_select._close_menu()
react_select.select_by_visible_text('Western Australia')
react_select.deselect_all()
# Multi-select widget: select one option, then deselect it by position.
react_select = ReactSelect(driver.find_element_by_class_name('Select--multi'), 1)
react_select.select_by_visible_text("Vanilla")
react_select.deselect_by_index(0)
driver.close()
|
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from .playlist import Playlist
from .constants import *
if TYPE_CHECKING:
from ..spotify import SpotifyClient
from ..youtube import YoutubeClient
async def get_playlist(
    spotify: SpotifyClient, youtube: YoutubeClient, url: str
) -> Optional[Playlist]:
    """Resolve *url* to a Playlist, preferring Spotify for Spotify URLs.

    Falls back to YouTube for everything else (or when no Spotify client
    is available). Returns None when the chosen backend yields no data.
    """
    use_spotify = bool(SPOTIFY_RE.match(url)) and bool(spotify)
    if use_spotify:
        info = await spotify.fetch_full_playlist(url)
        if not info:
            return None
        return Playlist.from_spotify_dict(info)
    query_id = await youtube.get_query_id(url)
    info = await youtube.get_playlist_information(query_id)
    if not info:
        return None
    return Playlist.from_youtube_dict(info)
|
import json
import logging
from pprint import pprint
from urllib.parse import urlparse
import requests
import wikipedia
from PIL import Image
from opennem.api.photo.controllers import img_to_buffer, write_photo_to_s3
from opennem.core.loader import load_data
from opennem.core.normalizers import station_name_cleaner
from opennem.db import SessionLocal
from opennem.db.models.opennem import Photo, Station
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def article_from_wikipedia(url: str) -> str:
    """
    Return the article name from a wikipedia url.

    Article URLs look like ``https://en.wikipedia.org/wiki/<Article>``;
    the article is the third path component. Returns "" for URLs with no
    path or with fewer than three components (previously such URLs raised
    IndexError).
    """
    path = urlparse(url).path
    if not path:
        return ""
    # "/wiki/Article" splits into ["", "wiki", "Article"]
    parts = path.split("/")
    if len(parts) < 3:
        return ""
    return parts[2]
def dataid_from_url(url: str) -> str:
    """
    Return the Q id from a wikidata url.

    Wikidata item URLs look like ``https://www.wikidata.org/wiki/Q<id>``;
    the id is the third path component. Returns "" for URLs with no path
    or with fewer than three components (previously such URLs raised
    IndexError).
    """
    path = urlparse(url).path
    if not path:
        return ""
    # "/wiki/Q123" splits into ["", "wiki", "Q123"]
    parts = path.split("/")
    if len(parts) < 3:
        return ""
    return parts[2]
def wikidata_join():
    """Copy wikidata description/link fields onto stations matched by exact name.

    Entries with zero or multiple name matches are logged and skipped; the
    session is committed once at the end.
    """
    session = SessionLocal()
    entries = load_data("wikidata-parsed.json", from_project=True)
    for record in entries:
        station_name = record.get("name")
        matches = session.query(Station).filter(Station.name == station_name).all()
        if len(matches) == 0:
            logger.info("Didn't find a station for {}".format(station_name))
        elif len(matches) == 1:
            station = matches.pop()
            station.description = record.get("description")
            station.wikipedia_link = record.get("wikipedia")
            station.wikidata_id = record.get("wikidata_id")
            session.add(station)
            logger.info("Updated station {}".format(station_name))
        else:
            logger.info("Found multiple for station {}".format(station_name))
    session.commit()
def wikidata_parse():
    """Parse the raw wikidata dump into ``data/wikidata-parsed.json``.

    For each entry the wikipedia summary is fetched over the network
    (best effort: any fetch failure leaves description as None).
    """
    # query: https://w.wiki/dVi
    wikidata = load_data("wikidata.json", from_project=True)
    out_entries = []
    total_entries = len(wikidata)
    current = 0
    for entry in wikidata:
        wikilink = article_from_wikipedia(entry["article"])
        # Previously this rebound the name ``wikidata`` — the very list being
        # iterated. It only worked because the for-loop holds its own
        # iterator; a distinct name avoids the confusing shadowing.
        wikidata_id = dataid_from_url(entry["item"])
        station_name = station_name_cleaner(entry["itemLabel"])
        description = None
        try:
            description = wikipedia.summary(wikilink)
        except Exception as e:
            print(e)
        new_entry = {
            "wikipedia": entry["article"],
            "wikidata": entry["item"],
            "wiki_id": wikilink,
            "wikidata_id": wikidata_id,
            "name": station_name,
            "name_original": entry["itemLabel"],
            "description": description,
        }
        out_entries.append(new_entry)
        current += 1
        print("Done {} of {}".format(current, total_entries))
    with open("data/wikidata-parsed.json", "w") as fh:
        json.dump(out_entries, fh)
def get_image(image_url):
    """Fetch *image_url* and return it as a PIL Image, or None on any failure.

    Failures (network errors, non-image payloads) are logged and swallowed
    so the caller can simply skip the entry.
    """
    img = None
    try:
        # timeout so one hung remote host cannot stall the whole import run
        img = Image.open(requests.get(image_url, stream=True, timeout=30).raw)
    except Exception:
        logger.error("Error parsing: %s", image_url)
        return None
    return img
def wikidata_photos():
    """Download each photo in wikidata-photos.json and upload the original
    plus a thumbnail to S3, recording both as Photo rows.

    NOTE(review): when no station matches the wikidata id, the photo is
    still fetched and uploaded (the ``continue`` is commented out); only
    the station association is skipped — confirm this is intended.
    """
    session = SessionLocal()
    wikidata = load_data("wikidata-photos.json", from_project=True)
    for entry in wikidata:
        image_url = entry["thumb"]
        name = entry["itemLabel"]
        wiki_id = dataid_from_url(entry["item"])
        # Stations were linked to wikidata ids earlier by wikidata_join().
        station = (
            session.query(Station)
            .filter(Station.wikidata_id == wiki_id)
            .one_or_none()
        )
        if not station:
            print("Could not find station {}".format(name))
            # continue
        img = get_image(image_url)
        if not img:
            print("No image for {}".format(name))
            continue
        # file_name = urlparse(image_url).path.split("/")[-1:]
        file_name = "{}_{}.{}".format(
            name.replace(" ", "_"), "original", "jpeg"
        )
        photo = Photo(
            name=file_name,
            width=img.size[0],
            height=img.size[1],
            original_url=image_url,
        )
        img_buff = img_to_buffer(img)
        write_photo_to_s3(file_name, img_buff)
        if station:
            station.photos.append(photo)
        # In-place resize; the second upload below is the thumbnail variant.
        img.thumbnail((280, 340))
        file_name = "{}_{}.{}".format(
            name.replace(" ", "_"), img.size[0], "jpeg"
        )
        photo_thumb = Photo(
            name=file_name,
            width=img.size[0],
            height=img.size[1],
            original_url=image_url,
        )
        img_buff = img_to_buffer(img)
        write_photo_to_s3(file_name, img_buff)
        if station:
            station.photos.append(photo_thumb)
        session.add(photo)
        session.add(photo_thumb)
        if station:
            session.add(station)
    session.commit()
# Script entry point: link wikidata entries to stations, then fetch photos.
if __name__ == "__main__":
    wikidata_join()
    wikidata_photos()
|
import tempfile
import unittest
import numpy as np
from server.common.compute import diffexp_generic
from server.common.fbs.matrix import encode_matrix_fbs, decode_matrix_fbs
from server.compute import diffexp_cxg
from server.compute.diffexp_cxg import diffexp_ttest
from server.dataset.matrix_loader import MatrixDataLoader
from server.tests import FIXTURES_ROOT
from server.tests.unit import app_config
class DiffExpTest(unittest.TestCase):
    """Tests the diffexp returns the expected results for one test case, using different
    adaptor types and different algorithms."""

    def load_dataset(self, path, extra_server_config=None, extra_dataset_config=None):
        """Open the matrix at *path* and return its adaptor.

        The config arguments default to None instead of ``{}``: mutable
        default arguments are shared between calls, and this method writes
        into extra_dataset_config, so dict defaults would leak state
        across tests.
        """
        if extra_server_config is None:
            extra_server_config = {}
        if extra_dataset_config is None:
            extra_dataset_config = {}
        extra_dataset_config["X_approximate_distribution"] = "normal"  # hardwired for now
        config = app_config(path, extra_server_config=extra_server_config, extra_dataset_config=extra_dataset_config)
        loader = MatrixDataLoader(location=path, app_config=config)
        adaptor = loader.open()
        return adaptor

    def get_mask(self, adaptor, start, stride):
        """Return a boolean row mask selecting every *stride*-th row from *start*."""
        rows = adaptor.get_shape()[0]
        sel = list(range(start, rows, stride))
        mask = np.zeros(rows, dtype=bool)
        mask[sel] = True
        return mask

    def compare_diffexp_results(self, results, expects):
        """Assert each result row matches the expected row: first element
        exactly, remaining three to within np.isclose tolerances."""
        self.assertEqual(len(results), len(expects))
        for result, expect in zip(results, expects):
            self.assertEqual(result[0], expect[0])
            self.assertTrue(np.isclose(result[1], expect[1], 1e-6, 1e-4))
            self.assertTrue(np.isclose(result[2], expect[2], 1e-6, 1e-4))
            self.assertTrue(np.isclose(result[3], expect[3], 1e-6, 1e-4))

    def check_1_10_2_10(self, results):
        """Checks the results for a specific set of rows selections"""
        positive_expects = [
            [1712, 0.24104056, 0.0051788902660723345, 1.0],
            [1575, 0.2615018, 0.007830310753043345, 1.0],
            [693, 0.23106655, 0.008715846769131548, 1.0],
            [916, 0.2395215, 0.009080596532247588, 1.0],
            [77, 0.22927025, 0.010070392939027756, 1.0],
            [782, 0.20581803, 0.010161745218916036, 1.0],
            [913, 0.23841085, 0.010782030711612685, 1.0],
            [910, 0.21493295, 0.014596411069229197, 1.0],
            [1727, 0.21911663, 0.015168372104237176, 1.0],
            [1443, 0.19814226, 0.015337080567465522, 1.0],
        ]
        negative_expects = [
            [956, -0.29662406, 0.0008649321884808977, 1.0],
            [1124, -0.2607333, 0.0011717216548271284, 1.0],
            [1809, -0.24854594, 0.0019304405196777848, 1.0],
            [1754, -0.24683577, 0.005691734062127954, 1.0],
            [948, -0.18708363, 0.006622111055981219, 1.0],
            [1810, -0.2172082, 0.007055917428377063, 1.0],
            [779, -0.21150622, 0.007202934422407284, 1.0],
            [576, -0.19008157, 0.008272092578813124, 1.0],
            [538, -0.21803819, 0.01062259019889307, 1.0],
            [436, -0.2100364, 0.01127515110543434, 1.0],
        ]
        self.compare_diffexp_results(results["positive"], positive_expects)
        self.compare_diffexp_results(results["negative"], negative_expects)

    def get_X_col(self, adaptor, cols):
        """Return the X array restricted to the given column indices."""
        varmask = np.zeros(adaptor.get_shape()[1], dtype=bool)
        varmask[cols] = True
        return adaptor.get_X_array(None, varmask)

    def test_cxg_default(self):
        """Test a cxg adaptor with its default diffexp algorithm (diffexp_cxg)"""
        adaptor = self.load_dataset(f"{FIXTURES_ROOT}/pbmc3k.cxg")
        maskA = self.get_mask(adaptor, 1, 10)
        maskB = self.get_mask(adaptor, 2, 10)
        # run it through the adaptor
        results = adaptor.compute_diffexp_ttest(maskA, maskB, 10)
        self.check_1_10_2_10(results)
        # run it directly
        results = diffexp_ttest(adaptor, maskA, maskB, 10)
        self.check_1_10_2_10(results)

    def test_cxg_generic(self):
        """Test a cxg adaptor with the generic adaptor"""
        adaptor = self.load_dataset(f"{FIXTURES_ROOT}/pbmc3k.cxg")
        maskA = self.get_mask(adaptor, 1, 10)
        maskB = self.get_mask(adaptor, 2, 10)
        # run it directly
        results = diffexp_generic.diffexp_ttest(adaptor, maskA, maskB, 10)
        self.check_1_10_2_10(results)

    def test_cxg_sparse(self):
        """Sparse vs dense datasets without a column shift must agree."""
        adaptor_sparse = self.load_dataset(f"{FIXTURES_ROOT}/diffexp/sparse_no_col_shift.cxg")
        adaptor_dense = self.load_dataset(f"{FIXTURES_ROOT}/diffexp/dense_no_col_shift.cxg")
        assert not adaptor_dense.has_array('X_col_shift')  # sanity check
        assert not adaptor_sparse.has_array('X_col_shift')  # sanity check
        self.sparse_diffexp(adaptor_dense, adaptor_sparse)

    def test_cxg_sparse_col_shift(self):
        """Sparse (column-shifted) vs dense datasets must agree."""
        adaptor_sparse = self.load_dataset(f"{FIXTURES_ROOT}/diffexp/sparse_col_shift.cxg")
        adaptor_dense = self.load_dataset(f"{FIXTURES_ROOT}/diffexp/dense_col_shift.cxg")
        assert not adaptor_dense.has_array('X_col_shift')  # sanity check
        assert adaptor_sparse.has_array('X_col_shift')  # sanity check
        self.sparse_diffexp(adaptor_dense, adaptor_sparse)

    def sparse_diffexp(self, adaptor_dense, adaptor_sparse):
        """Check sparse and dense encodings of the same data produce identical
        diffexp results and identical X column values.

        (The previous version wrapped this body in an unused
        tempfile.TemporaryDirectory; nothing referenced it, so it was removed.)
        """
        maskA = self.get_mask(adaptor_dense, 1, 10)
        maskB = self.get_mask(adaptor_dense, 2, 10)
        diffexp_results_sparse = diffexp_cxg.diffexp_ttest(adaptor_sparse, maskA, maskB, 10)
        diffexp_results_dense = diffexp_cxg.diffexp_ttest(adaptor_dense, maskA, maskB, 10)
        self.compare_diffexp_results(diffexp_results_dense["positive"], diffexp_results_sparse["positive"])
        self.compare_diffexp_results(diffexp_results_dense["negative"], diffexp_results_sparse["negative"])
        # Pull the top-ranked columns from both adaptors and compare values.
        topcols_pos = np.array([x[0] for x in diffexp_results_dense["positive"]])
        topcols_neg = np.array([x[0] for x in diffexp_results_dense["negative"]])
        topcols = np.concatenate((topcols_pos, topcols_neg))
        cols_sparse = self.get_X_col(adaptor_sparse, topcols)
        cols_dense = self.get_X_col(adaptor_dense, topcols)
        assert cols_dense.shape[0] == adaptor_sparse.get_shape()[0]
        assert cols_dense.shape[1] == len(diffexp_results_dense["positive"]) + len(
            diffexp_results_dense["negative"]
        )

        def convert(mat, cols):
            # Round-trip through the FBS wire format so both matrices are
            # compared in the same decoded representation.
            return decode_matrix_fbs(encode_matrix_fbs(mat, col_idx=cols)).to_numpy()

        cols_sparse = convert(cols_sparse, topcols)
        cols_dense = convert(cols_dense, topcols)
        x = adaptor_sparse.get_X_array()
        assert x.shape == adaptor_sparse.get_shape()
        for row in range(cols_dense.shape[0]):
            for col in range(cols_dense.shape[1]):
                vsparse = cols_sparse[row][col]
                vdense = cols_dense[row][col]
                self.assertTrue(np.isclose(vdense, vsparse, 1e-6, 1e-6))
|
import csv
#as always, something in my system changes the current dir to F:/Git
# with open("data.csv", "w") as file:
# writer = csv.writer(file, lineterminator='\n') #lineterminator='\n' removes blank rows, but makes this code not *nix-compatible
# writer.writerow(["transaction_id", "product_id", "price"])
# writer.writerow([1000,1,5])
# writer.writerow([1001,2,15])
# Demo: iterate a dict's items, printing the key and value on separate lines.
t = {'aaa': 5, 'bbb': 6}
for key, value in t.items():
    print(f'Key {key}\nValue {value}')
from . import provider
from . import system
from . import cview |
#!/usr/bin/env python
# encoding: utf-8
from unittest import TestCase
import mock
import time
from ycyc.ycollections.namedict import namedict, RequireFieldsMissError
class TestNamedDict(TestCase):
    """Behavioural tests for ycyc.ycollections.namedict: required fields,
    positional/keyword filling, defaults, unknown-argument logging."""
    def test_usage(self):
        """End-to-end namedict usage: requires, fields as dict and as pairs."""
        cur_time = time.time()
        Params = namedict(
            "InvokeParams",
            requires=("sender", "callback"),
            fields={
                "value": None,
                "time": cur_time,
            },
        )
        self.assertEqual(Params.__name__, "InvokeParams")
        # NOTE(review): joining a *set* yields an order-dependent regex; under
        # hash randomization the order may not match the error message and the
        # assertion could be flaky — confirm the message format in namedict.
        # (assertRaisesRegexp is the deprecated alias of assertRaisesRegex.)
        with self.assertRaisesRegexp(
            RequireFieldsMissError, ", ".join({"sender", "callback"})
        ):
            Params()
        with self.assertRaisesRegexp(RequireFieldsMissError, "sender"):
            Params(callback=None)
        with self.assertRaisesRegexp(RequireFieldsMissError, "callback"):
            Params(sender=None)
        params = Params("test", 1)
        self.assertIsInstance(params, dict)
        self.assertEqual(params.sender, "test")
        self.assertEqual(params.callback, 1)
        self.assertEqual(params.value, None)
        self.assertEqual(params.time, cur_time)
        params = Params(sender="test", callback=2)
        self.assertIsInstance(params, dict)
        self.assertEqual(params.sender, "test")
        self.assertEqual(params.callback, 2)
        self.assertEqual(params.value, None)
        self.assertEqual(params.time, cur_time)
        params = Params("test", None, value=1)
        self.assertIsInstance(params, dict)
        self.assertEqual(params.sender, "test")
        self.assertEqual(params.callback, None)
        self.assertEqual(params.value, 1)
        self.assertEqual(params.time, cur_time)
        params = Params("test", None, value=1, time=2)
        self.assertIsInstance(params, dict)
        self.assertEqual(params.sender, "test")
        self.assertEqual(params.callback, None)
        self.assertEqual(params.value, 1)
        self.assertEqual(params.time, 2)
        # Same fields declared as ordered pairs: extra positionals fill them
        # in declaration order.
        Params = namedict(
            "InvokeParams",
            requires=("sender", "callback"),
            fields=(
                ("value", None),
                ("time", cur_time),
            ),
        )
        params = Params("test", None, 1)
        self.assertIsInstance(params, dict)
        self.assertEqual(params.sender, "test")
        self.assertEqual(params.callback, None)
        self.assertEqual(params.value, 1)
        self.assertEqual(params.time, cur_time)
        params = Params("test", None, 1, 2)
        self.assertIsInstance(params, dict)
        self.assertEqual(params.sender, "test")
        self.assertEqual(params.callback, None)
        self.assertEqual(params.value, 1)
        self.assertEqual(params.time, 2)
        params = Params("test", None, 1, value=2)
        self.assertIsInstance(params, dict)
        self.assertEqual(params.sender, "test")
        self.assertEqual(params.callback, None)
        self.assertEqual(params.value, 1)
        self.assertEqual(params.time, cur_time)
        # Unknown keyword arguments are ignored rather than stored.
        params = Params("test", None, 1, value=2, nothing=3)
        self.assertIsInstance(params, dict)
        self.assertEqual(params.sender, "test")
        self.assertEqual(params.callback, None)
        self.assertEqual(params.value, 1)
        self.assertEqual(params.time, cur_time)
        with self.assertRaises(AttributeError):
            params.nothing
        Params = namedict("Params", ["key", "value"])
        params = Params("test", value=2)
        self.assertEqual(params.key, "test")
        self.assertEqual(params.value, 2)
    def test_logging(self):
        """Extra positional/keyword arguments are reported via logger.warning."""
        with mock.patch("ycyc.ycollections.namedict.logger"):
            from ycyc.ycollections.namedict import logger
            TestDict = namedict("TestDict", {"val": None})
            tdict = TestDict(1)
            self.assertEqual(tdict.val, 1)
            self.assertEqual(logger.warning.call_count, 0)  # pylint: disable=E1101
            tdict = TestDict(1, 2)
            self.assertEqual(tdict.val, 1)
            self.assertEqual(logger.warning.call_count, 1)  # pylint: disable=E1101
            self.assertListEqual(logger.warning.call_args[0][1], [2])  # pylint: disable=E1101
            tdict = TestDict(1, nothing=2)
            self.assertEqual(tdict.val, 1)
            self.assertEqual(logger.warning.call_count, 2)  # pylint: disable=E1101
            self.assertSetEqual(logger.warning.call_args[0][1], {"nothing"})  # pylint: disable=E1101
|
import os
import shutil
import unittest
import logging
import pymel.core as pm
from Luna import Config
from Luna import Logger
from Luna import TestVars
from Luna.static import directories
class TestCase(unittest.TestCase):
    """
    Base class for unit test cases run in Maya.
    Tests do not have to inherit from this TestCase but this derived TestCase contains convenience
    functions to load/unload plug-ins and clean up temporary files.
    """

    # Class-level registries shared by the whole run; emptied in tearDownClass.
    files_created = []
    dirs_created = []
    plugins_loaded = set()

    @classmethod
    def tearDownClass(cls):
        """Delete temp files/dirs and unload plug-ins after the class finishes."""
        super(TestCase, cls).tearDownClass()
        cls.delete_temp_files()
        cls.unload_plugins()

    @classmethod
    def load_plugin(cls, plugin):
        """Load *plugin* quietly and remember it for unloading in teardown."""
        pm.loadPlugin(plugin, qt=1)
        cls.plugins_loaded.add(plugin)

    @classmethod
    def unload_plugins(cls):
        """Unload every plug-in previously loaded through load_plugin."""
        for plugin in cls.plugins_loaded:
            pm.unloadPlugin(plugin)
        cls.plugins_loaded = set()

    @classmethod
    def delete_temp_files(cls):
        """Delete the temp files in the cache and clear the cache."""
        # If we don't want to keep temp files around for debugging purposes, delete them when
        # all tests in this TestCase have been run
        # (default= keyword used for consistency with every other Config.get
        # call in this module; previously this one passed it positionally)
        if Config.get(TestVars.delete_dirs, default=True):
            for d in cls.dirs_created:
                if os.path.isdir(d):
                    shutil.rmtree(d)
                    Logger.info("Deleted dir: {0}".format(d))
            cls.dirs_created = []
        if Config.get(TestVars.delete_files, default=True):
            for f in cls.files_created:
                if os.path.exists(f):
                    os.remove(f)
                    Logger.info("Deleted temp file: {0}".format(f))
            cls.files_created = []

    @classmethod
    def get_temp_filename(cls, file_name):
        """Return a unique path for *file_name* inside the temp dir and
        register it for cleanup. A numeric suffix is appended on collision."""
        temp_dir = Config.get(TestVars.temp_dir)
        if not temp_dir:
            temp_dir = pm.internalVar(utd=1)
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
        base_name, ext = os.path.splitext(file_name)
        path = os.path.join(temp_dir, base_name + ext)
        count = 0
        while os.path.exists(path):
            count += 1
            path = os.path.join(temp_dir, "{0}{1}{2}".format(base_name, count, ext))
        cls.files_created.append(path)
        return path

    @classmethod
    def get_temp_dirname(cls, dir_name):
        """Return a unique directory path under the temp dir and register it
        for cleanup. A numeric suffix is appended on collision."""
        temp_dir = Config.get(TestVars.temp_dir)
        if not temp_dir:
            temp_dir = pm.internalVar(utd=1)
        path = os.path.join(temp_dir, dir_name)
        count = 0
        while os.path.exists(path):
            count += 1
            path = os.path.join(temp_dir, "{0}{1}".format(dir_name, count))
        cls.dirs_created.append(path)
        return path
class TestResult(unittest.TextTestResult):
    """Customize the test result so we can do things like do a file new between each test and suppress script
    editor output.
    """
    def startTestRun(self):
        """Called before any tests are run."""
        super(TestResult, self).startTestRun()
        ScriptEditorState.suppress_output()
        if Config.get(TestVars.buffer_output, default=True):
            # Disable any logging while running tests. By disabling critical, we are disabling logging
            # at all levels below critical as well
            logging.disable(logging.CRITICAL)
    def stopTestRun(self):
        """Called after all tests are run."""
        if Config.get(TestVars.buffer_output, default=True):
            # Restore logging state
            logging.disable(logging.NOTSET)
        ScriptEditorState.restore_output()
        super(TestResult, self).stopTestRun()
    def stopTest(self, test):
        """Called after an individual test is run.

        Opens a fresh scene (file new) when the new_file setting is on, so
        tests do not leak Maya scene state into each other.

        Args:
            test (unittest.TestCase): the test that just finished.
        """
        super(TestResult, self).stopTest(test)
        if Config.get(TestVars.new_file, default=True):
            pm.newFile(f=1)
class ScriptEditorState(object):
    """Provides methods to suppress and restore script editor output."""
    # Used to restore logging states in the script editor.
    # None means "not yet captured" — restore_output is a no-op until
    # suppress_output has recorded all four flags.
    suppress_results = None
    suppress_errors = None
    suppress_warnings = None
    suppress_info = None
    @classmethod
    def suppress_output(cls):
        """Hides all script editor output."""
        if Config.get(TestVars.buffer_output, default=True):
            # Record the current flags first so restore_output can put them back.
            cls.suppress_results = pm.scriptEditorInfo(q=True, suppressResults=True)
            cls.suppress_errors = pm.scriptEditorInfo(q=True, suppressErrors=True)
            cls.suppress_warnings = pm.scriptEditorInfo(q=True, suppressWarnings=True)
            cls.suppress_info = pm.scriptEditorInfo(q=True, suppressInfo=True)
            pm.scriptEditorInfo(
                e=True,
                suppressResults=True,
                suppressInfo=True,
                suppressWarnings=True,
                suppressErrors=True,
            )
    @classmethod
    def restore_output(cls):
        """Restores the script editor output settings to their original values."""
        if None not in {
            cls.suppress_results,
            cls.suppress_errors,
            cls.suppress_warnings,
            cls.suppress_info,
        }:
            pm.scriptEditorInfo(
                e=True,
                suppressResults=cls.suppress_results,
                suppressInfo=cls.suppress_info,
                suppressWarnings=cls.suppress_warnings,
                suppressErrors=cls.suppress_errors,
            )
def run_all_tests():
    """Discover every *_test.py under the test directory and run it with the
    Maya-aware TestResult (fresh scene per test, suppressed script editor)."""
    loader = unittest.TestLoader()
    suite = loader.discover(start_dir=directories.TEST_DIR_PATH, pattern="*_test.py")
    Logger.info("Running {0} tests...".format(suite.countTestCases()))
    runner = unittest.TextTestRunner(verbosity=2, resultclass=TestResult)
    runner.failfast = False
    runner.buffer = Config.get(TestVars.buffer_output, default=True)
    runner.run(suite)
# Allow running the whole Maya test suite directly.
if __name__ == "__main__":
    run_all_tests()
|
#!/usr/bin/env python3
# Copyright (c) 2017-2021 The Particl Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import random
from test_framework.test_particl import GhostTestFramework
from test_framework.util import assert_raises_rpc_error
from test_framework.address import base58_to_byte
from test_framework.key import SECP256K1, ECPubKey
from test_framework.messages import COIN
from test_framework.messages import sha256
class AnonTest(GhostTestFramework):
    """Functional test for anon transactions: part/blind/anon sends, wallet
    recovery from mnemonic, coin control, hand-crafted anon raw transactions,
    rangeproof rewinding and key-image queries.

    The RPC calls below are strictly order-dependent (balances, anon output
    indices and block heights build on each other); do not reorder.
    """
    def set_test_params(self):
        # Three nodes on a clean chain with anon restrictions disabled.
        self.setup_clean_chain = True
        self.num_nodes = 3
        self.extra_args = [ ['-debug','-noacceptnonstdtxn', '-anonrestricted=0', '-reservebalance=10000000'] for i in range(self.num_nodes)]
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def setup_network(self, split=False):
        # node0 is connected to node1 and node2; node1 and node2 are not
        # directly connected to each other.
        self.add_nodes(self.num_nodes, extra_args=self.extra_args)
        self.start_nodes()
        self.connect_nodes_bi(0, 1)
        self.connect_nodes_bi(0, 2)
        self.sync_all()
    def run_test(self):
        nodes = self.nodes
        self.import_genesis_coins_a(nodes[0])
        txnHashes = []
        # node1 uses a fixed mnemonic so its stealth address is deterministic.
        nodes[1].extkeyimportmaster('drip fog service village program equip minute dentist series hawk crop sphere olympic lazy garbage segment fox library good alley steak jazz force inmate')
        sxAddrTo1_1 = nodes[1].getnewstealthaddress('lblsx11')
        assert(sxAddrTo1_1 == 'TetbYTGv5LiqyFiUD3a5HHbpSinQ9KiRYDGAMvRzPfz4RnHMbKGAwDr1fjLGJ5Eqg1XDwpeGyqWMiwdK3qM3zKWjzHNpaatdoHVzzA')
        nodes[2].extkeyimportmaster(nodes[2].mnemonic('new')['master'])
        sxAddrTo0_1 = nodes[0].getnewstealthaddress('lblsx01')
        # Seed a mix of part->anon, part->blind and blind->anon transactions.
        txnHashes.append(nodes[0].sendparttoanon(sxAddrTo1_1, 1, '', '', False, 'node0 -> node1 p->a'))
        txnHashes.append(nodes[0].sendparttoblind(sxAddrTo0_1, 1000, '', '', False, 'node0 -> node0 p->b'))
        txnHashes.append(nodes[0].sendblindtoanon(sxAddrTo1_1, 100, '', '', False, 'node0 -> node1 b->a 1'))
        txnHashes.append(nodes[0].sendblindtoanon(sxAddrTo1_1, 100, '', '', False, 'node0 -> node1 b->a 2'))
        txnHashes.append(nodes[0].sendblindtoanon(sxAddrTo1_1, 100, '', '', False, 'node0 -> node1 b->a 3'))
        txnHashes.append(nodes[0].sendblindtoanon(sxAddrTo1_1, 10, '', '', False, 'node0 -> node1 b->a 4'))
        for k in range(5):
            txnHash = nodes[0].sendparttoanon(sxAddrTo1_1, 10, '', '', False, 'node0 -> node1 p->a')
            txnHashes.append(txnHash)
        for k in range(10):
            txnHash = nodes[0].sendblindtoanon(sxAddrTo1_1, 10, '', '', False, 'node0 -> node1 b->a')
            txnHashes.append(txnHash)
        for h in txnHashes:
            assert(self.wait_for_mempool(nodes[1], h))
        assert('node0 -> node1 b->a 4' in self.dumpj(nodes[1].listtransactions('*', 100)))
        assert('node0 -> node1 b->a 4' in self.dumpj(nodes[0].listtransactions('*', 100)))
        # Mine the seeded txns and confirm they all appear in block 1.
        self.stakeBlocks(2)
        block1_hash = nodes[1].getblockhash(1)
        ro = nodes[1].getblock(block1_hash)
        for txnHash in txnHashes:
            assert(txnHash in ro['tx'])
        txnHash = nodes[1].sendanontoanon(sxAddrTo0_1, 1, '', '', False, 'node1 -> node0 a->a')
        txnHashes = [txnHash,]
        assert(self.wait_for_mempool(nodes[0], txnHash))
        self.stakeBlocks(1)
        ro = nodes[1].getblock(nodes[1].getblockhash(3))
        for txnHash in txnHashes:
            assert(txnHash in ro['tx'])
        assert(nodes[1].anonoutput()['lastindex'] == 28)
        txnHashes.clear()
        txnHashes.append(nodes[1].sendanontoanon(sxAddrTo0_1, 101, '', '', False, 'node1 -> node0 a->a', 5, 1))
        txnHashes.append(nodes[1].sendanontoanon(sxAddrTo0_1, 0.1, '', '', False, '', 5, 2))
        assert(nodes[1].getwalletinfo()['anon_balance'] > 10)
        outputs = [{'address': sxAddrTo0_1, 'amount': 10, 'subfee': True},]
        # Final True = fee estimate only; the send itself happens below.
        ro = nodes[1].sendtypeto('anon', 'part', outputs, 'comment_to', 'comment_from', 4, 32, True)
        assert(ro['bytes'] > 0)
        txnHashes.append(nodes[1].sendtypeto('anon', 'part', outputs))
        txnHashes.append(nodes[1].sendtypeto('anon', 'anon', [{'address': sxAddrTo1_1, 'amount': 1},]))
        for txhash in txnHashes:
            assert(self.wait_for_mempool(nodes[0], txhash))
        self.log.info('Test filtertransactions with type filter')
        ro = nodes[1].filtertransactions({'type': 'anon', 'count': 20, 'show_anon_spends': True, 'show_change': True})
        assert(len(ro) > 2)
        foundTx = 0
        for t in ro:
            if t['txid'] == txnHashes[-1]:
                foundTx += 1
                assert(t['amount'] == t['fee'])
            elif t['txid'] == txnHashes[-2]:
                foundTx += 1
                assert('anon_inputs' in t)
                assert(t['amount'] < -9.9 and t['amount'] > -10.0)
                n_standard = 0
                n_anon = 0
                for to in t['outputs']:
                    if to['type'] == 'standard':
                        n_standard += 1
                    elif to['type'] == 'anon':
                        n_anon += 1
                        assert(to['is_change'] == 'true')
                assert(n_standard == 1)
                assert(n_anon > 0)
                assert(t['type_in'] == 'anon')
            if t['txid'] == txnHashes[-3]:
                foundTx += 1
                assert(t['outputs'][0]['type'] == 'anon')
            if foundTx > 2:
                break
        assert(foundTx > 2)
        self.log.info('Test unspent with address filter')
        unspent_filtered = nodes[1].listunspentanon(1, 9999, [sxAddrTo1_1])
        assert(unspent_filtered[0]['label'] == 'lblsx11')
        self.log.info('Test permanent lockunspent')
        unspent = nodes[1].listunspentanon()
        assert(nodes[1].lockunspent(False, [unspent[0]], True) == True)
        assert(nodes[1].lockunspent(False, [unspent[1]], True) == True)
        assert(len(nodes[1].listlockunspent()) == 2)
        # Restart node
        self.sync_all()
        self.stop_node(1)
        self.start_node(1, self.extra_args[1] + ['-wallet=default_wallet',])
        self.connect_nodes_bi(0, 1)
        # Permanent locks must survive the restart.
        assert(len(nodes[1].listlockunspent()) == 2)
        assert(len(nodes[1].listunspentanon()) < len(unspent))
        assert(nodes[1].lockunspent(True, [unspent[0]]) == True)
        assert_raises_rpc_error(-8, 'Invalid parameter, expected locked output', nodes[1].lockunspent, True, [unspent[0]])
        assert(len(nodes[1].listunspentanon()) == len(unspent)-1)
        assert(nodes[1].lockunspent(True) == True)
        assert(len(nodes[1].listunspentanon()) == len(unspent))
        assert(nodes[1].lockunspent(True) == True)
        ro = nodes[2].getblockstats(nodes[2].getblockchaininfo()['blocks'])
        assert(ro['height'] == 3)
        self.log.info('Test recover from mnemonic')
        # Txns currently in the mempool will be reprocessed in the next block
        self.stakeBlocks(1)
        wi_1 = nodes[1].getwalletinfo()
        nodes[1].createwallet('test_import')
        w1_2 = nodes[1].get_wallet_rpc('test_import')
        w1_2.extkeyimportmaster('drip fog service village program equip minute dentist series hawk crop sphere olympic lazy garbage segment fox library good alley steak jazz force inmate')
        w1_2.getnewstealthaddress('lblsx11')
        w1_2.rescanblockchain(0)
        wi_1_2 = w1_2.getwalletinfo()
        assert(wi_1_2['anon_balance'] == wi_1['anon_balance'])
        nodes[1].createwallet('test_import_locked')
        w1_3 = nodes[1].get_wallet_rpc('test_import_locked')
        w1_3.encryptwallet('test')
        assert_raises_rpc_error(-13, 'Error: Wallet locked, please enter the wallet passphrase with walletpassphrase first.', w1_3.filtertransactions, {'show_blinding_factors': True})
        assert_raises_rpc_error(-13, 'Error: Wallet locked, please enter the wallet passphrase with walletpassphrase first.', w1_3.filtertransactions, {'show_anon_spends': True})
        w1_3.walletpassphrase('test', 30)
        # Skip initial rescan by passing -1 as scan_chain_from
        w1_3.extkeyimportmaster('drip fog service village program equip minute dentist series hawk crop sphere olympic lazy garbage segment fox library good alley steak jazz force inmate',
                                '', False, 'imported key', 'imported acc', -1)
        w1_3.getnewstealthaddress('lblsx11')
        w1_3.walletsettings('other', {'onlyinstance': False})
        w1_3.walletlock()
        assert(w1_3.getwalletinfo()['encryptionstatus'] == 'Locked')
        # Rescan works on the locked wallet; balances need the passphrase.
        w1_3.rescanblockchain(0)
        w1_3.walletpassphrase('test', 30)
        wi_1_3 = w1_3.getwalletinfo()
        assert(wi_1_3['anon_balance'] == wi_1['anon_balance'])
        # Coverage
        w1_3.sendanontoblind(sxAddrTo0_1, 1.0)
        w1_3.sendanontopart(sxAddrTo0_1, 1.0)
        self.log.info('Test sendtypeto coincontrol')
        w1_inputs = w1_2.listunspentanon()
        assert(len(w1_inputs) > 1)
        use_input = w1_inputs[random.randint(0, len(w1_inputs) - 1)]
        coincontrol = {'inputs': [{'tx': use_input['txid'], 'n': use_input['vout']}]}
        txid = w1_2.sendtypeto('anon', 'anon', [{'address': sxAddrTo0_1, 'amount': 0.01}, ], '', '', 7, 1, False, coincontrol)
        w1_inputs_after = w1_2.listunspentanon()
        for txin in w1_inputs_after:
            if txin['txid'] == use_input['txid'] and txin['vout'] == use_input['vout']:
                raise ValueError('Output should be spent')
        assert(self.wait_for_mempool(nodes[1], txid))
        # The chosen input must appear in the ring member candidates.
        raw_tx = w1_2.getrawtransaction(txid, True)
        possible_inputs = raw_tx['vin'][0]['ring_row_0'].split(', ')
        possible_inputs_txids = []
        for pi in possible_inputs:
            anonoutput = w1_2.anonoutput(pi)
            possible_inputs_txids.append(anonoutput['txnhash'] + '.' + str(anonoutput['n']))
        assert(use_input['txid'] + '.' + str(use_input['vout']) in possible_inputs_txids)
        # Stake until node0's anon balance has fully matured.
        num_tries = 20
        for i in range(num_tries):
            if nodes[0].getbalances()['mine']['anon_immature'] == 0.0:
                break
            self.stakeBlocks(1)
            if i >= num_tries - 1:
                raise ValueError('anon balance immature')
        assert(nodes[0].getbalances()['mine']['anon_trusted'] > 100.0)
        self.log.info('Test crafting anon transactions.')
        sxAddr2_1 = nodes[2].getnewstealthaddress('lblsx01')
        ephem = nodes[0].derivefromstealthaddress(sxAddr2_1)
        # NOTE: random.getrandbits is fine for a test blinding factor; never
        # use it for real key material.
        blind = bytes(random.getrandbits(8) for i in range(32)).hex()
        outputs = [{
            'address': sxAddr2_1,
            'type': 'anon',
            'amount': 10.0,
            'blindingfactor': blind,
            'ephemeral_key': ephem['ephemeral_privatekey'],
        },]
        tx = nodes[0].createrawparttransaction([], outputs)
        options = {'sign_tx': True}
        tx_signed = nodes[0].fundrawtransactionfrom('anon', tx['hex'], {}, tx['amounts'], options)
        txid = nodes[0].sendrawtransaction(tx_signed['hex'])
        self.stakeBlocks(1)
        sx_privkey = nodes[2].dumpprivkey(sxAddr2_1)
        assert('scan_secret' in sx_privkey)
        assert('spend_secret' in sx_privkey)
        sx_pubkey = nodes[2].getaddressinfo(sxAddr2_1)
        assert('scan_public_key' in sx_pubkey)
        assert('spend_public_key' in sx_pubkey)
        stealth_key = nodes[2].derivefromstealthaddress(sxAddr2_1, ephem['ephemeral_pubkey'])
        prevtx = nodes[2].decoderawtransaction(tx_signed['hex'])
        # Locate our anon output by verifying the commitment / rangeproof.
        found_output = -1
        for vout in prevtx['vout']:
            if vout['type'] != 'anon':
                continue
            try:
                ro = nodes[2].verifycommitment(vout['valueCommitment'], blind, 10.0)
                assert(ro['result'] is True)
                ro = nodes[2].rewindrangeproof(vout['rangeproof'], vout['valueCommitment'], stealth_key['privatekey'], ephem['ephemeral_pubkey'])
                assert(ro['amount'] == 10.0)
                found_output = vout['n']
            except Exception as e:
                if not str(e).startswith('Mismatched commitment'):
                    print(e)
        assert(found_output > -1)
        key_bytes = base58_to_byte(stealth_key['privatekey'])[0][0:32]
        epk = ECPubKey()
        epk.set(bytes.fromhex(ephem['ephemeral_pubkey']))
        self.log.info('Test rewindrangeproof with final nonce')
        # ECDH
        P = SECP256K1.affine(epk.p)
        M = SECP256K1.affine(SECP256K1.mul([((P[0], P[1], P[2]), int.from_bytes(key_bytes, 'big'))]))
        eM = bytes([0x02 + (M[1] & 1)]) + M[0].to_bytes(32, 'big')
        hM = sha256(eM)
        hhM = sha256(hM)
        # Reverse, SetHex is LE
        hhM = hhM[::-1]
        vout = prevtx['vout'][found_output]
        ro = nodes[2].rewindrangeproof(vout['rangeproof'], vout['valueCommitment'], hhM.hex())
        assert(ro['amount'] == 10.0)
        self.log.info('Test signing for unowned anon input')  # Input not in wallet, must be in chain for pubkey index
        prev_tx_signed = nodes[0].decoderawtransaction(tx_signed['hex'])
        prev_commitment = prev_tx_signed['vout'][found_output]['valueCommitment']
        prev_public_key = prev_tx_signed['vout'][found_output]['pubkey']
        assert(prev_public_key == stealth_key['pubkey'])
        outputs = [{
            'address': sxAddr2_1,
            'type': 'anon',
            'amount': 10.0,
        },]
        tx = nodes[0].createrawparttransaction([], outputs)
        options = {
            'subtractFeeFromOutputs': [0,],
            'inputs': [{
                'tx': txid,
                'n': found_output,
                'type': 'anon',
                'value': 10.0,
                'commitment': prev_commitment,
                'pubkey': prev_public_key,
                'privkey': stealth_key['privatekey'],
                'blind': blind,
            }],
            'feeRate': 0.001,
            'sign_tx': True,
        }
        input_amounts = {
        }
        used_input = (txid, found_output)
        tx_signed = nodes[0].fundrawtransactionfrom('anon', tx['hex'], input_amounts, tx['amounts'], options)
        # Retry until the spent output is deep enough to be accepted.
        num_tries = 20
        for i in range(num_tries):
            try:
                spending_txid = nodes[0].sendrawtransaction(tx_signed['hex'])
                break
            except Exception:
                self.stakeBlocks(1)
            if i >= num_tries - 1:
                raise ValueError('Can\'t submit txn')
        assert(self.wait_for_mempool(nodes[2], spending_txid))
        self.stakeBlocks(1)
        w2b = nodes[2].getbalances()
        assert(w2b['mine']['anon_immature'] < 10 and w2b['mine']['anon_immature'] > 9)
        self.log.info('Test subfee edge case')
        unspents = nodes[0].listunspent()
        total_input = int(unspents[0]['amount'] * COIN) + int(unspents[1]['amount'] * COIN)
        total_output = total_input - 1
        coincontrol = {'test_mempool_accept': True, 'show_hex': True, 'show_fee': True, 'inputs': [{'tx': unspents[0]['txid'],'n': unspents[0]['vout']}, {'tx': unspents[1]['txid'],'n': unspents[1]['vout']}]}
        outputs = [{'address': sxAddrTo0_1, 'amount': '%i.%08i' % (total_output // COIN, total_output % COIN), 'narr': '', 'subfee' : True},]
        tx = nodes[0].sendtypeto('part', 'anon', outputs, 'comment', 'comment-to', 5, 1, False, coincontrol)
        assert(total_input == int(tx['fee'] * COIN) + int(tx['outputs_fee'][sxAddrTo0_1]))
        assert(tx['mempool-allowed'] == True)
        self.log.info('Test checkkeyimage')
        unspents = nodes[0].listunspentanon(0, 999999, [], True, {'show_pubkeys': True})
        anon_pubkey = unspents[0]['pubkey']
        keyimage = nodes[0].getkeyimage(anon_pubkey)['keyimage']
        spent = nodes[0].checkkeyimage(keyimage)
        assert(spent['spent'] is False)
        raw_tx = nodes[0].decoderawtransaction(nodes[0].gettransaction(used_input[0])['hex'])
        used_pubkey = raw_tx['vout'][used_input[1]]['pubkey']
        used_keyimage = nodes[2].getkeyimage(used_pubkey)['keyimage']
        spent = nodes[0].checkkeyimage(used_keyimage)
        assert(spent['spent'] is True)
        assert(spent['txid'] == spending_txid)
# Standard bitcoin-style functional test entry point.
if __name__ == '__main__':
    AnonTest().main()
|
from random import randint
from time import sleep

# Guessing game: the program picks a number in [0, 10] and the player
# keeps guessing until they hit it; attempts are counted and reported.
secret = randint(0, 10)
print('=' * 60)
print('\33[32mVou pensar em um número entre 0 e 10. Tente adivinhar!\33[m')
print('=' * 60)
guess = int(input('Qual número eu pensei? '))
print('\33[32mPROCESSANDO...\33[m')
sleep(2)
attempts = 1
while guess != secret:
    print('\n\33[34mVocê errou.\33[m Tente novamente!')
    guess = int(input('Qual número eu pensei? '))
    attempts += 1
print('\33[36m\nParábens! Você acertou na {}ª tentativa, o nº escolhido foi {}.\33[m'.format(attempts, secret))
print('=' * 60)
from django import forms
from django.conf import settings
from django.contrib.auth.models import User
from .constants import *
from .models import ObjectChange
def add_blank_choice(choices):
    """
    Add a blank choice to the beginning of a choices list.
    """
    blank = ((None, "---------"),)
    return blank + tuple(choices)
class BulkEditForm(forms.Form):
    """
    Base form for editing several objects at the same time.
    """

    def __init__(self, model, parent_object=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model = model
        self.parent_object = parent_object
        # Fields listed in Meta.nullable_fields may be blanked out in bulk.
        self.nullable_fields = getattr(self.Meta, "nullable_fields", [])
class BootstrapMixin(forms.BaseForm):
    """
    Form mixin that decorates every field widget with the Bootstrap CSS
    classes expected by the templates.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        checkable = [forms.CheckboxInput, forms.RadioSelect]
        for field_name, field in self.fields.items():
            widget = field.widget
            existing = widget.attrs.get("class", "")
            if widget.__class__ in checkable:
                extra = "custom-control-input"
            else:
                extra = "form-control"
            widget.attrs["class"] = " ".join([existing, extra]).strip()
            if field.required:
                widget.attrs["required"] = "required"
            # Fall back to the field label as the placeholder text.
            widget.attrs.setdefault("placeholder", field.label)
class ConfirmationForm(BootstrapMixin, forms.Form):
    """
    A generic confirmation form. The form is not valid unless the confirm field
    is checked.
    """

    # Hidden, pre-checked flag: submitting the form confirms the action.
    confirm = forms.BooleanField(
        widget=forms.HiddenInput(), initial=True, required=True
    )
class FilterChoiceIterator(forms.models.ModelChoiceIterator):
    """Choice iterator that can emit a leading "null" option."""

    def __iter__(self):
        # Emit the special null choice first when the field requests it.
        if self.field.null_label:
            yield (
                settings.FILTERS_NULL_CHOICE_VALUE,
                settings.FILTERS_NULL_CHOICE_LABEL,
            )
        qs = self.queryset.all()
        # Can't use iterator() when the queryset uses prefetch_related().
        if not qs._prefetch_related_lookups:
            qs = qs.iterator()
        yield from (self.choice(obj) for obj in qs)
class FilterChoiceFieldMixin(object):
    """Mixin making a model choice field suitable for filter sidebars."""

    iterator = FilterChoiceIterator

    def __init__(self, null_label=None, *args, **kwargs):
        self.null_label = null_label
        # Filters are optional by default and rendered as a tall multi-select.
        kwargs.setdefault("required", False)
        kwargs.setdefault("widget", forms.SelectMultiple(attrs={"size": 6}))
        super().__init__(*args, **kwargs)

    def label_from_instance(self, obj):
        label = super().label_from_instance(obj)
        # Append the annotated match count when the queryset provides one.
        if hasattr(obj, "filter_count"):
            return "{} ({})".format(label, obj.filter_count)
        return label
class FilterChoiceField(FilterChoiceFieldMixin, forms.ModelMultipleChoiceField):
    # Multi-valued filter field; all behavior comes from the mixin above.
    pass
class PasswordField(forms.CharField):
    """
    A field used to enter password. The field will hide the password unless the
    reveal button is clicked.
    """

    def __init__(self, password_source="password", render_value=False, *args, **kwargs):
        kwargs.setdefault("widget", forms.PasswordInput(render_value=render_value))
        kwargs.setdefault("label", "Password")
        kwargs.setdefault(
            "help_text",
            "It can be a clear text password or an "
            "encrypted one. It really depends on how you "
            "want to use it. Be aware that it is stored "
            "without encryption in the database.",
        )
        super().__init__(*args, **kwargs)
        # Tells the front-end which sibling field reveals this password.
        self.widget.attrs["password-source"] = password_source
class SlugField(forms.SlugField):
    """
    An improved SlugField that allows to be automatically generated based on a
    field used as source.
    """

    def __init__(self, slug_source="name", *args, **kwargs):
        kwargs.setdefault("label", "Slug")
        kwargs.setdefault(
            "help_text", "Friendly unique shorthand used for URL and config"
        )
        super().__init__(*args, **kwargs)
        # The front-end auto-fills the slug from this source field.
        self.widget.attrs["slug-source"] = slug_source
class SmallTextarea(forms.Textarea):
    """
    Just to be used as small text area.
    """

    # Marker subclass only; sizing is applied by templates/CSS.
    pass
class APISelect(forms.Select):
    """
    Select widget using API calls to populate its choices.
    """

    def __init__(
        self,
        api_url,
        display_field=None,
        value_field=None,
        query_filters=None,
        null_option=False,
        *args,
        **kwargs
    ):
        super().__init__(*args, **kwargs)
        self.attrs["class"] = "custom-select2-api"
        # Build the absolute API URL, honouring the configured base path.
        self.attrs["data-url"] = "/{}{}".format(settings.BASE_PATH, api_url.lstrip("/"))
        if display_field:
            self.attrs["display-field"] = display_field
        if value_field:
            self.attrs["value-field"] = value_field
        for key, value in (query_filters or {}).items():
            self.add_query_filter(key, value)
        if null_option:
            self.attrs["data-null-option"] = 1

    def add_query_filter(self, condition, value):
        """
        Add a condition to filter the feedback from the API call.
        """
        self.attrs["data-query-filter-{}".format(condition)] = value
class APISelectMultiple(APISelect, forms.SelectMultiple):
    """
    Same API select widget using select2 but allowing multiple choices.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.attrs.update({"data-multiple": 1})
class StaticSelect(forms.Select):
    """
    Select widget for static choices leveraging the select2 component.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.attrs.update({"class": "custom-select2-static"})
class StaticSelectMultiple(StaticSelect, forms.SelectMultiple):
    """
    Same static select widget using select2 but allowing multiple choices.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Keep the dropdown open while the user picks several values.
        self.attrs.update({"data-multiple": 1, "data-close-on-select": 0})
class CustomNullBooleanSelect(StaticSelect):
    """
    Do not enforce True/False when not selecting an option.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Tri-state choices: unknown / yes / no.
        self.choices = (
            ("unknown", "---------"),
            ("true", "Yes"),
            ("false", "No"),
        )
class TextareaField(forms.CharField):
    """
    A textarea with support for GitHub-Flavored Markdown. Exists mostly just to
    add a standard help_text.
    """

    widget = forms.Textarea

    def __init__(self, *args, **kwargs):
        # Optional unless the caller says otherwise.
        kwargs.setdefault("required", False)
        super().__init__(*args, **kwargs)
class ObjectChangeFilterForm(BootstrapMixin, forms.Form):
    """Filter form for browsing the object change log."""
    model = ObjectChange
    q = forms.CharField(required=False, label="Search")
    time_after = forms.DateTimeField(
        label="After",
        required=False,
        widget=forms.TextInput(attrs={"placeholder": "YYYY-MM-DD hh:mm:ss"}),
    )
    time_before = forms.DateTimeField(
        label="Before",
        required=False,
        widget=forms.TextInput(attrs={"placeholder": "YYYY-MM-DD hh:mm:ss"}),
    )
    # NOTE(review): single-valued ChoiceField/ModelChoiceField below are
    # rendered with a multi-select widget (StaticSelectMultiple); only one
    # value will validate — confirm whether MultipleChoiceField was intended.
    action = forms.ChoiceField(
        required=False,
        choices=OBJECT_CHANGE_ACTION_CHOICES,
        widget=StaticSelectMultiple,
    )
    user = forms.ModelChoiceField(
        required=False,
        queryset=User.objects.order_by("username"),
        widget=StaticSelectMultiple,
    )
|
import os
from scipy.io import wavfile
import json
import ast
import numpy as np

# Segment every wav file under wavDirPath into overlapping 10-second
# windows (5-second hop) and write each window to wavSavePath.
jsonDirPath = '/Users/Chen/百度云同步盘/Startup/Clevo/联动数据/emotionJson/'
wavDirPath = '/Users/Chen/百度云同步盘/Startup/Clevo/联动数据/wav/'
wavSavePath = '/Users/Chen/百度云同步盘/Startup/Clevo/联动数据/wavSegs/'

# Fail fast when any of the configured directories is missing.
if not os.path.isdir(jsonDirPath):
    raise ValueError('jsonDirPath is not a dir')
if not os.path.isdir(wavDirPath):
    raise ValueError('wavDirPath is not a dir')
if not os.path.isdir(wavSavePath):
    raise ValueError('wavSavePath is not a dir')

jsonArr = os.listdir(jsonDirPath)
wavArr = os.listdir(wavDirPath)
print(len(jsonArr))
print(len(wavArr))

# for i,fileName in enumerate(wavArr[1:2]):
for i, fileName in enumerate(wavArr):
    print(i, fileName)
    filePath = wavDirPath + fileName
    print("filePath", filePath)
    sampleRate, audio = wavfile.read(filePath)
    print(audio.shape[0])
    # print(sampleRate)
    windowLen = 10 * sampleRate  # 10-second window
    hop = 5 * sampleRate         # 5-second hop
    start = 0
    index = 0
    while start + windowLen < audio.shape[0]:
        segment = audio[start:start + windowLen]
        # NOTE(review): 80000 samples == 10 s at 8 kHz; files with any other
        # sample rate are silently skipped — confirm this is intentional.
        if segment.shape[0] == 80000:
            filePrefix = fileName.split('.')[0]
            newFilePath = wavSavePath + filePrefix + "__" + str(index) + ".wav"
            index += 1
            wavfile.write(newFilePath, sampleRate, np.array(segment, dtype="int16"))
        start += hop
|
"""Debug helper
The purpose of this file is to help during development.
The idea is to silence internal exceptions raised by Friendly
itself for most users by redirecting them here, and have them
printed only when debugging mode is activated.
"""
import os
import sys
from typing import Any, Optional
from .ft_gettext import current_lang
# Shorthand for the active language's translation function.
_ = current_lang.translate
# DEBUG is set to True for me. It can also be set to True from __main__ or when
# using the debug() command in the console.
IS_PYDEV = bool(os.environ.get("PYTHONDEVMODE", False))
# True when running from one of the author's local checkouts (Windows paths).
IS_ANDRE = (
    r"users\andre\github\friendly" in __file__.lower()
    or r"users\andre\friendly" in __file__.lower()
)
DEBUG = IS_PYDEV or IS_ANDRE
SHOW_DEBUG_HELPER = False
def log(text: Any) -> None:
    """Print *text* to stdout, but only when debugging mode is active."""
    if not DEBUG:  # pragma: no cover
        return
    print("Log:", text)
def log_error(exc: Optional[BaseException] = None) -> None:
    """In debugging mode, print *exc* (if given) and abort the program."""
    if not DEBUG:  # pragma: no cover
        return
    if exc is not None:
        print(repr(exc))
    sys.exit()
def handle_internal_error() -> None:
    """Report an internal Friendly error and terminate the program.

    Captures Friendly's own traceback, lists any helper packages whose
    names appear in it (one of them may be the true source of the
    failure), prints the full capture in debug mode, then exits.
    """
    from . import explain_traceback, get_output, set_include, set_stream

    print(_("Please report this issue."))
    set_stream(redirect="capture")
    set_include("debug_tb")
    explain_traceback()
    result = get_output()
    dependencies = [
        item
        for item in ["executing", "stack_data", "asttokens", "pure_eval"]
        if item in result
    ]
    if dependencies:
        # BUG FIX: the message parts were previously passed as three separate
        # arguments to _(), which expects a single message string; implicit
        # string concatenation keeps it one translatable message.
        print(
            _(
                "The following package names used by friendly-traceback\n"
                "appear in the full traceback, which may indicate\n"
                "that one of them is the source of this error."
            )
        )
        for dep in dependencies:
            print(dep)
    if DEBUG:
        print(result)
    log(_("Fatal error - aborting"))
    sys.exit()
|
from django.urls import path

from .views import checkout, HomeView, ItemDetailView

# Namespace used when reversing these routes (e.g. "core:product").
app_name = 'core'

urlpatterns = [
    path('product/<slug>', ItemDetailView.as_view(), name='product'),
    path('', HomeView.as_view(), name='home'),
    path('checkout/', checkout, name='checkout'),
]
from models.senet import senet50
import utils
from torch.autograd import Variable
import torch
import cv2
import torchvision.transforms
import numpy as np
from PIL import Image
class VGGFace:
    """Thin wrapper around a SENet-50 VGGFace2 feature extractor.

    Loads pretrained weights from a pickle file and exposes `process`,
    which returns a flattened feature vector for one image file.
    """

    def __init__(self, trained_model="/media/haoxue/WD/VGGFace2-pytorch/senet50_ft_weight.pkl", transform=True):
        # include_top=False drops the classifier head, so the network
        # outputs pooled convolutional features rather than class logits.
        self.net = senet50(num_classes=8631, include_top=False)
        utils.load_state_dict(self.net, trained_model)
        self.net.eval()
        # When truthy, images are mean-subtracted/transposed before the
        # forward pass (see transform_img).
        self.transform = transform

    def process(self, img_path):
        # Forward one image and flatten the feature map to shape (1, N).
        out = self.net(self.load_image(img_path))
        output = out.view(out.size(0), -1)
        output = output.data.cpu().numpy()
        print(np.shape(output))
        return output

    def load_image(self, img_path):
        # img = Image.open(img_path)
        # img = torchvision.transforms.CenterCrop(224)(img)
        # img = np.array(img, dtype=np.uint8)
        # cv2.imread yields an HxWx3 array in BGR channel order.
        img = cv2.imread(img_path)
        img = cv2.resize(img, (224, 224))
        if self.transform:
            img = self.transform_img(img)
        return Variable(img)

    @staticmethod
    def transform_img(img):
        # NOTE(review): cv2.imread already returns BGR, so this flip actually
        # produces RGB here (the comment assumes a PIL/RGB input); the mean
        # below matches the published VGGFace2 BGR mean — confirm the channel
        # order expected by the pretrained weights before changing anything.
        img = img[:, :, ::-1] # RGB -> BGR
        img = img.astype(np.float32)
        img -= np.array([91.4953, 103.8827, 131.0912])
        img = img.transpose(2, 0, 1) # C x H x W
        img = np.expand_dims(img, axis=0) # 1 x C x H x W
        img = torch.from_numpy(img).float()
        return img
if __name__ == "__main__":
    # Smoke test: extract features for a sample image.
    v = VGGFace()
    o = v.process("/home/haoxue/Downloads/download.jpeg")
|
import requests
import time
import datetime
import json
import cexapi
from twilio.rest import Client
from config import *
from sys import argv
# colors for terminal output
class bcolors:
    """ANSI escape sequences used to colour terminal output."""
    MAGENTA = '\033[95m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
""" cex api public endpoint"""
api = 'https://api.coinmarketcap.com/v1'
endpoint = '/ticker/'
params = '?convert=EUR&limit=10'
url = api + endpoint + params
""" setup twilio client """
client = Client(account_sid, auth_token)
""" setup cex.io api '"""
cex = cexapi.API(username, api_key, api_secret)
""" holds past values of price """
past_values = list()
past_values_numeric = list()
holds = 10 # max length of past_values list
# fetch latest price before lopping
last_price = cex.ticker()['last'][:6]
past_values.append(last_price)
past_values_numeric.append(float(last_price))
# Poll CoinMarketCap every update_int seconds, print a portfolio summary and
# optionally send SMS alerts via Twilio.
while True:
    resp = requests.get(url)
    if resp.status_code != 200:
        print("api error: {}".format(resp.text))
    else:
        data = json.loads(resp.text)
        for entry in data:
            if isinstance(entry, dict):
                if 'id' in entry and entry['id'] == 'ethereum':
                    print(entry['name'], "CEX.IO Tracker", time.strftime('%l:%M%p %Z on %b %d, %Y'),
                          '\n')
                    """ percentage changes 1 hour 1 day 7 days """
                    print("Change 1 hour: {} % | ".format(entry['percent_change_1h']), end="")
                    print("24 hour: {} % | ".format(entry['percent_change_24h']), end="")
                    print("7 days: {} %".format(entry['percent_change_7d']))
                    """ latest price in EUR """
                    last_price = cex.ticker()['last']
                    print(ticker + ": {0:.2f}".format(float(last_price)))
                    """ get wallet balance """
                    wallet = cex.balance()
                    if wallet is None:
                        print("Api error ")
                        break
                    eth_wallet = float(wallet['ETH']['available'])
                    """ calculate net """
                    net = eth_wallet * float(last_price)
                    print("Net: {:.2f} EUR".format(net))
                    """ calculate profit or loss """
                    profit = net - investment - fees
                    if profit > 0:
                        print("Profit: " + bcolors.GREEN + "{:.2f} EUR ".format(
                            profit) + bcolors.ENDC + "| ", end="")
                    else:
                        print("Profit: " + bcolors.FAIL + "{:.2f} EUR ".format(
                            profit) + bcolors.ENDC + "| ", end="")
                    """ calculate return of investment percentage """
                    if net / float(investment) <= 1:
                        calc = (1 - (net / float(investment)))
                        print(
                            "ROI: " + bcolors.FAIL + "-{:.2f} %".format(
                                100 * calc) + bcolors.ENDC)
                    else:
                        calc = (net / float(investment)) - 1
                        print("ROI: " + bcolors.GREEN + "+{:.2f} %".format(
                            100 * calc) + bcolors.ENDC)
                    print("Wallet: {} ETH".format(eth_wallet))
                    """ get high low values """
                    hl = cex.ticker(ticker)
                    if hl is None:
                        print("Api error")
                        break
                    print("\nLow: {0:.2f} EUR ".format(float(hl['low'])), end="")
                    print("High: {0:.2f} EUR".format(float(hl['high'])))
                    """ update history of previews price update """
                    past_values_numeric.append(
                        float(entry['price_eur']))  # store always numeric value price
                    # NOTE(review): the comparisons below are lexicographic
                    # string comparisons, and earlier past_values entries may
                    # carry ANSI colour codes — confirm ordering is intended.
                    if past_values[-1] < entry['price_eur']:  # use new price is up use green color
                        past_values.append(bcolors.GREEN + entry['price_eur'][:6] + bcolors.ENDC)
                    elif past_values[-1] == entry[
                        'price_eur']:  # use new price hasn't changed use no color
                        past_values.append(entry['price_eur'][:6])
                    else:  # use new price is less use red color
                        past_values.append(bcolors.FAIL + entry['price_eur'][:6] + bcolors.ENDC)
                    if len(past_values) == holds + 1:  # remove values very old price value
                        past_values.pop(0)
                        past_values_numeric.pop(0)
                    """ print historic data """
                    print("Past ticks: ", end="")
                    for val in past_values:
                        print(val + " ", end="")
                    print("")
                    minutes = (update_int / 60) * len(past_values_numeric)
                    mean_price = sum(past_values_numeric) / float(len(past_values_numeric))
                    print("Average price {:d}m : {:.2f}".format(int(minutes), mean_price))
                    human_time = datetime.datetime.fromtimestamp(
                        int(entry['last_updated'])).strftime('%d-%m-%Y %H:%M:%S')
                    print(
                        bcolors.MAGENTA + "Last Updated: {:s}".format(
                            human_time) + bcolors.ENDC)
                    print('\n')
                    """ send message to mobile with latest infos """
                    # NOTE(review): `argv[1] in "-m"` is a substring test, so a
                    # bare "-" also matches — confirm `== "-m"` was intended.
                    if len(argv) == 2 and argv[1] in "-m":
                        print("Sending trading summary sms to mobile... ")
                        message_body = "Eth Track \n" + \
                                       "Wallet: {:.2f} ETH\n".format(eth_wallet) + \
                                       "Net: {:.2f} EUR\n".format(net) + \
                                       "Profit: {:.2f} EUR".format(profit)
                        message = client.api.account.messages.create(
                            to=my_num,
                            from_=twilio_num,
                            body=message_body)
                        if message is None:
                            print("Api error")
                            break
                    """ price alert sms"""
                    # NOTE(review): `alert` is cleared before the price check,
                    # so this alert can only ever fire on the first iteration.
                    if alert:
                        alert = False
                        if price_alert < float(last_price):
                            print("\nPrice alert ETH trading now at {:.2f}\n".format(
                                float(last_price)))
                            message_body = "Eth Track \n" + \
                                           "cex.io price alert" + \
                                           "ETH: {:.2f}".format(float(last_price))
                            message = client.api.account.messages.create(
                                to=my_num,
                                from_=twilio_num,
                                body=message_body)
                            if message is None:
                                print("Api error")
                                break
                    """ percentage alert via sma """
                    if float(entry['percent_change_1h']) > percent_alert:
                        message_body = "Eth Track \n" + \
                                       "Percent alert ETH is going up +10%"
                        print(message_body)
                        message = client.api.account.messages.create(
                            to=my_num,
                            from_=twilio_num,
                            body=message_body)
                        if message is None:
                            print("Api error")
                            break
                    elif -percent_alert < float(entry['percent_change_1h']) < percent_alert:
                        pass
                    else:
                        message_body = "Eth Track \n" + \
                                       "Percent alert ETH is going down -10%"
                        print(message_body)
                        message = client.api.account.messages.create(
                            to=my_num,
                            from_=twilio_num,
                            body=message_body)
                        if message is None:
                            print("Api error")
                            break
    """
    print("Converter {} EUR \n\n\n".format(cex.converter(eth_wallet, 'ETH/EUR')['amnt']))
    """
    time.sleep(update_int)
|
import sys
from typing import Any, List
# Framework modules that pait knows how to auto-detect, in scan order.
app_list: List[str] = ["flask", "starlette", "sanic", "tornado"]


def auto_load_app_class() -> Any:
    """Return the single supported web-framework module already imported.

    Scans ``sys.modules`` for the frameworks listed in ``app_list``; intended
    for projects that use exactly one web framework.

    Raises:
        RuntimeError: if more than one supported framework is loaded, or none.
    """
    real_app: Any = None
    for app in app_list:
        if app not in sys.modules:
            continue
        # A second loaded framework makes the choice ambiguous.
        if real_app:
            raise RuntimeError(f"Pait unable to make a choice {real_app} & {app}")
        real_app = sys.modules[app]
    if not real_app:
        raise RuntimeError("Pait can't auto load app class")
    return real_app
|
from ebml.base import EBMLInteger, EBMLString, EBMLMasterElement, EBMLProperty
#from ebml.util import TypeValidator, EBMLProperty
__all__ = ["EBMLVersion", "EBMLReadVersion", "EBMLMaxIDLength", "EBMLMaxSizeLength",
"DocType", "DocTypeVersion", "DocTypeReadVersion", "EBMLHead"]
class EBMLVersion(EBMLInteger):
    # EBML version the document was written with (element ID 0x4286).
    ebmlID = b"\x42\x86"
class EBMLReadVersion(EBMLInteger):
    # Minimum EBML version a parser needs to read the file (ID 0x42F7).
    ebmlID = b"\x42\xf7"
class EBMLMaxIDLength(EBMLInteger):
    # Maximum length, in bytes, of element IDs in the file (ID 0x42F2).
    ebmlID = b"\x42\xf2"
class EBMLMaxSizeLength(EBMLInteger):
    # Maximum length, in bytes, of element size fields (ID 0x42F3).
    ebmlID = b"\x42\xf3"
class DocType(EBMLString):
    # Document type name, e.g. "matroska" or "webm" (ID 0x4282).
    ebmlID = b"\x42\x82"
class DocTypeVersion(EBMLInteger):
    # Version of the doctype the file conforms to (ID 0x4287).
    ebmlID = b"\x42\x87"
class DocTypeReadVersion(EBMLInteger):
    # Minimum doctype version a parser needs to read the file (ID 0x4285).
    ebmlID = b"\x42\x85"
class EBMLHead(EBMLMasterElement):
    # Top-level EBML header element (ID 0x1A45DFA3) that opens every EBML file.
    ebmlID = b"\x1a\x45\xdf\xa3"
    # Child elements, in serialization order.
    __ebmlchildren__ = (
        EBMLProperty("ebmlVersion", EBMLVersion),
        EBMLProperty("ebmlReadVersion", EBMLReadVersion),
        EBMLProperty("ebmlMaxIDLength", EBMLMaxIDLength),
        EBMLProperty("ebmlMaxSizeLength", EBMLMaxSizeLength),
        EBMLProperty("docType", DocType),
        EBMLProperty("docTypeVersion", DocTypeVersion),
        EBMLProperty("docTypeReadVersion", DocTypeReadVersion)
    )
|
import logging
logger = logging.getLogger(__name__)
import simplejson as json
from datetime import datetime
from blinker import signal
from cassandra.cqlengine import columns
from cassandra.cqlengine.models import Model
from ...common.mixins.json import JsonMixin
class AbstractBaseModel(Model, JsonMixin):
    """
    AbstractBaseModel
    This model is only intended to be used as a base class for other models
    """
    __abstract__ = True
    # Timestamp of the last save(); refreshed automatically below.
    changed_on = columns.DateTime()
    created_on = columns.DateTime(default=datetime.utcnow)
    # super_active field, in case admin staff needs to disable the entry
    super_active = columns.Boolean(default=True)
    # --------------
    # Super Methods
    # --------------
    def save(self):
        # Stamp the modification time and notify 'on_save' subscribers
        # (sender is the table name) before persisting.
        self.changed_on = datetime.utcnow()
        signal('on_save').send(self.object, instance=self)
        return super(AbstractBaseModel, self).save()
    def update(self, **values):
        # NOTE(review): changed_on is set on the instance but not merged into
        # **values, so cqlengine's update() may not persist it — confirm.
        self.changed_on = datetime.utcnow()
        return super(AbstractBaseModel, self).update(**values)
    # --------------
    # Properties
    # --------------
    @property
    def object(self):
        # Table name; used as the blinker signal sender in save().
        return self.__table_name__
|
import torch
import torch.nn.init
import torch.nn as nn
# Numerical-stability constant shared by the BatchNorm layers below.
eps = 1e-10
class L2Norm(nn.Module):
    """Normalize each row of a 2-D tensor to unit Euclidean length."""

    def __init__(self):
        super(L2Norm, self).__init__()
        # Guards against division by zero for all-zero rows.
        self.eps = 1e-10

    def forward(self, x):
        row_norm = torch.sqrt((x * x).sum(dim=1) + self.eps)
        return x / row_norm.unsqueeze(-1).expand_as(x)
class L2Net(nn.Module):
    """L2-Net style patch descriptor network.

    Maps a single-channel patch (assumes 32x32 input — the final 8x8 conv
    implies a 32->16->8 spatial reduction; TODO confirm) to a 128-D vector.
    """
    def __init__(self):
        super(L2Net, self).__init__()
        # Seven conv blocks; the two stride-2 convs halve the spatial size
        # and the final 8x8 conv collapses the feature map to 1x1.
        self.features = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(32, affine=True, eps=eps),
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(32, affine=True, eps=eps),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias=True),
            nn.BatchNorm2d(64, affine=True, eps=eps),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(64, affine=True, eps=eps),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=True),
            nn.BatchNorm2d(128, affine=True, eps=eps),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(128, affine=True, eps=eps),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=8, bias=True),
            nn.BatchNorm2d(128, affine=True, eps=eps),
        )
        return
    def input_norm(self, x):
        # matlab norm
        # Per-patch standardisation (zero mean / unit std) computed on the
        # transposed patch, presumably to match a MATLAB reference — confirm.
        z = x.contiguous().transpose(2, 3).contiguous().view(x.size(0),-1)
        x_minus_mean = z.transpose(0,1)-z.mean(1)
        sp = torch.std(z,1).detach()
        norm_inp = x_minus_mean/(sp+1e-12)
        norm_inp = norm_inp.transpose(0, 1).view(-1, 1, x.size(2), x.size(3)).transpose(2,3)
        return norm_inp
    def forward(self, input):
        norm_img = self.input_norm(input)
        x_features = self.features(norm_img)
        # NOTE(review): a fresh LocalResponseNorm(size=256) module is built on
        # every forward call; with 128 channels this acts as a global
        # rescaling — confirm the parameters against the reference code.
        return nn.LocalResponseNorm(256,1*256,0.5,0.5)(x_features).view(input.size(0),-1)
|
# Generated by Django 2.1.3 on 2020-06-30 07:55
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.1.3. Applied migrations are recorded history;
    # avoid editing them after deployment.
    dependencies = [
        ('result', '0101_auto_20200626_1450'),
    ]
    operations = [
        migrations.AddField(
            model_name='btutor',
            name='first_term',
            field=models.CharField(blank=True, default='1st Term', max_length=30, null=True),
        ),
        migrations.AddField(
            model_name='btutor',
            name='second_term',
            field=models.CharField(blank=True, default='1st Term', max_length=30, null=True),
        ),
        migrations.AddField(
            model_name='btutor',
            name='third_term',
            field=models.CharField(blank=True, default='1st Term', max_length=30, null=True),
        ),
        migrations.AlterField(
            model_name='annual',
            name='term',
            field=models.CharField(blank=True, help_text='Select subject term', max_length=30, null=True),
        ),
        # NOTE(review): DateTimeField with a string default ('2020-06-30') and
        # a max_length (ignored for DateTimeField) — the model probably wants
        # default=timezone.now; fix the model, not this applied migration.
        migrations.AlterField(
            model_name='btutor',
            name='created',
            field=models.DateTimeField(default='2020-06-30', max_length=200),
        ),
        migrations.AlterField(
            model_name='downloadformat',
            name='created',
            field=models.DateTimeField(default='2020-06-30', max_length=200),
        ),
        migrations.AlterField(
            model_name='student_info',
            name='term',
            field=models.CharField(blank=True, default='1st Term', max_length=30, null=True),
        ),
    ]
|
import hashlib
import zlib
from util.repo_handling.repo_file import repo_file
def object_write(obj, actually_write=True):
    """Serialize *obj*, compute its SHA-1 object name, optionally store it.

    The stored format is ``<fmt> <size>\\x00<payload>`` (zlib-compressed),
    written under objects/<first two hex chars>/<remaining hex chars>.
    Returns the 40-character hex SHA-1 whether or not the object is written.
    """
    payload = obj.serialize()
    # Header: object type, a space, payload length, then a NUL separator.
    store = obj.fmt + b' ' + str(len(payload)).encode() + b'\x00' + payload
    sha = hashlib.sha1(store).hexdigest()
    if actually_write:
        path = repo_file(obj.repo, "objects", sha[:2], sha[2:], mkdir=actually_write)
        with open(path, 'wb') as f:
            f.write(zlib.compress(store))
    return sha
import argparse
import sys
import os
import getpass
from fetchai.ledger.api import LedgerApi
from fetchai.ledger.crypto import Entity, Address
from fetchai.ledger.crypto.entity import _strong_password as is_strong_password
from fetchai.ledger.serialisation.transaction import encode_transaction
from .address_book import AddressBook
from .key_store import KeyStore
from .table import Table
from . import __version__
DISCLAIMER = """
== Warning ==
You use this application at your own risk. Whilst Fetch.ai have made every
effort to ensure its reliability and security, it comes with no warranty. It is
intended for the creation and management of Fetch.ai mainnet wallets and
transactions between them. You are responsible for the security of your own
private keys (see ~/.pocketbook folder). Do not use this application for
high-value operations: it is intended for utility operations on the main network.
"""
class NetworkUnavailableError(Exception):
    """Raised when a connection to the requested ledger network cannot be made."""
    pass
def get_balance(api: LedgerApi, address):
    """Return the balance for *address* in whole FET (from canonical units)."""
    raw = int(api.tokens.balance(address))
    return raw / 10000000000
def get_stake(api: LedgerApi, addresss):
    """Return the staked amount for the address in whole FET."""
    raw = int(api.tokens.stake(addresss))
    return raw / 10000000000
def create_api(name: str) -> LedgerApi:
    """Build a LedgerApi client for the named network.

    Raises:
        NetworkUnavailableError: when the network cannot be reached.
    """
    try:
        return LedgerApi(network=name)
    # BUG FIX: was a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; only connection/setup failures should be masked.
    except Exception:
        pass
    raise NetworkUnavailableError()
def run_list(args):
    """Print a table of balances and stakes for all keys and saved addresses."""
    address_book = AddressBook()
    key_store = KeyStore()
    keys = key_store.list_keys()
    if len(keys) == 0:
        print('No keys present')
    else:
        # NOTE(review): address-book entries are only listed when at least one
        # key exists (this else branch) — confirm that is intentional.
        # select the columns
        cols = ['name', 'type', 'balance', 'stake']
        if args.verbose:
            cols.append('address')
        api = create_api(args.network)
        table = Table(cols)
        # rows for locally held private keys
        for key in keys:
            address = key_store.lookup_address(key)
            balance = get_balance(api, address)
            stake = get_stake(api, address)
            row_data = {
                'name': key,
                'type': 'key',
                'balance': balance,
                'stake': stake,
                'address': str(address),
            }
            table.add_row(**row_data)
        # rows for watch-only address-book entries
        for name, address in address_book.items():
            balance = get_balance(api, address)
            stake = get_stake(api, address)
            row_data = {
                'name': name,
                'type': 'addr',
                'balance': balance,
                'stake': stake,
                'address': str(address),
            }
            table.add_row(**row_data)
        table.display()
def run_create(args):
    """Interactively create a new named key protected by a strong password."""
    key_store = KeyStore()

    # keep asking until an unused key name is provided
    name = input('Enter name for key: ')
    while name in key_store.list_keys():
        print('Key name already exists')
        name = input('Enter name for key: ')

    # keep asking until a strong, confirmed password is provided
    while True:
        password = getpass.getpass('Enter password for key...: ')
        if not is_strong_password(password):
            print('Password too simple, try again')
            continue
        if password != getpass.getpass('Confirm password for key.: '):
            print('Passwords did not match, try again')
            continue
        break

    key_store.add_key(name, password, Entity())
def run_display(args):
    """Print the address and public key for the named private key."""
    key_store = KeyStore()
    password = getpass.getpass('Enter password for key: ')
    entity = key_store.load_key(args.name, password)
    print('Address....:', str(Address(entity)))
    print('Public Key.:', str(entity.public_key))
def parse_commandline():
    """Build the pocketbook argument parser and parse sys.argv.

    Returns:
        (parser, args): the parser (kept for usage printing) and the parsed
        namespace; each sub-command sets ``args.handler``.
    """
    parser = argparse.ArgumentParser(prog='pocketbook')
    parser.add_argument('-v', '--version', action='version', version=__version__)
    parser.add_argument('-n', '--network', default='mainnet', help='The name of the target being addressed')
    subparsers = parser.add_subparsers()

    # list / ls: balances and stakes for all keys and address-book entries
    parser_list = subparsers.add_parser('list', aliases=['ls'], help='Lists all the balances')
    parser_list.add_argument('-v', '--verbose', action='store_true', help='Display extra information (if available)')
    parser_list.set_defaults(handler=run_list)

    # create / new: generate a new password-protected key
    # BUG FIX: help text previously read 'Create a key key'
    parser_create = subparsers.add_parser('create', aliases=['new'], help='Create a new key')
    parser_create.set_defaults(handler=run_create)

    # display: show address / public key for a stored private key
    parser_display = subparsers.add_parser('display', help='Displays the address and public key for a private key')
    parser_display.add_argument('name', help='The name of the key')
    parser_display.set_defaults(handler=run_display)

    # add: store a named address in the address book
    parser_add = subparsers.add_parser('add', help='Adds an address to the address book')
    parser_add.add_argument('name', help='The name of the key')
    parser_add.add_argument('address', type=Address, help='The account address')
    parser_add.set_defaults(handler=run_add)

    # transfer: build, sign and submit a transfer transaction
    parser_transfer = subparsers.add_parser('transfer')
    parser_transfer.add_argument('destination',
                                 help='The destination address either a name in the address book or an address')
    parser_transfer.add_argument('amount', type=int, help='The amount in whole FET to be transferred')
    parser_transfer.add_argument('--from', dest='from_address', help='The signing account, required for multi-sig')
    parser_transfer.add_argument('signers', nargs='+', help='The series of key names needed to sign the transaction')
    parser_transfer.set_defaults(handler=run_transfer)

    return parser, parser.parse_args()
def run_add(args):
    """Store a named address in the address book."""
    AddressBook().add(args.name, args.address)
def run_transfer(args):
    """Interactively build, sign and submit a FET transfer transaction."""
    address_book = AddressBook()
    key_store = KeyStore()
    # choose the destination
    if args.destination in address_book.keys():
        destination = address_book.lookup_address(args.destination)
    else:
        destination = key_store.lookup_address(args.destination)
        if destination is None:
            # fall back to parsing the argument as a literal address
            destination = Address(args.destination)
    # change the amount (1 FET == 10^10 canonical units)
    amount = args.amount * 10000000000
    # check all the signers make sense
    for signer in args.signers:
        if signer not in key_store.list_keys():
            raise RuntimeError('Unknown key: {}'.format(signer))
    # determine the from account
    from_address_name = None
    if len(args.signers) == 1 and args.from_address is None:
        from_address_name = args.signers[0]
    elif len(args.signers) >= 1 and args.from_address is not None:
        present = args.from_address in key_store.list_keys() or args.from_address in address_book.keys()
        from_address_name = args.from_address
        if not present:
            raise RuntimeError('Unknown from address: {}'.format(args.from_address))
    else:
        raise RuntimeError('Unable to determine from account')
    print('Network....:', args.network)
    print('From.......:', str(from_address_name))
    print('Signers....:', args.signers)
    print('Destination:', str(destination))
    print('Amount.....:', args.amount, 'FET')
    print()
    input('Press enter to continue')
    api = create_api(args.network)
    # start unsealing the private keys
    entities = {}
    for signer in args.signers:
        entity = key_store.load_key(signer, getpass.getpass('Enter password for key {}: '.format(signer)))
        entities[signer] = entity
    # NOTE(review): from_address can remain None when the from-name is a key
    # that is not among the signers; Address(None) below would then fail —
    # confirm whether that path is reachable.
    from_address = None
    if from_address_name in entities:
        from_address = Address(entities[from_address_name])
    elif from_address_name in address_book.keys():
        from_address = Address(address_book.lookup_address(from_address_name))
    # build up the basic transaction information
    tx = api.tokens._create_skeleton_tx(len(entities.values()))
    tx.from_address = Address(from_address)
    tx.add_transfer(destination, amount)
    for entity in entities.values():
        tx.add_signer(entity)
    # encode and sign the transaction
    encoded_tx = encode_transaction(tx, list(entities.values()))
    # # submit the transaction
    print('Submitting TX...')
    api.sync(api.tokens._post_tx_json(encoded_tx, 'transfer'))
    print('Submitting TX...complete')
def main():
    """Entry point: show the disclaimer on first run, parse args, dispatch."""
    # disclaimer
    # BUG FIX: os.path.join() was used here, which simply returns the path
    # string (always truthy), so the disclaimer was never shown; test for
    # the key store's existence instead.
    if not os.path.exists(KeyStore.KEY_STORE_ROOT):
        print(DISCLAIMER)
        input('Press enter to accept')
    parser, args = parse_commandline()
    # select the command handler
    if hasattr(args, 'handler'):
        handler = args.handler
    else:
        parser.print_usage()
        handler = None
    # run the specified command
    exit_code = 1
    if handler is not None:
        try:
            # execute the handler
            handler(args)
            exit_code = 0
        except NetworkUnavailableError:
            print('The network appears to be unavailable at the moment. Please try again later')
        except Exception as ex:
            print('Error:', ex)
    # close the program with the given exit code
    sys.exit(int(exit_code))
if __name__ == '__main__':
    # Script entry point.
    main()
|
# -*- coding: utf-8 -*-
# ██╗███╗ ██╗████████╗███████╗██╗ ██╗██╗ ██╗██╗██████╗
# ██║████╗ ██║╚══██╔══╝██╔════╝██║ ██║╚██╗██╔╝██║██╔══██╗
# ██║██╔██╗ ██║ ██║ █████╗ ██║ ██║ ╚███╔╝ ██║██████╔╝
# ██║██║╚██╗██║ ██║ ██╔══╝ ██║ ██║ ██╔██╗ ██║██╔══██╗
# ██║██║ ╚████║ ██║ ███████╗███████╗██║██╔╝ ██╗██║██║ ██║
# ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚══════╝╚══════╝╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═╝
# Package metadata, consumed by setup.py and introspection tools.
__title__ = 'intelixir'
__description__ = 'A Potion of Threat Intelligence.'
__url__ = 'http://github.com/intelixir/intelixir'
__version__ = '0.1.0'
__author__ = 'https://twitter.com/secbug'
__author_email__ = 'secbug.git@gmail.com'
__copyright__ = 'Copyright 2019 @secbug'
__license__ = 'Apache License 2.0'
|
# /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2021-12-08 20:14 bucktoothsir <rsliu.xd@gmail.com>
#
# Distributed under terms of the MIT license.
"""
"""
from unittest import mock
from domxss import DomXSSDetector
from domxss import DomAlertInfo
from .test_config import *
import pytest
@pytest.fixture
def get_domxss_detector():
    """Provide a fresh DomXSSDetector instance per test."""
    return DomXSSDetector()
@pytest.mark.parametrize('url',
                         ['LocationHashEval.html',
                          'LocationHashReplace.html',
                          'LocationHashFormAction.html',
                          'LocationHashSetTimeout.html'])
def test_scan_by_payload(url, get_domxss_detector):
    """Each vulnerable page served by the local test server must be detected."""
    domxss_detector = get_domxss_detector
    url = HTTP_SERVER_ADDRESS + ':' + str(PORT) + '/' + url
    result = domxss_detector.scan_by_payload(url)
    # `is True` is the idiomatic strict comparison and matches the sibling
    # unit tests below (was `== True`).
    assert result is True
@mock.patch('domxss.domxss_detector.WebDriver', mock.MagicMock(return_value=None))
class TestScanByPayload():
    """Unit tests for DomXSSDetector.scan_by_payload, with the WebDriver
    patched out and _payload_scan_helper replaced via monkeypatch.

    NOTE(review): '_attack_vecotrs' mirrors the (apparently misspelled)
    attribute name in the production class; renaming it here would break
    the monkeypatching — fix in the detector first, then here.
    """

    def test_is_vulnerable(self, monkeypatch):
        """A helper hit (a DomAlertInfo) must mark the detector vulnerable."""
        detector = DomXSSDetector()
        assert detector.vulnerable is False
        def mock_payload_scan_helper(*args, **kwargs):
            return DomAlertInfo('test url', None)
        monkeypatch.setattr(detector, '_payload_scan_helper', mock_payload_scan_helper)
        assert detector.scan_by_payload('test url') is True
        assert detector.vulnerable is True

    def test_is_not_vulnerable(self, monkeypatch):
        """No helper hit leaves the detector non-vulnerable."""
        detector = DomXSSDetector()
        assert detector.vulnerable is False
        def mock_payload_scan_helper(*args, **kwargs):
            return None
        monkeypatch.setattr(detector, '_payload_scan_helper', mock_payload_scan_helper)
        assert detector.scan_by_payload('test url') is False
        assert detector.vulnerable is False

    def test_empty_vectors_should_not_be_vulnerable(self, monkeypatch):
        """With an empty vector list the scan can never find anything."""
        detector = DomXSSDetector()
        monkeypatch.setattr(detector, '_attack_vecotrs', [])
        assert detector.scan_by_payload('test url') is False
        assert detector.vulnerable is False

    def test_scan_by_given_vectors_if_present(self, monkeypatch):
        """Explicitly passed vectors take precedence over the defaults."""
        detector = DomXSSDetector()
        attack_vecotrs = ['a1', 'a2']
        default_vectors = ['v1', 'v2']
        monkeypatch.setattr(detector, '_attack_vecotrs', default_vectors)
        called_vectors = []
        def mock_payload_scan_helper(url, vector):
            called_vectors.append(vector)
            return None
        monkeypatch.setattr(detector, '_payload_scan_helper', mock_payload_scan_helper)
        detector.scan_by_payload('test url', attack_vecotrs)
        assert called_vectors == attack_vecotrs

    def test_empty_vectors_should_use_default_vectors(self, monkeypatch):
        """Omitting the vectors argument falls back to the default vectors."""
        detector = DomXSSDetector()
        default_vectors = ['v1', 'v2']
        monkeypatch.setattr(detector, '_attack_vecotrs', default_vectors)
        called_vectors = []
        def mock_payload_scan_helper(url, vector):
            called_vectors.append(vector)
            return None
        monkeypatch.setattr(detector, '_payload_scan_helper', mock_payload_scan_helper)
        detector.scan_by_payload('test url')
        assert called_vectors == default_vectors
|
# stdlib imports
import datetime
# django imports
from django.conf import settings
from django.core.cache import cache
from django.db import models
from django.db import transaction
from django.db.models import Count
from django.db.models import F
from django.db.models import OuterRef
from django.db.models import Subquery
from django.db.models import Value
from django.db.models.functions import Coalesce
from django.http import HttpResponse
from django.http import HttpResponseForbidden
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.shortcuts import render
from django.template import loader
from django.utils import timezone
from django.views import View
# third-party imports
import pusher
# local imports
from beeme.core.models import Command
from beeme.core.models import Message
from beeme.core.models import VoteDown
from beeme.core.models import VoteUp
# Rate limiting for outbound pusher 'votes-change' events.
RATELIMIT_COUNT = 1 # occurrences per period
RATELIMIT_PERIOD = 1 # seconds
VOTE_EXPIRATION = datetime.timedelta(minutes=15) # time window during which votes are valid

# pusher service client, configured from Django settings at import time.
pusher_client = pusher.Pusher(
    app_id=settings.PUSHER_APP_ID,
    key=settings.PUSHER_KEY,
    secret=settings.PUSHER_SECRET,
    cluster=settings.PUSHER_CLUSTER,
    ssl=settings.PUSHER_SSL
)
# helper methods
def command_to_dict(command):
    """Serialize a command row into a JSON-friendly dict.

    `command` may be a model instance or a values_list(named=True) row, as
    long as it exposes id, text, timestamp and the three vote-count fields.

    :param command: object with id/text/timestamp/upvotes_count/
        downvotes_count/votes_count attributes
    :return: dict suitable for JsonResponse / pusher payloads
    """
    # Removed a stray `timezone.now()` call whose result was discarded.
    return {
        'id': str(command.id),
        'text': command.text,
        'timestamp': command.timestamp.isoformat(),
        'votes': {
            'up': command.upvotes_count,
            'down': command.downvotes_count,
            'value': command.votes_count,
        }
    }
def ratelimit_exceeded(key):
    """Return True when `key` already has RATELIMIT_COUNT or more
    occurrences recorded within the last RATELIMIT_PERIOD seconds.

    Does not mutate the cache; see ratelimit_increment for recording.
    """
    window_start = datetime.datetime.utcnow() - datetime.timedelta(seconds=RATELIMIT_PERIOD)
    # Keep only timestamps that are still inside the rate-limit window.
    recent = [stamp for stamp in cache.get(key, []) if stamp >= window_start]
    return len(recent) >= RATELIMIT_COUNT
def ratelimit_increment(key):
    """Record one occurrence for `key` and return how many occurrences
    fall within the current RATELIMIT_PERIOD window (including this one)."""
    now = datetime.datetime.utcnow()
    window_start = now - datetime.timedelta(seconds=RATELIMIT_PERIOD)
    # Drop expired timestamps, append the new one, write back with a TTL
    # matching the window so stale keys expire on their own.
    recent = [stamp for stamp in cache.get(key, []) if stamp >= window_start]
    recent.append(now)
    cache.set(key, recent, RATELIMIT_PERIOD)
    return len(recent)
# function views
class IndexView(View):
    """Control-room page: lists commands and accepts votes / new commands /
    experimenter messages, broadcasting changes through pusher."""

    # Pusher channel all control-room events are published on.
    channel = 'controlroom'

    def get(self, request):
        """Render the control-room page with command lists and pusher config."""
        # NOTE(review): commands_queue is only used by the commented-out
        # 'queue' entry below — currently dead.
        commands_queue = Command.objects \
            .filter(is_accepted=True, is_performed=False) \
            .order_by('-ord')
        return render(request, 'controlroom/index.html', {
            'channel': self.channel,
            'commands': self.commands(),
            #'queue': tuple( map( command_to_dict, commands_queue.values_list('id','command_text','votes', named=True) ) ),
            'pusher_config': {
                'key': settings.PUSHER_KEY,
                'cluster': settings.PUSHER_CLUSTER,
            }
        })

    def post(self, request):
        """
        Handle user actions.

        Dispatches on POST['action']: 'command' creates a command (with an
        initial upvote), 'up'/'down' vote on an existing command, and
        'experimenter_msg' (superusers only) broadcasts a message.
        """
        # only authenticated users are allowed to make changes.
        if not request.user.is_authenticated: return HttpResponseForbidden()
        action = request.POST.get('action')
        # handle command being added
        if action == 'command':
            text = request.POST.get('text', '').strip()
            # NOTE(review): the check enforces len >= 2 but the message says
            # min_length=1 — one of the two looks wrong.
            if len(text) < 2:
                error = 'command too short (min_length=1)'
                return JsonResponse({ 'error':error }, status=400)
            if len(text) > 50:
                error = 'command too long (max_length=50)'
                return JsonResponse({ 'error':error }, status=400)
            with transaction.atomic():
                # create command
                command = Command.objects.create(text=text)
                # add initial upvote
                command.vote_up(user=request.user)
                # broadcast only after the transaction actually commits
                transaction.on_commit(lambda: self.send_votes_change(self.channel))
                pass
            return JsonResponse({ 'status':'success' }, status=202)
        # handle up/down vote
        if action == 'up' or action == 'down':
            pk = request.POST.get('id')
            command = get_object_or_404(Command, pk=pk)
            with transaction.atomic():
                if action == 'up':
                    command.vote_up(user=request.user)
                    pass
                elif action == 'down':
                    command.vote_down(user=request.user)
                    pass
                transaction.on_commit(lambda: self.send_votes_change(self.channel))
                pass
            return JsonResponse({ 'status':'success' }, status=202)
        # handle message from experimenter
        if action == 'experimenter_msg':
            # only superusers users are allowed to send messages.
            if not request.user.is_superuser: return HttpResponseForbidden()
            text = request.POST.get('text', '').strip()
            if len(text) < 2:
                error = 'command too short (min_length=1)'
                return JsonResponse({ 'error':error }, status=400)
            if len(text) > 250:
                error = 'command too long (max_length=250)'
                return JsonResponse({ 'error':error }, status=400)
            with transaction.atomic():
                # create command
                message = Message.objects.create(text=text, user=request.user)
                transaction.on_commit(lambda: self.send_message(self.channel, message))
                pass
            return JsonResponse({ 'status':'success' }, status=202)
        return JsonResponse({ 'error':'unknown action' }, status=400)

    def commands(self, limit=5, timediff=VOTE_EXPIRATION):
        """
        Build complex queries to get the unaccepted commands and the count of
        their up/down/net votes, returned as 'top_five'/'last_five' dicts.

        NOTE(review): `cutoff` (now - timediff) is computed but never applied
        to the vote subqueries, despite the comments below and the
        VOTE_EXPIRATION default suggesting votes should only count inside the
        window — confirm whether timestamp filters are missing.
        """
        # get current time
        now = timezone.now()
        # calculate cutoff time
        cutoff = now - timediff
        # get downvotes by command, within cutoff, and disable sorting
        downvotes = VoteDown.objects.filter(command=OuterRef('pk')).order_by().values('command')
        # count downvotes by command
        downvotes_count = downvotes.annotate(downvotes_count=Count('pk')).values('downvotes_count')
        # get upvotes by command, within cutoff, and disable sorting
        upvotes = VoteUp.objects.filter(command=OuterRef('pk')).order_by().values('command')
        # count upvotes by command
        upvotes_count = upvotes.annotate(upvotes_count=Count('pk')).values('upvotes_count')
        # only show unaccepted commands
        # add downvotes, with 0 if none available, as an int
        # add upvotes, with 0 if none available, as an int
        # calculate net total votes
        # return the id,text, and three calculated fields as a named tuple
        commands = Command.objects \
            .filter(is_accepted=False) \
            .annotate(
                downvotes_count=Coalesce(Subquery(downvotes_count),Value(0), output_field=models.IntegerField()) ,
                upvotes_count=Coalesce(Subquery(upvotes_count),Value(0), output_field=models.IntegerField())
            ) \
            .annotate(votes_count=(F('upvotes_count') - F('downvotes_count'))) \
            .values_list('id','text','timestamp','votes_count','downvotes_count','upvotes_count', named=True)
        # order and limit
        commands_top_five = commands.order_by('-votes_count')[:limit]
        commands_last_five = commands.order_by('-timestamp')[:limit]
        # convert to dicts (namedtuple rows expose the annotated fields
        # as attributes, which command_to_dict relies on)
        commands_top_five = tuple( map( command_to_dict, commands_top_five ) )
        commands_last_five = tuple( map(command_to_dict, commands_last_five ) )
        data = {
            'top_five': commands_top_five,
            'last_five': commands_last_five,
        }
        return data

    def send_message(self, channel, message):
        """
        Publish messages from experimenter
        """
        event = 'experimenter-msg'
        pusher_client.trigger(channel, event, {
            'id': str( message.id ),
            'msg': message.text,
        })
        return

    def send_votes_change(self, channel):
        """
        Publish vote changes to all subscribers.
        Respects rate limits to flooding clients with vote changes.
        """
        event = 'votes-change'
        ratelimit_key = '{}:{}'.format( channel, event )
        if ratelimit_exceeded(ratelimit_key):
            print('RATELIMITED')
            return
        print( 'send_votes_change', channel, event )
        pusher_client.trigger(channel, event, self.commands())
        ratelimit_increment(ratelimit_key)
        return
    pass
def update_queue_table(request):
    """Render the partial template listing accepted-but-unperformed commands."""
    # Changed '-org' to '-ord' for consistency with the identical queryset in
    # IndexView.get; '-org' looked like a typo and would raise FieldError.
    # NOTE(review): confirm the ordering field name against the Command model.
    commands = Command.objects.filter(is_accepted=True, is_performed=False).order_by('-ord')
    return render(request, 'controlroom/update_queue_table.html', {'commands': commands})
# Public API of this views module.
__all__ = (
    'IndexView',
)
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome import pins
from esphome.components import sensor
from esphome.const import CONF_COUNT_MODE, CONF_FALLING_EDGE, CONF_ID, CONF_INTERNAL_FILTER, \
CONF_PIN, CONF_RISING_EDGE, CONF_NUMBER, \
ICON_PULSE, UNIT_PULSES_PER_MINUTE
from esphome.core import CORE
# C++ namespace and types generated for the pulse_counter component.
pulse_counter_ns = cg.esphome_ns.namespace('pulse_counter')
PulseCounterCountMode = pulse_counter_ns.enum('PulseCounterCountMode')
# Mapping from YAML config strings to the C++ PulseCounterCountMode enum.
COUNT_MODES = {
    'DISABLE': PulseCounterCountMode.PULSE_COUNTER_DISABLE,
    'INCREMENT': PulseCounterCountMode.PULSE_COUNTER_INCREMENT,
    'DECREMENT': PulseCounterCountMode.PULSE_COUNTER_DECREMENT,
}
COUNT_MODE_SCHEMA = cv.enum(COUNT_MODES, upper=True)
PulseCounterSensor = pulse_counter_ns.class_('PulseCounterSensor',
                                             sensor.Sensor, cg.PollingComponent)
def validate_internal_filter(value):
    """Validate the internal filter period for the pulse counter.

    ESP32 rejects values above 13us (per the check below); other platforms
    accept any positive microsecond period.

    :param value: raw config value for the filter period
    :return: the validated time period
    :raises cv.Invalid: on ESP32 when the period exceeds 13us
    """
    value = cv.positive_time_period_microseconds(value)
    # Collapsed the duplicated `return value` branches into a single exit.
    if CORE.is_esp32 and value.total_microseconds > 13:
        raise cv.Invalid("Maximum internal filter value for ESP32 is 13us")
    return value
def validate_pulse_counter_pin(value):
    """Validate that the configured pin can act as a pulse-counter input."""
    pin_config = pins.internal_gpio_input_pin_schema(value)
    if CORE.is_esp8266:
        # Pin numbers 16 and up (GPIO16/GPIO17) are rejected on ESP8266.
        if pin_config[CONF_NUMBER] >= 16:
            raise cv.Invalid("Pins GPIO16 and GPIO17 cannot be used as pulse counters on ESP8266.")
    return pin_config
# Component config schema: pulses/min sensor polled every 60s by default.
# (The positional `2` is presumably the accuracy decimals — confirm against
# sensor.sensor_schema's signature.)
CONFIG_SCHEMA = sensor.sensor_schema(UNIT_PULSES_PER_MINUTE, ICON_PULSE, 2).extend({
    cv.GenerateID(): cv.declare_id(PulseCounterSensor),
    cv.Required(CONF_PIN): validate_pulse_counter_pin,
    # Default behaviour: count rising edges, ignore falling edges.
    cv.Optional(CONF_COUNT_MODE, default={
        CONF_RISING_EDGE: 'INCREMENT',
        CONF_FALLING_EDGE: 'DISABLE',
    }): cv.Schema({
        cv.Required(CONF_RISING_EDGE): COUNT_MODE_SCHEMA,
        cv.Required(CONF_FALLING_EDGE): COUNT_MODE_SCHEMA,
    }),
    cv.Optional(CONF_INTERNAL_FILTER, default='13us'): validate_internal_filter,
}).extend(cv.polling_component_schema('60s'))
def to_code(config):
    """Generate the C++ setup code for a pulse counter sensor.

    esphome codegen coroutine: each `yield` awaits a code-generation step,
    so the statement order below is significant.
    """
    var = cg.new_Pvariable(config[CONF_ID])
    yield cg.register_component(var, config)
    yield sensor.register_sensor(var, config)
    pin = yield cg.gpio_pin_expression(config[CONF_PIN])
    cg.add(var.set_pin(pin))
    count = config[CONF_COUNT_MODE]
    cg.add(var.set_rising_edge_mode(count[CONF_RISING_EDGE]))
    cg.add(var.set_falling_edge_mode(count[CONF_FALLING_EDGE]))
    cg.add(var.set_filter_us(config[CONF_INTERNAL_FILTER]))
|
"""attendence indexes
Revision ID: 3559c36f279d
Revises: 3daa1030c816
Create Date: 2015-07-21 09:34:59.716931
"""
# revision identifiers, used by Alembic.
revision = '3559c36f279d'
down_revision = '3daa1030c816'
branch_labels = None  # linear history: no named branches
depends_on = None  # no cross-branch dependencies
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add a unique (meeting_id, member_id) index on
    committee_meeting_attendance, preventing duplicate attendance rows."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_index('meeting_member_ix', 'committee_meeting_attendance', ['meeting_id', 'member_id'], unique=True)
    ### end Alembic commands ###
def downgrade():
    """Drop the unique attendance index added by upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index('meeting_member_ix', table_name='committee_meeting_attendance')
    ### end Alembic commands ###
|
# -*- coding: utf-8 -*-
"""The item methods and class
"""
from __future__ import unicode_literals
class Item(object):
    """
    A basic item of the db: a feed entry plus the user it belongs to.
    """

    # Attribute names exposed through get_metadata (everything but content).
    _METADATA_FIELDS = ('title', 'link', 'feed', 'username')

    def __init__(self, title, link, feed, username, content):
        """
        Initialize an item object — the db-level representation of an entry.

        :param title: Title of the item
        :param link: Link of the item
        :param feed: RSS feed link of the item
        :param username: Username associated with the item
        :param content: Raw data of the item
        :type title: str
        :type link: str
        :type feed: str
        :type username: str
        :type content: str
        """
        self.title = title
        self.link = link
        self.feed = feed
        self.username = username
        self.content = content

    def get_metadata(self):
        """
        Return the item's metadata (all fields except the raw content).

        :return: Metadata of the item
        :rtype: dict
        """
        return {field: getattr(self, field) for field in self._METADATA_FIELDS}
def clean_link(link):
    """
    Clean an url.

    Lowercases, trims surrounding whitespace and collapses duplicate
    slashes (e.g. 'HTTP://Host//x/' -> 'http://host/x').

    NOTE(review): lowercasing also affects the path/query, which can be
    case-sensitive on some servers — confirm this is intended.

    :param link: An url to clean
    :type link: str
    :return: The clean url (empty string when `link` has no content)
    :rtype: str
    """
    parts = [segment for segment in link.lower().strip().split('/') if segment]
    # Guard against empty/whitespace-only input, which previously raised
    # IndexError on parts[0].
    if not parts:
        return ''
    return '{}//{}'.format(parts[0], '/'.join(parts[1:]))
def format_link(link):
    """
    Format an url.

    This (re)format an url regarding it's dns. Useful for changing urls of
    video providers with the embed one (otherwise JS scripts are not passing).

    Now accepts both http:// and https:// YouTube links, matching the
    dual-scheme handling that was already in place for Dailymotion.

    :param link: An url to format
    :type link: str
    :return: The formated url
    :rtype: str
    """
    if link.startswith(('https://www.youtube.com', 'http://www.youtube.com')):
        return link.replace('watch?v=', 'embed/')
    if link.startswith(('https://www.dailymotion.com', 'http://www.dailymotion.com')):
        return link.replace('/video', '/embed/video')
    return link
|
"""
File: pitch_range_interpreter.py
Purpose: A PitchRangeInterpreter is an interface for mapping numeric values to pitches.
"""
from abc import ABC, abstractmethod
class PitchRangeInterpreter(ABC):
    """
    Abstract interface for mapping numeric values to pitches and back.

    Implementations define a value-to-pitch correspondence over some pitch
    range; see ScalarRangeInterpreter or ChromaticRangeInterpreter for
    concrete examples.
    """

    def __init__(self):
        pass

    @abstractmethod
    def eval_as_nearest_pitch(self, v):
        """Return the single pitch nearest to the numeric value `v`."""
        pass

    @abstractmethod
    def value_for(self, diatonic_pitch):
        """Inverse of the pitch mapping: return the numeric value that maps
        to `diatonic_pitch`."""
        pass

    @abstractmethod
    def eval_as_pitch(self, v):
        """Return the pitch(es) that `v` maps to — a list, since `v` may lie
        between two nearest pitches."""
        pass

    @abstractmethod
    def eval_as_accurate_chromatic_distance(self, v):
        """Return the precise chromatic distance `v` maps to in the pitch
        range; implementations may return a real (non-integer) value."""
        pass
import sys
from src.functionality import data_io
from src.functionality import user
import requests
import json
import os
import datetime
# Alias the bare module name 'user' to src.functionality.user — presumably so
# previously pickled objects referencing 'user' can be unpickled. TODO confirm.
sys.modules['user'] = user
class App:
    """Core application state for the budget tracker.

    Holds expense, budget and budget-performance data in nested dicts and
    persists them as JSON under ../../data/<user>/ via data_io.
    """

    def __init__(self):
        self.user_expense_data = {}        # {category: {year: {month: [entry, ...]}}}
        self.user_budget_data = {}         # {year: {month: {category: amount}}}
        self.budget_performance_data = {}  # {year: {month: {category: {...}}}}
        self.app_user = None               # must be set before any persistence call

    @staticmethod
    def create_user_profile(name):
        """Create the data directory for a new user profile.

        NOTE(review): relative path — only works when the cwd is the
        expected source directory.
        """
        os.mkdir("../../data/" + name)

    def add_new_expense(self, category, year, month, data):
        """Append an expense entry, creating the category/year/month levels
        on demand, then persist to disk."""
        if category in self.user_expense_data:
            # add data to existing category
            if year in self.user_expense_data[category]:
                if month in self.user_expense_data[category][year]:
                    # append (the original insert-at-len was equivalent)
                    self.user_expense_data[category][year][month].append(data)
                else:
                    self.user_expense_data[category][year][month] = [data]
            else:
                self.user_expense_data[category][year] = {month: [data]}
        else:
            # create new category and add data to it
            self.user_expense_data[category] = {year: {month: [data]}}
        self.update_expense_data(self.app_user.name)

    def edit_expense(self, category, year, month, index, data):
        """Replace the expense entry at `index` and persist."""
        self.user_expense_data[category][year][month][index] = data
        self.update_expense_data(self.app_user.name)

    def remove_expense(self, category, year, month, index):
        """Delete the expense entry at `index` and persist."""
        del self.user_expense_data[category][year][month][index]
        self.update_expense_data(self.app_user.name)

    def update_expense_data(self, name):
        """Write the expense data for `name` to its JSON file."""
        data_io.store_data("../../data/" + name + "/user_expenses.json", self.user_expense_data)

    @staticmethod
    def calculate_post_tax_funds(year, state, filing_status, salary, additional_inc, retirement_cont, hsa_cont):
        """Return annual and monthly take-home funds after FICA, federal and
        state taxes, using the Taxee API.

        NOTE(review): fetch_tax_information returns None on API failure, which
        makes json.loads raise TypeError here — consider handling that case.
        """
        gross_income = (salary + additional_inc) - (salary*(retirement_cont/100)) - hsa_cont
        tax_info = json.loads(App.fetch_tax_information(year, gross_income, state, filing_status))["annual"]
        post_tax_funds = gross_income - (tax_info["fica"]["amount"] + tax_info["federal"]["amount"] + tax_info["state"]["amount"])
        post_tax_funds_by_month = post_tax_funds / 12
        return {'annual': post_tax_funds, 'monthly': post_tax_funds_by_month}

    @staticmethod
    def fetch_tax_information(year, gross_income, state, filing_status):
        """POST to the Taxee API; return the response body on HTTP 200,
        otherwise None."""
        year = str(year)
        gross_income = str(gross_income)
        # Context manager guarantees the key file is closed even on errors.
        with open("../../data/taxee_api.txt", "r") as api_key_file:
            api_key = api_key_file.read()
        headers = {
            'Authorization': api_key,
            'Content-Type': 'application/x-www-form-urlencoded',
        }
        data = {
            'state': state,
            'filing_status': filing_status,
            'pay_rate': gross_income
        }
        response = requests.post('https://taxee.io/api/v2/calculate/' + year, headers=headers, data=data)
        if response.status_code == 200:
            return response.text
        else:
            return None

    @staticmethod
    def calculate_hourly_to_salary(wage, avg_hours, expected_weeks):
        """Convert an hourly wage to a yearly salary estimate."""
        return wage*avg_hours*expected_weeks

    def create_budget(self):
        """Initialize an empty budget for month 1 of the current year and
        persist it."""
        current_year = datetime.datetime.today().year
        budget = {current_year: {1: {}}}
        self.user_budget_data = budget
        self.update_budget_data()

    def change_budget(self, year, month, category, amount):
        """Set a budget amount for (year, month, category), creating levels
        as needed, then persist.

        NOTE(review): when category is None the month is overwritten with the
        string "s" — looks like a placeholder/sentinel; confirm its intent.
        """
        if year in self.user_budget_data:
            if month in self.user_budget_data[year]:
                if category is None:
                    self.user_budget_data[year][month] = "s"
                else:
                    self.user_budget_data[year][month][category] = amount
            else:
                if category is None:
                    self.user_budget_data[year][month] = "s"
                else:
                    self.user_budget_data[year][month] = {category: amount}
        else:
            if category is None:
                self.user_budget_data[year] = {month: "s"}
            else:
                self.user_budget_data[year] = {month: {category: amount}}
        self.update_budget_data()

    def update_budget_data(self):
        """Write the budget data for the current user to its JSON file."""
        data_io.store_data("../../data/" + self.app_user.name + "/user_budget.json", self.user_budget_data)

    def calculate_budget_performance(self, year=None, month=None):
        """Compute budgeted-vs-spent figures.

        With no arguments, walks every (year, month) in the budget and
        recurses into the single-month branch for each. With both arguments,
        fills budget_performance_data[year][month] per category and persists.

        Fix: the original made extra no-argument recursive calls after adding
        each missing year/month key, re-walking the whole budget repeatedly;
        those redundant calls are removed (the final state is unchanged).

        NOTE(review): the single-month branch stringifies year/month, which
        matches data loaded from JSON (string keys) but not the int keys
        create_budget produces — confirm which key type is canonical.
        """
        if year is None or month is None:
            if self.budget_performance_data is None:
                self.budget_performance_data = {}
            for y in sorted(self.user_budget_data):
                if y not in self.budget_performance_data:
                    self.budget_performance_data[y] = {}
                for m in sorted(self.user_budget_data[y]):
                    if m not in self.budget_performance_data[y]:
                        self.budget_performance_data[y][m] = {}
                    self.calculate_budget_performance(y, m)
        else:
            year = str(year)
            month = str(month)
            for c in self.user_budget_data[year][month]:
                expenses_added = False
                if c in self.user_expense_data:
                    if year in self.user_expense_data[c]:
                        if month in self.user_expense_data[c][year]:
                            expense_sum = 0
                            for data in self.user_expense_data[c][year][month]:
                                expense_sum += data["amount"]
                            self.budget_performance_data[year][month][c] = {"budgeted": self.user_budget_data[year][month][c], "spent": expense_sum}
                            self.update_budget_performance_data()
                            expenses_added = True
                if not expenses_added:
                    self.budget_performance_data[year][month][c] = {"budgeted": self.user_budget_data[year][month][c], "spent": 0}
                    self.update_budget_performance_data()

    def update_budget_performance_data(self):
        """Write the budget-performance data to its JSON file."""
        data_io.store_data("../../data/" + self.app_user.name + "/budget_performance.json", self.budget_performance_data)
|
# Copyright (C) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
from action_detection.nn.data.augmentation import AugmentFactory, BrightnessAugmentor, SaturationAugmentor, \
DetectionAugmentation
from action_detection.nn.data.core import ImageSize
from action_detection.nn.models import SSDHeadDesc
from action_detection.nn.parameters.common import AttributedDict as Dict
from action_detection.nn.parameters.detector_parameters import DetectorParams
class ActionParams(DetectorParams):
    """Class to control the action model specific parameters.
    """

    def _configure_params(self):
        """Returns the parameters for action network.

        Assembles learning-rate, matching/loss, augmentation, SSD-head and
        action-head parameter groups from self._config_values (populated by
        the DetectorParams base class).

        :return: Parameters
        """
        # Learning-rate schedule (epoch-based values converted to steps).
        lr_params = Dict(schedule=self._config_values.LR_SCHEDULE,
                         boundaries=[int(e * self._epoch_num_steps) for e in self._config_values.LR_EPOCH_DROPS],
                         values=self._config_values.LR_EPOCH_VALUES,
                         init_value=self._config_values.LR_INIT_VALUE,
                         first_decay_steps=int(self._config_values.LR_FIRST_DECAY_EPOCH * self._epoch_num_steps),
                         t_mul=self._config_values.LR_T_MUL,
                         m_mul=self._config_values.LR_M_MUL,
                         alpha=self._config_values.LR_ALPHA)
        # Multibox (detection head) matching and loss parameters.
        mbox_params = Dict(threshold=self._config_values.MBOX_THRESHOLD,
                           variance=self._config_values.VARIANCE,
                           bg_class=self._config_values.DETECTION_BG_CLASS_ID,
                           neg_factor=self._config_values.MBOX_NEG_FACTOR,
                           cl_weight=self._config_values.MBOX_CL_WEIGHTS,
                           entropy_weight=self._config_values.MBOX_ENTROPY_WEIGHT,
                           max_num_samples_per_gt=self._config_values.MBOX_MAX_NUM_MATCHES_PER_GT,
                           matches_drop_ratio=self._config_values.MBOX_MATCHES_DROP_RATIO,
                           instance_normalization=self._config_values.MBOX_DO_INSTANCE_NORMALIZATION,
                           comp_loss_max_num_samples=self._config_values.MBOX_COMPACTNESS_LOSS_MAX_NUM_SAMPLES,
                           repulsion_loss_weight=self._config_values.MBOX_REPULSION_LOSS_WEIGHT,
                           focal_alpha=self._config_values.MBOX_FOCAL_ALPHA,
                           focal_gamma=self._config_values.MBOX_FOCAL_GAMMA,
                           gh_num_bins=self._config_values.MBOX_GRADIENT_HARMONIZED_LOSS_NUM_BINS)
        image_size = ImageSize(*self._config_values.IMAGE_SIZE)
        # Geometric (crop/expand) augmentation applied to image+boxes pairs.
        tuple_augmentation = DetectionAugmentation(
            self._config_values.FREE_PROB, self._config_values.EXPAND_PROB,
            self._config_values.CROP_PROB, self._config_values.MAX_EXPAND_RATIO,
            self._config_values.CROP_SCALE_DELTA, self._config_values.CROP_SCALE_LIMITS,
            self._config_values.CROP_SHIFT_DELTA, float(image_size.h) / float(image_size.w))
        # Photometric augmentation applied to the image only.
        image_augmentation = AugmentFactory() \
            .add(BrightnessAugmentor(self._config_values.BRIGHTNESS_DELTA)) \
            .add(SaturationAugmentor(self._config_values.SATURATION_LIMITS))
        # One SSD head description per anchor scale.
        head_params = []
        for scale in self._config_values.NORMALIZED_ANCHORS:
            head_params.append(SSDHeadDesc(scale=scale,
                                           internal_size=self._config_values.INTERNAL_HEAD_SIZES[scale],
                                           num_classes=self._config_values.DETECTION_NUM_CLASSES,
                                           anchors=self._scale_anchors(self._config_values.NORMALIZED_ANCHORS[scale],
                                                                       image_size.h, image_size.w),
                                           clip=False,
                                           offset=0.5))
        # Action-recognition head: embedding, scale schedule and loss weights.
        action_params = Dict(num_actions=len(self._config_values.VALID_ACTION_NAMES),
                             embedding_size=self._config_values.ACTION_EMBEDDING_SIZE,
                             undefined_action_id=self._config_values.UNDEFINED_ACTION_ID,
                             num_centers_per_action=self._config_values.NUM_CENTERS_PER_ACTION,
                             scale_start=self._config_values.SCALE_START_VALUE,
                             scale_end=self._config_values.SCALE_END_VALUE,
                             scale_num_steps=int(self._config_values.SCALE_NUM_EPOCHS * self._epoch_num_steps),
                             scale_power=self._config_values.SCALE_POWER,
                             max_entropy_weight=self._config_values.ACTION_ENTROPY_WEIGHT,
                             focal_alpha=self._config_values.ACTION_FOCAL_ALPHA,
                             focal_gamma=self._config_values.ACTION_FOCAL_GAMMA,
                             glob_pull_push_margin=self._config_values.GLOB_PULL_PUSH_MARGIN,
                             local_push_margin=self._config_values.LOCAL_PUSH_MARGIN,
                             num_samples=self._config_values.NUM_SAMPLES_PER_CLASS,
                             local_push_top_k=self._config_values.LOCAL_PUSH_LOSS_TOP_K,
                             weight_limits=self._config_values.ADAPTIVE_WEIGHT_LIMITS,
                             ce_loss_weight=self._config_values.CE_LOSS_WEIGHT,
                             auxiliary_loss_weight=self._config_values.AUXILIARY_LOSS_WEIGHT,
                             matches_threshold=self._config_values.MATCHES_THRESHOLD,
                             max_num_samples_per_gt=self._config_values.MAX_NUM_MATCHES_PER_GT,
                             sample_matches_drop_ratio=self._config_values.MATCHES_DROP_RATIO,
                             glob_pull_push_loss_top_k=self._config_values.GLOB_PULL_PUSH_LOSS_TOP_K,
                             center_loss_top_k=self._config_values.CENTER_LOSS_TOP_K,
                             center_loss_weight=self._config_values.CENTER_LOSS_WEIGHT,
                             num_bins=self._config_values.ACTION_GRADIENT_HARMONIZED_LOSS_NUM_BINS)
        # id -> human-readable action name.
        action_names_map = {i: v for i, v in enumerate(self._config_values.VALID_ACTION_NAMES)}
        return Dict(max_num_objects_per_image=self._config_values.MAX_NUM_DETECTIONS_PER_IMAGE,
                    num_classes=self._config_values.DETECTION_NUM_CLASSES,
                    bg_class=self._config_values.DETECTION_BG_CLASS_ID,
                    num_actions=len(self._config_values.VALID_ACTION_NAMES),
                    tuple_augmentation=tuple_augmentation,
                    image_augmentation=image_augmentation,
                    lr_params=lr_params,
                    mbox_params=mbox_params,
                    head_params=head_params,
                    action_params=action_params,
                    labels_map=self._config_values.ACTIONS_MAP,
                    valid_actions=self._config_values.VALID_ACTION_NAMES,
                    ignore_classes=self._config_values.IGNORE_CLASSES,
                    use_class_balancing=self._config_values.USE_CLASS_BALANCING,
                    det_conf=self._config_values.DETECTION_CONFIDENCE,
                    action_conf=self._config_values.ACTION_CONFIDENCE,
                    action_colors_map=self._config_values.ACTION_COLORS_MAP,
                    action_names_map=action_names_map,
                    undefined_action_name=self._config_values.UNDEFINED_ACTION_NAME,
                    undefined_action_color=self._config_values.UNDEFINED_ACTION_COLOR)
|
# Learning script: variable assignment, built-in types, print and input.
mensagem = 'oi, python'  # str
numero = 10  # int
pi = 3.14  # float
# NOTE(review): the results of these type() calls are discarded; wrap them
# in print() to actually display the types.
type(mensagem)
type(numero)
type(pi)
meu_nome = 'Rondinele'
print("Hello, World!")
# input() always returns a string; convert with int() before arithmetic.
numero = input('Digite um número:\n')
print(numero)
# Print the min/max range of each of the 4 features for every iris species
# in the training set. Features are parsed as floats so the ranges are
# numeric (the original compared strings, i.e. lexicographically), and the
# file handle is closed via a context manager.
with open('iris_train.csv') as handle:
    lines = handle.readlines()

# Per-species lists of 4-feature float rows; unknown labels (e.g. a header
# row) are skipped, matching the original's behaviour.
species_rows = {
    'Iris-setosa': [],
    'Iris-versicolor': [],
    'Iris-virginica': [],
}

for line in lines:
    fields = line.strip().split(',')
    label = fields[-1]
    if label in species_rows:
        species_rows[label].append([float(v) for v in fields[:4]])

# One paragraph per feature, separated by blank lines, replacing the four
# copy-pasted sort_by_N helpers (min/max make the sorting unnecessary).
for feature in range(4):
    for name, key in (('setosa', 'Iris-setosa'),
                      ('versicolor', 'Iris-versicolor'),
                      ('virginica', 'Iris-virginica')):
        values = [row[feature] for row in species_rows[key]]
        print(name + ' : ', min(values), ' < ', 'range', ' < ', max(values))
    if feature < 3:
        print()
|
__author__ = 'Sushant'
from sklearn.base import BaseEstimator, ClassifierMixin
from scipy.optimize import minimize
from sklearn.metrics import log_loss
import numpy
from sklearn.cross_validation import StratifiedShuffleSplit
"""
Usage:
estimators = []
estimators.append(RandomForestClassifier(n_estimators = 100))
estimators.append(GMM(n_components = 9))
C_MC = MegaClassifier(estimators = estimators, xv_tries = 5)
C_MC. fit(X_train, y_train)
C_MC.predict_proba(X_test)
Description:
The MegaClassifier object automatically partitions training data in a
stratified manner into 'xv_tries' number of folds (default 4), trains
all models in 'estimators' with the stratified training sets and records
their output on the stratified validation set.
During optimization it selects weights that result in minimization of
averaged log-loss across all the validation sets.
"""
class StratifiedSplit(object):
    """Helper wrapping sklearn's StratifiedShuffleSplit into a single
    train/test split.

    NOTE(review): uses the long-deprecated sklearn.cross_validation API
    (removed in modern scikit-learn); the file is Python 2 era.
    """
    @staticmethod
    def train_test_split( X, y, test_size = 0.2):
        """Return one stratified (X_train, X_test, y_train, y_test) split.

        n_iter=1, so the loop below runs exactly once and returns on the
        first iteration.
        """
        res = StratifiedShuffleSplit(y, n_iter=1, test_size=test_size)
        for ind_train, ind_test in res:
            X_train = []
            y_train = []
            X_test = []
            y_test = []
            for ind in ind_train:
                X_train.append(X[ind])
                y_train.append(y[ind])
            for ind in ind_test:
                X_test.append(X[ind])
                y_test.append(y[ind])
            return X_train, X_test, y_train, y_test
class MegaClassifier(BaseEstimator, ClassifierMixin):
    """Blend several estimators by optimizing their probability weights on
    stratified validation folds (see the module docstring for usage).

    NOTE(review): Python 2 code (`xrange`); also `fit` does not return
    `self`, so sklearn-style chaining/Pipeline use will not work.
    """
    def __init__(self, estimators, xv_tries=4, test_size=0.2):
        # estimators: list of classifiers exposing fit/predict_proba
        # xv_tries: number of stratified splits used to fit the weights
        # test_size: validation fraction per split
        self.estimators = estimators
        self.xv_tries = xv_tries
        self.test_size = test_size

    def fit(self, X, y):
        """Fit every estimator on each stratified split, optimize the
        blending weights to minimize mean validation log-loss, then refit
        all estimators on the full data."""
        self.X_trains = []
        self.y_trains = []
        self.X_valids = []
        self.y_valids = []
        for i in xrange(self.xv_tries):
            Xt, Xv, yt, yv = StratifiedSplit.train_test_split(X, y, test_size=self.test_size)
            self.X_trains.append(Xt)
            self.X_valids.append(Xv)
            self.y_trains.append(yt)
            self.y_valids.append(yv)
        # train the classifiers
        self.all_xv_predictions = []
        for ind, Xt in enumerate(self.X_trains):
            cur_xv_predictions = []
            for estimator in self.estimators:
                #new_est = copy.deepcopy(estimator)
                #new_est.fit(Xt, self.y_trains[ind])
                estimator.fit(Xt, self.y_trains[ind])
                cur_xv_predictions.append(estimator.predict_proba(self.X_valids[ind]))
            self.all_xv_predictions.append(cur_xv_predictions)
        num_estimators = len(self.estimators)
        # start from uniform weights, constrained to [0, 1] and summing to 1
        initial_weights = [1.0 / float(num_estimators) for i in xrange(num_estimators)]
        print ("Optimizing....")
        bounds = [(0, 1) for i in xrange(num_estimators)]
        constraints = {'type': 'eq', 'fun': lambda w: 1 - sum(w)}
        res = minimize(self.__find_best_blending_weights, initial_weights, bounds=bounds, constraints=constraints)
        self.final_weights = res.x
        print ("Optimization finished...")
        print ("Weights:")
        print (self.final_weights)
        # refit every estimator on the complete training set
        for estimator in self.estimators:
            estimator.fit(X, y)

    def __find_best_blending_weights(self, weights):
        """Objective for the optimizer: mean log-loss of the weighted blend
        across all validation folds."""
        log_losses = []
        for ind1, xv_predictions in enumerate(self.all_xv_predictions):
            y_final_pred_prob = None
            for ind, est_predictions in enumerate(xv_predictions):
                if y_final_pred_prob is None:
                    y_final_pred_prob = weights[ind] * est_predictions
                else:
                    y_final_pred_prob = numpy.add(y_final_pred_prob, (weights[ind] * est_predictions))
            log_losses.append(log_loss(self.y_valids[ind1], y_final_pred_prob))
        log_losses = numpy.array(log_losses)
        return log_losses.mean()

    def predict_proba(self, X):
        """Return the weighted average of all estimators' predict_proba."""
        y_final_pred_prob = None
        for ind, estimator in enumerate(self.estimators):
            y_pp_cur = estimator.predict_proba(X)
            if y_final_pred_prob is None:
                y_final_pred_prob = self.final_weights[ind] * y_pp_cur
            else:
                y_final_pred_prob = numpy.add(y_final_pred_prob, (self.final_weights[ind] * y_pp_cur))
        return y_final_pred_prob
#17/02/21
#What does this code do?
# This code is designed to take a single line input of the class, which according to Grok will be given in Title case (Only first letter capitalised.) with a single space between each name.
# We use the .sort() function in this code, as well as the .split() function. Put simply, the sort() function puts things in order, i.e. letters in alphabetical order and numbers in ascending order.
# Read the whole class on one line, then print the roll in alphabetical order.
names = input("Students: ").split()
print("Class Roll")
for name in sorted(names):
    print(name)
#What is happening here?
# Our code is taking an input, of all the students names, on a single line, and assigning that string to the "data" variable.
# From there, it splits "data" into separate words (the students' names) and assigns them to a list ("students")
# It then sorts the list alphabetically, and then prints it line by line using a for loop.
|
from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_only
import csv
import os
import numpy as np
from os.path import join
from argparse import Namespace
from typing import Optional, Dict, Union, Any
from warnings import warn
import torch
from pkg_resources import parse_version
from torch.utils.tensorboard import SummaryWriter
import matplotlib.cm
class TBLogger(LightningLoggerBase):
    """Lightning logger that fans out to one TensorBoard SummaryWriter per phase.

    A separate writer (and log subdirectory under ``out_dir``) is kept for each
    entry of ``_sub_loggers``: 'train', 'val', 'train_epoch', 'val_epoch'.
    Hyperparameters are read from the flat ``hparams`` dict using dotted keys
    such as "IO.LOG_INTERVAL" and "IO.SAMPLES_PER_STEP".
    """
    # CSV file name used to persist the sanitized hyperparameter tags.
    NAME_CSV_TAGS = 'meta_tags.csv'
    def __init__(self,
                 out_dir: str,
                 hparams: dict,
                 i_batch=0,
                 **kwargs):
        # out_dir:  root directory that receives one subdirectory per phase.
        # hparams:  flat hyperparameter dict consulted throughout logging.
        # i_batch:  index of the sample within a batch used for image summaries.
        # kwargs:   forwarded verbatim to every SummaryWriter.
        super().__init__()
        self.out_dir = out_dir
        self._experiment = None  # type: Optional[Dict[str, SummaryWriter]]
        self._sub_loggers = ('train', 'val', 'train_epoch', 'val_epoch')
        self.tags = {}
        self._kwargs = kwargs
        self.hparams = hparams
        self.i_batch = i_batch
        self.stages = ('stage1', 'stage2', 'stage3')
    @property
    def experiment(self) -> Optional[Dict[str, SummaryWriter]]:
        """Lazily create (once) and return the dict of per-phase writers."""
        if self._experiment is not None:
            return self._experiment
        self._experiment = {}
        for sub in self._sub_loggers:
            os.makedirs(join(self.out_dir, sub), exist_ok=True)
            self._experiment[sub] = SummaryWriter(log_dir=join(self.out_dir, sub), **self._kwargs)
        return self._experiment
    @rank_zero_only
    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
        """Sanitize *params* and record them as TensorBoard hparam summaries."""
        params = self._convert_params(params)
        params = self._flatten_dict(params)
        sanitized_params = self._sanitize_params(params)
        if parse_version(torch.__version__) < parse_version("1.3.0"):
            warn(
                f"Hyperparameter logging is not available for Torch version {torch.__version__}."
                " Skipping log_hyperparams. Upgrade to Torch 1.3.0 or above to enable"
                " hyperparameter logging."
            )
        else:
            from torch.utils.tensorboard.summary import hparams
            exp, ssi, sei = hparams(sanitized_params, {})
            # hparam summaries are written only through the 'train' writer
            writer = self.experiment[self._sub_loggers[0]]._get_file_writer()
            writer.add_summary(exp)
            writer.add_summary(ssi)
            writer.add_summary(sei)
        # some alternative should be added
        self.tags.update(sanitized_params)
    @rank_zero_only
    def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
        # Deliberate no-op: scalars are logged explicitly via add_scalars/add_lr.
        return
    @rank_zero_only
    def save(self) -> None:
        """Flush every writer and persist the hparam tags as a CSV file."""
        super().save()
        for _, exp in self.experiment.items():
            try:
                exp.flush()
            except AttributeError:
                # you are using PT version (<v1.2) which does not have implemented flush
                exp._get_file_writer().flush()
        # prepare the file path
        meta_tags_path = os.path.join(self.out_dir, self._sub_loggers[0], self.NAME_CSV_TAGS)
        # save the metatags file
        with open(meta_tags_path, 'w', newline='') as csvfile:
            fieldnames = ['key', 'value']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({'key': 'key', 'value': 'value'})
            for k, v in self.tags.items():
                writer.writerow({'key': k, 'value': v})
    @rank_zero_only
    def finalize(self, status: str) -> None:
        """Final flush when training ends; *status* is ignored."""
        self.save()
    @property
    def name(self) -> str:
        # Lightning display name; the output directory path is reused.
        return self.out_dir
    @property
    def version(self) -> int:
        # Run versioning is not used; a single fixed version is reported.
        return 0
    def _scale_depth(self, x, depth_max=None):
        """Normalize a depth map by *depth_max*, defaulting to the configured maximum."""
        if depth_max is None:
            return x / self.hparams["DATA.DEPTH_MAX"]
        return x / depth_max
    def _should_log_scalars(self, tag, batch_idx, global_step) -> bool:
        """Return True when scalars should be written for this phase/step."""
        del batch_idx
        assert tag != 'val', "Scalars should be logged only in validation_epoch_end."
        if tag in ('val_epoch', 'train_epoch'):
            return True
        if tag == 'train':
            # throttle per-step train scalars to the configured interval
            return global_step % self.hparams["IO.LOG_INTERVAL"] == 0
        raise NotImplementedError("This scenario has not been implemented.")
    def _should_log_summaries(self, tag, batch_idx, global_step) -> bool:
        """Return True when image summaries should be written for this phase/step."""
        assert tag not in ('train_epoch', 'val_epoch'), "Summaries should not be logged in *_epoch_end methods."
        if tag == 'val':
            return batch_idx % self.hparams["IO.LOG_INTERVAL"] == 0
        if tag == 'train':
            return global_step % self.hparams["IO.LOG_INTERVAL"] == 0
        raise NotImplementedError("This scenario has not been implemented.")
    @rank_zero_only
    def add_scalars(self, tag, losses, errors, batch_idx, global_step, prefix=""):
        """Write loss scalars and per-stage error scalars for phase *tag*."""
        if not self._should_log_scalars(tag, batch_idx, global_step):
            return
        # x-axis is expressed in samples seen rather than optimizer steps
        global_sample = self.hparams["IO.SAMPLES_PER_STEP"] * global_step
        del global_step
        writer = self._experiment[tag]  # type: SummaryWriter
        for key, val in losses.items():
            writer.add_scalar(f"0.Main/{prefix}{key}", val, global_sample)
        # final-stage errors are promoted to the main dashboard section
        for i, (k, v) in enumerate(errors['stage3'].items()):
            writer.add_scalar(f"0.Main/{prefix}{i}.{k}", v, global_sample)
        for i_stage, stage in enumerate(self.stages):
            for i, (k, v) in enumerate(errors[stage].items()):
                writer.add_scalar(f"{3 - i_stage}.Errors{stage.title()}/{prefix}{k}", v, global_sample)
    @rank_zero_only
    def add_lr(self, tag, optimizers, batch_idx, global_step):
        """Log the learning rate(s) of one or several optimizers."""
        if not self._should_log_scalars(tag, batch_idx, global_step):
            return
        global_sample = self.hparams["IO.SAMPLES_PER_STEP"] * global_step
        del global_step
        writer = self._experiment[tag]  # type: SummaryWriter
        # unwrap a single-element optimizer collection
        if isinstance(optimizers, (list, tuple)) and len(optimizers) == 1:
            optimizers = optimizers[0]
        if not isinstance(optimizers, (list, tuple)):
            writer.add_scalar("4.Train/Lr", optimizers.param_groups[0]['lr'], global_sample)
        else:
            for i in range(len(optimizers)):
                writer.add_scalar(f"4.Train/Lr{i}", optimizers[i].param_groups[0]['lr'], global_sample)
    @rank_zero_only
    def add_summaries(self, tag, batch, outputs, batch_idx, global_step):
        """Write image summaries (input image, GT/predicted depth, error maps).

        Only the sample at index ``self.i_batch`` of the batch is visualized.
        Which summaries are emitted is controlled by hparams["IO.SUMMARIES"].
        """
        if not self._should_log_summaries(tag, batch_idx, global_step):
            return
        global_sample = self.hparams["IO.SAMPLES_PER_STEP"] * global_step
        del global_step
        writer = self._experiment[tag]  # type: SummaryWriter
        if 'image' in self.hparams['IO.SUMMARIES']:
            # multi_view_image = torch.cat(torch.unbind(batch['image'][self.i_batch], 0), -1)  # (3, H, W*V)
            writer.add_image('0.multi_view_image', batch['image'][self.i_batch], global_sample,
                             dataformats='CHW')
            # multi_view_image_noaug = torch.cat(torch.unbind(batch['image_noaug'][self.i_batch], 0), -1)  # (3, H, W*V)
            # writer.add_image('0.multi_view_image_noaug', multi_view_image_noaug, global_sample,
            #                  dataformats='CHW')
            # photometric loss is turned on
            # if 'img_warped' in outputs['stage3']:
            #     writer.add_image('0.warped', torch.cat(outputs['stage3']['img_warped'], -1)[self.i_batch],
            #                      global_sample,
            #                      dataformats='CHW')
        if 'depth' in self.hparams['IO.SUMMARIES']:
            for stage in self.stages:
                depth_gt = batch['depth'][stage][self.i_batch]
                depth_pred = outputs[stage]['depth'][self.i_batch]
                error = torch.abs(depth_gt - depth_pred)
                if 'mask_total' in batch:
                    mask = batch['mask_total'][stage][self.i_batch]
                else:
                    mask = batch['mask'][stage][self.i_batch]
                # NOTE(review): hard-coded display range; the per-sample
                # depth_max variant below was deliberately disabled.
                depth_max = 10
                # depth_max = batch['depth_max'][self.i_batch]
                writer.add_image(f'1.depth_gt/{stage}', self._scale_depth(depth_gt, depth_max)[None, :, :],
                                 global_sample, dataformats='CHW')
                writer.add_image(f'2.depth_pred/{stage}',
                                 self._scale_depth(depth_pred, depth_max)[None, :, :],
                                 global_sample, dataformats='CHW')
                writer.add_image(f'3.depth_err_abs/{stage}',
                                 self._scale_depth(error * mask, depth_max)[None, :, :],
                                 global_sample, dataformats='CHW')
                writer.add_image(f'4.depth_err_rel/{stage}',
                                 (error * mask)[None, :, :] / torch.max(error * mask),
                                 global_sample, dataformats='CHW')
        # if 'confidence' in self.hparams['IO.SUMMARIES']:
        #     for stage in ('stage1', 'stage2', 'stage3'):
        #         writer.add_image(f'5.confidence/{stage}',
        #                          outputs[stage]['confidence'][self.i_batch][None, :, :],
        #                          global_sample, dataformats='CHW')
        #         writer.add_image(f'6.mask/{stage}',
        #                          batch['mask'][stage][self.i_batch][None, :, :],
        #                          global_sample, dataformats='CHW')
        # if 'warp' in self.hparams['IO.SUMMARIES']:
        #     for stage in ('stage1', 'stage2', 'stage3'):
        #         writer.add_image(f'7.warp_image/{stage}',
        #                          torch.cat(torch.unbind(batch['warp_image'][stage][self.i_batch], 0), -1),
        #                          global_sample, dataformats='CHW')
        #         writer.add_image(f'8.warp_mask/{stage}',
        #                          torch.cat(torch.unbind(batch['warp_mask'][stage][self.i_batch], 0), -1),
        #                          global_sample, dataformats='CHW')
        #         error = torch.unbind(batch['warp_image'][stage][self.i_batch], 0)  # (V)(C, H, W)
        #         error = [torch.mean(torch.abs(x - batch['warp_image'][stage][self.i_batch][0]), 0) for x in
        #                  error]  # [V](H, W)
        #         for i in range(len(error)):
        #             error[i][batch['mask_total'][stage][self.i_batch, i, 0] < 0.5] = 0.0
        #             error[i] = colorize(error[i])
        #         writer.add_image(f'9.warp_error/{stage}',
        #                          torch.cat(error, -1),
        #                          global_sample, dataformats='CHW')
# Module-level 256-entry RGB lookup table for the 'plasma' colormap,
# stored as a torch tensor so colorize() can gather from it on any device.
_cm = matplotlib.cm.get_cmap("plasma")
_colors = _cm(np.arange(256))[:, :3]
_colors = torch.from_numpy(_colors)
# https://gist.github.com/jimfleming/c1adfdb0f526465c99409cc143dea97b#gistcomment-2398882
def colorize(value):
    """
    Map a 2D tensor of values in [0, 1] to the 'plasma' matplotlib colormap.

    Arguments:
    - value: 2D Tensor of shape [height, width], expected to already be
      normalized to [0, 1].  Values outside that range produce out-of-range
      indices into the 256-entry lookup table — the caller must pre-clamp.

    Returns a float tensor of shape [3, height, width] (CHW), suitable for
    SummaryWriter.add_image with dataformats='CHW'.

    (The previous docstring advertised vmin/vmax/cmap parameters and a
    [height, width, 4] uint8 result; neither matches this implementation.)
    """
    global _colors
    # move the shared LUT to the input's device once; subsequent calls reuse it
    _colors = _colors.to(value.device)
    size = list(value.size())
    # quantize values to LUT indices 0..255
    idx = torch.reshape((255 * value).to(torch.long), (-1,))
    idx = idx.unsqueeze(1).expand(-1, 3)
    out = torch.gather(_colors, 0, idx)
    out = torch.reshape(out, size + [3])
    # HWC -> CHW
    out = torch.stack(torch.unbind(out, -1), 0)
    return out
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 5 18:55:22 2018
@author: kyungdoehan
"""
import pandas as pd
import os
import numpy as np
#%% Importing lake coordinates and levels
class lake_data_read:
    """Read a TISC lake CSV for a given time slice and build MODFLOW-style
    lake arrays plus general-head-boundary (GHB) records.

    NOTE(review): the 'rch' parameter is accepted but never used here —
    presumably kept for call-site symmetry; verify against callers.
    """
    def __init__(self, Ma, nz, ny, nx, grid, rch, dx, dy, lakeK, top, bot, lakeb):
        # thickness of the top layer (top surface minus first layer bottom)
        self.btop = top - bot[0, :, :]
        self.dir = os.getcwd()
        # lake table for the requested age, e.g. tisc_output/lakes_<Ma>0Ma.csv
        self.df = pd.read_csv(self.dir + "/tisc_output/lakes_" + str(Ma) + "0Ma.csv",\
                              sep=",", header=None)
        self.df.columns = ["x", "y", "level", "type", "id", "nlakes", "evap", "inflow"]
        self.x = self.df.loc[:, "x"].values.astype('int')
        self.y = self.df.loc[:, "y"].values.astype('int')
        self.level = self.df.loc[:, "level"].values.astype('float32')
        self.type = self.df.loc[:, "type"].values.astype('str')
        self.id = self.df.loc[:, "id"].values.astype('int')
        # every row repeats the lake count; take it from the first row
        self.nlakes = self.df.loc[:, "nlakes"].values.astype('int')[0]
        self.evap = self.df.loc[:, "evap"].values.astype('float32')
        self.inflow = self.df.loc[:, "inflow"].values.astype('float32')
        # per-lake stage, evaporation and inflow, indexed by lake id - 1
        self.stages = [0 for i in range(self.nlakes)]
        self.levap = [0 for i in range(self.nlakes)]
        self.linflow = [0 for i in range(self.nlakes)]
        self.bdlknc = np.zeros((nz, ny, nx), dtype=np.float32)
        # collapse the per-cell rows into one stage/evap/inflow value per lake
        # (last matching row wins)
        for i in range(1, self.nlakes+1):
            for j in range(len(self.id)):
                if self.id[j] == i:
                    self.stages[i-1] = self.level[j]
                    self.levap[i-1] = self.evap[j]
                    self.linflow[i-1] = self.inflow[j]
        self.lakarr = np.zeros((nz, ny, nx), dtype=np.int16)
        self.ghb = [0 for i in range(len(self.id))]
        for lake in range(len(self.x)):
            # map lake coordinates (centered on the grid origin) to array
            # indices — TODO confirm the abs() handling of edge cells
            self.ycoord = np.abs(self.y[lake] - int((grid-1)/2))
            self.xcoord = np.abs(self.x[lake] + int((grid-1)/2))
            # if self.ycoord != 0 and self.xcoord != 0:
            # if self.ycoord != ny-1 and self.xcoord != 0:
            # NOTE(review): level (float) is stored into an int16 array,
            # truncating the stage — confirm this is intended
            self.lakarr[0, self.ycoord, self.xcoord] = self.level[lake]
            # GHB record: [layer, row, col, stage, conductance]
            self.ghb[lake] = [0, self.ycoord, self.xcoord, self.level[lake],\
                              dx*dy*lakeK/(lakeb)]
        # stress-period dict expected by the groundwater model (period 0 only)
        self.ghb = {0: self.ghb}
def lake(Ma, nz, ny, nx, grid, rch, dx, dy, lakeK, top, bot, lakeb):
    """Factory wrapper: build and return a lake_data_read instance."""
    reader = lake_data_read(Ma, nz, ny, nx, grid, rch, dx, dy, lakeK, top, bot, lakeb)
    return reader
|
from plugins.module_utils.foreman_helper import _foreman_spec_helper
def test_empty_entity():
    """An empty spec must yield empty foreman and argument specs, unmutated."""
    empty_spec = {}
    foreman_spec, argument_spec = _foreman_spec_helper(empty_spec)
    # the helper must not mutate its input
    assert empty_spec == {}
    assert foreman_spec == {}
    assert argument_spec == {}
def test_full_entity():
    """_foreman_spec_helper expands entity/entity_list/nested_list entries
    into both a foreman spec and an Ansible argument spec, without mutating
    the input spec."""
    spec = {
        'name': {},
        'count': {'type': 'int', 'aliases': ['number']},
        'facilities': {'type': 'list'},
        'street': {'type': 'entity', 'flat_name': 'street_id'},
        'quarter': {'type': 'entity', 'resource_type': 'edges'},
        'houses': {'type': 'entity_list', 'flat_name': 'house_ids'},
        'prices': {'type': 'nested_list', 'foreman_spec': {
            'value': {'type': 'int'},
        }},
        'tenant': {'invisible': True},
    }
    foreman_spec, argument_spec = _foreman_spec_helper(spec)
    # the input spec is left untouched
    assert spec == {
        'name': {},
        'count': {'type': 'int', 'aliases': ['number']},
        'facilities': {'type': 'list'},
        'street': {'type': 'entity', 'flat_name': 'street_id'},
        'quarter': {'type': 'entity', 'resource_type': 'edges'},
        'houses': {'type': 'entity_list', 'flat_name': 'house_ids'},
        'prices': {'type': 'nested_list', 'foreman_spec': {
            'value': {'type': 'int'},
        }},
        'tenant': {'invisible': True},
    }
    # foreman_spec: entities gain flat_name companions and default
    # resource_types; aliases are dropped; 'invisible' flags are stripped
    assert foreman_spec == {
        'name': {},
        'count': {'type': 'int'},
        'facilities': {'type': 'list'},
        'street': {'type': 'entity', 'flat_name': 'street_id', 'resource_type': 'streets'},
        'street_id': {},
        'quarter': {'type': 'entity', 'flat_name': 'quarter_id', 'resource_type': 'edges'},
        'quarter_id': {},
        'houses': {'type': 'entity_list', 'flat_name': 'house_ids', 'resource_type': 'houses'},
        'house_ids': {'type': 'list'},
        'prices': {'type': 'nested_list', 'foreman_spec': {'value': {'type': 'int'}}, 'ensure': False},
        'tenant': {},
    }
    # argument_spec: entities become plain/list parameters; invisible keys
    # are omitted entirely
    assert argument_spec == {
        'name': {},
        'count': {'type': 'int', 'aliases': ['number']},
        'facilities': {'type': 'list'},
        'street': {},
        'quarter': {},
        'houses': {'type': 'list', 'elements': 'str'},
        'prices': {'type': 'list', 'elements': 'dict', 'options': {
            'value': {'type': 'int'},
        }},
    }
|
#!/usr/bin/env python
from distutils.core import setup
from setuptools import setup,find_packages
# Package metadata for the Glasgow CS lab-testing framework.
setup(
    name='glasgowcs_labtest',
    version='1.0.6',
    description='Testing framework for Glasgow CS classes',
    url='https://github.com/jakelever/glasgowcs_labtest',
    author='Jake Lever',
    author_email='jake.lever@glasgow.ac.uk',
    license='MIT',
    packages=find_packages(),
)
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
import pandas as pd
import numpy as np
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from sqlalchemy.orm import Session
from sqlalchemy import select
from datetime import datetime
from custmaker.making import Customer, KoreanLastname, KoreanFirstname,\
AgeStat, RegionStat, SexStat
def _percent_format(x, precision):
return round(x*100, precision)
def _caculate_age(x):
return datetime.today().year - int(x) + 1
def _create_compare_plots(conn):
    """Build the Dash app comparing reference Korean-population statistics
    against the generated customer table, then run the server.

    *conn* is a SQLAlchemy connection; reference tables (SexStat,
    KoreanLastname, KoreanFirstname, AgeStat) and the Customer table are read
    through pandas.  All dataframes below are closed over by the callback
    functions defined further down in this function.
    """
    customer_df = pd.read_sql(select(Customer), conn)
    # reference sex distribution; Korean labels mapped to English
    sex_stat_df = pd.read_sql(select(SexStat), conn)
    sex_stat_df.replace({'남': 'Male', '여': 'Female'}, inplace=True)
    sex_grouped = customer_df.groupby('sex')
    customer_sex_df = sex_grouped.size().reset_index()
    customer_sex_df.replace({'남': 'Male', '여': 'Female'}, inplace=True)
    # reference name ratios, converted from fractions to percentages
    lastname_df = pd.read_sql(select(KoreanLastname), conn).head(25)
    lastname_df['ratio'] = lastname_df['ratio'].apply(
        _percent_format, args=(2,))
    firstname_df = pd.read_sql(select(KoreanFirstname), conn)
    firstname_df['ratio'] = firstname_df['ratio'].apply(
        _percent_format, args=(2,))
    # actual customer lastname distribution as percentages, top 25
    customer_lastname_df = customer_df.groupby('lastname').size().reset_index()
    customer_lastname_df.columns = ['lastname', 'count']
    customer_lastname_df.sort_values(by='count', ascending=False, inplace=True)
    customer_lastname_df['ratio'] = round((
        customer_lastname_df['count'] / customer_lastname_df['count'].sum()
    )*100, 2)
    customer_lastname_df = customer_lastname_df.head(25)
    customer_firstname_df = customer_df.groupby('firstname').size().\
        reset_index()
    customer_firstname_df.columns = ['firstname', 'count']
    customer_firstname_df.sort_values(
        by='count', ascending=False, inplace=True
    )
    customer_firstname_df['ratio'] = round((
        customer_firstname_df['count'] / customer_firstname_df['count'].sum()
    )*100, 2)
    # reference and actual age distributions, labelled "<n> years"
    age_stat_df = pd.read_sql(select(AgeStat), conn)
    age_stat_df['ratio'] = age_stat_df['ratio'].apply(
        _percent_format, args=(2,))
    age_stat_df['age'] = age_stat_df['age'].str[:-1].astype(str)+' years'
    customer_age_df = customer_df['birthdate'].str[:4].apply(_caculate_age)\
        .to_frame()
    customer_age_df.columns = ['age']
    customer_age_df = customer_age_df.groupby('age').size().reset_index()
    customer_age_df.columns = ['age', 'count']
    customer_age_df['ratio'] = round((
        customer_age_df['count'] / customer_age_df['count'].sum()
    )*100, 2)
    customer_age_df['age'] = customer_age_df['age'].astype(str) + ' years'
    # side-by-side pie charts: reference vs actual gender split
    sex_fig = make_subplots(
        rows=1, cols=2, specs=[[{"type": "pie"}, {"type": "pie"}]]
    )
    sex_fig.add_trace(go.Pie(
        values=sex_stat_df.iloc[:, 1].to_list(),
        labels=sex_stat_df.iloc[:, 0].to_list(),
        domain=dict(x=[0, 0.5]),
        name="Reference", title="Reference", titlefont_size=17),
        row=1, col=1
    )
    sex_fig.add_trace(go.Pie(
        values=customer_sex_df.iloc[:, 1].to_list(),
        labels=customer_sex_df.iloc[:, 0].to_list(),
        domain=dict(x=[0.5, 1.0]),
        name="Actual", title="Actual", titlefont_size=17),
        row=1, col=2
    )
    sex_fig.update_traces(
        textinfo='label+percent', hoverinfo="label+percent+name",
        textfont_size=13, marker=dict(colors=['darkblue', 'red'])
    )
    sex_fig.update_layout(
        showlegend=False, title="Comparison of gender distribution",
        titlefont_size=20, title_font_color='black', title_x=0.5
    )
app = dash.Dash(__name__)
app.layout = html.Div(style={}, children=[
html.H1(
children='Compare reference and actual customer distribution',
style={
'textAlign': 'center',
}
),
dcc.Graph(id='sex-graph', figure=sex_fig),
dcc.Graph(id='lastname-graph'),
dcc.Slider(
id='slider-top_number', min=5, max=25,
value=5, step=5,
marks={i: 'Top {}'.format(i) for i in range(5, 30, 5)}
),
dcc.Graph(id='firstname-graph'),
html.Div([
"이름을 검색하세요!",
html.Br(),
html.Br(),
dcc.Input(id="firstname-input", type="text", placeholder="ex) 민준"),
html.Div(id='firstname-output', style={
'color': 'red', 'font-size': 'large'})
], style={'padding-left': '8%'}),
dcc.Graph(id='age-graph'),
dcc.RangeSlider(
id='age-range-slider',
min=0,
max=99,
step=1,
value=[0, 99],
marks={i: '{}'.format(i) for i in range(0, 100, 3)}
),
html.Br(),
html.Div(
"비교하고 싶은 연령에 마우스를 올려보세요!", style={'textAlign': 'center'}
),
html.Div(id='age-title', style={'textAlign': 'center'}),
html.Div(
[dcc.Graph(id='age-compare-graph')],
style={'padding-left': '36.5%'}
)
])
    @app.callback(
        Output("lastname-graph", "figure"),
        [Input("slider-top_number", "value")]
    )
    def change_top_number(top_number):
        """Redraw the lastname comparison bars for the selected Top-N."""
        fig = make_subplots(
            rows=1, cols=2, subplot_titles=['Reference', 'Actual'],
            specs=[[{"type": "bar"}, {"type": "bar"}]]
        )
        # closes over lastname_df / customer_lastname_df from the enclosing scope
        filtered_lastname_df = lastname_df.head(top_number)
        filtered_customer_lastname_df = customer_lastname_df.head(top_number)
        sub_fig1 = px.bar(
            filtered_lastname_df, x='lastname', y='ratio', color='ratio',
            text='ratio', labels={'ratio': 'ratio(%)'}
        )
        sub_fig2 = px.bar(
            filtered_customer_lastname_df, x='lastname', y='ratio',
            color='ratio', text='ratio', labels={'ratio': 'ratio(%)'}
        )
        fig.add_trace(sub_fig1['data'][0], row=1, col=1)
        fig.add_trace(sub_fig2['data'][0], row=1, col=2)
        fig.update_traces(textfont_size=13, texttemplate='%{text:.2}%')
        fig.update_layout(
            title=f"Comparison of lastname distribution (Top {top_number})",
            titlefont_size=20, title_font_color='black', title_x=0.5
        )
        return fig
    @app.callback(
        Output("age-graph", "figure"),
        [Input("age-range-slider", "value")]
    )
    def update_age(age):
        """Redraw the age comparison for the selected [start, end] range.

        NOTE(review): the range values are used as positional row slices,
        which only matches ages if rows are ordered one-per-year — confirm.
        """
        fig = make_subplots(
            rows=1, cols=2, subplot_titles=['Reference', 'Actual'],
            specs=[[{"type": "bar"}, {"type": "bar"}]]
        )
        start_age = age[0]
        end_age = age[1]
        filtered_age_stat_df = age_stat_df[start_age: end_age]
        fig1_color_discrete_sequence = ['#ff7700']*len(filtered_age_stat_df)
        sub_fig1 = px.bar(
            filtered_age_stat_df, x='ratio', y='age', orientation='h',
            labels={'ratio': 'ratio(%)'},
            color_discrete_sequence=fig1_color_discrete_sequence
        )
        filtered_customer_age_df = customer_age_df[start_age: end_age]
        fig2_color_discrete_sequence = ['#00c8ff']*len(
            filtered_customer_age_df)
        sub_fig2 = px.bar(
            filtered_customer_age_df, x='ratio', y='age', orientation='h',
            labels={'ratio': 'ratio(%)'},
            color_discrete_sequence=fig2_color_discrete_sequence
        )
        fig.add_trace(sub_fig1['data'][0], row=1, col=1)
        fig.add_trace(sub_fig2['data'][0], row=1, col=2)
        fig.update_traces(textfont_size=13)
        fig.update_layout(
            showlegend=False, title="Comparison of age distribution",
            titlefont_size=20, title_font_color='black', title_x=0.5,
            height=800
        )
        return fig
    @app.callback(
        Output("age-compare-graph", "figure"),
        Output("age-title", "children"),
        Input("age-graph", "hoverData")
    )
    def update_age_compare_graph(hoverData):
        """Show a two-bar reference-vs-actual chart for the hovered age."""
        # fall back to the first reference age before any hover happens
        age = (hoverData['points'][0]['label']
               if hoverData else age_stat_df['age'][0])
        age_title = age
        ref_ratio = age_stat_df[age_stat_df['age'] == age]['ratio'].values[0]
        act_ratio = customer_age_df[customer_age_df['age'] == age]['ratio']\
            .values[0]
        age_compare_df = pd.DataFrame({'category': ['Reference', 'Actual'],
                                       'ratio': [ref_ratio, act_ratio]})
        color_discrete_sequence = ['#ff7700', '#00c8ff']
        fig = px.bar(
            age_compare_df, x='category', y='ratio', text='ratio',
            color='category', labels={'ratio': 'ratio(%)'},
            color_discrete_sequence=color_discrete_sequence
        )
        fig.update_xaxes(title='')
        fig.update_yaxes(title='')
        fig.update_traces(textfont_size=18, texttemplate='%{text:.2}%')
        fig.update_layout(showlegend=False, width=500)
        return fig, age_title
    @app.callback(
        Output("firstname-graph", "figure"),
        Output("firstname-output", "children"),
        Input("firstname-input", "value")
    )
    def update_firstname(firstname):
        """Filter both firstname charts by the searched name.

        Falls back to the top 20 names (and shows an error message) when the
        searched name is not in the reference table.
        """
        fig = make_subplots(
            rows=1, cols=2, subplot_titles=['Reference', 'Actual'],
            specs=[[{"type": "bar"}, {"type": "bar"}]]
        )
        output = ""
        filtered_firstname_df = (
            firstname_df[firstname_df['firstname'] == firstname])
        if len(filtered_firstname_df) == 0:
            filtered_firstname_df = firstname_df.head(20)
            # an empty input box is not an error; only flag real misses
            output = "Wrong Name!" if firstname else ""
        filtered_customer_firstname_df = (
            customer_firstname_df[
                customer_firstname_df['firstname'] == firstname])
        if len(filtered_customer_firstname_df) == 0:
            filtered_customer_firstname_df = customer_firstname_df.head(20)
        sub_fig1 = px.bar(
            filtered_firstname_df, x='firstname', y='ratio', color='ratio',
            text='ratio', labels={'ratio': 'ratio(%)'}
        )
        sub_fig2 = px.bar(
            filtered_customer_firstname_df, x='firstname', y='ratio',
            color='ratio', text='ratio', labels={'ratio': 'ratio(%)'}
        )
        fig.add_trace(sub_fig1['data'][0], row=1, col=1)
        fig.add_trace(sub_fig2['data'][0], row=1, col=2)
        fig.update_traces(textfont_size=13, texttemplate='%{text:.2}%')
        fig.update_layout(
            title=f"Comparison of firstname distribution",
            titlefont_size=20, title_font_color='black', title_x=0.5
        )
        return fig, output
    # blocks until the server is stopped
    app.run_server(debug=False)
def show_compare_plot(engine):
    """Open a connection on *engine*, build the comparison dashboard, and
    run its server.

    The connection is now closed when the server stops or errors; the
    original leaked it.
    """
    conn = engine.connect()
    try:
        _create_compare_plots(conn)
    finally:
        conn.close()
|
str = input("Enter the equation as follows (5 + 2) > ")
equation = str.split(" ")
num1 = int(equation[0])
operator = equation[1]
num2 = int(equation[2])
answer = 0
match operator:
case "+":
answer = num1 + num2
case "-":
answer = num1 - num2
case "*":
answer = num1 * num2
case "/":
answer = num1 / num2
print(answer) |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from pyuploadcare.dj.forms import ImageField
from .models import Profile, Neighbourhood, Updates, Business, EmergencyContact, Post
class SignupForm(UserCreationForm):
    """User registration form: username plus password/confirmation pair."""
    class Meta:
        model = User
        fields = ('username', 'password1', 'password2')
class ProfileForm(forms.ModelForm):
    """Create/edit a user profile, including contact details and estate role."""
    class Meta:
        model = Profile
        fields = ['name', 'profile_pic', 'age', 'contact', 'address', 'estate', 'role']
class NeighbourhoodForm(forms.ModelForm):
    """Create/edit a neighbourhood with its location and image."""
    class Meta:
        model = Neighbourhood
        fields = ['name', 'location', 'image']
class EmergencyForm(forms.ModelForm):
    """Create/edit an emergency contact entry."""
    class Meta:
        model = EmergencyContact
        fields = ['name', 'contact', 'description']
class BusinessForm(forms.ModelForm):
    """Create/edit a business listing."""
    class Meta:
        model = Business
        fields = ['name', 'description', 'location', 'owner']
class PostForm(forms.ModelForm):
    """Create/edit a community post."""
    class Meta:
        model = Post
        fields = ['title', 'content', 'tag']
class UpdatesForm(forms.ModelForm):
    """Create an update; metadata fields are excluded and set elsewhere."""
    class Meta:
        model = Updates
        exclude = ['title', 'tag', 'editor', 'estate', 'up_date']
class UpdateProfileForm(forms.ModelForm):
    # Uploadcare-backed image field replaces the default widget; empty label
    # keeps the form layout compact.
    profile_pic = ImageField(label='')
    class Meta:
        model = Profile
        fields = ['name', 'age', 'profile_pic', 'contact', 'address', 'estate', 'role']
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-19 17:16
from __future__ import absolute_import
from __future__ import unicode_literals
from django.core.management import call_command
from django.db import migrations
def update_es_mapping(*args, **kwargs):
    """RunPython forward callable: rebuild the 'case_search' ES mapping.

    Bug fix: the original defined an inner function and returned it without
    calling it, so the management command never actually ran during the
    migration.  RunPython ignores the return value; the command must be
    executed directly.  *args/**kwargs absorb the (apps, schema_editor)
    arguments Django passes, which are not needed here.
    """
    call_command('update_es_mapping', 'case_search', noinput=True)
class Migration(migrations.Migration):
    """Data migration: refresh the Elasticsearch mapping for case search."""
    dependencies = [
        ('case_search', '0007_auto_20170522_1506'),
    ]
    operations = [
        # no reverse operation is provided; this migration is forward-only
        migrations.RunPython(update_es_mapping)
    ]
|
from spada.methods import method
from spada.network import ensembl_transcript_network
import os
import pytest
# Test fixtures: locate the shared data directory relative to this file and
# build one network instance reused (and mutated) by every test below.
scriptPath = os.path.realpath(__file__)
dataPath = os.path.dirname(scriptPath) + "/../../data/"
txs = ensembl_transcript_network.ENSEMBLTranscriptNetwork('aName')
txs.skip_filter = False
def test_init():
    """The constructor must store the provided network name."""
    assert txs._name == 'aName'
def test_genenameFilter():
    """full_name is accepted as-is; symbol/id pairs are rejected as (None, None)."""
    assert txs.genenameFilter(full_name = 'id') == ('id', None)
    assert txs.genenameFilter(gene_symbol = 'id', gene_id = 'id') == (None, None)
    assert txs.genenameFilter(gene_symbol = 'locus', gene_id = 'locus') == (None, None)
def test_txFilter():
    """txFilter resolves transcript ids, including version-less lookups."""
    # unknown transcripts resolve to None
    assert txs.txFilter('made_up_tx') == None
    # a versioned id passes through even before the node exists
    assert txs.txFilter('ENST16.2') == 'ENST16.2'
    txs.add_node('ENST16.2', 'test')
    txs.add_node('ENST17.1', 'test')
    assert txs.txFilter('ENST16.2') == 'ENST16.2'
    # a version-less query resolves to the stored versioned id
    assert txs.txFilter('ENST16') == 'ENST16.2'
    assert txs.txFilter('ENST17.1') == 'ENST17.1'
    assert txs.txFilter('ENST17') == 'ENST17.1'
def initialize():
    """Initialize the dashboard, data storage, and account balances (stub)."""
    return None
def build_dashboard():
    """Build the dashboard (stub)."""
    return None
def fetch_data():
    """Fetch the latest prices (stub)."""
    return None
def generate_signals():
    """Generate trading signals for a given dataset (stub)."""
    return None
def execute_backtest():
    """Backtest signal data (stub)."""
    return None
def execute_trade_strategy():
    """Make a buy/sell/hold decision (stub)."""
    return None
def evaluate_metrics():
    """Generate evaluation metrics from backtested signal data (stub)."""
    return None
def update_dashboard():
    """Update the dashboard (stub)."""
    return None
def main():
    """Main event loop (stub)."""
    return None
#!/usr/bin/python
#
# James Sandford, copyright BBC 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase, mock
from unittest.mock import MagicMock
from hypothesis import given, strategies as st # type: ignore
from rtpPayload_ttml import (
RTPPayload_TTML, SUPPORTED_ENCODINGS, utfEncode)
from rtpPayload_ttml.utfUtils import BOMS
from rtpTTML import TTMLTransmitter
import asyncio
class TestTTMLTransmitter (TestCase):
    """Property-based tests for TTMLTransmitter's packetisation internals."""
    def setUp(self):
        self.transmitter = TTMLTransmitter("", 0)
    def setup_example(self):
        # hypothesis hook: give every generated example a fresh transmitter
        self.setUp()
    @given(
        st.text(min_size=1),
        st.integers(min_value=4))
    def test_fragmentDoc(self, doc, maxLen):
        """Fragments respect maxLen and concatenate back to the original doc."""
        fragments = self.transmitter._fragmentDoc(doc, maxLen)
        reconstructedDoc = ""
        for fragment in fragments:
            self.assertLessEqual(len(utfEncode(fragment)), maxLen)
            reconstructedDoc += fragment
        self.assertEqual(doc, reconstructedDoc)
    @given(st.datetimes())
    def test_datetimeToRTPTs(self, time):
        """RTP timestamps are unsigned 32-bit integers."""
        rtpTs = self.transmitter._datetimeToRTPTs(time)
        self.assertIsInstance(rtpTs, int)
        self.assertGreaterEqual(rtpTs, 0)
        self.assertLess(rtpTs, 2**32)
    @given(st.tuples(
        st.text(min_size=1),
        st.sampled_from(SUPPORTED_ENCODINGS),
        st.booleans(),
        st.integers(min_value=0, max_value=(2**32)-1),
        st.booleans(),
        st.booleans()).filter(
            lambda x: len(utfEncode(x[0], x[1], x[2])) < 2**16))
    def test_generateRTPPacket(self, data):
        """A packet round-trips payload, timestamp, seqnum and marker bit."""
        doc, encoding, bom, time, isFirst, marker = data
        thisTransmitter = TTMLTransmitter("", 0, encoding=encoding, bom=bom)
        expectedSeqNum = thisTransmitter._nextSeqNum
        packet = thisTransmitter._generateRTPPacket(
            doc, time, isFirst, marker)
        payload = RTPPayload_TTML(
            encoding=encoding, bom=bom).fromBytearray(packet.payload)
        self.assertEqual(packet.timestamp, time)
        self.assertEqual(packet.sequenceNumber, expectedSeqNum)
        self.assertEqual(packet.marker, marker)
        self.assertEqual(payload.userDataWords, doc)
        # the sequence number must advance by exactly one per packet
        self.assertEqual(thisTransmitter._nextSeqNum, expectedSeqNum + 1)
    @given(st.tuples(
        st.text(min_size=1),
        st.sampled_from(SUPPORTED_ENCODINGS),
        st.booleans(),
        st.datetimes(),
        st.booleans()).filter(
            lambda x: len(utfEncode(x[0], x[1], x[2])) < 2**16))
    def test_packetiseDoc(self, data):
        """A packetised document yields consecutive seqnums, a BOM only on the
        first fragment, and the marker bit only on the last packet."""
        doc, encoding, bom, time, marker = data
        thisTransmitter = TTMLTransmitter("", 0, encoding=encoding, bom=bom)
        expectedSeqNum = thisTransmitter._nextSeqNum
        packets = thisTransmitter._packetiseDoc(doc, time)
        for x in range(len(packets)):
            payload = RTPPayload_TTML(
                encoding=encoding, bom=bom).fromBytearray(packets[x].payload)
            self.assertEqual(
                packets[x].timestamp, thisTransmitter._datetimeToRTPTs(time))
            self.assertEqual(packets[x].sequenceNumber, expectedSeqNum + x)
            self.assertIn(payload.userDataWords, doc)
            self.assertLess(len(utfEncode(payload.userDataWords)), 2**16)
            thisBom = BOMS[encoding]
            if bom and (x == 0):
                self.assertTrue(payload._userDataWords.startswith(thisBom))
            else:
                self.assertFalse(payload._userDataWords.startswith(thisBom))
            if x == (len(packets) - 1):
                self.assertTrue(packets[x].marker)
            else:
                self.assertFalse(packets[x].marker)
        self.assertEqual(
            thisTransmitter.nextSeqNum, expectedSeqNum + len(packets))
class TestTTMLTransmitterContexts (TestCase):
    """Tests for the async and sync context-manager entry points."""
    async def dummyEndpoint(self, mockTransport, mockProtocol):
        # stand-in for create_datagram_endpoint's (transport, protocol) result
        return (mockTransport, mockProtocol)
    async def async_test_async(self, endpoint, port, doc, time):
        """Async path: sendDoc must transmit iff the doc is non-empty, and the
        transport must be closed exactly once on context exit."""
        mockTransport = MagicMock()
        mockProtocol = MagicMock()
        endpoint.return_value = self.dummyEndpoint(mockTransport, mockProtocol)
        async with TTMLTransmitter("", port) as transmitter:
            await transmitter.sendDoc(doc, time)
        if len(doc) > 0:
            mockTransport.sendto.assert_called()
        else:
            mockTransport.sendto.assert_not_called()
        mockTransport.close.assert_called_once()
    @mock.patch(
        "asyncio.unix_events._UnixSelectorEventLoop.create_datagram_endpoint")
    @given(
        st.integers(min_value=0, max_value=(2**16)-1),
        st.text(),
        st.datetimes())
    def test_async(self, endpoint, port, doc, time):
        # drive the coroutine-based check from the sync test runner
        asyncio.get_event_loop().run_until_complete(
            self.async_test_async(endpoint, port, doc, time))
    @mock.patch("socket.socket")
    @given(
        st.integers(min_value=0, max_value=(2**16)-1),
        st.text(),
        st.datetimes())
    def test_sync(self, socket, port, doc, time):
        """Sync path: same contract as the async path, over a mocked socket."""
        # reset between hypothesis examples; the mock is shared across them
        socket.reset_mock()
        sockInst = socket()
        with TTMLTransmitter("", port) as transmitter:
            transmitter.sendDoc(doc, time)
        if len(doc) > 0:
            sockInst.sendto.assert_called()
        else:
            sockInst.sendto.assert_not_called()
        sockInst.close.assert_called_once()
|
"""
author: Paul Bruillard, harsh
"""
import jax.numpy as jnp
from jax import jit
from cjax.utils.math_trees import *
from typing import Any
from functools import partial
def get_rotation_pytree(src: Any, dst: Any) -> Any:
    """
    Takes two n-dimensional vectors/Pytrees and returns an
    nxn rotation matrix mapping src to dst.

    NOTE(review): unlike get_rotation_array below, validation failures here
    are only printed (with loose rtol=0.5 tolerances) instead of raised —
    confirm whether this soft-fail behaviour is intentional.
    """

    def __assert_rotation(R):
        # Sanity-check that R is a square, (approximately) orthogonal matrix.
        if R.ndim != 2:
            print("R must be a matrix")
        a, b = R.shape
        if a != b:
            print("R must be square")
        if (
            not jnp.isclose(jnp.abs(jnp.eye(a) - jnp.dot(R, R.T)).max(), 0.0, rtol=0.5)
        ) or (
            not jnp.isclose(jnp.abs(jnp.eye(a) - jnp.dot(R.T, R)).max(), 0.0, rtol=0.5)
        ):
            print("R is not diagonal")

    if not pytree_shape_array_equal(src, dst):
        print("cjax and dst must be 1-dimensional arrays with the same shape.")
    x = pytree_normalized(src)
    y = pytree_normalized(dst)
    # assumes len(dst) yields the vector dimensionality n — TODO confirm for
    # non-array pytrees.
    n = len(dst)
    # compute angle between x and y in their spanning space
    theta = jnp.arccos(jnp.dot(x, y))  # they are normalized so there is no denominator
    if jnp.isclose(theta, 0):
        print("x and y are co-linear")
    # construct the 2d rotation matrix connecting x to y in their spanning space
    R = jnp.array([[jnp.cos(theta), -jnp.sin(theta)], [jnp.sin(theta), jnp.cos(theta)]])
    __assert_rotation(R)
    # get projections onto Span<x,y> and its orthogonal complement
    u = x
    v = pytree_normalized(pytree_sub(y, (jnp.dot(u, y) * u)))
    P = jnp.outer(u, u.T) + jnp.outer(
        v, v.T
    )  # projection onto 2d space spanned by x and y
    Q = jnp.eye(n) - P  # projection onto the orthogonal complement of Span<x,y>
    # lift the rotation matrix into the n-dimensional space
    uv = jnp.hstack((u[:, None], v[:, None]))
    R = Q + jnp.dot(uv, jnp.dot(R, uv.T))
    __assert_rotation(R)
    if jnp.any(jnp.logical_not(jnp.isclose(jnp.dot(R, x), y, rtol=0.25))):
        print("Rotation matrix did not work")
    return R
def get_rotation_array(src: Any, dst: Any) -> Any:
    """
    Takes two 1-dimensional numpy vectors and returns an nxn rotation
    matrix mapping the direction of src to the direction of dst.

    Raises:
        ValueError: when inputs are malformed, the vectors are co-linear,
            or the constructed matrix fails the rotation checks.
    """

    def __assert_rotation(R):
        # Validate that R is a square orthogonal matrix (R @ R.T == I).
        if R.ndim != 2:
            raise ValueError("R must be a matrix")
        a, b = R.shape
        if a != b:
            raise ValueError("R must be square")
        if (not np.isclose(np.abs(np.eye(a) - np.dot(R, R.T)).max(), 0)) or (
            not np.isclose(np.abs(np.eye(a) - np.dot(R.T, R)).max(), 0)
        ):
            raise ValueError("R is not diagonal")

    def __normalize(x):
        return x / np.sqrt(np.sum(x ** 2))

    if src.shape != dst.shape or src.ndim != 1:
        raise ValueError(
            "cjax and dst must be 1-dimensional arrays with the same shape."
        )
    # Bug fix: the dimensionality was previously read from an undefined
    # name ``n`` (a leaked global); derive it from the input instead.
    n = src.shape[0]
    x = __normalize(src.copy())
    y = __normalize(dst.copy())
    # compute angle between x and y in their spanning space
    theta = np.arccos(np.dot(x, y))  # they are normalized so there is no denominator
    if np.isclose(theta, 0):
        raise ValueError("x and y are co-linear")
    # construct the 2d rotation matrix connecting x to y in their spanning space
    R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
    __assert_rotation(R)
    # get projections onto Span<x,y> and its orthogonal complement
    u = x
    v = __normalize(y - (np.dot(u, y) * u))
    P = np.outer(u, u.T) + np.outer(
        v, v.T
    )  # projection onto 2d space spanned by x and y
    Q = np.eye(n) - P  # projection onto the orthogonal complement of Span<x,y>
    # lift the rotation matrix into the n-dimensional space
    uv = np.hstack((u[:, None], v[:, None]))
    R = Q + np.dot(uv, np.dot(R, uv.T))
    __assert_rotation(R)
    if np.any(np.logical_not(np.isclose(np.dot(R, x), y))):
        raise ValueError("Rotation matrix did not work")
    return R
@partial(jit, static_argnums=(0))
def projection_affine(n_dim, u, n, u_0):
    """Project point *u* onto the line through *u_0* with direction *n*
    using homogeneous (affine) transformation matrices.

    Args:
        n_dim: dimensionality of the affine space (static for jit)
        u: point to be projected onto the secant line
        n: secant normal (direction) vector
        u_0: secant starting point
    Returns:
        The projected point as an array of length ``n_dim``.
    """
    # NOTE(review): l2_norm and lax are expected to come from the star
    # import of cjax.utils.math_trees — confirm.
    n_norm = l2_norm(n)
    I = jnp.eye(n_dim)
    # Rank-1 projection onto n, scaled by 1/|n|^2 (jnp.dot(I, n) is just n).
    p2 = jnp.dot(I, n)[:, None] / n_norm ** 2 * n
    u_0 = lax.reshape(u_0, (n_dim, 1))
    I = jnp.eye(n_dim)
    # Homogeneous matrices: translate to u_0, project, translate back.
    t1 = jnp.block([[I, u_0], [jnp.zeros(shape=(1, n_dim)), 1.0]])
    t2 = jnp.block(
        [[p2, jnp.zeros(shape=(n_dim, 1))], [jnp.zeros(shape=(1, n_dim)), 1.0]]
    )
    t3 = jnp.block([[I, -1 * u_0], [jnp.zeros(shape=(1, n_dim)), 1.0]])
    P = jnp.matmul(jnp.matmul(t1, t2), t3)
    pr = jnp.matmul(P, jnp.hstack([u, 1.0]))
    # Drop the homogeneous coordinate.
    pr = lax.slice(pr, [0], [n_dim])
    return pr
if __name__ == "__main__":
    # Ad-hoc manual test harness; the randomized checks below are kept for
    # reference but disabled.
    n = 5
    # key = random.PRNGKey(10)
    # k1, k2 = random.split(key, 2)
    # cjax = random.normal(k1, [n])
    # dst = random.normal(k2, [n])
    # R = get_rotation_pytree(cjax, dst)
    # transformed_vector = np.dot(R, cjax)
    # print(jnp.dot(transformed_vector, dst))
    # n = 3
    # cjax= np.array([0.0,0.0,1.0])
    # dst = np.array([3.0,3.0,3.0])
    # sample = np.array([1.0,2.0,0.0])
    # R = get_rotation_array(cjax, dst)
    # transformed_vector = np.dot(R, sample) + dst
    # print(transformed_vector)
|
###############################################################################
# main.py
#
# simple script to read the value of a Force Sensitive Resistor (FSR) every
# 500ms and print its value to the REPL. The pin connected to Pin Y12 on the
# pyboard, should have a pull-down resistor of 10K connected to it as well.
# The other pin of the FSR flex sensor should be connected to 3.3V.
#
# Created: 10/05/17
# - Joshua Vaughan
# - joshua.vaughan@louisiana.edu
# - http://www.ucs.louisiana.edu/~jev9637
#
# Modified:
# * 09/29/18 - JEV
# - Changed pin to match upcoming MCHE210 breakout board
#
# TODO:
# *
###############################################################################
import pyb  # import the pyboard module
import time  # import the time module

# Set up the analog-to-digital converter on pin Y12
adc = pyb.ADC(pyb.Pin("Y12"))

# Now read the FSR every 500ms, forever
while (True):
    # Read the value of the FSR. It should be in the range 0-4095
    fsr_value = adc.read()
    # We can convert the value to the voltage we are reading.
    # 0=0V and 4095=3.3V
    voltage = 3.3 / 4095 * fsr_value
    # print out the values, nicely formatted
    print("ADC: {:5d}".format(fsr_value))
    print("Voltage: {:5.2f}".format(voltage))
    if fsr_value > 3900:
        print("Wow!... You're strong!\n")
    elif fsr_value < 200:
        print("Press the round part to test your strength.\n")
    else:
        print("Vary how hard you're pressing to watch the values change.\n")
    # sleep for 500ms
    time.sleep_ms(500)
import setuptools

# Use the README verbatim as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="senda",
    version="0.7.7",
    author="Lars Kjeldgaard",
    author_email="lars.kjeldgaard@eb.dk",
    description="Framework for Fine-tuning Transformers for Sentiment Analysis",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/ebanalyse/senda",
    packages=setuptools.find_packages(where='src'),
    package_dir={'': 'src'},
    python_requires='>=3.7',
    install_requires=[
        'torch',
        'transformers',
        # Bug fix: 'sklearn' is a deprecated dummy package on PyPI; the real
        # distribution name is 'scikit-learn' (the import name stays sklearn).
        'scikit-learn',
        'nltk',
        'pandas',
        'pyconll',
        'tweepy',
        'danlp',
        'datasets',
        'numpy'
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
    ],
    include_package_data=True
)
|
import json
import math
import numpy as np
import pandas as pd
import xarray as xr
from pyplan_engine.classes.evaluators.BaseEvaluator import BaseEvaluator
from pyplan_engine.classes.evaluators.PandasEvaluator import PandasEvaluator
from pyplan_engine.classes.XHelpers import XHelpers, XIndex
from pyplan_engine.common.classes.filterChoices import filterChoices
from pyplan_engine.common.classes.indexValuesReq import IndexValuesReq
class XArrayEvaluator(BaseEvaluator):
    """Evaluator that renders xarray.DataArray / XIndex node results for the
    Pyplan UI: grid and cube views, filtering, aggregation, pagination,
    hierarchy drill-up and map (geo) output.
    """

    # Number of rows per page in paginated grid responses.
    PAGESIZE = 100

    def evaluateNode(self, result, nodeDic, nodeId, dims=None, rows=None, columns=None, summaryBy="sum", bottomTotal=False, rightTotal=False, fromRow=0, toRow=0):
        """Dispatch evaluation by result type (DataArray cube vs XIndex)."""
        if isinstance(result, xr.DataArray):
            return self.cubeEvaluate(result, nodeDic, nodeId, dims, rows, columns, summaryBy, bottomTotal, rightTotal, fromRow, toRow)
        elif isinstance(result, XIndex):
            return self.indexEvaluate(result, nodeDic, nodeId, dims, rows, columns, summaryBy, bottomTotal, rightTotal, fromRow, toRow)
    def cubeEvaluate(self, result, nodeDic, nodeId, dims=None, rows=None, columns=None, summaryBy="sum", bottomTotal=False, rightTotal=False, fromRow=0, toRow=0):
        """Aggregate a DataArray onto the requested row/column axes and build
        the paginated grid payload (columns, index, data, optional totals).
        """
        # Map the requested aggregation name onto a numpy reducer (default sum).
        sby = np.sum
        if summaryBy == 'avg':
            sby = np.mean
        elif summaryBy == 'max':
            sby = np.max
        elif summaryBy == 'min':
            sby = np.min
        # Normalise 1-based pagination bounds (defaults to the first 100 rows).
        if (fromRow is None) or int(fromRow) <= 0:
            fromRow = 1
        if (toRow is None) or int(toRow) < 1:
            toRow = 100
        fromRow = int(fromRow)
        toRow = int(toRow)
        # Roll values up to the selected hierarchy levels first.
        result = self.applyHierarchy(
            result, nodeDic, nodeId, dims, rows, columns, sby)
        _filters = {}
        _rows = []
        _columns = []
        if not rows is None:
            for row in rows:
                if self.hasDim(result, str(row["field"])):
                    _rows.append(str(row["field"]).split(".")[0])
                    self.addToFilter(nodeDic, row, _filters)
        if not columns is None:
            for column in columns:
                if self.hasDim(result, str(column["field"])):
                    _columns.append(str(column["field"]).split(".")[0])
                    self.addToFilter(nodeDic, column, _filters)
        if not dims is None:
            for dim in dims:
                if self.hasDim(result, str(dim["field"]).split(".")[0]):
                    self.addToFilter(nodeDic, dim, _filters)
        tmp = None
        filteredResult = result
        if len(_filters) > 0:
            filteredResult = result.sel(_filters)
        if len(_rows) == 0 and len(_columns) == 0 and result.ndim > 0:
            # No axes requested: aggregate everything down to a scalar.
            try:
                tmp = sby(filteredResult)
            except Exception as ex:
                # numpy cannot reduce some fixed-width ("flexible") dtypes;
                # retry on an object-dtype copy.
                if "flexible type" in str(ex):
                    tmp = sby(filteredResult.astype("O"))
                else:
                    raise ex
        else:
            # Reduce every dimension that is not shown on rows/columns.
            otherDims = [
                xx for xx in filteredResult.dims if xx not in (_rows + _columns)]
            if len(otherDims) > 0:
                try:
                    tmp = filteredResult.reduce(
                        sby, otherDims).transpose(*(_rows + _columns))
                except Exception as ex:
                    # NOTE(review): non-"flexible type" errors leave tmp as
                    # None here (no re-raise), unlike the scalar branch —
                    # confirm this asymmetry is intended.
                    if "flexible type" in str(ex):
                        tmp = filteredResult.astype("O").reduce(
                            sby, otherDims).transpose(*(_rows + _columns))
            else:
                tmp = filteredResult.transpose(*(_rows + _columns))
        finalValues = tmp.values
        finalIndexes = []
        if tmp.ndim > 0:
            finalIndexes = tmp.coords[tmp.dims[0]].values
        finalColumns = ["Total"]
        if tmp.ndim == 2:
            finalColumns = tmp.coords[tmp.dims[1]].values
        # Add totals
        _totalRow = None
        if bottomTotal and len(_rows) > 0:
            # add total row
            if tmp.ndim == 1:
                _totalRow = finalValues.sum(axis=0).reshape(1)
            else:
                _totalRow = finalValues.sum(
                    axis=0).reshape(1, len(finalValues[0]))
                _totalRow = _totalRow[0]
                if rightTotal:
                    # Append the grand total to the bottom-right corner.
                    _totalRow = np.append(_totalRow, finalValues.sum())
        if rightTotal and len(_columns) > 0:
            # add total column
            if tmp.ndim == 1:
                finalIndexes = np.append(finalIndexes, "Total")
                finalValues = np.append(
                    finalValues, finalValues.sum(axis=0).reshape(1), axis=0)
            else:
                finalColumns = np.append(finalColumns, "Total")
                finalValues = np.append(finalValues, finalValues.sum(
                    axis=1).reshape(len(finalValues), 1), axis=1)
        # Replace infinities with None so they serialise as nulls.
        if self.kindToString(finalValues.dtype.kind) == "numeric":
            if np.isinf(finalValues).any():
                finalValues[np.isinf(finalValues)] = None
            # Replace NaN values with None as well.
            if pd.isnull(finalValues).any():
                try:
                    finalValues = np.where(
                        np.isnan(finalValues), None, finalValues)
                except:
                    finalValues[pd.isnull(finalValues)] = None
        res = {}
        pageInfo = None
        onRow = None
        onColumn = None
        if len(_rows) == 0 and len(_columns) == 0:
            # Scalar result.
            res = {
                "columns": [],
                "index": ["Total"],
                "data": [[finalValues.tolist()]]
            }
        elif len(_rows) == 0:
            # Columns only: one data row, capped at 300 columns.
            onColumn = _columns[0]
            res = {
                "columns": self.checkDateFormat(finalIndexes[:300]).tolist(),
                "index": finalColumns,
                "data": [finalValues[:300].tolist()]
            }
        elif len(_columns) == 0:
            # Rows only: paginated single-column table.
            if (len(finalIndexes) > self.PAGESIZE):
                pageInfo = {
                    "fromRow": int(fromRow),
                    "toRow": int(toRow),
                    "totalRows": len(finalIndexes)
                }
            onRow = _rows[0]
            res = {
                "columns": finalColumns,
                "index": self.checkDateFormat(finalIndexes[fromRow-1:toRow]).tolist(),
                "data": [[x] for x in finalValues[fromRow-1:toRow].tolist()]
            }
            # add total rows
            if not _totalRow is None:
                res["index"].append("Total")
                res["data"].append(_totalRow.tolist())
        else:
            # Rows and columns: paginated 2-D table capped at 300 columns.
            onColumn = _columns[0]
            onRow = _rows[0]
            if (len(finalIndexes) > self.PAGESIZE):
                pageInfo = {
                    "fromRow": int(fromRow),
                    "toRow": int(toRow),
                    "totalRows": len(finalIndexes)
                }
            res = {
                "columns": self.checkDateFormat(finalColumns[:300]).tolist(),
                "index": self.checkDateFormat(finalIndexes[fromRow-1:toRow]).tolist(),
                "data": finalValues[fromRow-1:toRow, :300].tolist()
            }
            # add total rows
            if not _totalRow is None:
                res["index"].append("Total")
                res["data"].append(_totalRow[:300].tolist())
        return self.createResult(res, type(tmp), onRow=onRow, onColumn=onColumn, node=nodeDic[nodeId], pageInfo=pageInfo)
def hasDim(self, result, dim):
return True if dim.split(".")[0] in result.dims else False
    def addToFilter(self, nodeDic, dim, filters):
        """Add the selected values of a dimension descriptor *dim* to the
        *filters* dict (keyed by bare dimension name) for DataArray.sel().
        """
        if "values" in dim and dim["values"] is not None and len(dim["values"]) > 0:
            field = str(dim["field"]).split(".")[0]
            # check whether the index used to filter is numeric or string typed
            nodeId = None
            indexType = None
            indexType = self.getIndexType(nodeDic, nodeId, field)
            # coerce the selected values to the index dtype
            _values = None
            if indexType == "S":
                _values = [str(xx["value"]) for xx in dim["values"]]
            else:
                _values = [int(xx["value"]) for xx in dim["values"]]
            all_values = None
            npValues = np.array(_values)
            if field in nodeDic:
                # The dimension is backed by a stand-alone index node.
                all_values = nodeDic[field].result.values
            elif len(dim["field"].split(".")) > 1:
                # Otherwise read the coordinate from the owning node's result.
                node_id = str(dim["field"]).split(".")[1]
                if field in nodeDic[node_id].result.dims:
                    all_values = nodeDic[node_id].result.coords[field].values
            serie = pd.Series(all_values)
            # Keep only selections that still exist in the current index values.
            if not all_values is None and serie.isin(npValues).any():
                npValues = all_values[serie.isin(npValues)]
            if len(npValues) > 0:
                filters[field] = npValues
def getIndexes(self, node, result=None):
if result is None:
result = node._result
return [(xx+"."+node.identifier) for xx in result.dims]
    def getIndexesWithLevels(self, node, result=None):
        """Describe each dimension of the node's result (field, name,
        description, number format) including any hierarchy levels reachable
        through ``hierarchy_parents``.
        """
        res = []
        if result is None:
            result = node._result
        if not result is None:
            _model = node.model
            for indexItem in result.dims:
                itemDim = indexItem.split(",")[0]
                item = {"field": itemDim+"."+node.identifier,
                        "name": itemDim, "description": "", "levels": []}
                if _model.existNode(itemDim):
                    # Prefer the index node's own display metadata.
                    levelNode = _model.getNode(itemDim)
                    if levelNode.title:
                        item["name"] = levelNode.title
                    item["description"] = levelNode.description
                    if levelNode.numberFormat:
                        item["numberFormat"] = levelNode.numberFormat
                    # check for levels
                    if not levelNode.hierarchy_parents is None:
                        def buildLevels(parents, levelList):
                            # Walk the parent chain, appending one level entry
                            # per ancestor index node.
                            if not isinstance(parents, list):
                                parents = [parents]
                            for parentIndexId in parents:
                                parentIndexNode = _model.getNode(parentIndexId)
                                if parentIndexNode is None:
                                    raise ValueError(
                                        f"Node {parentIndexId} not found")
                                levelItem = {
                                    "field": parentIndexId, "name": parentIndexNode.title or parentIndexId}
                                levelList.append(levelItem)
                                _dummy = parentIndexNode.result  # to force calc
                                if not parentIndexNode.hierarchy_parents is None:
                                    buildLevels(
                                        parentIndexNode.hierarchy_parents, levelList)
                        listOfLevels = [
                            {"field": itemDim, "name": item["name"]}]
                        indexParents = levelNode.hierarchy_parents
                        buildLevels(indexParents, listOfLevels)
                        item["levels"] = listOfLevels
                elif "datetime" in result.coords[itemDim].dtype.name:
                    # Date-typed coordinate: apply the default date format.
                    item["numberFormat"] = "2,DD,0,,0,0,4,0,$,5,FULL,0"
                res.append(item)
        return res
def isIndexed(self, result):
if not result is None:
obj = result
if isinstance(obj, pd.Series):
obj = pd.DataFrame({"values": obj})
if isinstance(obj, pd.DataFrame):
if (isinstance(obj.index, pd.MultiIndex) or isinstance(obj.index, pd.Index)) and len(obj.index.names) > 0 and (not obj.index.names[0] is None):
return True
return False
    def getIndexValues(self, nodeDic, data: IndexValuesReq, result=None):
        """Return the values of an index for UI filter dialogs, optionally
        filtered by the request's search text (contains / not-contains).
        """
        res = []
        if data.node_id:
            # Index requested through a node: read the coordinate values.
            if (not data.node_id is None) & (data.node_id in nodeDic):
                node = nodeDic[data.node_id]
                if result is None:
                    result = node.result
                res = self.checkDateFormat(
                    result[data.index_id].values).tolist()
        elif (not data.index_id is None) & (data.index_id in nodeDic):
            # Stand-alone index node.
            node = nodeDic[data.index_id]
            if result is None:
                result = node.result
            if isinstance(result, XIndex):
                res = result.values.tolist()
            elif isinstance(result, np.ndarray):
                res = self.checkDateFormat(result).tolist()
            else:
                res = list(result)
        if data.text1:
            # Case-insensitive substring filtering over the string form.
            text1 = data.text1.lower()
            if data.filter == filterChoices.CONTAINS.value:
                res = list(
                    filter(lambda item: text1 in str(item).lower(), res))
            elif data.filter == filterChoices.NOT_CONTAINS.value:
                res = list(
                    filter(lambda item: not text1 in str(item).lower(), res))
        return res
def getIndexType(self, nodeDic, nodeId, indexId):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
res = "S"
if (not indexId is None) & (indexId in nodeDic):
node = nodeDic[indexId]
if isinstance(node.result, XIndex) or isinstance(node.result, pd.Index):
if str(node.result.values.dtype) in numerics:
res = "N"
elif isinstance(node.result, np.ndarray):
if str(node.result.dtype) in numerics:
res = "N"
elif nodeId:
if (not nodeId is None) & (nodeId in nodeDic):
node = nodeDic[nodeId]
if str(node.result.coords[indexId].values.dtype) in numerics:
res = "N"
return res
    def getCubeValues(self, result, nodeDic, nodeId, query):
        """Return per-dimension values and the flattened cell values for the
        cube viewer, honouring the query's filters and visible columns.
        """
        if isinstance(result, xr.DataArray):
            res = {
                "dims": [],
                "values": []
            }
            # fix field.array: strip the ".node" suffix from column fields
            query["columns"] = [xx.split(".")[0] for xx in query["columns"]]
            _filters = {}
            if not query["filters"] is None:
                for dimFilter in query["filters"]:
                    field = str(dimFilter["field"]).split(".")[0]
                    if self.hasDim(result, field):
                        # Re-shape raw values into addToFilter's expected form.
                        dimFilter["values"] = [{"value": xx}
                                               for xx in dimFilter["values"]]
                        self.addToFilter(nodeDic, dimFilter, _filters)
            _filteredResult = result
            if len(_filters):
                _filteredResult = result.sel(_filters)
            nodeIndexes = self.getIndexes(nodeDic[nodeId], result)
            nodeIndexes = [xx.split(".")[0] for xx in nodeIndexes]
            # Describe each visible dimension with its (filtered) values.
            for col in query["columns"]:
                if col in nodeIndexes:
                    item = {
                        "field": col,
                        "count": 0,
                        "values": [str(v) for v in self.checkDateFormat(_filteredResult.coords[col].values).tolist()]
                        # "values": result.filter(_filters).axis(col).values.tolist()
                    }
                    item["count"] = len(item["values"])
                    res["dims"].append(item)
            # Sum out every dimension that is not displayed.
            otherDims = [
                xx for xx in _filteredResult.dims if xx not in query["columns"]]
            resultValues = None
            if len(otherDims) > 0:
                resultValues = _filteredResult.sum(otherDims)
            else:
                resultValues = _filteredResult
            #resultValues = _filteredResult.sum(keep=query["columns"])
            if isinstance(resultValues, xr.DataArray):
                if len(query["columns"]) > 0:
                    # Flatten in the column order requested by the UI.
                    res["values"] = resultValues.transpose(
                        *query["columns"]).values.reshape(resultValues.size).tolist()
                else:
                    res["values"] = [resultValues.values.tolist()]
            else:
                res["values"] = resultValues
            return res
def getCubeDimensionValues(self, result, nodeDic, nodeId, query):
if isinstance(result, xr.DataArray):
if len(query["columns"]) > 0:
dimension = query["columns"][-1]
if (dimension+"."+nodeId) in self.getIndexes(nodeDic[nodeId], result):
finalList = [
str(v) for v in self.checkDateFormat(result.coords[dimension].values).tolist()[:1000]]
finalList.sort()
return finalList
return []
    def getCubeMetadata(self, result, nodeDic, nodeId):
        """Build cube-viewer metadata (dimensions, measures, editability and
        node display properties) for a DataArray result; None otherwise.
        """
        res = None
        if isinstance(result, xr.DataArray):
            res = {
                "dims": [],
                "measures": [],
                "aggregator": "sum",
                "isEditable": True if self.isTable(nodeDic[nodeId]) == "1" else False,
                "nodeProperties": {
                    "title": nodeDic[nodeId].title if not nodeDic[nodeId].title is None else nodeDic[nodeId].identifier,
                    "numberFormat": nodeDic[nodeId].numberFormat
                }
            }
            # check if is in scenario
            if nodeDic[nodeId].model.isNodeInScenario(nodeDic[nodeId].identifier):
                res["nodeProperties"]["scenario"] = True
            for dim in result.dims:
                indexPart = str(dim).split(".")[0]
                itemDim = {
                    "field": dim,
                    "name": indexPart
                }
                # Prefer the index node's title/format when it exists.
                if indexPart in nodeDic:
                    if not nodeDic[indexPart].title is None:
                        itemDim["name"] = nodeDic[indexPart].title
                    if nodeDic[indexPart].numberFormat:
                        itemDim["numberFormat"] = nodeDic[indexPart].numberFormat
                res["dims"].append(itemDim)
            # A cube exposes a single implicit measure: the cell value.
            res["measures"].append({
                "field": "datavalue",
                "name": "datavalue"
            })
        return res
    def isTable(self, node):
        """Return "1" when the node's definition is a literal data array
        (pp.dataarray / pp.cube / xr.DataArray / create_dataarray), else "0".
        """
        res = "0"
        if isinstance(node.result, xr.DataArray):
            if not node.definition is None and node.definition != "":
                import re
                # NOTE(review): '[\s+]' strips whitespace AND literal '+'
                # characters; '\s+' was probably intended. The startswith
                # checks below are unaffected, but confirm before changing.
                deff = re.sub(
                    '[\s+]', '', str(node.definition).strip(' \t\n\r')).lower()
                if (deff.startswith("result=pp.dataarray(") or deff.startswith("result=pp.cube(") or deff.startswith("result=xr.dataarray(") or deff.startswith("result=create_dataarray(")):
                    res = "1"
        return res
    def setNodeValueChanges(self, nodeDic, nodeId, nodeChanges):
        """Apply cell edits from the UI to the node's DataArray, then rewrite
        the node definition so the edits persist. Returns "ok".
        """
        if isinstance(nodeDic[nodeId].result, xr.DataArray):
            for change in nodeChanges["changes"]:
                newValue = change["definition"]
                filters = {}
                for filterItem in change["filterList"]:
                    # Re-shape each Key/Value pair into addToFilter's format.
                    aux = {
                        "field": filterItem["Key"],
                        "values": [{
                            "value": filterItem["Value"]
                        }]}
                    self.addToFilter(nodeDic, aux, filters)
                # Convert single-value arrays into scalar slices for .loc.
                for key in filters:
                    filters[key] = slice(filters[key][0], filters[key][0])
                nodeDic[nodeId].result.loc[filters] = newValue
            nodeDic[nodeId].definition = self.generateNodeDefinition(
                nodeDic, nodeId)
        return "ok"
def generateNodeDefinition(self, nodeDic, nodeId, forceXArray=False):
array = nodeDic[nodeId].result
"""Generate code for cube definition"""
np.set_printoptions(threshold=np.prod(array.values.shape))
data = np.array2string(array.values, separator=",", precision=20, formatter={
'float_kind': lambda x: repr(x)}).replace('\n', '')
indexes = []
for dim in list(array.dims):
if dim in nodeDic:
indexes.append(dim)
else:
index_values = np.array2string(array[dim].values, separator=",", precision=20, formatter={
'float_kind': lambda x: repr(x)}).replace('\n', '')
coord = f"pd.Index({index_values},name='{dim}')"
indexes.append(coord)
indexes = "[" + ",".join(indexes).replace("'", '"') + "]"
if forceXArray or "xr.DataArray" in nodeDic[nodeId].definition or "create_dataarray" in nodeDic[nodeId].definition:
if self.kindToString(array.values.dtype.kind) == "string" or self.kindToString(array.values.dtype.kind) == "object":
deff = f'result = xr.DataArray({data},{indexes}).astype("O")'
else:
deff = f'result = xr.DataArray({data},{indexes})'
else:
if self.kindToString(array.values.dtype.kind) == "string" or self.kindToString(array.values.dtype.kind) == "object":
deff = "result = pp.cube(" + indexes + \
"," + data + ", dtype='O')"
else:
deff = "result = pp.cube(" + indexes + "," + data + ")"
return deff
def dumpNodeToFile(self, nodeDic, nodeId, fileName):
definition = self.generateNodeDefinition(nodeDic, nodeId)
with open(fileName, 'w') as f:
f.write(definition)
f.close()
    def applyHierarchy(self, result, nodeDic, nodeId, dims, rows, columns, sby):
        """Re-aggregate *result* onto the hierarchy levels selected in the
        dims/rows/columns descriptors, using *sby* as the aggregator.
        """

        def hierarchize(dataArray, levels, maps, hierarchyDic):
            # Translate base coordinate values level by level through the
            # parent-map arrays, then group/aggregate and reindex.
            mapArray = nodeDic[maps[0]].result
            coordValues = mapArray.values.copy()
            targetIndexId = nodeDic[levels[1]].result.name
            for pos, level in enumerate(levels):
                if pos > 0:
                    if not maps[pos] is None:
                        mapArrayLevel = nodeDic[maps[pos]].result
                        for ii in range(len(coordValues)):
                            if not coordValues[ii] is None:
                                try:
                                    newVal = mapArrayLevel.sel(
                                        {mapArrayLevel.dims[0]: coordValues[ii]}, drop=True).values.item(0)
                                    coordValues[ii] = newVal
                                except Exception as ex:
                                    # Unmapped value: drop it from the rollup.
                                    coordValues[ii] = None
                                    #raise ValueError("Hierarchy not found. Level: " + targetIndexId + ", value: " + coordValues[ii])
                                    pass
            # perform aggregate
            dataArray.coords[levels[0]].values = coordValues
            _df = dataArray.to_series()
            _df = _df.groupby(list(dataArray.dims), sort=False).agg(sby)
            _da = _df.to_xarray()
            # Reindex so every dimension matches its canonical node order.
            reindex_dic = dict()
            for dimension in _da.dims:
                if dimension == levels[0]:
                    reindex_dic[dimension] = nodeDic[levels[-1:]
                                                     [0]].result.values
                elif dimension in nodeDic and isinstance(nodeDic[dimension].result, pd.Index):
                    node_id = dimension
                    if not hierarchyDic is None and dimension in hierarchyDic:
                        node_id = hierarchyDic[dimension]
                    reindex_dic[dimension] = nodeDic[node_id].result.values
            _db = _da.reindex(reindex_dic)
            return _db

        allDims = (dims or []) + (rows or []) + (columns or [])
        hierarchyDic = dict()
        for dim in allDims:
            # Only descriptors whose selected level differs from the base
            # index require a hierarchy rollup.
            if dim and dim["currentLevel"] and dim["currentLevel"] != str(dim["field"]).split(".")[0]:
                hierarchyDic[str(dim["field"]).split(".")[
                    0]] = dim["currentLevel"]

                # recursive fn for search parent
                def findPath(indexNode, level, levels, maps):
                    if indexNode.identifier == level:
                        levels.append(indexNode.identifier)
                        maps.append(None)
                        return True
                    else:
                        _for_calc = indexNode.result
                        parents = indexNode.hierarchy_parents
                        if parents is None:
                            return False
                        else:
                            if not isinstance(parents, list):
                                parents = [parents]
                            mapArrays = indexNode.hierarchy_maps
                            if not isinstance(mapArrays, list):
                                mapArrays = [mapArrays]
                            mapPos = 0
                            for parentId in parents:
                                parent = nodeDic[parentId]
                                if findPath(parent, level, levels, maps):
                                    levels.append(indexNode.identifier)
                                    maps.append(mapArrays[mapPos])
                                    return True
                                mapPos += 1
                            return False

                field = str(dim["field"]).split(".")[0]
                currentLevel = dim["currentLevel"]
                indexNode = nodeDic[field]
                levels = []
                maps = []
                # Build the base-to-target level chain, root first.
                findPath(indexNode, currentLevel, levels, maps)
                levels.reverse()
                maps.reverse()
                result = hierarchize(result.copy(), levels, maps, hierarchyDic)
        return result
    def geoUnclusterData(self, result, nodeDic, nodeId, rowIndex, attIndex, latField="latitude", lngField="longitude", geoField="geoField", labelField="labelField", sizeField="sizeField", colorField="colorField", iconField="iconField"):
        """Convert a 2-D cube (rows x geo attributes) into a list of map
        points, tracking min/max for the size, color and icon measures.
        """
        # Temporary index fixing the order of the geo attribute columns.
        _tmp_for_geo = XIndex('tmp_for_geo', [
            latField, lngField, geoField, labelField, sizeField, colorField, iconField])
        attIndex = attIndex.split(".")[0]
        rowIndex = rowIndex.split(".")[0]
        _idx = nodeDic[attIndex].result
        rowIndexObj = nodeDic[rowIndex].result
        #mapCube = result.sel({_idx.name:_tmp_for_geo}).transpose([rowIndex,"tmp_for_geo"]).values
        mapCube = XHelpers.changeIndex(None, result, _idx, _tmp_for_geo).transpose(
            *[rowIndex, "tmp_for_geo"]).values
        res = dict()
        points = []
        pos = 0
        # One point per row; column order follows _tmp_for_geo above.
        for itemRow in mapCube:
            vo = dict()
            vo["id"] = str(rowIndexObj.values[pos])
            vo["lat"] = itemRow[0]
            vo["lng"] = itemRow[1]
            vo["geoDef"] = itemRow[2]
            vo["labelRes"] = itemRow[3]
            vo["sizeRes"] = itemRow[4]
            vo["colorRes"] = itemRow[5]
            vo["iconRes"] = itemRow[6]
            points.append(vo)
            pos += 1
        res["points"] = points
        # Track min/max of the numeric measures; non-numeric values are
        # skipped (float() raises and the except ignores them).
        for nn, point in enumerate(res["points"]):
            if nn == 0:
                # The first point seeds the min/max trackers.
                try:
                    if not math.isnan(float(point["sizeRes"])):
                        res["minSize"] = float(point["sizeRes"])
                        res["maxSize"] = float(point["sizeRes"])
                except Exception as ex:
                    pass
                try:
                    if not math.isnan(float(point["colorRes"])):
                        res["minColor"] = float(point["colorRes"])
                        res["maxColor"] = float(point["colorRes"])
                except Exception as ex:
                    pass
                try:
                    if not math.isnan(float(point["iconRes"])):
                        res["minIcon"] = float(point["iconRes"])
                        res["maxIcon"] = float(point["iconRes"])
                except Exception as ex:
                    pass
            else:
                try:
                    if not math.isnan(float(point["sizeRes"])):
                        if point["sizeRes"] > res["maxSize"]:
                            res["maxSize"] = point["sizeRes"]
                        if point["sizeRes"] < res["minSize"]:
                            res["minSize"] = point["sizeRes"]
                except Exception as ex:
                    pass
                try:
                    if not math.isnan(float(point["colorRes"])):
                        if point["colorRes"] > res["maxColor"]:
                            res["maxColor"] = point["colorRes"]
                        if point["colorRes"] < res["minColor"]:
                            res["minColor"] = point["colorRes"]
                except Exception as ex:
                    pass
                try:
                    if not math.isnan(float(point["iconRes"])):
                        if point["iconRes"] > res["maxIcon"]:
                            res["maxIcon"] = point["iconRes"]
                        if point["iconRes"] < res["minIcon"]:
                            res["minIcon"] = point["iconRes"]
                except Exception as ex:
                    pass
        return res
def postCalculate(self, node, result):
"""Method executed after calculate node
"""
if isinstance(result, xr.DataArray):
result.name = node.title
    def copyAsValues(self, result, nodeDic, nodeId):
        """Replace the node's definition with its literal evaluated values.

        Returns True when the result type is supported (int, float or
        DataArray), False otherwise.
        """
        newDef = ""
        if isinstance(result, float) or isinstance(result, int):
            newDef = "result = " + str(result)
        elif isinstance(result, xr.DataArray):
            # Force xr.DataArray-style code regardless of the old definition.
            newDef = self.generateNodeDefinition(nodeDic, nodeId, True)
        else:
            return False
        nodeDic[nodeId].definition = newDef
        return True
def kindToString(self, kind):
"""Returns the data type on human-readable string
"""
if kind in {'U', 'S'}:
return "string"
elif kind in {'b'}:
return "boolean"
elif kind in {'i', 'u', 'f', 'c'}:
return "numeric"
elif kind in {'m', 'M'}:
return "date"
elif kind in {'O'}:
return "object"
elif kind in {'V'}:
return "void"
|
from django.contrib import admin
from .models import Employee, Position

# Expose the Employee and Position models in the Django admin site.
admin.site.register(Employee)
admin.site.register(Position)
import os
import pandas as pd
import numpy as np
from sklearn import linear_model
from sklearn.model_selection import train_test_split
def getData():
    """Load the home-price dataset from ./home_data.csv, or return None
    when the file is absent.

    NOTE(review): skipfooter=1 drops the last CSV row (and forces the slower
    python parser engine) — confirm the trailing row really is a footer.
    """
    # Get home data from CSV file
    dataFile = None
    if os.path.exists('home_data.csv'):
        print("-- home_data.csv found locally")
        dataFile = pd.read_csv('home_data.csv', skipfooter=1)
    return dataFile
def linearRegressionModel(X_train, Y_train, X_test, Y_test):
    """Fit ordinary least-squares regression on the training split and
    return its R^2 score on the test split."""
    model = linear_model.LinearRegression()
    model.fit(X_train, Y_train)
    return model.score(X_test, Y_test)
def lassoRegressionModel(X_train, Y_train, X_test, Y_test):
    """Fit an L1-regularised (Lasso, alpha=1.0) regression on the training
    split and return its R^2 score on the test split."""
    model = linear_model.Lasso(alpha=1.0)
    model.fit(X_train, Y_train)
    return model.score(X_test, Y_test)
if __name__ == "__main__":
    data = getData()
    if data is not None:
        # Attributes used as regression features.
        attributes = list(
            [
                'num_bed',
                'year_built',
                'num_room',
                'num_bath',
                'living_area',
            ]
        )
        # Target vector: asking price of each house.
        Y = data['askprice']
        # Feature matrix restricted to the selected attributes.
        X = data[attributes]
        # Hold out 20% of the rows for evaluation.
        X_train, X_test, Y_train, Y_test = train_test_split(np.array(X), np.array(Y), test_size=0.2)
        # Linear Regression Model
        linearScore = linearRegressionModel(X_train, Y_train, X_test, Y_test)
        # Bug fix: Python-2-only `print x, y` statements are a syntax error
        # on Python 3; single-argument print() works on both.
        print('Linear Score = %s' % linearScore)
        # LASSO Regression Model
        lassoScore = lassoRegressionModel(X_train, Y_train, X_test, Y_test)
        print('Lasso Score = %s' % lassoScore)
from typing import Any, Optional
from pandas_profiling.config import ImageType
from pandas_profiling.report.presentation.core.item_renderer import ItemRenderer
class Image(ItemRenderer):
    """Report-presentation item wrapping an image and its display metadata."""

    def __init__(
        self,
        image: str,
        image_format: ImageType,
        alt: str,
        caption: Optional[str] = None,
        **kwargs
    ):
        """
        Args:
            image: the image content/location string.
            image_format: the ImageType of the image.
            alt: alternative text shown when the image cannot be displayed.
            caption: optional caption displayed with the image.
            **kwargs: forwarded to ItemRenderer.
        """
        super().__init__(
            "image",
            {
                "image": image,
                "image_format": image_format,
                "alt": alt,
                "caption": caption,
            },
            **kwargs
        )

    def __repr__(self) -> str:
        return "Image"

    def render(self) -> Any:
        # Rendering is delegated to concrete presentation flavours; this
        # core container cannot render itself.
        raise NotImplementedError()
|
# STACK IMPLEMENTED AS A LIST
#To check stack is empty or not
def isEmpty(stack):
    """Return True when the stack holds no items."""
    # Idiom fix: return the comparison directly instead of if/else booleans.
    return stack == []
#Insertion of Element
def Push(stack, item):
    """Append *item* on top of *stack*.

    The old dead local assignment ``top = len(stack) - 1`` was removed; it
    never updated the module-level ``top`` used by the menu loop.
    """
    stack.append(item)
#Deletion of Element
def Pop(stack):
    """Remove and return the top item; return "Underflow" when empty.

    The old dead local ``top`` bookkeeping was removed; it never updated the
    module-level ``top`` used by the menu loop.
    """
    if not stack:
        return "Underflow"
    return stack.pop()
# Inspection of the top element
def Peek(stack):
    """Return the top item without removing it; "Underflow" when empty."""
    if not stack:
        return "Underflow"
    # Idiom fix: negative indexing instead of computing len(stack)-1.
    return stack[-1]
#Display of Stack Elements
def Display(stack):
    """Print the stack from top to bottom, marking the top element."""
    if isEmpty(stack):
        print("Stack is Empty")
        return
    print(stack[-1], "<-top")
    for idx in range(len(stack) - 2, -1, -1):
        print(stack[idx])
#__main__
# Simple interactive menu exercising the stack operations above.
Stack=[]  # initially empty
top=None
while True:
    print("\n\nStack Operations")
    print("1. Push")
    print("2. POP")
    print("3. Peek")
    print("4. Display")
    print("5. Exit")
    ch=int(input("Enter your choice (1-5): "))
    if ch==1:
        item=int(input("Enter item:"))
        Push(Stack, item)
    elif ch==2:
        item=Pop(Stack)
        if item=="Underflow":
            print("Underflow! Stack is Empty")
        else:
            print("Popped item is ", item)
    elif ch==3:
        item=Peek(Stack)
        if item=="Underflow":
            print("Underflow! Stack is Empty")
        else:
            print("Topmost item is ", item)
    elif ch==4:
        Display(Stack)
    elif ch==5:
        break
    else:
        print("Invalid Choice!")
import random
from kelte.maths import Position
from kelte.tiles import get_tile, Tile
from kelte.ecs import Entity
mob_registry = {}
def create_mob(name: str, type: str, tile: Tile = None, health: int = None, inventory: dict = None, position: Position = None):
    """Build a mob Entity with tile, position, health and inventory components.

    Any component left as None falls back to a default: the tile registered
    under *name*, an origin Position, 10 health, and an empty inventory.
    """
    if tile is None:
        tile = get_tile(name)
    if position is None:
        position = Position()
    if health is None:
        health = 10
    if inventory is None:
        inventory = {}
    mob = Entity(name=name, type=type)
    for component_name, component in (
        ('tile', tile),
        ('position', position),
        ('health', health),
        ('inventory', inventory),
    ):
        mob.add_component(component_name, component)
    return mob
def populate_mob_data(registry):
    """Fill the module-level mob_registry from raw registry data keyed by type.

    Only sections whose key starts with 'mob' and that contain entries are
    processed; each entry's 'name' becomes the registry key.
    """
    global mob_registry
    for mob_type, entries in registry.items():
        # Skip empty sections and anything that is not mob data.
        if not entries or not mob_type.startswith('mob'):
            continue
        for entry in entries:
            entry_name = entry.get('name')
            mob_registry[entry_name] = create_mob(name=entry_name, type=mob_type)
    return mob_registry
def get_mob(name: str, tile: Tile = None, copy: bool = True) -> Entity:
    """Look up (or lazily create) a mob by name; with no name, pick one at random.

    Parameters:
        name: registry key; an unknown name is created and cached on first request.
        tile: optional tile override, used only when the mob must be created.
        copy: when True (default), return a copy so callers cannot mutate the
            cached registry entry.

    Returns:
        The requested (or randomly chosen) mob Entity.
    """
    global mob_registry
    if name is not None and name not in mob_registry:
        tile = tile or get_tile(name)
        mob = create_mob(name, type='mob', tile=tile)
        mob_registry[name] = mob
    elif name:
        mob = mob_registry[name]
    else:
        # NOTE: an empty-string name also lands here and triggers a random pick.
        # random.choice needs a sequence; list() avoids the original's
        # needless list-comprehension copy.
        mob = random.choice(list(mob_registry.values()))
    return mob.copy() if copy else mob
|
# -*- coding: utf-8 -*-
from translate.TranslatorInterface import TranslatorInterface
import requests
from lxml import html
import json
from gtts import gTTS
#
# https://dictionary.yandex.net/api/v1/dicservice.json/lookup?key=dict.1.1.20170611T172307Z.4abaf98a6a837032.37092569e629450212c2a109f3e90091d784d1e3&lang=ru-de&text=ножницы
#
#
class Translator(TranslatorInterface):
    """Translate a word via the Yandex dictionary API and synthesize speech with gTTS.

    ``translate`` performs live network I/O (Yandex and Glosbe) and writes a
    '<word>.mp3' file into ``folder`` as a side effect.
    """

    def translate(self, word, _from, _to, folder):
        # Yandex translate + TTS mp3
        # NOTE(review): the API key is hard-coded in source; consider moving
        # it to configuration.
        payload = {'key': 'dict.1.1.20170611T172307Z.4abaf98a6a837032.37092569e629450212c2a109f3e90091d784d1e3',
                   'lang': _from + '-' + _to, 'text': word}
        r = requests.get('https://dictionary.yandex.net/api/v1/dicservice.json/lookup', params=payload)
        # print(r.url)
        # NOTE(review): resp['def'][0] below raises IndexError/KeyError when
        # the API returns no definitions -- no error handling here; verify
        # callers guarantee a hit.
        resp = r.json()
        # Prefix nouns with their grammatical article so the TTS reads the
        # gender aloud; presumably _from is German ('de') -- confirm callers.
        audio = word
        if 'gen' in resp['def'][0]:
            if resp['def'][0]['gen'] == 'f':
                audio = 'die ' + word
            if resp['def'][0]['gen'] == 'm':
                audio = 'der ' + word
            if resp['def'][0]['gen'] == 'n':
                audio = 'das ' + word
        tts = gTTS(text=audio, lang=_from, slow=False)
        tts.save('./' + folder + '/' + word + '.mp3')
        # First translation candidate only.
        trans = resp['def'][0]['tr'][0]['text']
        # Glosbe examples
        # NOTE(review): the Glosbe response 'rg' is never used below -- the
        # example is taken from the Yandex response instead; this request
        # looks like dead code. Confirm before removing.
        payloadgl = {'from': _from, 'dest': _to, 'format': 'json', 'phrase': word, 'pretty': 'true', 'tm': 'true'}
        rg = requests.get('https://glosbe.com/gapi/translate', params=payloadgl)
        # print(rg.url)
        example = ''
        if 'ex' in resp['def'][0]['tr'][0]:
            example = resp['def'][0]['tr'][0]['ex'][0]['text']
        # Example http://dict.tu-chemnitz.de/dings.cgi?o=302;service=deen;iservice=de-en-ex;query=k%C3%B6nnen
        # rex = requests.get('http://dict.tu-chemnitz.de/dings.cgi?o=302;service=deen;iservice=de-en-ex;query='+word)
        # tree = html.fromstring(rex.text)
        # html_element = tree.xpath(".//table[@id='result']")
        # html_element
        # The .encode("utf-8") calls yield bytes values -- presumably consumed
        # by an Anki-import step that expects bytes; verify against callers.
        return {'German': audio.encode("utf-8"), 'Russian': trans.encode("utf-8"), 'GermanEx': example.encode("utf-8"),
                'GerAudio': ('[sound:' + word + '.mp3]').encode("utf-8")}
|
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
import os
import re
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from pkg_resources import get_distribution, DistributionNotFound
# Project layout paths, all relative to the directory containing this setup.py.
ROOT_PATH = os.path.dirname(os.path.realpath(__file__))
RESOURCE_PATH = os.path.join(ROOT_PATH, 'resource')
SOURCE_PATH = os.path.join(ROOT_PATH, 'source')
README_PATH = os.path.join(ROOT_PATH, 'README.rst')
try:
    # Derive the display version from the installed distribution, if any;
    # used below only to build documentation/source URLs.
    release = get_distribution('ftrack-python-api').version
    # take major/minor/patch
    VERSION = '.'.join(release.split('.')[:3])
except DistributionNotFound:
    # package is not installed
    VERSION = 'Unknown version'
# Custom commands.
class PyTest(TestCommand):
    '''Custom ``test`` command that delegates test discovery and running to pytest.'''

    def finalize_options(self):
        '''Finalize options to be used.'''
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        '''Import pytest lazily and exit the process with its status code.'''
        import pytest
        status = pytest.main(self.test_args)
        raise SystemExit(status)
# Template used by setuptools_scm when writing source/ftrack_api/_version.py.
version_template = '''
# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
__version__ = {version!r}
'''

# Read the long description up front so the file handle is closed promptly
# (the original passed open(...).read() inline, leaking the handle).
with open(README_PATH) as _readme_file:
    _long_description = _readme_file.read()

# Call main setup.
setup(
    name='ftrack-python-api',
    description='Python API for ftrack.',
    long_description=_long_description,
    keywords='ftrack, python, api',
    url='https://bitbucket.org/ftrack/ftrack-python-api',
    author='ftrack',
    author_email='support@ftrack.com',
    license='Apache License (2.0)',
    packages=find_packages(SOURCE_PATH),
    project_urls={
        "Documentation": "http://ftrack-python-api.rtd.ftrack.com/en/{}/".format(VERSION),
        "Source Code": "https://bitbucket.org/ftrack/ftrack-python-api/src/{}".format(VERSION),
    },
    package_dir={
        '': 'source'
    },
    use_scm_version={
        'write_to': 'source/ftrack_api/_version.py',
        'write_to_template': version_template,
    },
    setup_requires=[
        'sphinx >= 1.2.2, < 1.6',
        'sphinx_rtd_theme >= 0.1.6, < 1',
        'lowdown >= 0.1.0, < 2',
        'setuptools>=30.3.0',
        'setuptools_scm'
    ],
    install_requires=[
        'requests >= 2, <3',
        'arrow >= 0.4.4, < 1',
        'termcolor >= 1.1.0, < 2',
        'pyparsing >= 2.0, < 3',
        'clique >= 1.2.0, < 2',
        'websocket-client >= 0.40.0, < 1',
        'future >=0.16.0, < 1',
        'six >= 1, < 2'
    ],
    tests_require=[
        'pytest >= 2.7, < 3',
        'pytest-mock >= 0.4, < 1',
        'pytest-catchlog >= 1, <=2',
        'mock',
        'flaky'
    ],
    cmdclass={
        'test': PyTest
    },
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3'
    ],
    zip_safe=False,
    python_requires=">=2.7.9, <4.0"
)
|
"""
The module containing the PyReshaper configuration specification class
This is a configuration specification class, through which the input to
the PyReshaper code is specified. Currently all types of supported
operations for the PyReshaper are specified with derived dypes of the
Specification class.
Copyright 2020 University Corporation for Atmospheric Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pickle
from os import path as ospath
def create_specifier(**kwargs):
    """
    Factory function for Specifier class objects. Defined for convenience.

    Parameters:
        kwargs (dict): Optional arguments forwarded to the Specifier constructor.

    Returns:
        Specifier: A newly constructed Specifier.
    """
    specifier = Specifier(**kwargs)
    return specifier
class Specifier(object):
    """
    Time-slice to Time-series Conversion Specifier

    This class acts as a container for the various input data needed
    by the Reshaper to perform the time-slice to time-series operation.
    """

    def __init__(
        self,
        infiles=None,
        ncfmt='netcdf4',
        compression=0,
        least_significant_digit=None,
        prefix='tseries.',
        suffix='.nc',
        timeseries=None,
        metadata=None,
        meta1d=False,
        backend='netCDF4',
        exclude_list=None,
        metafile=None,
        **kwargs
    ):
        """
        Initializes the internal data with optional arguments.

        The time-series output files are named according to the convention:

            output_file_name = prefix + variable_name + suffix

        The output_file_name should be a full-path filename.

        Parameters:
            infiles (list): List of full-path input filenames (defaults to an empty list)
            ncfmt (str): String specifying the NetCDF data format ('netcdf','netcdf4','netcdf4c')
            compression (int): Compression level to use for NetCDF4 formatted data
                (overridden by the 'netcdf4c' format, which forces level 1)
            least_significant_digit (int): The digit (after the decimal) to assure precision
                to when using truncation before compression
            prefix (str): String specifying the full-path prefix common to all
                time-series output files
            suffix (str): String specifying the suffix common to all time-series output files
            timeseries (list): List of variable names to extract out from the input
                time-slices into their own time-series files.  If None, then all
                non-metadata time-variant variables will be treated as time-series variables.
            metadata (list): List of variable names specifying the variables that should be
                included in every time-series output file (defaults to an empty list)
            meta1d (bool): True if 1D time-variant variables should be treated as
                metadata variables, False otherwise.
            backend (str): Which I/O backend to use ('Nio' for PyNIO, 'netCDF4'
                for netCDF4-python)
            exclude_list (list): List of time invariant variables to exclude from each
                timeseries file (defaults to an empty list)
            metafile (str): Name of file from which to search for metadata (if unspecified,
                PyReshaper searches for metadata in the first input file given)
            kwargs (dict): Optional arguments describing the Reshaper run
        """
        # BUGFIX: the list parameters previously used mutable default
        # arguments ([]), which are shared across calls and can leak state
        # between Specifier instances.  They now default to None and are
        # replaced with fresh lists here; Specifier() behaves identically.
        # The list of input (time-slice) NetCDF files (absolute paths)
        self.input_file_list = [] if infiles is None else infiles
        # The string specifying the NetCDF file format for output
        self.netcdf_format = ncfmt
        # The compression level for NetCDF4-formatted output
        self.compression_level = compression
        # Least significant digits argument to NetCDF4 (ignored by PyNIO)
        self.least_significant_digit = least_significant_digit
        # The common prefix to all output files (following the rule:
        # prefix + variable_name + suffix)
        self.output_file_prefix = prefix
        # The common suffix to all output files (same naming rule)
        self.output_file_suffix = suffix
        # List of time-variant variables that should be given their own
        # output file (None means "all non-metadata time-variant variables")
        self.time_series = timeseries
        # List of time-variant variables that should be included in all
        # output files
        self.time_variant_metadata = [] if metadata is None else metadata
        # Whether all 1D time-variant variables should be treated as metadata
        self.assume_1d_time_variant_metadata = meta1d
        # Store the netCDF I/O backend name
        self.io_backend = backend
        # Time invariant variables to exclude from each timeseries file
        self.exclude_list = [] if exclude_list is None else exclude_list
        # Name of file from which to search for metadata
        self.metadata_filename = metafile
        # Optional arguments associated with the reshaper operation
        self.options = kwargs

    def validate(self):
        """
        Perform self-validation of internal data (types first, then values).
        """
        self.validate_types()
        self.validate_values()

    def validate_types(self):
        """
        Method for checking the types of the Specifier data.

        This method is called by the validate() method.

        Raises:
            TypeError: If any attribute has an unexpected type.
        """
        # Validate the type of the input file list
        if not isinstance(self.input_file_list, list):
            raise TypeError('Input file list must be a list')
        # Validate that each input file name is a string
        for ifile_name in self.input_file_list:
            if not isinstance(ifile_name, str):
                raise TypeError('Input file names must be given as strings')
        # Validate the netcdf format string
        if not isinstance(self.netcdf_format, str):
            raise TypeError('NetCDF format must be given as a string')
        # Validate the netcdf compression level
        if not isinstance(self.compression_level, int):
            raise TypeError('NetCDF compression level must be given as an int')
        # Validate the output file prefix
        if not isinstance(self.output_file_prefix, str):
            raise TypeError('Output file prefix must be given as a string')
        # Validate the output file suffix
        if not isinstance(self.output_file_suffix, str):
            raise TypeError('Output file suffix must be given as a string')
        # Validate the time-series variable list (None is allowed)
        if self.time_series is not None:
            if not isinstance(self.time_series, list):
                raise TypeError('Time-series variables must be a list or None')
            for var_name in self.time_series:
                if not isinstance(var_name, str):
                    raise TypeError('Time-series variable names must be given as strings')
        # Validate the time-variant metadata list
        if not isinstance(self.time_variant_metadata, list):
            raise TypeError('Time-variant metadata must be a list')
        for var_name in self.time_variant_metadata:
            if not isinstance(var_name, str):
                raise TypeError('Time-variant metadata variable names must be given as strings')
        # Validate the 1D-metadata flag
        if not isinstance(self.assume_1d_time_variant_metadata, bool):
            raise TypeError('Flag to assume 1D time-variant metadata must be boolean')
        # Validate the backend name
        if not isinstance(self.io_backend, str):
            raise TypeError('I/O backend must be given as a string')

    def validate_values(self):
        """
        Method to validate the values of the Specifier data.

        This method is called by the validate() method.

        We impose the (somewhat arbitrary) rule that the Specifier
        should not validate values that require "cracking" open the
        input files themselves.  Hence, we validate values that can
        be checked without any NetCDF file I/O (including reading the
        header information).

        This method will correct some input if it is safe to do so:
        the compression level is forced to 1 for the 'netcdf4c' format,
        the output prefix is made absolute, and a missing '.nc' suffix
        is appended.

        Raises:
            ValueError: If any value is invalid.
        """
        # Make sure there is at least 1 input file given
        if len(self.input_file_list) == 0:
            raise ValueError('There must be at least one input file given.')
        # Validate that each input file exists and is a regular file
        for ifile_name in self.input_file_list:
            if not ospath.isfile(ifile_name):
                raise ValueError('Input file {} is not a regular file'.format(ifile_name))
        # Validate the value of the netcdf format string
        if self.netcdf_format not in ('netcdf', 'netcdf4', 'netcdf4c'):
            raise ValueError('Output NetCDF file format {} is not valid'.format(self.netcdf_format))
        # Forcefully set the compression level if 'netcdf4c' format
        if self.netcdf_format == 'netcdf4c':
            self.compression_level = 1
        # Validate the value of the compression level integer
        if self.compression_level < 0 or self.compression_level > 9:
            raise ValueError(
                'NetCDF compression level {} is not in the valid range (0-9)'.format(
                    self.compression_level
                )
            )
        # Validate the output file directory implied by the prefix
        abs_output_prefix = ospath.abspath(self.output_file_prefix)
        abs_output_dir = ospath.dirname(abs_output_prefix)
        if not ospath.isdir(abs_output_dir):
            raise ValueError(
                ('Output directory {} implied in output prefix {} is not ' 'valid').format(
                    abs_output_dir, self.output_file_prefix
                )
            )
        self.output_file_prefix = abs_output_prefix
        # Validate the output file suffix string (should end in .nc)
        if not self.output_file_suffix.endswith('.nc'):
            self.output_file_suffix += '.nc'
        # Backend validated when PyReshaper is run ONLY!

    def write(self, fname):
        """
        Pickle this specifier to a file.

        Parameters:
            fname (str): Name of file to write
        """
        with open(fname, 'wb') as fobj:
            pickle.dump(self, fobj)
# The module is intended to be imported; running it directly is a no-op.
if __name__ == '__main__':
    pass
|
"""Bayesian polynomial mixture model."""
# pylint: disable=invalid-name
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
class BayesianPolynomialMixture:  # pylint: disable=too-few-public-methods
    """Handles creation of a polynomial mixture model.

    Builds a TFP joint distribution over mixture weights, per-component
    polynomial coefficients (with Wishart-distributed precisions), a shared
    noise scale, and Student-t observations.
    """

    def __init__(self, num_components=5, polynomial_degree=3):
        """Creates polynomial mixture with given mixture components of given degree."""
        self.num_components = num_components
        self.polynomial_degree = polynomial_degree
        # One precision per polynomial coefficient (degree + 1 of them);
        # higher-order terms get exponentially larger precision, i.e.
        # stronger shrinkage toward zero.
        self.coefficient_precisions = [10.0 ** x for x in range(self.polynomial_degree + 1)]
        # Sparse Dirichlet prior over mixture weights (concentration < 1).
        self.concentration = np.array([0.1 for _ in range(self.num_components)])
        # Wishart degrees of freedom; dimension is polynomial_degree + 1.
        self.wishart_df = self.polynomial_degree + 2.0
        # Heavy-tailed Student-t observation noise (df = 2).
        self.student_df = 2

    def create_model(self, X):
        """Defines the joint distribution of the mixture model.

        # assumes X is a design matrix with polynomial_degree + 1 feature
        # columns (it is matmul'ed with per-component coefficients below)
        # -- TODO confirm with callers.
        """
        # One precision-scale row per mixture component.
        precision_scale = np.repeat(np.expand_dims(self.coefficient_precisions, 0), self.num_components, axis=0)
        joint_distribution = tfd.JointDistributionNamed(
            dict(
                # Per-component coefficient precision matrices; Cholesky
                # factors are produced directly (input_output_cholesky=True).
                precision=tfd.Independent(
                    tfd.WishartLinearOperator(
                        df=self.wishart_df,
                        scale=tf.linalg.LinearOperatorDiag(precision_scale),
                        input_output_cholesky=True,
                        name="precision",
                    ),
                    reinterpreted_batch_ndims=1,
                ),
                # Coefficients drawn with covariance = inverse of the sampled
                # precision (via the MatrixInverseTriL bijector).
                coefficients=lambda precision: tfd.Independent(
                    tfd.MultivariateNormalTriL(
                        loc=0, scale_tril=tfb.MatrixInverseTriL()(precision), name="coefficients"
                    ),
                    reinterpreted_batch_ndims=1,
                ),
                # Shared observation-noise scale with a half-Cauchy prior.
                scale=tfd.HalfCauchy(loc=np.float64(0.0), scale=np.float64(1.0), name="noise_scale"),
                mixture_probs=tfd.Dirichlet(concentration=self.concentration, name="mixture_probs"),
                # Observations: Student-t mixture whose component means are
                # the per-component polynomial fits X @ coefficients^T.
                mixture=lambda mixture_probs, coefficients, scale: tfd.Sample(
                    tfd.MixtureSameFamily(
                        mixture_distribution=tfd.Categorical(probs=mixture_probs, name="mixture_distribution"),
                        components_distribution=tfd.StudentT(
                            df=self.student_df,
                            loc=tf.linalg.matmul(X, coefficients, transpose_b=True),
                            scale=scale,
                            name="sample_likelihood",
                        ),
                        name="mixture_components",
                    ),
                    sample_shape=1,
                ),
            ),
            name="joint_distribution",
        )
        return joint_distribution
|
import numpy as np
from numba import njit
@njit(cache=True)
def calculate_goodness_of_fit(table):
    """Root-mean-square goodness-of-fit statistic for an (n, 2) count table.

    Compares observed counts against the expected counts under independence
    of the two margins.
    """
    # assumes table is an (n, 2) integer count array -- TODO confirm callers
    n = table.shape[0]
    m = table.sum()
    # NOTE(review): 'row_sum' is the sum over axis 0 (per-COLUMN totals) and
    # 'col_sum' the per-ROW totals -- the names look swapped; the arithmetic
    # below is consistent with these shapes, only the naming misleads.
    row_sum = table.sum(axis=0).astype(np.float64)
    col_sum = table.sum(axis=1).astype(np.float64)
    # Expected counts under independence: outer product of margins / total.
    e = np.dot(col_sum.reshape(n, 1), row_sum.reshape(1, 2)) / m
    # Root-mean-square deviation over the 2*n cells.
    s = np.sqrt(((table - e) ** 2).sum() / 2 / n)
    return s
@njit(cache=True)
def make_permutation_table(p, m, n):
    """Distribute m counts over 2*n cells at random; return an (n, 2) table.

    NOTE(review): the cell-probability vector ``p`` is only used for its
    shape/size -- np.random.choice is called WITHOUT weights, so counts are
    assigned uniformly rather than proportionally to the expected
    frequencies.  Confirm this is intended (numba's np.random.choice has
    historically not supported the ``p=`` argument).
    """
    permuted_flat = np.zeros_like(p, dtype=np.int64)
    order = np.arange(0, permuted_flat.size)
    # Drop each of the m counts into a uniformly chosen cell.
    for _ in range(m):
        permuted_flat[np.random.choice(order)] += 1
    return permuted_flat.reshape(n, 2)
@njit(cache=True)
def permute_root_mean_square_test(table, n_permute=3000, min_pvalue=0.034):
    """Permutation p-value for the RMS goodness-of-fit statistic of an (n, 2) table.

    Stops early (returning the running p-value) once the count of permuted
    statistics >= the observed one makes significance at ``min_pvalue``
    impossible.
    """
    # calculate real goodness-of-fit s
    n = table.shape[0]
    m = table.sum()
    row_sum = table.sum(axis=0).astype(np.float64)
    col_sum = table.sum(axis=1).astype(np.float64)
    # Expected counts under independence; only used to derive cell probs p.
    e = np.dot(col_sum.reshape(n, 1), row_sum.reshape(1, 2)) / m
    real_s = calculate_goodness_of_fit(table)
    # permutation
    p = e.flatten() / m
    # Start from 1 (pseudo-count) so the reported p-value is never zero.
    greater_than_real = 1
    # Early-exit threshold: past this, p-value cannot reach min_pvalue.
    max_greater_value = n_permute * min_pvalue
    for i in range(n_permute):
        p_table = make_permutation_table(p, m, n)
        # calculate permuted goodness of fit s'
        s = calculate_goodness_of_fit(p_table)
        greater_than_real += int(s >= real_s)
        # break in advance if p-value can be significant
        if greater_than_real > max_greater_value:
            # return current p value
            # NOTE(review): early exit divides by (i + 2) (permutations done
            # + pseudo-count) while the final return divides by n_permute --
            # the two conventions differ slightly; confirm intended.
            return greater_than_real / (i + 2)
    p_value = greater_than_real / n_permute
    return p_value
@njit
def calculate_residue(table):
    """Standardized residuals of an (n, 2) contingency table.

    NOTE(review): the denominator multiplies e by (1 - rowsum/m) and by
    (colsum/m); the textbook adjusted residual uses (1 - colsum/m) for the
    second factor -- verify this formula against its source.
    """
    n = table.shape[0]
    m = table.sum()
    # Margin totals (axis-0 sum = per-column, axis-1 sum = per-row).
    row_sum = table.sum(axis=0).astype(np.float64)
    col_sum = table.sum(axis=1).astype(np.float64)
    # Expected counts under independence.
    e = np.dot(col_sum.reshape(n, 1), row_sum.reshape(1, 2)) / m
    residual = (table - e) / np.sqrt(
        np.multiply(e, (1 - e.sum(axis=1) / m).reshape(n, 1) *
                    (e.sum(axis=0) / m).reshape(1, 2)))
    return residual
|
from radioapi.radios.Radio import Radio
import json
# Build a command payload with every flag cleared, then transmit it once.
jsonData = {'QDM': False, 'Launch': False, 'Abort': False}
radio = Radio(True)
radio.send(json.dumps(jsonData), True)
|
##
## Get commandline arguments
##
from sys import stderr as error
from sys import stdout as out
from sys import exit
from re import compile
def print_usage(output, bin_name):
    """Write the usage message for *bin_name* to the *output* stream."""
    usage_lines = (
        "%s: Usage:\n" % bin_name,
        "\t-h/--help\tPrint usage",
        "\t-u\t\tUser name",
        "\t-m\t\tModule name (form : 'B-NAME-CODE')",
    )
    for line in usage_lines:
        print(line, file=output)
def get(argv):
    """Parse argv for '-u <user>' and '-m <module>'; return (user_name, module_name).

    Calls exit(84) on any invalid or incomplete argument (or exit(0) for
    -h/--help), so it only returns on well-formed input.
    """
    user_name = ""
    module_name = ""
    i = 1
    # Module names look like 'B-ABC-123'; user names like 'first.last'.
    module_regex = compile("^[A-Z]{1}-[A-Z]{2,}-[0-9]{2,}$")
    name_regex = compile("^[a-z\-0-9]*\.[a-z\-0-9]*$")
    while i < len(argv):
        try:
            if argv[i] == "-u":
                i += 1
                user_name = argv[i]
                if not name_regex.fullmatch(user_name):
                    print("Error: Invalid User name", file=error)
                    print_usage(error, argv[0])
                    exit(84)
            elif argv[i] == "-m":
                i += 1
                module_name = argv[i]
                if not module_regex.fullmatch(module_name):
                    print("Error: Invalid module name", file=error)
                    print_usage(error, argv[0])
                    exit(84)
            else:
                # Any other token prints usage; -h/--help exits 0, others 84.
                if argv[i] == "-h" or argv[i] == "--help":
                    output = out
                    exit_value = 0
                else:
                    output = error
                    exit_value = 84
                print_usage(output, argv[0])
                exit(exit_value)
        except IndexError:
            # A flag was given without its value (argv[i] ran past the end).
            print_usage(error, argv[0])
            exit(84)
        i += 1
    if len(user_name) == 0 or len(module_name) == 0:
        # NOTE(review): this error path does NOT exit -- it still returns the
        # (possibly empty) names; confirm whether exit(84) was intended here.
        print("Error: User name or Module name is missing", file=error)
        print_usage(error, argv[0])
    return user_name, module_name
|
__pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x08\x00\x55\x0d\x0d\x0a\x04\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\xdc\x0f\x00\x00\x00\x00\x00\x10\xac\xe4\x91\x3d\xb6\x93\x02\x2d\xa7\xfa\x52\xf5\x3f\xd6\x9c\x72\x00\x00\x00\x00\x00\x00\x00\x00\xe5\x4a\x3b\x2f\xe6\x1f\x89\xcb\xf3\xb6\x8d\x89\xfd\x4d\xc3\xba\x2d\xb4\xb1\x13\xd7\xa2\x56\x09\xcd\x8f\xdc\x94\xaa\xb7\xa8\x0e\xf6\xa1\xaa\x57\x86\xaa\x12\xe6\xc6\x6a\xfa\x8c\x60\xc6\xd9\xc8\x05\x93\xee\x8b\x36\xcb\x7a\xa6\xe1\x10\x45\x08\x20\x72\xf0\xca\x02\x34\x15\xba\x23\x90\xf4\x88\xab\x5a\x92\x4a\x5b\x11\xa5\x5b\x1c\x7c\x09\x05\xcd\x3f\x4a\x2f\x77\x93\xb7\xfc\xbc\x88\x35\x60\xef\xfd\x4d\x76\x87\x20\x9d\xac\x13\x3b\x25\x8a\x67\xdd\x49\x67\x03\x79\x0c\x54\x7f\x53\x3f\xf1\x69\x6a\x52\xfc\xe6\x70\x67\x81\x1e\xc2\x80\xea\xf1\xb4\x3f\x24\xe0\xb6\x6c\x4d\x6b\xe5\x1f\x76\xd9\x8d\x2d\xdb\xac\x7e\x32\xc4\xc5\x2b\x51\x81\xe9\xb0\xec\xbc\x10\x63\xa6\xac\x48\x0b\xc3\x92\xfb\x1c\x0f\x91\x68\xd3\x62\x3d\xc8\x31\x5d\xa5\x0b\x97\xca\x56\x8d\x4c\x60\xb2\xdd\xbc\xde\xe4\xce\xbd\xd4\x0b\x3d\xb5\x4e\x77\x29\x67\xde\x6d\x91\x82\xe9\xd3\x48\xb0\xae\x51\xaa\x65\xb0\x6f\x11\xf0\x82\xcb\x6e\xbd\x10\xd1\x30\x40\x0e\x7a\x0e\xf7\x3c\xd4\xd4\x5d\x19\xc6\xc7\xa3\xcf\x82\x55\xf8\xf7\x64\x1b\x54\xca\xe7\xdb\x33\xde\x5f\x8f\xb3\xb2\xac\x2b\x03\xaa\x4d\x9a\xd5\x86\x0e\xad\xbc\x7f\xb9\x6b\x08\x6f\x1f\x06\xb1\x7a\xe2\x39\xf7\x84\xb2\xc4\xf8\x66\xb9\xbb\xdb\x64\x06\x85\xb9\xa4\xec\x5f\xf6\xa5\xa8\x57\x41\x62\x0a\xa3\x7c\xe1\xd4\x4f\x68\x9e\x59\x7d\x4b\xb8\xe7\x53\x70\xa5\xff\xdc\x3f\xac\x27\x4b\xa5\x10\x00\x4a\xe8\x4d\xef\x93\x1e\x8e\xf4\x89\x88\xbe\x5f\x13\x79\x14\xa8\x8e\x73\xa5\xd6\x60\x36\x7d\x28\x57\xb5\x7d\xfe\x07\x4c\x92\x24\x1a\x10\xa5\x80\xb6\x8f\x4e\x84\xf8\x24\x59\xc6\x98\x0e\xb9\x1e\xde\xf0\xad\x20\x9b\xaa\x3f\x4e\x7e\x58\x60\x5f\x86\xc4\x0e\x1b\x0f\x07\x6f\x23\x10\x65\x40\xe0\xde\xd0\x5b\x09\x8c\x76\x64\xc6\x26\x03\x90\x75\xd9\xd5\x46\xb4\xc4\xc3\x96\x92\x51\xda\x9b\x65\x79\x18\xaf\xc5\x4d\x1d\x91\x
a5\x56\x0c\x20\x74\xd4\x65\x37\x37\x87\xfb\xc7\x31\x6e\x42\xd3\xc9\xea\x2b\xd2\xe5\x20\xa5\x0d\x7d\xd9\xce\xc4\xe5\xe5\x50\x12\x17\x6b\xd5\x0f\x4b\x83\xf6\x8c\x42\xca\x53\x44\xef\x02\x98\x6d\x5e\x6f\x75\xfc\xba\x0a\xe8\xfd\xb9\xd8\xe5\x12\x3a\xe5\xb5\xd4\xde\xe8\xb6\xbe\xad\x8a\xb0\x4f\x88\x6b\x26\xbb\xce\x2d\x0f\x24\x62\x60\x98\xa0\xe1\x5c\x13\x4b\x58\x53\x9f\x8b\x7e\x18\xb7\xcd\x5e\xea\xb9\x31\x1f\x60\x32\x33\x43\x73\x62\xf0\x47\xc2\xab\x2d\xfb\x66\x87\x25\xdf\x24\x80\x28\xf2\xe1\x84\x7b\xe5\x29\xa7\x6b\xd0\x7a\xf9\x2e\x3b\x60\x71\xdf\x3b\x80\xad\xd1\xf4\x51\x99\x1d\x02\x6f\xf9\xc5\xda\xb7\xe7\xb3\xd8\x6a\xf8\xf5\xce\x86\x7c\x9c\x8a\xfe\x90\x12\xa2\xd7\xa9\xd7\x8b\xc9\xdd\x1f\x25\xe6\x30\xe5\x05\x4b\x46\x88\x67\x8a\x06\x07\x7a\x3f\x49\x95\x95\x6d\x3f\x67\x86\xf0\x76\x89\xc2\xb8\xb0\xc3\x1b\x74\x57\xc5\x56\xe0\xa8\x83\x2f\x37\x4f\xc4\xdb\x83\x30\x93\x5a\xbf\x50\x08\x74\x51\x5b\xff\x44\x63\xf9\xf5\x89\xf3\xe7\xd6\x93\x5a\x5c\x5c\x14\x9e\xca\x53\x9d\x6a\x56\x23\xf3\x60\xbc\xe6\x25\xc8\x20\xb9\x46\x0c\x05\x31\x03\x48\x2b\xf3\x7e\x5b\xa3\x93\x51\x89\x7c\x91\xc7\x0e\xdc\x46\x0a\x23\xab\x09\xbc\xd3\x95\x76\x30\xb7\xe2\x38\xdc\x53\x18\xb1\x4c\x83\x3c\xd6\x64\xa3\x75\xb6\x85\xeb\x0d\xd7\xb0\x2f\x36\x0d\xc9\xd0\x0d\xcf\x1c\x8b\x35\x54\xe5\x35\x54\x75\x00\x90\x58\x02\x6a\xd3\xcf\x4e\x20\x57\x92\x3b\x84\xae\x22\xed\xa7\x4a\xfa\x21\x10\xcc\xf3\xdd\xb6\x25\xf7\x31\x36\x03\xa4\x16\xb5\x05\xa8\x20\xe4\xf9\xe6\xef\xf8\x65\x89\x95\xa3\xa7\xee\x96\xfe\x66\xac\x26\x36\x97\xf7\x9e\xd6\x0d\x56\x96\xe7\xdd\x9a\xaf\x26\x4d\x13\x7c\x9f\x48\xd9\x92\x1c\x3a\x59\xf2\xa4\x21\x65\x1d\x18\xbe\xf9\x46\xa5\x59\x72\x12\xb8\x45\xdc\xdc\xcf\x4a\xb1\xec\x92\xd5\x83\x55\x65\xb3\x84\x4f\x91\xb6\x3e\x53\x66\x79\x7a\x63\xc2\xd4\x50\xd2\x5a\x81\x90\xc6\x95\xf2\xc4\x2a\xef\x92\x26\x28\x61\xc7\x4a\x5d\xfe\x71\xba\x49\xcd\x7a\x8a\x39\xc4\x23\x1b\x32\x20\xcf\x6c\xd0\x0b\x22\xee\x27\x36\x29\x66\x85\xa8\xbf\x01\x69\xd4\xa5\xcb\xd9\xf3\x89\xea\x10\x07\xf5\x90\x19\xc9\xa6\xf1\xcb\xb3\x08\xae\xf5\x36\xa7\x25\x74\x
72\xdc\x95\x59\x5f\x1e\x92\x9b\x45\x0c\xf5\x8f\xf5\xe9\xb5\xe4\x0a\xf3\xda\x4b\x51\xf0\x61\x6d\xa3\x05\x33\x89\x15\xf8\x2a\x41\x1b\xe2\xcc\x53\x95\x74\x18\xcd\x02\xe3\x70\xc9\xc3\x9c\x75\x5e\x7d\x81\xb4\xe6\xf5\xf1\x2c\xcc\xc3\xed\xb0\xad\xd6\x7c\xe2\xb1\x1b\x82\x6b\x94\x4b\x91\x6c\x3e\xd8\x24\xe9\x21\xcb\xd4\x59\xb7\x89\x41\x46\x6f\xb2\x8e\x11\xc0\xa2\xfb\x67\x16\xf2\xb7\x08\x11\xd5\x65\x77\xd3\x89\x26\xe7\x5e\xcb\x50\x46\x1e\x6d\x58\x2e\x83\x63\x73\xe0\x66\x99\x40\xe4\x02\x7d\x7a\x91\x4b\x25\x48\x92\x92\x26\xc7\x81\x2c\xb6\xd6\xa0\xa7\x8c\x9b\x4e\xeb\xf8\x28\xb1\xf3\xa3\xae\x44\xa1\x09\x69\x6b\x7c\x11\x55\x78\x52\x21\x5e\xd2\xe0\xa6\x8e\xf2\xf9\x35\x29\x38\x6f\x4b\xa0\x79\xd4\xdc\x92\xed\x9a\x9f\x88\xd0\xb4\xa5\xcf\x27\x9a\xac\x7b\x85\x07\xde\x92\x29\xcc\x1f\x21\x7f\x42\xa8\x86\x51\x35\xb2\xe2\x4d\xcd\x5c\xb2\xf0\xbd\xd3\xf1\xd5\xf2\xf7\xf3\x60\x8c\x54\xc0\xf7\x1d\xc8\x82\xdb\x08\x69\xc7\x35\xfa\x07\xd7\x90\xf8\xc4\x67\x93\xba\x36\x24\xe7\x45\x78\x86\x49\x73\x1a\x7e\x1e\x09\x31\x09\xcf\xbb\x1e\x92\x12\x64\x58\xbe\x22\xb3\xcf\xa5\xc7\xa4\x00\x8a\xc0\xa0\x28\xd7\x89\xd9\x70\x3a\x1f\x2b\xde\x2a\xa1\xe5\x60\xd1\x22\xa3\x67\x88\x6c\xf5\x6c\xe0\x08\xc2\x12\x6c\x21\x50\x25\x05\x93\x4a\x45\x8c\x2b\xcb\x57\x09\xff\x44\x2a\x69\x48\xd6\x55\x63\x32\xe6\x09\xb2\xd0\x75\x50\x3f\x62\x81\x30\xc6\xbd\x8b\xb6\xbf\x8b\x8d\xc1\x60\xed\xaf\x0b\x29\x66\x05\x25\x94\x5e\x75\x94\xa0\xa7\xac\xeb\xd7\x0c\xb1\x34\x61\x9e\x3c\x58\x2a\x5f\x2d\x3d\x2b\xd4\x5a\xba\xa5\xf4\xea\xfc\xce\xad\x3d\x1d\x44\x0c\x43\xbe\x9b\x74\xbf\x0c\xd6\x95\xcd\x88\xc4\x18\xa7\x05\x1a\x88\x57\x04\x3c\x5f\x60\x97\xcb\x4a\x44\x77\x42\xd5\x7c\xfb\xeb\xb8\x52\x14\x35\x65\xcd\xe2\xd0\xf5\x2d\xcd\xa0\x5e\x0a\x37\xc4\xa0\xe4\xcc\x9e\x67\x1f\xc3\xff\xef\x7e\x42\xfc\xaa\xa6\xe6\x0a\x7c\x29\xd8\x3f\xf1\x3e\x16\x1d\xc9\xbe\xc6\x00\x62\xe2\xec\x0c\x82\x90\xa3\x71\x70\x19\x25\x31\xff\xca\x7b\x61\x32\xc0\xbd\xe4\x23\xa6\x78\x50\x8c\xb5\xad\x08\xbc\x29\xb1\xd6\xd7\x94\x82\xce\xce\x74\x99\x29\xd5\xad\x88\x51\x47\x80\x08\x80\x52\xe6\x
6a\x76\xda\x03\xbe\xdc\xb9\x1f\x91\xe5\x97\x31\x28\x77\xb3\xe4\x36\xd2\xbf\xf2\x5d\x37\xe9\x39\xb8\x2c\x13\x11\x92\xf7\x04\x0b\xae\x64\xba\xd1\x50\xdc\x28\xbc\x79\xeb\xdd\x43\xd9\xe0\xd6\x9c\xc0\x75\x8f\xfe\x0a\xb4\xb0\x7e\xc3\xb1\x4c\x19\x59\x25\x64\xa2\x9c\xb2\xf8\xc4\x14\x9a\xc5\x8f\x6e\x36\x8e\xb2\x2f\x97\x13\xeb\xdc\x5e\xfb\x4d\x5f\x95\x7a\x9b\x86\x6a\x78\xc7\x7a\x96\x5c\x46\xae\x4f\x7c\xb6\x77\xf7\x20\xb0\x0e\x36\x02\x01\x9d\x7f\xf7\x18\x7b\x13\x3c\x08\x76\x80\x38\x6e\x00\x28\xa5\x2a\x50\xf1\x4e\x5d\xb5\x4a\x1f\x8d\xb8\xc3\xdc\x8a\x53\x7c\x92\x3e\xfb\x47\xfe\xda\xf9\x9d\x8d\x7d\x92\x31\x5e\x24\x0c\x5a\x85\xf6\x4b\x1f\x4b\x98\x87\xf2\x7b\xb7\x9d\xba\xa5\x96\xf4\xa7\xda\xbe\x20\x9c\x94\x84\x8e\x1b\xe3\xe4\x4c\x92\xf6\x76\x51\x60\x3a\x68\x23\x18\xfe\x32\xa6\x4e\xaa\x1c\xd2\x66\xed\xd0\xaa\xed\xf2\x91\x73\xf8\xa6\x20\x4a\x6c\xff\xb6\xf7\x74\x5f\x61\x11\x50\x2a\x8c\x8d\x43\xa5\x96\x94\x93\x75\xf5\x3c\x5b\x86\x2e\xed\xce\x1f\x0e\xf4\x3a\xec\x52\xe8\x95\x3e\x3a\x7b\x51\xe4\x9f\x5f\xdc\xa5\x48\x96\x9a\xbb\xdd\xd9\x66\x7a\xae\xfb\xff\x19\xea\x7f\x75\x45\x60\xd7\x65\xa1\x4a\xc1\x6c\xf3\xab\x28\xeb\x05\xf7\xb3\xc6\xea\xd0\x3e\x09\xdb\xee\xe3\x6a\xfc\x49\xfe\x39\x5e\xc9\xc1\x10\x07\x97\x42\x86\x77\x19\x58\x36\xbd\x0b\x3c\x8f\x83\x60\x0e\x03\xf4\x21\x1d\x61\x7d\x21\x43\x9c\xdf\xf0\x44\x0e\xa4\x8a\x4c\x0e\x7b\x1f\xf1\x9b\xb8\xbe\x61\x97\x50\x4e\xd7\x66\xbc\xe5\x55\xdb\x26\xa9\x8c\x2b\x2a\x03\xf9\x9a\x3b\x6f\x12\x65\x6c\x5d\xeb\x67\x51\x7b\xd2\x74\xe8\x33\xc8\x0b\xfc\x25\x43\x86\x12\x25\xd2\x30\x8d\xc1\x15\xed\xc5\xc5\xa6\x6f\x60\xb0\x70\xf4\xfa\x2b\xbe\xa3\x4a\xa2\x38\xa4\x41\x86\xe1\x8c\x85\x06\x11\xb2\x71\x57\xfd\x46\x12\x81\xfa\x52\xc5\x13\x47\xf2\xca\xa3\xb6\x22\x00\xa3\x83\x93\xa9\xaf\x1b\x63\xcc\xf6\x8f\x15\x2f\x71\x0a\x52\xfb\x1d\x30\x1a\x1f\xdc\x78\xb4\x02\x83\x79\x76\x66\x8b\xf1\x07\x1e\x78\xed\x11\xaf\x7e\x42\x6c\x2f\x94\x8e\x84\x16\x4b\x5d\xf1\xad\x16\x0b\x53\x4f\x77\x5b\xa1\x01\x0d\xa7\x75\x45\xd2\x5e\x6a\x53\xe6\xb2\x05\xea\x56\xc0\xea\x27\xd1\x43\x97\x83\x6b\x
89\x32\x0b\xf0\xff\x2b\x3c\xd6\xb7\x2d\x8d\x4c\xa1\xb7\x4d\xa9\x43\x20\x48\xaf\x9b\xc6\x1b\xce\x12\x9e\x20\x10\x41\xe1\x16\xc7\xe1\xe1\x0c\xcc\x53\x76\xc0\xcc\x3c\x7b\x36\x78\x50\x6e\x8e\x11\x7f\x08\x3f\x08\x07\x40\x4b\x49\x03\x77\x70\x5c\x83\x8c\x3f\x9a\x1d\xa5\xc5\xcd\x56\xa6\xcd\x71\x17\x9d\x7e\x71\xbd\x55\x57\xa0\xba\xbd\x79\x81\x76\x5a\xcc\x00\x5c\x5b\x35\xdc\xf9\xb3\x16\x4f\x44\xec\xc4\xca\xe8\xf1\xba\x1b\xfe\xd3\x83\x03\x3d\x97\x1e\x1c\x31\xac\x68\x16\x2f\x6b\xb7\xcd\x8b\x49\xfc\x90\x46\x29\x48\xfe\x1f\x89\x86\x8c\xb5\x76\x19\x6d\xc9\x8a\x3a\x62\x6f\x8f\x0e\x7c\x1f\x4c\x7e\xf0\x70\xa4\xa2\x57\xc8\x60\x2f\xbb\x96\xe9\x07\xb0\x91\x54\x7b\x1e\x79\xaa\x93\x45\x4a\xe1\x1a\x6f\xb6\x51\xf2\x80\x01\xee\xd4\xbd\x71\x3b\xbd\xa9\xa8\xac\x1e\xab\xf9\x32\xac\x5b\x88\x6d\x8e\x45\xa0\xed\xa3\x36\xa8\xc7\x02\x7c\xd8\x66\x6f\xdc\xf7\xa4\x8c\x74\x00\x91\x11\xea\x94\x39\x19\xee\xd9\x6e\xc6\x6c\xff\x49\xf2\x18\xc5\x4f\xf3\x92\x55\xb4\x4b\x5d\x78\xab\x4e\xd2\xac\xbc\xfa\x67\x77\x47\x4e\x5b\x33\x58\x08\xc8\x12\x2e\x32\x80\x39\x6e\xd2\x9e\xa5\x95\x24\xcb\xf1\x35\x70\x49\x57\x17\x33\x5f\xc8\xc6\xfa\x4e\x4e\xd6\x5c\xcf\x48\xbb\xa9\x97\x58\xb7\x56\xaa\xe1\x6b\x60\x4f\x5a\xbc\x4b\x75\x1b\x0a\x89\x3e\x7f\x55\xe6\x37\x2f\xc1\x0d\xc4\x58\x12\x44\x8b\x63\x0e\x24\xee\xdb\x13\xde\xfe\x9e\xfb\x22\x58\x72\x45\xf4\xbe\x00\x97\x2e\xf4\x3f\x86\x37\x43\xf9\x5c\x07\x9c\x2b\xfc\xfe\xa7\x6b\x08\xd4\x19\x33\xff\x98\x7b\x21\x14\xe0\xff\x49\x4c\xc1\x99\xca\x46\xd9\xa4\xfb\x02\x59\xfe\xf4\xa6\xbe\xfd\x2e\x58\x28\x44\xee\x42\x6b\xb2\x9a\x64\x82\xa3\x48\xe2\x84\x2e\xba\x90\x72\x24\x76\x8b\x43\x17\xf6\xdb\x31\x43\x29\x58\x77\x95\x8a\xec\x76\x67\x89\x21\x00\xf1\x56\x64\xfd\x2f\x1b\x6c\x37\xec\xa6\xa2\x07\x57\x9a\xb1\xd6\x25\xa5\xf3\x81\x0e\x18\xd6\x3b\xb9\x52\x0a\xdc\xe6\xdc\x4e\xc5\x9c\xaa\xa4\x29\x76\xce\x06\xd2\x61\x96\x8f\x7e\xd8\x72\x48\x58\x90\xba\x62\xd0\xf7\xf8\xe9\xb6\xc9\x23\x33\xef\x66\xac\xed\x11\x20\x49\xa5\x64\x4e\xec\x35\x4e\x93\x2d\x73\x69\x1f\xe6\x26\xf3\xfc\x63\x4f\x06\xea\x7e\xa2\x35\xd4\x
68\x98\xec\xc2\x79\x3c\xa2\xe4\x16\xa5\x9d\xad\x2d\xe9\x4d\x02\xa9\x90\xcb\x8e\xdf\x83\x25\xe7\xaf\x91\xbb\xab\x12\xdd\xe0\xb3\x53\x7d\x2b\xcb\x23\x44\x92\x60\x79\xe0\x21\x89\x23\xbf\x0a\xf1\xfd\x89\x3b\x1f\xed\x8c\xf6\xb3\x77\x40\x04\x14\x3e\x6c\x9c\x0e\x90\xa3\x23\xe3\xb7\xbe\x5d\xf6\xb2\x11\xbf\x47\x00\x7c\xfd\x45\x47\x66\xe1\xcb\xaa\x93\x22\xdc\x8b\xdd\xe9\xc6\xd3\x6d\xd5\x0c\xa8\x53\x30\x6a\x86\x21\xfb\x76\xc3\x2f\x4c\x71\xcf\x9b\x73\x9e\x55\x79\x9d\x7a\xc9\xdd\x33\xeb\xcf\x74\xe8\x19\x87\x61\x79\x6b\x64\x7f\xe3\x5a\xec\xca\xed\xb1\x20\x5d\x29\xc1\x88\xb2\xa5\x56\x48\x9d\xbf\xb9\x3b\x8f\xee\x26\x4c\x3d\xb8\x14\xe3\x52\x26\x46\xd5\xd9\x31\x4b\xb1\xda\x5c\x57\xf7\xe8\xce\x63\x29\x39\xa5\xd1\x23\xbe\xd0\x34\xe5\x30\xff\xc9\x8f\x48\x69\x16\x4e\x81\xe3\x62\x10\x56\xd3\x8a\xde\xe2\x1e\xf1\x34\x47\x35\xcb\x15\xff\x3f\xb8\xc5\x4f\x44\x8a\x76\x2b\x0a\x67\x27\xa7\xf4\x16\xb6\x9b\x88\xab\xfb\x93\x55\xdd\x64\x7e\xef\xd0\xe1\x35\xad\xaf\x86\x71\x7c\x48\x8d\xb9\xb0\xb7\x84\x31\x79\xf0\x1a\xde\xd5\x9d\x76\x73\xbb\x4b\x31\x96\xd8\x4f\xe7\x97\xaa\xe6\xed\x70\x7d\x07\x85\xfe\xe3\xb5\x76\x01\x61\xf6\x18\xec\x3c\xd5\x2b\xe3\xb0\xa3\x14\xb7\xb6\x53\x54\xeb\x3b\xc8\x1e\x86\x8d\xc0\xe1\xce\xa2\xa4\x37\x1e\xb3\x33\x58\x84\x5e\x71\xcf\x27\x0d\x8e\xca\xbf\x09\xce\xea\x36\x76\xfc\xc8\x73\x18\xb1\x8a\x72\xe1\x38\xe0\x98\xcd\xee\xed\x3d\x0c\xd9\x8e\x41\x91\x05\x4d\x91\x4f\x94\x3a\x26\x74\xb9\x5f\xb1\x54\x5a\x7f\xad\x50\xc4\xae\x4f\xff\x62\x72\x59\x26\xe8\x71\x56\x52\x36\xab\x53\x1e\xb6\xe3\x60\xc6\xe0\x91\xc7\x23\x41\xd1\x28\x53\x78\x7d\x11\x4f\x6d\x2d\xbb\x82\xda\x6e\x7b\xad\xe2\x7e\x56\x3e\xda\x34\xa0\x39\xe9\x4d\x9c\x5b\x00\xca\xac\x74\x61\x75\x4f\x4a\x53\x3c\x61\xc5\x6b\x81\x5a\xbd\x95\x11\xc5\x1b\x2a\xe7\xbd\xac\xf5\x22\x3b\xcb\xcb\xda\x9d\xe7\x64\x2f\x0e\x74\x36\x28\xac\xf2\x8e\x80\x26\xed\x85\x0a\xb9\xe4\xa1\x7c\x47\x1a\xef\xec\xea\x9b\x92\xfa\xb0\x52\xd5\x32\xa5\xff\x2c\xdf\xa8\x19\xb5\xc6\xee\x77\x5c\x3e\xc1\x29\xf8\xe8\xfc\x19\xef\x96\xe5\x05\xad\x2d\xc6\x57\x36\x81\x3f\x14\xc5\x
1d\xcb\xec\x97\x1f\x40\x25\x3e\xac\x32\x7c\xef\x13\xc9\x4b\x0d\xdb\x70\x4a\xac\x13\x72\xaf\xfe\x28\xd3\x58\xd3\xfe\x3b\x1e\x99\x5c\xaf\x06\xc6\xa2\x44\x83\xd6\x9c\xe8\xed\xc6\xfb\xcc\x12\x0b\x7d\x56\xd2\xfc\xb4\x60\x82\x22\x48\xcd\xfc\x9c\x84\x0b\xc8\x1c\x4d\x93\x5c\x2d\x80\x33\xba\x3a\x68\xe7\x8d\x48\x65\xc2\x6f\xbe\xf5\x11\x62\xcc\xdc\x83\x76\xc2\x21\x78\x44\xc8\x58\xb1\x6d\x35\x6d\x65\xc7\x34\xfd\x7f\xca\x7b\xd1\x66\x6d\x91\x9f\x1c\xa8\xd8\xc4\xa0\xdf\xe8\xc6\xfb\x3c\xb8\x95\x60\x3e\xe6\x63\x12\x35\x37\x7e\x56\xf1\x5e\xc8\xa9\x07\x6f\x29\x0a\x89\x5d\xc1\xde\xe1\xd7\xb3\x12\x5a\xf1\xf1\xcb\xcf\xf7\xde\x1d\x2f\x46\x84\x30\xe5\xda\x05\x09\x7a\xd0\xc0\x48\x0e\x64\xbc\x91\xfb\x29\xca\x8b\x36\x5c\xa1\xb8\x5e\xad\x05\xfd\x58\xdb\x76\x07\x23\x83\x84\x3f\xb8\xa5\xcb\x72\x12\x01\x91\x05\xdf\x4a\xd2\x07\xbd\x15\x4b\x20\xd5\x14\xa8\x43\x4c\x4a\x2e\x5e\x08\x6e\xfd\x62\x19\xe6\x29\x8c\x13\x42\x3c\xfe\x85\x0c\xb7\x64\x2d\xb9\xcb\x46\xd8\x3a\xdb\x98\x59\x18\xeb\xe0\x61\x66\x61\xb1\x16\x30\x79\x8f\xbb\x49\xa4\x20\x04\xe8\xf2\x2c\x56\x9f\x1f\x56\x6e\x40\xd7\x07\xa4\x25\x9f\x12\xd9\x03\xea\xec\xbd\xf9\x2d\x9b\xc5\xab\x46\x97\x98\x59\x74\xc9\xa3\x70\x6e\x9a\x53\x3e\x6b\x69\xa6\x13\xf1\x48\xce\x07\x4f\x4a\x95\xf5\xcf\x20\x75\x5c\x17\xc9\x29\x17\xcb\x0a\x7b\x66\x60\xf8\x02\x2c\x1a\xf0\xe4\x70\xda\x08\x68\xa7\xdd\xcc\xf5\x0c\x3c\x45\x38\x2d\x29\x21\xd1\x95\x14\x25\x12\xac\x40\xa8\xd3\xc5\xfc\xc5\xe4\x27\xb3\xf0\x63\x98\x43\xbd\x59\x4c\x14\xed\x46\xed\xba\xb6\xd1\x35\x54\xee\xf3\x9c\xd5\x95\xfd\xe6\x65\x2b\x5e\x2b\x3a\x4f\x1d\x8f\xb0\x16\x9c\x23\x44\x91\x15\x45\xba\xf1\x16\x50\xfd\x4e\x0d\x44\xf8\x60\x38\x37\x74\xa5\x9b\xf1\x39\x42\x1d\xce\x62\x9e\x95\x57\xfc\x30\x73\x6a\x8f\x26\xd8\xca\xd1\xb8\x22\xad\x97\xaa\xf4\x70\x70\x51\xe8\x7a\xff\xa3\x4a\xbe\x49\x49\x26\xd3\xad\x1b\x1c\x6b\xe9\x51\x94\x3c\xb0\x88\x5f\x0c\x55\x27\x2d\x2a\x72\x76\xc9\x02\x1b\x8b\xb4\x94\x32\x9c\x22\x36\x93\xe5\x27\xb8\xdb\xf0\x64\x56\x04\xa4\x36\x9c\xfb\xd9\x67\xe5\x61\x06\x21\xbd\xd5\x2f\x05\x49\x32\x4b\x84\xdb\x
d8\x40\x51\x68\x96\xa4\xf4\x2f\x1b\x9f\xbc\xaa\x64\x52\xee\x6e\x65\x4a\xf7\xb8\x39\x8a\xa0\x9a\x15\x20\xca\x70\x90\xeb\x42\x63\x8a\x06\x8c\x34\xc9\x46\x75\x7f\x99\x1e\x7b\xd1\x72\x5d\x15\x71\x5e\xb4\x77\x99\x21\xb7\x06\xa0\xfa\xc0\xb7\x16\x65\xfa\xf7\x5e\xc2\xbc\xb4\x30\xe7\x21\xa3\xa4\xe1\xfa\xff\xd3\x26\x5c\xc4\xf5\x80\xed\x8c\xa6\xd1\xac\x4f\x6e\xec\x50\x9a\xae\xce\xea\xaf\x7b\x78\xdd\x1b\x80\x32\xf8\x49\x72\xbb\x6a\xa5\x54\x4b\x2a\x95\xed\x8d\x4c\x48\x81\x9e\x3d\x04\xdd\x8f\xf4\x50\x0c\xe7\x4e\xa7\xb7\xc1\x14\x4b\x80\x2f\x0b\x21\x88\x3a\xb5\x61\xb8\xb8\x09\x77\xc7\x5b\x94\x7e\x3e\xc5\x78\x2a\x21\xbc\xab\xb1\xaa\x48\xe3\x78\xcd\x37\xc2\xf9\xb0\x3f\x48\xc8\x98\x69\x0c\x54\x29\xae\xe2\xfe\x88\x2f\x3e\x69\xb6\xf0\xe8\xf9\xa0\x85\x58\x54\xf2\x6f\xbf\xcd\xc8\x2c\x93\x38\x3d\x14\x49\x81\x2d\x3b\x41\xba\x67\xd2\x2b\x81\x9f\xd3\xcd\xd9\xed\xd0\x2b\xf1\xe2\x53\x47\xad\xc8\xb5\xc3\x91\xe4\xb6\xae\xc6\xa3\x99\xfa\x89\xe6\x0a\x16\xae\x2a\x32\xef\x4c\xd0\x9f\xb3\x0e\x6a\x22\x8f\xc0\x82\x29\xba\xf0\xa4\xcc\x54\x15\x7a\xe7\xd9\x70\x24\x3b\x85\x13\xaa\x54\x96\x3e\x1b\xe5\x3b\x35\x86\xe2\x8c\x59\xf6\xe2\x5d\x80\x81\x61\x36\xdc\x01\x6d\x2c\xde\xbb\x9b\x16\xa3\xc3\xb4\xeb\xc9\xf5\xe1\xb7\x96\x7d\xc6\x7b\x01\x74\x61\xf6\xe3\x82\x3f\x60\xa8\x93\xac\xa4\xda\x45\x08\x3c\x84\xe4\xe5\x17\xaf\xcf\xf2\x7c\xaa\xd7\x38\x61\xfa\x5f\x85\x86\x0e\xcd\xdc\x32\xbc\x63\x5c\xc9\x39\xec\xac\x3c\x04\x27\x71\x29\x3f\x2e\x0b\x6f\x7e\xc6\xa1\xfd\xc5\xf0\x22\xb4\x82\x75\x55\x17\x15\x89\x20\x9b\xf7\x59\x4d\xc6\x73\x4a\xf3\x6c\xf6\x9b\xd8\xa3\xa9\xe1\xc2\x8f\x31\x0e\x48\xce\xc1\xe9\x0f\xb9\x00\x81\x5f\xe2\x86\x5b\x46\x79\xe3\x01\x72\xb2\xe0\x2b\x81\xc8\x7e\x7c\x46\x48\x49\xde\xcd\x06\xf7\x7a\xfd\x69\x4c\x98\x87\xac\xe1\x06\x44\xf2\xf4\x4e\xf0\x77\x62\xc9\xd6\xf2\xa0\x35\x51\x88\xb0\xeb\x2f\x4d\xac\x9d\x88\x21\x54\xf2\x7e\x43\x44\xeb\xb2\xbd\x91\x93\x9c\x54\xf0\x0f\xe4\x9c\xfb\xbe\xa5\x52\x09\xd8\xef\xc2\x01\x16\xa8\x7f\xdd\xd8\xeb\xa0\xa9\xb6\xdc\x35\x88\x57\x57\x65\x8c\x93\x3d\x78\xaf\xee\xc4\x3a\x65\x12\x
9c\x4e\x58\x18\xac\x3d\xf7\xf1\x60\x36\x0e\xf6\x3d\xf7\x52\x4c\xbd\xc6\xc8\xc0\x66\xe1\x24\x80\x2c\xe3\x53\x32\x50\x00\xcf\x56\xb5\x61\x26\xc0\x3a\x7d\x30\x87\x5d\x0f\xea\x20\x93\xe8\x44\x3b\x20\xe4\x49\x75\x31\x5d\x5d\x67\xd3\x44\x17\x8d\x98\x76\x46\x28\x73\x14\xc1\xeb\xd2\xd9\xea\x2e\x24\xe1\xbc\x7a\x5c\x14\x52\x0c\xa6\xda\x7e\x62\xd4\xde\xf7\x89\x1a\x90\x8a\x90\xc1\xb4\x94\x6f\xc2\x3b\x07\xbb\xc8\xb1\xd1\x5c\xc0\x1e\x7c\x92\xba\x1d\x9e\xdb\x84\x99\x18\x88\x1e\x80\x18\x5d\x6d\x23\x86\x83\xa3\x2e\x3c\xa3\x79\x36\x11\x09\x51', 2) |
#
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
# %%
import json
import logging
from pathlib import Path
import jsonschema as jschema
import pkg_resources
# %%
# Here is the Schema file, loaded as the default to validate against
# Schema files bundled with the package, used as defaults to validate against.
LOCAL_SCHEMA_FILE = Path(
    pkg_resources.resource_filename(
        "aquarius", "ddo_checker/schemas/metadata_local_v0_6.json"
    )
)
# NOTE: assert-based checks are stripped under ``python -O``.
assert LOCAL_SCHEMA_FILE.exists(), "Can't find schema file {}".format(LOCAL_SCHEMA_FILE)

REMOTE_SCHEMA_FILE = Path(
    pkg_resources.resource_filename(
        "aquarius", "ddo_checker/schemas/metadata_remote_v0_6.json"
    )
)
# BUG FIX: previously asserted LOCAL_SCHEMA_FILE.exists() again while the
# message reported REMOTE_SCHEMA_FILE, so a missing remote schema went unnoticed.
assert REMOTE_SCHEMA_FILE.exists(), "Can't find schema file {}".format(
    REMOTE_SCHEMA_FILE
)
# TODO: Handle full file path vs. dictionary better?
# %%
def load_serial_data_file_path(file_path):
    """Load a serialized data file (currently JSON only) into a dict.

    :param file_path: path to the file, as a str or ``pathlib.Path``.
    :return: the parsed dict for ``.json`` files; ``None`` for any other suffix.
    :raises AssertionError: if the path does not exist or is not a regular file.
    """
    file_path_obj = Path(file_path)
    # NOTE: assert-based validation is stripped under ``python -O``.
    assert file_path_obj.exists(), "File path {} does not exist".format(file_path)
    assert file_path_obj.is_file(), "{} is not a regular file".format(file_path)
    if file_path_obj.suffix == ".json":
        with open(file_path_obj) as fp:
            return json.load(fp)
    # TODO: add a YAML parser for the .yaml/.yml suffixes.
    return None
# %%
def validator_file(schema_file):
    """Build a Draft-7 validator from the JSON schema stored at *schema_file*."""
    logging.info("Schema: {}".format(schema_file))
    return jschema.validators.Draft7Validator(load_serial_data_file_path(schema_file))
# %% Wrapper over jschema.Draft7Validator.validate()
def validate_dict(this_json_dict, schema_file):
    """Validate *this_json_dict* against *schema_file*; raises on failure."""
    return validator_file(schema_file).validate(this_json_dict)


def validate_dict_local(this_json_dict):
    """Validate against the bundled local metadata schema."""
    return validate_dict(this_json_dict, LOCAL_SCHEMA_FILE)


def validate_dict_remote(this_json_dict):
    """Validate against the bundled remote metadata schema."""
    return validate_dict(this_json_dict, REMOTE_SCHEMA_FILE)
# %%
# Wrapper over jschema.Draft7Validator.is_valid()
def is_valid_dict(this_json_dict, schema_file=LOCAL_SCHEMA_FILE):
    """Return True iff *this_json_dict* conforms to *schema_file* (no raise)."""
    return validator_file(schema_file).is_valid(this_json_dict)
# Convenience functions
def is_valid_dict_local(this_json_dict):
    """Boolean validity check against the local metadata schema."""
    return is_valid_dict(this_json_dict, schema_file=LOCAL_SCHEMA_FILE)


def is_valid_dict_remote(this_json_dict):
    """Boolean validity check against the remote metadata schema."""
    return is_valid_dict(this_json_dict, schema_file=REMOTE_SCHEMA_FILE)
# %% Wrapper over jschema.Draft7Validator.iter_errors()
def list_errors(json_dict, schema_file):
    """Collect all validation errors for *json_dict* against *schema_file*.

    :param json_dict: candidate document to validate.
    :param schema_file: path to the JSON schema file to validate against.
    :return: list of ``(summary_string, ValidationError)`` tuples, ordered by
        the error's location in the document.
    """
    validator = validator_file(schema_file)
    # BUG FIX: ``err.path`` is a collections.deque that may mix str keys and
    # int indices; deques define no ordering, so sorting on the raw path
    # raised TypeError whenever two errors had to be compared. Stringify the
    # path elements to get a deterministic, comparable sort key.
    errors = sorted(
        validator.iter_errors(json_dict),
        key=lambda e: "/".join(str(p) for p in e.path),
    )
    error_summary = []
    for i, err in enumerate(errors):
        stack_path = [str(p) for p in err.relative_path]
        error_string = "Error {} at {}".format(i, "/".join(stack_path))
        error_summary.append((error_string, err))
    return error_summary
# Convenience functions
def list_errors_dict_local(this_json_dict):
    """List validation errors against the local metadata schema."""
    return list_errors(this_json_dict, LOCAL_SCHEMA_FILE)


def list_errors_dict_remote(this_json_dict):
    """List validation errors against the remote metadata schema."""
    return list_errors(this_json_dict, REMOTE_SCHEMA_FILE)
|
import logging
import re
from bs4 import BeautifulSoup
# Module-level logger for the HTML-splitter module (not referenced in the
# code visible here; presumably used elsewhere or kept for future use).
LOGGER = logging.getLogger(__name__)
class BaseHtmlSplitter:
    """Split an HTML document (given as a string or a file path).

    On construction, the content is reduced to what lies between the
    <body>...</body> tags, and duplicate header/anchor ids are rewritten so
    that intra-document hrefs point at a single canonical id per header.
    """
    # Captures the inner HTML of a <body ...> element (attributes allowed).
    INTERNAL_BODY_FINDER = re.compile(r"<body[\w\-=\"\ ]*>([\s\S]+?)<\/body>")
    # Matches header tag names h1..h7 (trailing char class tolerates attribute
    # text when the pattern is matched against raw tag strings).
    HEADER_REGEX = re.compile(r'^h[1-7][\w\-="\ ]*')
    @staticmethod
    def normalize_html(html):
        # Pretty-print via the builtin parser to get a canonical form.
        return BeautifulSoup(html, "html.parser").prettify()
    @staticmethod
    def get_html_between_body(html):
        # Fast path: regex finds a well-formed <body>...</body> pair.
        re_search = BaseHtmlSplitter.INTERNAL_BODY_FINDER.search(html)
        if re_search:
            return re_search.group(1)
        # Fallback: manually strip any dangling <body>/<html> tags.
        html = BaseHtmlSplitter.trim_tag(html, "body")
        return BaseHtmlSplitter.trim_tag(html, "html")
    @staticmethod
    def trim_tag(html, tag):
        """Remove the opening and/or closing *tag* markers, keeping the inner text."""
        end_tag = "</{}".format(tag)
        tag = "<{}".format(tag)
        opening_tag = tag in html
        closing_tag = end_tag in html
        if opening_tag:
            # Drop everything up to and including the opening tag's closing '>'.
            without_tag = tag.join(html.split(tag)[1:])
            html = ">".join(without_tag.split(">")[1:])
        if closing_tag:
            # Drop everything after the last closing tag.
            html = end_tag.join(html.split(end_tag)[0:-1])
        return html
    def parse_id_and_href(self):
        """Rewrite '#<alias-id>' references to each header's first (canonical) id.

        Also strips the leading '#' from href values so references become
        plain ids. NOTE(review): plain-text occurrences of '#<id>' outside
        attributes would be rewritten too — confirm inputs make this safe.
        """
        html_id_to_origin_id = dict()
        parsed_html = BeautifulSoup(self.content, self.parser).body
        if parsed_html:
            for tag in parsed_html.find_all(self.HEADER_REGEX):
                # Collect the header's own id plus ids of nested <a> anchors;
                # the first one found is treated as canonical.
                identifiers = []
                if tag.get("id"):
                    identifiers.append(tag.get("id"))
                identifiers += [t.get("id") for t in tag.find_all("a", recursive=True) if t.get("id")]
                for identifier in identifiers[1:]:
                    html_id_to_origin_id[identifier] = identifiers[0]
        for html_id, origin_id in html_id_to_origin_id.items():
            self.content = self.content.replace("#" + html_id, origin_id)
        # Strip the '#' that remains after quote characters in hrefs.
        self.content = self.content.replace('"#', '"')
    def __init__(self, content=None, path=None, parser="lxml"):
        """Initialize from exactly one of *content* (HTML string) or *path* (file).

        :param parser: BeautifulSoup parser backend (default "lxml").
        :raises ValueError: if both or neither of content/path are given.
        """
        self.parser = parser
        self.path = None
        error_msg = "Choose {} one between <content> and <path>."
        if content and path:
            raise ValueError(error_msg.format("only"))
        if content is None and path is None:
            raise ValueError(error_msg.format("at least"))
        if content is None:
            with open(path, "r") as html:
                content = html.read()
            self.path = path
        self.content = self.get_html_between_body(content)
        self.parse_id_and_href()
class HtmlSplitter(BaseHtmlSplitter):
    """Split the body content into a hierarchy of sections keyed by h1..h7 headers."""
    def split(self):
        """Return a list of section dicts (title/header_type/content/id),
        nested via 'children' according to header levels. Empty body -> []."""
        parsed_html = BeautifulSoup(self.content, self.parser).body
        if parsed_html is None:
            return []
        split_content = self.get_split_content(parsed_html)
        result = self.create_proper_list(split_content)
        hierarchized_content = self.__get_hierarchized_content(result)
        return hierarchized_content
    def create_proper_list(self, split_content):
        """Pair up [content, header, content, ...] into section dicts; prepend a
        synthetic 'Cover Page' section if text precedes the first header."""
        result = []
        headers = set(c["header_type"] for c in self.__iter_on_split_content(split_content))
        # min() on "h1".."h7" strings picks the highest-level header present.
        higest_level_header = "h1" if not headers else min(headers)
        cover_page = split_content[0]
        # Only add a cover page if the leading chunk has non-whitespace text.
        if " ".join(cover_page.split()):
            result.append(
                {"title": "Cover Page", "header_type": higest_level_header, "content": cover_page, "id": None}
            )
        for title_header_content in self.__iter_on_split_content(split_content):
            result.append(title_header_content)
        return result
    @staticmethod
    def __iter_on_split_content(split_content):
        """Yield header dicts from the alternating [header, content, ...] list
        (index 0 is the pre-header chunk and is skipped), attaching each
        header's following chunk as its 'content'."""
        split_content = iter(split_content[1:])
        while True:
            try:
                title_and_header = next(split_content)
                content = next(split_content)
                title_and_header["content"] = content
                yield title_and_header
            except StopIteration:
                return
    def __get_hierarchized_content(self, flat_headers):
        """Recursively nest sections: consecutive lower-level headers become
        the 'children' of the preceding highest-level header."""
        hierarchized = []
        headers = set(h["header_type"] for h in flat_headers)
        if len(headers) <= 1:
            # Single level: nothing to nest.
            return flat_headers
        highest_level_header = min(headers)
        father = flat_headers[0]
        children = []
        for current in flat_headers[1:]:
            if current["header_type"] == highest_level_header:
                if father != current:
                    if children:
                        father["children"] = self.__get_hierarchized_content(children)
                    hierarchized.append(father)
                    children = []
                    father = current
            else:
                children.append(current)
        # Flush the trailing father/children pair.
        if children:
            father["children"] = self.__get_hierarchized_content(children)
        hierarchized.append(father)
        return hierarchized
    def get_split_content(self, parsed_html):
        """Cut self.content at each header tag, returning an alternating list
        [pre-content, header_dict, content, header_dict, content, ...]."""
        has_empty_title = False
        split_content = []
        after = self.content
        for tag in parsed_html.find_all(self.HEADER_REGEX):
            html_split_by_tag = after.split(str(tag))
            # NOTE(review): str.split always returns >= 1 element, so this
            # condition is always true; the body below also overwrites
            # html_split_by_tag entirely, making the first split and the
            # first assignment to html_split_by_tag[0] dead work.
            if len(html_split_by_tag) >= 1:
                end_tag = "</{}>".format(tag.name)
                html_split_manually = after.split(end_tag)
                html_split_by_tag[0] = html_split_manually[0].split("<{}".format(tag.name))[0]
                begin_tag = "<{}".format(tag.name)
                # [text before this header, text after this header's end tag]
                html_split_by_tag = [html_split_manually[0].split(begin_tag)[0], end_tag.join(html_split_manually[1:])]
            after = "".join(html_split_by_tag[1:])
            title = self.__get_text_from_tag(tag)
            if not title:
                has_empty_title = True
            # Header id: its own id attribute first, else the first nested <a> id.
            identifiers = []
            if tag.get("id"):
                identifiers.append(tag.get("id"))
            identifiers += [t.get("id") for t in tag.find_all("a", recursive=True) if t.get("id")]
            identifier = None if not identifiers else identifiers[0]
            split_content += [html_split_by_tag[0], {"title": title, "header_type": tag.name, "id": identifier}]
        split_content.append(after)
        if has_empty_title:
            split_content = self.__remove_empty_titles(split_content)
        return split_content
    @staticmethod
    def __remove_empty_titles(split_content):
        """Merge the content of title-less headers into the preceding chunk,
        preserving the alternating [content, header, ...] structure."""
        compacted_content = []
        split_content = iter(split_content)
        current_content = next(split_content)
        try:
            while True:
                title_and_header = next(split_content)
                content = next(split_content)
                if not title_and_header["title"]:
                    current_content += content
                else:
                    compacted_content += [current_content, title_and_header]
                    current_content = content
        except StopIteration:
            # Trailing content (odd element) is appended on exhaustion.
            compacted_content += [current_content]
        return compacted_content
    def __get_text_from_tag(self, tag):
        # Re-parse the tag's children so nested markup is dropped from the title.
        return BeautifulSoup("".join([str(h) for h in tag.contents]), self.parser).get_text()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.