hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
80bcd2a34aea988c4d677e2569bb88d99ffaf320 | 1,496 | py | Python | wordBreak3Ways.py | ezquire/python-challenges | c953633eb211bb315eca4ed54b7bf837588dc36f | [
"MIT"
] | null | null | null | wordBreak3Ways.py | ezquire/python-challenges | c953633eb211bb315eca4ed54b7bf837588dc36f | [
"MIT"
] | null | null | null | wordBreak3Ways.py | ezquire/python-challenges | c953633eb211bb315eca4ed54b7bf837588dc36f | [
"MIT"
def wordBreakDP(word, dic):
    """Return True if `word` can be split into a sequence of words from `dic`.

    Bottom-up dynamic programming: dp[i] is True when word[:i] is
    segmentable; the answer is dp[len(word)]. O(n^2) substring checks.

    :param word: string to segment
    :param dic: set (or other container) of dictionary words
    """
    if word in dic:          # whole word is itself a dictionary entry
        return True
    if len(dic) == 0:        # nothing to segment with
        return False
    n = len(word)
    dp = [False] * (n + 1)
    dp[0] = True             # empty prefix is always segmentable
    for i in range(1, n + 1):
        for j in range(i - 1, -1, -1):
            # idiom fix: truth-test dp[j] directly instead of `== True`
            if dp[j] and word[j:i] in dic:
                dp[i] = True
                break
    return dp[-1]
def wordBreakRecursive(word, dic, startIndex=0):
    """Return True if word[startIndex:] can be split into words from `dic`.

    Plain recursion without memoization (exponential worst case).

    :param word: string to segment
    :param dic: set of dictionary words
    :param startIndex: position in `word` where segmentation resumes
    """
    if word in dic:
        return True
    if len(dic) == 0:
        # BUGFIX: was `return false` (lowercase), which raised NameError
        # whenever this branch was reached with an empty dictionary.
        return False
    if startIndex == len(word):   # consumed the whole word successfully
        return True
    for endIndex in range(startIndex + 1, len(word) + 1):
        if word[startIndex: endIndex] in dic and wordBreakRecursive(word, dic, endIndex):
            return True
    return False
def wordBreakMemo(word, dic, startIndex=0, memo=None):
    """Return True if word[startIndex:] can be split into words from `dic`.

    Top-down recursion with memoization: `memo` maps a startIndex to the
    cached result for that suffix, giving O(n^2) work instead of exponential.

    :param word: string to segment
    :param dic: set of dictionary words
    :param startIndex: position in `word` where segmentation resumes
    :param memo: internal cache; leave as None on the initial call
    """
    if word in dic:
        return True
    if len(dic) == 0:
        return False
    if memo is None:            # idiom fix: identity test, not `== None`
        memo = dict()
    if startIndex == len(word):
        # Base case: whole word consumed. (Previously supplied implicitly by
        # delegating to wordBreakRecursive; needed here once self-recursive.)
        return True
    if startIndex in memo:
        return memo[startIndex]
    for endIndex in range(startIndex + 1, len(word) + 1):
        # BUGFIX: recurse into wordBreakMemo and share `memo`; the original
        # called wordBreakRecursive here, which silently discarded the
        # memoization and made this function depend on its sibling.
        if word[startIndex: endIndex] in dic and wordBreakMemo(word, dic, endIndex, memo):
            memo[startIndex] = True
            return memo[startIndex]
    memo[startIndex] = False
    return memo[startIndex]
# Demo: segment the same word with all three implementations; each line
# should print True for this word/dictionary pair.
word = "papapapokerface"
dic = {"pa", "poker", "face"}
print(wordBreakDP(word, dic))
print(wordBreakRecursive(word, dic))
print(wordBreakMemo(word, dic))
| 28.769231 | 89 | 0.566845 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.022727 |
80bd0f0505c7035471b773e6bad75b98a0740f07 | 5,931 | py | Python | src/embedding/utilslib/baidu_spider_threads.py | mykiscool/DeepCamera | e77cdbf45ab09895f315aa299bd6ac87b3bb6d66 | [
"MIT"
] | 914 | 2019-03-07T14:57:45.000Z | 2022-03-31T14:54:15.000Z | src/embedding/utilslib/baidu_spider_threads.py | mykiscool/DeepCamera | e77cdbf45ab09895f315aa299bd6ac87b3bb6d66 | [
"MIT"
] | 45 | 2019-03-11T09:53:37.000Z | 2022-03-30T21:59:37.000Z | src/embedding/utilslib/baidu_spider_threads.py | mykiscool/DeepCamera | e77cdbf45ab09895f315aa299bd6ac87b3bb6d66 | [
"MIT"
] | 148 | 2019-03-08T00:40:28.000Z | 2022-03-30T09:22:18.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import sys
import re
import urllib
import json
import socket
import time
import multiprocessing
from multiprocessing.dummy import Pool
from multiprocessing import Queue
import requests
timeout = 5
# Apply a default timeout (seconds) to every socket created by this process.
socket.setdefaulttimeout(timeout)
class Image(object):
    """One crawled image: its remote URL, the local path to save it to,
    and the referer URL to present when downloading it."""

    def __init__(self, url, save_path, referer):
        super(Image, self).__init__()
        self.url, self.save_path, self.referer = url, save_path, referer
class Crawler:
    """Baidu image-search crawler.

    Pages through Baidu's ``avatarjson`` search endpoint for a keyword,
    collects image URLs into a queue, and downloads them through a
    30-worker thread pool (``multiprocessing.dummy``).

    NOTE: uses ``urllib.quote`` — Python 2 only (``urllib.parse.quote`` on 3).
    """

    # Default sleep duration between downloads (overridable via __init__ t=).
    __time_sleep = 0.1
    __amount = 0         # image offset (exclusive) at which crawling stops
    __start_amount = 0   # image offset at which crawling starts
    __counter = 0        # running number used to name saved files

    headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu '
               'Chromium/58.0.3029.110 Chrome/58.0.3029.110 Safari/537.36'}

    # Fetch image URL listings, etc.
    # t: interval between image downloads.
    def __init__(self, t=0.1):
        # NOTE(review): reads the module-global `dirpath`, which is defined
        # only under __main__; constructing a Crawler from an importing
        # module would raise NameError. Callers in this file reassign
        # `crawler.dirpath` right after construction anyway — confirm intent.
        self.dirpath = dirpath
        self.time_sleep = t
        self.pool = Pool(30)                 # thread pool for downloads
        self.session = requests.Session()    # reused for all page fetches
        self.session.headers = Crawler.headers
        self.queue = Queue()                 # batches of Image objects to download
        self.delay = 1.5  # requests that are too frequent get the client banned
        self.__down_counter = 1              # human-readable progress counter

    # Return the file extension of a name/URL, or '.jpeg' as a fallback.
    @staticmethod
    def __get_suffix(name):
        m = re.search(r'\.[^\.]*$', name)
        if m.group(0) and len(m.group(0)) <= 5:
            return m.group(0)
        else:
            return '.jpeg'

    # Return the part of the name before the first dot.
    @staticmethod
    def __get_prefix(name):
        return name[:name.find('.')]

    # Build Image objects for one JSON result page and enqueue the batch.
    def __resolve_img_url(self, rsp_data, referer):
        imgs = []
        for image_info in rsp_data['imgs']:
            fix = self.__get_suffix(image_info['objURL'])
            # __work_path is set by start() before this method is reached
            local_path = os.path.join(self.__work_path, str(self.__counter) + str(fix))
            image = Image(image_info['objURL'], local_path, referer)
            imgs.append(image)
            print("图片+1,已有" + str(self.__down_counter) + "张")
            self.__down_counter += 1
            self.__counter += 1
        self.queue.put(imgs)
        return

    # Page through the search endpoint, resolving image URLs page by page.
    def __resolve_json(self, word=''):
        search = urllib.quote(word)
        # pn: running image offset (60 images per page)
        pn = self.__start_amount
        while pn < self.__amount:
            url = 'http://image.baidu.com/search/avatarjson?tn=resultjsonavatarnew&ie=utf-8&word=' + search + '&cg=girl&pn=' + str(
                pn) + '&rn=60&itg=0&z=0&fr=&width=&height=&lm=-1&ic=0&s=0&st=-1&gsm=1e0000001e'
            # Reuse the session to look like one browser and avoid a ban.
            try:
                time.sleep(self.delay)
                req = self.session.get(url=url, timeout=15)
                rsp = req.text
            except UnicodeDecodeError as e:
                print(e)
                print('-----UnicodeDecodeErrorurl:', url)
            except requests.exceptions.RequestException as e:
                print(e)
                print("-----Error:", url)
            except socket.timeout as e:
                print(e)
                print("-----socket timout:", url)
            else:
                # Parse the JSON page; malformed responses are skipped.
                try:
                    rsp_data = json.loads(rsp)
                    self.__resolve_img_url(rsp_data, url)
                except ValueError:
                    pass
                # Move on to the next page.
                print("读取下一页json")
            pn += 60
        print("解析json完成")
        return

    def __downImg(self, img):
        """Download a single image; `img` is an Image object."""
        # try:
        #     time.sleep(self.delay)
        #     urllib.urlretrieve(img.url, img.save_path)
        # except requests.exceptions.HTTPError as e:
        #     print(e)
        # except Exception as err:
        #     time.sleep(1)
        #     print(err)
        #     print("产生未知错误,放弃保存")
        imgUrl = img.url
        # self.messageQueue.put("线程 %s 正在下载 %s " %
        #                       (threading.current_thread().name, imgUrl))
        try:
            time.sleep(self.delay)
            headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu '
                       'Chromium/58.0.3029.110 Chrome/58.0.3029.110 Safari/537.36'}
            # Send the search-result page as referer; some hosts check it.
            headers['Referer'] = img.referer
            res = requests.get(imgUrl, headers=headers, timeout=15)
            with open(img.save_path, "wb") as f:
                f.write(res.content)
        except Exception as e:
            # Best-effort download: report and skip failed images.
            message = "抛出异常: %s%s" % (imgUrl, str(e))
            print(message)

    def start(self, index, word, spider_page_num=1, start_page=1):
        """
        Crawler entry point.
        :param index: sub-directory name under self.dirpath for this keyword
        :param word: keyword to search for
        :param spider_page_num: number of result pages to crawl (60 images each)
        :param start_page: 1-based page to start from
        :return:
        """
        self.__work_path = os.path.join(self.dirpath, index)
        if not os.path.exists(self.__work_path):
            os.mkdir(self.__work_path)
        # Continue numbering after files already present locally.
        self.__counter = len(os.listdir(self.__work_path)) + 1
        self.__start_amount = (start_page - 1) * 60
        self.__amount = spider_page_num * 60 + self.__start_amount
        self.__resolve_json(word)
        # Drain the queue, handing each batch to the thread pool.
        while self.queue.qsize():
            imgs = self.queue.get()
            self.pool.map_async(self.__downImg, imgs)
        self.pool.close()
        self.pool.join()
        print('完成保存')
if __name__ == '__main__':
    # Crawl images for every person listed in name.json into results/<index>/.
    dirpath = os.path.join(sys.path[0], 'results')
    if not os.path.exists(dirpath):
        os.mkdir(dirpath)
    # name.json maps numeric indices (as strings) to person names.
    with open('name.json') as f:
        json_data = json.load(f)
    # word = str(input("请输入图片关键字: \n"))
    # Sort entries by their numeric index for deterministic order.
    sort_data = sorted([(int(k), v) for k, v in json_data.items()])
    print('开始')
    for index, name in sort_data:
        folder = str(index)
        person = name.encode('utf-8')
        print('开始抓取 {}:{}'.format(folder, person))
        # Skip a person whose result folder already exists.
        if folder in os.listdir('./results'):
            print('已存在, continue')
            continue
        crawler = Crawler(0.05)
        crawler.dirpath = dirpath
        # 2 pages from page 1 => up to 120 images per person.
        crawler.start(folder, person, 2, 1)
| 31.547872 | 131 | 0.548137 | 5,286 | 0.837585 | 0 | 0 | 280 | 0.044367 | 0 | 0 | 1,828 | 0.289653 |
80be21d5757f74fcf164345d78cb45a0c4101894 | 7,377 | py | Python | cgi-bin/pybrowser.py | fanuware/pybrowser | 910cebaee45524248c18d86605ba9e7f1b862c47 | [
"MIT"
] | null | null | null | cgi-bin/pybrowser.py | fanuware/pybrowser | 910cebaee45524248c18d86605ba9e7f1b862c47 | [
"MIT"
] | null | null | null | cgi-bin/pybrowser.py | fanuware/pybrowser | 910cebaee45524248c18d86605ba9e7f1b862c47 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import cgi, os
import shutil
from userlogger import UserLogger
import templates
import mimetypes
from stat import S_IEXEC
def getUnusedName(file):
    """Return `file` unchanged if that path does not exist yet; otherwise
    return the first variant name<N>.ext (N = 1, 2, ...) that is unused.
    """
    if not os.path.exists(file):
        return file
    directory, filename = os.path.split(file)
    dot = filename.rfind('.')
    suffix = filename[dot:] if dot > 0 else ""
    stem = filename[:len(filename) - len(suffix)]
    candidate = file
    n = 0
    while os.path.exists(candidate):
        n += 1
        candidate = os.path.join(directory, stem + str(n) + suffix)
    return candidate
def getRecbin():
    """Return the recycle-bin directory relative to the current directory:
    "recbin" if it exists (created on demand when neither it nor
    "../recbin" is present), otherwise "../recbin".
    """
    if not (os.path.isdir("recbin") or os.path.isdir("../recbin")):
        os.mkdir("recbin")
    if os.path.isdir("recbin"):
        return "recbin"
    return "../recbin"
##################################################
# main
# create instance of field storage
form = cgi.FieldStorage()

# Requested filesystem path; defaults to the filesystem root.
if "path" in form:
    filepath = form.getvalue("path")
    filepath = filepath.rstrip(os.sep)
else:
    filepath = os.sep

# Requested command; defaults to "nocommand" (just show the directory).
if "cmd" in form:
    cmd = form.getvalue("cmd")
else:
    cmd = "nocommand"

# receive file for upload (presence of the field switches the command)
try:
    uploadfiles = form["uploadfiles"]
    cmd = "uploadfiles"
except:
    pass

# receive page (optional) — pagination index for the directory listing
currentPage = 0 if "page" not in form else int(form.getvalue("page"))

##################################################
# permission guard
userLogger = UserLogger()
userPermission = userLogger.getPermission(filepath)
userLogger.setTargetUrl('pybrowser.py?path='+filepath)

# Make sure the user is allowed to read; otherwise bounce through a
# redirect (to normalize the URL) and then show the login form.
if (userPermission < UserLogger.PERMISSION_READ):
    if "redirect" not in form:
        # Re-issue the request with redirect=True so the login round-trip
        # preserves the original query arguments.
        args = '&'.join([key + '=' + str(form[key].value) for key in form.keys()])
        if args:
            url = os.path.basename(os.environ['SCRIPT_NAME']) + '?redirect=True&' + args
        else:
            url = os.path.basename(os.environ['SCRIPT_NAME']) + '?redirect=True'
        templates.redirect(url)
    else:
        userLogger.showLogin('Identification required')
elif userPermission == UserLogger.PERMISSION_READ:
    # Read-only users may list directories but not run modifying commands.
    if (cmd == "nocommand"):
        templates.directory(filepath, currentPage)
    else:
        if "redirect" not in form:
            args = '&'.join([key + '=' + str(form[key].value) for key in form.keys()])
            if args:
                url = os.path.basename(os.environ['SCRIPT_NAME']) + '?redirect=True&' + args
            else:
                url = os.path.basename(os.environ['SCRIPT_NAME']) + '?redirect=True'
            templates.redirect(url)
        else:
            userLogger.showLogin('Identification required')

##################################################
# check commands (all read permission)

# upload file
if cmd == "uploadfiles":
    # upload file to server
    try:
        # if a single file was received, make it list-accessable
        if uploadfiles.filename:
            uploadfiles = list([uploadfiles])
    except:
        pass
    try:
        for file in uploadfiles:
            FILEPATH = os.path.join(filepath, file.filename)
            # create file
            with open(FILEPATH , 'wb') as fhand:
                contentRaw = file.file.read()
                fhand.write(contentRaw)
                fhand.close()
            # convert text file to unix format
            mime = mimetypes.guess_type(FILEPATH)
            if 'text' in str(mime):
                with open(FILEPATH , 'wb') as fhand:
                    contentRaw = contentRaw.replace(b'\r\n', b'\n')  # DOS
                    contentRaw = contentRaw.replace(b'\r', b'\n')  # MAC os
                    fhand.write(contentRaw)
                    fhand.close()
            # make file executable
            if ".py" in FILEPATH:
                mode = os.stat(FILEPATH).st_mode
                os.chmod(FILEPATH, mode|S_IEXEC )
    except Exception as e:
        templates.message("UploadError", str(e))

# new
elif cmd == "new":
    # new folder
    if not os.path.exists(filepath):
        os.mkdir(filepath)
        filepath = os.path.dirname(filepath)
    # save file (from editor)
    elif os.path.isfile(filepath):
        try:
            contentRaw = form.getvalue("textcontent")
            fhand = open(filepath, 'wb')
            contentRaw = contentRaw.encode('utf-8')
            # in case of DOS/macOS-formatting, change to unix
            #contentUnix = contentRaw.replace('\r\n', '\n') # DOS
            #contentUnix = contentUnix.replace('\r', '\n') # MAC os
            contentUnix = contentRaw.replace(b'\r\n', b'\n')  # DOS
            contentUnix = contentUnix.replace(b'\r', b'\n')  # MAC os
            fhand.write(contentUnix)
            fhand.close()
            if ".py" in filepath:
                mode = os.stat(filepath).st_mode
                os.chmod(filepath, mode|S_IEXEC )
        except Exception as e:
            templates.error(str(e))

# remove folder/file — moved into the per-user recycle bin, not deleted
elif cmd == "remove":
    recbin = getRecbin()
    userRecbin = os.path.join(recbin, userLogger.isLoggedIn())
    if not os.path.isdir(userRecbin):
        os.mkdir(userRecbin)
    if os.path.isdir(filepath) or os.path.isfile(filepath):
        try:
            destination = getUnusedName(os.path.join(userRecbin, os.path.basename(filepath)))
            os.rename(filepath, destination)
        except:
            pass

# rename
elif cmd == "rename":
    try:
        newname = form.getvalue("newname")
        if os.path.exists(filepath):
            os.rename(filepath, os.path.join(os.path.dirname(filepath), newname))
    except:
        pass

# copy — remember the source path in the user session
elif cmd == "copy":
    if os.path.isfile(filepath) or os.path.isdir(filepath):
        userLogger.setCopyUrl(filepath)
    if os.path.isdir(filepath):
        filepath = os.path.split(filepath)[0]

# paste — copy the remembered source into the current directory
elif cmd == "paste":
    sourceFile = userLogger.getCopyUrl()
    userLogger.resetCopyUrl()
    destFileName = getUnusedName(os.path.join(filepath, os.path.basename(sourceFile)))
    if os.path.isfile(sourceFile):
        shutil.copy(sourceFile, destFileName)
    elif os.path.isdir(sourceFile):
        shutil.copytree(sourceFile, destFileName)
    else:
        templates.error("No copy file found")

# unzip — extract the archive into a sibling folder named after it
elif cmd == "unzip":
    import zipfile
    dirpath = os.path.dirname(filepath)
    newFolder = getUnusedName(os.path.join(dirpath, os.path.basename(filepath).replace('.zip', '')))
    os.mkdir(newFolder)
    try:
        zipf = zipfile.ZipFile(filepath, 'r')
        zipf.extractall(newFolder)
        zipf.close()
    except Exception as e:
        templates.message("Unzip", str(e))
    filepath = dirpath
    #templates.message("Unzip", filepath)

# validate filepath — fall back to the parent directory, then to the root
if not os.path.isdir(filepath):
    filepath = os.path.dirname(filepath)
if not os.path.isdir(filepath):
    filepath = os.sep

# show directory
if (userLogger.getPermission(filepath) >= userLogger.PERMISSION_READ):
    templates.directory(filepath, currentPage)
else:
    if "redirect" not in form:
        args = '&'.join([key + '=' + str(form[key].value) for key in form.keys()])
        if args:
            url = os.path.basename(os.environ['SCRIPT_NAME']) + '?redirect=True&' + args
        else:
            url = os.path.basename(os.environ['SCRIPT_NAME']) + '?redirect=True'
        templates.redirect(url)
    else:
        userLogger.showLogin('Identification required')
| 31.525641 | 100 | 0.590891 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,522 | 0.206317 |
80be84948c857c226b842731966f51f313b423cf | 498 | py | Python | prog_python/strings/sustring_2.py | TCGamer123/python | 82ad1f84b52d6cc7253fb4c5522ae8389824930a | [
"MIT"
] | 1 | 2022-03-08T13:29:59.000Z | 2022-03-08T13:29:59.000Z | prog_python/strings/sustring_2.py | TCGamer123/python | 82ad1f84b52d6cc7253fb4c5522ae8389824930a | [
"MIT"
] | null | null | null | prog_python/strings/sustring_2.py | TCGamer123/python | 82ad1f84b52d6cc7253fb4c5522ae8389824930a | [
"MIT"
# String slicing and formatting examples.
s = "Olá, mundo!";
print(s[::2]);  # Print the characters at even indices.
print(s[1::2])  # Print the characters at odd indices.
frase = "Mundo mundo vasto mundo"
print(frase[::-1]);  # Reverse the phrase.
# More advanced string formatting with str.format
frase_2 = "Um triângulo de base igual a {0} e altura igual a {1} possui área igual {2}.".format(3,4,12);
print(frase_2);
# String formatting with f-strings
linguagem = "Python";
frase_3 = f"Progamando em {linguagem}";
print(frase_3);
| 29.294118 | 104 | 0.702811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 349 | 0.685658 |
80bf37aa9aad7edddb690a9919912a42c6115218 | 2,742 | py | Python | pymonad/maybe/maybe_test.py | Wildhoney/Pymonad | 177989b3d0f362c3bf3af962d89306309ff000c3 | [
"MIT"
] | null | null | null | pymonad/maybe/maybe_test.py | Wildhoney/Pymonad | 177989b3d0f362c3bf3af962d89306309ff000c3 | [
"MIT"
] | null | null | null | pymonad/maybe/maybe_test.py | Wildhoney/Pymonad | 177989b3d0f362c3bf3af962d89306309ff000c3 | [
"MIT"
] | null | null | null | import unittest
from . import Nothing, Just
# Shared fixtures: a populated Maybe and the empty Maybe.
a = Just('Adam')
b = Nothing()
def lower(x):
    """Lower-case the string."""
    return x.lower()


def reverse(x):
    """Reverse the string."""
    return x[::-1]


def shout(x):
    """Append an exclamation mark."""
    return '%s!' % x


def capitalise(x):
    """Upper-case the first character, lower-case the rest."""
    return x.capitalize()
class TestJust(unittest.TestCase):
    """Behavioural tests for the Just/Nothing Maybe implementation.

    Expected values rely on the project's __str__ formats:
    'Just (<value>)' and 'Nothing'.
    """

    def test_is_just(self):
        self.assertEqual(a.is_just(), True)
        self.assertEqual(b.is_just(), False)

    def test_is_nothing(self):
        self.assertEqual(a.is_nothing(), False)
        self.assertEqual(b.is_nothing(), True)

    def test_map(self):
        # Mapping plain functions over a Just threads the value through.
        c = a.map(lower).map(reverse).map(shout).map(capitalise)
        self.assertEqual(str(c), 'Just (Mada!)')
        # A function returning a Just is flattened (monadic bind behaviour).
        d = a.map(lower).map(lambda x: Just(
            reverse(x))).map(shout).map(capitalise)
        self.assertEqual(str(d), 'Just (Mada!)')
        # Returning Nothing short-circuits the rest of the chain.
        e = a.map(lower).map(lambda x: Nothing()).map(shout).map(capitalise)
        self.assertEqual(str(e), 'Nothing')
        # Mapping over Nothing is always Nothing.
        f = b.map(lower).map(reverse).map(shout).map(capitalise)
        self.assertEqual(str(f), 'Nothing')
        g = b.map(lower).map(lambda x: Just(
            reverse(x))).map(shout).map(capitalise)
        self.assertEqual(str(g), 'Nothing')

    def test_map_shorthand(self):
        # `>>` is the operator shorthand for map.
        c = a >> lower >> reverse >> shout >> capitalise
        self.assertEqual(str(c), 'Just (Mada!)')
        d = b >> lower >> reverse >> shout >> capitalise
        self.assertEqual(str(d), 'Nothing')

    def test_get(self):
        # get() unwraps a Just; get(default) yields the default for Nothing.
        c = a >> lower >> reverse >> shout >> capitalise
        self.assertEqual(c.get(), 'Mada!')
        d = b >> lower >> reverse >> shout >> capitalise
        self.assertEqual(d.get('Unknown'), 'Unknown')

    def test_get_shorthand(self):
        # `| default` is the operator shorthand for get(default).
        c = a >> lower >> reverse >> shout >> capitalise
        self.assertEqual(c | 'Unknown', 'Mada!')
        d = b >> lower >> reverse >> shout >> capitalise
        self.assertEqual(d | 'Unknown', 'Unknown')

    def test_equals(self):
        # Equality between Maybes is itself a Maybe-wrapped boolean.
        self.assertEqual(str(Just('Adam') == Just('Adam')), 'Just (True)')
        self.assertEqual(str(Just('Adam') == Just('Imogen')), 'Just (False)')
        self.assertEqual(str(Just('Maria') == Just('Imogen')), 'Just (False)')
        self.assertEqual(str(Nothing() == Nothing()), 'Nothing')
        self.assertEqual(str(Nothing() == Just('Imogen')), 'Nothing')

    def test_not_equals(self):
        self.assertEqual(str(Just('Adam') != Just('Adam')), 'Just (False)')
        self.assertEqual(str(Just('Adam') != Just('Imogen')), 'Just (True)')
        self.assertEqual(str(Just('Maria') != Just('Imogen')), 'Just (True)')
        self.assertEqual(str(Just('Adam') == Nothing()), 'Nothing')
        self.assertEqual(str(Nothing() == Nothing()), 'Nothing')
        self.assertEqual(str(Nothing() == Just('Imogen')), 'Nothing')
| 37.054054 | 78 | 0.591174 | 2,526 | 0.921225 | 0 | 0 | 0 | 0 | 0 | 0 | 378 | 0.137856 |
80bf4db9002340c8afb22321e1adb5cd22a14a77 | 7,492 | py | Python | ryu/gui/models/topology.py | isams1/Thesis | dfe03ce60169bd4e5b2eb6f1068a1c89fc9d9fd3 | [
"Apache-2.0"
] | 3 | 2019-04-23T11:11:46.000Z | 2020-11-04T20:14:17.000Z | ryu/gui/models/topology.py | isams1/Thesis | dfe03ce60169bd4e5b2eb6f1068a1c89fc9d9fd3 | [
"Apache-2.0"
] | null | null | null | ryu/gui/models/topology.py | isams1/Thesis | dfe03ce60169bd4e5b2eb6f1068a1c89fc9d9fd3 | [
"Apache-2.0"
] | 3 | 2019-10-03T09:31:42.000Z | 2021-05-15T04:41:12.000Z | import logging
import json
from socket import error as SocketError
from httplib import HTTPException
import gevent
import gevent.monkey
gevent.monkey.patch_all()
from ryu.lib.dpid import str_to_dpid
from ryu.lib.port_no import str_to_port_no
from ryu.app.client import TopologyClient
LOG = logging.getLogger('ryu.gui')
class Port(object):
    """A switch port; identity (for ==, hash) is the (dpid, port_no) pair."""

    def __init__(self, dpid, port_no, hw_addr, name):
        # NOTE(review): `unicode` exists only on Python 2; on Python 3 these
        # checks pass only because the `str` test short-circuits first.
        assert type(dpid) == int
        assert type(port_no) == int
        assert type(hw_addr) == str or type(hw_addr) == unicode
        assert type(name) == str or type(name) == unicode
        self.dpid = dpid          # datapath id of the owning switch
        self.port_no = port_no    # port number on that switch
        self.hw_addr = hw_addr    # MAC address string
        self.name = name          # interface name

    def to_dict(self):
        """Serialize to a plain dict (e.g. for JSON)."""
        return {'dpid': self.dpid,
                'port_no': self.port_no,
                'hw_addr': self.hw_addr,
                'name': self.name}

    @classmethod
    def from_rest_dict(cls, p):
        """Build a Port from a REST dict whose dpid/port_no are strings."""
        return cls(str_to_dpid(p['dpid']),
                   str_to_port_no(p['port_no']),
                   p['hw_addr'],
                   p['name'])

    def __eq__(self, other):
        # hw_addr and name are deliberately excluded from identity.
        return self.dpid == other.dpid and self.port_no == other.port_no

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash((self.dpid, self.port_no))

    def __str__(self):
        return 'Port<dpid=%s, port_no=%s, hw_addr=%s, name=%s>' % \
            (self.dpid, self.port_no, self.hw_addr, self.name)
class Switch(object):
    """An OpenFlow switch: a datapath id plus its list of Port objects.
    Identity (for == and hashing) is the dpid alone."""

    def __init__(self, dpid, ports):
        assert type(dpid) == int
        assert type(ports) == list
        self.dpid = dpid
        self.ports = ports

    def to_dict(self):
        """Serialize to a plain dict; ports are serialized recursively."""
        serialized_ports = [p.to_dict() for p in self.ports]
        return {'dpid': self.dpid, 'ports': serialized_ports}

    def __eq__(self, other):
        return self.dpid == other.dpid

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(self.dpid)

    def __str__(self):
        return 'Switch<dpid=%s>' % (self.dpid)
class Link(object):
    """A directed link between two Ports (src -> dst)."""

    def __init__(self, src, dst):
        assert type(src) == Port
        assert type(dst) == Port
        self.src = src
        self.dst = dst

    def to_dict(self):
        """Serialize to a plain dict with nested src/dst port dicts."""
        return {'src': self.src.to_dict(), 'dst': self.dst.to_dict()}

    def __eq__(self, other):
        return self.src == other.src and self.dst == other.dst

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash((self.src, self.dst))

    def __str__(self):
        return 'Link<%s to %s>' % (self.src, self.dst)
class Topology(dict):
    """A network topology parsed from REST JSON.

    Behaves as a dict with three keys: 'switches' (Switch objects),
    'links' (Link objects) and 'ports' (flat list of every switch's ports).
    """

    def __init__(self, switches_json=None, links_json=None):
        super(Topology, self).__init__()
        self['switches'] = []
        if switches_json:
            for s in json.loads(switches_json):
                ports = []
                for p in s['ports']:
                    ports.append(Port.from_rest_dict(p))
                switch = Switch(str_to_dpid(s['dpid']), ports)
                self['switches'].append(switch)
        self['links'] = []
        if links_json:
            for l in json.loads(links_json):
                link = Link(Port.from_rest_dict(l['src']),
                            Port.from_rest_dict(l['dst']))
                self['links'].append(link)
        # Flat list of every port on every switch.
        self['ports'] = []
        for switch in self['switches']:
            self['ports'].extend(switch.ports)

    def peer(self, port):
        """Return the port at the far end of the link touching `port`,
        or None when no link touches it."""
        for link in self['links']:
            if link.src == port:
                return link.dst
            elif link.dst == port:
                return link.src
        return None

    def attached(self, port):
        """Return the Switch that owns `port`, or None."""
        for switch in self['switches']:
            # BUGFIX: was `switch.port` (AttributeError) — Switch stores
            # its ports in the `ports` attribute.
            if port in switch.ports:
                return switch
        return None

    def neighbors(self, switch):
        """Return the switches directly linked to `switch`.
        NOTE(review): if a port has no peer, attached(None) is called —
        confirm Port.__eq__ tolerates None before relying on this."""
        ns = []
        # BUGFIX: was `switch.port`; the attribute is `ports`.
        for port in switch.ports:
            ns.append(self.attached(self.peer(port)))
        return ns

    # TopologyDelta = new_Topology - old_Topology
    def __sub__(self, old):
        assert type(old) == Topology
        added = Topology()
        deleted = Topology()
        # keys() instead of the Python-2-only iterkeys(): identical
        # behaviour on 2.x and also runs on 3.x.
        for k in self.keys():
            new_set = set(self[k])
            old_set = set(old[k])
            added[k] = list(new_set - old_set)
            deleted[k] = list(old_set - new_set)
        return TopologyDelta(added, deleted)

    def __str__(self):
        return 'Topology<switches=%d, ports=%d, links=%d>' % (
            len(self['switches']),
            len(self['ports']),
            len(self['links']))
class TopologyDelta(object):
    """Difference between two topologies: what appeared and what vanished."""

    def __init__(self, added, deleted):
        self.added, self.deleted = added, deleted

    def __str__(self):
        return 'TopologyDelta<added=%s, deleted=%s>' % (self.added, self.deleted)
class TopologyWatcher(object):
    """Polls a controller's REST API in a gevent greenlet and reports
    topology changes / REST failures through the supplied callbacks."""

    _LOOP_WAIT = 3          # seconds between successful polls
    _REST_RETRY_WAIT = 10   # advertised wait after a REST failure

    def __init__(self, update_handler=None, rest_error_handler=None):
        # update_handler(address, TopologyDelta) on change;
        # rest_error_handler(address, exception) on REST failure.
        self.update_handler = update_handler
        self.rest_error_handler = rest_error_handler
        self.address = None
        self.tc = None          # TopologyClient, created in start()
        self.is_active = None
        self.threads = []
        self.topo = Topology()  # last topology seen
        self.prev_switches_json = ''
        self.prev_links_json = ''

    def start(self, address):
        """Begin polling the controller at `address` in a green thread."""
        LOG.debug('TopologyWatcher: start')
        self.address = address
        self.tc = TopologyClient(address)
        self.is_active = True
        self.threads.append(gevent.spawn(self._polling_loop))

    def stop(self):
        """Ask the polling loop to exit after its current iteration."""
        LOG.debug('TopologyWatcher: stop')
        self.is_active = False

    def _polling_loop(self):
        LOG.debug('TopologyWatcher: Enter polling loop')
        while self.is_active:
            try:
                switches_json = self.tc.list_switches().read()
                links_json = self.tc.list_links().read()
            except (SocketError, HTTPException) as e:
                LOG.debug('TopologyWatcher: REST API(%s) is not available.' %
                          self.address)
                LOG.debug(' wait %d secs...' %
                          self._REST_RETRY_WAIT)
                self._call_rest_error_handler(e)
                #gevent.sleep(self._REST_RETRY_WAIT)
                # NOTE(review): instead of retrying after the wait, the loop
                # now deactivates itself on the first REST failure.
                self.is_active = False;
                continue
            if self._is_updated(switches_json, links_json):
                LOG.debug('TopologyWatcher: topology updated')
                new_topo = Topology(switches_json, links_json)
                delta = new_topo - self.topo
                self.topo = new_topo
                self._call_update_handler(delta)
            gevent.sleep(self._LOOP_WAIT)

    def _is_updated(self, switches_json, links_json):
        # Compare raw JSON against the previous poll and remember it.
        updated = (
            self.prev_switches_json != switches_json or
            self.prev_links_json != links_json)
        self.prev_switches_json = switches_json
        self.prev_links_json = links_json
        return updated

    def _call_rest_error_handler(self, e):
        if self.rest_error_handler:
            self.rest_error_handler(self.address, e)

    def _call_update_handler(self, delta):
        if self.update_handler:
            self.update_handler(self.address, delta)
# Demo update handler: just dump the delta (Python 2 print statement).
def handler(address, delta):
    print delta
if __name__ == '__main__':
    # Manual smoke test: watch a local controller and print every delta.
    logging.basicConfig(level=logging.DEBUG)
    watcher = TopologyWatcher(handler)
    watcher.start('127.0.0.1:8080')
    gevent.joinall(watcher.threads)
| 28.378788 | 77 | 0.574212 | 6,921 | 0.923785 | 0 | 0 | 199 | 0.026562 | 0 | 0 | 679 | 0.09063 |
80bfef8f2adb756fa51ead93bbcc4295e352ae27 | 744 | py | Python | IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/internet/test/process_gireactornocompat.py | timkrentz/SunTracker | 9a189cc38f45e5fbc4e4c700d7295a871d022795 | [
"MIT"
] | 4 | 2016-03-30T14:31:52.000Z | 2019-02-02T05:01:32.000Z | IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/internet/test/process_gireactornocompat.py | timkrentz/SunTracker | 9a189cc38f45e5fbc4e4c700d7295a871d022795 | [
"MIT"
] | 1 | 2020-03-06T04:49:42.000Z | 2020-03-06T04:49:42.000Z | IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/internet/test/process_gireactornocompat.py | timkrentz/SunTracker | 9a189cc38f45e5fbc4e4c700d7295a871d022795 | [
"MIT"
] | 2 | 2019-08-30T23:36:13.000Z | 2019-11-08T16:52:01.000Z | import sys
# Override theSystemPath so it throws KeyError on gi.pygtkcompat:
from twisted.python import modules
modules.theSystemPath = modules.PythonPath([], moduleDict={})
# Now, when we import gireactor it shouldn't use pygtkcompat, and should
# instead prevent gobject from being importable:
from twisted.internet import gireactor
for name in gireactor._PYGTK_MODULES:
if sys.modules[name] is not None:
sys.stdout.write("failure, sys.modules[%r] is %r, instead of None" %
(name, sys.modules["gobject"]))
sys.exit(0)
try:
import gobject
except ImportError:
sys.stdout.write("success")
else:
sys.stdout.write("failure: %s was imported" % (gobject.__path__,))
| 32.347826 | 77 | 0.686828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 281 | 0.377688 |
80bffaf5ce6de8b8154d194bc0ff65bdab497cc8 | 4,140 | py | Python | octostore/mongo_helper.py | luzhang06/octostore | c3a6ac42a86ab6943eaa7e11dfbcae50c0a68bfa | [
"MIT"
] | 1 | 2020-08-17T20:54:39.000Z | 2020-08-17T20:54:39.000Z | octostore/mongo_helper.py | luzhang06/octostore | c3a6ac42a86ab6943eaa7e11dfbcae50c0a68bfa | [
"MIT"
] | null | null | null | octostore/mongo_helper.py | luzhang06/octostore | c3a6ac42a86ab6943eaa7e11dfbcae50c0a68bfa | [
"MIT"
] | null | null | null | from pymongo import MongoClient
import os
import sys
from pathlib import Path
from environs import Env
sys.path.append("..")
sys.path.append(str(Path(__file__).parent.resolve()))
class MongoHelpers:
_client = None
_db = None
_collection = None
def __init__(self, connection_uri=None, db_name=None):
env = Env()
env.read_env()
if db_name is None:
db_name = os.getenv("MONGO_DB")
if connection_uri is None:
host = os.getenv("MONGO_HOST")
port = os.getenv("MONGO_PORT")
username = os.getenv("MONGO_USERNAME")
password = os.getenv("MONGO_PASSWORD")
args = "ssl=true&retrywrites=false&ssl_cert_reqs=CERT_NONE"
connection_uri = (
f"mongodb://{username}:{password}@{host}:{port}/{db_name}?{args}"
)
self.client = MongoClient(connection_uri)
self.db = self._client[db_name]
# def create_experiment(self, name, artifact_location=None, tags=[]):
# # all_experiments = self.get_all_experiments()
# # Get all existing experiments and find the one with largest numerical ID.
# # len(list_all(..)) would not work when experiments are deleted.
# # experiments_ids = [
# # int(e.experiment_id)
# # for e in self.list_experiments(ViewType.ALL)
# # if e.experiment_id.isdigit()
# # ]
# experiment_id = self._get_highest_experiment_id() + 1
# return self._create_experiment_with_id(
# name, str(experiment_id), artifact_location, tags
# )
# def _create_experiment_with_id(
# self,
# experiment_name,
# experiment_id,
# artifact_location,
# lifecycle_stage: LifecycleStage = LifecycleStage.ACTIVE,
# tags=[],
# ) -> int:
# e = Experiment(
# experiment_id,
# experiment_name,
# experiment_id,
# artifact_location,
# lifecycle_stage,
# tags,
# )
# def _get_highest_experiment_id(self):
# if len(list(self._client.experiments.find())) is not 0:
# last_experiment = list(
# self.db.experiments.find({}).sort("experiment_id", -1).limit(1)
# )
# return last_experiment[0]["experiment_id"]
# else:
# return 0
# def list_experiments(self, view_type=ViewType.ACTIVE_ONLY):
# rsl = []
# if view_type == ViewType.ACTIVE_ONLY or view_type == ViewType.ALL:
# rsl += self._get_active_experiments(full_path=False)
# if view_type == ViewType.DELETED_ONLY or view_type == ViewType.ALL:
# # rsl += self._get_deleted_experiments(full_path=False)
# pass
# experiments = []
# for exp_id in rsl:
# try:
# # trap and warn known issues, will raise unexpected exceptions to caller
# experiment = self._get_experiment(exp_id, view_type)
# if experiment:
# experiments.append(experiment)
# except MissingConfigException as rnfe:
# # Trap malformed experiments and log warnings.
# logging.warning(
# "Malformed experiment '%s'. Detailed error %s",
# str(exp_id),
# str(rnfe),
# exc_info=True,
# )
# return experiments
# def _get_active_experiments(self, full_path=False):
# active_experiments_query = {
# "type": "experiment",
# "experiment_state": LifecycleStage.ACTIVE,
# }
# all_experiments = self.db.experiments.find(active_experiments_query)
# # exp_list = list_subdirs(self.root_directory, full_path)
# # return [exp for exp in exp_list if not exp.endswith(FileStore.TRASH_FOLDER_NAME)]
# def _get_deleted_experiments(self, full_path=False):
# # return list_subdirs(self.trash_folder, full_path)
# raise NotImplementedError("get_deleted_experiments")
| 36.315789 | 93 | 0.58285 | 3,957 | 0.955797 | 0 | 0 | 0 | 0 | 0 | 0 | 3,015 | 0.728261 |
80c147e7794b7f3c322d8bf48ca72fdbf59b3d05 | 2,110 | py | Python | tests/test_app.py | jonathanharg/covid_dashboard | a1bc18d971911cc4db35af96f973da636c91190e | [
"MIT"
] | null | null | null | tests/test_app.py | jonathanharg/covid_dashboard | a1bc18d971911cc4db35af96f973da636c91190e | [
"MIT"
] | null | null | null | tests/test_app.py | jonathanharg/covid_dashboard | a1bc18d971911cc4db35af96f973da636c91190e | [
"MIT"
] | null | null | null | from app import create_app
from utils import get_setting
import pytest
@pytest.fixture
def client():
app = create_app(testing=True)
with app.test_client() as client:
yield client
@pytest.mark.parametrize("url", ["/", "/index"])
def test_get_url(client, url):
    """Each top-level page should respond with 200 (OK) or 302 (redirect)."""
    response = client.get(url)
    assert response.status_code in [200, 302]
# Malformed / edge-case query payloads exercised against the index endpoint.
remove_nonexisting_event = {
    "update_item": "TRY TO REMOVE AN ARTICLE THAT DOES NOT EXIST"
}
remove_nonexisting_news = {"notif": "TRY TO REMOVE AN ARTICLE THAT DOES NOT EXIST"}
# Scheduling requests with a missing label, missing time, or invalid time.
schedule_update_with_no_label = {
    "update": "12:30",
    "covid-data": "covid-data",
}
schedule_update_with_no_time = {
    "update": "",
    "two": "No Time",
    "covid-data": "covid-data",
}
schedule_update_with_invalid_time = {
    "update": "25:72",
    "two": "Invalid Time",
    "covid-data": "covid-data",
}
schedule_update_with_same_name = {
    "update": "12:30",
    "two": "Same Name",
    "covid-data": "covid-data",
}
remove_update_with_same_name = {"update_item": "Same Name"}
schedule_update_with_no_covid_or_news = {"update": "12:30", "two": "Label"}
# Order matters: the duplicate-name update is scheduled twice, then removed twice.
requests = [
    remove_nonexisting_event,
    remove_nonexisting_news,
    schedule_update_with_no_label,
    schedule_update_with_no_time,
    schedule_update_with_invalid_time,
    schedule_update_with_no_covid_or_news,
    schedule_update_with_same_name,
    schedule_update_with_same_name,
    remove_update_with_same_name,
    remove_update_with_same_name,
]
@pytest.mark.parametrize("requests", requests)
def test_input_sequence(client, requests):
    """The index page should respond gracefully to each crafted query string."""
    query = "&".join(key + "=" + value for key, value in requests.items())
    url = "index" + ("?" + query if query else "")
    response = client.get(url)
    assert response.status_code in [200, 302]
# TEST FAVICON, TEST IMAGE
def test_favicon(client):
    """The favicon path from the app settings should be served successfully."""
    favicon = get_setting("favicon")
    response = client.get(favicon)
    assert response.status_code in [200, 302]
def test_image(client):
image = get_setting("image")
response = client.get("/static/images/" + image)
| 25.119048 | 83 | 0.680095 | 0 | 0 | 108 | 0.051185 | 623 | 0.295261 | 0 | 0 | 464 | 0.219905 |
80c2022fa6bfc0785ace3c63f2c584e61cfeac6a | 7,734 | py | Python | wp/modules/utils/frame.py | ExLeonem/master-thesis-code | 559ad55f15c99772358384146bd30dd517b1dfe8 | [
"MIT"
] | null | null | null | wp/modules/utils/frame.py | ExLeonem/master-thesis-code | 559ad55f15c99772358384146bd30dd517b1dfe8 | [
"MIT"
] | null | null | null | wp/modules/utils/frame.py | ExLeonem/master-thesis-code | 559ad55f15c99772358384146bd30dd517b1dfe8 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
class Frame:
    """
    Utility methods for dataframe operations across multiple dataframes.
    """

    @staticmethod
    def set_columns(frame, new_names):
        """Rename all columns of ``frame`` in place and return the frame."""
        frame.columns = new_names
        return frame

    @staticmethod
    def filter(frame, exclude):
        """Drop columns (list) or rows (dict) from ``frame``.

        Parameters:
            frame (pandas.DataFrame): the frame to filter.
            exclude (list|dict): column names to drop, or a mapping of
                column name to a value (or list of values) whose matching
                rows are removed.

        Returns:
            (pandas.DataFrame) the filtered frame.

        Raises:
            ValueError: if exclude is neither a list nor a dict.
        """
        if isinstance(exclude, list):
            # Keep every column whose name is not listed in `exclude`.
            filtered_names = [name for name in frame.columns if name not in exclude]
            return frame.filter(filtered_names)

        if not isinstance(exclude, dict):
            raise ValueError("Error while trying to filter the frame. Expected parameter exclude to be of type dict. Got {}".format(type(exclude)))

        filtered = frame
        for key, value in exclude.items():
            # Filter by multiple values
            if isinstance(value, list):
                for x in value:
                    filtered = filtered[filtered[key] != x]
                continue

            filtered = filtered[filtered[key] != value]

        return filtered

    @staticmethod
    def split_by(composite_frame, key, force=False):
        """
        Splits a dataframe by unique values of a column.

        Parameters:
            composite_frame (pandas.DataFrame): A pandas dataframe.
            key (str): The key to split the dataframe by.
            force (bool): allow splitting on a float column (default False).

        Returns:
            (list(pandas.Dataframe)) the dataframe split by column into different dataframes
        """
        if "float" in str(composite_frame[key].dtype) and not force:
            raise ValueError("Error in Frame.split_by(). Column to split by is of type float, aborting. Use kwarg \"force=True\" to force execution of this method.")

        values = np.unique(composite_frame[key])
        if len(values) == 1:
            return [composite_frame]

        return [composite_frame[composite_frame[key] == unique_value] for unique_value in values]

    @staticmethod
    def merge_by_index(frame_collection, **kwargs):
        """
        Merge multiple frame collections by array indices.

        Parameters:
            frame_collection(list(list(pandas.DataFrame))): A list of pandas dataframe lists.
            **kwargs (dict): getting passed to pandas.concat(obj, **kwargs)

        Returns:
            list(pandas.DataFrame) the frames merged by index of the inner frame lists

        Example:
            > frames = [[frame_1_1, frame_1_2], [frame_2_1, frame_2_2]]
            > merge_by_index(frames)
        """
        frames_by_index = []
        for collection_ith in range(len(frame_collection)):
            collection = frame_collection[collection_ith]
            if not isinstance(collection, list):
                raise ValueError("Error in Frame.merge_by_index(). Expected value of type list at index {} in parameter frame collection.".format(collection_ith))

            for frame_idx in range(len(collection)):
                frame = collection[frame_idx]
                if not isinstance(frame, pd.DataFrame):
                    raise ValueError("Error in Frame.merge_by_index(). Expected frame in collection of frames at index {}.".format(frame_idx))

                # Grow the transposed structure lazily so ragged inner lists work.
                if len(frames_by_index) < frame_idx + 1:
                    frames_by_index.append([])
                frames_by_index[frame_idx].append(frame)

        return [pd.concat(collection, **kwargs) for collection in frames_by_index]

    @staticmethod
    def mean(frames, groupby_key, ids=None):
        """
        Averages frames over all common numerical columns.

        Parameters:
            frames (pandas.DataFrames): The frames to mean data on.
            groupby_key (str): Key by which to group the frame
            ids (str|list(str)): Column names or list of column names equal over frames. Will get copied into meaned frame when passed. (default=None)

        Returns:
            (list(pandas.DataFrame)) a list of dataframes each grouped by given key and meaned.
        """
        if len(frames) < 1:
            # Message fixed: the check rejects only an empty list; a single frame is fine.
            raise ValueError("Error in Frame.mean(). Can't mean over an empty list of dataframes.")

        meaned = []
        for frame in frames:
            mean_frame = frame.groupby(groupby_key, as_index=False).mean()
            if ids is not None:
                # Copy identifier columns (assumed constant per group) into the result.
                mean_len = mean_frame.shape[0]
                copied_columns = Frame.get_columns(frame, ids)[:mean_len]
                Frame.update(mean_frame, copied_columns)
            meaned.append(mean_frame)

        return meaned

    @staticmethod
    def merge_mean_std(frame, decimals=None, mean_col="Mean", std_col="Std"):
        """Collapse ``mean_col`` and ``std_col`` into a single "Mean \u00B1 Std." text column."""
        frame = frame.copy()
        mean_values = Frame.round_values(frame[mean_col].to_numpy(), decimals)
        std_values = Frame.round_values(frame[std_col].to_numpy(), decimals)

        zipped = zip(mean_values, std_values)
        mean_std_values = list(map(lambda x: str(x[0]) + " \u00B1 " + str(x[1]), zipped))
        mean_std_label = "Mean \u00B1 Std."

        previous_columns = frame.columns
        frame.insert(0, mean_std_label, mean_std_values)
        # Drop every original column, leaving only the combined text column.
        for column in previous_columns:
            frame = frame.drop(column, axis=1)

        return frame

    @staticmethod
    def update(frame, series):
        """Insert a Series (or every column of a DataFrame) at the front of ``frame``.

        Bug fix: the Series branch previously fell through and accessed
        ``series.columns`` -- an attribute a Series does not have -- raising
        AttributeError; it now returns after the insert.
        """
        if isinstance(series, pd.Series):
            frame.insert(0, series.name, series)
            return

        column_names = series.columns
        for idx in range(len(column_names)):
            column_name = column_names[idx]
            frame.insert(idx, column_name, series[column_name])

    @staticmethod
    def get_columns(df, names):
        """Return ``df`` restricted to ``names`` (a column name or list of names).

        Bug fix: the original checked ``names is str`` (identity against the
        str *type*, always False), so a single name was never wrapped; the
        result is now always a DataFrame.
        """
        if isinstance(names, str):
            names = [names]
        return df[names]

    @staticmethod
    def transpose_index(frame, index):
        """Move one level of a MultiIndex into columns.

        NOTE(review): this method is unfinished -- the computed ``new_column``
        and ``new_index`` values are never applied to the result, and the
        original reshape call always raised ``TypeError`` (``list + tuple``
        concatenation).  The dead, broken reshape has been removed; the method
        currently returns an unmodified copy of ``frame``.
        """
        transposed = frame.copy()

        multi_index = frame.index.to_numpy()
        names = list(frame.index.names)
        index_idx = names.index(index)

        new_column = []
        new_index = []
        for row in multi_index:
            # Select the index entries corresponding to the level being transposed.
            if isinstance(row, tuple) and len(row) > 1:
                new_column.append(row[index_idx])
                new_index.append(Frame.__from_tuple_except(row, index_idx))

        new_column = np.unique(new_column)
        # TODO: rebuild `transposed` from new_index/new_column once the
        # intended reshaping semantics are settled.
        return transposed

    @staticmethod
    def __from_tuple_except(values, except_index):
        """Return ``values`` without the element at ``except_index``.

        A single remaining element is returned bare, otherwise a tuple.
        """
        new_tuple = [values[idx] for idx in range(len(values)) if idx != except_index]
        if len(new_tuple) == 1:
            return new_tuple[0]
        return tuple(new_tuple)

    @staticmethod
    def round_values(values, decimals):
        """Round ``values`` to ``decimals`` places; pass through when decimals is None."""
        if decimals is None:
            return values
        return np.round(values, decimals)
80c42f7c2ddfa85fab0bc347a514d3a8d73a04ce | 4,647 | py | Python | setup.py | jeblohe/strup | 60ddbcdf5cff411b738ce52573328e89a61e8d0b | [
"MIT"
] | null | null | null | setup.py | jeblohe/strup | 60ddbcdf5cff411b738ce52573328e89a61e8d0b | [
"MIT"
] | null | null | null | setup.py | jeblohe/strup | 60ddbcdf5cff411b738ce52573328e89a61e8d0b | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from codecs import open
import os
# Get the long description from the README file
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, "README.md")) as f:
long_description = f.read()
# Get the version
for line in open(os.path.join(here, "strup", "__init__.py")):
if line.startswith("__version__"):
version = line.split("=")[1].strip()[1:-1]
# List packages we depend on (end users)
dependencies = []
# Packages for development of strup (assumed on Python 3.x)
dependencies_dev = ["pytest>=5.1", "pytest-cov", "coverage", "black", "coveralls"]
# Packages for testing strup without syntax and coverage checks (for CI checks on old images)
dependencies_test = ["pytest>=4.6"]
setup(
name="strup",
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=version,
description=(
"A package for unpacking int, float, string and bool objects from a text string."
),
long_description=long_description,
long_description_content_type="text/markdown",
# The project's main homepage.
url="https://github.com/jeblohe/strup",
# Author details
author="Jens B. Helmers",
author_email="jens.bloch.helmers@gmail.com",
# Choose your license
license="MIT",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 5 - Production/Stable",
# Indicate who your project is intended for
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Financial and Insurance Industry",
"Intended Audience :: Information Technology",
"Intended Audience :: Manufacturing",
"Intended Audience :: Other Audience",
"Intended Audience :: Science/Research",
"Intended Audience :: System Administrators",
"Intended Audience :: Telecommunications Industry",
"Topic :: Text Processing",
# Pick your license as you wish (should match "license" above)
"License :: OSI Approved :: MIT License",
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
# Supported Python versions (pip will refuse to install on other versions)
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
# What does your project relate to?
keywords="text processing",
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(),
# List run-time dependencies here. These will be installed by pip when
# your project is installed.
install_requires=dependencies,
# List additional groups of dependencies here (e.g. development and test
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install strup[dev] # or: pip install -e .[dev]
# $ pip install strup[test]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
extras_require={
"dev": dependencies_dev,
"test": dependencies_test,
},
# If there are data files included in your packages that need to be
# installed, specify them here.
package_data={},
zip_safe=True,
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={"console_scripts": []},
# List additional URLs that are relevant to your project as a dict.
project_urls={
"Documentation": "https://strup.readthedocs.io/",
"Bug Tracker": "https://github.com/jeblohe/strup/issues",
"Source Code": "https://github.com/jeblohe/strup/",
},
)
| 41.491071 | 93 | 0.660856 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,293 | 0.708629 |
80c51c456120b0b7a682e4f845f192154e235f25 | 4,665 | py | Python | dags/load_transformations.py | brendasanchezs/Capstonev2 | 22d5cebfaba6b2865d9fb71a31b214b57ea034b5 | [
"Apache-2.0"
] | null | null | null | dags/load_transformations.py | brendasanchezs/Capstonev2 | 22d5cebfaba6b2865d9fb71a31b214b57ea034b5 | [
"Apache-2.0"
] | null | null | null | dags/load_transformations.py | brendasanchezs/Capstonev2 | 22d5cebfaba6b2865d9fb71a31b214b57ea034b5 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime, timedelta
import boto3
from airflow.models import Variable
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.S3_hook import S3Hook
from airflow.contrib.operators.emr_create_job_flow_operator import (
EmrCreateJobFlowOperator,
)
from airflow.contrib.operators.emr_add_steps_operator import EmrAddStepsOperator
from airflow.contrib.sensors.emr_step_sensor import EmrStepSensor
from airflow.contrib.operators.emr_terminate_job_flow_operator import (
EmrTerminateJobFlowOperator,
)
# Configurations
# EMR cluster definition handed to EmrCreateJobFlowOperator below.
JOB_FLOW_OVERRIDES = {
    "Name": "Movie review classifier",
    "LogUri":"s3://data-raw-bucket/",
    "ReleaseLabel": "emr-5.29.0",
    "Applications": [{"Name": "Hadoop"}, {"Name": "Spark"}, {"Name": "Livy"} ], # We want our EMR cluster to have HDFS and Spark
    "Configurations": [
        {
            "Classification": "spark-env",
            "Configurations": [
                {
                    "Classification": "export",
                    # Run PySpark under Python 3 on the cluster nodes.
                    "Properties": {"PYSPARK_PYTHON": "/usr/bin/python3"},
                }
            ],
        }
    ],
    "BootstrapActions": [
        {
            "Name": "CustomBootStrapAction",
            "ScriptBootstrapAction": {
                # Bootstrap script run on every node before Hadoop starts --
                # presumably installs an XML-parsing package; confirm contents.
                "Path": "s3://data-raw-bucket/xmlpackage.sh",
            }
        }
    ],
    "Instances": {
        "InstanceGroups": [
            {
                "Name": "Master node",
                "Market": "SPOT",
                "InstanceRole": "MASTER",
                "InstanceType": "m4.xlarge",
                "InstanceCount": 1,
            },
            {
                "Name": "Core - 2",
                "Market": "SPOT", # Spot instances are a "use as available" instances
                "InstanceRole": "CORE",
                "InstanceType": "m4.xlarge",
                "InstanceCount": 2,
            },
        ],
        "KeepJobFlowAliveWhenNoSteps": True,
        "TerminationProtected": False, # this lets us programmatically terminate the cluster
    },
    "JobFlowRole": "EMR_EC2_DefaultRole",
    "ServiceRole": "EMR_DefaultRole",
}
##### The Spark step(s) submitted to the EMR cluster
# NOTE(review): the `params` passed by EmrAddStepsOperator are never
# referenced here (no {{ params.* }} templating), so they are currently unused.
SPARK_STEPS = [
    {
        "Name": "Classify movie and log reviews",
        "ActionOnFailure": "CANCEL_AND_WAIT",
        "HadoopJarStep": {
            "Jar": "command-runner.jar",
            "Args": [
                "spark-submit",
                "--deploy-mode",
                "client",
                "s3://data-raw-bucket/transformation-spark.py",
            ],
        },
    }
]
# Default task arguments shared by every task in the DAG.
default_args = {
    "owner": "airflow",
    "start_date": datetime(2020, 10, 17),
    "email": ["airflow@airflow.com"],
    "email_on_failure": False
}
# Runs daily at 10:00; at most one active DAG run at a time.
dag = DAG(
    "MOVIE_REVIEWS_DAG",
    default_args=default_args,
    schedule_interval="0 10 * * *",
    max_active_runs=1,
)
start_data_pipeline = DummyOperator(task_id="Init", dag=dag)
# Create an EMR cluster
create_emr_cluster = EmrCreateJobFlowOperator(
task_id="create_emr_cluster",
job_flow_overrides=JOB_FLOW_OVERRIDES,
aws_conn_id="aws_default",
emr_conn_id="emr_default",
dag=dag,
)
# Add your steps to the EMR cluster
step_adder = EmrAddStepsOperator(
task_id="transformation_movies",
job_flow_id="{{ task_instance.xcom_pull(task_ids='create_emr_cluster', key='return_value') }}",
aws_conn_id="aws_default",
steps=SPARK_STEPS,
params={ # these params are used to fill the paramterized values in SPARK_STEPS json
"BUCKET_NAME":"data-raw-bucket",
"s3_script": "s3://data-raw-bucket/transformation-spark.py"
},
dag=dag,
)
# last_step = len(SPARK_STEPS) - 1
# # wait for the steps to complete
# step_checker = EmrStepSensor(
# task_id="watch_step",
# job_flow_id="{{ task_instance.xcom_pull('create_emr_cluster', key='return_value') }}",
# step_id="{{ task_instance.xcom_pull(task_ids='transformation_movies', key='return_value')["
# + str(last_step)
# + "] }}",
# aws_conn_id="aws_default",
# dag=dag,
# )
# Terminate the EMR cluster
terminate_emr_cluster = EmrTerminateJobFlowOperator(
task_id="terminate_emr_cluster",
job_flow_id="{{ task_instance.xcom_pull(task_ids='create_emr_cluster', key='return_value') }}",
aws_conn_id="aws_default",
dag=dag,
)
end_data_pipeline = DummyOperator(task_id="End", dag=dag)
s3ToPostgres = DummyOperator(task_id="S3ToPostgres", dag=dag)
start_data_pipeline >> [create_emr_cluster, s3ToPostgres] >> step_adder >> terminate_emr_cluster >> end_data_pipeline
| 30.292208 | 128 | 0.614791 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,226 | 0.47717 |
80c5febb11f85056db71fbcf343fcfa6d57b6f52 | 4,716 | py | Python | pypower/runpf_fast.py | felixkoeth/PYPOWER | 51476da14dead2ca23417bfa1210748800212ffe | [
"BSD-3-Clause"
] | null | null | null | pypower/runpf_fast.py | felixkoeth/PYPOWER | 51476da14dead2ca23417bfa1210748800212ffe | [
"BSD-3-Clause"
] | null | null | null | pypower/runpf_fast.py | felixkoeth/PYPOWER | 51476da14dead2ca23417bfa1210748800212ffe | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Runs a power flow.
"""
from sys import stdout, stderr
from os.path import dirname, join
from time import time
from numpy import r_, c_, ix_, zeros, pi, ones, exp, argmax,angle
from numpy import flatnonzero as find
#from pypower.bustypes import bustypes
#from pypower.ext2int import ext2int
#from pypower.loadcase import loadcase
#from pypower.ppoption import ppoption
#from pypower.ppver import ppver
#from pypower.makeBdc import makeBdc
from pypower.makeSbus import makeSbus
#from pypower.dcpf import dcpf
#from pypower.makeYbus import makeYbus
from pypower.newtonpf_fast import newtonpf_fast
#from pypower.fdpf import fdpf
#from pypower.gausspf import gausspf
#from pypower.makeB import makeB
#from pypower.pfsoln import pfsoln
#from pypower.printpf import printpf
#from pypower.savecase import savecase
#from pypower.int2ext import int2ext
from pypower.idx_bus import PD, QD, VM, VA, GS, BUS_TYPE, PQ, REF
from pypower.idx_brch import PF, PT, QF, QT
from pypower.idx_gen import PG, QG, VG, QMAX, QMIN, GEN_BUS, GEN_STATUS
def runpf_fast(Ybus, Yf,Yt,ref, pv, pq,on,ppc, ppopt=None, fname='', solvedcase=''):
    """Run an AC power flow (fast variant with precomputed matrices).

    Unlike C{pypower.runpf}, the admittance matrices (C{Ybus}, C{Yf}, C{Yt}),
    the bus-type index arrays (C{ref}, C{pv}, C{pq}) and the list of
    in-service generators (C{on}) are supplied by the caller, so they are not
    rebuilt on every call.

    Parameters:
        Ybus, Yf, Yt: bus and branch admittance matrices.
        ref, pv, pq: index arrays of reference, PV and PQ buses.
        on: row indices of in-service generators in C{ppc["gen"]}.
        ppc: PYPOWER case dict.  MUTATED IN PLACE: bus/branch/gen numbering is
            shifted to 0-based internally and restored before returning, and
            the solved voltages, elapsed time ("et") and convergence flag
            ("success") are written back into it.
        ppopt: PYPOWER options vector, forwarded to C{newtonpf_fast}.
        fname, solvedcase: accepted for interface compatibility with
            C{runpf}; not used in this function body.

    Returns:
        (ppc, success, i): the updated case dict, the convergence flag, and
        the iteration count reported by C{newtonpf_fast}.

    @author: Ray Zimmerman (PSERC Cornell)
    """
    ## convert to internal (0-based) indexing; undone before returning
    ppc["branch"][:,[0,1]]-=1
    ppc["bus"][:,0]-=1
    ppc["gen"][:,0]-=1
    baseMVA, bus, gen, branch = \
        ppc["baseMVA"], ppc["bus"], ppc["gen"], ppc["branch"]
    ## get bus index lists of each type of bus
    #ref, pv, pq = bustypes(bus, gen)
    #
    # generator info
    #print(gen[:, GEN_STATUS])
    #on = find(gen[:, GEN_STATUS] > 0)              ## which generators are on?
    gbus = gen[on, GEN_BUS].astype(int)                   ## what buses are they at?
    ##-----  run the power flow  -----
    t0 = time()
    # initial voltage guess from the case; the magic constant is pi/180
    V0 = bus[:, VM] * exp(1j * 0.017453292519943295 * bus[:, VA])
    # impose generator voltage setpoints at generator buses
    V0[gbus] = gen[on, VG] / abs(V0[gbus]) * V0[gbus]
    ## build admittance matrices
    #Ybus, Yf, Yt = makeYbus(baseMVA, bus, branch)
    ## compute complex bus power injections [generation - load]
    Sbus = makeSbus(baseMVA, bus, gen)
    ## run the power flow
    V, success, i = newtonpf_fast(Ybus, Sbus, V0, ref, pv, pq, ppopt)
    ## update data matrices with solution
    #bus, gen, branch = pfsoln(baseMVA, bus, gen, branch, Ybus, Yf, Yt, V, ref, pv, pq)
    bus[:, VM] = abs(V)
    bus[:, VA] = angle(V) * 180 / pi
    #UNTIL HERE
    ppc["et"] = time() - t0
    ppc["success"] = success
    ##-----  output results  -----
    ## convert back to original (1-based) bus numbering
    ppc["bus"], ppc["gen"], ppc["branch"] = bus, gen, branch
    ppc["branch"][:,[0,1]]+=1
    ppc["bus"][:,0]+=1
    ppc["gen"][:,0]+=1
    return ppc, success,i
if __name__ == '__main__':
    # NOTE(review): `runpf` is not defined in this module (the guard appears
    # copied from pypower.runpf), so executing this file directly raises
    # NameError.  Call runpf_fast(...) with prebuilt matrices instead.
    runpf()
| 33.211268 | 87 | 0.683842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,327 | 0.705471 |
80c6ede6e307ee489af6f1fefba2316fa2f0871d | 5,565 | py | Python | SimpleTrading/commodities_inflation.py | Nawter/Quantiacs | 95ea9443744c5a2e5268fa18f5e38928a6452d5d | [
"MIT"
] | null | null | null | SimpleTrading/commodities_inflation.py | Nawter/Quantiacs | 95ea9443744c5a2e5268fa18f5e38928a6452d5d | [
"MIT"
] | null | null | null | SimpleTrading/commodities_inflation.py | Nawter/Quantiacs | 95ea9443744c5a2e5268fa18f5e38928a6452d5d | [
"MIT"
] | null | null | null | # This program imports the federal reserve economic data consumer price index
# values from 1990 and uses those values to get the real values or infaltion adjusted
# values of the sepcific commodities/markets.
# Then when a commdoity hits a specific low infaltion based price, the algo
# enters into a long psoiton and exits when the commodity/market hits a relativley
# high price.
import numpy
import csv
# Element zero is the oldest value -- in this case, the CPI from 2/1/1990.
def cpi_array(filename="CPI_Spyder.csv"):
    """Load monthly CPI values from ``filename`` into a 1-D numpy array.

    Each CSV row is expected to carry the CPI value in its second column,
    ordered oldest first.  The original implementation wrote into a fixed
    ``numpy.zeros(328)`` buffer, which silently mis-sized the result (or
    raised IndexError) for any other file length; the array is now sized
    from the file itself, which is identical for the intended 328-row file.

    Parameters:
        filename (str): path of the CSV file to read.  The default keeps the
            original hard-coded name, so existing callers are unaffected.

    Returns:
        numpy.ndarray: CPI values as floats, oldest first.
    """
    values = []
    with open(filename, 'r') as csvfile:
        for row in csv.reader(csvfile):
            values.append(float(row[1]))
    return numpy.array(values)
# Market dictionary entry layout:
#   [buy price, sell price, current position, initial entry position,
#    "fall by" trigger, amount added per pyramid step, number of adds so far]
def market_dictionary():
    """Return the per-market trading parameters, keyed by market index."""
    parameter_table = [
        [10000.0, 12500.0, 0, .5, .08, .1, 0],
        [8000.0, 12000.0, 0, .5, .12, .1, 0],
        [20000.0, 25000.0, 0, .5, .1, .1, 0],
        [15000.0, 20000.0, 0, .5, .06, .1, 0],
        [26000.0, 36000.0, 0, .5, .07, .1, 0],
        [25000.0, 30000.0, 0, .5, .08, .1, 0],
        [20000.0, 21000.0, 0, .5, .05, .1, 0],
        [14000.0, 17000.0, 0, .5, .07, .1, 0],
        [15000.0, 20000.0, 0, .5, .07, .1, 0],
        [5000.0, 6000.0, 0, .5, .1, .1, 0],
        [13000.0, 19500.0, 0, .5, .075, .1, 0],
    ]
    return {index: entry for index, entry in enumerate(parameter_table)}
def myTradingSystem(DATE, OPEN, HIGH, LOW, CLOSE, VOL, exposure, equity, settings):
    """Inflation-adjusted mean-reversion system.

    Deflates the latest close of each market to base-period (1990) prices
    using the FRED CPI series, goes long when the real price drops below the
    market's buy level, pyramids into the position as the price falls
    further, and flattens once the real price reaches the sell level.

    Bug fixes vs. the original:
      * The pyramiding conditions were written as
        ``price <= (threshold and count == N)`` -- the ``and`` was evaluated
        inside the comparison, so the price was compared against a boolean
        (0 or 1) and pyramiding effectively never triggered.  The count check
        is now a separate conjunct.
      * One pyramid level scaled the threshold by the add-size (index 5)
        instead of the fall-by trigger (index 4); all levels now use the
        fall-by trigger consistently.

    Returns:
        (pos, settings): position weights per market, and the settings dict
        (mutated in place: day counter, CPI index, market state).
    """
    nMarkets = CLOSE.shape[1]
    pos = numpy.zeros(nMarkets)

    settings['countDays'] += 1
    # Deflator that converts today's nominal prices into base-period prices.
    settings['CPI_muliplyer'] = (settings['BASE_CPI'] / settings['cpi_array'][settings['count']])
    # Advance to the next monthly CPI reading roughly every 21 trading days.
    if settings['countDays'] % 21 == 0:
        settings['count'] += 1

    for i in range(nMarkets - 1):
        market = settings['market_dictionary'][i]
        buy_price, sell_price = market[0], market[1]
        fall_by, add_size = market[4], market[5]
        real_close = CLOSE[-1, i] * settings['CPI_muliplyer']

        # Enter (or maintain) the long position at the current target size.
        if real_close <= buy_price:
            market[2] = market[3]

        # Pyramid: each time the real price falls another `fall_by` step below
        # the buy level, grow the target size by `add_size` (max 5 adds, one
        # add per day, deepest eligible level first).
        for level in range(5, 0, -1):
            if market[6] == level - 1 and real_close <= buy_price / (1 + fall_by * level):
                market[6] += 1
                market[3] += add_size
                break

        # Exit: flatten the position and reset the pyramid counter.
        # NOTE(review): the entry size (index 3) is intentionally not reset
        # on exit, matching the original behaviour.
        if real_close >= sell_price:
            market[2] = 0
            market[6] = 0

    # Publish the per-market positions; the last column (CASH) keeps the
    # original fixed, dominant weight.
    for i in range(nMarkets - 1):
        pos[i] = settings['market_dictionary'][i][2]
    pos[11] = 11
    return pos, settings
def mySettings():
    """Return the Quantiacs settings dict: markets, sample window, costs,
    and the CPI data / per-market parameters used by myTradingSystem."""
    settings = {}

    # Futures Contracts
    settings['markets'] = ['F_C', 'F_CC', 'F_CL', 'F_CT', 'F_FC','F_KC',
                        'F_LC', 'F_LN', 'F_NG', 'F_O', 'F_PA', 'CASH']
    #`19900104 - 20170710
    settings['beginInSample'] = '19900104'
    #settings['endInSample'] = '20170710'
    settings['lookback'] = 21
    settings['budget'] = 10**6
    settings['slippage'] = 0.05
    # countDays / count track trading days elapsed and the current CPI month.
    settings['countDays'] = 0
    settings['count'] = 0
    settings['cpi_array'] = cpi_array()
    settings['market_dictionary'] = market_dictionary()
    # Oldest CPI reading is the base against which prices are deflated.
    settings['BASE_CPI'] = settings['cpi_array'][0]
    settings['CPI_muliplyer'] = 0
    return settings
# Evaluate trading system defined in current file.
if __name__ == '__main__':
    # quantiacsToolbox backtests this file's mySettings/myTradingSystem pair.
    import quantiacsToolbox
    results = quantiacsToolbox.runts(__file__)
    print(results['stats'])
80c7f4d876bcca6792829d4bd5fbc77ce4c7d34b | 3,195 | py | Python | encryptor/encryptor.py | crafter-hub/Kreusada-Cogs | 9b7bf873484c7bfeb9707b50f386de82c355b571 | [
"MIT"
] | 21 | 2021-03-11T06:52:41.000Z | 2022-02-04T16:27:47.000Z | encryptor/encryptor.py | crafter-hub/Kreusada-Cogs | 9b7bf873484c7bfeb9707b50f386de82c355b571 | [
"MIT"
] | 77 | 2021-03-06T13:31:50.000Z | 2022-03-25T10:37:15.000Z | encryptor/encryptor.py | crafter-hub/Kreusada-Cogs | 9b7bf873484c7bfeb9707b50f386de82c355b571 | [
"MIT"
] | 33 | 2021-03-05T20:59:07.000Z | 2022-03-06T03:55:47.000Z | import contextlib
import random
import string
from password_strength import PasswordStats
from redbot.core import commands
from redbot.core.utils import chat_formatting as cf
from .word_list import *
GREEN_CIRCLE = "\N{LARGE GREEN CIRCLE}"
YELLOW_CIRCLE = "\N{LARGE YELLOW CIRCLE}"
ORANGE_CIRCLE = "\N{LARGE ORANGE CIRCLE}"
RED_CIRCLE = "\N{LARGE RED CIRCLE}"
class Encryptor(commands.Cog):
    """
    Create, and validify the strength of passwords.
    """

    __author__ = ["Kreusada"]
    __version__ = "1.1.0"

    def __init__(self, bot):
        self.bot = bot

    def format_help_for_context(self, ctx: commands.Context) -> str:
        """Append author and version information to the cog help text."""
        context = super().format_help_for_context(ctx)
        authors = ", ".join(self.__author__)
        return f"{context}\n\nAuthor: {authors}\nVersion: {self.__version__}"

    async def red_delete_data_for_user(self, **kwargs):
        """Nothing to delete"""
        return

    def cog_unload(self):
        # Remove the dev-environment shortcut registered in initialize().
        with contextlib.suppress(Exception):
            self.bot.remove_dev_env_value("encryptor")

    async def initialize(self) -> None:
        # Register a dev-env shortcut for a specific owner id only.
        if 719988449867989142 in self.bot.owner_ids:
            with contextlib.suppress(Exception):
                self.bot.add_dev_env_value("encryptor", lambda x: self)

    @commands.group()
    async def password(self, ctx):
        """
        Create, and validify the strength of passwords.
        """
        pass

    @password.group(name="generate")
    async def password_generate(self, ctx):
        """Generate passwords."""
        pass

    @password_generate.command(name="complex")
    async def password_generate_complex(self, ctx):
        """Generate a complex password."""
        # NOTE(review): string.ascii_letters has only 52 characters, so the
        # [:94] slice is a no-op -- string.printable[:94] may have been meant.
        await ctx.send(
            "".join(
                random.choice(string.ascii_letters[:94]) for i in range(random.randint(20, 35))
            )
        )

    @password_generate.command(name="strong")
    async def password_generate_strong(self, ctx, delimeter: str = ""):
        """
        Generate a strong password.

        **Arguments**

        * ``<delimeter>``: The character used to seperate each random word. Defaults to "-"
        """
        d = delimeter
        rc = random.choice
        rr = random.randint
        await ctx.send(
            d.join(rc(RANDOM_WORDS).capitalize() for i in range(3)) + f"{d}{rr(1,1000)}"
        )

    @password.command(name="strength")
    async def password_strength(self, ctx, password: str):
        """Validate a passwords strength."""
        conv = PasswordStats(password)
        converter = conv.strength()
        # Bug fix: the original used strict `>` lower bounds on each elif, so
        # scores landing exactly on a boundary (0.25, 0.5, 0.75) fell through
        # every branch and were reported as "excellent".  Chained upper-bound
        # checks cover the whole [0, 1] range without gaps.
        if converter < 0.250:
            emoji = RED_CIRCLE
            text = "This is a **weak** password."
        elif converter < 0.500:
            emoji = ORANGE_CIRCLE
            text = "This is an **okay** password."
        elif converter < 0.750:
            emoji = YELLOW_CIRCLE
            text = "This is a **good** password!"
        else:
            emoji = GREEN_CIRCLE
            text = "This is an **excellent** password!"
        await ctx.maybe_send_embed(
            f"**Strength rating: {round(converter * 100)}%** {emoji}\n{cf.quote(text)}"
        )
| 31.019417 | 95 | 0.607199 | 2,829 | 0.885446 | 0 | 0 | 1,897 | 0.59374 | 2,013 | 0.630047 | 881 | 0.275743 |
80c8767c767968d7e7fa23b2ecf6dcc08bf852f7 | 3,884 | py | Python | semana4/app.py | ArseniumGX/bluemer-modulo2 | 24e5071b734de362dc47ef9d402c191699d15b43 | [
"MIT"
] | null | null | null | semana4/app.py | ArseniumGX/bluemer-modulo2 | 24e5071b734de362dc47ef9d402c191699d15b43 | [
"MIT"
] | null | null | null | semana4/app.py | ArseniumGX/bluemer-modulo2 | 24e5071b734de362dc47ef9d402c191699d15b43 | [
"MIT"
] | null | null | null | from werkzeug.security import generate_password_hash, check_password_hash
from flask import Flask, render_template, jsonify, request, redirect, flash, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin, current_user, login_required, logout_user, login_user
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://user:password@localhost:3009/blue_modulo3'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config['SECRET_KEY'] = b'_5#y2L"F4Q8z\n\xec]/'
db = SQLAlchemy(app)
login = LoginManager(app)
########################################################################
class Projeto(db.Model):
    """Portfolio project shown on the site: name, description, image and link."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(25), nullable=False)
    description = db.Column(db.String(85))
    image = db.Column(db.String)
    link = db.Column(db.String, nullable=False)
    # NOTE(review): last_login is never assigned for Projeto in this file
    # (only Admin uses it) — looks like a copy/paste leftover; confirm before
    # removing, since dropping it changes the table schema.
    last_login = db.Column(db.Date)
    def __init__(self, name, description, image, link) -> None:
        super().__init__()
        self.name = name
        self.description = description
        self.image = image
        self.link = link
########################################################################
########################################################################
class Admin(UserMixin, db.Model):
    """Administrator account; UserMixin supplies the Flask-Login interface."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(50), nullable=False)
    username = db.Column(db.String(25), nullable=False)
    # Holds the werkzeug hash written by set_password().
    password = db.Column(db.String(103), nullable=False)
    last_login = db.Column(db.Date, nullable=True)
    def __init__(self, name, username, password) -> None:
        # NOTE(review): the raw password is stored here; callers are expected
        # to call set_password() afterwards to replace it with a hash
        # (signup() below does exactly that).
        super().__init__()
        self.name = name
        self.username = username
        self.password = password
        self.last_login = None
    def set_password(self, pwd):
        # Replace the stored value with a salted hash.
        self.password = generate_password_hash(pwd)
    def check_password(self, pwd):
        # True when pwd matches the stored hash.
        return check_password_hash(self.password, pwd)
    def set_date(self):
        # Record today's date as the last successful login.
        from datetime import date
        self.last_login = date.today()
########################################################################
@login.user_loader
def load_user(id):
    """Flask-Login callback: map a stored session user id back to an Admin row."""
    # The id from the session is cast to int for the primary-key lookup.
    # (The parameter name shadows the builtin id(); kept as-is.)
    return Admin.query.get(int(id))
########################################################################
@app.route('/')
def index():
    """Public landing page."""
    return render_template('index.html')
@app.route('/signup', methods=['POST'])
def signup():
    """Create an Admin from a JSON body with name/username/password keys."""
    # NOTE(review): the method check is redundant (the route only accepts
    # POST); if it were ever false the view would implicitly return None.
    if request.method == 'POST':
        admin = Admin(
            request.json['name'],
            request.json['username'],
            request.json['password']
        )
        # Replaces the raw password stored by the constructor with its hash.
        admin.set_password(request.json['password'])
        db.session.add(admin)
        db.session.commit()
        return jsonify({'Message': 'Administrator created!'}), 201
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form (GET) or authenticate an admin (POST).

    NOTE(review): this view's name rebinds the module-level `login`
    (the LoginManager instance created above). It still works because the
    user_loader was registered before this definition, but renaming one of
    the two would be clearer.
    """
    if current_user.is_authenticated:
        return redirect(url_for('admin'))
    if request.method == 'GET':
        return render_template('login.html')
    if request.method == 'POST':
        user = Admin.query.filter_by(username=request.form['username']).first()
        if user and user.check_password(request.form['password']):
            login_user(user, remember=True)
            user.set_date()
            # Persist the last_login date set above.
            db.session.commit()
            return redirect(url_for('admin'))
        else:
            # User-facing message (Portuguese): "Invalid username or password!"
            flash('Usuário ou senha inválidos!')
            return redirect(url_for('login'))
@app.route('/admin')
@login_required
def admin():
    """Admin dashboard; only reachable with an authenticated session."""
    return render_template('admin.html')
@app.route('/admin/logout')
def logout():
    """End the admin session and return to the landing page."""
    logout_user()
    return redirect(url_for('index'))
@app.route('/<slug>')
def slug(slug):
    """Catch-all route: any unknown path renders a 404 page echoing the path."""
    return render_template('notfound.html', slug=slug), 404
if __name__ == '__main__':
    # db.drop_all()
    # Ensure all tables exist before serving (no-op when already created).
    db.create_all()
app.run(debug=True, host='0.0.0.0', port=3000) | 32.638655 | 102 | 0.598095 | 1,319 | 0.339424 | 0 | 0 | 1,492 | 0.383942 | 0 | 0 | 827 | 0.212815 |
80c9491190bb5e53af696dbe13cce5e8d3abe6ff | 9,788 | py | Python | setup.py | osilkin98/HappyMail | 6bc34cd2b35d58757973a267bf01077332770b6d | [
"MIT"
] | 1 | 2018-09-20T01:06:11.000Z | 2018-09-20T01:06:11.000Z | setup.py | osilkin98/HappyMail | 6bc34cd2b35d58757973a267bf01077332770b6d | [
"MIT"
] | null | null | null | setup.py | osilkin98/HappyMail | 6bc34cd2b35d58757973a267bf01077332770b6d | [
"MIT"
] | null | null | null | from subprocess import call
from sys import executable
from distutils.core import setup
from distutils.command.build_py import build_py
import os
from os.path import exists
from getpass import getuser
from time import sleep
# Bootstrap colorama (used below for coloured console output): install it on
# the fly when the import fails. The `finally` clause runs in both cases, so
# the names are only bound once the package is actually available.
try:
    import colorama
except ImportError as IE:
    print("Module Colorama not found, installing")
    call([executable, '-m', 'pip', 'install','--user', 'colorama==0.3.9'])
finally:
    from colorama import Fore, Style, Back
# Runtime dependencies; the tensorflow entry picks the GPU build when
# `which nvidia-smi` succeeds (returns 0).
needed_packages = ['apiclient>=1.0.3',
                   'httplib2>=0.9.2',
                   'google-api-python-client-py3>=1.2',
                   'oauth2client>=4.1.2',
                   'bs4>=0.0.1',
                   'tensorflow>=1.10.1' if call(['which', 'nvidia-smi']) != 0 else 'tensorflow-gpu>=1.10.1',
                   'keras>=2.2.2']
# Directory layout to create; the keys are also written as variables into
# src/configuration_files/keys.py during the build step below.
needed_directories = {"config_files": "src/configuration_files",
                      "models": "cache/models",
                      "logdir": "cache/models/logs",
                      "training_dir": "cache/models/training_data",
                      "index_directory": "cache/models/indices",
                      "cache_dir": "cache",
                      "message_cache": "cache/messages",
                      "label_cache": "cache/labels",
                      "list_cache": "cache/lists",
                      "classify_dir": "cache/processed/",
                      "processed_messages": "cache/processed/messages",
                      "processed_responses": "cache/processed/responses"}
def install_packages(packages):
    """Install each package with pip into the user site-packages.

    :param list packages: Package specs in the format 'package-name>=version.number'
    :return: Nothing
    """
    for package in packages:
        pip_command = "{} -m pip install {} --user".format(executable, package)
        # call() returns the pip process's exit code; 0 means success.
        retcode = call(pip_command.split(' '))
        # Bug fixes: compare with != (the old `is not 0` identity test is a
        # SyntaxWarning on Python 3.8+ and only works by CPython small-int
        # caching), and report the single failing package instead of the
        # whole list.
        if retcode != 0:
            print(Fore.RED + "return code was {} when trying to install {}".format(retcode, package))
        else:
            print(Fore.GREEN + "installed {}".format(package))
    print(Fore.RESET)
# To create directories
def create_subdirectories(directories, base_dir=None):
    """Create the given subdirectories (each with an ``__init__.py``) under a base path.

    :param list | dict | tuple directories: Subdirectories to create. For a
        dict, the directories are taken from its values.
    :param str base_dir: Absolute base directory under which everything is
        created; defaults to the current working directory *at call time*.
        (The previous default ``os.getcwd()`` was evaluated once at import
        time, so it could go stale if the process changed directory.)
    :return: A list of the directories with ``base_dir`` prepended.
    """
    if base_dir is None:
        base_dir = os.getcwd()
    # Strip the trailing / symbol from the base directory for consistency
    base_dir = base_dir.rstrip('/')
    full_directories = []
    # Try to create the subdirectories
    try:
        # For dicts the directories are the values; anything else is iterated
        # directly.
        for directory in (directories if type(directories) != dict else directories.values()):
            # Full path to the directory, stripped on the right for consistency
            full_dir = base_dir + '/' + directory.rstrip('/')
            full_directories.append(full_dir)
            # If the directory doesn't already exist, create it (makedirs also
            # creates every missing intermediate directory).
            if not exists(full_dir):
                print(Fore.YELLOW + full_dir + " doesn't exist")
                os.makedirs(full_dir)
                print(Fore.GREEN + "Created directory " + full_dir + Fore.RESET)
            # Create the package marker if it is missing.
            if not exists(full_dir + "/__init__.py"):
                with open(full_dir + "/__init__.py", "w") as init:
                    # Note that it was generated by the current file
                    init.write("# This file was generated by {}\n".format(__file__))
                print(Fore.GREEN + "Created "+full_dir+"/__init__.py")
    except PermissionError:
        # Message fix: add the missing space after the user name and correct
        # the spelling of "privileges".
        print(Fore.RED + "Permission Error: " + Fore.RESET + " user " + Fore.YELLOW +
              '{}'.format(getuser()) + Fore.RESET + " has insufficient privileges to create directories.")
    finally:
        print(Fore.RESET)
    return full_directories
# Within the constants file
def obtain_credentials(email):
    """ Tries to obtain the credentials.json file from the user
    :param str email: The user's email, which was inputted during the setup
    :return: 0 if succeeded, or 1 if failed
    :rtype: int
    """
    # NOTE(review): when credentials.json already exists this function falls
    # through and implicitly returns None, not 0 — callers comparing the
    # result to 0 should be aware of that.
    # If we don't have a credentials.json file
    if not exists(os.getcwd() + '/' + needed_directories['config_files'] + '/credentials.json'):
        wait_time = 15
        print(Style.BRIGHT + Back.RED + "IMPORTANT" + Style.RESET_ALL +
              ": In {} seconds, a webpage will open to a page with the download link to the ".format(
                  wait_time) +
              "Credentials site.\n" + (' ' * len('IMPORTANT: ')) + "You must save the file as " +
              Back.LIGHTWHITE_EX + Style.BRIGHT + os.getcwd() + '/' + needed_directories['config_files'] +
              '/' + 'credentials.json' + Style.RESET_ALL + '\n as user ' + Style.BRIGHT+Back.LIGHTWHITE_EX+email+
              Style.RESET_ALL + '\n\n')
        # Import it here so we can refresh all the values that rely on the keys.py file within scraper.py
        import src.scraper as scraper
        # Wait however many seconds so the user has a chance to read the text
        sleep(wait_time)
        try:
            # Try and download the credentials if they don't exist
            scraper.retrieve_credentials(filepath="{}/{}/credentials.json".format(
                os.getcwd(), needed_directories['config_files']), quiet=True)
            # Create token.json file
            scraper.get_gmail_service(
                filepath=os.getcwd() + '/' + needed_directories['config_files'] + '/credentials.json')
            return 0
        except FileNotFoundError:
            print(Fore.RED + "Error" + Style.RESET_ALL + ": credentials file wasn't correctly downloaded.\n"+
                  " Please try downloading the file again and saving it to " + Style.BRIGHT + Back.LIGHTWHITE_EX+
                  os.getcwd() + '/' + needed_directories['config_files'] + '/credentials.json' +Style.RESET_ALL)
            return 1
# Override build_py to be able to execute a command
# Override build_py to be able to execute setup-time initialization.
class my_build_py(build_py):
    def run(self):
        """Initialization routine for setup.py.

        Installs the required packages, creates the cache/config directory
        tree, writes src/configuration_files/keys.py (user id plus one
        variable per entry of needed_directories) and tries to fetch the
        Gmail API credentials.
        :return: Nothing
        """
        print(Fore.CYAN + "Trying to install packages: {}".format(needed_packages))
        print(Fore.RESET)
        # Install the packages as defined in the needed_packages list
        install_packages(needed_packages)
        create_subdirectories(needed_directories)
        '''
        directories = {"config_files": "src/configuration_files",
                       "models": "models",
                       "logdir": "models/logs",
                       "cache_dir": "cache",
                       "message_cache": "cache/messages",
                       "label_cache": "cache/labels",
                       "list_cache": "cache/lists"}
        '''
        try:
            # We will create keys.py
            with open("{}/keys.py".format(needed_directories['config_files']), 'w') as key_file:
                # Ask the user for their email
                email = input("Enter your gmail account: ").replace(" ", "")
                key_file.write("# This file was automatically generated by {}\n".format(__file__))
                key_file.write("user_id = \"" + (email if '@' in email else email + "@gmail.com") + "\"\n")
                print("Setting variables defined in needed_directories")
                # iterate through the keys and values in the needed_directories dict and set them as variables
                for variable, path in needed_directories.items():
                    # Actually write to the key_file the variable name and the value we give it
                    key_file.write('{} = "{}/{}"\n'.format(variable, os.getcwd(), path))
                    print(Fore.GREEN + "Set " + Fore.CYAN + variable + Fore.GREEN +
                          " to '" + Fore.BLUE + path + Fore.RESET + "'")
                print(Fore.GREEN + "Created " + Fore.BLUE + "{}/keys.py".format(needed_directories['config_files'])
                      + Fore.RESET)
            # Bug fix: compare the result with == instead of the identity
            # test `is 0` (a SyntaxWarning on Python 3.8+).
            if obtain_credentials(email) == 0:
                print(Style.BRIGHT + Back.LIGHTWHITE_EX + "credentials.json" +Style.RESET_ALL+
                      Fore.GREEN + " and "+Fore.RESET+Style.BRIGHT + Back.LIGHTWHITE_EX +"token.json"+Style.RESET_ALL +
                      Fore.GREEN + " were successfully created\n" + Style.RESET_ALL )
        except PermissionError:
            # (the previous unused `as PE` binding was dropped)
            print(Fore.RED + "Error: " + Fore.RESET + "user '" + Fore.RED + getuser() + Fore.RESET +
                  "' has insufficient privilages to create files")
        finally:
            print(Fore.RESET)
        build_py.run(self)
# Read the long description up front with a context manager so the file
# handle is closed deterministically (the previous inline open() leaked it).
with open('README.md', mode='r') as readme_file:
    readme_text = readme_file.read()

setup(
    name='HappyMail',
    version='0.7',
    packages=['src',],
    license='MIT License',
    long_description=readme_text,
    cmdclass={'build_py': my_build_py}
)
| 40.783333 | 122 | 0.591745 | 2,697 | 0.275541 | 0 | 0 | 0 | 0 | 0 | 0 | 4,723 | 0.48253 |
80cb754fd4097ddc5ceb99da12f2ad2947dbe655 | 345 | py | Python | project_3/code/characters.py | Psemp/oc_project_11 | 26ee2e607b2ccc768e19d264b5e1da010820fbc5 | [
"MIT"
] | null | null | null | project_3/code/characters.py | Psemp/oc_project_11 | 26ee2e607b2ccc768e19d264b5e1da010820fbc5 | [
"MIT"
] | null | null | null | project_3/code/characters.py | Psemp/oc_project_11 | 26ee2e607b2ccc768e19d264b5e1da010820fbc5 | [
"MIT"
] | null | null | null | from get_char_pos import get_char_position
class Character:
    """A movable game entity with a position, step size, life flag and tag."""

    def __init__(self, x=0, y=0, vel=32, alive=True, tag="str"):
        """Create a character.

        All parameters default to the original hard-coded values, so
        ``Character()`` behaves exactly as before; callers may now supply
        initial values directly instead of assigning attributes afterwards.

        :param x: horizontal position
        :param y: vertical position
        :param vel: movement step per move (previously fixed at 32)
        :param alive: whether the character is still in play
        :param tag: identifier used by callers (e.g. "mac", "guard")
        """
        self.x = x
        self.y = y
        self.vel = vel
        self.alive = alive
        self.tag = tag
# Build the two actors and tag them so get_char_position can tell them apart.
mac = Character()
mac.tag = "mac"
guard = Character()
guard.tag = "guard"
# Resolve each character's starting position via the imported helper.
macpos = get_char_position(mac)
guardpos = get_char_position(guard)
| 15 | 42 | 0.626087 | 152 | 0.44058 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.049275 |
80ccd7e653bf3655ee84b0246e1de697194e67d6 | 2,376 | py | Python | share/regulate/steps/deduplicate.py | CenterForOpenScience/SHARE | c7715af2881f6fa23197d4e7c381d90169a90ed1 | [
"Apache-2.0"
] | 87 | 2015-01-06T18:24:45.000Z | 2021-08-08T07:59:40.000Z | share/regulate/steps/deduplicate.py | fortress-biotech/SHARE | 9c5a05dd831447949fa6253afec5225ff8ab5d4f | [
"Apache-2.0"
] | 442 | 2015-01-01T19:16:01.000Z | 2022-03-30T21:10:26.000Z | share/regulate/steps/deduplicate.py | fortress-biotech/SHARE | 9c5a05dd831447949fa6253afec5225ff8ab5d4f | [
"Apache-2.0"
] | 67 | 2015-03-10T16:32:58.000Z | 2021-11-12T16:33:41.000Z | from share.regulate.steps import GraphStep
class Deduplicate(GraphStep):
"""Look for duplicate nodes and merge/discard them
Example config (YAML):
```yaml
- namespace: share.regulate.steps.graph
name: deduplicate
```
"""
MAX_MERGES = 100
# map from concrete type to set of fields used to dedupe
DEDUPLICATION_CRITERIA = {
# works and agents may be merged if duplicate identifiers are merged
# 'abstractcreativework': {},
# 'abstractagent': {},
'abstractagentworkrelation': {'creative_work', 'agent', 'type'},
'abstractagentrelation': {'subject', 'related', 'type'},
'abstractworkrelation': {'subject', 'related', 'type'},
'workidentifier': {'uri'},
'agentidentifier': {'uri'},
'subject': {'name', 'parent', 'central_synonym'},
'tag': {'name'},
'throughtags': {'tag', 'creative_work'},
# 'award': {},
'throughawards': {'funder', 'award'},
'throughsubjects': {'subject', 'creative_work'},
}
def regulate_graph(self, graph):
# naive algorithm, O(n*m) (n: number of nodes, m: number of merges)
# but merges shouldn't be common, so probably not worth optimizing
count = 0
while self._merge_first_dupe(graph):
count += 1
if count > self.MAX_MERGES:
self.error('Way too many deduplications')
return
def _merge_first_dupe(self, graph):
dupe_index = {}
for node in graph:
node_key = self._get_node_key(node)
if node_key:
other_node = dupe_index.get(node_key)
if other_node:
graph.merge_nodes(node, other_node)
return True
dupe_index[node_key] = node
return False
def _get_node_key(self, node):
criteria = self.DEDUPLICATION_CRITERIA.get(node.concrete_type)
if not criteria:
return None
return (
node.concrete_type,
tuple(
self._get_criterion_value(node, criterion)
for criterion in criteria
)
)
def _get_criterion_value(self, node, criterion_name):
if criterion_name == 'type':
return node.type
return node[criterion_name]
| 33.464789 | 76 | 0.569865 | 2,330 | 0.98064 | 0 | 0 | 0 | 0 | 0 | 0 | 893 | 0.375842 |
80cf4b1cdeb9f8af6f921aa50c5b9f893fb21de0 | 537 | py | Python | mit-ml/reinforcedl.py | stepinski/machinelearning | 1f84883a25616da4cd76bb4655267efd3421e561 | [
"MIT"
] | null | null | null | mit-ml/reinforcedl.py | stepinski/machinelearning | 1f84883a25616da4cd76bb4655267efd3421e561 | [
"MIT"
] | null | null | null | mit-ml/reinforcedl.py | stepinski/machinelearning | 1f84883a25616da4cd76bb4655267efd3421e561 | [
"MIT"
] | null | null | null | import numpy as np
# Discount factor (gamma) and learning rate (alpha); the original
# (misspelled) identifiers are kept to avoid touching behaviour.
gama = 0.5
alfa = 0.75
# Each row is one observed transition (state s, next state s', reward R),
# with states numbered from 1.
data = np.array([[1, 1, 1], [1, 2, -1], [2, 1, 1]]) #(s, s', R)
# Row k of Q holds only the entry updated at step k (row 0 is the all-zero
# initialisation); untouched entries of a row stay 0.
Q = np.zeros((data.shape[0]+1, 2)) #(iterations, |S|)
k = 1
for d in range(data.shape[0]):
    R = data[d, 2] # immediate reward
    idx_s = data[d, 0] - 1 # index of state s in Q
    idx_sp = data[d, 1] - 1 #index of state s' in Q
    # The commented variant bootstraps from the max over all previous
    # estimates of s'; the active line uses only the immediately preceding
    # estimate Q[k-1, s'].
    # Q[k, idx_s] = (1 - alfa) * Q[k - 1, idx_s] + alfa * (R + gama * np.max(Q[0:k, idx_sp]))
    Q[k, idx_s] = (1 - alfa) * Q[k - 1, idx_s] + alfa * (R + gama * Q[k-1, idx_sp])
    k += 1
print(Q) | 35.8 | 93 | 0.50838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.337058 |
80d34d2c062fdfb55cef6a9e4a57d6d6a013448e | 695 | py | Python | database.py | ISProject1/POS | 1d4143d48382f6fc42bdb459a40321ff35efa4b5 | [
"MIT"
] | null | null | null | database.py | ISProject1/POS | 1d4143d48382f6fc42bdb459a40321ff35efa4b5 | [
"MIT"
] | null | null | null | database.py | ISProject1/POS | 1d4143d48382f6fc42bdb459a40321ff35efa4b5 | [
"MIT"
] | null | null | null |
# Menu/label data files; file2 and file3 are defined but never read in this
# snippet.
file1= "db_breakfast_menu.txt"
file2= "db_lunch_menu.txt"
file3= "db_dinner_menu.txt"
file4 = "db_label_text.txt"
# Parallel lists filled below from the data files.
retail = []
title = []
label = []
# Assumes each menu line contains one ":"-separated name field and one
# "$"-separated price field — TODO confirm against the actual data files.
with open(file1, "r") as f:
    data = f.readlines()
    for line in data:
        w = line.split(":")
        title.append(w[1])
    for line in data:
        w = line.split("$")
        price = w[1]
        # NOTE(review): str.rstrip returns a new string, so this line is a
        # no-op; float() below tolerates the trailing newline anyway.
        price.rstrip('\n')
        conv = float(price)
        retail.append(conv)
    # Redundant: the `with` statement already closes the file.
    f.close()
# Joins fields w[1] and w[2] of each ":"-separated line with a newline.
with open (file4, "r") as f:
    data = f.readlines()
    for line in data:
        i = 1
        w = line.split(":")
        string1 = w[i]
        i+=1
        string2 = w[i]
        string = ("%s\n%s" %(string1,string2))
        label.append(string)
    f.close()
| 15.444444 | 43 | 0.535252 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.155396 |
80d37d91e7e3b94e92127c52267fea900fab6810 | 6,072 | py | Python | libvis/scripts/LMOptimizer SE3Optimization Test Jacobian derivation.py | zimengjiang/badslam | 785a2a5a11ce57b09d47ea7ca6a42196a4f12409 | [
"BSD-3-Clause"
] | 541 | 2019-06-16T22:12:49.000Z | 2022-03-31T05:53:56.000Z | libvis/scripts/LMOptimizer SE3Optimization Test Jacobian derivation.py | zimengjiang/badslam | 785a2a5a11ce57b09d47ea7ca6a42196a4f12409 | [
"BSD-3-Clause"
] | 82 | 2019-06-18T06:45:38.000Z | 2022-01-23T00:34:34.000Z | libvis/scripts/LMOptimizer SE3Optimization Test Jacobian derivation.py | zimengjiang/badslam | 785a2a5a11ce57b09d47ea7ca6a42196a4f12409 | [
"BSD-3-Clause"
] | 104 | 2019-06-17T06:42:20.000Z | 2022-03-16T20:51:22.000Z | from sympy import *
# Implementation of QuaternionBase<Derived>::toRotationMatrix(void).
# The quaternion q is given as a list [qw, qx, qy, qz].
def QuaternionToRotationMatrix(q):
    """Convert a quaternion given as a list [qw, qx, qy, qz] into a 3x3
    rotation matrix, mirroring Eigen's
    QuaternionBase<Derived>::toRotationMatrix(void)."""
    w, x, y, z = q[0], q[1], q[2], q[3]
    # Doubled components and their pairwise products, as in the Eigen source.
    dx, dy, dz = 2 * x, 2 * y, 2 * z
    wx, wy, wz = dx * w, dy * w, dz * w
    xx, xy, xz = dx * x, dy * x, dz * x
    yy, yz = dy * y, dz * y
    zz = dz * z
    row0 = [1 - (yy + zz), xy - wz, xz + wy]
    row1 = [xy + wz, 1 - (xx + zz), yz - wx]
    row2 = [xz - wy, yz + wx, 1 - (xx + yy)]
    return Matrix([row0, row1, row2])
# Implementation of SO3Group<Scalar> expAndTheta().
# Only implementing the first case (of very small rotation) since we take the Jacobian at zero.
def SO3exp(omega):
    """Rotation matrix for the SO(3) exponential of the 3-vector *omega*.

    Follows Sophus' SO3Group<Scalar>::expAndTheta(), implementing only its
    small-rotation branch — sufficient because the Jacobian is evaluated at
    zero.
    """
    theta = omega.norm()
    theta_sq = theta**2
    # NOTE(review): half_theta is computed but never used below.
    half_theta = theta / 2
    theta_po4 = theta_sq * theta_sq
    # Series factors for the quaternion's vector and scalar parts (exact
    # rationals so sympy keeps the expressions symbolic).
    imag_factor = Rational(1, 2) - Rational(1, 48) * theta_sq + Rational(1, 3840) * theta_po4;
    real_factor = 1 - Rational(1, 2) * theta_sq + Rational(1, 384) * theta_po4;
    # return SO3Group<Scalar>(Eigen::Quaternion<Scalar>(
    #     real_factor, imag_factor * omega.x(), imag_factor * omega.y(),
    #     imag_factor * omega.z()));
    qw = real_factor
    qx = imag_factor * omega[0]
    qy = imag_factor * omega[1]
    qz = imag_factor * omega[2]
    return QuaternionToRotationMatrix([qw, qx, qy, qz])
# Implementation of SE3Group<Scalar> exp().
# Only implementing the first case (of small rotation) since we take the Jacobian at zero.
def SE3exp(tangent):
    """SE(3) exponential of a 6-vector tangent [translation; rotation].

    Valid only near zero rotation (see SO3exp); returns the 3x4 matrix
    [R | t] with t = R * tangent[0:3].
    """
    rotation = SO3exp(Matrix(tangent[3:6]))
    translation = rotation * Matrix(tangent[0:3])
    return rotation.row_join(translation)
# Main
init_printing(use_unicode=True)
print('Variant 1')
print('')
# Define the tangent vector with symbolic elements T_0 to T_5.
# (For a matrix, use: Matrix(3, 1, lambda i,j:var('S_%d%d' % (i,j))) )
T = Matrix(6, 1, lambda i,j:var('T_%d' % (i)))
# Compute transformation matrix from tangent vector.
T_matrix = SE3exp(T)
# Define the vector current_T * src:
S = Matrix(3, 1, lambda i,j:var('S_%d' % (i)))
# Matrix-vector multiplication with homogeneous vector:
result = T_matrix * S.col_join(Matrix([1]))
# Compute Jacobian:
# (Note: The transpose is needed for stacking the matrix columns (instead of rows) into a vector.)
jac = result.transpose().reshape(result.rows * result.cols, 1).jacobian(T)
# Take Jacobian at zero:
# (Substituting T = 0 evaluates d(exp(T) * S)/dT at the identity transform.)
jac_subs = jac.subs([(T[0], 0), (T[1], 0), (T[2], 0), (T[3], 0), (T[4], 0), (T[5], 0)])
# Simplify and output:
jac_subs_simple = simplify(jac_subs)
pprint(jac_subs_simple)
print('')
print('')
print('Variant 2')
print('')
# Treat the function of which we want to determine the derivative as a list of nested functions.
# This makes it easier to compute the derivative of each part, simplify it, and concatenate the results
# using the chain rule.
### Define the function of which the Jacobian shall be taken ###
# Matrix-vector multiplication with homogeneous vector:
def MatrixVectorMultiplyHomogeneous(matrix, vector):
    # Appends a 1 to the vector so a 3x4 [R | t] matrix applies both
    # rotation and translation.
    return matrix * vector.col_join(Matrix([1]))
# Define the vector current_T * src:
S = Matrix(3, 1, lambda i,j:var('S_%d' % (i)))
# The list of nested functions. They will be evaluated from right to left
# (this is to match the way they would be written in math: f(g(x)).)
functions = [lambda matrix : MatrixVectorMultiplyHomogeneous(matrix, S), SE3exp]
### Define the variables wrt. to take the Jacobian, and the position for evaluation ###
# Chain rule:
# d(f(g(x))) / dx = (df/dy)(g(x)) * dg/dx
# Define the parameter with respect to take the Jacobian, y in the formula above:
parameters = Matrix(6, 1, lambda i,j:var('T_%d' % (i)))
# Set the position at which to take the Jacobian, g(x) in the formula above:
parameter_values = zeros(6, 1)
### Automatic Jacobian calculation, no need to modify anything beyond this point ###
# Jacobian from previous step, dg/dx in the formula above:
# (starts as the scalar 1, the identity for the chain-rule product)
previous_jacobian = 1
# TODO: Test whether this works with non-matrix functions.
def ComputeValueAndJacobian(function, parameters, parameter_values):
    """Return (function(parameter_values), Jacobian of function at that point).

    The Jacobian is taken of the column-stacked symbolic output with respect
    to the column-stacked parameters, then evaluated by substituting each
    symbol with its value and simplified.
    """
    # Evaluate the function.
    values = function(parameter_values)
    # Compute the Jacobian.
    symbolic_values = function(parameters)
    symbolic_values_vector = symbolic_values.transpose().reshape(symbolic_values.rows * symbolic_values.cols, 1)
    parameters_vector = parameters.transpose().reshape(parameters.rows * parameters.cols, 1)
    jacobian = symbolic_values_vector.jacobian(parameters_vector)
    # Set in the evaluation point.
    for row in range(0, parameters.rows):
        for col in range(0, parameters.cols):
            jacobian = jacobian.subs(parameters[row, col], parameter_values[row, col])
    # Simplify the jacobian.
    jacobian = simplify(jacobian)
    return (values, jacobian)
# Print info about initial state.
print('Taking the Jacobian of these functions (sorted from inner to outer):')
for i in range(len(functions) - 1, -1, -1):
    print(str(functions[i]))
print('with respect to:')
pprint(parameters)
print('at position:')
pprint(parameter_values)
print('')
# Loop over all functions:
# (iterates the list right-to-left, i.e. from the innermost function outward)
for i in range(len(functions) - 1, -1, -1):
    # Compute value and Jacobian of this function.
    (values, jacobian) = ComputeValueAndJacobian(functions[i], parameters, parameter_values)
    # Update parameter_values
    parameter_values = values
    # Update parameters (create a new symbolic vector of the same size as parameter_values)
    parameters = Matrix(values.rows, values.cols, lambda i,j:var('T_%d%d' % (i,j)))
    # Concatenate this Jacobian with the previous one according to the chain rule:
    previous_jacobian = jacobian * previous_jacobian
    # Print intermediate result
    print('Intermediate step ' + str(len(functions) - i) + ', for ' + str(functions[i]))
    print('Position after function evaluation (function value):')
    pprint(parameter_values)
    print('Jacobian of this function wrt. its input only:')
    pprint(jacobian)
    print('Cumulative Jacobian wrt. the innermost parameter:')
    pprint(previous_jacobian)
    print('')
# Print final result
print('Final result:')
pprint(previous_jacobian)
| 33 | 110 | 0.69697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,821 | 0.464592 |
80d50d6044510b122ab7b7a8eb1e2e582f2b674a | 2,286 | py | Python | train_cnn.py | hee9joon/Single-Image-Super-Resolution | f4622b179943e8e52582971bb0f976406ae4d374 | [
"MIT"
] | 5 | 2021-02-27T13:13:08.000Z | 2022-03-24T01:54:20.000Z | train_cnn.py | hee9joon/Single-Image-Super-Resolution | f4622b179943e8e52582971bb0f976406ae4d374 | [
"MIT"
] | 2 | 2021-02-27T13:13:57.000Z | 2021-03-12T04:45:49.000Z | train_cnn.py | hee9joon/Single-Image-Super-Resolution | f4622b179943e8e52582971bb0f976406ae4d374 | [
"MIT"
] | 1 | 2021-03-18T15:03:58.000Z | 2021-03-18T15:03:58.000Z | import os
import numpy as np
import warnings
warnings.filterwarnings('ignore')
import torch
import torch.nn as nn
from torchvision.utils import save_image
from utils import get_lr_scheduler, sample_images, inference
# Reproducibility #
# Force deterministic cuDNN kernels and disable autotuning so runs repeat.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Device Configuration #
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def train_srcnns(train_loader, val_loader, model, device, args):
    """Train a CNN super-resolution model with L1 loss and Adam.

    Periodically prints statistics, saves sample images, steps the LR
    scheduler once per epoch, and checkpoints weights every
    ``args.save_every`` epochs.
    """
    # Loss Function #
    criterion = nn.L1Loss()
    # Optimizers #
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.5, 0.999))
    optimizer_scheduler = get_lr_scheduler(optimizer=optimizer, args=args)
    # Lists #
    # NOTE(review): `losses` is never cleared, so the printed value is a
    # running average over the entire training history, not the current epoch.
    losses = list()
    # Train #
    print("Training {} started with total epoch of {}.".format(str(args.model).upper(), args.num_epochs))
    for epoch in range(args.num_epochs):
        for i, (high, low) in enumerate(train_loader):
            # Data Preparation #
            high = high.to(device)
            low = low.to(device)
            # Forward Data #
            generated = model(low)
            # Calculate Loss #
            loss = criterion(generated, high)
            # Initialize Optimizer #
            optimizer.zero_grad()
            # Back Propagation and Update #
            loss.backward()
            optimizer.step()
            # Add items to Lists #
            losses.append(loss.item())
            # Print Statistics #
            if (i+1) % args.print_every == 0:
                print("{} | Epoch [{}/{}] | Iterations [{}/{}] | Loss {:.4f}"
                      .format(str(args.model).upper(), epoch+1, args.num_epochs, i+1, len(train_loader), np.average(losses)))
                # Save Sample Images #
                sample_images(val_loader, args.batch_size, args.upscale_factor, model, epoch, args.samples_path, device)
        # Adjust Learning Rate #
        optimizer_scheduler.step()
        # Save Model Weights and Inference #
        if (epoch+1) % args.save_every == 0:
            torch.save(model.state_dict(), os.path.join(args.weights_path, '{}_Epoch_{}.pkl'.format(model.__class__.__name__, epoch+1)))
inference(val_loader, model, args.upscale_factor, epoch, args.inference_path, device) | 31.75 | 136 | 0.61986 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 461 | 0.201662 |
80d55cb9db024badc082226294afa5c0ac752eba | 318 | py | Python | 8_kyu/Finish_Guess_the_Number_Game.py | UlrichBerntien/Codewars-Katas | bbd025e67aa352d313564d3862db19fffa39f552 | [
"MIT"
] | null | null | null | 8_kyu/Finish_Guess_the_Number_Game.py | UlrichBerntien/Codewars-Katas | bbd025e67aa352d313564d3862db19fffa39f552 | [
"MIT"
] | null | null | null | 8_kyu/Finish_Guess_the_Number_Game.py | UlrichBerntien/Codewars-Katas | bbd025e67aa352d313564d3862db19fffa39f552 | [
"MIT"
] | null | null | null | class Guesser:
    def __init__(self, number, lives):
        # The secret number to guess and the remaining number of wrong
        # guesses allowed.
        self.number = number
        self.lives = lives
    def guess(self,n):
        # Guessing with no lives left is a hard failure.
        if self.lives < 1:
            raise Exception("Omae wa mo shindeiru")
        match = n == self.number
        # A wrong guess costs one life; a correct guess is free.
        if not match:
            self.lives -= 1
return match | 26.5 | 51 | 0.537736 | 318 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.069182 |
80d976b647bf6411a96e4d01f4938331a5d6bf01 | 5,704 | py | Python | GGL_LensCats/CREATE_FITS_HEALPIX_MASKS/BOSS_2dFLenS_healpix_masks.py | KiDS-WL/Cat_to_Obs_K1000_P1 | 0de7f79cab150416859ffe58ac2d0f5659aedb5d | [
"MIT"
] | 7 | 2020-11-18T12:58:03.000Z | 2021-07-01T08:54:29.000Z | GGL_LensCats/CREATE_FITS_HEALPIX_MASKS/BOSS_2dFLenS_healpix_masks.py | KiDS-WL/Cat_to_Obs_K1000_P1 | 0de7f79cab150416859ffe58ac2d0f5659aedb5d | [
"MIT"
] | null | null | null | GGL_LensCats/CREATE_FITS_HEALPIX_MASKS/BOSS_2dFLenS_healpix_masks.py | KiDS-WL/Cat_to_Obs_K1000_P1 | 0de7f79cab150416859ffe58ac2d0f5659aedb5d | [
"MIT"
] | 3 | 2020-12-09T13:30:22.000Z | 2022-03-02T01:40:13.000Z |
##############################
## MFP_K1000.py ##
## Chieh-An Lin ##
## Version 2020.03.25 ##
##############################
import os
import os.path as osp
import time
import subprocess as spc
import numpy as np
import scipy as sp
import astropy.io.fits as fits
import healpy as hp
import treecorr as tree
import commonFunctions as cf
import HEALPixFunctions as hpf
################################################################################
## Parameters
class Parameters:
    """Shared path constants, survey areas and galaxy number densities for
    the K1000 footprint/mask scripts below."""
    KiDSPath = 'data/KiDS/'
    dataPath = 'data/mockFootprint/'
    absDataPath = '/disk05/calin/91_Data/mockFootprint/'
    ## Mask parameters
    area_BOSS = 9329 ## [deg^2]
    area_BOSS_reduced = 1274.319868 ## From my own calculations
    area_BOSS_wcs = 408.321
    area_BOSS_4Band = 339.298
    area_BOSS_9Band = 319.506
    area_2dFLenS_SGP = 510.803964 ## [deg^2]
    area_2dFLenS_wcs = 424.508017
    area_2dFLenS_gri = 355.283139
    area_2dFLenS_9Band = 341.888289
    area_KiDS = 773.286 ## [deg^2]
    area_KiDS_North = 334.138
    area_KiDS_South = 439.148
    area_KiDS_North_new = 371.801
    area_KiDS_South_new = 401.485
    ## Galaxy number density
    n_gal_BOSS_reduced_z0 = 0.014496
    n_gal_BOSS_reduced_z1 = 0.016595
    n_gal_BOSS_wcs_z0 = 0.014437
    n_gal_BOSS_wcs_z1 = 0.016265
    n_gal_2dFLenS_SGP_z0 = 0.005813
    n_gal_2dFLenS_SGP_z1 = 0.006067
    n_gal_2dFLenS_wcs_z0 = 0.005857
    n_gal_2dFLenS_wcs_z1 = 0.006031
    n_gal_2dFLenS_gri_z0 = 0.002891
    n_gal_2dFLenS_gri_z1 = 0.003677
################################################################################
## Functions related to masks - I
## This function load BOSS random catalogues
def loadFitsLenCat(surveyTag, zInd, bitMaskTag='reduced'):
    """Load a lens catalogue FITS table and drop rows flagged by the KiDS mask.

    Rows whose KIDSMASK value shares any bit with the selected bitMask are
    removed; a bitMask of 0 keeps everything.
    """
    P = Parameters()
    if bitMaskTag in ['all', 'reduced', 'SGP']: ## No selection
        bitMask = 000000
    elif bitMaskTag == 'wcs': ## KiDS wcs
        bitMask = 0x4000
    elif bitMaskTag == 'gri':
        bitMask = 0x6FFC ## KiDS gri overlap
    elif bitMaskTag == '9Band':
        bitMask = 0x681C ## KiDS 9-band overlap
    else:
        raise ValueError('Bad bit mask option: \"%s\"' % bitMaskTag)
    # zInd is 0-based; the file names use 1-based redshift bins.
    name = '%sKiDS-1000_GGLCATS/%s_z%d.fits' % (P.KiDSPath, surveyTag, zInd+1)
    data = fits.getdata(name, 1)
    print('Loaded \"%s\"' % name)
    flag = data.field('KIDSMASK')
    # Keep rows whose flags do not intersect the bit mask.
    ind = np.logical_not(np.array(flag.astype(int) & bitMask, dtype=bool))
    return data[ind]
## This function loads BOSS random catalogues & pour them onto a HEALPix map.
## This function loads BOSS random catalogues & pours them onto a HEALPix map.
def saveFitsCountMap_BOSS(nside, bitMaskTag='wcs'):
    """Build a full-sky HEALPix galaxy-count map from both BOSS redshift bins
    and save it as a FITS file."""
    P = Parameters()
    nbPix = 12 * nside * nside
    full = np.zeros(nbPix, dtype=int)
    ## Fill catalogues
    for zInd in range(2):
        data = loadFitsLenCat('BOSS_random', zInd, bitMaskTag=bitMaskTag)
        RA = data.field('ALPHA_J2000')
        DEC = data.field('DELTA_J2000')
        pix = hpf.RADECToPatch(nside, RA, DEC)
        # Accumulate counts in one vectorized pass; np.add.at performs an
        # unbuffered in-place add, so repeated pixel indices are each counted
        # (identical to the previous Python-level `for i in pix` loop, but
        # O(len(pix)) in C instead of interpreted).
        np.add.at(full, pix, 1)
    ## Save
    name = '%sKiDS-1000_for_mocks/countMap_BOSS_%s_nside%d.fits' % (P.KiDSPath, bitMaskTag, nside)
    hpf.saveFitsFullMap(name, full, verbose=True)
    return
def saveFitsCountMap_overlap(surveyTag_K, surveyTag_L, nside_L):
    """Upsample a lens count map to nside 4096, zero it outside the KiDS
    area mask, and save the result as the overlap count map."""
    P = Parameters()
    nside_K = 4096
    name = '%sKiDS-1000_for_mocks/countMap_%s_nside%d.fits' % (P.KiDSPath, surveyTag_L, nside_L)
    count_L = hpf.loadFitsFullMap(name)
    count_L = hpf.increaseResolution(count_L, nside_K)
    name = '%sKiDS-1000_for_mocks/mask_%s_fromArea_nside%d.fits' % (P.KiDSPath, surveyTag_K, nside_K)
    mask_K = hpf.loadFitsFullMap(name)
    ind = mask_K.astype(bool)
    # Free the large intermediate maps as soon as possible.
    del mask_K
    count_L[~ind] = 0
    del ind
    ## Save
    surveyTag_o = 'BOSS_KiDS_overlap' if 'BOSS' in surveyTag_L else '2dFLenS_KiDS_overlap'
    name = '%sKiDS-1000_for_mocks/countMap_%s_nside%d.fits' % (P.KiDSPath, surveyTag_o, nside_K)
    hpf.saveFitsFullMap(name, count_L)
    del count_L
    return
## 'BOSS_wcs' is called
def saveFitsMask_fromCountMap(surveyTag):
    """Binarize a count map (any count >= 1 becomes 1) into a mask and save it.

    Maps stored at nside 2048 are upsampled to 4096 first and saved under a
    different file-name pattern; 4096 maps are saved directly.
    """
    P = Parameters()
    if surveyTag == 'BOSS_reduced':
        nside = 2048
    elif surveyTag == 'BOSS_wcs':
        nside = 2048
    elif surveyTag == '2dFLenS_SGP':
        nside = 4096
    elif surveyTag == '2dFLenS_wcs':
        nside = 4096
    else:
        raise NotImplementedError('surveyTag = \"%s\" not implemented' % surveyTag)
    name = '%sKiDS-1000_for_mocks/countMap_%s_nside%d.fits' % (P.KiDSPath, surveyTag, nside)
    mask = hpf.loadFitsFullMap(name)
    # Clip counts to {0, 1} to turn the count map into a binary mask.
    mask = np.fmin(mask, 1)
    if nside == 2048:
        nside2 = 4096
        mask = hpf.increaseResolution(mask, nside2)
        name = '%sKiDS-1000_for_mocks/mask_%s_fromCountMap2048_nside%d.fits' % (P.KiDSPath, surveyTag, nside2)
        hpf.saveFitsFullMap(name, mask)
        return
    ## Save
    name = '%sKiDS-1000_for_mocks/mask_%s_fromCountMap_nside%d.fits' % (P.KiDSPath, surveyTag, nside)
    hpf.saveFitsFullMap(name, mask)
    return
# This function combines the 2dFLenS mask and BOSS mask into one
# This function combines the 2dFLenS mask and BOSS mask into one
def saveFitsLensMask():
    """Merge the BOSS and 2dFLenS binary masks into a single lens mask."""
    P = Parameters()
    bossMask = hpf.loadFitsFullMap(
        '%sKiDS-1000_for_mocks/mask_BOSS_wcs_fromCountMap2048_nside4096.fits' % P.KiDSPath)
    twoDFMask = hpf.loadFitsFullMap(
        '%sKiDS-1000_for_mocks/mask_2dFLenS_wcs_fromCountMap_nside4096.fits' % P.KiDSPath)
    # Union of the two footprints, clipped back to 0/1.
    combined = np.fmin(bossMask + twoDFMask, 1)
    hpf.saveFitsFullMap(
        '%sKiDS-1000_for_mocks/mask_BOSS_2dFLenS_wcs_nside4096.fits' % P.KiDSPath, combined)
    return
## Then I called the following & used the output of the 2nd line
## saveFitsCountMap_BOSS(2048, 'wcs') ## Need external
## saveFitsMask_fromCountMap('BOSS_wcs')
###############################################################################
| 30.340426 | 106 | 0.648142 | 1,074 | 0.188289 | 0 | 0 | 0 | 0 | 0 | 0 | 1,954 | 0.342567 |
80d9b6be298e2345e53f3894aadc55c0241856e5 | 667 | py | Python | 2019/try/simple.py | rishidevc/stkovrflw | c33dffbce887f32f609a10dd717d594390ceac8b | [
"MIT"
] | null | null | null | 2019/try/simple.py | rishidevc/stkovrflw | c33dffbce887f32f609a10dd717d594390ceac8b | [
"MIT"
] | 5 | 2020-05-04T03:11:14.000Z | 2021-06-10T20:20:38.000Z | 2019/try/simple.py | rishidevc/stkovrflw | c33dffbce887f32f609a10dd717d594390ceac8b | [
"MIT"
] | 1 | 2019-07-31T18:28:34.000Z | 2019-07-31T18:28:34.000Z | def get_assign(user_input):
# Parse an assignment command of the form "<name> gets <int>":
# store the value under the stripped name in the module-level my_dict,
# then echo the whole dict.
key, value = user_input.split("gets")
key = key.strip()
value = int(value.strip())
my_dict[key] = value
print(my_dict)
def add_values(num1, num2):
    """Return the sum of num1 and num2."""
    total = num1 + num2
    return total
print("Welcome to the Adder REPL.")
my_dict = dict()
# Tiny REPL: "<name> gets <int>", "input", "<a> adds <b>", "print", "quit".
while True:
    user_input = input("???")
    if 'gets' in user_input:
        get_assign(user_input)
    if 'input' in user_input:
        # BUG FIX: input_assign() was never defined (NameError at runtime);
        # read a variable name and an integer value inline instead.
        print("Enter a value for :")
        name = input().strip()
        my_dict[name] = int(input())
    if 'adds' in user_input:
        # BUG FIX: the parsed operands were previously discarded. Resolve each
        # side as a stored variable when present, else as an integer literal,
        # and print their sum.
        a, b = (part.strip() for part in user_input.split("adds"))
        lhs = my_dict[a] if a in my_dict else int(a)
        rhs = my_dict[b] if b in my_dict else int(b)
        print(add_values(lhs, rhs))
    if 'print' in user_input:
        # BUG FIX: print_values() was never defined; show the stored variables.
        print(my_dict)
    if 'quit' in user_input:
        print("GoodBye")
exit() | 17.552632 | 41 | 0.590705 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.16042 |
80d9debe23c9db0ad1b1b6b68cf7cfe21670c087 | 794 | py | Python | mecc/middleware.py | unistra/eva | 9f7bd8c44edbca05eb45b36cb5b8e658e53bc3c0 | [
"Apache-2.0"
] | null | null | null | mecc/middleware.py | unistra/eva | 9f7bd8c44edbca05eb45b36cb5b8e658e53bc3c0 | [
"Apache-2.0"
] | 3 | 2021-03-19T10:36:10.000Z | 2021-09-08T01:37:47.000Z | mecc/middleware.py | unistra/eva | 9f7bd8c44edbca05eb45b36cb5b8e658e53bc3c0 | [
"Apache-2.0"
] | null | null | null | from django.contrib.auth.models import User
from mecc.apps.years.models import UniversityYear
class UsefullDisplay(object):
    """Middleware attaching a `display` dict (shown user + current year) to requests."""

    def process_request(self, request):
        # always sent real user in order e.g. to display last first name
        session = request.session
        if session.get('is_spoofed_user'):
            shown_user = User.objects.get(username=session['real_username'])
        else:
            shown_user = request.user
        request.display = {'user': shown_user}
        # give current year
        y = UniversityYear.objects.filter(is_target_year=True).first()
        # NOTE(review): the fallback label uses a backslash continuation inside
        # the string literal; kept verbatim from the original.
        c = "%s/%s" % (y.code_year, y.code_year + 1) if y is not None else ":\
aucune année selectionnée"
        request.display.update({'current_year': c})

    def process_response(self, request, response):
        """Pass the response through unchanged."""
        return response
| 34.521739 | 78 | 0.649874 | 699 | 0.878141 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.233668 |
80d9f8b742cb13b15fbc315b930cd55547e4866b | 4,031 | py | Python | solitude/_commandline/main.py | incerto-crypto/solitude | 1b21a2ca4912da212d413322953ceb4ec2983c17 | [
"BSD-3-Clause"
] | 7 | 2019-03-25T21:48:42.000Z | 2022-02-25T08:21:35.000Z | solitude/_commandline/main.py | incerto-crypto/solitude | 1b21a2ca4912da212d413322953ceb4ec2983c17 | [
"BSD-3-Clause"
] | null | null | null | solitude/_commandline/main.py | incerto-crypto/solitude | 1b21a2ca4912da212d413322953ceb4ec2983c17 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2019, Solitude Developers
#
# This source code is licensed under the BSD-3-Clause license found in the
# COPYING file in the root directory of this source tree
from typing import List, Tuple # noqa
import sys
import os
import argparse
import datetime
import binascii
import json
from solitude.common import (update_global_config, read_yaml_or_json, read_config_file)
from solitude.common.errors import CLIError
def _update_global_config_from_file(path):
    """Read a YAML/JSON config file and merge it into the global configuration."""
    update_global_config(read_yaml_or_json(path))
def txhash_type(txhash):
    """argparse type: decode a 0x-prefixed hex string into raw bytes.

    Raises CLIError for a missing prefix or invalid hex digits.
    """
    if txhash.startswith("0x"):
        try:
            return binascii.unhexlify(txhash[2:])
        except ValueError:
            # binascii.Error is a ValueError subclass: bad or odd-length hex.
            pass
    raise CLIError("TXHASH format must be a hex string prefixed with 0x")
def create_parser():
    """Build the argparse CLI: global options plus one subparser per command."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-g", "--global-config", dest="global_config", type=str,
        default="resource://global_config.json",
        help="Global configuration file")
    parser.add_argument(
        "-c", "--config", type=str,
        default="./solitude.yaml",
        help="Project configuration file")
    sub = parser.add_subparsers()

    # create subparsers (declared first so --help lists them in this order)
    p_init = sub.add_parser("init")
    p_install = sub.add_parser("install")
    p_compile = sub.add_parser("compile")
    p_debug = sub.add_parser("debug")
    p_trace = sub.add_parser("trace")
    p_lint = sub.add_parser("lint")
    p_server = sub.add_parser("server")

    # Each subcommand stores a zero-argument loader under `module`; the command
    # implementation is imported lazily, only when the command actually runs.
    def load_init():
        from solitude._commandline import cmd_init
        return cmd_init

    p_init.set_defaults(module=load_init)

    def load_install():
        from solitude._commandline import cmd_install
        return cmd_install

    p_install.set_defaults(module=load_install)

    def load_compile():
        from solitude._commandline import cmd_compile
        return cmd_compile

    p_compile.set_defaults(module=load_compile)

    def load_debug():
        from solitude._commandline import cmd_debug
        return cmd_debug

    p_debug.set_defaults(module=load_debug)
    p_debug.add_argument(
        "txhash", type=txhash_type,
        help="Transaction hash, a hex string prefixed with 0x")
    p_debug.add_argument(
        "--eval-command", "-ex", action="append", help="Execute command at start", dest="ex")

    def load_trace():
        from solitude._commandline import cmd_trace
        return cmd_trace

    p_trace.set_defaults(module=load_trace)
    p_trace.add_argument("txhash", type=txhash_type)
    for flag in ("--variables", "--frames", "--stack", "--memory", "--storage"):
        p_trace.add_argument(flag, action="store_true")

    def load_lint():
        from solitude._commandline import cmd_lint
        return cmd_lint

    p_lint.set_defaults(module=load_lint)
    p_lint.add_argument(
        "--report",
        help="Path to report (enable report output mode)")
    p_lint.add_argument(
        "--report-template", dest="report_template",
        help="Path to report template",
        default="resource://report.filemessage.default.html")

    def load_server():
        from solitude._commandline import cmd_server
        return cmd_server

    p_server.set_defaults(module=load_server)
    p_server.add_argument(
        "--port", type=int, default=0, help="Override server port")

    return parser
def main():
    """CLI entry point: parse args, load the global config, run the subcommand.

    Returns a process exit code (0 on success, 1 on error or missing command).
    """
    parser = create_parser()
    args = parser.parse_args()
    if not hasattr(args, "module"):
        # No subcommand was given.
        parser.print_help()
        return 1
    try:
        _update_global_config_from_file(args.global_config)
        args.module().main(args)
    except CLIError as err:
        print("Error: %s" % str(err), file=sys.stderr)
        return 1
    return 0


if __name__ == "__main__":
    sys.exit(main())
| 30.537879 | 93 | 0.685686 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 935 | 0.231952 |
80db565e93e8ec6a3e32ef24bd87723013e86005 | 8,308 | py | Python | pypy/module/cpyext/typeobjectdefs.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | 1 | 2020-01-21T11:10:51.000Z | 2020-01-21T11:10:51.000Z | pypy/module/cpyext/typeobjectdefs.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | null | null | null | pypy/module/cpyext/typeobjectdefs.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | null | null | null | from pypy.rpython.lltypesystem import rffi, lltype
from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void
from pypy.module.cpyext.api import (cpython_struct, Py_ssize_t, Py_ssize_tP,
PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP,
Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE)
from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref
from pypy.module.cpyext.modsupport import PyMethodDef
# Short aliases used throughout the declarations below.
P, FT, PyO = Ptr, FuncType, PyObject
# Pointer to a length-less array of PyObject* (used by the coercion slot).
PyOPtr = Ptr(lltype.Array(PyO, hints={'nolength': True}))

# Low-level function-pointer types mirroring CPython's type-slot signatures:
# each P(FT([arg types], result type)) declares one C function pointer type.
freefunc = P(FT([rffi.VOIDP], Void))
destructor = P(FT([PyO], Void))
printfunc = P(FT([PyO, FILEP, rffi.INT_real], rffi.INT))
getattrfunc = P(FT([PyO, rffi.CCHARP], PyO))
getattrofunc = P(FT([PyO, PyO], PyO))
setattrfunc = P(FT([PyO, rffi.CCHARP, PyO], rffi.INT_real))
setattrofunc = P(FT([PyO, PyO, PyO], rffi.INT_real))
cmpfunc = P(FT([PyO, PyO], rffi.INT_real))
reprfunc = P(FT([PyO], PyO))
hashfunc = P(FT([PyO], lltype.Signed))
richcmpfunc = P(FT([PyO, PyO, rffi.INT_real], PyO))
getiterfunc = P(FT([PyO], PyO))
iternextfunc = P(FT([PyO], PyO))
descrgetfunc = P(FT([PyO, PyO, PyO], PyO))
descrsetfunc = P(FT([PyO, PyO, PyO], rffi.INT_real))
initproc = P(FT([PyO, PyO, PyO], rffi.INT_real))
newfunc = P(FT([PyTypeObjectPtr, PyO, PyO], PyO))
allocfunc = P(FT([PyTypeObjectPtr, Py_ssize_t], PyO))
# Number/sequence/mapping protocol slot signatures.
unaryfunc = P(FT([PyO], PyO))
binaryfunc = P(FT([PyO, PyO], PyO))
ternaryfunc = P(FT([PyO, PyO, PyO], PyO))
inquiry = P(FT([PyO], rffi.INT_real))
lenfunc = P(FT([PyO], Py_ssize_t))
coercion = P(FT([PyOPtr, PyOPtr], rffi.INT_real))
intargfunc = P(FT([PyO, rffi.INT_real], PyO))
intintargfunc = P(FT([PyO, rffi.INT_real, rffi.INT], PyO))
ssizeargfunc = P(FT([PyO, Py_ssize_t], PyO))
ssizessizeargfunc = P(FT([PyO, Py_ssize_t, Py_ssize_t], PyO))
intobjargproc = P(FT([PyO, rffi.INT_real, PyO], rffi.INT))
intintobjargproc = P(FT([PyO, rffi.INT_real, rffi.INT, PyO], rffi.INT))
ssizeobjargproc = P(FT([PyO, Py_ssize_t, PyO], rffi.INT_real))
ssizessizeobjargproc = P(FT([PyO, Py_ssize_t, Py_ssize_t, PyO], rffi.INT_real))
objobjargproc = P(FT([PyO, PyO, PyO], rffi.INT_real))
objobjproc = P(FT([PyO, PyO], rffi.INT_real))
# Garbage-collection traversal and descriptor accessor signatures.
visitproc = P(FT([PyO, rffi.VOIDP], rffi.INT_real))
traverseproc = P(FT([PyO, visitproc, rffi.VOIDP], rffi.INT_real))
getter = P(FT([PyO, rffi.VOIDP], PyO))
setter = P(FT([PyO, PyO, rffi.VOIDP], rffi.INT_real))
wrapperfunc = P(FT([PyO, PyO, rffi.VOIDP], PyO))
wrapperfunc_kwds = P(FT([PyO, PyO, rffi.VOIDP, PyO], PyO))
# Old-style buffer protocol signatures.
readbufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t))
writebufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t))
segcountproc = P(FT([PyO, Py_ssize_tP], Py_ssize_t))
charbufferproc = P(FT([PyO, Py_ssize_t, rffi.CCHARPP], Py_ssize_t))
## We don't support new buffer interface for now
getbufferproc = rffi.VOIDP
releasebufferproc = rffi.VOIDP
# C struct declarations mirroring CPython's method-suite tables.

# Entry of a tp_getset table: one computed attribute descriptor.
PyGetSetDef = cpython_struct("PyGetSetDef", (
    ("name", rffi.CCHARP),
    ("get", getter),
    ("set", setter),
    ("doc", rffi.CCHARP),
    ("closure", rffi.VOIDP),
    ))

# Number protocol slots (tp_as_number).
PyNumberMethods = cpython_struct("PyNumberMethods", (
    ("nb_add", binaryfunc),
    ("nb_subtract", binaryfunc),
    ("nb_multiply", binaryfunc),
    ("nb_divide", binaryfunc),
    ("nb_remainder", binaryfunc),
    ("nb_divmod", binaryfunc),
    ("nb_power", ternaryfunc),
    ("nb_negative", unaryfunc),
    ("nb_positive", unaryfunc),
    ("nb_absolute", unaryfunc),
    ("nb_nonzero", inquiry),
    ("nb_invert", unaryfunc),
    ("nb_lshift", binaryfunc),
    ("nb_rshift", binaryfunc),
    ("nb_and", binaryfunc),
    ("nb_xor", binaryfunc),
    ("nb_or", binaryfunc),
    ("nb_coerce", coercion),
    ("nb_int", unaryfunc),
    ("nb_long", unaryfunc),
    ("nb_float", unaryfunc),
    ("nb_oct", unaryfunc),
    ("nb_hex", unaryfunc),
    ("nb_inplace_add", binaryfunc),
    ("nb_inplace_subtract", binaryfunc),
    ("nb_inplace_multiply", binaryfunc),
    ("nb_inplace_divide", binaryfunc),
    ("nb_inplace_remainder", binaryfunc),
    ("nb_inplace_power", ternaryfunc),
    ("nb_inplace_lshift", binaryfunc),
    ("nb_inplace_rshift", binaryfunc),
    ("nb_inplace_and", binaryfunc),
    ("nb_inplace_xor", binaryfunc),
    ("nb_inplace_or", binaryfunc),
    ("nb_floor_divide", binaryfunc),
    ("nb_true_divide", binaryfunc),
    ("nb_inplace_floor_divide", binaryfunc),
    ("nb_inplace_true_divide", binaryfunc),
    ("nb_index", unaryfunc),
    ))

# Sequence protocol slots (tp_as_sequence).
PySequenceMethods = cpython_struct("PySequenceMethods", (
    ("sq_length", lenfunc),
    ("sq_concat", binaryfunc),
    ("sq_repeat", ssizeargfunc),
    ("sq_item", ssizeargfunc),
    ("sq_slice", ssizessizeargfunc),
    ("sq_ass_item", ssizeobjargproc),
    ("sq_ass_slice", ssizessizeobjargproc),
    ("sq_contains", objobjproc),
    ("sq_inplace_concat", binaryfunc),
    ("sq_inplace_repeat", ssizeargfunc),
    ))

# Mapping protocol slots (tp_as_mapping).
PyMappingMethods = cpython_struct("PyMappingMethods", (
    ("mp_length", lenfunc),
    ("mp_subscript", binaryfunc),
    ("mp_ass_subscript", objobjargproc),
    ))

# Old-style buffer protocol slots (tp_as_buffer); the two new-interface
# entries are opaque because the new buffer API is not supported here.
PyBufferProcs = cpython_struct("PyBufferProcs", (
    ("bf_getreadbuffer", readbufferproc),
    ("bf_getwritebuffer", writebufferproc),
    ("bf_getsegcount", segcountproc),
    ("bf_getcharbuffer", charbufferproc),
    ("bf_getbuffer", getbufferproc),
    ("bf_releasebuffer", releasebufferproc),
    ))

# Entry of a tp_members table: one plain C-struct-backed attribute.
PyMemberDef = cpython_struct("PyMemberDef", (
    ("name", rffi.CCHARP),
    ("type", rffi.INT_real),
    ("offset", Py_ssize_t),
    ("flags", rffi.INT_real),
    ("doc", rffi.CCHARP),
    ))
# These fields are supported and used in different ways
# The following comments mean:
# #E essential, initialized for all PTOs
# #S supported
# #U unsupported
# #N not yet implemented
PyTypeObjectFields = []
PyTypeObjectFields.extend(PyVarObjectFields)
PyTypeObjectFields.extend([
("tp_name", rffi.CCHARP), #E For printing, in format "<module>.<name>"
("tp_basicsize", Py_ssize_t), #E For allocation
("tp_itemsize", Py_ssize_t), #E "
# Methods to implement standard operations
("tp_dealloc", destructor), #E
("tp_print", printfunc), #U
("tp_getattr", getattrfunc), #U
("tp_setattr", setattrfunc), #U
("tp_compare", cmpfunc), #N
("tp_repr", reprfunc), #N
# Method suites for standard classes
("tp_as_number", Ptr(PyNumberMethods)), #N
("tp_as_sequence", Ptr(PySequenceMethods)), #N
("tp_as_mapping", Ptr(PyMappingMethods)), #N
# More standard operations (here for binary compatibility)
("tp_hash", hashfunc), #N
("tp_call", ternaryfunc), #N
("tp_str", reprfunc), #N
("tp_getattro", getattrofunc),#N
("tp_setattro", setattrofunc),#N
# Functions to access object as input/output buffer
("tp_as_buffer", Ptr(PyBufferProcs)), #U
# Flags to define presence of optional/expanded features
("tp_flags", lltype.Signed), #E
("tp_doc", rffi.CCHARP), #N Documentation string
# Assigned meaning in release 2.0
# call function for all accessible objects
("tp_traverse", traverseproc),#U
# delete references to contained objects
("tp_clear", inquiry), #U
# Assigned meaning in release 2.1
# rich comparisons
("tp_richcompare", richcmpfunc), #N
# weak reference enabler
("tp_weaklistoffset", Py_ssize_t), #U
# Added in release 2.2
# Iterators
("tp_iter", getiterfunc), #N
("tp_iternext", iternextfunc), #N
# Attribute descriptor and subclassing stuff
("tp_methods", Ptr(PyMethodDef)), #S
("tp_members", Ptr(PyMemberDef)), #S
("tp_getset", Ptr(PyGetSetDef)), #S
("tp_base", Ptr(PyTypeObject)), #E
("tp_dict", PyObject), #U
("tp_descr_get", descrgetfunc), #N
("tp_descr_set", descrsetfunc), #N
("tp_dictoffset", Py_ssize_t), #U
("tp_init", initproc), #N
("tp_alloc", allocfunc), #N
("tp_new", newfunc), #S
("tp_free", freefunc), #E Low-level free-memory routine
("tp_is_gc", inquiry), #U For PyObject_IS_GC
("tp_bases", PyObject),#E
("tp_mro", PyObject), #U method resolution order
("tp_cache", PyObject),#S
("tp_subclasses", PyObject), #U
("tp_weaklist", PyObject), #U
("tp_del", destructor), #N
])
cpython_struct("PyTypeObject", PyTypeObjectFields, PyTypeObject)
| 34.907563 | 79 | 0.662253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,522 | 0.303563 |
80dbd2a18f61160481567354d8144c8201971b32 | 718 | py | Python | photobooth/1-web-base-layout/app/views.py | albertoSoto/raspberry-tic-projects | 692762dade2397ba4bedb77b4733a1d5d9829450 | [
"MIT"
] | null | null | null | photobooth/1-web-base-layout/app/views.py | albertoSoto/raspberry-tic-projects | 692762dade2397ba4bedb77b4733a1d5d9829450 | [
"MIT"
] | null | null | null | photobooth/1-web-base-layout/app/views.py | albertoSoto/raspberry-tic-projects | 692762dade2397ba4bedb77b4733a1d5d9829450 | [
"MIT"
] | null | null | null | from flask import render_template, flash, redirect
from app import app
@app.route('/')
@app.route('/index')
def index():
    """Render the home page with a stub user and two example posts."""
    user = {'nickname': 'Berto'}
    first_post = {
        'author': {'nickname': 'Alum Post 1'},
        'body': 'This is the body of the first post.'
    }
    second_post = {
        'author': {'nickname': 'Teacher Post 2'},
        'body': 'This is the body of the second post.'
    }
    return render_template('index.html', title='Home',
                           user=user, posts=[first_post, second_post])
@app.route('/about')
def about():
    """Render the static about page."""
    return render_template('about.html',
title='About') | 24.758621 | 58 | 0.473538 | 0 | 0 | 0 | 0 | 641 | 0.892758 | 0 | 0 | 225 | 0.31337 |
80dc746d4a550ea42299b59b314c14103e5afb26 | 14,420 | py | Python | twstock/stock.py | LorneWu/twstock | 8fbdd9fa7160f5441d29575544be8d9c570945cd | [
"MIT"
] | null | null | null | twstock/stock.py | LorneWu/twstock | 8fbdd9fa7160f5441d29575544be8d9c570945cd | [
"MIT"
] | null | null | null | twstock/stock.py | LorneWu/twstock | 8fbdd9fa7160f5441d29575544be8d9c570945cd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
import urllib.parse
from collections import namedtuple
from operator import attrgetter
from time import sleep
from twstock.proxy import get_proxies
import os
import json
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
import requests
try:
from . import analytics
from .codes import codes
except ImportError as e:
if e.name == 'lxml':
# Fix #69
raise e
import analytics
from codes import codes
# Endpoints of the two Taiwanese exchanges.
TWSE_BASE_URL = 'http://www.twse.com.tw/'
TPEX_BASE_URL = 'http://www.tpex.org.tw/'
# Global request counter shared by both fetchers; every 12th request throttles.
REQ_COUNTER = 0
# One day of trading data for a stock.
DATATUPLE = namedtuple('Data', ['date', 'capacity', 'turnover', 'open',
                                'high', 'low', 'close', 'change', 'transaction'])
class BaseFetcher(object):
    """Common interface for the TWSE / TPEX daily-data fetchers."""

    def fetch(self, year, month, sid, retry, retry_interval):
        """Download one month of data; overridden by subclasses."""
        pass

    def _convert_date(self, date):
        """Convert '106/05/01' to '2017/05/01'"""
        # Dates arrive with a ROC (Minguo) year; add 1911 to get the CE year.
        parts = date.split('/')
        parts[0] = str(int(parts[0]) + 1911)
        return '/'.join(parts)

    def _make_datatuple(self, data):
        """Turn one raw row into a DATATUPLE; overridden by subclasses."""
        pass

    def purify(self, original_data):
        """Turn a raw payload into DATATUPLE rows; overridden by subclasses."""
        pass
class TWSEFetcher(BaseFetcher):
    """Fetch daily trading rows for TWSE-listed stocks."""

    REPORT_URL = urllib.parse.urljoin(
        TWSE_BASE_URL, 'exchangeReport/STOCK_DAY')

    def __init__(self):
        pass

    def fetch(self, year: int, month: int, sid: str, retry: int=5, retry_interval: int=5):
        """Download one month of data for `sid`, retrying on malformed JSON.

        Every 12th request (globally, across fetchers) sleeps 25s to throttle.
        """
        global REQ_COUNTER
        params = {'date': '%d%02d01' % (year, month), 'stockNo': sid}
        for _ in range(retry):
            REQ_COUNTER += 1
            if REQ_COUNTER % 12 == 0:
                sleep(25)
            response = requests.get(self.REPORT_URL, params=params,
                                    proxies=get_proxies())
            sleep(retry_interval)
            try:
                data = response.json()
            except JSONDecodeError:
                continue
            else:
                break
        else:
            # Fail in all retries
            data = {'stat': '', 'data': []}
        data['data'] = self.purify(data) if data['stat'] == 'OK' else []
        return data

    def _make_datatuple(self, data):
        """Normalize one raw TWSE row (list of strings) into a DATATUPLE."""
        def as_price(cell):
            # '--' marks a value that was not quoted that day.
            return None if cell == '--' else float(cell.replace(',', ''))

        data[0] = datetime.datetime.strptime(
            self._convert_date(data[0]), '%Y/%m/%d')
        data[1] = int(data[1].replace(',', ''))
        data[2] = int(data[2].replace(',', ''))
        for col in (3, 4, 5, 6):
            data[col] = as_price(data[col])
        # +/-/X marks rise/fall/no-comparison; 'X0.00' means no price change.
        delta = data[7].replace(',', '')
        data[7] = float(0.0 if delta == 'X0.00' else delta)
        data[8] = int(data[8].replace(',', ''))
        return DATATUPLE(*data)

    def purify(self, original_data):
        """Convert the raw payload's rows into DATATUPLE records."""
        return [self._make_datatuple(row) for row in original_data['data']]
class TPEXFetcher(BaseFetcher):
    """Fetch daily trading rows for TPEX (OTC) stocks."""

    REPORT_URL = urllib.parse.urljoin(TPEX_BASE_URL,
                                      'web/stock/aftertrading/daily_trading_info/st43_result.php')

    def __init__(self):
        pass

    def fetch(self, year: int, month: int, sid: str, retry: int=5, retry_interval: int=5):
        """Download one month of data (TPEX expects ROC years), retrying on bad JSON.

        Every 12th request (globally, across fetchers) sleeps 25s to throttle.
        """
        global REQ_COUNTER
        params = {'d': '%d/%d' % (year - 1911, month), 'stkno': sid}
        for _ in range(retry):
            REQ_COUNTER += 1
            if REQ_COUNTER % 12 == 0:
                sleep(25)
            response = requests.get(self.REPORT_URL, params=params,
                                    proxies=get_proxies())
            sleep(retry_interval)
            try:
                data = response.json()
            except JSONDecodeError:
                continue
            else:
                break
        else:
            # Fail in all retries
            data = {'aaData': []}
        data['data'] = self.purify(data) if data['aaData'] else []
        return data

    def _convert_date(self, date):
        """Convert '106/05/01' to '2017/05/01'"""
        parts = date.split('/')
        parts[0] = str(int(parts[0]) + 1911)
        return '/'.join(parts)

    def _make_datatuple(self, data):
        """Normalize one raw TPEX row; TPEX volumes come in thousands."""
        data[0] = datetime.datetime.strptime(
            self._convert_date(data[0].replace('*', '')), '%Y/%m/%d')
        data[1] = int(data[1].replace(',', '')) * 1000
        data[2] = int(data[2].replace(',', '')) * 1000
        for col in (3, 4, 5, 6):
            cell = data[col]
            # '--' marks a value that was not quoted that day.
            data[col] = None if cell == '--' else float(cell.replace(',', ''))
        data[7] = float(data[7].replace(',', ''))
        data[8] = int(data[8].replace(',', ''))
        return DATATUPLE(*data)

    def purify(self, original_data):
        """Convert the raw payload's rows into DATATUPLE records."""
        return [self._make_datatuple(row) for row in original_data['aaData']]
class Stock(analytics.Analytics):
    """Daily trading data for one ticker, backed by an on-disk JSON month cache.

    `data` holds the rows of the most recent query; `data_cache` holds every
    row ever fetched (sorted by date) and is persisted to `dump_file`.
    """

    def __init__(self, sid: str, initial_fetch: bool=True, skip_fetch_31: bool=False):
        self.sid = sid
        # Market '上市' (TWSE-listed) uses the TWSE fetcher; everything else
        # falls back to the TPEX (OTC) fetcher.
        self.fetcher = TWSEFetcher(
        ) if codes[sid].market == '上市' else TPEXFetcher()
        self.raw_data = []
        # Handle json cache
        self.dump_file = 'twstock_' + sid + '.json'
        self.data_cache = []     # cached DATATUPLE rows, ascending by date
        self.data_cache_ptr = 0  # scan position within data_cache
        self.data = []           # rows of the most recent query
        if os.path.exists(self.dump_file):
            # Load json cache if exists
            self.load()
        # Init data
        if initial_fetch and not skip_fetch_31:
            self.fetch_31()

    def search_data_cache(self, y, m):
        """Copy month (y, m) from the cache into self.data; True on a hit."""
        # search data cache for _month_year_iter()
        # if y and m find matched entry, copy data from self.data_cache to self.data
        # return value
        # 1. True : find matched entry. Copy the data to self.data_cache.
        #    And self.data_cache_ptr stores the index of self.data_cache.
        # 2. False : Not found, need to send request to TWSE or TPEX.
        if len(self.data_cache) == 0:
            return False
        find_match_entry = False
        for data_cache_i in range(self.data_cache_ptr, len(self.data_cache)):
            if self.data_cache[data_cache_i].date.year == y and \
               self.data_cache[data_cache_i].date.month == m :
                # Hit in data cache, start loop until next miss. To move a month data to data cache.
                # ex. If find 11/1 , then loop to add 11/1 ~ 11/30
                self.data.append(self.data_cache[data_cache_i])
                find_match_entry = True
            elif find_match_entry == True:
                # First miss after hit, break
                # Finish moving a month data.
                self.data_cache_ptr = data_cache_i
                break
            elif self.data_cache[data_cache_i].date.year < y or \
                 ( self.data_cache[data_cache_i].date.year == y and \
                   self.data_cache[data_cache_i].date.month < m ) :
                # find datetime before first date of target month, continue search
                self.data_cache_ptr = data_cache_i
                continue
            else:
                # find datetime after last date of target month, break
                self.data_cache_ptr = data_cache_i
                break
        return find_match_entry

    def _month_year_iter(self, start_month, start_year, end_month, end_year):
        """Yield (year, month) over the period, skipping months found in cache."""
        ym_start = 12 * start_year + start_month - 1
        ym_end = 12 * end_year + end_month
        for ym in range(ym_start, ym_end):
            y, m = divmod(ym, 12)
            if self.search_data_cache(y,m + 1):
                # if match in data cache, skip it
                continue
            yield y, m + 1

    def fetch(self, year: int, month: int):
        """Fetch year month data"""
        self.raw_data = [self.fetcher.fetch(year, month, self.sid)]
        self.data = self.raw_data[0]['data']
        return self.data

    def fetch_period(self, from_year: int, from_month: int, from_day: int=0, to_year: int=0, to_month: int=0, to_day: int=0, retry: int=5, retry_interval: int=3):
        """Fetch an inclusive period; day bounds of 0 mean whole months.

        Uses the month cache where possible and persists newly fetched data.
        """
        self.raw_data = []
        self.data = []
        self.data_cache_ptr = 0
        global REQ_COUNTER
        REQ_COUNTER = 0
        # Default the end of the period to the current month.
        if to_year == 0 or to_month == 0:
            today = datetime.datetime.today()
            to_year = today.year
            to_month = today.month
        if from_year > to_year or ( from_year == to_year and from_month > to_month) or \
           ( from_year == to_year and from_month == to_month and from_day > to_day and from_day != 0):
            # check if invalid period
            return
        for year, month in self._month_year_iter(from_month, from_year, to_month, to_year):
            self.raw_data.append(self.fetcher.fetch(year, month, self.sid, retry, retry_interval))
            self.data.extend(self.raw_data[-1]['data'])
            # Copy fetched data to cache
            if self.data_cache_ptr + 1 >= len(self.data_cache):
                self.data_cache = self.data_cache + self.raw_data[-1]['data']
            else:
                self.data_cache = self.data_cache[:self.data_cache_ptr] + self.raw_data[-1]['data'] + self.data_cache[self.data_cache_ptr:]
            if month == 12:
                # To decrease save data_cache frequency
                self.save()
        # Trim leading days before from_day in the first month.
        if from_day != 0:
            start_index = 0
            for dd_i in range(len(self.data)):
                if self.data[dd_i].date.day < from_day and \
                   self.data[dd_i].date.year == from_year and \
                   self.data[dd_i].date.month == from_month :
                    start_index += 1
                else:
                    break
            self.data = self.data[start_index:]
        # Trim trailing days after to_day in the last month.
        if to_day != 0:
            end_index = len(self.data)
            for dd_ii in range(len(self.data),0,-1):
                dd_i = dd_ii - 1
                if self.data[dd_i].date.day > to_day and \
                   self.data[dd_i].date.year == to_year and \
                   self.data[dd_i].date.month == to_month :
                    end_index -= 1
                else:
                    break
            self.data = self.data[:end_index]
        self.check_data_valid()
        self.save()
        return self.data

    def fetch_from(self, from_year: int, from_month: int):
        """Fetch data from year, month to current year month data"""
        self.fetch_period(from_year=from_year, from_month=from_month)
        return self.data

    def fetch_31(self, current_year: int=0, current_month: int=0, current_day: int=0):
        """Fetch 31 days data"""
        if current_year == 0 or current_month == 0:
            start_date = datetime.datetime.today()
        else:
            start_date = datetime.datetime( current_year, current_month, current_day)
        # Go back 60 days so the window is sure to contain 31 trading days.
        before = start_date - datetime.timedelta(days=60)
        self.fetch_from(before.year, before.month)
        self.data = self.data[-31:]
        self.check_data_valid()
        return self.data

    def save(self):
        """Persist the cache to dump_file, dropping the (incomplete) current month."""
        data_cache_save = self.data_cache
        today = datetime.datetime.today()
        # To avoid saving incomplete month data. ex. if today is 2020/11/12, then all data with 2020/11 will be ignore.
        for dc_c in range(len(data_cache_save),0,-1):
            dc_i = dc_c - 1 # from len(data_cache_save)-1 ~ 0
            # NOTE(review): both sides of this `and` test `.date.month`; one was
            # presumably meant to compare `.date.year` — confirm and fix upstream.
            if data_cache_save[dc_i].date.month == today.month and data_cache_save[dc_i].date.month == today.month:
                continue
            else:
                data_cache_save = data_cache_save[:dc_c]
                break
        with open(self.dump_file, 'w') as f:
            json.dump(data_cache_save, f, indent=4, sort_keys=True, default=str)

    def load(self):
        """Load the JSON cache from dump_file into data_cache as DATATUPLEs."""
        self.data_cache = []
        data_cache_tmp = []
        with open(self.dump_file, 'r') as f:
            data_cache_tmp = json.load(f)
        for data_i in range(len(data_cache_tmp)) :
            # To package to namedtuple "Data"
            entry_i = data_cache_tmp[data_i]
            datetime_d = entry_i[0]  # NOTE(review): unused local
            entry_i[0] = datetime.datetime.strptime(entry_i[0], '%Y-%m-%d %H:%M:%S')
            self.data_cache.append(DATATUPLE(*entry_i))
        self.check_data_valid()

    def organize_data_cache(self):
        """De-duplicate the cache and re-sort it by date."""
        self.data_cache = list(set(self.data_cache))
        self.data_cache = sorted(self.data_cache,key=attrgetter('date'), reverse=False)

    def check_data_valid(self):
        """Print diagnostics if data or data_cache is unsorted or has duplicates."""
        data_tmp = sorted(self.data,key=attrgetter('date'), reverse=False)
        detect_potential_issue = False
        if data_tmp != self.data:
            print("Potential self.data order issue")
            detect_potential_issue = True
        if len(set(data_tmp)) != len(self.data):
            print("Potential self.data duplicate issue")
            detect_potential_issue = True
        data_tmp = sorted(self.data_cache,key=attrgetter('date'), reverse=False)
        if data_tmp != self.data_cache:
            print("Potential self.data_cache order issue")
            detect_potential_issue = True
        if len(set(data_tmp)) != len(self.data_cache):
            print("Potential self.data_cache duplicate issue")
            detect_potential_issue = True
        if detect_potential_issue == False :
            print("Check data pass")

    # Column views over the rows of the most recent query (self.data).
    @property
    def date(self):
        """Dates of the loaded rows."""
        return [d.date for d in self.data]

    @property
    def capacity(self):
        """Traded share volume per day."""
        return [d.capacity for d in self.data]

    @property
    def turnover(self):
        """Traded value per day."""
        return [d.turnover for d in self.data]

    @property
    def price(self):
        """Alias for the closing prices."""
        return [d.close for d in self.data]

    @property
    def high(self):
        """Daily high prices."""
        return [d.high for d in self.data]

    @property
    def low(self):
        """Daily low prices."""
        return [d.low for d in self.data]

    @property
    def open(self):
        """Daily opening prices."""
        return [d.open for d in self.data]

    @property
    def close(self):
        """Daily closing prices."""
        return [d.close for d in self.data]

    @property
    def change(self):
        """Daily price changes."""
        return [d.change for d in self.data]

    @property
    def transaction(self):
        """Daily transaction counts."""
        return [d.transaction for d in self.data]
| 36.231156 | 162 | 0.559501 | 13,643 | 0.944806 | 396 | 0.027424 | 756 | 0.052355 | 0 | 0 | 2,076 | 0.143767 |
80dc78ffed99f08797f54d11b8b5ae608f71cfe0 | 2,208 | py | Python | kth_power.py | UPstartDeveloper/Problem_Solving_Practice | bd61333b3b056e82a94297e02bc05a17552e3496 | [
"MIT"
] | null | null | null | kth_power.py | UPstartDeveloper/Problem_Solving_Practice | bd61333b3b056e82a94297e02bc05a17552e3496 | [
"MIT"
] | null | null | null | kth_power.py | UPstartDeveloper/Problem_Solving_Practice | bd61333b3b056e82a94297e02bc05a17552e3496 | [
"MIT"
] | null | null | null | """
Solution for
Sort Integers by The Power Value: https://leetcode.com/problems/sort-integers-by-the-power-value/
"""
class Solution:
    """LeetCode 1387: sort the integers in [lo, hi] by their Collatz 'power'."""

    def getKth(self, lo: int, hi: int, k: int) -> int:
        """Return the k-th (1-based) integer in [lo, hi] when the range is
        sorted by power value, breaking ties by the integer itself.

        The power of x is the number of Collatz steps needed to reach 1:
        x -> x // 2 when even, x -> 3 * x + 1 when odd (power of 1 is 0).
        Time: O(n * power + n log n), Space: O(n), with n = hi - lo + 1.
        """
        def power(value: int) -> int:
            # Count Collatz steps down to 1.
            steps = 0
            while value != 1:
                if value % 2 == 0:
                    # BUG FIX: was `value /= 2`, which produces floats and can
                    # lose precision once intermediates exceed 2**53.
                    value //= 2
                else:
                    value = 3 * value + 1
                steps += 1
            return steps

        # Sorting by the (power, value) pair replaces the original
        # bucket-then-sort approach; ties on power fall back to the integer.
        ordered = sorted(range(lo, hi + 1), key=lambda x: (power(x), x))
        return ordered[k - 1]
"""
"""
"""
lo = 12
hi = 15
k = 2
power_ints = {
9: [12, 13]
17: [14, 15]
}
sorted_powers = [9, 17]
sorted_ints = [12, 13, 14, 15]
ints = [14, 15]
power = 17
num = 12
power = 9
"""
| 28.307692 | 97 | 0.498188 | 2,085 | 0.944293 | 0 | 0 | 0 | 0 | 0 | 0 | 1,121 | 0.507699 |
80dcec9d61fecffce04b116dfcd7c74ede2c697a | 847 | py | Python | aruco-decoder/tests/test_aruco_detection_blue.py | Shinyhero36/Info | 68c74d44ce8ccf632d4f1b79283ac20ff933670d | [
"MIT"
] | 2 | 2021-11-02T09:14:35.000Z | 2021-11-22T19:24:19.000Z | aruco-decoder/tests/test_aruco_detection_blue.py | Shinyhero36/Info | 68c74d44ce8ccf632d4f1b79283ac20ff933670d | [
"MIT"
] | null | null | null | aruco-decoder/tests/test_aruco_detection_blue.py | Shinyhero36/Info | 68c74d44ce8ccf632d4f1b79283ac20ff933670d | [
"MIT"
] | null | null | null | import os
import unittest
from aruco import ArucoDetector
class TestArucoDetectionBlue(unittest.TestCase):
    """Every variant of the blue marker image should decode to id 13."""

    ar = ArucoDetector()
    BLUE = 13

    def _decode(self, filename):
        # Run the detector on one dataset image and return the decoded id.
        return self.ar.read_image(os.path.abspath("./datasets/" + filename))

    def test_blue(self):
        self.assertEqual(self.BLUE, self._decode("blue.jpg"))

    def test_blue_tilted_r(self):
        self.assertEqual(self.BLUE, self._decode("blue-tilted-r.jpg"))

    def test_blue_reverse(self):
        self.assertEqual(self.BLUE, self._decode("blue-reverse.jpg"))

    def test_blue_tilted_l(self):
        self.assertEqual(self.BLUE, self._decode("blue-tilted-l.jpg"))


if __name__ == '__main__':
    unittest.main(verbosity=3)
| 26.46875 | 84 | 0.68595 | 725 | 0.855962 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.141677 |
80de6c64c1123b72f584b7441b7d5bc184df60a9 | 1,218 | py | Python | script.py | michaelihwang/speedtest-monitor | a412ba4ec74d80fb07498258af6ee934ec9e2686 | [
"MIT"
] | 1 | 2020-10-25T22:22:20.000Z | 2020-10-25T22:22:20.000Z | script.py | michaelihwang/speedtest-monitor | a412ba4ec74d80fb07498258af6ee934ec9e2686 | [
"MIT"
] | null | null | null | script.py | michaelihwang/speedtest-monitor | a412ba4ec74d80fb07498258af6ee934ec9e2686 | [
"MIT"
] | null | null | null | import sys
import time
from datetime import datetime
from traceback import print_exc
import speedtest
from decorator import restartable
KILOBYTE = 1024
MEGABYTE = 1024 * KILOBYTE
REPORT_FREQ = 60
def test_setup(st):
st.get_servers()
st.get_best_server()
st.download() # bits/s
st.upload() # bits/s
res = st.results.dict()
download = "{:.2f}".format(res["download"] / MEGABYTE)
upload = "{:.2f}".format(res["upload"] / MEGABYTE)
ping = "{:.2f}".format(res["ping"])
return download, upload, ping
@restartable
def main():
# Check if command line argument for reporting freq is provided (min 30)
global REPORT_FREQ
if len(sys.argv) > 1 and int(sys.argv[1]) >= 30:
REPORT_FREQ = int(sys.argv[1])
try:
st = speedtest.Speedtest()
while True:
time_now = datetime.now().strftime("%H:%M:%S")
download, upload, ping = test_setup(st)
print(f"[{time_now}]: PING: {ping} ms\tDOWN: {download} Mbps\tUP: {upload} Mbps")
time.sleep(REPORT_FREQ)
except Exception as exc:
print("\nCaught exception: ", exc.__class__.__name__)
print_exc()
if __name__ == "__main__":
main()
| 24.857143 | 93 | 0.625616 | 0 | 0 | 0 | 0 | 632 | 0.518883 | 0 | 0 | 252 | 0.206897 |
80e0d1c76660cb41ffbb52948c7fc98bd6120bac | 8,901 | py | Python | reports/management/mw.py | Wellheor1/l2 | d980210921c545c68fe9d5522bb693d567995024 | [
"MIT"
] | 10 | 2018-03-14T06:17:06.000Z | 2022-03-10T05:33:34.000Z | reports/management/mw.py | Wellheor1/l2 | d980210921c545c68fe9d5522bb693d567995024 | [
"MIT"
] | 512 | 2018-09-10T07:37:34.000Z | 2022-03-30T02:23:43.000Z | reports/management/mw.py | D00dleman/l2 | 0870144537ee340cd8db053a608d731e186f02fb | [
"MIT"
] | 24 | 2018-07-31T05:52:12.000Z | 2022-02-08T00:39:41.000Z | import os
from importlib import import_module
from django.apps import apps
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.serializer import serializer_factory
from django.db.models import ForeignKey, ManyToManyField
from django.utils.inspect import get_func_args
from django.utils.module_loading import module_dir
class SettingsReference(str):
"""
Special subclass of string which actually references a current settings
value. It's treated as the value in memory, but serializes out to a
settings.NAME attribute reference.
"""
def __new__(self, value, setting_name):
return str.__new__(self, value)
def __init__(self, value, setting_name):
self.setting_name = setting_name
def fullname(o):
return o.__module__ + "." + o.__class__.__name__
class OperationWriter:
def __init__(self, operation, indentation=2):
self.operation = operation
self.buff = []
self.indentation = indentation
self.data = []
def serialize(self, app):
d = {}
def _write(_arg_name, _arg_value):
if _arg_name in self.operation.serialization_expand_args and isinstance(_arg_value, (list, tuple, dict)):
if isinstance(_arg_value, dict):
ds = {}
for a, b in _arg_value.items():
if any([isinstance(b, str), isinstance(b, list), isinstance(b, dict), isinstance(b, bool), isinstance(b, float), isinstance(b, int)]) or b is not None:
ds[a] = b
else:
ds[a] = str(b)
d[_arg_name] = ds
else:
f = []
for item in _arg_value:
if isinstance(item, tuple):
if len(item) == 2:
props = {}
i = item[1].__dict__
props["type_name"] = fullname(item[1])
props["choices"] = i.get("choices", None)
props["blank"] = i.get("blank", True)
props["is_null"] = i.get("null", True)
props["primary_key"] = i.get("primary_key", False)
props["help_text"] = i.get("help_text", '')
props["max_length"] = i.get("max_length", None)
props["verbose_name"] = i.get("verbose_name", None)
if "default" in i:
props["default"] = str(i["default"]) if type(i["default"]) not in [set, list, dict, int, float, bool, type(None)] else i["default"]
else:
props["default"] = None
f.append({'name': str(item[0]), 'props': props})
else:
f.append(list(item))
elif (
any([isinstance(item, str), isinstance(item, list), isinstance(item, dict), isinstance(item, bool), isinstance(item, float), isinstance(item, int)])
or item is None
):
f.append(item)
else:
f.append(str(item))
d[_arg_name] = f
elif isinstance(_arg_value, ForeignKey):
ab = {
"many_to_many": bool(_arg_value.many_to_many),
"many_to_one": bool(_arg_value.many_to_one),
"one_to_many": bool(_arg_value.one_to_many),
"one_to_one": bool(_arg_value.one_to_one),
"field_str": str(_arg_value),
"to": str(_arg_value.remote_field.model).replace("__fake__.", "").replace("<class", "").replace("'", "").replace(">", "").replace(" ", ""),
}
d[_arg_name] = ab
d["related"] = True
elif isinstance(_arg_value, ManyToManyField):
ab = {
"many_to_many": bool(_arg_value.many_to_many),
"many_to_one": bool(_arg_value.many_to_one),
"one_to_many": bool(_arg_value.one_to_many),
"one_to_one": bool(_arg_value.one_to_one),
"field_str": str(_arg_value),
"to": str(_arg_value.remote_field.model).replace("__fake__.", "").replace("<class", "").replace("'", "").replace(">", "").replace(" ", ""),
}
d[_arg_name] = ab
d["related"] = True
elif (
any(
[
isinstance(_arg_value, str),
isinstance(_arg_value, list),
isinstance(_arg_value, dict),
isinstance(_arg_value, bool),
isinstance(_arg_value, float),
isinstance(_arg_value, int),
]
)
or _arg_value is None
):
d[_arg_name] = _arg_value
else:
d[_arg_name] = str(_arg_value)
name, args, kwargs = self.operation.deconstruct()
operation_args = get_func_args(self.operation.__init__)
for i, arg in enumerate(args):
arg_value = arg
arg_name = operation_args[i]
_write(arg_name, arg_value)
i = len(args)
for arg_name in operation_args[i:]:
if arg_name in kwargs:
arg_value = kwargs[arg_name]
_write(arg_name, arg_value)
if "name" in d:
d["name"] = app + "." + d["name"]
return d
class MigrationWriter:
"""
Take a Migration instance and is able to produce the contents
of the migration file from it.
"""
def __init__(self, migration):
self.migration = migration
def as_list(self, app):
operations = []
for operation in self.migration.operations:
operations.append(OperationWriter(operation).serialize(app))
return operations
@property
def basedir(self):
migrations_package_name, _ = MigrationLoader.migrations_module(self.migration.app_label)
if migrations_package_name is None:
raise ValueError("Django can't create migrations for app '%s' because " "migrations have been disabled via the MIGRATION_MODULES " "setting." % self.migration.app_label)
# See if we can import the migrations module directly
try:
migrations_module = import_module(migrations_package_name)
except ImportError:
pass
else:
try:
return module_dir(migrations_module)
except ValueError:
pass
# Alright, see if it's a direct submodule of the app
app_config = apps.get_app_config(self.migration.app_label)
maybe_app_name, _, migrations_package_basename = migrations_package_name.rpartition(".")
if app_config.name == maybe_app_name:
return os.path.join(app_config.path, migrations_package_basename)
# In case of using MIGRATION_MODULES setting and the custom package
# doesn't exist, create one, starting from an existing package
existing_dirs, missing_dirs = migrations_package_name.split("."), []
while existing_dirs:
missing_dirs.insert(0, existing_dirs.pop(-1))
try:
base_module = import_module(".".join(existing_dirs))
except ImportError:
continue
else:
try:
base_dir = module_dir(base_module)
except ValueError:
continue
else:
break
else:
raise ValueError(
"Could not locate an appropriate location to create " "migrations package %s. Make sure the toplevel " "package exists and can be imported." % migrations_package_name
)
final_dir = os.path.join(base_dir, *missing_dirs)
if not os.path.isdir(final_dir):
os.makedirs(final_dir)
for missing_dir in missing_dirs:
base_dir = os.path.join(base_dir, missing_dir)
with open(os.path.join(base_dir, "__init__.py"), "w"):
pass
return final_dir
@property
def filename(self):
return "%s.py" % self.migration.name
@property
def path(self):
return os.path.join(self.basedir, self.filename)
@classmethod
def serialize(cls, value):
return serializer_factory(value).serialize()
| 40.459091 | 182 | 0.526795 | 8,470 | 0.951578 | 0 | 0 | 2,545 | 0.285923 | 0 | 0 | 1,315 | 0.147736 |
80e0fed34003c8412ae4f44e18a85afe86d3f7f7 | 375 | py | Python | example/011_matching_zero_or_more_repetitions.py | mafda/regex_101 | 085a9ee48829243d87e4bd74bb1baf07abc6481e | [
"MIT"
] | null | null | null | example/011_matching_zero_or_more_repetitions.py | mafda/regex_101 | 085a9ee48829243d87e4bd74bb1baf07abc6481e | [
"MIT"
] | null | null | null | example/011_matching_zero_or_more_repetitions.py | mafda/regex_101 | 085a9ee48829243d87e4bd74bb1baf07abc6481e | [
"MIT"
] | null | null | null | """
Task
You have a test string S.
Your task is to write a regex that will match S using the following conditions:
S should begin with 2 or more digits.
After that, S should have 0 or more lowercase letters.
S should end with 0 or more uppercase letters
"""
import re
Regex_Pattern = r'^[\d]{2,}[a-z]*[A-Z]*$'
print(str(bool(re.search(Regex_Pattern, input()))).lower())
| 22.058824 | 79 | 0.712 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 284 | 0.757333 |
80e115033a86c707eb93a0ae3031b719ba2a3293 | 3,755 | py | Python | day-16/part_2.py | leotappe/aoc-2021 | 6132e01bd9b4c6ee6a8d95e213f08463102596c2 | [
"MIT"
] | null | null | null | day-16/part_2.py | leotappe/aoc-2021 | 6132e01bd9b4c6ee6a8d95e213f08463102596c2 | [
"MIT"
] | null | null | null | day-16/part_2.py | leotappe/aoc-2021 | 6132e01bd9b4c6ee6a8d95e213f08463102596c2 | [
"MIT"
] | null | null | null | """
Advent of Code 2021 | Day 16 | Part 2
"""
import sys
import math
class Packet:
def __init__(self, version, type_id):
self.version = version
self.type_id = type_id
class Literal(Packet):
def __init__(self, version, type_id, value):
super().__init__(version, type_id)
self.value = value
def sum_version_numbers(self):
return self.version
def eval(self):
return self.value
def __str__(self):
return f'L-{self.version}-{self.type_id}({self.value})'
class Operator(Packet):
def __init__(self, version, type_id, length_type_id):
super().__init__(version, type_id)
self.length_type_id = length_type_id
self.subpackets = []
def sum_version_numbers(self):
return self.version + sum(packet.sum_version_numbers() for packet in self.subpackets)
def eval(self):
if self.type_id == 0:
return sum(p.eval() for p in self.subpackets)
if self.type_id == 1:
return math.prod(p.eval() for p in self.subpackets)
if self.type_id == 2:
return min(p.eval() for p in self.subpackets)
if self.type_id == 3:
return max(p.eval() for p in self.subpackets)
if self.type_id == 5:
return int(self.subpackets[0].eval() > self.subpackets[1].eval())
if self.type_id == 6:
return int(self.subpackets[0].eval() < self.subpackets[1].eval())
if self.type_id == 7:
return int(self.subpackets[0].eval() == self.subpackets[1].eval())
def __str__(self):
return f'O-{self.version}-{self.type_id}({",".join(str(packet) for packet in self.subpackets)})'
def get_version(bits, start_index):
return int(bits[start_index:start_index + 3], base=2)
def get_type_id(bits, start_index):
return int(bits[start_index + 3:start_index + 6], base=2)
def get_literal_value(bits, start_index):
groups = []
for i in range(start_index + 6, len(bits), 5):
groups.append(bits[i + 1:i + 5])
if bits[i] == '0':
break
return int(''.join(groups), base=2), i + 5
def get_length_type_id(bits, start_index):
return int(bits[start_index + 6])
def get_total_length_of_subpackets_in_bits(bits, start_index):
return int(bits[start_index + 7:start_index + 7 + 15], base=2)
def get_number_of_subpackets(bits, start_index):
return int(bits[start_index + 7:start_index + 7 + 11], base=2)
def parse(bits, start_index):
version = get_version(bits, start_index)
type_id = get_type_id(bits, start_index)
if type_id == 4:
value, index = get_literal_value(bits, start_index)
return Literal(version, type_id, value), index
else:
packet = Operator(version, type_id, get_length_type_id(bits, start_index))
if packet.length_type_id == 0:
bit_length_of_subpackets = get_total_length_of_subpackets_in_bits(bits, start_index)
index = start_index + 6 + 1 + 15
while index < start_index + 6 + 1 + 15 + bit_length_of_subpackets:
subpacket, index = parse(bits, index)
packet.subpackets.append(subpacket)
else:
num_subpackets = get_number_of_subpackets(bits, start_index)
index = start_index + 6 + 1 + 11
for _ in range(num_subpackets):
subpacket, index = parse(bits, index)
packet.subpackets.append(subpacket)
return packet, index
def main():
with open(sys.argv[1]) as f:
bits = f.readline().strip()
bits = ''.join(f'{int(c, base=16):04b}' for c in bits)
packet, _ = parse(bits, 0)
print(packet.eval())
if __name__ == '__main__':
main()
| 30.282258 | 104 | 0.622636 | 1,622 | 0.431957 | 0 | 0 | 0 | 0 | 0 | 0 | 223 | 0.059387 |
80e8e4f12d8b86345865f28ec633cf5984a0885b | 2,142 | py | Python | pyspark/test/bigdl/test_engine_env.py | twicoder/BigDL | f065db372e1c682fa4a7903e287bba21d5f46750 | [
"Apache-2.0"
] | 55 | 2018-01-12T01:43:29.000Z | 2021-03-09T02:35:56.000Z | pyspark/test/bigdl/test_engine_env.py | jason-hzw/BigDL | ef4f4137965147e2bc59e41f40c4acbb50eeda97 | [
"Apache-2.0"
] | 4 | 2018-01-15T07:34:41.000Z | 2018-01-16T05:46:12.000Z | pyspark/test/bigdl/test_engine_env.py | jason-hzw/BigDL | ef4f4137965147e2bc59e41f40c4acbb50eeda97 | [
"Apache-2.0"
] | 22 | 2018-01-15T14:18:15.000Z | 2019-12-16T18:51:33.000Z | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import os
from bigdl.util.common import *
class TestEngineEnv():
def setup_method(self, method):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
"""
pass
def teardown_method(self, method):
""" teardown any state that was previously setup with a setup_method
call.
"""
pass
def test___prepare_bigdl_env(self):
# BigDL will automatically execute 'prepare_env()' function which
# includes '__prepare_bigdl_env()'. To test if there's no more duplicate
# adding jar path message, just do prepare_env()' again
# to see if the log is correct and the environment variables should not vary.
from bigdl.util.engine import prepare_env
bigdl_jars_env_1 = os.environ.get("BIGDL_JARS", None)
spark_class_path_1 = os.environ.get("SPARK_CLASSPATH", None)
sys_path_1 = sys.path
prepare_env()
# there should be no duplicate messages about adding jar path to
# the environment var "BIGDL_JARS"
# environment variables should remain the same
bigdl_jars_env_2 = os.environ.get("BIGDL_JARS", None)
spark_class_path_2 = os.environ.get("SPARK_CLASSPATH", None)
sys_path_2 = sys.path
assert bigdl_jars_env_1 == bigdl_jars_env_2
assert spark_class_path_1 == spark_class_path_2
assert sys_path_1 == sys_path_2
if __name__ == '__main__':
pytest.main()
| 36.305085 | 85 | 0.694211 | 1,450 | 0.676937 | 0 | 0 | 0 | 0 | 0 | 0 | 1,299 | 0.606443 |
80e9be4c60061c31a13757a6151f6f453e313e20 | 5,047 | py | Python | hex-to-dec/eval.py | adiyen/jetson-projects | 4f0f7fcdbf885dbde896e4e97b01d2349a44797d | [
"MIT"
] | null | null | null | hex-to-dec/eval.py | adiyen/jetson-projects | 4f0f7fcdbf885dbde896e4e97b01d2349a44797d | [
"MIT"
] | null | null | null | hex-to-dec/eval.py | adiyen/jetson-projects | 4f0f7fcdbf885dbde896e4e97b01d2349a44797d | [
"MIT"
] | null | null | null | # MIT License
# Copyright (c) 2019 JetsonHacks
# See license
# Using a CSI camera (such as the Raspberry Pi Version 2) connected to a
# NVIDIA Jetson Nano Developer Kit using OpenCV
# Drivers for the camera and OpenCV are included in the base image
from __future__ import print_function
import os
import argparse
import numpy as np
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from train import Net
# gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera
# Defaults to 1280x720 @ 60fps
# Flip the image by setting the flip_method (most common values: 0 and 2)
# display_width and display_height determine the size of the window on the screen
def gstreamer_pipeline (capture_width=1280, capture_height=720, display_width=640, display_height=360, framerate=20, flip_method=0) :
return ('nvarguscamerasrc ! '
'video/x-raw(memory:NVMM), '
'width=(int)%d, height=(int)%d, '
'format=(string)NV12, framerate=(fraction)%d/1 ! '
'nvvidconv flip-method=%d ! '
'video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! '
'videoconvert ! '
'video/x-raw, format=(string)BGR ! appsink' % (capture_width,capture_height,framerate,flip_method,display_width,display_height))
def transform_inputs(image, device):
img = cv2.resize(image, (28, 28), interpolation=cv2.INTER_AREA)
# img = cv2.resize(image, (28, 28))
# image = cv2.threshold(image0, 50, 255, cv2.THRESH_BINARY)[1]
blur = cv2.GaussianBlur(img,(5,5),0)
image = cv2.threshold(blur,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1]
# image = image[np.newaxis, ...]
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
inputs = transform(image)
inputs = inputs.unsqueeze(0)
return inputs.to(device)
def show_camera(args):
# To flip the image, modify the flip_method parameter (0 and 2 are the most common)
print(gstreamer_pipeline(flip_method=0))
labels = []
with open("data/labels.txt", "r") as f:
for line in f:
labels.append(line.strip())
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
model = Net(n_classes=len(labels))
model.load_state_dict(torch.load(args.model_path))
model = model.to(device)
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
org = (30, 50)
color = (0, 0, 255)
thickness = 2
cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
if cap.isOpened():
window_handle = cv2.namedWindow('Camera', cv2.WINDOW_AUTOSIZE)
# Window
while cv2.getWindowProperty('Camera',0) >= 0:
ret_val, img = cap.read()
# Convert to grayscale and apply Gaussian filtering
im_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
im_gray = cv2.GaussianBlur(im_gray, (5, 5), 0)
# Threshold the image
ret, im_th = cv2.threshold(im_gray, 90, 255, cv2.THRESH_BINARY_INV)
# Find contours in the image
im2, ctrs, hier = cv2.findContours(im_th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Get rectangles contains each contour
rects = [cv2.boundingRect(ctr) for ctr in ctrs]
# For each rectangular region, calculate HOG features and predict
# the digit using Linear SVM.
for rect in rects:
# Draw the rectangles
cv2.rectangle(img, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 0), 3)
# Make the rectangular region around the digit
leng = int(rect[3] * 1.6)
pt1 = int(rect[1] + rect[3] // 2 - leng // 2)
pt2 = int(rect[0] + rect[2] // 2 - leng // 2)
roi = im_gray[pt1:pt1+leng, pt2:pt2+leng]
# Resize the image
h, w = roi.shape
if h > 10 and w > 10:
# Transform inputs
inputs = transform_inputs(roi, device)
# Run Model Evaluation
output = model(inputs)
result = output.data.cpu().numpy().argmax()
cv2.putText(img, labels[result], (rect[0], rect[1]),cv2.FONT_HERSHEY_DUPLEX, 2, (0, 255, 255), 3)
cv2.imshow("Camera", img)
# This also acts as
keyCode = cv2.waitKey(30) & 0xff
# Stop the program on the ESC key
if keyCode == 27:
break
cap.release()
cv2.destroyAllWindows()
else:
print('Unable to open camera')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--model_path', default="models/model.pt")
args = parser.parse_args()
show_camera(args)
| 37.947368 | 136 | 0.618585 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,574 | 0.311868 |
80eafa3cec93de224ebf997c5f37fafcf8ce4716 | 146 | py | Python | graphics/__main__.py | wangyibin/biowy | a534f35fc6f96fe1b3a6ca78853a5aa076337328 | [
"BSD-2-Clause"
] | 1 | 2018-10-22T04:44:42.000Z | 2018-10-22T04:44:42.000Z | graphics/__main__.py | wangyibin/bioway | a534f35fc6f96fe1b3a6ca78853a5aa076337328 | [
"BSD-2-Clause"
] | null | null | null | graphics/__main__.py | wangyibin/bioway | a534f35fc6f96fe1b3a6ca78853a5aa076337328 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time: 2019/1/8 16:41
from bioway.apps.base import dmain
if __name__ == "__main__":
dmain() | 14.6 | 34 | 0.623288 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.520548 |
80ec08dee27899b7efa435ee963821efd51d2ad1 | 5,050 | py | Python | sparse_operation_kit/sparse_operation_kit/core/initialize.py | marsmiao/HugeCTR | c9ff359a69565200fcc0c7aae291d9c297bea70e | [
"Apache-2.0"
] | null | null | null | sparse_operation_kit/sparse_operation_kit/core/initialize.py | marsmiao/HugeCTR | c9ff359a69565200fcc0c7aae291d9c297bea70e | [
"Apache-2.0"
] | null | null | null | sparse_operation_kit/sparse_operation_kit/core/initialize.py | marsmiao/HugeCTR | c9ff359a69565200fcc0c7aae291d9c297bea70e | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2021, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sparse_operation_kit.kit_lib import get_nccl_unique_id, gen_random_seed, plugin_init
from tensorflow.python.ops import collective_ops
try:
from tensorflow.distribute import MultiWorkerMirroredStrategy
except:
from tensorflow.distribute.experimental import MultiWorkerMirroredStrategy
from tensorflow.distribute import MirroredStrategy, get_replica_context, has_strategy, get_strategy
from tensorflow import constant, TensorShape, function
from tensorflow.dtypes import int32, int64
from tensorflow import print as tf_print
def Init(**kwargs):
"""
This function is used to do the initialization for plugin.
It should only be called once for this process.
And it must be called under the tf.distribute.Strategy.Scope().
"""
@function
def _single_worker_init(**kwargs):
replica_ctx = get_replica_context()
replica_ctx.merge_call(lambda strategy:
tf_print("You are using the plugin with MirroredStrategy."))
nccl_unique_id = replica_ctx.merge_call(lambda strategy:
get_nccl_unique_id())
global_random_seed = replica_ctx.merge_call(lambda strategy:
gen_random_seed())
global_id = replica_ctx.replica_id_in_sync_group
status = plugin_init(global_id, replica_ctx.num_replicas_in_sync, nccl_unique_id, global_random_seed,
global_batch_size=kwargs['global_batch_size']) #TODO: input from kwargs
return status
@function
def _multi_worker_init(**kwargs):
replica_ctx = get_replica_context()
global_id = replica_ctx.replica_id_in_sync_group
task_id = replica_ctx.strategy.cluster_resolver.task_id
if task_id == 0 and global_id == 0:
unique_id = get_nccl_unique_id()
re = collective_ops.broadcast_send(unique_id,
TensorShape([32,]),
int32,
group_size=replica_ctx.num_replicas_in_sync,
group_key=1,
instance_key=2,
timeout=10)
else:
re = collective_ops.broadcast_recv(TensorShape([32,]),
int32,
group_size=replica_ctx.num_replicas_in_sync,
group_key=1,
instance_key=2,
timeout=10)
if task_id == 0 and global_id == 0:
global_seed = gen_random_seed()
re_seed = collective_ops.broadcast_send(global_seed,
TensorShape([1,]),
int64,
group_size=replica_ctx.num_replicas_in_sync,
group_key=1,
instance_key=3,
timeout=10)
else:
re_seed = collective_ops.broadcast_recv(TensorShape([1,]),
int64,
group_size=replica_ctx.num_replicas_in_sync,
group_key=1,
instance_key=3,
timeout=10)
status = plugin_init(global_id, replica_ctx.num_replicas_in_sync, re, re_seed,
global_batch_size=kwargs['global_batch_size']) #TODO: input from kwargs
return status
if has_strategy():
strategy = get_strategy()
if isinstance(strategy, MirroredStrategy):
return strategy.run(_single_worker_init, kwargs=kwargs)
elif isinstance(strategy, MultiWorkerMirroredStrategy):
return strategy.run(_multi_worker_init, kwargs=kwargs)
else:
raise RuntimeError("This strategy type is not supported yet.")
else:
raise RuntimeError("This function must be called inside tf.distribute.Strategy.Scope().")
| 47.196262 | 109 | 0.575248 | 0 | 0 | 0 | 0 | 3,077 | 0.609307 | 0 | 0 | 1,023 | 0.202574 |
80ed103ee3a82af548fecca2907a61d57c124b83 | 503 | py | Python | ErrorsAndExceptionsHandling/assignment.py | theprogrammingthinker/Python-practice | fef11a7fbd5082a0614b01f88a13ea29d68860bf | [
"Unlicense"
] | 1 | 2017-05-02T10:28:36.000Z | 2017-05-02T10:28:36.000Z | ErrorsAndExceptionsHandling/assignment.py | theprogrammingthinker/Python-practice | fef11a7fbd5082a0614b01f88a13ea29d68860bf | [
"Unlicense"
] | null | null | null | ErrorsAndExceptionsHandling/assignment.py | theprogrammingthinker/Python-practice | fef11a7fbd5082a0614b01f88a13ea29d68860bf | [
"Unlicense"
] | null | null | null | try:
for i in ['a', 'b', 'c']:
print(i ** 2)
except TypeError:
print("An error occured!")
x = 5
y = 0
try:
z = x /y
print(z)
except ZeroDivisionError:
print("Can't devide by zero")
finally:
print("All done")
def ask():
while True:
try:
val = int(input("Input an integer: "))
except:
print("An error occured! Please try again")
else:
break
print("Thank you, you number sqared is: ", val ** 2)
ask()
| 16.766667 | 56 | 0.516899 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.300199 |
80ee0d6b8414bcb69cd0dca69b5279de3f08e3fc | 3,846 | py | Python | ex1/owais/imu_exercise.py | balintmaci/drone_intro_exercises | 1d8b839fecd6b0c5e33210b9a88fd741a71034cc | [
"Unlicense"
] | null | null | null | ex1/owais/imu_exercise.py | balintmaci/drone_intro_exercises | 1d8b839fecd6b0c5e33210b9a88fd741a71034cc | [
"Unlicense"
] | null | null | null | ex1/owais/imu_exercise.py | balintmaci/drone_intro_exercises | 1d8b839fecd6b0c5e33210b9a88fd741a71034cc | [
"Unlicense"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# IMU exercise
# Copyright (c) 2015-2020 Kjeld Jensen kjen@mmmi.sdu.dk kj@kjen.dk
##### Insert initialize code below ###################
## Uncomment the file to read ##
fileName = 'imu_razor_data_static.txt'
#fileName = 'imu_razor_data_pitch_55deg.txt'
#fileName = 'imu_razor_data_roll_65deg.txt'
#fileName = 'imu_razor_data_yaw_90deg.txt'
## IMU type
#imuType = 'vectornav_vn100'
imuType = 'sparkfun_razor'
## Variables for plotting ##
showPlot = True
plotData = []
## Initialize your variables here ##
myValue = 0.0
######################################################
# import libraries
from math import pi, sqrt, atan2
import matplotlib.pyplot as plt
from scipy.signal import butter, lfilter, freqz #For Low pass filter
#####Filter function##################
def butter_lowpass(cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = butter(order, normal_cutoff, btype='low', analog=False)
return b, a
def butter_lowpass_filter(data, cutoff, fs, order=5):
b, a = butter_lowpass(cutoff, fs, order=order)
y = lfilter(b, a, data)
return y
######################################
###### Filter Parameters #############
order = 6
fs = 30.0 # sample rate, Hz
cutoff = 3.667
######################################
# open the imu data file
f = open (fileName, "r")
# initialize variables
count = 0
# looping through file
for line in f:
count += 1
# split the line into CSV formatted data
line = line.replace ('*',',') # make the checkum another csv value
csv = line.split(',')
# keep track of the timestamps
ts_recv = float(csv[0])
if count == 1:
ts_now = ts_recv # only the first time
ts_prev = ts_now
ts_now = ts_recv
if imuType == 'sparkfun_razor':
# import data from a SparkFun Razor IMU (SDU firmware)
acc_x = int(csv[2]) / 1000.0 * 4 * 9.82;
acc_y = int(csv[3]) / 1000.0 * 4 * 9.82;
acc_z = int(csv[4]) / 1000.0 * 4 * 9.82;
gyro_x = int(csv[5]) * 1/14.375 * pi/180.0;
gyro_y = int(csv[6]) * 1/14.375 * pi/180.0;
gyro_z = int(csv[7]) * 1/14.375 * pi/180.0;
elif imuType == 'vectornav_vn100':
# import data from a VectorNav VN-100 configured to output $VNQMR
acc_x = float(csv[9])
acc_y = float(csv[10])
acc_z = float(csv[11])
gyro_x = float(csv[12])
gyro_y = float(csv[13])
gyro_z = float(csv[14])
##### Insert loop code below #########################
# Variables available
# ----------------------------------------------------
# count Current number of updates
# ts_prev Time stamp at the previous update
# ts_now Time stamp at this update
# acc_x Acceleration measured along the x axis
# acc_y Acceleration measured along the y axis
# acc_z Acceleration measured along the z axis
# gyro_x Angular velocity measured about the x axis
# gyro_y Angular velocity measured about the y axis
# gyro_z Angular velocity measured about the z axis
## Insert your code here ##
#3.2.1
#myValue=atan2((acc_y),sqrt((pow(acc_x,2))+(pow(acc_z,2))))
#3.2.2
#myValue=atan2((-acc_x),sqrt(acc_z))
#3.2.3
#myValue=atan2((acc_y),sqrt((pow(acc_x,2))+(pow(acc_z,2))))
#myValue=atan2((-acc_x),sqrt(acc_z))
#3.2.4
#myValue=atan2((acc_y),sqrt((pow(acc_x,2))+(pow(acc_z,2))))
#3.3.1
#myValue= myValue + gyro_z*(ts_now-ts_prev)
#3.3.2
myValue= myValue + gyro_z*(ts_now-ts_prev)- (0.00045 * pi/180)
#3.3.3
#myValue = pitch # relevant for the first exercise, then change this.
# in order to show a plot use this function to append your value to a list:
plotData.append (myValue*180.0/pi)
#plotData2 = butter_lowpass_filter(plotData, cutoff, fs, order)
######################################################
# closing the file
f.close()
# show the plot
if showPlot == True:
plt.plot(plotData)
plt.savefig('imu_exercise_plot.png')
plt.show()
| 23.888199 | 76 | 0.619345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,322 | 0.603744 |
80ef3cf60e112d79c054dc1061b480da77a25354 | 1,148 | py | Python | theroot/users_bundle/models/address.py | Deviad/Adhesive | a7eb5140c4e5de783aca24ea935b3bf00a44f3e1 | [
"MIT"
] | null | null | null | theroot/users_bundle/models/address.py | Deviad/Adhesive | a7eb5140c4e5de783aca24ea935b3bf00a44f3e1 | [
"MIT"
] | null | null | null | theroot/users_bundle/models/address.py | Deviad/Adhesive | a7eb5140c4e5de783aca24ea935b3bf00a44f3e1 | [
"MIT"
] | null | null | null | from theroot.db import *
from theroot.users_bundle.models.user_info import address_user_table
class Address(db.Model):
__tablename__ = 'addresses'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
address_line = db.Column(db.String(255), unique=False, nullable=False)
zip = db.Column(db.String(255), unique=False, nullable=True)
country = db.Column(db.String(255), unique=False, nullable=False)
geohash = db.Column(db.String(255), unique=False, nullable=False)
user_info = db.relationship("UserInfo", secondary=address_user_table, back_populates="addresses")
def __init__(self, address, country, geohash, the_zip=None):
self.address_line = address
self.country = country
self.geohash = geohash
self.zip = the_zip
def __repr__(self):
return "<User (id='%r', address_line='%r', country='%r', geohash='%r', zip='%r', user_info='%r')>" \
% (self.id, self.address_line, self.country, self.geohash, self.zip, self.user_info)
def as_dict(self):
    """Map every mapped column name to its current value (JSON-friendly)."""
    result = {}
    for column in self.__table__.columns:
        result[column.name] = getattr(self, column.name)
    return result
80efd9b0c916dc081a320670389bb8f5cfdb867a | 76 | py | Python | aoc/day06_2.py | GitOnUp/Advent2021 | c9cd5a2d38a09389bdecac5f45be854da7aacee8 | [
"MIT"
] | null | null | null | aoc/day06_2.py | GitOnUp/Advent2021 | c9cd5a2d38a09389bdecac5f45be854da7aacee8 | [
"MIT"
] | null | null | null | aoc/day06_2.py | GitOnUp/Advent2021 | c9cd5a2d38a09389bdecac5f45be854da7aacee8 | [
"MIT"
] | null | null | null | from aoc.day06_1 import run
# Advent of Code day 6, part 2: same simulation as part 1 but for 256 days.
if __name__ == "__main__":
    print(run(256))
| 15.2 | 27 | 0.684211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.131579 |
80f11b3d5441eb3f837c986bb073d3b384465c04 | 36,516 | py | Python | pymatgen/io/abinitio/workflow.py | miaoliu/pymatgen | fe3c48ce3334924e6693f857aebc64b9714d1af2 | [
"MIT"
] | null | null | null | pymatgen/io/abinitio/workflow.py | miaoliu/pymatgen | fe3c48ce3334924e6693f857aebc64b9714d1af2 | [
"MIT"
] | null | null | null | pymatgen/io/abinitio/workflow.py | miaoliu/pymatgen | fe3c48ce3334924e6693f857aebc64b9714d1af2 | [
"MIT"
] | null | null | null | """
Abinit workflows
"""
from __future__ import division, print_function
import sys
import os
import os.path
import shutil
import abc
import collections
import functools
import numpy as np
from pprint import pprint
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.design_patterns import Enum, AttrDict
from pymatgen.core.physical_constants import Bohr2Ang, Ang2Bohr, Ha2eV, Ha_eV, Ha2meV
from pymatgen.serializers.json_coders import MSONable, json_pretty_dump
from pymatgen.io.smartio import read_structure
from pymatgen.util.num_utils import iterator_from_slice, chunks
from pymatgen.io.abinitio.task import task_factory, Task
from .utils import abinit_output_iscomplete, File
from .netcdf import GSR_Reader
from .abiobjects import Smearing, AbiStructure, KSampling, Electrons
from .pseudos import Pseudo, PseudoDatabase, PseudoTable, get_abinit_psp_dir
from .strategies import ScfStrategy
from .task import RunMode
#import logging
#logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo at gmail.com"
__status__ = "Development"
__date__ = "$Feb 21, 2013M$"
#__all__ = [
#]
################################################################################
def map_method(method):
    """Decorator: turn *method* into a function that, given an iterable,
    calls ``item.<method-name>(*args, **kwargs)`` on every item and returns
    the list of results."""
    @functools.wraps(method)
    def wrapped(iter_obj, *args, **kwargs):
        results = []
        for item in iter_obj:
            results.append(getattr(item, method.__name__)(*args, **kwargs))
        return results

    return wrapped
################################################################################
class Product(object):
    """
    A product is an output file generated by an AbinitTask that another task
    needs before it can start (e.g. the DEN file consumed by a NSCF run).
    """
    # TODO It would be nice to pass absolute paths to abinit with getden_path
    # so that symbolic links are not needed before running, but the presence
    # of the C-bindings complicates the implementation (gfortran SIGFAULTs
    # if strings are added to dataset_type!).
    #
    # Maps a file extension to the abinit input variable(s) telling the
    # dependent run to read that file.
    _ext2abivars = {
        "_DEN": {"irdden": 1},
        "_WFK": {"irdwfk": 1},
        "_SCR": {"irdscr": 1},
        "_QPS": {"irdqps": 1},
    }

    def __init__(self, ext, path):
        # ext: extension such as "_DEN"; path: location of the produced file.
        self.ext = ext
        self.file = File(path)

    def __str__(self):
        return "ext = %s, file = %s" % (self.ext, self.file)

    def get_filepath(self):
        """Path of the underlying file."""
        return self.file.path

    def get_abivars(self):
        """Copy of the abinit variables needed to read this product."""
        return self._ext2abivars[self.ext].copy()
class WorkLink(object):
    """
    This object describes the dependencies among the tasks contained in a Work instance.

    A WorkLink is a task that produces a list of products (files) that are
    reused by the other tasks belonging to a Work instance.
    One usually instantiates the object by calling work.register_task and produces_exts.
    Example:

        # Register the SCF task in work and get the link.
        scf_link = work.register_task(scf_strategy)
        # Register the NSCF calculation and its dependency on the SCF run.
        nscf_link = work.register_task(nscf_strategy, links=scf_link.produces_exts("_DEN"))
    """
    def __init__(self, task, exts=None):
        """
        Args:
            task:
                The task associated to the link.
            exts:
                Extensions of the output files that are needed for running the other tasks.
        """
        self._task = task

        self._products = []

        if exts is not None:
            # Accept a single extension string as well as a list of them.
            if isinstance(exts, str):
                exts = [exts,]

            for ext in exts:
                # Each product points at the task's output file for that extension.
                prod = Product(ext, task.odata_path_from_ext(ext))
                self._products.append(prod)

    def __str__(self):
        s = "%s: task %s with products\n %s" % (
            self.__class__.__name__, repr(self._task), "\n".join(str(p) for p in self.products))
        return s

    @property
    def products(self):
        # List of Product objects produced by the linked task.
        return self._products

    def produces_exts(self, exts):
        # Return a new link on the same task, restricted to the given extensions.
        return WorkLink(self._task, exts=exts)

    def get_abivars(self):
        """
        Returns a dictionary with the abinit variables that must
        be added to the input file in order to connect the two tasks.
        """
        abivars = {}
        for prod in self._products:
            abivars.update(prod.get_abivars())

        return abivars

    def get_filepaths_and_exts(self):
        """Returns the paths of the output files produced by self and their extensions."""
        filepaths = [prod.get_filepath() for prod in self._products]
        exts = [prod.ext for prod in self._products]

        return filepaths, exts

    @property
    def status(self):
        """The status of the link, equivalent to the task status."""
        return self._task.status
################################################################################
class WorkflowError(Exception):
    """Base class for the exceptions raised by Workflow objects."""
class BaseWorkflow(object):
    """
    Abstract base class for a collection of tasks. Subclasses must be
    iterable (yielding task objects) and implement ``processes`` and
    ``setup``. The process-control interface is modeled after
    subprocess.Popen, delegating to the tasks.
    """
    # Python-2 style metaclass declaration (file targets py2/py3 via __future__).
    __metaclass__ = abc.ABCMeta

    Error = WorkflowError

    # interface modeled after subprocess.Popen
    @abc.abstractproperty
    def processes(self):
        """Return a list of objects that support the subprocess.Popen protocol."""

    def poll(self):
        """
        Check if all child processes have terminated. Set and return
        returncode attribute.
        """
        return [task.poll() for task in self]

    def wait(self):
        """
        Wait for child processes to terminate. Set and return returncode
        attributes.
        """
        return [task.wait() for task in self]

    def communicate(self, input=None):
        """
        Interact with processes: Send data to stdin. Read data from stdout and
        stderr, until end-of-file is reached.
        Wait for process to terminate. The optional input argument should be a
        string to be sent to the child processes, or None, if no data should be
        sent to the children.

        communicate() returns a list of tuples (stdoutdata, stderrdata).
        """
        return [task.communicate(input) for task in self]

    @property
    def returncodes(self):
        """
        The children return codes, set by poll() and wait() (and indirectly by communicate()).
        A None value indicates that the process hasn't terminated yet.
        A negative value -N indicates that the child was terminated by signal N (Unix only).
        """
        return [task.returncode for task in self]

    @property
    def ncpus_reserved(self):
        """Returns the number of CPUs reserved in this moment."""
        ncpus = 0
        for task in self:
            # Count tasks that are submitted or running.
            if task.status in [task.S_SUB, task.S_RUN]:
                ncpus += task.tot_ncpus

        return ncpus

    def fetch_task_to_run(self):
        """
        Returns the first task that is ready to run or None if no task can be submitted at present.

        Raises StopIteration if all tasks are done.
        """
        for task in self:
            # The task is ready to run if its status is S_READY and all the other links (if any) are done!
            if (task.status == task.S_READY) and all([link_stat==task.S_DONE for link_stat in task.links_status]):
                return task

        # All the tasks are done so raise an exception that will be handled by the client code.
        if all([task.status == task.S_DONE for task in self]):
            raise StopIteration

        # No task found, this usually happens when we have dependencies. Beware of possible deadlocks here!
        return None

    @abc.abstractmethod
    def setup(self, *args, **kwargs):
        """Method called before submitting the calculations."""

    def _setup(self, *args, **kwargs):
        # Internal hook; simply dispatches to the subclass-provided setup().
        self.setup(*args, **kwargs)

    def get_results(self, *args, **kwargs):
        """
        Method called once the calculations complete.

        The base version returns a dictionary task_name : TaskResults for each task in self.
        """
        return WorkFlowResults(task_results={task.name: task.results for task in self})
##########################################################################################
class WorkFlowResults(dict, MSONable):
    """
    Dictionary used to store some of the results produced by the tasks of a
    workflow.

    It is expected to contain the key "task_results" (a dict mapping
    task name -> TaskResults) and always carries an internal list of
    exception strings under EXC_KEY.
    """
    _mandatory_keys = [
        "task_results",
    ]
    EXC_KEY = "_exceptions"

    def __init__(self, *args, **kwargs):
        super(WorkFlowResults, self).__init__(*args, **kwargs)

        # Make sure the exception list is always present.
        if self.EXC_KEY not in self:
            self[self.EXC_KEY] = []

    @property
    def exceptions(self):
        """List of strings with the exceptions recorded so far."""
        return self[self.EXC_KEY]

    def push_exceptions(self, *exceptions):
        """Register new exceptions (stringified), skipping duplicates."""
        for exc in exceptions:
            newstr = str(exc)
            if newstr not in self.exceptions:
                self[self.EXC_KEY] += [newstr,]

    def assert_valid(self):
        """
        Returns an empty list if results seem valid.

        The try assert except trick allows one to get a string with info on the exception.
        We use the += operator so that sub-classes can add their own message.
        """
        # Validate the results of each task.
        # Fix: the original iterated over the non-existent attribute
        # ``self.task_results``; "task_results" is a dict task_name -> TaskResults,
        # so we iterate over its values.
        # NOTE(review): assumes TaskResults.assert_valid() returns a list of
        # strings — verify against the Task implementation.
        for tres in self["task_results"].values():
            self[self.EXC_KEY] += tres.assert_valid()

        return self[self.EXC_KEY]

    @property
    def to_dict(self):
        """JSON-serializable dict representation (MSONable protocol)."""
        d = {k: v for k, v in self.items()}
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        return d

    @classmethod
    def from_dict(cls, d):
        """Reconstruct the object from its dict representation."""
        mydict = {k: v for k, v in d.items() if k not in ["@module", "@class",]}
        return cls(mydict)

    def json_dump(self, filename):
        """Save the results in JSON format to *filename*."""
        json_pretty_dump(self.to_dict, filename)

    @classmethod
    def json_load(cls, filename):
        """Reload the results from a file produced by json_dump.

        Fix: the original called the undefined name ``json_load``; the file is
        now read with the standard json module.
        """
        import json
        with open(filename) as fh:
            return cls.from_dict(json.load(fh))
##########################################################################################
class Workflow(BaseWorkflow, MSONable):
    """
    A Workflow is a list of (possibly connected) tasks, all sharing the same
    working directory and run mode.
    """
    Error = WorkflowError

    #@classmethod
    #def from_task(cls, task):
    #    "Build a Work instance from a task object"
    #    workdir, tail = os.path.dirname(task.workdir)
    #    new = cls(workdir, taks.runmode)
    #    new.register_task(task.input)
    #    return new

    def __init__(self, workdir, runmode, **kwargs):
        """
        Args:
            workdir:
                Path to the working directory.
            runmode:
                RunMode instance or string "sequential"
        """
        self.workdir = os.path.abspath(workdir)

        self.runmode = RunMode.asrunmode(runmode)

        self._kwargs = kwargs

        self._tasks = []

        # Dict with the dependencies of each task, indexed by task.id
        self._links_dict = collections.defaultdict(list)

    def __len__(self):
        return len(self._tasks)

    def __iter__(self):
        return self._tasks.__iter__()

    def chunks(self, chunk_size):
        """Yield successive chunks of tasks of length chunk_size."""
        for tasks in chunks(self, chunk_size):
            yield tasks

    def __getitem__(self, item):
        # Supports both integer indices and slices.
        # (Parameter renamed from ``slice``, which shadowed the builtin.)
        return self._tasks[item]

    def __repr__(self):
        return "<%s at %s, workdir = %s>" % (self.__class__.__name__, id(self), str(self.workdir))

    @property
    def to_dict(self):
        """JSON-serializable dict representation (MSONable protocol)."""
        d = {"workdir": self.workdir,
             "runmode": self.runmode.to_dict,
             "kwargs" : self._kwargs,
            }
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        return d

    @staticmethod
    def from_dict(d):
        """Reconstruct the workflow from its dict representation.

        Fix: the original returned ``Work(...)``, an undefined name.
        NOTE(review): "runmode" is serialized via RunMode.to_dict; this
        assumes RunMode.asrunmode accepts that representation — verify.
        """
        return Workflow(d["workdir"], d["runmode"], **d["kwargs"])

    @property
    def alldone(self):
        """True if all the tasks in the workflow have reached S_DONE."""
        return all([task.status == Task.S_DONE for task in self])

    @property
    def isnc(self):
        """True if norm-conserving calculation."""
        return all(task.isnc for task in self)

    @property
    def ispaw(self):
        """True if PAW calculation."""
        return all(task.ispaw for task in self)

    def path_in_workdir(self, filename):
        """Create the absolute path of filename in the working directory."""
        return os.path.join(self.workdir, filename)

    def setup(self, *args, **kwargs):
        """
        Method called before running the calculations.
        The default implementation is empty.
        """

    #def show_inputs(self, stream=sys.stdout):
    #    lines = []
    #    app = lines.append
    #
    #    width = 120
    #    for task in self:
    #        app("\n")
    #        app(repr(task))
    #        app("\ninput: %s" % task.input_file.path)
    #        app("\n")
    #        app(str(task.input))
    #        app(width*"=" + "\n")
    #
    #    stream.write("\n".join(lines))

    def register_task(self, strategy, links=()):
        """
        Registers a new task:

            - creates a new AbinitTask from the input strategy.
            - adds the new task to the internal list, taking into account possible dependencies.

        Returns: WorkLink object
        """
        task_id = len(self) + 1
        task_workdir = os.path.join(self.workdir, "task_" + str(task_id))

        # Handle possible dependencies: a single link is wrapped in a list.
        # NOTE(review): collections.Iterable is removed in Python >= 3.10
        # (use collections.abc.Iterable); kept for py2 compatibility here.
        if links:
            if not isinstance(links, collections.Iterable):
                links = [links,]

        # Create the new task (note the factory so that we create subclasses easily).
        task = task_factory(strategy, task_workdir, self.runmode, task_id=task_id, links=links)

        self._tasks.append(task)

        if links:
            self._links_dict[task_id].extend(links)
            # (typo "neeeds" fixed in the log message)
            print("task_id %s needs\n %s" % (task_id, [str(l) for l in links]))

        return WorkLink(task)

    def build(self, *args, **kwargs):
        """Creates the top level directory."""
        if not os.path.exists(self.workdir):
            os.makedirs(self.workdir)

    def get_status(self, only_highest_rank=False):
        """Get the status of the tasks in self (or only the highest one)."""
        status_list = [task.status for task in self]

        if only_highest_rank:
            return max(status_list)
        else:
            return status_list

    @property
    def processes(self):
        return [task.process for task in self]

    def rmtree(self, *args, **kwargs):
        """
        Remove all calculation files and directories.

        Keyword arguments:
            force: (False)
                Do not ask confirmation.
            verbose: (0)
                Print message if verbose is not zero.
        """
        if kwargs.pop('verbose', 0):
            print('Removing directory tree: %s' % self.workdir)

        shutil.rmtree(self.workdir)

    def move(self, dst, isabspath=False):
        """
        Recursively move self.workdir to another location. This is similar to the Unix "mv" command.
        The destination path must not already exist. If the destination already exists
        but is not a directory, it may be overwritten depending on os.rename() semantics.

        By default, dst is located in the parent directory of self.workdir, use isabspath=True
        to specify an absolute path.
        """
        if not isabspath:
            dst = os.path.join(os.path.dirname(self.workdir), dst)

        shutil.move(self.workdir, dst)

    def submit_tasks(self, *args, **kwargs):
        """
        Submits the tasks in self.
        """
        for task in self:
            task.start(*args, **kwargs)
            # FIXME: blocking wait after each submission defeats parallel runs.
            task.wait()

    def start(self, *args, **kwargs):
        """
        Start the work. Calls build and _setup first, then the tasks are submitted.
        Non-blocking call.
        """
        # Build dirs and files.
        self.build(*args, **kwargs)

        # Initial setup
        self._setup(*args, **kwargs)

        # Submit tasks (does not block)
        self.submit_tasks(*args, **kwargs)

    def read_etotal(self):
        """
        Reads the total energy from the GSR file produced by each task.

        Returns a list with the total energies in Hartree.
        Raises self.Error if some task has not completed yet.
        (The original docstring promised np.inf placeholders on read errors,
        but no such handling exists: exceptions propagate to the caller.)
        """
        if not self.alldone:
            raise self.Error("Some task is still in running/submitted state")

        etotal = []
        for task in self:
            # Open the GSR file and read etotal (Hartree)
            with GSR_Reader(task.odata_path_from_ext("_GSR")) as ncdata:
                etotal.append(ncdata.read_value("etotal"))

        return etotal
################################################################################
class IterativeWork(Workflow):
    """
    Workflow whose tasks are generated one at a time by a strategy generator;
    the iteration stops when exit_iteration() signals convergence or when
    max_niter is exceeded.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, workdir, runmode, strategy_generator, max_niter=25):
        """
        Args:
            workdir:
                Working directory.
            strategy_generator:
                Strategy generator.
            max_niter:
                Maximum number of iterations. A negative value or zero value
                is equivalent to having an infinite number of iterations.
        """
        super(IterativeWork, self).__init__(workdir, runmode)

        self.strategy_generator = strategy_generator

        self.max_niter = max_niter

    def next_task(self):
        """
        Generate and register a new task.

        Return: task object
        """
        try:
            next_strategy = next(self.strategy_generator)
        except StopIteration:
            # Generator exhausted: propagate so submit_tasks stops the loop.
            raise StopIteration

        self.register_task(next_strategy)
        # Sanity check: one task registered per iteration.
        assert len(self) == self.niter

        return self[-1]

    def submit_tasks(self, *args, **kwargs):
        """
        Run the tasks till self.exit_iteration says to exit or the number of iterations exceeds self.max_niter.

        Return dictionary with the final results
        """
        self.niter = 1

        while True:
            # max_niter <= 0 means "iterate forever".
            if self.max_niter > 0 and self.niter > self.max_niter:
                print("niter %d > max_niter %d" % (self.niter, self.max_niter))
                break

            try:
                task = self.next_task()
            except StopIteration:
                break

            # Start the task and block till completion.
            task.start(*args, **kwargs)
            task.wait()

            data = self.exit_iteration(*args, **kwargs)

            if data["exit"]:
                break

            self.niter += 1

    @abc.abstractmethod
    def exit_iteration(self, *args, **kwargs):
        """
        Return a dictionary with the results produced at the given iteration.
        The dictionary must contain an entry "exit" that evaluates to
        True if the iteration should be stopped.
        """
##########################################################################################
def strictly_increasing(values):
    """True if every element is strictly greater than its predecessor."""
    for prev, cur in zip(values, values[1:]):
        if prev >= cur:
            return False
    return True
def strictly_decreasing(values):
    """True if every element is strictly smaller than its predecessor."""
    for prev, cur in zip(values, values[1:]):
        if prev <= cur:
            return False
    return True
def non_increasing(values):
    """True if no element is greater than its predecessor (ties allowed)."""
    for prev, cur in zip(values, values[1:]):
        if prev < cur:
            return False
    return True
def non_decreasing(values):
    """True if no element is smaller than its predecessor (ties allowed)."""
    for prev, cur in zip(values, values[1:]):
        if prev > cur:
            return False
    return True
def monotonic(values, mode="<", atol=1.e-8):
    """
    Returns False if values are not monotonic (decreasing|increasing).
    mode is "<" for a decreasing sequence, ">" for an increasing sequence.
    Two numbers are considered equal if they differ less than atol.

    .. warning:
        Not very efficient for large data sets.

    >>> values = [1.2, 1.3, 1.4]
    >>> monotonic(values, mode="<")
    False
    >>> monotonic(values, mode=">")
    True
    """
    # A single value is trivially monotonic (checked before mode validation,
    # matching the original behavior).
    if len(values) == 1:
        return True

    if mode == ">":
        def violates(prev, cur):
            return cur <= prev
    elif mode == "<":
        def violates(prev, cur):
            return cur >= prev
    else:
        raise ValueError("Wrong mode %s" % mode)

    for prev, cur in zip(values, values[1:]):
        # Pairs closer than atol are treated as equal and never violate.
        if abs(cur - prev) > atol and violates(prev, cur):
            return False

    return True
def check_conv(values, tol, min_numpts=1, mode="abs", vinf=None):
    """
    Given a list of values and a tolerance tol, returns the leftmost index for which

        abs(value[i] - vinf) < tol if mode == "abs"

    or

        abs(value[i] - vinf) / vinf < tol if mode == "rel"

    returns -1 if convergence is not achieved. By default, vinf = values[-1].

    Args:
        tol:
            Tolerance
        min_numpts:
            Minimum number of points that must be converged.
        mode:
            "abs" for absolute convergence, "rel" for relative convergence.
        vinf:
            Used to specify an alternative value instead of values[-1].
    """
    if vinf is None:
        vinf = values[-1]

    if mode == "abs":
        deltas = [abs(v - vinf) for v in values]
    elif mode == "rel":
        deltas = [abs(v - vinf) / vinf for v in values]
    else:
        raise ValueError("Wrong mode %s" % mode)

    numpts = len(deltas)
    i = -2

    if numpts > min_numpts and deltas[-2] < tol:
        # Walk backwards and stop at the rightmost non-converged point
        # (0 if every point is within tolerance — matching the original).
        i = next((j for j in range(numpts - 1, -1, -1) if deltas[j] > tol), 0)
        if (numpts - i - 1) < min_numpts:
            i = -2

    return i + 1
def compute_hints(ecut_list, etotal, atols_mev, pseudo, min_numpts=1, stream=sys.stdout):
    """
    Analyze the convergence of etotal(ecut) and suggest three cutoff
    energies (low/normal/high accuracy) for the given pseudopotential.

    Args:
        ecut_list: Cutoff energies in Hartree, one per entry in etotal.
        etotal: Total energies in Hartree; the last value is taken as the
            converged reference.
        atols_mev: Absolute tolerances in meV, ordered (low, normal, high).
        pseudo: Pseudopotential object; only .name and .path are used here.
        min_numpts: Minimum number of converged points required for the
            high-accuracy hint (see check_conv).
        stream: If not None, a human-readable convergence table is written to it.

    Returns:
        dict with the hints and the raw convergence data; data["exit"] is
        True only if the high-accuracy tolerance was reached.
    """
    # Convert the tolerances from meV to Hartree.
    de_low, de_normal, de_high = [a / (1000 * Ha_eV) for a in atols_mev]

    num_ene = len(etotal)
    etotal_inf = etotal[-1]

    # Leftmost converged index for each accuracy level (-1 if never converged).
    ihigh = check_conv(etotal, de_high, min_numpts=min_numpts)
    inormal = check_conv(etotal, de_normal)
    ilow = check_conv(etotal, de_low)

    accidx = {"H": ihigh, "N": inormal, "L": ilow}

    table = []
    app = table.append

    app(["iter", "ecut", "etotal", "et-e_inf [meV]", "accuracy",])
    for idx, (ec, et) in enumerate(zip(ecut_list, etotal)):
        line = "%d %.1f %.7f %.3f" % (idx, ec, et, (et-etotal_inf)* Ha_eV * 1.e+3)
        # Tag the row with the accuracy letters whose hint lands on this index.
        row = line.split() + ["".join(c for c,v in accidx.items() if v == idx)]
        app(row)

    if stream is not None:
        from pymatgen.util.string_utils import pprint_table
        stream.write("pseudo: %s\n" % pseudo.name)
        pprint_table(table, out=stream)

    ecut_high, ecut_normal, ecut_low = 3 * (None,)
    # (local name shadows the builtin ``exit``; harmless here)
    exit = (ihigh != -1)

    if exit:
        ecut_low = ecut_list[ilow]
        ecut_normal = ecut_list[inormal]
        ecut_high = ecut_list[ihigh]

    # Norm-conserving pseudos have no PAW augmentation: a single ratio of 1.
    aug_ratios = [1,]
    aug_ratio_low, aug_ratio_normal, aug_ratio_high = 3 * (1,)

    data = {
        "exit"       : ihigh != -1,
        "etotal"     : list(etotal),
        "ecut_list"  : ecut_list,
        "aug_ratios" : aug_ratios,
        "low"        : {"ecut": ecut_low, "aug_ratio": aug_ratio_low},
        "normal"     : {"ecut": ecut_normal, "aug_ratio": aug_ratio_normal},
        "high"       : {"ecut": ecut_high, "aug_ratio": aug_ratio_high},
        "pseudo_name": pseudo.name,
        "pseudo_path": pseudo.path,
        "atols_mev"  : atols_mev,
        "dojo_level" : 0,
    }

    return data
##########################################################################################
def plot_etotal(ecut_list, etotals, aug_ratios, show=True, savefig=None, *args, **kwargs):
    """
    Uses Matplotlib to plot the energy curve as function of ecut.

    Args:
        ecut_list:
            List of cutoff energies
        etotals:
            Total energies in Hartree, see aug_ratios
        aug_ratios:
            List of augmentation ratios. [1,] for norm-conserving, [4, ...] for PAW
            The number of elements in aug_ratios must equal the number of (sub)lists
            in etotals. Example:

                - NC: etotals = [3.4, 4,5 ...], aug_ratios = [1,]
                - PAW: etotals = [[3.4, ...], [3.6, ...]], aug_ratios = [4,6]

        show:
            True to show the figure
        savefig:
            'abc.png' or 'abc.eps' to save the figure to a file.
    """
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)

    npts = len(ecut_list)

    if len(aug_ratios) != 1 and len(aug_ratios) != len(etotals):
        raise ValueError("The number of sublists in etotal must equal the number of aug_ratios")

    # Normalize the NC case to a list with one sublist of energies.
    if len(aug_ratios) == 1:
        etotals = [etotals,]

    lines, legends = [], []

    emax = -np.inf
    for (aratio, etot) in zip(aug_ratios, etotals):
        emev = Ha2meV(etot)
        emev_inf = npts * [emev[-1]]
        # NOTE(review): assumes Ha2meV returns a numpy array so that this
        # subtraction broadcasts; a plain list would raise TypeError — verify.
        yy = emev - emev_inf

        emax = max(emax, np.max(yy))

        line, = ax.plot(ecut_list, yy, "-->", linewidth=3.0, markersize=10)

        lines.append(line)
        legends.append("aug_ratio = %s" % aratio)

    ax.legend(lines, legends, 'upper right', shadow=True)

    # Set xticks and labels.
    ax.grid(True)
    ax.set_xlabel("Ecut [Ha]")
    ax.set_ylabel("$\Delta$ Etotal [meV]")
    ax.set_xticks(ecut_list)

    #ax.yaxis.set_view_interval(-10, emax + 0.01 * abs(emax))
    ax.yaxis.set_view_interval(-10, 20)

    ax.set_title("$\Delta$ Etotal Vs Ecut")

    if show:
        plt.show()

    if savefig is not None:
        fig.savefig(savefig)
##########################################################################################
class PseudoConvergence(Workflow):
    """
    Non-iterative ecut convergence study for a single pseudopotential:
    one SCF task per cutoff in ecut_list, all registered up front.
    """

    def __init__(self, workdir, pseudo, ecut_list, atols_mev,
                 runmode="sequential", spin_mode="polarized", acell=(8, 9, 10), smearing="fermi_dirac:0.1 eV",):

        super(PseudoConvergence, self).__init__(workdir, runmode)

        # Temporary object used to build the strategy.
        generator = PseudoIterativeConvergence(workdir, pseudo, ecut_list, atols_mev,
                                               spin_mode = spin_mode,
                                               acell = acell,
                                               smearing = smearing,
                                               max_niter = len(ecut_list),
                                              )
        self.atols_mev = atols_mev
        self.pseudo = Pseudo.aspseudo(pseudo)

        self.ecut_list = []
        for ecut in ecut_list:
            strategy = generator.strategy_with_ecut(ecut)
            self.ecut_list.append(ecut)
            self.register_task(strategy)

    def get_results(self, *args, **kwargs):
        """Collect the task results, compute the ecut hints and plot etotal(ecut)."""
        # Get the results of the tasks.
        wf_results = super(PseudoConvergence, self).get_results()

        etotal = self.read_etotal()
        data = compute_hints(self.ecut_list, etotal, self.atols_mev, self.pseudo)

        plot_etotal(data["ecut_list"], data["etotal"], data["aug_ratios"],
                    show=False, savefig=self.path_in_workdir("etotal.pdf"))

        wf_results.update(data)

        # E(ecut) should decrease as ecut grows; flag it otherwise.
        if not monotonic(etotal, mode="<", atol=1.0e-5):
            print("E(ecut) is not decreasing")
            wf_results.push_exceptions("E(ecut) is not decreasing")

        if kwargs.get("json_dump", True):
            wf_results.json_dump(self.path_in_workdir("results.json"))

        return wf_results
class PseudoIterativeConvergence(IterativeWork):
    """
    Iterative ecut convergence study for a single (norm-conserving)
    pseudopotential: SCF tasks are generated one cutoff at a time until
    compute_hints reports convergence or max_niter is reached.
    """

    def __init__(self, workdir, pseudo, ecut_list_or_slice, atols_mev,
                 runmode="sequential", spin_mode="polarized", acell=(8, 9, 10),
                 smearing="fermi_dirac:0.1 eV", max_niter=50,):
        """
        Args:
            workdir:
                Working directory.
            pseudo:
                string or Pseudo instance
            ecut_list_or_slice:
                List of cutoff energies or slice object (mainly used for infinite iterations).
            atols_mev:
                List of absolute tolerances in meV (3 entries corresponding to accuracy ["low", "normal", "high"]
            runmode:
                RunMode instance or string "sequential".
            spin_mode:
                Defines how the electronic spin will be treated.
            acell:
                Lengths of the periodic box in Bohr.
            smearing:
                Smearing instance or string in the form "mode:tsmear". Default: FermiDirac with T=0.1 eV
            max_niter:
                Maximum number of iterations (see IterativeWork).
        """
        self.pseudo = Pseudo.aspseudo(pseudo)

        self.atols_mev = atols_mev
        self.spin_mode = spin_mode
        self.smearing = Smearing.assmearing(smearing)
        self.acell = acell

        # A slice yields an open-ended sequence of cutoffs; a list is finite.
        if isinstance(ecut_list_or_slice, slice):
            self.ecut_iterator = iterator_from_slice(ecut_list_or_slice)
        else:
            self.ecut_iterator = iter(ecut_list_or_slice)

        # Construct a generator that returns strategy objects.
        def strategy_generator():
            for ecut in self.ecut_iterator:
                yield self.strategy_with_ecut(ecut)

        super(PseudoIterativeConvergence, self).__init__(
            workdir, runmode, strategy_generator(), max_niter=max_niter)

        if not self.isnc:
            raise NotImplementedError("PAW convergence tests are not supported yet")

    def strategy_with_ecut(self, ecut):
        """Return a Strategy instance with given cutoff energy ecut."""
        # Define the system: one atom in a box of lengths acell.
        boxed_atom = AbiStructure.boxed_atom(self.pseudo, acell=self.acell)

        # Gamma-only sampling.
        gamma_only = KSampling.gamma_only()

        # Don't write WFK files.
        # (Fix: dropped the unused local ``electrons``; ScfStrategy receives
        # spin_mode and smearing directly.)
        extra_abivars = {
            "ecut" : ecut,
            "prtwf": 0,
        }
        strategy = ScfStrategy(boxed_atom, self.pseudo, gamma_only,
                               spin_mode=self.spin_mode, smearing=self.smearing,
                               charge=0.0, scf_algorithm=None,
                               use_symmetries=True, **extra_abivars)

        return strategy

    @property
    def ecut_list(self):
        """The list of cutoff energies computed so far."""
        return [float(task.strategy.ecut) for task in self]

    def check_etotal_convergence(self, *args, **kwargs):
        """Run the convergence analysis on the energies computed so far."""
        return compute_hints(self.ecut_list, self.read_etotal(), self.atols_mev,
                             self.pseudo)

    def exit_iteration(self, *args, **kwargs):
        # Fix: the original called self.check_etotal_convergence(self, ...),
        # passing a spurious extra positional argument to a bound method.
        return self.check_etotal_convergence(*args, **kwargs)

    def get_results(self, *args, **kwargs):
        """Collect the task results, compute the ecut hints and plot etotal(ecut)."""
        # Get the results of the tasks.
        wf_results = super(PseudoIterativeConvergence, self).get_results()

        data = self.check_etotal_convergence()

        plot_etotal(data["ecut_list"], data["etotal"], data["aug_ratios"],
                    show=False, savefig=self.path_in_workdir("etotal.pdf"))

        wf_results.update(data)

        # E(ecut) should decrease as ecut grows; flag it otherwise.
        if not monotonic(data["etotal"], mode="<", atol=1.0e-5):
            print("E(ecut) is not decreasing")
            wf_results.push_exceptions("E(ecut) is not decreasing")

        if kwargs.get("json_dump", True):
            wf_results.json_dump(self.path_in_workdir("results.json"))

        return wf_results
################################################################################
class BandStructure(Workflow):
    """
    Workflow for a band-structure calculation: a ground-state SCF run
    followed by a NSCF run (and, optionally, a DOS run), both of which
    read the SCF density file.
    """

    def __init__(self, workdir, runmode, scf_strategy, nscf_strategy,
                 dos_strategy=None):
        super(BandStructure, self).__init__(workdir, runmode)

        # Ground-state SCF run: produces the density file (_DEN).
        gs_link = self.register_task(scf_strategy)

        # NSCF band-structure run, depending on the SCF density.
        self.register_task(nscf_strategy, links=gs_link.produces_exts("_DEN"))

        # Optional DOS computation, also reading the SCF density.
        if dos_strategy is not None:
            self.register_task(dos_strategy,
                               links=gs_link.produces_exts("_DEN"))
################################################################################
class Relaxation(Workflow):
    """Workflow with a single structural-relaxation task."""

    def __init__(self, workdir, runmode, relax_strategy):
        """
        Args:
            workdir: Working directory.
            runmode: RunMode instance or string "sequential".
            relax_strategy: Strategy for the relaxation run.
        """
        super(Relaxation, self).__init__(workdir, runmode)

        # No other task depends on the relaxation, so the WorkLink returned
        # by register_task is not kept (the original bound it to an unused
        # local variable).
        self.register_task(relax_strategy)
################################################################################
class DeltaTest(Workflow):
    """
    Workflow for the delta-factor test: SCF total-energy calculations on a
    set of isotropically scaled volumes (90% to 110% of V0 in 2% steps),
    followed by a Murnaghan EOS fit in get_results.
    """

    def __init__(self, workdir, runmode, structure_or_cif, pseudos, kppa,
                 spin_mode="polarized", smearing="fermi_dirac:0.1 eV",
                 accuracy="normal",
                 ecut=None, ecutsm=0.05, chksymbreak=0): # FIXME Hack

        super(DeltaTest, self).__init__(workdir, runmode)

        if isinstance(structure_or_cif, Structure):
            structure = structure_or_cif
        else:
            # Assume CIF file
            structure = read_structure(structure_or_cif)

        structure = AbiStructure.asabistructure(structure)

        smearing = Smearing.assmearing(smearing)

        self._input_structure = structure

        v0 = structure.volume

        # Volumes from 90% to 110% of v0 in steps of 2%.
        self.volumes = v0 * np.arange(90, 112, 2) / 100.

        for vol in self.volumes:
            # Rescale the lattice to the target volume, keeping fractional coords.
            new_lattice = structure.lattice.scale(vol)

            new_structure = Structure(new_lattice, structure.species,
                                      structure.frac_coords)

            new_structure = AbiStructure.asabistructure(new_structure)

            extra_abivars = {
                "ecutsm": ecutsm,
                "prtwf" : 0,
            }
            if ecut is not None:
                extra_abivars.update({"ecut": ecut})

            ksampling = KSampling.automatic_density(new_structure, kppa,
                                                    chksymbreak=chksymbreak)

            scf_strategy = ScfStrategy(new_structure, pseudos, ksampling,
                                       accuracy=accuracy, spin_mode=spin_mode,
                                       smearing=smearing, **extra_abivars)

            self.register_task(scf_strategy)

    def get_results(self, *args, **kwargs):
        """Collect the energies, fit the Murnaghan EOS and dump the delta data."""
        num_sites = self._input_structure.num_sites

        # Total energies converted to eV.
        etotal = Ha2eV(self.read_etotal())

        wf_results = super(DeltaTest, self).get_results()

        wf_results.update({
            "etotal"    : list(etotal),
            "volumes"   : list(self.volumes),
            "natom"     : num_sites,
            "dojo_level": 1,
        })

        from .eos import EOS
        try:
            eos_fit = EOS.Murnaghan().fit(self.volumes, etotal)
            print(eos_fit)

            eos_fit.plot(show=False, savefig=self.path_in_workdir("eos.pdf"))

            wf_results.update({
                "v0": eos_fit.v0,
                "b" : eos_fit.b,
                "bp": eos_fit.bp,
            })

        except EOS.Error as exc:
            # Record the fit failure instead of aborting the whole workflow.
            wf_results.push_exceptions(exc)

        if kwargs.get("json_dump", True):
            wf_results.json_dump(self.path_in_workdir("results.json"))

        # Write data for the computation of the delta factor
        with open(self.path_in_workdir("deltadata.txt"), "w") as fh:
            fh.write("# Volume/natom [Ang^3] Etotal/natom [eV]\n")
            for (v, e) in zip(self.volumes, etotal):
                fh.write("%s %s\n" % (v/num_sites, e/num_sites))

        return wf_results
################################################################################
class GW_Workflow(Workflow):
    """
    Workflow for one-shot GW calculations:
    SCF -> NSCF (WFK) -> screening (SCR) -> self-energy.
    """

    def __init__(self, workdir, runmode, scf_strategy, nscf_strategy,
                 scr_strategy, sigma_strategy):
        """
        Args:
            workdir:
                Working directory of the calculation.
            runmode:
                Run mode.
            scf_strategy:
                SCFStrategy instance
            nscf_strategy:
                NSCFStrategy instance
            scr_strategy:
                Strategy for the screening run.
            sigma_strategy:
                Strategy for the self-energy run.
        """
        super(GW_Workflow, self).__init__(workdir, runmode)

        # Ground-state SCF run, producing the density file.
        gs_link = self.register_task(scf_strategy)

        # NSCF run generating the wavefunctions from the SCF density.
        wfk_link = self.register_task(nscf_strategy,
                                      links=gs_link.produces_exts("_DEN"))

        # Screening run, reading the NSCF wavefunctions.
        scr_link = self.register_task(scr_strategy,
                                      links=wfk_link.produces_exts("_WFK"))

        # Self-energy run, reading both the wavefunctions and the screening.
        self.register_task(sigma_strategy,
                           links=[wfk_link.produces_exts("_WFK"),
                                  scr_link.produces_exts("_SCR"),])
################################################################################
| 32.172687 | 126 | 0.578322 | 27,467 | 0.752191 | 1,896 | 0.051922 | 2,968 | 0.081279 | 0 | 0 | 13,936 | 0.381641 |
80f13a81a491ee548192c6197ec3cfb3667be23d | 65 | py | Python | mdstudio/mdstudio/deferred/__init__.py | NLeSC/LIEStudio | 03c163b4a2590b4e2204621e1c941c28a9624887 | [
"Apache-2.0"
] | 10 | 2017-09-14T07:26:15.000Z | 2021-04-01T09:33:03.000Z | mdstudio/mdstudio/deferred/__init__.py | NLeSC/LIEStudio | 03c163b4a2590b4e2204621e1c941c28a9624887 | [
"Apache-2.0"
] | 117 | 2017-09-13T08:09:48.000Z | 2019-10-03T12:19:13.000Z | mdstudio/mdstudio/deferred/__init__.py | NLeSC/LIEStudio | 03c163b4a2590b4e2204621e1c941c28a9624887 | [
"Apache-2.0"
] | 1 | 2018-09-26T09:40:51.000Z | 2018-09-26T09:40:51.000Z | __all__ = ['chainable', 'make_deferred', "return_value", 'lock']
| 32.5 | 64 | 0.692308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.707692 |
80f142bd0ab437969db559a95c9f06b07679a259 | 1,440 | py | Python | setup.py | TheMatjaz/RangeForce | 906b0e303d2e4eecac75dc4680f7d9b1a86bd79c | [
"BSD-3-Clause"
] | null | null | null | setup.py | TheMatjaz/RangeForce | 906b0e303d2e4eecac75dc4680f7d9b1a86bd79c | [
"BSD-3-Clause"
] | null | null | null | setup.py | TheMatjaz/RangeForce | 906b0e303d2e4eecac75dc4680f7d9b1a86bd79c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © 2019-2020, Matjaž Guštin <dev@matjaz.it> <https://matjaz.it>.
# Released under the BSD 3-Clause License
"""Package setup for the Rangeforce library."""
from distutils.core import setup
# noinspection PyUnresolvedReferences
import setuptools
# Package metadata gathered in one mapping, then splatted into setup().
# Field values are identical to the original inline call.
_metadata = dict(
    name='Rangeforce',
    version='1.1.0',
    description='Developer-friendly range checks with user-friendly error '
                'messages',
    long_description=open('README.md').read(),
    long_description_content_type='text/markdown',
    author='Matjaž Guštin',
    author_email='dev@matjaz.it',
    url='https://github.com/TheMatjaz/Rangeforce',
    license='BSD',
    py_modules=[
        'rangeforce',
    ],
    keywords=[
        'range',
        'domain',
        'limited',
        'validation',
        'user-input',
        'friendly',
        'understandable',
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Topic :: Software Development',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    python_requires='>=3',
)
setup(**_metadata)
| 28.235294 | 75 | 0.603472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 913 | 0.631834 |
80f14f64278b66615ac972c541b233272625a4d5 | 6,079 | py | Python | dataset/Dataset Processing/data_processing.py | Eashwar-S/Icy-Road-Website | f07bf56212e36e786ffaea3c67f35fd301600c47 | [
"MIT"
] | null | null | null | dataset/Dataset Processing/data_processing.py | Eashwar-S/Icy-Road-Website | f07bf56212e36e786ffaea3c67f35fd301600c47 | [
"MIT"
] | null | null | null | dataset/Dataset Processing/data_processing.py | Eashwar-S/Icy-Road-Website | f07bf56212e36e786ffaea3c67f35fd301600c47 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[10]:
import os
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import random as rd
# In[11]:
def readFile(folderPath):
    """Read a text file and return its lines (trailing newlines kept)."""
    with open(folderPath, 'r') as handle:
        return handle.readlines()
# In[12]:
def fillInfoFromContents(fileContents, info):
    """Parse one instance file (given as a list of lines) into ``info``.

    Lines are identified by position: 0 = instance name, 1 = node count,
    2 = required-edge count, 3 = non-required-edge count, 6 = capacity,
    9 = depot node list.  One entry is appended to each list in ``info``.
    """
    for line_no, line in enumerate(fileContents):
        tokens = line.split()
        numbers = [int(tok) for tok in tokens if tok.isdigit()]
        if line_no == 0:
            info['Instance Name'].append(tokens[2])
        elif line_no == 1:
            info['Number of Nodes'].append(numbers[0])
        elif line_no == 2:
            info['Required Edges'].append(numbers[0])
        elif line_no == 3:
            # Non-required edge count; combined below with the required
            # count to give the total number of edges.
            noreq_count = numbers[0]
        elif line_no == 6:
            info['Capacity'].append(numbers[0])
        elif line_no == 9:
            info['Depot Nodes'].append(numbers)
            info['Number of Depot Nodes'].append(len(numbers))
            info['Number of Edges'].append(noreq_count + info['Required Edges'][-1])
# In[13]:
def readAndStoreInstanceInfo(folderPath):
    """Scan ``folderPath`` for instance .txt files, summarise them into a
    DataFrame, write "DeArmon_dataset_info.csv" and return the raw info
    dict of parallel lists."""
    columns = ['Instance Name', 'Number of Nodes', 'Number of Edges',
               'Required Edges', 'Capacity', 'Number of Depot Nodes',
               'Depot Nodes']
    info = {column: [] for column in columns}
    for entry in os.listdir(folderPath):
        if entry.endswith(".txt"):
            contents = readFile(f"{folderPath}/{entry}")
            fillInfoFromContents(contents, info)
    df = pd.DataFrame(data=info, columns=columns)
    print(df.columns)
    df.to_csv("DeArmon_dataset_info.csv")
    # NOTE(review): sort_values is not in-place and its result is
    # discarded, so this line has no effect (kept for behaviour parity).
    df.sort_values(by='Number of Edges', ascending=False)
    return info
# In[14]:
def createGraphfromFile(file, info, index):
    """Extract edge data from an instance file.

    Parses the integer triples found between the "LIST_REQ_EDGES :" and
    "LIST_NOREQ_EDGES :" marker lines and returns three parallel lists:
    (start nodes, end nodes, edge weights).
    """
    fileContents = readFile(file)
    markers = ["LIST_REQ_EDGES :\n", "LIST_NOREQ_EDGES :\n"]
    marker_idx = 0
    collecting = False
    startNode = []
    endNode = []
    edgeWeight = []
    for line in fileContents:
        # Second marker while collecting terminates the edge section.
        if collecting and line == markers[marker_idx]:
            collecting = False
            break
        if collecting:
            numbers = [int(piece) for token in line.split()
                       for piece in token.split(",") if piece.isdigit()]
            startNode.append(numbers[0])
            endNode.append(numbers[1])
            edgeWeight.append(numbers[2])
        if line == markers[marker_idx]:
            collecting = True
            marker_idx += 1
    # Kept for parity with the original implementation; this list is not
    # part of the return value.
    requiredEdges = []
    for i in range(info['Required Edges'][index]):
        requiredEdges.append([startNode[i], endNode[i]])
    return startNode, endNode, edgeWeight
# In[15]:
def plotGraph(depotNodes ,requiredEdges, numNodes, s, t, weights, show=True):
    """Draw the instance graph with networkx/matplotlib.

    Nodes 1..numNodes are created explicitly, depot nodes are recolored
    green, required edges are overdrawn in red, and edge weights are
    rendered as labels.  Returns (graph, layout positions, node colors,
    depot node colors, weighted edge list).
    """
    G = nx.Graph()
    # Build the weighted edge list from the three parallel lists.
    edges = []
    for i in range(len(s)):
        edges.append((s[i], t[i], weights[i]))
    for i in range(1, numNodes+1):
        G.add_node(i)
    pos = nx.spring_layout(G)
    node_color = ['y']*int(G.number_of_nodes())
    # NOTE(review): this aliases node_color rather than copying it, so the
    # depot recoloring below also mutates node_color and both draw calls
    # render identical colors — confirm whether a copy was intended.
    depot_node_color = node_color
    for i in range(1, len(node_color)+1):
        if i in depotNodes:
            depot_node_color[i-1] = 'g'
    G.add_weighted_edges_from(edges)
    labels = nx.get_edge_attributes(G,'weight')
    nx.draw_networkx(G,pos, node_color = node_color)
    nx.draw_networkx(G,pos, node_color = depot_node_color)
    # Highlight the required edges on top of the base drawing.
    nx.draw_networkx_edges(G, pos, edgelist=requiredEdges, width=3, alpha=0.5,
                           edge_color="r")
    nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)
    if show:
        plt.figure(1)
        plt.show()
    return G,pos, node_color, depot_node_color, edges
# In[16]:
def creatingIcyRoadInstance(file, info, index, startNode, endNode, edgeWeight):
    """Randomly derive an "icy road" variant of a CARP instance.

    Roughly a fifth of the nodes become depots and roughly a third of the
    edge indices become required, chosen uniformly at random (output
    varies between calls).  Returns the plotted graph data plus the new
    depots, the new required edges, a vehicle capacity of twice the
    maximum edge weight, and the node count.
    """
    newDepotNodes = []
    requiredEdgeIndexes = []
    newRequiredEdges = []
    count = 0
    # Sample (numNodes // 5) + 1 distinct depot nodes.
    while count <= (info['Number of Nodes'][index]//5):
        node = rd.randint(1, info['Number of Nodes'][index])
        if node not in newDepotNodes:
            newDepotNodes.append(node)
            count += 1
    count = 0
    # Sample (numEdges // 3) + 1 distinct required-edge indices.
    while count <= (info['Number of Edges'][index]//3):
        edge = rd.randint(0, info['Number of Edges'][index])
        if edge not in requiredEdgeIndexes:
            requiredEdgeIndexes.append(edge)
            count += 1
    # Translate the sampled indices into concrete [start, end] edges.
    for i in range(info['Number of Edges'][index]):
        if i in requiredEdgeIndexes:
            newRequiredEdges.append([startNode[i], endNode[i]])
    G,pos, node_color, depot_node_color, edges = plotGraph(newDepotNodes, newRequiredEdges, info['Number of Nodes'][index], startNode, endNode, edgeWeight)
    # plt.savefig('../IcyRoad Instances from DeArmon\icy_road_' + info['Instance Name'][index] + '.png')
    # plt.show()
    return G,pos, node_color, depot_node_color, edges, newDepotNodes, newRequiredEdges, 2*max(edgeWeight), G.number_of_nodes()
# In[25]:
# def createGraph(inputType = 'txt'):
# # folderPath = '../CARP_datasets/DeArmon_gdb-IF'
# # for i, file in enumerate(os.listdir(folderPath)):
# # if file.endswith(".txt"):
# # file_path = f"{folderPath}/{file}"
# file_path = '../CARP_datasets/DeArmon_gdb-IF/gdb-IF-01.txt'
# info = readAndStoreInstanceInfo('../../../CARP_datasets/DeArmon_gdb-IF')
# startNode, endNode, edgeWeight = createGraphfromFile(file_path, info, 0)
# G, depotNodes, requiredNodes, vehicleCapacity, numNodes = creatingIcyRoadInstance(file_path, info, 0, startNode, endNode, edgeWeight)
# return G, depotNodes, requiredNodes, vehicleCapacity, numNodes
# In[ ]:
| 33.772222 | 155 | 0.611614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,521 | 0.250206 |
80f1fcacc7763df5460142961dd40a1c5c44d6a2 | 160 | py | Python | settings.py | ErikRichardS/mnist-ann | 3fb34a25ec41177d34445d2ccda6cf42b7d4175e | [
"MIT"
] | null | null | null | settings.py | ErikRichardS/mnist-ann | 3fb34a25ec41177d34445d2ccda6cf42b7d4175e | [
"MIT"
] | null | null | null | settings.py | ErikRichardS/mnist-ann | 3fb34a25ec41177d34445d2ccda6cf42b7d4175e | [
"MIT"
] | null | null | null | NR_CLASSES = 10
hyperparameters = {
"number-epochs" : 30,
"batch-size" : 100,
"learning-rate" : 0.005,
"weight-decay" : 1e-9,
"learning-decay" : 1e-3
}
| 14.545455 | 25 | 0.61875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.45 |
80f3f14a093019280342ab7f0daf049daf505aeb | 141 | py | Python | strategy/managers.py | moshthepitt/probsc | 9b8cab206bb1c41238e36bd77f5e0573df4d8e2d | [
"MIT"
] | null | null | null | strategy/managers.py | moshthepitt/probsc | 9b8cab206bb1c41238e36bd77f5e0573df4d8e2d | [
"MIT"
] | null | null | null | strategy/managers.py | moshthepitt/probsc | 9b8cab206bb1c41238e36bd77f5e0573df4d8e2d | [
"MIT"
] | null | null | null | from core.managers import CoreManager
class StrategicThemeManager(CoreManager):
    """Model manager for strategic themes; inherits all behaviour from
    CoreManager unchanged."""
    pass
class ObjectiveManager(CoreManager):
    """Model manager for objectives; inherits all behaviour from
    CoreManager unchanged."""
    pass
| 11.75 | 41 | 0.780142 | 97 | 0.687943 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
80f51317fba911ed237d54c4c7c39490d353795f | 903 | py | Python | 03 Prime Number .py | yoursamlan/FunWithNumbers | 15e139a7c7d56f0553ef63446ab08f68c8262631 | [
"MIT"
] | null | null | null | 03 Prime Number .py | yoursamlan/FunWithNumbers | 15e139a7c7d56f0553ef63446ab08f68c8262631 | [
"MIT"
] | null | null | null | 03 Prime Number .py | yoursamlan/FunWithNumbers | 15e139a7c7d56f0553ef63446ab08f68c8262631 | [
"MIT"
] | null | null | null | # A prime number is a positive integer greater than one, that has no positive integer factors except one and itself.
# Since we have already dealt with number of factors of a number, I'm thinking to implement this idea finding prime number.
# The prime number has the factor of 1 and itself.
# So, number of factors of a prime number is always 2. We will use this logic to find it.
# After that, we will find prime numbers upto a certain limit.
def factors(n):
    """Return all positive divisors of n in ascending order."""
    return [divisor for divisor in range(1, n + 1) if n % divisor == 0]
def numfact(num):
    """Return the divisor count of every integer from 1 to num."""
    return [len(factors(value)) for value in range(1, num + 1)]
def is_prime(m):
    """Return True if m is prime, False otherwise.

    Replaces the original O(n) approach (len(factors(m)) == 2) with
    O(sqrt(n)) trial division.  Results are identical for all integers
    (values below 2, including negatives, are not prime) and the
    dependency on factors() is removed.
    """
    if m < 2:
        return False
    if m in (2, 3):
        return True
    if m % 2 == 0:
        return False
    divisor = 3
    while divisor * divisor <= m:
        if m % divisor == 0:
            return False
        divisor += 2
    return True
limit = int(input("Enter the limit: "))
# Print every prime strictly below the limit.
for candidate in range(limit):
    if is_prime(candidate):
        print(candidate)
| 28.21875 | 123 | 0.635659 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 459 | 0.508306 |
80f9553aa2281baf010bcd189b89d3921013e8ac | 1,903 | py | Python | upload2db/upload2db.py | rhoerbe/eu23enerwatch | 2749f0d3314580fa9df3251a2151817ec8c38d9c | [
"MIT"
] | null | null | null | upload2db/upload2db.py | rhoerbe/eu23enerwatch | 2749f0d3314580fa9df3251a2151817ec8c38d9c | [
"MIT"
] | null | null | null | upload2db/upload2db.py | rhoerbe/eu23enerwatch | 2749f0d3314580fa9df3251a2151817ec8c38d9c | [
"MIT"
] | null | null | null | """
Upload samples into database
"""
import os
import sys
import psycopg2
from pathlib import Path
import constants
def main():
    """Walk the log directory given as argv[1], load every unprocessed
    sample file into the Postgres database, then mark each file done."""
    password = os.environ['PG_PASSWD']
    conn = psycopg2.connect(host="dc.idn.local", dbname="eu23enerwatch", user="eu23enerwatch", password=password)
    logdir = Path(sys.argv[1])
    for sample_path in logdir.rglob('*'):
        # Skip directories and files already processed (renamed done_*).
        if not sample_path.is_file() or sample_path.name.startswith('done_'):
            continue
        with open(sample_path) as handle:
            row_values: dict = read_sample(handle)
            write_db(conn.cursor(), sample_path.name, row_values)
            rename_inputfile(sample_path)
    conn.commit()
    conn.close()
def read_sample(fd) -> dict:
    """Parse a sample file of "<sensor_id> <raw_value>" lines.

    Returns {sensor_location: value} where the raw integer reading is
    scaled down by 1000 and rounded to one decimal place.
    """
    row_values = {}
    for line in fd.readlines():
        sensor_id, raw_value = line.split()
        # Map the hardware sensor id to its human-readable location.
        sensor_name = constants.sensor_id[sensor_id]
        location = constants.sensor_loc[sensor_name]
        row_values[location] = round(int(raw_value) / 1000, 1)
    return row_values
def write_db(cursor, sampletime: str, row_values: dict):
    """Insert one sample row; sensors absent from row_values get -99.

    Rows whose sampletime already exists (unique-key violation) are
    silently skipped.
    NOTE(review): values are interpolated directly into the SQL string;
    they are internally produced floats here, but a parameterized query
    would be safer.
    """
    # Filenames encode ':' as '_' ; restore it for the timestamp literal.
    sampletime_edited = sampletime.replace('_', ':')
    sql = f"""
    INSERT INTO samples (
    sampletime,
    Kellerabluft,
    Ofenvorlauf,
    EGabluft,
    Boiler,
    Puffer,
    OGabluft,
    FBHvorlauf,
    FBHruecklauf
    )
    VALUES (
    '{sampletime_edited}',
    {row_values.get('Kellerabluft', -99)},
    {row_values.get('Ofenvorlauf', -99)},
    {row_values.get('EGabluft', -99)},
    {row_values.get('Boiler', -99)},
    {row_values.get('Puffer', -99)},
    {row_values.get('OGabluft', -99)},
    {row_values.get('FBHvorlauf', -99)},
    {row_values.get('FBHruecklauf', -99)}
    )
    """
    try:
        cursor.execute(sql)
    except psycopg2.errors.UniqueViolation:
        # Sample already recorded for this timestamp; ignore.
        pass
def rename_inputfile(fpath: Path):
    """Mark a processed sample file by prefixing its name with "done_"."""
    fpath.rename(fpath.with_name('done_' + fpath.name))
# NOTE(review): runs the loader unconditionally at import time; consider
# guarding with `if __name__ == "__main__":`.
main()
80f9d023d887fe6457bea250a3a2411216917f21 | 1,314 | py | Python | tests/unit/test_heap.py | thoughteer/edera | c4ddb5d8a25906c3bd773c91afb3260fc0b704f2 | [
"MIT"
] | 3 | 2018-11-27T15:45:19.000Z | 2018-12-21T20:32:10.000Z | tests/unit/test_heap.py | thoughteer/edera | c4ddb5d8a25906c3bd773c91afb3260fc0b704f2 | [
"MIT"
] | 18 | 2018-12-02T18:38:59.000Z | 2020-02-05T22:09:37.000Z | tests/unit/test_heap.py | thoughteer/edera | c4ddb5d8a25906c3bd773c91afb3260fc0b704f2 | [
"MIT"
] | null | null | null | import pytest
from edera import Heap
def test_heap_is_initially_empty():
    """A freshly constructed heap is falsy."""
    fresh_heap = Heap()
    assert not fresh_heap
def test_pushing_items_increases_heap_size():
    """Each push grows len(heap) by exactly one."""
    heap = Heap()
    for expected_size in range(1, 6):
        heap.push(str(expected_size), 0)
        assert len(heap) == expected_size
def test_top_of_heap_always_has_highest_priority():
    """The top item always tracks the highest-priority push."""
    heap = Heap()
    # Decreasing priorities: the first item must stay on top.
    for value in range(1, 6):
        heap.push(str(value), -value)
        assert heap.top == "1"
    # Increasing priorities: each newly pushed item takes the top.
    for value in range(1, 6):
        heap.push(str(value), value)
        assert heap.top == str(value)
def test_heap_pops_items_in_correct_order():
    """Items come off the heap in descending priority order."""
    heap = Heap()
    for priority in range(1, 6):
        heap.push(str(priority), priority)
    assert heap.pop() == "5"
    # Refill (re-adding 5) and drain completely.
    for priority in range(5, 10):
        heap.push(str(priority), priority)
    for priority in range(9, 0, -1):
        assert heap.pop() == str(priority)
    assert not heap
def test_accessing_empty_heap_gives_assertion_error():
    """Reading .top on an empty heap raises AssertionError."""
    empty_heap = Heap()
    with pytest.raises(AssertionError):
        return empty_heap.top
def test_popping_from_empty_heap_gives_assertion_error():
    """pop() on an empty heap raises AssertionError."""
    empty_heap = Heap()
    with pytest.raises(AssertionError):
        empty_heap.pop()
def test_heap_ordering_is_stable():
    """Equal-priority items pop in insertion (FIFO) order."""
    heap = Heap()
    for value in range(1, 6):
        heap.push(str(value), 0)
    for value in range(6, 10):
        heap.push(str(value), 0)
    for value in range(1, 10):
        assert heap.pop() == str(value)
    assert not heap
| 21.9 | 57 | 0.605784 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.004566 |
80faad49d3190989627064e2432a5db253be98cb | 502 | py | Python | curso_python#07.py/desafio7.1.py | robinson-85/python_curso_em_video | ba6292650663dee377c50b4be87ba1c2b5f1d475 | [
"MIT"
] | null | null | null | curso_python#07.py/desafio7.1.py | robinson-85/python_curso_em_video | ba6292650663dee377c50b4be87ba1c2b5f1d475 | [
"MIT"
] | null | null | null | curso_python#07.py/desafio7.1.py | robinson-85/python_curso_em_video | ba6292650663dee377c50b4be87ba1c2b5f1d475 | [
"MIT"
] | null | null | null | ''' 7. Desenvolva um programa que leia as duas notas de um aluno, calcule
e mostre a sua média. '''
import sys
try:
n1 = float(input("Primeira nota do aluno: "))
except Exception as error:
print("Voce deve informar apenas numeros")
sys.exit()
try:
n2 = float(input("Segunda nota do aluno: "))
except Exception as error:
print("Voce deve informar apenas numeros")
sys.exit()
media = (n1 + n2) / 2
print("A média entre {:.1f} e {:.1f} é igual a {:.1f}".format(n1, n2, media)) | 26.421053 | 77 | 0.653386 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.538614 |
80fb1b3d4994990978cea4fcec79ef10d6af88e1 | 973 | py | Python | demo.py | Ph1lippK/anonymizer | c200ac501194544d00b9dedbbc4860e48f48c548 | [
"Apache-2.0"
] | null | null | null | demo.py | Ph1lippK/anonymizer | c200ac501194544d00b9dedbbc4860e48f48c548 | [
"Apache-2.0"
] | null | null | null | demo.py | Ph1lippK/anonymizer | c200ac501194544d00b9dedbbc4860e48f48c548 | [
"Apache-2.0"
] | null | null | null | import streamlit as st
st.image(
"https://datascientest.com/wp-content/uploads/2020/10/logo-text-right.png.webp"
)
st.header("Développer et déployer une application de Machine learning en **Streamlit**")
st.info("Webinar du 04/05/2021")
st.markdown("---")
st.markdown(
"""
**Objectifs 🎯**
* Se familiariser avec Streamlit
* Découvrir les différents types de widgets
* Créér une démo d'application de Machine Learning
* Déployer cette application 🚀
"""
)
first_name = st.sidebar.text_input("Prénom")
last_name = st.sidebar.text_input("Nom")
job = st.sidebar.selectbox(
"Profession",
options=("Data Scientist", "Data Engineer", "Développeur", "Autre"),
)
experience = st.sidebar.slider(
"Années d'expériences", min_value=0, max_value=10, value=2, step=1
)
interests = st.sidebar.multiselect(
"Intérêts",
options=["technologie", "IA", "développement", "python", "statistiques", "R"],
default=["python", "IA"],
)
| 23.166667 | 88 | 0.681398 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 587 | 0.590543 |
80fbde707dcc237d96a9a00970ee7fdc1b035ca5 | 7,710 | py | Python | cogs/weapon_exp_calculator.py | richiekim/GenshinCalc | 8a8eac605bc03b8729334f5110f1052cfeebbe91 | [
"MIT"
] | null | null | null | cogs/weapon_exp_calculator.py | richiekim/GenshinCalc | 8a8eac605bc03b8729334f5110f1052cfeebbe91 | [
"MIT"
] | 3 | 2021-01-19T11:29:19.000Z | 2021-01-19T11:30:16.000Z | cogs/weapon_exp_calculator.py | richiekim/unital | 8a8eac605bc03b8729334f5110f1052cfeebbe91 | [
"MIT"
] | null | null | null | import discord
import json
import math
from discord.ext import commands
from common_functions import default_embed_template, use_exp_mat
MYSTIC = 10000
FINE = 2000
NORMAL = 400
ASCENSION_MILESTONES = [90, 80, 70, 60, 50, 40, 20]
class WeaponExpCalculator(commands.Cog):
def __init__(self, client):
self._client = client
self._calls = 0
@property
def calls(self):
return self._calls
@calls.setter
def calls(self, new_calls):
self._calls = new_calls
def format_char_stats(self, rarity, curr_level, curr_exp, curr_exp_cap, mystic_count, fine_count, normal_count):
msg = f"__Weapon__\n"
msg += f"Rarity: {rarity}:star:\n"
msg += f"Weapon level: {curr_level}\n"
msg += f"Current Exp: {curr_exp:,}/{curr_exp_cap:,}\n\n"
msg += f"__Inventory__\n"
msg += f"{mystic_count:,}x Mystic\n"
msg += f"{fine_count:,}x Fine\n"
msg += f"{normal_count:,}x Enhancement\n"
return msg
def add_exp(self, next_level_exp, curr_level, level_upto, curr_exp, mystic_count, fine_count, normal_count):
fine_ore_refunded = 0
normal_ore_refunded = 0
wasted_exp = 0
while curr_level < level_upto and mystic_count + fine_count + normal_count > 0:
total_exp_next_level = int(next_level_exp[str(curr_level)])
# Use materials until curr_level is over total_exp_next_level starting for those that give the most exp to the least
curr_exp, mystic_count = use_exp_mat(curr_exp, total_exp_next_level, mystic_count, MYSTIC, True)
curr_exp, fine_count = use_exp_mat(curr_exp, total_exp_next_level, fine_count, FINE, True)
curr_exp, normal_count = use_exp_mat(curr_exp, total_exp_next_level, normal_count, NORMAL, True)
# Calculate exp overflow
while True:
if curr_exp >= int(next_level_exp[str(curr_level)]):
if curr_level + 1 in ASCENSION_MILESTONES:
# Calculate refunded ores if any
wasted_exp = curr_exp - int(next_level_exp[str(curr_level)])
fine_ore_refunded = math.floor(wasted_exp/FINE)
fine_count += fine_ore_refunded
wasted_exp -= fine_ore_refunded*FINE
normal_ore_refunded = math.floor(wasted_exp/NORMAL)
normal_count += normal_ore_refunded
wasted_exp -= normal_ore_refunded*NORMAL
curr_exp = 0
curr_level += 1
return curr_level, curr_exp, mystic_count, fine_count, normal_count, fine_ore_refunded, normal_ore_refunded, wasted_exp
else:
curr_exp = curr_exp - int(next_level_exp[str(curr_level)])
curr_level += 1
else:
break
return curr_level, curr_exp, mystic_count, fine_count, normal_count, fine_ore_refunded, normal_ore_refunded, wasted_exp
def calculate(self, embed_msg, rarity, curr_level, goal_level, curr_exp, mystic_count, fine_count, normal_count):
with open(f"./wep_exp_per_level/wep_exp_per_level_{rarity}.json", "r") as f:
next_level_exp = json.load(f)
f.close()
if not str(curr_level) in next_level_exp:
raise commands.ArgumentParsingError(message="Please enter a valid weapon level.")
if not str(goal_level) in next_level_exp:
raise commands.ArgumentParsingError(message="Please enter a valid goal level.")
if curr_exp > int(next_level_exp[str(curr_level)]):
raise commands.ArgumentParsingError(message="Invalid current weapon experience points value.")
msg = self.format_char_stats(rarity, curr_level, curr_exp, next_level_exp[str(curr_level)], mystic_count, fine_count, normal_count)
embed_msg.add_field(name="**Before**", value=msg, inline=True)
start_mystic_count = mystic_count
start_fine_count = fine_count
start_normal_count = normal_count
total_fine_refunded = 0
total_normal_refunded = 0
while mystic_count + fine_count + normal_count > 0 and curr_level < goal_level:
prev_mystic_count = mystic_count
prev_fine_count = fine_count
prev_normal_count = normal_count
curr_upper_level_cap = 0
for level_cap in ASCENSION_MILESTONES:
if level_cap > curr_level:
curr_upper_level_cap = level_cap
else:
break
level_upto = curr_upper_level_cap
if goal_level < curr_upper_level_cap:
level_upto = goal_level
# Add exp
new_level, new_exp, mystic_count, fine_count, normal_count, fine_ore_refunded, normal_ore_refunded, wasted_exp = self.add_exp(next_level_exp, curr_level, level_upto, curr_exp, mystic_count, fine_count, normal_count)
total_fine_refunded += fine_ore_refunded
total_normal_refunded += normal_ore_refunded
embed_msg.add_field(name=f"**Leveling: {curr_level} -> {level_upto}**", value=f"Reached level {new_level:,}/{curr_upper_level_cap:,}\nCurrent exp: {new_exp:,}/{next_level_exp[str(new_level)]:,}", inline=True)
embed_msg.add_field(name=f"**Used**", value=f"{prev_mystic_count - mystic_count}x Mystic\n{prev_fine_count - fine_count +fine_ore_refunded}x Fine\n{prev_normal_count - normal_count + normal_ore_refunded}x Enhancement", inline=True)
embed_msg.add_field(name=f"**Refunded**", value=f"{fine_ore_refunded}x Fine\n{normal_ore_refunded}x Enhancement", inline=True)
curr_level = new_level
curr_exp = new_exp
msg = self.format_char_stats(rarity, curr_level, curr_exp, next_level_exp[str(curr_level)], mystic_count, fine_count, normal_count)
embed_msg.insert_field_at(index=1, name="**After**", value=msg, inline=True)
if curr_level >= goal_level:
msg = f"You have enough enhancement ores to reach level {goal_level}.\n\n"
else:
msg = f"You do not have enough enhancement ores to reach level {goal_level}.\n\n"
msg += f"__Total used__\n{start_mystic_count - mystic_count}x Mystic\n{start_fine_count - fine_count + total_fine_refunded}x Fine\n{start_normal_count - normal_count + total_normal_refunded}x Enhancement\n\n"
msg += f"__Total refunded__\n{total_fine_refunded}x Fine\n{total_normal_refunded}x Enhancement\n"
embed_msg.insert_field_at(index=2, name="**Summary**", value=msg, inline=False)
return embed_msg
# Input:
# current level, goal level, current exp, and number of mystic, fine and regular ores.
# Output:
# If enough then how many ores it will cost.
# If not enough then what level will using all of the ores will get to and how many more ores needed to reach goal.
@commands.command()
async def wep_exp(self, ctx):
self.calls += 1
args = ctx.message.content.split()
if len(args) == 8:
try:
rarity = int(args[1])
curr_level = int(args[2])
goal_level = int(args[3])
curr_exp = int(args[4])
mystic_count = int(args[5])
fine_count = int(args[6])
normal_count = int(args[7])
except ValueError:
raise commands.ArgumentParsingError(message="Please enter integer values only.")
if not rarity in [5, 4, 3, 2, 1]:
raise commands.ArgumentParsingError(message="Please enter a valid weapon rarity value.")
if curr_level > goal_level:
raise commands.ArgumentParsingError(message="Please enter current level and goal level where current level is less than goal level.")
if mystic_count < 0 or fine_count < 0 or normal_count < 0:
raise commands.ArgumentParsingError(message="Please enter number of enhancement ores greater or equal to 0.")
embed_msg = default_embed_template(ctx, self._client.user.name)
embed_msg = self.calculate(embed_msg, rarity, curr_level, goal_level, curr_exp, mystic_count, fine_count, normal_count)
await ctx.send(embed=embed_msg)
else:
await ctx.send(f"`Usage: {self._client.command_prefix}wep_exp <rarity> <curr_level> <goal_level> <curr_exp> <mystic_count> <fine_count> <normal_count>`\n`{self._client.command_prefix}help` for more details.")
@commands.command(hidden=True)
@commands.is_owner()
async def wep_exp_calls(self, ctx):
await ctx.send(content=f"Calls: {self.calls}")
def setup(client):
client.add_cog(WeaponExpCalculator(client)) | 41.010638 | 234 | 0.748508 | 7,413 | 0.961479 | 0 | 0 | 1,630 | 0.211414 | 1,439 | 0.186641 | 2,187 | 0.283658 |
80fcaf869f884bfa65105bd82efb6048d76b37ce | 38 | py | Python | test/test-sys.py | xupingmao/minipy | 5bce2f238925eb92fe9ff7d935f59ef68daa257a | [
"MIT"
] | 52 | 2016-07-11T10:14:35.000Z | 2021-12-09T09:10:43.000Z | test/test_case/060_test_sys.py | xupingmao/snake | c956f151ed1ebd2faeaf1565352b59ca5a8fa0b4 | [
"MIT"
] | 13 | 2016-07-24T13:50:37.000Z | 2019-03-02T06:56:18.000Z | test/test_case/060_test_sys.py | xupingmao/snake | c956f151ed1ebd2faeaf1565352b59ca5a8fa0b4 | [
"MIT"
] | 9 | 2017-01-27T10:46:04.000Z | 2021-12-09T09:10:46.000Z |
import sys

# Sanity check: the interpreter was invoked with no extra command-line
# arguments (argv holds only the script name).
assert len(sys.argv) == 1
80fd13c03cdfaa618f595e649dee4f74f7c9f3b9 | 498 | py | Python | genthreads/actor.py | f1sty/genthreads | 00e509b7315b7e0107e1c46235ec11e044aeb9ef | [
"MIT"
] | null | null | null | genthreads/actor.py | f1sty/genthreads | 00e509b7315b7e0107e1c46235ec11e044aeb9ef | [
"MIT"
] | null | null | null | genthreads/actor.py | f1sty/genthreads | 00e509b7315b7e0107e1c46235ec11e044aeb9ef | [
"MIT"
] | null | null | null | from multiprocessing import Process
class InboxFullError(Exception):
    """Raised when sending to an actor whose inbox is at capacity."""
    pass
class Actor(Process):
    """A process-based actor with a bounded mailbox.

    Messages are appended with send() and exposed read-only through the
    inbox property; send() raises InboxFullError once the mailbox holds
    inbox_size messages.
    """

    def __init__(self, inbox_size=48):
        super(Actor, self).__init__()
        self._inbox = list()
        self._inbox_size = inbox_size

    def send(self, value):
        # Guard clause: refuse the message when the mailbox is full.
        if len(self._inbox) >= self._inbox_size:
            raise InboxFullError('no more space in inbox')
        self._inbox.append(value)

    @property
    def inbox(self):
        return self._inbox
0383eebbfbfff2168965dd0292da22a7b499eb2b | 25,457 | py | Python | mopidy_pidi/brainz.py | JimmyBlunt/mopidy-pidi | 30d99c2850bc527b81e8eec8941adcd49c2423d3 | [
"Apache-2.0"
] | null | null | null | mopidy_pidi/brainz.py | JimmyBlunt/mopidy-pidi | 30d99c2850bc527b81e8eec8941adcd49c2423d3 | [
"Apache-2.0"
] | null | null | null | mopidy_pidi/brainz.py | JimmyBlunt/mopidy-pidi | 30d99c2850bc527b81e8eec8941adcd49c2423d3 | [
"Apache-2.0"
] | null | null | null | """
Musicbrainz related functions.
"""
import base64
import logging
import os
import time
from threading import Thread
import musicbrainzngs as mus
from .__init__ import __version__
logger = logging.getLogger(__name__)
class Brainz:
    def __init__(self, cache_dir):
        """Initialize musicbrainz."""
        # Identify this client to the MusicBrainz web service (required
        # by their API usage policy).
        mus.set_useragent(
            "python-pidi: A cover art daemon.",
            __version__,
            "https://github.com/pimoroni/mopidy-pidi",
        )
        # Directory where downloaded cover art is cached.
        self._cache_dir = cache_dir
        # Pre-render the fallback image once so it can be served whenever
        # metadata is missing or a lookup fails.
        self._default_filename = os.path.join(self._cache_dir, "__default.jpg")
        self.save_album_art(self.get_default_album_art(), self._default_filename)
def get_album_art(self, artist, album, callback=None):
if artist is None or album is None or artist == "" or album == "":
if callback is not None:
return callback(self._default_filename)
return self._default_filename
file_name = self.get_cache_file_name(f"{artist}_{album}")
logger.info("BRAINZ::get_album-art: get_cached_file_name:(fartist_album" + str(file_name))
if os.path.isfile(file_name):
# If a cached file already exists, use it!
if callback is not None:
return callback(file_name)
return file_name
file_name1 = f"{artist}_{album}.jpg".replace("/", "")
file_name1 = "/var/lib/mopidy/pidi/"+file_name1
logger.info("BRAINZ:get_album-art: file_name1:" + str(file_name1))
if os.path.isfile(file_name):
if callback is not None:
return callback(file_name)
return file_name
if callback is not None:
def async_request_album_art(self, artist, album, file_name, callback):
album_art = self.request_album_art(artist, album)
if album_art is None:
# If the MusicBrainz request fails, cache the default
# art using this filename.
self.save_album_art(self.get_default_album_art(), file_name)
return callback(file_name)
self.save_album_art(album_art, file_name)
return callback(file_name)
t_album_art = Thread(
target=async_request_album_art,
args=(self, artist, album, file_name, callback),
)
t_album_art.start()
return t_album_art
else:
album_art = self.request_album_art(artist, album)
if album_art is None:
# If the MusicBrainz request fails, cache the default
# art using this filename.
self.save_album_art(self.get_default_album_art(), file_name)
return file_name
self.save_album_art(album_art, file_name)
return file_name
# album_art = self.request_album_art(artist, album)
#
# if album_art is None:
#
# file_name = f"{album}".jpg
# logger.info("BRAINZ::get_album-art: BrainzRequestFailed-try-cleartexfile.f{album}).jpg" + str(file_name))
# if os.path.isfile(file_name):
# # If a cached file already exists, use it!
# if callback is not None:
# return callback(file_name)
# return file_name
# file_name1 = f"{album}.jpg".replace("/", "")
# file_name1 = "/var/lib/mopidy/pidi/"+file_name1
# logger.info("BRAINZ:get_album-art: onlyalbumfile_name1:" + str(file_name1))
#
# if os.path.isfile(file_name1):
# if callback is not None:
# return callback(file_name1)
# return file_name1
# if album_art is None:
# If the MusicBrainz request fails, cache the default
# art using this filename.
# self.save_album_art(self.get_default_album_art(), file_name)
# return file_name
# If the MusicBrainz request fails, cache the default
# # art using this filename.
# self.save_album_art(self.get_default_album_art(), file_name)#
# return file_name
# self.save_album_art(album_art, file_name)
# return file_name
def save_album_art(self, data, output_file):
with open(output_file, "wb") as f:
f.write(data)
def request_album_art(self, artist, album, size=500, retry_delay=5, retries=5):
"""Download the cover art."""
try:
data = mus.search_releases(artist=artist, release=album, limit=1)
release_id = data["release-list"][0]["release-group"]["id"]
logger.info("mopidy-pidi: musicbrainz using release-id: {release_id}")
return mus.get_release_group_image_front(release_id, size=size)
except mus.NetworkError:
if retries == 0:
# raise mus.NetworkError("Failure connecting to MusicBrainz.org")
return None
logger.info(
f"mopidy-pidi: musicbrainz retrying download. {retries} retries left!"
)
time.sleep(retry_delay)
self.request_album_art(artist, album, size=size, retries=retries - 1)
except mus.ResponseError:
logger.info(
f"mopidy-pidi: musicbrainz couldn't find album art for {artist} - {album}"
)
return None
def get_cache_file_name(self, file_name):
file_name = file_name.encode("utf-8")
file_name = base64.b64encode(file_name)
if type(file_name) is bytes:
file_name = file_name.decode("utf-8")
# Ruh roh, / is a vaild Base64 character
# but also a valid UNIX path separator!
file_name = file_name.replace("/", "-")
file_name = f"{file_name}.jpg"
return os.path.join(self._cache_dir, file_name)
def get_default_album_art(self):
"""Return binary version of default album art."""
return base64.b64decode(
"""
/9j/4AAQSkZJRgABAgEAlgCWAAD//gASTEVBRFRPT0xTIHYyMC4wAP/bAIQABQUFCAUIDAcHDAwJCQkMDQwMDAwNDQ0NDQ0NDQ0ND
Q0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQEFCAgKBwoMBwcMDQwKDA0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ
0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0N/8QBogAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoLAQADAQEBAQEBAQEBAAAAAAAAAQI
DBAUGBwgJCgsQAAIBAwMCBAMFBQQEAAABfQECAwAEEQUSITFBBhNRYQcicRQygZGhCCNCscEVUtHwJDNicoIJChYXGBkaJSYnKCkq
NDU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6g4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw
8TFxsfIycrS09TV1tfY2drh4uPk5ebn6Onq8fLz9PX29/j5+hEAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFE
KRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiIm
KkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/8AAEQgAvgENAwEi
AAIRAQMRAf/aAAwDAQACEQMRAD8A+m9UuZo7p1R3VRtwAxAHyr2BqiLu4P8Ay0k/77b/ABqfVh/pkn/Af/QFquiV6KtyrTovyPz6r
Kcq9WKlKyqT6v8AmYk99cQRmTfNIR0VGYt9fp+Nczc65qL8bpYk+rk/jmuxWOpxFU6J7I9zCT9lbmhzy/mlKX4J3R5rLqLSLtkRHY
D777t5z7lh/KqwuW2eWyodo67QWHsG6j8jXZeJYVS2VgoBEg5wAeQeOB/M1x1latezCBRw3LEdh3rdONnK1kmfUQn7SPPt5XESdIi
GEa5U55LYP1GRnPfrXaWuvG8bYJHjc9E3sB/wHnH4dfarP/CP2S9Ih/30/wD8VTG0GzP/ACz6f7T/AKfMMVlKcXsjjqTp1VaXOrap
qy/UtG4uBx5kn/fbf4037Tcf89ZP++2/xqVYRGAq9BwMkn9Tk1GyVlddkc8Jcrtd28xhu7gf8tZP++2/xqM3lz/z1k/77b/GnFajK
0XXY9OnW5eiGNe3I/5ay/8Afbf41C19dD/ltL/38b/GpGWoGStIyXZHasQuy+5EbX92P+W0v/fx/wDGoG1G8H/Leb/v4/8AjT2Smx
2jXBIQgbFLcnGfb612xlHsvuRXtl2X3IrNqd4P+W83/fx//iqrtqt6P+Xif/v6/wD8VSMuf881I1kfJ8/K/eACkqp78ncy56ds16E
HDrFfcifap9F9xVOr3w/5eJ/+/r//ABVEepX8rYFzOOp5mcAYGR/F3IA/GqbJjg9qZ5ddyUP5Y/ch6PoWf7Xv/wDn5n/7+v8A/FUo
1e//AOfmf/v7J/8AFVU8vt61YmcTbSqqm1QDtGM+59T71fufyR+5f5GcolhdWvv+fif/AL+yf/FVYXVL3/n4n/7+v/8AFVnLHg4qd
UqGofyx+5HBP3TRXU7z/nvN/wB/H/8AiqspqN3/AM95v+/j/wCNUg6+WI9g3Akl88kH/CnohA3dQOK5Go/yr7keZUqNbGkuoXX/AD
2l/wC/j/41Ot/df89pf++2/wAaz0Un8vyqyFKYyOoyPceorllGPZfcjyp15Lqy8L25x/rZP++2/wAamhvZg2XklYf3RIw7euaphGY
DHOOeB0HvRgH3rmlFdl9x588ROOt397LX26ftLJ/323+NKL24/wCesn/fbf41WUY57fpSsQTkfpWLiuyPOliJ780vvZZ+23H/AD1k
/wC+2/xqSO8nOf3kn/fbf41Uj2hvnztx2qSLHOOmeM1k0l0Rze1qXv7SXpzP/Mvar/x+P/wH/wBAWmRin6r/AMfj/wDAf/QFpkdR9
lei/I+gdN+3qvvUm/8AyZlyMVeihLZ9MVSQ4q0j7enHf/61YtnuUo2MHxPCzWRwMlWU4H5f1FM8PaSbGLzZB+9kAP8AujsPr6/hXR
HDDBAIPbt607NTz2Vuh6XMlHkRGVqBlqyaYwpXOdlRlqBlq2wqBhUXMGVCtRlasEVGRipuRz20KrLURWrRFRlatSsWqpSdaquuK0m
WqzJkj0rrpyN41L6GYy1FtJBXnnrgZ/z1/U1p+Tu4q1axvbtuUA8YwRkV6UZ8qud0Gc99mJqRLYrnAByMcj9RXRJZ7jnGMkmraWPs
K19vY6eexyX2I0v2M4rsfsPsPypjWPtT+sD9pfQxdNtbYB/tHXHFZ5gAYhemTiulNgWOAOage0GTt6fgf1FZqvFSs5avXlv0723sY
TjzK5h+XtBPoOg6n2FJpF7baojpG7rJE+GiaMhwQcdWKpg+zH354Gu0GDnAHoO365rF14vYrDJHIlr5rYMh+b848fk3UdO1ePmWKq
0KXtcPJJ9mk2/Tf5mdLD06kuWrFtd02vv2K2t20LBp5ftMMloN6LC2c46nbgByRkEFhgEjirtnq9teokib1Mg4VomTgDknjAP0yPQ
mlMd1BeQzRyxMrD/loG2txhsxrgZPXqMGtiMsY2tZEifZy21c4Dk4yTkjODjkH3r4/AYvEurJJw9tUv8AxE2n13Tj/WiOrGYajOko
1FJQhquTR9trP/LqxLfUhMrCErhflYqvP4mkFTWVtb2kLRRqIgclVXAyepz39ByT196Qj0GK+6pOoo/7Ryc9/sJqP4tv8T4HF0VOV
6PPyWS/eNOXX+VJW7CKOTnjjj3pVO07iM45waUDHTqaeUI4NaNo8d4eotbbETtkkgbfYdBUsPQ04jKBQoyDye/rUkSYzWDlYxdCXN
fuWNV/4/JP+A/+gLUcZxTtWOL2T/gP/oC1FGaxl8K9F+R+ivDWk59239+peU1YU1UU1MDXnTlYHHlLGacDUGaUGub2hlzE2aaaaDT
hWiqDvfQYwqJhVkigpW1yXEoMtQla0GjqIxUHM4solaYUq/5NJ5NNIhQZmmOk+z5rUEFTpb10wO2nEy47X2q9Ha47CtBIcdqnwsYy
2AB3PFbudt3sd8V0RVS2x2qXygnXAAqhNrEaAmEGQL95lHA+n978OPeuMurK/wDEVyHa5aCywQiANGS2ON6EASAHkHcRnFePWzCnS
bhD359Evhv5y29bXsdsaEpay0R12pa1Z6VCZ52G1SANuGLMxwFUDksfSufu9Yu/La5Iit7YgFNxYXDZ7eXKiY7dN3say7bT7a2xZS
ObiaJsnPzEuOQ3zZC84xtGFrrF05b9Y31JFlkiOUzzt9PQZx14xXlRxGJxrdNJ09dXF6R9Xe/odfso0td/VGVpNlc3L+feRvG6/cY
uTlSP7oIAPrkfSuiNsEGFGBV/dUTmvcoUY4ZOzcpPeUndmLbnvol0MqWBsHYdrYOD6H1rE0/SPsVu1vcubwyMzs0wB5YkkAc/LzwO
g6V07VWcVvOEKrUqiu1tfY2jHSy0M4xJjY6gr0x0wP8AZxjGO1NQNEWG9pEYAKGUAoBnjIPzZz36Y96tstQlaHSpSnGs4rmjt09Pu
6Gnsbpxvo/13IduOaULT8YpRXV7Q5XgYiKtTBS3JpoqVahzOSeBiASp40xQtWUWsXI8ueDUWZusNi9kH+5/6AtQRvSa5Jtv5B/uf+
i1qgtxiuyUG4RfkvyPq5Ul7KEu8Yv8EbKyYqUS4rE+1heKja+x3rheHlPY8SpTb2Og84etKJ19a5c3pY8ew96T7ZsJU5BFP6jI5fY
SOtWUdqnV65SO/wDer0V8PWoeFlEXsJROjBqQCseO8Bq/HcA1Ps3ErkaLe2k2ULIDUoYVokVyEXlUvlVPuFIWrXlK9mRiPFSBcVFL
cRwIXkYKB1J4rkdWvbi/mSxhPl20qkvKjDeBzx0OAfUYIrir4mnhfjfvPaK3f+SOmnRlPZadzdvdZhtSYUzJcbGZUVSckdASOBzjg
kfUVwmp+ItQawVVjimvGJLw8rtUdTnOBtGOCT1z2qgkEmkuYoZFe0QgmaV9zK2cMpCgMAvOc56c1ba5tbzzILUvLt2nz1BVNxySq5
VQw45HzV8lWx1aupbRprdJ207O97vu1p5HrQowha2r8w0a9m/dIVknJUu6BlEaZzlPMbGT9M4zVO70K6eHyraSaAvL8kDsZAI2f5s
SHbg7STnBA6ZNdLOYDFF5iSNJGNqCE7AGYAEsvy4zgHPIHOKr6fZ/2cGU58x23OS5fa3dVY/wg9OPfrVYPDPFO1NpU172lnb776nS
k5OyLmj6dBo6bU3SPjl3PzE/X0Fb4nBAwee/WsZTmp1OK+yVKNGKhTXKlvZLX10+ZpKmkayy1JuzWcjVaVqycuU45R5SU9KgYVNSE
VHtDNSsU2FQsKuMlRlKr2huqliptoAqxso2VXOW6qIgMVItLtxQBimpXOSdREinFWEfFU84p6vitUuY8qpNXOY8T3Bj1KYDPGzv/w
BMkrLmuFiC7XVy65OMjafQ1palpMmoTvdPIDK+3OFwPlUL0XceQvPQZ/KsWTRbmLoAwz0Ug8eu3hj+CmvbpVaLjGMpWaSTuna6Svr
sfQO/JGD6RSfyVhTctjJ6evvSkSNH5/8Ayz3bM579cY/rVJllQGNgcDkjBHTuQQD+Y6UiqcentXqwjB6waa8rP8jido7k2+p4TGQx
lJUhfk2jOWz3quqU4pXRyox54jlmI71bhuccEkYHGOeffPQVSRQGBYZXIB5HqKi46rwKzlTjI3i4yOghvSOp5+taUN/7muPV9tb+k
/Z5Vfz2KsvTmuGpQivss2dJbnRx3/vVxL2uFW7CsQCcAnHPbNXI77HeuV4bsH1a+p2n2wUyS/CKWJwAK5T7d71l6lqRklg09Fd/tR
3O6oWSNUPPmHGQB19MjrjmvOxf+yUnVau9kvN7f5ieH5FzM1LzUbq6uYYliJhkBL7WG+MgjGTyORzkdqW4eG0lLySsqbgE2HncQMh
icEknpt7Vl6mqSXsVzFcIEjjMcRgJWPcSMlwMhicYHTvgnnEv2ebWLJobq2bzEf8AdvuCsGB/1inAcdiARkjGDX5fUlKtVblJXu73
T3NkuVKwzVRcwzxSMn+gtuWaLjzTuyA7A54IOWXr796j1O6u53j0vQ4owwxKrSkxoqDhguDndyPYjPFZWoabPazBvMnmEa5WNc7/A
DF5w2WYkHBGMA4PXFW761vNZgt72222jEbJYnV0fafvfOoB3YHA4HOc0oNSlZ2snayVtWW1bXqbtrF5ZMsqo02djnlgSAM4Jxxk4B
#HpV0DJzjHsO1U7S2W2jWGIHZGMAEliB1JySTySTye9XVFfpOEwywdGNFJX3lbu9fw2NFLkVuvUlXir0KowO84x04/r+tVFFSr6Vr
JGUp3JhwasIaqjip0NcM4nHORbWpMVArYqdTXmTvE86VTlEKVGUqxSGsucxdcrbcUm2pjTDWkZGTrkJWoyMVK1QscV2wOaVcjY4pi
t1pHNRx969WktDglW1GMWj+/HIv8AwDI/NSf5VGLqInaWVSf4Wyp/JsVsjVLYnaXCH0kDR/8AoYWpyI7hcEJIp9cMP8DXDzeTPulV
b2sYbojj5lDD3A/TP9KozaZG2dhMe705B5zyDnv6HPbit1tIg+9CXhb/AGG+X/v22U/8d/GqrWdzGMptmHp9xv1JTP5VpGTjrFtPy
dmXzRlpJepzEunyQdRkeo/wxkfqPeq/lg9MH6V0y3CltjZif+7IpXP0P3W/4CTTJ7KO4O4/I+PvDAJ+o6MPTI/GvTp42cdKnvLvs/
8AJ/1qcdTCqd5UXZ9t1f8ANf1oc0Y8UQxxo+ZVLKAeAcdsD8q0JrdrcZkxt6bh0/4F/dz78e9RTWrRnawxkexBHYgjgj3HFe3CvGq
vcfy2f+Z5LlOg+WomvPo/mY7Jz/d+v6U+4hSLaUcNlQTjPX0rTt7M3MgiBCnk5JwBxWfJGVJU9jjjpxXRz3aj2PRpYnYrDJPA68AD
r+FPDleDkEevFSRlonDpwynIPvTJWMjFnyWY9enNN7nsU68R6ygAggknGOcYrUtr/wAtV+bYUbA4yCpzu3YBzk/jWW0DR43EYYcYI
/l1oWM1wYnDU8XTdGqnytp6aNNdnqbSq05K0jTQQyz+Z5rogAfA6Bs42hTkYOeuM+9TkTa1dSwOBbWKKuWLsskh6kxlWUKAMDnPTN
RWUiwghlDbsDpggZyTnqQMDir73AmQKq7GzgleMj0PqPqa+KxGUXxEFRhGOHStJvV3V9XqnfzW/U4J1I6qOiMyG0jvJ2iiM9rBABh
pCpkkYDHDHfuB7g9arWuo6hf3Xl25V7CHIndl2yFugCn5SBnHAyMZz1q5fQsUD7PNMTBiCWDBV6ldv90cgY7fhVqBUuwtzbuQufmX
CnOQeMqeoxznmvCng3Qx8KCV4c0ZJpNLlffp0f8Aw4c6cL32/QuIuKnAoRKswqhbEhKj2HXiv0BnH7UagwQT09PWrDlWbKDaOw/D/
HmolWpMVg0TziCng4poAB59RmpJNoY7M47Z6/5zWLic85DxxzUiyVV3dqN2K450eY8ypI0BJTjJWRcFzC7REhoxvz6hTkj8QMfjVC
CWa9MUZZo/O3yHZjKhQAFBIIxznpmuJ4cwjTlVtytK7tr6X/JM6IvUbPWCLqXEShizGUxngEsAxUdhyQOw61Zt52k37v4XZR9ASBV
RotHHVhOmnKVrK99+kuXt319Hcvl6gZqjL1GXrtp0rbnkyrAzURHrUZOakh6GvSiuVHI613YhHiq2fh45QPYKw/HnP6VImo6VcHkr
Gx7lSh/MY/ma5mfw9fQHDIufQOmfXkbh+tZs9lPbf62J198ZH5oWFR7Om9pH6GoRvZS17XPSkh3jNrcH6EiVfyyGH5/hT2uJ4v8AX
Rbx/fiO4fUocMPwzXlcbmA7oyyH1U4P5g5/A10Fn4lu7fiTEy+p+V/++h1/EVk8P1iVedPZ3R3Aa3vkxlZQOqnqv4H5gfyrPuNNkg
GbVty4z5bng/7r9V/4FuFRQX9hqxBz5c4+7n5JAf8AZccMPbJHqKv+Zc2Y+b/SEB5wAJF+q9H+qkfSuVxcdDphX6PR/gZkcwZvLKlHA5jcYOPUdQf94ZBqjNZFPmgHBPKdj6lfQ+2cHtXRSx2+qRq2dy9VYcOje3cH1X8way5w9idtwd0bYCzAcZ7LJ6Z7N0zVRm4u8W00dr5K0eSok/679DFCCQfL2OCDwQfQjsaia2rantt58yMhZAB16MOwYd/9k9Rk84ptuFuAeNrLwynqp/qO4PQjmvYpYq697SX5nz9fDywz5oXcPy9f8zBNsaZ9mNdSLT2H5U4WS+n6Vq8YomEakonLi3J69qmS2xXSixX0xS/YgOgrH69E19szBWAjirKRYrUNrimiHFH1mMilUIIlaM7l4P8Aj1qSO1EK5VQqkk8DHPc1OIsVNhsbSTgdB2/KsXKLfNZX721t6myl9xEgC9RT1WpQlShcUXHcgCYq4toGj8zIGO1RhafsPTtWbAqlaYVxVzy6Y0VUYTKZ4phOKtNDUTRVooo86pcqySN5bopxvUr/AN9ZH/66oqk1t5bw7DLECuHztIbr05HQEHH862JFTy9u3Dg53dsVRDpInmqylD/EDx6df0pezjLc5VUq0tafSV9r6tNW+ab0KoEsSoUK+ZHJ5vIO0sTuI4IIBJxnkj0qWJn5aQje7Fm25ABY5wM84HqeTTyUC+ZuXZ65Hbjr9aFQOMr0q1Tgnpa9u/Tv/wAE4qtWvKm4SvyObb0suZ62v0115fmBakzUgipwhquVHhyjJ7EWKnhHBoEWKliXGRUNGcaUrmzf/wDHy/8AwH/0EUxUB7VBqcuy8cem3/0BaWGeuDkaSZ9zZqcn/ef5le60a2vP9ZGM+q/KfzGK5XUPDE0JMlufMUfwn7+PYjAP869ERg1S+XVRm46I74Tkv8meI42nBBVgeeMEH6dR9a6LS/EcltiO5Jki6ZP31/8Aiq7HVdBt79S7YikAOJBgf99eo+p4615t/Z9yXaKON5ihwWjUsp9PmHH1FdXPGatPc6vdmux3txPA6rPaPiaUZTyxuDn0kTuvZm4I5OV7WGN1cqFaOKIEYIcmTr/sjYP/AB7ipdI0ZNMiAwDKwHmPjkn0z12jsOla4iriaimY88o+7B6I5eHSp4BhZgeTw0eVAPYAOGA+rGoZ7S6jYPGgaRejKcK3+y6tyAemQWK9RXW+XimsAvFYSlbZD9tUa5ZO6fRpfpYyFtp35kcRf7MYBI+rOGBH0RfrStbSLnbK4PuEI/IID+RFXZJNtU3nxWPvSOez7L7kVXu3tf8Aj5AKf89EBwP95SSVHuCwHerwkHbpVB7is6KZrRfLRS8an5QpUEA/w4baML0HzE4qfZMOS+uzOiyDSFAecVkJqCghXzGT0DdP++gSv4bs+1aUc2aXLKJHI4j/AC6Xy8VYVg1SbK6ITtoWmVVSpAlTbKcEroUzZMhCVIEqUJUqpV3NCvso8urQSn7KLkNGeYqjMNaZSoytWpHLKJzWrN9ntn7GQrGOAeXYJx781hC3FvZXFnGcNbuApwO+1lPAxjcTke1dvcWcdzgSqGCsGAPZhyD+B5HvVV9OgLOxQZmChzz823OO/bJ6U9W7+Vv6+YJqEVGz+JSurdGtPS1/mzkDn7A4Y4mVsykYyGPJI4wAQQwA45rpFg2cZLe7AA/oP8frUsmmW7hgUH7wANyRkAYGcEdBxnrT47dbcbV3Ef7Tu5/N2Y/rVwTjrptbT5nJXtVVlf4nJ3S6qOid+ln8rEPlUhXbxU7vtqjLLiulK55/sEDPt4pqSgZqjJcYqOCcEtn2/rW6hoaRw5d1248u/lX02f8AotKbaSNLkqCQoyeelZPiebZqkwz08vj/ALZJVOK4kg+XdjIGQDwcjv61uqF6UH3hF/gj6NUr69zuLa66VsxThq4K1u+R/T+Qret7zBx/9avNqUOU19lYvwRDU5Hml+aKNikafwnGCW
YfxEnGM8Lg461siMKMAYArn9FuAkAj/ijJVv8Aezk9OOhFbonX1xXn8rHyjyuKaSFFRPcgdKpSXYFHI2LkLMkoWs+W4xVOa6JPH0rKmu85rWNByKVIuy3NZ0lz71Qkuufz9R/KqU10GORxwOB9B6+9d8MMa+yNFrmmC4FZn3ommDAbTjbn5jnuB0xVfz66Vhh+zOmDR+SZC4LZxs749cdKbbXJiby8kr1UnqB/d49O3TggckZPOi4rQtwZWxGRkJlgxA5BJ+UjqMEd85yCOK56uG5Wl1b/AA3f9d7Eun0Orhuua1opg/euDhvMd61oL0CuKphWtjnlSOvFPArIgvQa0451bmuP2comPK4lgLTwKaJFpjzhBgVSu9C7E5O2q8twsKl2OFUEk+gHJP5VSku8Vj6ld5t5R6xv/wCgmumNN7jsaqaxbSW5u0kVoBnLjkDHX3yPTr0qSC+juo1mibKSAMp6ZBGRXF29nEI0cEpuRfMRThHYLhWK9NwBIyOvfOBivBf+Rp7bSQY/MRT3yrso/lWiptayVlyuX3W+7czmktI6vmS++/8AkdlZ6tb36F4G3KrFT1HI602PVIJ2KRsWIyCQrbcjtuxt/WuO091spJYFLuvlq46DB27ScjjG8jryaS21FrSCKMq7Akj5cEAk9TyMZ9QO1OMdubTe9+lnaxzVFa/Jd6rls97q9/8AhjsjfR+aYATvCB/bBJUfqDVI6pE0hiQksvBIVsAjqN2MZ/Gub+0kXzMRn9woI5H/AC0fimWUjRw/Nkbmc59Tu5P510qm+ZQXXm/8laVvx3OJtcjm76cq36yTd392xsz6jGkqQE4kmyEHqQMkf561UnudpwxwTkAH1GMj8M1z91L8zXGT+7dFA4+8Dtbr7Pnr2qW7kAaMA9Gk6+mF9z+hNXFu9kl8SS9G+Vv70zsjTSUe/K2/8Vk0vuaLEtzUdvcct+H9ay5pKjtpDlufT+tenyaHpUaSkjW8Xy7dXuB0x5X/AKJjrJWRlAzkD1Iq54xkKa3P/smE4/7Yx1JqGqpfxRxpH5ZQcn1rthdUaEVG6dOF329xHTF8tvRBbXZiIZDhlORWzDdPIxdjkn5iTkfyrl1rUtbua0BKZ2vlevHqQB2PvWFWmn8K1OlNSN5Lpom8yLqcblJwD7j0Pv3HXtWgNWjH3jsI/vZX8ieD+BNclHcfX86v/aUEY27vMyc5+7j2ryp0GneP3dL/AKF+zW6Oka9xnNUJb33rFM5VV25AVVVie5AGT68nkZAyDVV5j6VVKkppS79Oz6r5FRgrJm5FqKxNlhvByMNyBx145zWNNdbiT93PYZx+vNU2c5HbJ/L3p93AIWAjbzFKg5A4z3GfbpXZGlGMl5kPlhqQNKW6Y+p98Z/KoX+UlcggE9OnWl20hSu9QRxzrwiRVLFE87iOPlicVNHZvKpZf4Rn8M4znGAB3JI9s05I5LXEy4BU5yCD+gJP6VDnTV0px507Wut/S5j7fqk7eglxBLp0u2QAMOcdun4etQyfI2Mg8A5HTkA4H0zirlzeTXEgmc5ZehH0H86pOhOTj39PwrGC55c8lb3bN2td3vonrZd+t/K5ca0bk0Mg3AEkdf8AP8v1qZLoqeM1VaIL907s9fbj/IpmD06GtZRizoUoyN6G/ZPX861odU+XGea5aafzsYUJtUL8oAzjqeO/vUQkI6VyvDxkPkTO7Gqe9Mk1LPf9a4wTMKkE7Vl9VUTP2Z0j3uec1VllE6MhyyspDY64YFcf57kVkeaTVu0u/s7FgqsdpHzZ75x7cNtI+meoq3S5V7q6GU42JhJ5QCdAoAHPYcD9KprBGE8vkrvL4JJ+YsWP4ZJ4/CnbdxyO/wBf61MsJoailaSW1vv3R5c79HbW/wA0R+Wpk87+ILt/AkH+Ypkdqq4y0hVTlV3LtB7fwFiBzxu6nPGOb6QVL5GK55Om911v/X+Wxyxp1Psvy+XT89HuUmjHmGVdwLKFOTnoSc9B3JP045
6mLywjlkLANyVJBG49WB2hhk843YHTmtExVE0eKScW07bao0jhqmqvo9LeX9NmbLEjhlI4c5Iyepxz1z2H5VSmjyAASuzOD8pPIAP3gfStSRcVWlCbO+7JHtjArRKG1tNPwd1+J3U6E07t+f36MyJen0/zn8fyplp1b8P61Z+zyTNsiUu2CQB7dc/hVe24Lfh/WupOysezTjyon8an/ie3IP8A0x/9ERVRtxnFX/Gi5125/wC2P/oiKqVtxgV6tN/7NR/69U//AEhHjVJOKNNIs81YEHpT7cA4rTSIHpXmTny3OL6y6ZkGIr0FKFwpzndxit77MDz0pRZiuf2p1RzBR3MDDduR78/55/D1pziRx2zx8x3E4Axj72Mfh2rc+yAUnkBeKm8b81rPybV/WzV/mZzzFfZMVIWHJAJPHT8/p9Rgj1pwQqMdueMA/wAwf0wfQitVo8VCY6tRjLVq77tu69He6+VjyqmOnJ6GY8eMbRn1J4/IAH+dOCIP4ecdzkZ9Rjbj8d30q95VNMVb6NcrlK3rZ/erP8TheKmney+6/wCD0/ApgMMhSQDwfce+MZpPKq8IqcI6fMoK0VZeRl7eUneTdygIcU/yM1oLFUoirlnVsdEcQ0ZX2ej7NWyIqlEGe1cbxHKdsMU1oc/9lwcik+zGuk+y57Uv2P2rH66onpQxZzP2Y04WxrpRZe1OFj7U/wC0EdaxJzi2pq1HaGt9LLHarC2mOMCueePT0QnV5jFjtatrbYrWEAXjFKyha4ninIqEOYzhBikaLFW2YLVWSXFaRm5HrUqCIwig/N07/T/Go7tIlY+XuxgY9Puio3mFRSh/LEv8LHaPwrrjc9BUIpogURq4MmSgPzAelZt2qhiUBCMTt6dM+vWp5JAOlZ8r9PbOB/npXbA6PZRiU2kaI7o2KsMjKkg89eRzz3qvbdW/D+tWL1Y48CJ/MBUE8YwfSq9t1b8P613x2OecbbGz4xti2sTv6+V/6JjFY1tEAfmOOOPr6f8A169C8R2Ky3ssh6nZ+kaj+lcpJZqh4p0a6lShTu01CKv2tFLrdHz7hdJ+SIo50XAwVI65I65PTAGBjHc+uew24mKqGP3TjkcjkZAyOM45x19qxJVVlwd3mA8NnjGBhSMfrnpxjiqgkaInYSv8vyPH4jBrH963yyjzLutL/wBdtDjqYWM9rr8jtY5Ae4/GoItQSe4FvCN+cjcDxkdh61zd3qj3SqjKqBePkULu/wB7B5qC1uzav5seVdeAcDv1/wA4rJ0KsmuWLjr1/rY54YKEU/aXlo7W0t/wfwO1Mi1C0g7VzaXrNzVuO5JrodDkOB4JmqTmgLmoI33c1ejXNYSlymTwjRD5dOENaCxipxAK5nXsZ/VWZQhpfJrW8gUeQKz9sH1UzFhqQQ1oiAU7ywvFYyqXK+rWKAixUqpirW0U4KK4p6lfVmQqlTqtAGKcDiuGdNvY0VBoeq1IEFRg0u41zewqdGbKm1oS4ApCVH1qrJMVqjLcNWkcLN9TspwNCSZV71RlugtZUtw1Zkt01enTwr6nr0lymtLeAVRe9BGO+fxrCmu2qi121evDDWsexCRvvdYqFrokbcnaDkDPGfXHrXPm6bNNNy1dSoWOvnOuWxMtqboOABn5eOxxXOvLmqX2yTbs3Hae2Tj8ulReYTWkaXKzNTevqyxI+atWrBxgKMgDJyeeW9PbiszzFUHcCeOMHGD/APqz+lXLJsbse39a6eWyMpH/2Q=="""
)
'''
/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxAPDhUPDw8QDw8PEA8VFQ8QDxAPDhAVFhEWFxUVFhcY
HSggGBolHRUVITEhJSkrLi4uFx8zODMtNygtLisBCgoKDg0OGxAQGy0lHx8tLS0tLy0tLy0vLS8vLS0v
LzUtLS0tLS0tLS0vLS0tLS0tLS0tLS8tLS0tLS0tLS4tNf/AABEIAOEA4QMBIgACEQEDEQH/xAAbAAEB
AAMBAQEAAAAAAAAAAAAAAQIDBAUGB//EAD4QAAICAQIDBAgDBQYHAAAAAAABAhEDEiEEMUEFIlFhEzJx
gZGhscEUQvBSctHh8SMzYnOCkgY0RFOisrP/xAAaAQEAAwEBAQAAAAAAAAAAAAAAAQIDBAUG/8QAKBEB
AAICAgEDAwQDAAAAAAAAAAECAxEhMRIEIkEFE1FhcfDxgcHh/9oADAMBAAIRAxEAPwD7sEB1PFUEAFBA
BQQAUEAFBABQQAUEAFBABQQAUEAFBABQQAUEAApAAKQAUEAFBABQQAUEAHTnxxUbSqvR723q1Y9T59U/
qY5WklFRjvGL1d7VbW/Wje5uThjdOLhHorVx9a+e1fBHPxMvVW20IdFe8U931Kw0nXcMliXptH5dbXus
mRqKS0R3jB33tW6TfX2ma/5h/wCZP6s18TL1Vttjh0V7xT3fUk6iWz0EVKSd1HJGK98n9k/ias0t60xj
TfLV92dM16j/AO5OD+EYJ/Ns5M0rk3tu3ySSEFo1C4IpzSfJtG+fDpU+jw6n+9ov7p+808L/AHkfada3
i1+xjT90sCT+cY/EiZ5KREw4AQFmaggAoIAKCACggAFIAKQACggAoBAKQAAUgA2vO6rZbJNpd5rwbEst
qnGNpJau9dLl1r5GoDSfKW6XEd7XpipXdrVu/O2SWW1TirSS1d66XLrXyNbXv9hYxbdLdvoNG5ZellUV
e0L0+W9/UZZqW+lR58nLf4tmDVOns0Ev14g3K456WpLmnZks0rburgovzVJfZGsBG5UgAFBABQABAUgF
BAAKQoAgAFAAAAgFAAAhlkg4upJp+DJe1APKvrbHAQlkUr7ujLkhut9nt8mjs4PGktb57+5HzfYfEp8T
xMO4tWWeSlk1yb1uMm10/KZWvPOvh6/pPp9b1i2T5fQvhV0mmzS1KEusWuTRTdhWtOD8LT8Clcs/Lf1H
0ynjvHxMOaUm3bdt9WStr6ePQslW3VXZidDwZ75U3ZOEyRlpcJN+Scr81ReDeNTvLq0rw5X59aPouIyR
0ta4wc4um2l05/MxyZZrMREOjDhi9ZmZfLELyfmnzT8PBg2c4AAABABQAAAAAAAQACkAAoBAKASW1+XU
DLJNydybb8WS/wCpq4HI5Z8afqvJBOPRrUuZ9d2jP0HD5MmPFrljxykscIrVJpbJLqVvbxbYMX3tzt4X
B5U1ofPevOz4Lsji4R7R3yYV6TNkg1DHLW9UmknJrxo/V+E7Ux5eH/E6ZwwveLywlinKO1ScZJONvlaX
j1N/oMOTfRjnpk1bhF95PenXNP5oxi0c/q9nBlnHSKzzp4XD8NH0i124OVJcrqOptvwS+q8TDhFu30PQ
7aioU1s5RcEltGKu5teb7q9xx4Vphb8LZnLtrfdPKXHxXrv3fRGospW7fVmedQT7jbVR5qt6V9TrjiNP
lsk+VptH5a35cvPZmWTJKVandJJeSXJGBvw8NKSb5Rjzk2oxj7WyLWivMr4cOTLxX/jSmDCXHcGnpfHc
Jq/zk18VsdMuHelTi4zxvlkxyU8b96K/dq6LfT8sRxqf5+umLyLQo6Vabere3defkawGaOKd75QpAEKC
FAhQAIUhQICkAoAAEKABjONprxRuwxg71NqltSu3ftNYJhh2BinLjIxcGowuTlaadcqXPnXPy5n3B8bg
zyxvVCTi/FHo4u3Mi9aMZfGLM71mZdPpstMddS9rj+FhmwzxZYLJjyQlGWN8pprkaeyMCxYlihiWLHDa
MYpxSXhTL2bxrzRb0aVF162q9vZ7DsMZjUvRreLV3Dy+34XCL8J18V/I4cyTjpbqz2e0cOvFJe/4bnze
GNy8o735IrPbtwxW+KYt1DXmxODp72azZnyanfRbHX2VwMMtuU60uPd8f1yOqJ1HL569ItkmMfXw5cEI
1LJkenFjjKU59IxirfvPHcZcdWXiE48Pzw8HdY4x/LPIl683z32Vn6DLg8bxyxOEfR5FJShXdkmqafuP
K7V7Nxxi5xahSj/Zqq8NjOsx5bl35JtTDFMfGu/9vAjwuNLSscFHwUI18KON8FPh5PPwVY8nOWD/AKfi
F1jKHJPwaPSO3szg45XLVPTprba3+vua21rlwYcmSL+2XLjyQz4YcThjL0eSLemreKSdThL2O0YH1vC8
DixRcMWOMIzlKUoxVJyl6zfmzzu0+zcai5xaxuMb0qqf9eRljtrh1+rx/cnzj/LwwAbPOAAAAAAAAQFI
ABQBC1tfT9fwBAKLMseNyemKbb6I712TP0V1/aX6tr1f49Str1r3K9MdrdQ80t7cvf1MsuJxemSaa6Mw
LKdPqOxMenBH/E2/nt8kjuNXBxrFBeEI/Q3HNPb2KRqsQh8xxkFiySg09L3Xg103PqDj7R4FZo1ykuUv
s/IRrfKbWvWs+H9vnMmWOnTFOvFmiLp2tmuT8DdxPCzxupxa8+cX7zGWROCiopNNvUrt3Xn5HREREcPL
yXtafdxp24+2cig4vvSfKfJx28KOHNmlOWqb1S8TBP8AXgZ4cbk6+L8BqI5Ru+SYr2wb8v5kOtxxLanL
zsxy4Fp1QdrqnzKRlrPDpyfT81K+UunB2zkjFqXfb5Sbpx28lucWfPLI9U5anVXtyNafz+fUF4iIclsl
rRqZH/UhZJp01TXQEqoCgCFAAAAAQACgEAoBmsMnBzp6U0r6AiNsIyadp01ya5o+hXamK9Op8vXru2fP
AzyYov21xZrY+lnJttt23zb6kkgnW66G7v5smyuUui2Roz7/AHfW4vVXsX0MjDBBxhGL3ajFNrlaRsOV
7EdIAUJQ5+Ix44xcpY4uv8MWzoJOCaafJg1Hy4o48E5aVjg7V3oSRhxHZMdL9H3G/a0/LyOjg+F0W3u3
9DqG51ytqtL7p8Pllij3o04zj48zHhZb10Z6HbsdE4ZVzdp+dcvv8jzVjd6obrmv4MWr8w6vS5/ueVLz
zDnyRpteDZE63WzXXqd+Kbk2pJcji9G9OrpdG1L+TxvV+jnDPHPc/sZcspvVJtt9WYitvt1Bo4pCAAUA
gFAAAAAAAAF9OnOun63YAANitr+6sAWMW2klbbpJdT6XguGhw2Nym0pV3pfZfrc4OwOHVvLKqjsr6bbv
4fc5e1OOeaW393HkvHzZS3unTpx6x1857np7HA9qRy5HBRcdri3zdc78D0Txv+HuHqLyPnLZexc/n9Do
ydr4o5HB3S5ySuKfgZzHOodWPJ7Im89vQKasXEQn6s4y9jVm0o2id9IUhpycXjj62SK/1K/gCZiO28hw
rtfC5KKk93Wqmor4k7bxyeFuLa07tL8y639fcTrnlSckeMzHOmXH8LHiMfdkrVuMk7j7D5tSljk1umnTTOjszj3hl4wb3Xh5rzO/t3hlKKzw35W11T5M1jidT05LW84+5Ti0PL/Fb3pV+KbRryZXLnyXRbJGALRSI6ZZPU5ckatbgABZiAAAAAAAAgKQCgAAAAAIbcGLW61KOzdvlsrBEbaIxak5KTVqmk6teHsM02uXUGWGtcb2WqNt8kr3BEPo+Jn+H4altJRUV+8+b+rPml8T1O2+OhlUVjkpRTk3XjyX3PLKUjhv6i8TbUdQhmskl+aXxZl3NHXXf+mjWXY9K5XzbfzMTPFilN6Yq3vt7FZjpfg9ue3L2jZz2h9N2Rn9Lh0y3ce6/NVt8vofNXtXTw6Hp/8AD+Wsrj0nF/Fb/Syt43Db09vG+vy8/Nj0SlF84tr4MxzZsjhGCm1CLb03tv4+K/idnbMa4iXnpf8A4o4kv1ZMcxtleNTMAT+ZccqadJ0+T5MuWWqTdKNu6XJEqsQQoAhSAUAgFBCgCFAAhSAUhQAIUAQskuXT4AAYwgoqkZX/AF33BmsMnFz0vSmk5Vtv+voCI/DAEKB6fYvo9VuT9JulGqX82dnbKbxNRa2acle7X6pnhY5uMlJc4u0ScnJtvdt22Yzh3fy26K54jH4aReZLf5W4vfdbNFBs50Tf5pOT8W238wVvy/mABCgCFAAEKAAAAhQABCkAAoAAAAAAAAADU6ro2nXsuvqzPFC3XhGb+EW/sbcfDJwcrdrG5V02m018FYmUxWZ6c6dfPpYN/wCHvZc6xV4XNfzKscJcm0ot3urlFRbtLpyrrzRG0+Mudv5A6I44PvU60zenV1j51ypov4dOLkrXcUknTfrNNefqtjZ4S1SlHQkotSTdyvZratjUdEsUYrVLU01Gkmk7cFJ70/2kassUpNJ2ujJgtE/LEhQFQAAQFAAAAAABCgACFIBQAAAAAAAAABt4b1n+5l/+cjpwTShFvl3U/Y8k7+Rx4p6XdXs01ytNNP5MznmtaUqjUUldtU2+fXdsiYXrbUOuEHGdPnF8Oveov+Bx8Nza8YT/APVv7GUuKk3f5v7Pf9xbMjz004xUdLut3b28em3IjUpm0S38K0opyVxSzWvKoouOTUpa2vXxxdbLS1JbeVHPLN0SSWlxS3dJu3v42SWZuNNLfRv17qaXyde4aPOIbs3demaenTjT8VJY47rzOfJDS2uddVya6P4G2fE6m9a1Xp5PS7jGk79hqyz1O6rlsuSSVJfBEwi0xPTEAEqAAAAAAAAAAAAACAACkKQCkKQCgACFSIZQk001zTTXtQGUsM1zjL4PwsPDJflltfR1tzNn4yfK1VJVSquiMpcdN1yTTbtJW3VX8ERyvqjUsE3+WXNLdVu+SIsUrrS781X1Nj4ub6rZp8l05e4Pi5XF8nDlt+vZQ5R7TJwc4uqva+6001tuvijV6KX7Mv8Aazdl43JKWptWlWyVJXf1QhxuRVvaVbNLpyHKZ8N/LTLFJK3Fpfzr3F9DP9mX+1+FmcuLm1TeyqlSpVy+Bfxc/FLnyjFc+fQco9rRKLTppp+DVMhnkm5O3u9vkqMSVZCFAEAKBCggFAAAAACFAAhQAAAAAAAAAAAAAACFAAAAAAAAAAhQAAAAAAAAB//Z"""
)
return base64.b64decode(
"""
iVBORw0KGgoAAAANSUhEUgAAAB4AAAAeCAMAAAAM7l6QAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFn
ZVJlYWR5ccllPAAAAMBQTFRFBHwvBSl8d04DCQ99egJLfAMzejQGcGoAAGZ6AHN3N3wBSHwBKXwDAHlp
NQF9AHtXAFV7VwB7HgN9B30aG30FXncAAXtERwB8fQMbZQB5AUF8fRsHQ04rfQgLFlZTVzgteABiZ14F
agNiAmpoF3kaLVU4V1QVYhdFLkZIQy1MFWc/biYkKSVpLWUmLjVYcQBzJHMbeRQiBWxZBlxnOmkXDn0M
WAdnGhd5FkBlSRZfCk1rO3MMTmwJCm5FQgtwMhJydzVfDgAAAYtJREFUeNpUzeligjAQBOCNgFcVFVRQ
FC3gUU/Uingg7/9W3U1CpJOf38wGGpQ2ptPpDIcAYNv29Xrt9/utVqsJXBsfLmmzKbiYy3WZ6/XC1fyj
X8iiIOZQsFDBvFBct+1I6BcGuvUuedgIwzOfR9dI6QC6FF4I2+dsmEEURVIHA+RxVzZwfs4gi+JW3Hwi
ch5juF8ul/CcbTZxHD+ffFqwrGDB32z2+9/n6/VCqw1qwMZMFh6Ph+/7C2RUJAowGWqlqb9eLCa/y2/M
f2YsZWl6WK8nk+VSOTBN05iGemO73e5w+JnNZpVlRQYIKTcM+g/xtiq1BloR5Dy/3++r7ba6rWLkmmLd
LCvP8zfqCp0zNYgtepZlmu93kiCfTifP87iDNK5OkiSBbpyEe1WPs0DTdJxeEAQr3TCUgyXUQnR6ySgI
dJy7rjclV8y3PdS5jm647nRKDVBIOjoSG4KpAOpfB3V0nM/LjmyapXHBriscylrwx0FpiQ11Hf6PyXX5
ORWAoxqr44Y4/ifAAPd/TAMIg8r1AAAAAElFTkSuQmCC"""
)
'''
| 79.553125 | 5,753 | 0.864124 | 18,950 | 0.744393 | 0 | 0 | 0 | 0 | 0 | 0 | 21,725 | 0.8534 |
0384989e82636b5dbefe82692c4b228ca4b5e756 | 607 | py | Python | class4/pm.py | patrebert/pynet_cert | b82cce3ddb20d9e4abc89d74579ddeb3513bdf55 | [
"Apache-2.0"
] | null | null | null | class4/pm.py | patrebert/pynet_cert | b82cce3ddb20d9e4abc89d74579ddeb3513bdf55 | [
"Apache-2.0"
] | null | null | null | class4/pm.py | patrebert/pynet_cert | b82cce3ddb20d9e4abc89d74579ddeb3513bdf55 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Demonstrate use of paramiko to connect to
managed device, send commands, get output
"""
import sys
import paramiko
from getpass import getpass
from time import sleep
ip_addr = '184.105.247.70'
username = 'pyclass'
password = '88newclass'
password=getpass()
sess=paramiko.SSHClient()
sess.set_missing_host_key_policy(paramiko.AutoAddPolicy())
sess.connect(ip_addr,username=username,password=password,
look_for_keys=False, allow_agent=False)
rc = sess.invoke_shell()
outp = rc.recv(5000)
print outp
rc.send("show ip int brief\n")
sleep(1)
outp=rc.recv(5000)
print outp
sys.exit()
| 22.481481 | 58 | 0.775947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.280066 |
0387a1db9142d9733d4ec277b3e83f604075ea51 | 9,590 | py | Python | saraki/utility.py | lucmichalski/saraki | 74c11f70b4e7bdedfd33984cb96944c27a4eebbf | [
"MIT"
] | 3 | 2020-07-01T17:34:39.000Z | 2021-05-04T17:53:01.000Z | saraki/utility.py | lucmichalski/saraki | 74c11f70b4e7bdedfd33984cb96944c27a4eebbf | [
"MIT"
] | 25 | 2018-01-25T00:56:18.000Z | 2021-06-12T04:29:00.000Z | saraki/utility.py | lucmichalski/saraki | 74c11f70b4e7bdedfd33984cb96944c27a4eebbf | [
"MIT"
] | 4 | 2020-04-19T21:24:34.000Z | 2021-01-23T19:04:27.000Z | import datetime
from cerberus import Validator as _Validator
from sqlalchemy import inspect
from sqlalchemy.orm.collections import InstrumentedList
from sqlalchemy.exc import NoInspectionAvailable
def is_sqla_obj(obj):
"""Checks if an object is a SQLAlchemy model instance."""
try:
inspect(obj)
return True
except NoInspectionAvailable:
return False
def import_into_sqla_object(model_instance, data):
""" Import a dictionary into a SQLAlchemy model instance. Only those
keys in `data` that match a column name in the model instance are
imported, everthing else is omitted.
This function does not validate the values coming in `data`.
:param model_instance: A SQLAlchemy model instance.
:param data: A python dictionary.
"""
mapper = inspect(model_instance.__class__)
for key in data:
if key in mapper.c:
setattr(model_instance, key, data[key])
return model_instance
def _get_column_default(c):
d = c.default
return d.arg if isinstance(getattr(d, "arg", None), (int, str, bool)) else None
class ExportData:
""" Creates a callable object that convert SQLAlchemy model instances
to dictionaries.
"""
def __init__(self, exclude=()):
#: A global list of column names to exclude. This takes precedence over
#: the parameters ``include`` and/or ``exclude`` of this instance call.
self.exclude = tuple(exclude)
def __call__(self, obj, include=(), exclude=()):
"""Converts SQLAlchemy models into python serializable objects. It can
take a single model or a list of models.
By default, all columns are included in the output, unless a list of
column names are provided to the parameters ``include`` or ``exclude``.
The latter has precedence over the former. Finally, the columns that
appear in the :attr:`excluded` property will be excluded, regardless of
the values that the parameters include and exclude have.
If the model is not persisted in the database, the default values of
the columns are used if they exist in the class definition. From the
example below, the value False will be used for the column active::
active = Column(Boolean, default=False)
:param obj: A instance or a list of SQLAlchemy model instances.
:param include: tuple, list or set.
:param exclude: tuple, list or set.
"""
if isinstance(obj, (list, InstrumentedList)):
try:
return [item.export_data(include, exclude) for item in obj]
except AttributeError as e:
# If the method exist, the exception comes inside of it.
if hasattr(obj[0], "export_data"):
# So re-raise the exception.
raise e
return [self(item, include, exclude) for item in obj]
try:
persisted = inspect(obj).persistent
except NoInspectionAvailable as e:
raise ValueError("Pass a valid SQLAlchemy mapped class instance")
columns = obj.__mapper__.columns
exclude = tuple(exclude) + self.exclude
data = {}
for c in columns:
name = c.name
if (not include or name in include) and name not in exclude:
column_value = getattr(obj, name)
data[name] = (
column_value
if persisted
else _get_column_default(c)
if column_value is None
else column_value
)
if persisted is True:
unloaded_relationships = inspect(obj).unloaded
relationship_keys = [
relationship.key
for relationship in obj.__class__.__mapper__.relationships
]
for key in relationship_keys:
if key not in unloaded_relationships and key not in exclude:
rproperty = getattr(obj, key)
has_export_data = hasattr(rproperty, "export_data")
data[key] = None
if has_export_data:
data[key] = rproperty.export_data()
elif rproperty:
data[key] = self(rproperty)
return data
#: Converts SQLAlchemy models into python serializable objects.
#:
#: This is an instance of :class:`ExportData` so head on to the
#: :meth:`~ExportData.__call__` method to known how this work. This instances
#: globally removes columns named ``org_id``.
export_from_sqla_object = ExportData(exclude=("org_id",))
schema_type_conversions = {
int: "integer",
str: "string",
bool: "boolean",
datetime.date: "string",
datetime.datetime: "string",
}
def generate_schema(model_class, include=(), exclude=(), exclude_rules=None):
""" Inspects a SQLAlchemy model class and returns a validation schema to be
used with the Cerberus library. The schema is generated mapping column
types and constraints to Cerberus rules:
+---------------+------------------------------------------------------+
| Cerberus Rule | Based on |
+===============+======================================================+
| type | SQLAlchemy column class used (String, Integer, etc). |
+---------------+------------------------------------------------------+
| readonly | **True** if the column is primary key. |
+---------------+------------------------------------------------------+
| required | **True** if ``Column.nullable`` is **False** or |
| | ``Column.default`` and ``Column.server_default`` |
| | **None**. |
+---------------+------------------------------------------------------+
| unique | Included only when the ``unique`` constraint is |
| | ``True``, otherwise is omitted: |
| | ``Column(unique=True)`` |
+---------------+------------------------------------------------------+
| default | Not included in the output. This is handled by |
| | SQLAlchemy or by the database engine. |
+---------------+------------------------------------------------------+
:param model_class: SQLAlchemy model class.
:param include: List of columns to include in the output.
:param exclude: List of column to exclude from the output.
:param exclude_rules: Rules to be excluded from the output.
"""
schema = {}
exclude_rules = exclude_rules or []
mapper = inspect(model_class)
for column in mapper.c:
name = column.name
if len(include) > 0 and name not in include:
continue
if name in exclude:
continue
prop = {}
python_type = column.type.python_type
prop["type"] = schema_type_conversions.get(python_type)
if prop["type"] is None:
raise LookupError("Unable to determine the column type")
if (
"readonly" not in exclude_rules
and python_type == str
and column.type.length is not None
):
prop["maxlength"] = column.type.length
if "readonly" not in exclude_rules and column.primary_key is True:
prop["readonly"] = True
if (
"required" not in exclude_rules
and column.default is None
and column.server_default is None
and column.nullable is False
and column.primary_key is False
):
prop["required"] = True
if "unique" not in exclude_rules and column.unique:
prop["unique"] = True
schema[name] = prop
return schema
class Validator(_Validator):
def __init__(self, schema, model_class=None, **kwargs):
super(Validator, self).__init__(schema, **kwargs)
self.model_class = model_class
def validate(self, document, model=None, **kwargs):
self.model = model
return super(Validator, self).validate(document, **kwargs)
def _validate_unique(self, is_unique, field, value):
"""Performs a query to the database to check value is already present
in a given column.
The rule's arguments are validated against this schema:
{'type': 'boolean'}
"""
if is_unique:
if not self.model_class:
raise RuntimeError(
"The rule `unique` needs a SQLAlchemy declarative class"
" to perform queries to check if the value being validated"
" is unique. Provide a class in Validator constructor."
)
filters = {field: value}
model = self.model_class.query.filter_by(**filters).first()
if model and (not self.update or model is not self.model):
self._error(field, f"Must be unique, but '{value}' already exist")
def get_key_path(key, _map):
for map_key, value in _map.items():
path = []
if map_key == key:
return [map_key]
if type(value) == dict:
_path = get_key_path(key, value)
path = ([map_key] + path + _path) if _path else []
if len(path) > 0:
return path
return None
| 34.496403 | 83 | 0.557873 | 4,502 | 0.469447 | 0 | 0 | 0 | 0 | 0 | 0 | 4,434 | 0.462357 |
0387e368da07b1b24fdcd7aec00346e86273ba76 | 4,147 | py | Python | fixture/orm.py | Treshch1/python_traning | de796861b7227fab176d342b67cf47acbd2b166f | [
"Apache-2.0"
] | null | null | null | fixture/orm.py | Treshch1/python_traning | de796861b7227fab176d342b67cf47acbd2b166f | [
"Apache-2.0"
] | null | null | null | fixture/orm.py | Treshch1/python_traning | de796861b7227fab176d342b67cf47acbd2b166f | [
"Apache-2.0"
] | null | null | null | from pony.orm import *
from datetime import datetime
from model.group import Group
from model.contact import Contact
import random
class ORMFixture:
db = Database()
class ORMGroup(db.Entity):
_table_ = "group_list"
id = PrimaryKey(int, column="group_id")
name = Optional(str, column="group_name")
header = Optional(str, column="group_header")
footer = Optional(str, column="group_footer")
contacts = Set(lambda: ORMFixture.ORMContact, table="address_in_groups",
column="id", reverse="groups", lazy=True)
class ORMContact(db.Entity):
_table_ = "addressbook"
id = PrimaryKey(int, column="id")
first_name = Optional(str, column="firstname")
last_name = Optional(str, column="lastname")
deprecated = Optional(datetime, column="deprecated")
groups = Set(lambda: ORMFixture.ORMGroup, table="address_in_groups",
column="group_id", reverse="contacts", lazy=True)
def __init__(self, host, name, username, password):
self.db.bind("mysql", host=host, database=name, user=username, password=password)
self.db.generate_mapping()
sql_debug(True)
def convert_groups_to_model(self, groups):
def convert(group):
return Group(id=str(group.id), name=group.name, header=group.header, footer=group.footer)
return list(map(convert, groups))
def convert_contacts_to_model(self, contacts):
def convert(contact):
return Contact(id=str(contact.id), first_name=contact.first_name, last_name=contact.last_name)
return list(map(convert, contacts))
@db_session
def get_group_list(self):
return self.convert_groups_to_model(select(g for g in ORMFixture.ORMGroup))
@db_session
def get_contact_list(self):
return self.convert_contacts_to_model(select(c for c in ORMFixture.ORMContact if c.deprecated is None))
@db_session
def get_contacts_in_group(self, group):
orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
return self.convert_contacts_to_model(orm_group.contacts)
@db_session
def get_contacts_not_in_group(self, group):
orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
return self.convert_contacts_to_model(
select(c for c in ORMFixture.ORMContact if c.deprecated is None and orm_group not in c.groups))
@db_session
def get_contact_by_id(self, id):
return self.convert_contacts_to_model(select(c for c in ORMFixture.ORMContact if c.id == id))[0]
def get_available_contact_and_group(self):
groups = self.get_group_list()
contacts = self.get_contact_list()
available_itmes = {}
for group in groups:
if len(self.get_contacts_in_group(group)) < len(contacts):
contacts_ids = [i.id for i in contacts]
contacts_in_group_ids = [i.id for i in self.get_contacts_in_group(group)]
available_group = group
available_contact_id = list(set(contacts_ids).difference(contacts_in_group_ids))[0]
available_contact = self.get_contact_by_id(available_contact_id)
available_itmes = {"group": available_group, "contact": available_contact}
return available_itmes
return available_itmes
def get_available_contact_and_group_del(self):
groups = self.get_group_list()
available_itmes = {}
for group in groups:
if len(self.get_contacts_in_group(group)):
available_contact = random.choice(self.get_contacts_in_group(group))
available_group = group
available_itmes = {"group": available_group, "contact": available_contact}
return available_itmes
return available_itmes
def is_contact_in_group(self, contact, group):
contact_ids_in_group = [i.id for i in self.get_contacts_in_group(group)]
if contact.id in contact_ids_in_group:
return True
return False
| 41.888889 | 111 | 0.667712 | 4,013 | 0.967687 | 0 | 0 | 944 | 0.227634 | 0 | 0 | 221 | 0.053292 |
0387e9376b6688e3463663f91f1a21bc934301ee | 8,994 | py | Python | trustworthiness/util.py | DeFacto/WebCredibility | dfbb990966fc6b33f60378acffa0f12e25183431 | [
"Apache-2.0"
] | 10 | 2018-09-14T06:57:29.000Z | 2021-12-13T18:26:38.000Z | trustworthiness/util.py | DeFacto/WebCredibility | dfbb990966fc6b33f60378acffa0f12e25183431 | [
"Apache-2.0"
] | 1 | 2021-05-16T20:34:23.000Z | 2021-05-16T20:34:23.000Z | trustworthiness/util.py | DeFacto/WebCredibility | dfbb990966fc6b33f60378acffa0f12e25183431 | [
"Apache-2.0"
] | 2 | 2021-06-22T08:30:46.000Z | 2021-12-13T18:26:35.000Z | import collections
import datetime
import logging
import os
import sys
from pathlib import Path
import numpy as np
import pdfkit as pdfkit
from bs4 import BeautifulSoup
from sklearn.metrics import mean_absolute_error, mean_squared_error, confusion_matrix, classification_report, \
accuracy_score
from tldextract import tldextract
from sklearn.externals import joblib
from coffeeandnoodles.core.util import get_md5_from_string
from trustworthiness.config import DeFactoConfig
from trustworthiness.definitions import DATASET_3C_SITES_PATH, DATASET_MICROSOFT_PATH_PAGES_MISSING, \
DATASET_MICROSOFT_PATH_PAGES_CACHED, ENC_WEB_DOMAIN, ENC_WEB_DOMAIN_SUFFIX, DATASET_MICROSOFT_PATH, OUTPUT_FOLDER, \
ENC_TAGS
import re
config = DeFactoConfig()
def filterTerm(word):
if word is not None:
temp = word.lower()
return re.sub(r"[^A-Za-z]+", '', temp)
else:
return ''
def print_report_regression(clf_name, predictions, y_test, targets):
print('MAE', mean_absolute_error(y_test, predictions))
print('RMSE', np.math.sqrt(mean_squared_error(y_test, predictions)))
print("-----------------------------------------------------------------------")
def print_report(clf_name, predictions, y_test, targets):
print("Classifier: ", clf_name)
print(confusion_matrix(y_test, predictions))
print("accuracy: ", accuracy_score(y_test, predictions))
print(classification_report(y_test, predictions, target_names=targets))
# print(":: recall: ", recall_score(y_test, predictions, average='weighted'))
# print(":: precision: ", precision_score(y_test, predictions, average='weighted'))
# print(":: f1: ", f1_score(y_test, predictions, average='weighted'))
print("-----------------------------------------------------------------------")
def get_logger(name, dir, file_level=logging.DEBUG, console_level=logging.INFO):
try:
logger = logging.getLogger(name)
if len(logger.handlers) == 0:
now = datetime.datetime.now()
filename = dir + name + '_' + now.strftime("%Y-%m-%d") + '.log'
formatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
fileHandler = logging.FileHandler(filename)
fileHandler.setFormatter(formatter)
fileHandler.setLevel(file_level)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(formatter)
consoleHandler.setLevel(console_level)
logger.setLevel(logging.DEBUG)
logger.addHandler(fileHandler)
logger.addHandler(consoleHandler)
logger.propagate = False
return logger
except:
raise
def get_html_file_path(url):
path = url.replace('http://', '')
last = path.split('/')[-1]
path_root = None
if ('.html' not in last) and ('.htm' not in last) and ('.shtml' not in last):
if path[-1] != '/':
path = path + '/'
path_root1 = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path + 'index.html')
path_root2 = Path(DATASET_MICROSOFT_PATH_PAGES_MISSING + path + 'index.html')
else:
path_root1 = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path)
path_root2 = Path(DATASET_MICROSOFT_PATH_PAGES_MISSING + path)
if path_root1.exists():
path_root = path_root1
elif path_root2.exists():
path_root = path_root2
else:
# sometimes the last part is not a folder, but the file itself without the ".html" , try it as a last attempt
path_root3a = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path.replace(last, '') + last + '.html')
path_root3b = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path.replace(last, '') + last + '.htm')
path_root3c = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path.replace(last, '') + last + '.shtml')
if path_root3a.exists():
path_root = path_root3a
elif path_root3b.exists():
path_root = path_root3b
elif path_root3c.exists():
path_root = path_root3c
else:
# url_broken.append(url)
raise Exception(
':: this should not happen, double check core/web/credibility/fix_dataset_microsoft.py | url = ' + url)
return path_root
def save_encoder_html2seq(folder_html_data):
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
config.logger.info('get_encoder_html2seq()')
try:
tags_set = []
#sentences = []
tot_files = 0
#my_file = Path(folder_html_data + 'features.html2seq.pkl')
my_encoder = Path(ENC_TAGS)
#path_html2seq = folder_html_data + 'html2seq/'
#path_html = folder_html_data + 'html/'
#path_text = folder_html_data + 'text/'
for dirpath, dirs, files in os.walk(folder_html_data):
for file_html in files:
if file_html.endswith('.txt'):
tot_files += 1
config.logger.info('processing file ' + str(tot_files) + ' - ' + str(len(tags_set)))
# get tags
tags = []
soup = BeautifulSoup(open(os.path.join(dirpath, file_html)), "html.parser")
html = soup.prettify()
for line in html.split('\n'):
if isinstance(line, str) and len(line.strip()) > 0:
if (line.strip()[0] == '<') and (line.strip()[0:2] != '<!'):
if len(line.split()) > 1:
tags.append(line.split()[0] + '>')
else:
tags.append(line.split()[0])
elif (line.strip()[0:2] == '</' and line.strip()[0:2] != '<!'):
tags.append(line.split()[0])
if len(tags) > 0:
#sentences.append(tags)
tags_set.extend(tags)
tags_set = list(set(tags_set))
else:
config.logger.info('no tags for this file...')
config.logger.info('saving dump')
le.fit(tags_set)
joblib.dump(le, str(my_encoder))
config.logger.info('tot files: ' + str(tot_files))
config.logger.info('dictionary size: ' + str(len(tags_set)))
except Exception as e:
config.logger.error(repr(e))
raise
def save_encoder_domain_and_suffix():
import pandas as pd
from sklearn import preprocessing
le1 = preprocessing.LabelEncoder()
le2 = preprocessing.LabelEncoder()
domain_s = ['com']
domain_s = ['']
domain = ['']
df_sites = pd.read_csv(DATASET_3C_SITES_PATH, na_values=0, delimiter=',', usecols=['document_url'])
for index, row in df_sites.iterrows():
url = str(row[0])
print(index, url)
try:
o = tldextract.extract(url)
if o.suffix is not None:
domain_s.append(str(o.suffix).lower())
if o.domain is not None:
domain.append(str(o.domain).lower())
except:
continue
# appending upper level domains, from http://data.iana.org/TLD/tlds-alpha-by-domain.txt
# Version 2018040300, Last Updated Tue Apr 3 07:07:01 2018 UTC
df = pd.read_csv(config.datasets + 'data/iana/org/TLD/tlds-alpha-by-domain.txt', sep=" ", header=None)
for index, row in df.iterrows():
print(index, row[0])
domain.append(str(row[0]).lower())
df = pd.read_csv(DATASET_MICROSOFT_PATH, delimiter='\t', header=0)
for index, row in df.iterrows():
url = str(row[3])
print(index, url)
try:
o = tldextract.extract(url)
if o.suffix is not None:
domain_s.append(str(o.suffix).lower())
if o.domain is not None:
domain.append(str(o.domain).lower())
except:
continue
le1.fit(domain)
joblib.dump(le1, ENC_WEB_DOMAIN)
print(le1.classes_)
le2.fit(domain_s)
joblib.dump(le2, ENC_WEB_DOMAIN_SUFFIX)
print(le2.classes_)
def diff_month(d1, d2):
return (d1.year - d2.year) * 12 + d1.month - d2.month
def save_url_body(extractor):
try:
config.logger.info('extracting features for: ' + extractor.url)
hash = get_md5_from_string(extractor.local_file_path)
text=extractor.webscrap.get_body()
with open(config.root_dir_data + 'marseille/input/' + hash + '.txt', "w") as file:
file.write(text)
except Exception as e:
config.logger.error(repr(e))
raise
if __name__ == '__main__':
save_encoder_domain_and_suffix()
# save_encoder_html2seq('/Users/diegoesteves/DropDrive/CloudStation/experiments_cache/web_credibility/output/all_html/') # just copy and paste all html files into a single temp file to generate this. | 36.860656 | 203 | 0.603402 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,719 | 0.191127 |
03887af536a348e4bad8d5d4b9e1fa7903be4aba | 9,953 | py | Python | lib/lopy_max31856.py | kurta241/MAX31856 | cda689b80827561b0524c9ba3aa257a3ab329460 | [
"MIT"
] | 1 | 2018-01-17T02:14:55.000Z | 2018-01-17T02:14:55.000Z | lib/lopy_max31856.py | kurta241/MAX31856 | cda689b80827561b0524c9ba3aa257a3ab329460 | [
"MIT"
] | null | null | null | lib/lopy_max31856.py | kurta241/MAX31856 | cda689b80827561b0524c9ba3aa257a3ab329460 | [
"MIT"
] | 2 | 2019-01-28T11:51:29.000Z | 2021-03-27T22:34:06.000Z | #!/usr/bin/python
#The MIT License (MIT)
#
#Copyright (c) 2017 Kurt Albrecht
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
'''
Class which defines interaction with the MAX31856 sensor.
library based on
johnrbnsn/Adafruit_Python_MAX31856
steve71/MAX31856
modified for pycom LoPy by Kurt Albrecht
'''
import math
from machine import SPI
from machine import Pin
# Thermocouple Types
MAX31856_B_TYPE = 0x0 # Read B Type Thermocouple
MAX31856_E_TYPE = 0x1 # Read E Type Thermocouple
MAX31856_J_TYPE = 0x2 # Read J Type Thermocouple
MAX31856_K_TYPE = 0x3 # Read K Type Thermocouple
MAX31856_N_TYPE = 0x4 # Read N Type Thermocouple
MAX31856_R_TYPE = 0x5 # Read R Type Thermocouple
MAX31856_S_TYPE = 0x6 # Read S Type Thermocouple
MAX31856_T_TYPE = 0x7 # Read T Type Thermocouple
class MAX31856(object):
"""Class to represent an Adafruit MAX31856 thermocouple temperature
measurement board.
"""
# Board Specific Constants
MAX31856_CONST_THERM_LSB = 2**-7
MAX31856_CONST_THERM_BITS = 19
MAX31856_CONST_CJ_LSB = 2**-6
MAX31856_CONST_CJ_BITS = 14
### Register constants, see data sheet Table 6 (in Rev. 0) for info.
# Read Addresses
MAX31856_REG_READ_CR0 = 0x00
MAX31856_REG_READ_CR1 = 0x01
MAX31856_REG_READ_MASK = 0x02
MAX31856_REG_READ_CJHF = 0x03
MAX31856_REG_READ_CJLF = 0x04
MAX31856_REG_READ_LTHFTH = 0x05
MAX31856_REG_READ_LTHFTL = 0x06
MAX31856_REG_READ_LTLFTH = 0x07
MAX31856_REG_READ_LTLFTL = 0x08
MAX31856_REG_READ_CJTO = 0x09
MAX31856_REG_READ_CJTH = 0x0A # Cold-Junction Temperature Register, MSB
MAX31856_REG_READ_CJTL = 0x0B # Cold-Junction Temperature Register, LSB
MAX31856_REG_READ_LTCBH = 0x0C # Linearized TC Temperature, Byte 2
MAX31856_REG_READ_LTCBM = 0x0D # Linearized TC Temperature, Byte 1
MAX31856_REG_READ_LTCBL = 0x0E # Linearized TC Temperature, Byte 0
MAX31856_REG_READ_FAULT = 0x0F # Fault status register
# Write Addresses
MAX31856_REG_WRITE_CR0 = 0x80
MAX31856_REG_WRITE_CR1 = 0x81
MAX31856_REG_WRITE_MASK = 0x82
MAX31856_REG_WRITE_CJHF = 0x83
MAX31856_REG_WRITE_CJLF = 0x84
MAX31856_REG_WRITE_LTHFTH = 0x85
MAX31856_REG_WRITE_LTHFTL = 0x86
MAX31856_REG_WRITE_LTLFTH = 0x87
MAX31856_REG_WRITE_LTLFTL = 0x88
MAX31856_REG_WRITE_CJTO = 0x89
MAX31856_REG_WRITE_CJTH = 0x8A # Cold-Junction Temperature Register, MSB
MAX31856_REG_WRITE_CJTL = 0x8B # Cold-Junction Temperature Register, LSB
# Pre-config Register Options
MAX31856_CR0_READ_ONE = 0x40 # One shot reading, delay approx. 200ms then read temp registers
MAX31856_CR0_READ_CONT = 0x80 # Continuous reading, delay approx. 100ms between readings
MAX31856_CR0_REJECT_50Hz = 0x01 # Noise rejection filter selection
def __init__(self, tc_type=MAX31856_K_TYPE, avgsel=0x0, cs_pin='P9'):
"""Initialize MAX31856 device with hardware SPI.
Args:
tc_type (1-byte Hex): Type of Thermocouple. Choose from class variables of the form
MAX31856.MAX31856_K_TYPE.
avgsel (1-byte Hex): Type of Averaging. Choose from values in CR0 table of datasheet.
Default is single sample.
cs_pin: chip select Pin. Default P9
"""
# initialize cs_pin in gpio mode and make it an CS output
self.CS = Pin(cs_pin, mode=Pin.OUT)
self.CS(True) # init chip select
self.spi = SPI(0, mode=SPI.MASTER, baudrate=500000, polarity=0, phase=1, firstbit=SPI.MSB)
# Initialize control register 1
self.tc_type = tc_type
self.avgsel = avgsel
self.cr1 = ((self.avgsel << 4) + self.tc_type)
# Setup for reading continuously with K-Type thermocouple und 50Hz noise rejection
self._write_register(self.MAX31856_REG_WRITE_CR0, self.MAX31856_CR0_READ_CONT+self.MAX31856_CR0_REJECT_50Hz)
self._write_register(self.MAX31856_REG_WRITE_CR1, self.cr1)
@staticmethod
def _cj_temp_from_bytes(msb, lsb):
# Takes in the msb and lsb from a Cold Junction (CJ) temperature reading and
# converts it into a decimal value.
# msb (hex): Most significant byte of CJ temperature
# lsb (hex): Least significant byte of a CJ temperature
# (((msb w/o +/-) shifted by number of 1 byte above lsb)
# + val_low_byte)
# >> shifted back by # of dead bits
temp_bytes = (((msb & 0x7F) << 8) + lsb) >> 2
if msb & 0x80:
# Negative Value. Scale back by number of bits
temp_bytes -= 2**(MAX31856.MAX31856_CONST_CJ_BITS -1)
# temp_bytes*value of lsb
temp_c = temp_bytes*MAX31856.MAX31856_CONST_CJ_LSB
return temp_c
@staticmethod
def _thermocouple_temp_from_bytes(byte0, byte1, byte2):
# Converts the thermocouple byte values to a decimal value.
# byte2 (hex): Most significant byte of thermocouple temperature
# byte1 (hex): Middle byte of thermocouple temperature
# byte0 (hex): Least significant byte of a thermocouple temperature
# temp_c (float): Temperature in degrees celsius
#
# (((val_high_byte w/o +/-) shifted by 2 bytes above LSB)
# + (val_mid_byte shifted by number 1 byte above LSB)
# + val_low_byte )
# >> back shift by number of dead bits
temp_bytes = (((byte2 & 0x7F) << 16) + (byte1 << 8) + byte0)
temp_bytes = temp_bytes >> 5
if byte2 & 0x80:
temp_bytes -= 2**(MAX31856.MAX31856_CONST_THERM_BITS -1)
# temp_bytes*value of LSB
temp_c = temp_bytes*MAX31856.MAX31856_CONST_THERM_LSB
return temp_c
def read_internal_temp_c(self):
# Return internal temperature value in degrees celsius.
# Read as a multibyte transfer to ensure both bytes are from the
# same temperature update.
self.CS(False)
self.spi.write(bytes([self.MAX31856_REG_READ_CJTH])) # first read address
val_high_byte = self.spi.read(1)[0]
val_low_byte = self.spi.read(1)[0]
self.CS(True)
temp_c = MAX31856._cj_temp_from_bytes(val_high_byte, val_low_byte)
return temp_c
def read_temp_c(self):
# Return the thermocouple temperature value in degrees celsius.
# Read as a multibyte transfer to ensure all three bytes are from the
# same temperature update.
self.CS(False)
self.spi.write(bytes([self.MAX31856_REG_READ_LTCBH])) # first read address
val_high_byte = self.spi.read(1)[0]
val_mid_byte = self.spi.read(1)[0]
val_low_byte = self.spi.read(1)[0]
fault = self.spi.read(1)[0]
self.CS(True)
# check fault byte
if ((fault & 0x80) != 0):
raise MAX31856Error("Cold Junction Out-of-Range")
if ((fault & 0x40) != 0):
raise MAX31856Error("Thermocouple Out-of-Range")
if ((fault & 0x20) != 0):
raise MAX31856Error("Cold-Junction High Fault")
if ((fault & 0x10) != 0):
raise MAX31856Error("Cold-Junction Low Fault")
if ((fault & 0x08) != 0):
raise MAX31856Error("Thermocouple Temperature High Fault")
if ((fault & 0x04) != 0):
raise MAX31856Error("Thermocouple Temperature Low Fault")
if ((fault & 0x02) != 0):
raise MAX31856Error("Overvoltage or Undervoltage Input Fault")
if ((fault & 0x01) != 0):
raise MAX31856Error("Thermocouple Open-Circuit Fault")
temp_c = MAX31856._thermocouple_temp_from_bytes(val_low_byte, val_mid_byte, val_high_byte)
return temp_c
def read_fault_register(self):
# Return bytes containing fault codes and hardware problems.
reg = self._read_register(self.MAX31856_REG_READ_FAULT)
return reg
def _read_register(self, address):
# Reads a register at address from the MAX31856
# Args: address (8-bit Hex): Address for read register.
self.CS(False)
self.spi.write(bytes([address]))
value=self.spi.read(1)[0]
self.CS(True)
return value
def _write_register(self, address, write_value):
# Writes to a register at address from the MAX31856
# address (8-bit Hex): Address for read register.
# write_value (8-bit Hex): Value to write to the register
self.CS(False)
self.spi.write(bytes([address, write_value]))
self.CS(True)
# print('Wrote Register: 0x{0:02X}, Value 0x{1:02X}'.format((address & 0xFF), (write_value & 0xFF)))
return True
class MAX31856Error(Exception):
# Constructor or Initializer
def __init__(self, msg):
super(MAX31856Error, self).__init__(msg)
| 41.298755 | 116 | 0.671858 | 8,174 | 0.82126 | 0 | 0 | 1,920 | 0.192907 | 0 | 0 | 4,958 | 0.498141 |
0388b46edfbba0396db6a8d52b25d94afdf26576 | 2,045 | py | Python | block.py | IgorReshetnyak/Statistics | f2f876a679389a7ecc4f24f23ca3f8aabd6a2604 | [
"MIT"
] | null | null | null | block.py | IgorReshetnyak/Statistics | f2f876a679389a7ecc4f24f23ca3f8aabd6a2604 | [
"MIT"
] | null | null | null | block.py | IgorReshetnyak/Statistics | f2f876a679389a7ecc4f24f23ca3f8aabd6a2604 | [
"MIT"
] | null | null | null |
"""Blocking analysis
Print running average
Blocking analysis scheme
Running error
takes as input filename to analyze
Igor Reshetnyak 2017
"""
import math,sys,pickle,os.path,pylab,time
if len(sys.argv)<2 :
print 'No file to analyze'
exit()
datafile=sys.argv[1]
file_name=datafile+''
if os.path.isfile(file_name)==False:
print 'file does not exist'
exit()
input=open(file_name,'r')
#samples=pickle.load(input)
samples=[]
for line in input:
data=line.split()
samples.append(float(data[1]))
input.close()
N=len(samples)
print N
#The first algorithm
def AvandError(sample,N):
Av=sum(sample)/float(N)
Error=math.sqrt(sum([(x-Av)**2 for x in sample]))/float(N)
return Av,Error
Av,Error=AvandError(samples,N)
#The bunching algorithm
def makebunch(sample):
new_list=[]
while len(sample)>1:
x=sample.pop(0)
y=sample.pop(0)
new_list.append((x+y)/2.)
return new_list
sample1=samples[:]
Avs2=[]
Errors2=[]
step=0
sample1=makebunch(sample1)
while len(sample1)>4:
print step
step+=1
N2=len(sample1)
Av2,Error2=AvandError(sample1,N2)
Avs2.append(Av2)
Errors2.append(Error2)
sample1=makebunch(sample1)
pylab.plot(range(1,step+1),Errors2,'ro')
pylab.axhline(y=Error,color='b')
pylab.axis([1,step,0,2*max(Errors2)])
pylab.xlabel('Bunching step')
pylab.ylabel('Error')
pylab.savefig(datafile+'Error2.png')
pylab.clf()
#Real time evaluation
Avs3=[samples[0]]
Errors3=[samples[0]**2]
for i in range(1,N):
Avs3.append(samples[i]+Avs3[i-1])
Errors3.append(samples[i]**2+Errors3[i-1])
Avs3=[Avs3[i]/float(i+1) for i in range(N)]
Errors3=[math.sqrt((Errors3[i]/float(i+1)-Avs3[i]**2)/float(i+1)) for i in range(N)]
pylab.plot(range(N),Errors3,'r')
pylab.axhline(y=Error,color='b')
pylab.axis([1,N-1,0,2*max(Errors3)])
pylab.xlabel('Step')
pylab.ylabel('Error')
pylab.savefig(datafile+'Error3.png')
pylab.clf()
pylab.plot(range(N),Avs3,'r')
pylab.axhline(y=Av,color='b')
#pylab.axis([1,N-1,1.1*min(Avs3),1.1*max(Avs3)])
pylab.xlabel('Step')
pylab.ylabel('Average')
pylab.savefig(datafile+'Average3.png')
pylab.clf()
| 18.590909 | 84 | 0.711002 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 436 | 0.213203 |
038951af24c93c1a96b554943a344caa4bd191ed | 4,038 | py | Python | test/login_test.py | fausecteam/faustctf-2017-toilet | 825d4bf82749ca56e104e9d9b5ecd241a75eb0b6 | [
"0BSD"
] | null | null | null | test/login_test.py | fausecteam/faustctf-2017-toilet | 825d4bf82749ca56e104e9d9b5ecd241a75eb0b6 | [
"0BSD"
] | null | null | null | test/login_test.py | fausecteam/faustctf-2017-toilet | 825d4bf82749ca56e104e9d9b5ecd241a75eb0b6 | [
"0BSD"
] | null | null | null | from util import *
from test import BasicTest
class LoginTest(BasicTest):
def __init__(self, s):
self._name = "Login Test"
self._socket = s
def run_all_tests(self):
self.login_regular()
self.login_overflow()
self.login_twice_same_user()
self.login_twice_different_user()
self.logout_regular()
self.logout_twice()
self.logout_before_login()
self.logout_after_drop()
self.logout_after_flush()
def login_regular(self):
print_info("{}: login_regular".format(self._name))
ret = login(self._socket, "foobar")
if not ret:
print_success("{}: login_regular".format(self._name))
else:
print_error("{}: login_regular".format(self._name))
logout(self._socket)
def login_overflow(self):
print_info("{}: login_overflow".format(self._name))
ret = login(self._socket, "A"*200)
read_menu(self._socket)
if not ret:
print_success("{}: login_overflow".format(self._name))
else:
print_error("{}: login_overflow".format(self._name))
logout(self._socket)
def login_twice_same_user(self):
print_info("{}: login_twice_same_user".format(self._name))
ret = login(self._socket, "foobar")
ret2 = login(self._socket, "foobar")
if not ret and ret2:
print_success("{}: login_twice_same_user".format(self._name))
else:
print_error("{}: login_twice_same_user".format(self._name))
logout(self._socket)
def login_twice_different_user(self):
print_info("{}: login_twice_different_user".format(self._name))
ret = login(self._socket, "foobar")
ret2 = login(self._socket, "barfoo")
if not ret and ret2:
print_success("{}: login_twice_different_user".format(self._name))
else:
print_error("{}: login_twice_different_user".format(self._name))
logout(self._socket)
def logout_regular(self):
print_info("{}: logout_regular".format(self._name))
ret = login(self._socket, "foobar")
ret2 = logout(self._socket)
if not ret and not ret2:
print_success("{}: logout_regular".format(self._name))
else:
print_error("{}: logout_regular".format(self._name))
def logout_twice(self):
print_info("{}: logout_twice".format(self._name))
ret = login(self._socket, "foobar")
ret2= logout(self._socket)
ret3 = logout(self._socket)
if not ret and not ret2 and ret3:
print_success("{}: logout_twice".format(self._name))
else:
print_error("{}: logout_twice".format(self._name))
def logout_before_login(self):
print_info("{}: logout_before_login".format(self._name))
ret = logout(self._socket)
if ret:
print_success("{}: logout_before_login".format(self._name))
else:
print_error("{}: logout_before_login".format(self._name))
def logout_after_drop(self):
print_info("{}: logout_after_drop".format(self._name))
ret = login(self._socket, "foobar")
ret2 = drop_load(self._socket, 30, "CONS", "LOAD")
ret3 = logout(self._socket)
if not ret and not ret2 and ret3:
print_success("{}: logout_after_drop".format(self._name))
else:
print_error("{}: logout_after_drop".format(self._name))
flush(self._socket)
logout(self._socket)
def logout_after_flush(self):
print_info("{}: logout_after_flush".format(self._name))
ret = login(self._socket, "foobar")
ret2 = drop_load(self._socket, 30, "CONS", "LOAD")
ret3 = flush(self._socket)
ret4 = logout(self._socket)
if not ret and not ret2 and not ret3 and not ret4:
print_success("{}: logout_after_flush".format(self._name))
else:
print_error("{}: logout_after_flush".format(self._name))
| 37.045872 | 78 | 0.614908 | 3,989 | 0.987865 | 0 | 0 | 0 | 0 | 0 | 0 | 735 | 0.182021 |
0389b372315afb61df48dea72270207d420b8e60 | 24,418 | py | Python | paintbyword/utils/dissect.py | alexandonian/paint-by-word | 40213a597f4ecbc8cf95abe5a6cb856dda01baef | [
"MIT"
] | null | null | null | paintbyword/utils/dissect.py | alexandonian/paint-by-word | 40213a597f4ecbc8cf95abe5a6cb856dda01baef | [
"MIT"
] | null | null | null | paintbyword/utils/dissect.py | alexandonian/paint-by-word | 40213a597f4ecbc8cf95abe5a6cb856dda01baef | [
"MIT"
] | null | null | null | import torch
import re
import copy
import numpy
from torch.utils.data.dataloader import default_collate
from netdissect import nethook, imgviz, tally, unravelconv, upsample
def acts_image(model, dataset,
               layer=None, unit=None,
               thumbsize=None,
               cachedir=None,
               return_as='strip',  # or individual, or tensor
               k=100, r=4096, q=0.01,
               batch_size=10,
               sample_size=None,
               num_workers=30):
    """Visualize top-activating image regions for units of a layer.

    Tallies top-k activations and a quantile estimate via acts_stats, then
    renders the highest-activation windows with window_images.  When `unit`
    is a single index (not a sequence), the result for just that unit is
    returned instead of a list over units.
    """
    assert return_as in ('strip', 'individual', 'tensor')
    # Tally with at least 200 exemplars so window_images can pick the best k.
    topk, rq, run = acts_stats(
        model, dataset, layer=layer, unit=unit,
        k=max(200, k), r=r, batch_size=batch_size,
        num_workers=num_workers, sample_size=sample_size,
        cachedir=cachedir)
    images = window_images(
        dataset, topk, rq, run,
        thumbsize=thumbsize, return_as=return_as, k=k, q=q,
        cachedir=cachedir)
    single_unit = unit is not None and not hasattr(unit, '__len__')
    return images[0] if single_unit else images
def grad_image(model, dataset,
               layer=None, unit=None,
               thumbsize=None,
               cachedir=None,
               return_as='strip',  # or individual, or tensor
               k=100, r=4096, q=0.01,
               batch_size=10,
               sample_size=None,
               num_workers=30):
    """Visualize image regions with the most extreme loss-gradient signal.

    Uses grad_stats to tally extreme values of the (negated) gradient at the
    layer output, then renders the top windows with window_images.  Only the
    top-k side of the tally is visualized; the bottom-k tally is discarded
    here.  A scalar `unit` yields the single-unit result directly.
    """
    assert return_as in ('strip', 'individual', 'tensor')
    topk, botk, rq, run = grad_stats(
        model, dataset, layer=layer, unit=unit,
        k=max(200, k), r=r,
        batch_size=batch_size, num_workers=num_workers,
        sample_size=sample_size, cachedir=cachedir)
    images = window_images(
        dataset, topk, rq, run,
        thumbsize=thumbsize, return_as=return_as, k=k, q=q,
        cachedir=cachedir)
    single_unit = unit is not None and not hasattr(unit, '__len__')
    return images[0] if single_unit else images
def update_image(model, dataset,
                 layer=None, unit=None,
                 thumbsize=None,
                 cachedir=None,
                 return_as='strip',  # or individual, or tensor
                 k=100, r=4096, q=0.01,
                 cinv=None,
                 batch_size=10,
                 sample_size=None,
                 num_workers=30):
    """Visualize image regions most affected by a gradient weight update.

    Uses update_stats (which substitutes the accumulated negative weight
    gradient for the layer weight, optionally projected through `cinv`),
    then renders the top-response windows with window_images.  A scalar
    `unit` yields the single-unit result directly.
    """
    assert return_as in ('strip', 'individual', 'tensor')
    topk, botk, rq, run = update_stats(
        model, dataset, layer=layer, unit=unit,
        k=max(200, k), r=r, cinv=cinv,
        batch_size=batch_size, num_workers=num_workers,
        sample_size=sample_size, cachedir=cachedir)
    images = window_images(
        dataset, topk, rq, run,
        thumbsize=thumbsize, return_as=return_as, k=k, q=q,
        cachedir=cachedir)
    single_unit = unit is not None and not hasattr(unit, '__len__')
    return images[0] if single_unit else images
def proj_image(model, dataset,
               layer=None, unit=None,
               thumbsize=None,
               cachedir=None,
               return_as='strip',  # or individual, or tensor
               k=100, r=4096, q=0.01,
               batch_size=10,
               sample_size=None,
               num_workers=30):
    """Visualize image regions for the projected-update statistic.

    Same rendering pipeline as update_image, but the statistics come from
    proj_stats.  A scalar `unit` yields the single-unit result directly.
    """
    assert return_as in ('strip', 'individual', 'tensor')
    topk, botk, rq, run = proj_stats(
        model, dataset, layer=layer, unit=unit,
        k=max(200, k), r=r, batch_size=batch_size,
        num_workers=num_workers, sample_size=sample_size,
        cachedir=cachedir)
    images = window_images(
        dataset, topk, rq, run,
        thumbsize=thumbsize, return_as=return_as, k=k, q=q,
        cachedir=cachedir)
    single_unit = unit is not None and not hasattr(unit, '__len__')
    return images[0] if single_unit else images
def acts_stats(model, dataset,
               layer=None, unit=None,
               cachedir=None,
               k=100, r=4096,
               batch_size=10,
               sample_size=None,
               num_workers=30):
    """Tally top-k activations and quantile statistics for a layer.

    Runs the model over `dataset`, capturing the output of `layer` (or the
    whole model's output if layer is None), and accumulates per-image peak
    activations (for top-k ranking) plus per-location samples (for quantile
    estimation).  Returns (topk, rq, run), where `run` recomputes the raw
    activation tensor for a batch and carries a `.name` used for caching.
    """
    assert not model.training
    # Normalize `unit` to a non-empty list (or None for all units).
    if unit is not None and not hasattr(unit, '__len__'):
        unit = [unit]
    assert unit is None or len(unit) > 0
    module = model if layer is None else nethook.get_module(model, layer)
    device = next(model.parameters()).device
    pin_memory = (device.type != 'cpu')

    def run(x, *args):
        # stop=True aborts the forward pass once `module` has produced output.
        with nethook.Trace(module, stop=True) as ret, torch.no_grad():
            model(x.to(device))
        acts = ret.output
        return acts if unit is None else acts[:, unit]
    run.name = 'acts'

    def compute_samples(batch, *args):
        acts = run(batch)
        # Per-image spatial maximum per unit, for top-k image ranking.
        per_image_max = acts.view(acts.shape[0], acts.shape[1], -1).max(2)[0]
        # Every spatial location as a sample row, for the quantile tally.
        per_location = acts.permute(0, 2, 3, 1).reshape(-1, acts.shape[1])
        return per_image_max, per_location

    topk, rq = tally.tally_topk_and_quantile(
        compute_samples, dataset, k=k, r=r,
        batch_size=batch_size,
        num_workers=num_workers, pin_memory=pin_memory,
        sample_size=sample_size,
        cachefile=f'{cachedir}/acts_topk_rq.npz' if cachedir else None)
    return topk, rq, run
def grad_stats(model, dataset, layer,
               unit=None,
               cachedir=None,
               k=100, r=4096,
               batch_size=10,
               sample_size=None,
               num_workers=30,
               ):
    """Tally extreme values and quantiles of the negated loss-gradient at
    the output of `layer` over `dataset`.

    Args:
        model: classifier in eval mode; cross-entropy loss against the
            dataset labels is backpropagated to obtain gradients.
        dataset: dataset yielding (input, label) batches.
        layer: dotted module name whose output gradient is measured; if
            None, the whole model is traced.
        unit: optional unit index or list of indices to restrict channels.
        cachedir: optional directory for cached statistics.
        k: number of extreme samples to keep on each side.
        r: quantile-estimate resolution.

    Returns:
        (topk, botk, rq, run): top-k and bottom-k tallies, the quantile
        tally, and `run(x, y)` which recomputes the per-batch negated
        gradient tensor (run.name is used in cache filenames elsewhere).
    """
    assert not model.training
    if unit is not None:
        if not hasattr(unit, '__len__'):
            unit = [unit]
    assert unit is None or len(unit) > 0
    # Make a copy so we can disable grad on parameters
    # (gradients should flow only to the traced activation, not weights).
    cloned_model = copy.deepcopy(model)
    nethook.set_requires_grad(False, cloned_model)
    if layer is not None:
        module = nethook.get_module(cloned_model, layer)
    else:
        module = cloned_model
    device = next(cloned_model.parameters()).device
    pin_memory = (device.type != 'cpu')

    def run(x, y, *args):
        # retain_grad=True makes the non-leaf traced output keep its .grad
        # after backward; enable_grad is needed since callers may be in
        # no_grad contexts.
        with nethook.Trace(module, retain_grad=True) as ret, (
                torch.enable_grad()):
            out = cloned_model(x.to(device))
            r = ret.output
            loss = torch.nn.functional.cross_entropy(out, y.to(device))
            loss.backward()
        # Negate so positive values correspond to loss-reducing directions.
        r = -r.grad
        if unit is not None:
            r = r[:, unit]
        return r
    run.name = 'grad'

    def compute_samples(x, y, *args):
        r = run(x, y)
        flat_r = r.view(r.shape[0], r.shape[1], -1)
        # Per-image spatial max and min per unit, plus every spatial
        # location as a sample row for the quantile tally.
        top_r = flat_r.max(2)[0]
        bot_r = flat_r.min(2)[0]
        all_r = r.permute(0, 2, 3, 1).reshape(-1, r.shape[1])
        return top_r, bot_r, all_r

    topk, botk, rq = tally.tally_extremek_and_quantile(
        compute_samples, dataset, k=k, r=r,
        batch_size=batch_size,
        num_workers=num_workers, pin_memory=pin_memory,
        sample_size=sample_size,
        cachefile=f'{cachedir}/grad_exk_rq.npz' if cachedir else None)
    return topk, botk, rq, run
def weight_grad(model, dataset, layer,
                unit=None,
                cachedir=None,
                batch_size=10,
                sample_size=None,
                num_workers=30):
    """Accumulate the cross-entropy loss gradient with respect to the
    weight of `layer` over the whole dataset, and return that tensor.

    The model is deep-copied so the caller's parameters are untouched;
    only the target layer's weight requires grad, and its .grad field
    accumulates across batches without being zeroed between them.
    """
    # Make a copy so we can disable grad on parameters
    frozen = copy.deepcopy(model)
    nethook.set_requires_grad(False, frozen)
    target = nethook.get_module(frozen, layer)
    nethook.set_requires_grad(True, target)
    device = next(frozen.parameters()).device
    pin_memory = (device.type != 'cpu')

    def backprop_batch(x, y, *args):
        # One forward/backward pass; grads accumulate on target.weight.grad.
        with torch.enable_grad():
            scores = frozen(x.to(device))
            loss = torch.nn.functional.cross_entropy(scores, y.to(device))
            loss.backward()

    def summarize():
        return dict(wgrad=target.weight.grad)

    target.weight.grad = None
    tallied = tally.tally_each(
        backprop_batch, dataset, summarize=summarize,
        batch_size=batch_size,
        num_workers=num_workers, pin_memory=pin_memory,
        sample_size=sample_size,
        cachefile=f'{cachedir}/weight_grad.npz' if cachedir else None)
    return tallied['wgrad']
def update_stats(model, dataset, layer,
                 unit=None,
                 cachedir=None,
                 k=100, r=4096,
                 batch_size=10,
                 cinv=None,
                 sample_size=None,
                 num_workers=30,
                 ):
    """Tally extreme values and quantiles of the layer response when its
    weight is replaced by the accumulated negative weight-gradient.

    The dataset-wide weight gradient is computed via weight_grad; the
    layer's weight in a cloned model is set to its negation (a gradient-
    descent update direction), the bias is zeroed, and the resulting layer
    outputs are tallied over the dataset.

    Args:
        model: classifier in eval mode.
        dataset: dataset yielding (input, label) batches.
        layer: dotted module name; assumed to have a `weight` parameter.
        unit: optional unit index or list of indices to restrict channels.
        cachedir: optional directory for cached statistics.
        k, r: number of extreme samples and quantile resolution.
        cinv: optional matrix multiplied into the flattened weight gradient
            before it is installed (projection step); changes the cache
            name from 'update' to 'proj'.  NOTE(review): presumably an
            inverse second-moment / whitening matrix — confirm with caller.

    Returns:
        (topk, botk, rq, run) as in grad_stats.
    """
    assert not model.training
    if unit is not None:
        if not hasattr(unit, '__len__'):
            unit = [unit]
    assert unit is None or len(unit) > 0
    # get weight grad (assumes layer has a weight param)
    wg = weight_grad(model, dataset, layer,
                     cachedir=cachedir,
                     batch_size=batch_size,
                     sample_size=sample_size,
                     num_workers=num_workers)
    if cinv is not None:
        # Project the flattened gradient through cinv (done on CPU),
        # then restore the original weight shape.
        wg = torch.mm(wg.view(-1,
                              cinv.shape[0]).cpu(),
                      cinv.cpu()).view(wg.shape)
    # copy the model so we can change its weights.
    cloned_model = copy.deepcopy(model)
    nethook.set_requires_grad(False, cloned_model)
    module = nethook.get_module(cloned_model, layer)
    device = next(cloned_model.parameters()).device
    pin_memory = (device.type != 'cpu')
    with torch.no_grad():
        # Install the (negated) gradient as the layer weight and drop bias,
        # so the layer output directly measures the update direction.
        module.weight[...] = -wg.to(device)
        if hasattr(module, 'bias') and module.bias is not None:
            module.bias[...] = 0

    def run(x, *args):
        # stop=True aborts the forward pass once `module` produces output.
        with nethook.Trace(module, stop=True) as ret, torch.no_grad():
            cloned_model(x.to(device))
        r = ret.output
        if unit is not None:
            r = r[:, unit]
        return r
    run.name = 'update' if cinv is None else 'proj'

    def compute_samples(batch, *args):
        r = run(batch)
        flat_r = r.view(r.shape[0], r.shape[1], -1)
        # Per-image spatial max/min per unit, and all spatial locations
        # as sample rows for the quantile tally.
        top_r = flat_r.max(2)[0]
        bot_r = flat_r.min(2)[0]
        all_r = r.permute(0, 2, 3, 1).reshape(-1, r.shape[1])
        return top_r, bot_r, all_r

    topk, botk, rq = tally.tally_extremek_and_quantile(
        compute_samples, dataset, k=k, r=r,
        batch_size=batch_size,
        num_workers=num_workers, pin_memory=pin_memory,
        sample_size=sample_size,
        cachefile=f'{cachedir}/{run.name}_exk_rq.npz' if cachedir else None)
    return topk, botk, rq, run
def proj_c2m(model, dataset, layer,
             cachedir=None,
             batch_size=10,
             sample_size=None,
             num_workers=30,
             ):
    """Tally the second moment of the per-patch inputs to a Conv2d layer.

    The layer is replaced by its left-unraveled decomposition so the patch
    inputs can be observed.  Returns (c2m, ex_run) where c2m is the
    second-moment tally and ex_run computes the unraveled input for a batch.
    """
    assert not model.training
    device = next(model.parameters()).device
    pin_memory = (device.type != 'cpu')
    cloned_model = copy.deepcopy(model)
    module = nethook.get_module(cloned_model, layer)
    assert isinstance(module, torch.nn.Conv2d)
    nethook.set_requires_grad(False, cloned_model)
    unraveled = unravelconv.unravel_left_conv2d(module)
    unraveled.wconv.weight.requires_grad = True
    unraveled.wconv.weight.grad = None
    nethook.replace_module(cloned_model, layer, unraveled)
    tconv = unraveled.tconv
    def ex_run(x, *args):
        # Capture the output of the unraveling transform (the conv input
        # expressed per patch); stop=True halts the forward pass there.
        with nethook.Trace(tconv, stop=True) as unrav:
            cloned_model(x.to(device))
        return unrav.output
    def ex_sample(x, *args):
        # Flatten spatial positions so each row is one patch observation.
        r = ex_run(x, *args)
        return r.permute(0, 2, 3, 1).reshape(-1, r.shape[1])
    c2m = tally.tally_second_moment(ex_sample,
                                    dataset,
                                    batch_size=batch_size,
                                    num_workers=num_workers, pin_memory=pin_memory,
                                    sample_size=sample_size,
                                    cachefile=f'{cachedir}/input_cov_moment.npz' if cachedir else None)
    return c2m, ex_run
def proj_stats(model, dataset, layer,
               unit=None,
               cachedir=None,
               k=100, r=4096,
               batch_size=10,
               sample_size=None,
               num_workers=30,
               ):
    """Like update_stats, but projects the weight gradient through the
    pseudoinverse of the layer-input second moment before applying it."""
    c2m, ex_run = proj_c2m(model, dataset, layer,
                           batch_size=batch_size, sample_size=sample_size,
                           cachedir=cachedir)
    # old obsolete method - not stable.
    # Cinv = c2m.momentPSD().cholesky_inverse()
    moment = c2m.moment()
    # TODO: consider uncommenting the following, which uses
    # correlation for a better-conditioned inverse.
    # Change 2.0 to 3.0 to reduce amplifying near-zero feats.
    # rn = moment.diag().clamp(1e-30).pow(-1/2.0)
    # moment = moment * rn[None,:] * rn[:,None]
    # The following is standard regularization, to try.
    # moment.diagonal.add_(1e-3)
    Cinv = moment.pinverse()
    return update_stats(model, dataset, layer, unit=unit,
                        cinv=Cinv,
                        k=k, r=r, batch_size=batch_size, sample_size=sample_size,
                        cachedir=cachedir)
def window_images(dataset, topk, rq, run,
                  thumbsize=None,
                  return_as='strip', # or individual, or tensor
                  k=None, q=0.01,
                  border_color=None,
                  vizname=None,
                  cachedir=None):
    """Render masked thumbnails of the images that drive each unit to its
    extreme activations, using the per-unit quantile level `q` from `rq`
    as the mask threshold.

    `return_as` selects the visualizer output format: a horizontal 'strip'
    per unit, 'individual' images, or one 'tensor' grid.
    """
    assert return_as in ['strip', 'individual', 'tensor']
    input_sample = default_collate([dataset[0]])
    r_sample = run(*input_sample)
    x_size = tuple(input_sample[0].shape[2:])
    if thumbsize is None:
        thumbsize = x_size
    if not isinstance(thumbsize, (list, tuple)):
        thumbsize = (thumbsize, thumbsize)
    if topk is None:
        # Without a tally, just visualize the first (k or 1) images per unit.
        topk = tally.range_topk(r_sample.size(1), size=(k or 1))
    default_vizname = 'top' if topk.largest else 'bot'
    if border_color in ['red', 'green', 'yellow']:
        # Named colors also become part of the default cache-file name.
        default_vizname += border_color
        border_color = dict(red=[255.0, 0.0, 0.0], green=[0.0, 255.0, 0.0],
                            yellow=[255.0, 255.0, 0.0])[border_color]
    if vizname is None:
        vizname = default_vizname
    iv = imgviz.ImageVisualizer(
        thumbsize, image_size=x_size, source=dataset,
        level=rq.quantiles((1.0 - q) if topk.largest else q))
    func = dict(
        strip=iv.masked_images_for_topk,
        individual=iv.individual_masked_images_for_topk,
        tensor=iv.masked_image_grid_for_topk)[return_as]
    acts_images = func(run, dataset, topk, k=k, largest=topk.largest,
                       border_color=border_color,
                       cachefile=f'{cachedir}/{vizname}{k or ""}images.npz' if cachedir else None)
    return acts_images
def label_stats(dataset_with_seg, num_seglabels,
                run, level, upfn=None,
                negate=False,
                cachedir=None,
                batch_size=10,
                sample_size=None,
                num_workers=30):
    """Tally intersection and union between thresholded unit activations and
    one-hot segmentation labels over the whole dataset.

    The dataset yields tuples whose second-to-last element is the
    segmentation map; activations are upsampled to segmentation resolution
    (via `upfn`) and thresholded at `level` (inverted if `negate`).
    """
    # Create upfn
    data_sample = default_collate([dataset_with_seg[0]])
    # Drop the segmentation (second-to-last element) from the run() inputs.
    input_sample = data_sample[:-2] + data_sample[-1:]
    seg_sample = data_sample[-2]
    r_sample = run(*input_sample)
    r_size = tuple(r_sample.shape[2:])
    seg_size = tuple(seg_sample.shape[2:])
    device = r_sample.device
    pin_memory = (device.type != 'cpu')
    if upfn is None:
        upfn = upsample.upsampler(seg_size, r_size)
    def compute_concept_pair(batch, seg, *args):
        seg = seg.to(device)
        acts = run(batch, *args)
        hacts = upfn(acts)
        iacts = (hacts < level if negate else hacts > level) # indicator
        # One-hot encode the segmentation into a boolean mask per label.
        iseg = torch.zeros(seg.shape[0], num_seglabels,
                           seg.shape[2], seg.shape[3],
                           dtype=torch.bool, device=seg.device)
        iseg.scatter_(dim=1, index=seg, value=1)
        # Flatten so each row is one spatial position.
        flat_segs = iseg.permute(0, 2, 3, 1).reshape(-1, iseg.shape[1])
        flat_acts = iacts.permute(0, 2, 3, 1).reshape(-1, iacts.shape[1])
        return flat_segs, flat_acts
    neg = 'neg' if negate else ''
    iu99 = tally.tally_all_intersection_and_union(
        compute_concept_pair,
        dataset_with_seg,
        sample_size=sample_size,
        num_workers=num_workers, pin_memory=pin_memory,
        cachefile=f'{cachedir}/{neg}{run.name}_iu.npz' if cachedir else None)
    return iu99
def topk_label_stats(dataset_with_seg, num_seglabels,
                     run, level, topk, k=None,
                     upfn=None,
                     negate=False,
                     cachedir=None,
                     batch_size=10,
                     sample_size=None,
                     num_workers=30):
    """Compute per-unit IoU against segmentation labels, restricted to each
    unit's top-k images from `topk`.

    On each visited image, the unit's activation map is upsampled, thresholded
    at `level`, and compared with the one-hot segmentation to accumulate
    intersection and union counts per segmentation label.

    :param k: number of top images per unit to visit (default 100).
    :return: tensor of shape (num_units, num_seglabels) with IoU per label.

    Note: `negate`, `cachedir`, `batch_size`, `sample_size` and `num_workers`
    are currently unused; they are kept for signature parity with label_stats.
    """
    # Create upfn
    data_sample = default_collate([dataset_with_seg[0]])
    # Drop the segmentation (second-to-last element) from the run() inputs.
    input_sample = data_sample[:-2] + data_sample[-1:]
    seg_sample = data_sample[-2]
    r_sample = run(*input_sample)
    r_size = tuple(r_sample.shape[2:])
    seg_size = tuple(seg_sample.shape[2:])
    device = r_sample.device
    num_units = r_sample.shape[1]
    pin_memory = (device.type != 'cpu')
    if upfn is None:
        upfn = upsample.upsampler(seg_size, r_size)
    intersections = torch.zeros(num_units, num_seglabels).to(device)
    unions = torch.zeros(num_units, num_seglabels).to(device)
    def collate_unit_iou(units, imgs, seg, labels):
        # Accumulates into the enclosing intersections/unions tensors for
        # each (unit, image) pair delivered by gather_topk, so nothing
        # needs to be returned.
        seg = seg.to(device)
        acts = run(imgs, labels)
        hacts = upfn(acts)
        iacts = (hacts > level) # indicator
        iseg = torch.zeros(seg.shape[0], num_seglabels,
                           seg.shape[2], seg.shape[3],
                           dtype=torch.bool, device=seg.device)
        iseg.scatter_(dim=1, index=seg, value=1)
        for i in range(len(imgs)):
            ulist = units[i]
            for unit, _ in ulist:
                im_i = (iacts[i, unit][None] & iseg[i]).view(
                    num_seglabels, -1).float().sum(1)
                im_u = (iacts[i, unit][None] | iseg[i]).view(
                    num_seglabels, -1).float().sum(1)
                intersections[unit] += im_i
                unions[unit] += im_u
        return []
    # Bug fix: the `k` argument was previously ignored (hard-coded to 100);
    # 100 remains the default when k is not given.
    tally.gather_topk(collate_unit_iou, dataset_with_seg, topk,
                      k=(k if k is not None else 100))
    return intersections / (unions + 1e-20)
### Experiment below - find the best representative with gradient in the consensus direction.
# 1. Tally weight grad over the dataset.
# 2. For each unit, find the topk images with gradients in the same direction as this
# consensus weight grad.
def wgrad_stats(model, dataset, layer, cachedir=None,
                k=100, r=4096,
                batch_size=10,
                sample_size=None,
                num_workers=30,
                ):
    """Tally extreme-k and quantile stats of per-patch weight-gradient scores.

    A frozen convolution (`unfolder`) whose weight is the dataset-average
    weight gradient is used to dot-product each patch's contribution against
    the consensus gradient direction, producing a per-unit score at every
    spatial position.  Returns (topk, botk, rq, run).
    """
    assert not model.training
    # NOTE(review): this module assignment is overwritten below after the
    # model is cloned; retained as-is to preserve behavior.
    if layer is not None:
        module = nethook.get_module(model, layer)
    else:
        module = model
    device = next(model.parameters()).device
    pin_memory = (device.type != 'cpu')
    cloned_model = copy.deepcopy(model)
    nethook.set_requires_grad(False, cloned_model)
    module = nethook.get_module(cloned_model, layer)
    module.weight.requires_grad = True
    module.weight.grad = None
    # Dataset-average weight gradient (the "consensus" direction).
    wg = weight_grad(model, dataset, layer,
                     cachedir=cachedir,
                     batch_size=batch_size,
                     sample_size=sample_size,
                     num_workers=num_workers)
    wg = wg.to(device)
    module.weight.requires_grad = False
    ks = module.kernel_size
    # A conv with identical geometry but weights fixed to wg; applying it to
    # the layer input computes, at each output position, the dot product of
    # that patch with the consensus weight gradient.
    unfolder = torch.nn.Conv2d(
        in_channels=module.in_channels, out_channels=module.out_channels,
        kernel_size=ks, padding=module.padding,
        dilation=module.dilation, stride=module.stride,
        bias=False)
    nethook.set_requires_grad(False, unfolder)
    unfolder.to(device)
    unfolder.weight[...] = wg
    def run(x, y, *args, return_details=False):
        # Forward + backward one batch, retaining the layer's input and the
        # gradient of the loss with respect to its output.
        with nethook.Trace(module, retain_grad=True, retain_input=True) as ret, (
                torch.enable_grad()):
            out = cloned_model(x.to(device))
            r = ret.output
            inp = ret.input
            loss = torch.nn.functional.cross_entropy(out, y.to(device))
            loss.backward()
        # The contribution to the weight gradient from every patch.
        # If we were to sum unfgrad.sum(dim=(0,5,6)) it would equal module.weight.grad
        # Now to reduce things, we need to score it per-patch somehow. We will dot-product
        # the average grad per-unit to see which patches push most in the consensus direction.
        # This gives a per-unit score at every patch.
        score = unfolder(inp) * r.grad
        # Hack: it is interesting to separate the cases where rgrad is positive
        # (the patch should look more like this to decrease the loss) from cases
        # where it is negative (where the patch should look less like this). So
        # we will drop cases where the score is negative, and then negate the
        # score when ograd is negative.
        signed_score = score.clamp(0) * (r.grad.sign())
        if return_details:
            return {k: v.detach().cpu() for k, v in dict(
                model_output=out,
                loss=loss,
                layer_output=r,
                layer_output_grad=r.grad,
                layer_input=inp,
                layer_input_by_Edw=unfolder(inp),
                weight_grad=wg,
                score=score,
                signed_score=signed_score).items()}
        return signed_score
    # Equivalent unrolled code below.
    # scores = []
    # for i in range(0, len(unf), 2):
    #     ug = unf[i:i+2,None,:,:,:,:,:] * r.grad[i:i+2,:,None,None,None,:,:]
    #     # Now to reduce things, we need to score it per-patch somehow. We will dot-product
    #     # the average grad per-unit to see which patches push most in the consensus direction.
    #     # This gives a per-unit score at every patch.
    #     score = (ug * wg[None,:,:,:,:,None,None]
    #             ).view(ug.shape[0], ug.shape[1], -1, ug.shape[5], ug.shape[6]).sum(2)
    #     scores.append(score)
    # return torch.cat(scores)
    run.name = 'wgrad'
    def compute_samples(batch, labels, *args):
        # Per-image extreme scores plus all per-position scores for quantiles.
        score = run(batch, labels)
        flat_score = score.view(score.shape[0], score.shape[1], -1)
        top_score = flat_score.max(2)[0]
        bot_score = flat_score.min(2)[0]
        all_score = score.permute(0, 2, 3, 1).reshape(-1, score.shape[1])
        return top_score, bot_score, all_score
    topk, botk, rq = tally.tally_extremek_and_quantile(
        compute_samples, dataset, k=k, r=r,
        batch_size=batch_size,
        num_workers=num_workers, pin_memory=pin_memory,
        sample_size=sample_size,
        cachefile=f'{cachedir}/swgrad_exk_rq.npz' if cachedir else None)
    return topk, botk, rq, run
### Experiment below:
# tally p-v times every post-relu activation in a layer
# and also sum up every activation
# This is intended to measure how well a (simple linear) model
# of the given feature can help solve the error p-v.
def sep_stats(model, dataset, layer=None, cachedir=None,
              batch_size=10, sample_size=None, num_workers=30):
    """Tally the mean of (softmax error p-y, one-hot y, 1) times each unit's
    spatially-averaged activation.

    Intended to measure how well a simple linear model of each feature can
    help reduce the classification error p-y (see file comments above).
    Returns the tally mean/variance object.
    """
    assert not model.training
    if layer is not None:
        module = nethook.get_module(model, layer)
    else:
        module = model
    device = next(model.parameters()).device
    pin_memory = (device.type != 'cpu')
    def run(x, labels, *args):
        # Forward pass capturing the layer output; returns activations r,
        # softmax probabilities p, and one-hot labels y.
        with nethook.Trace(module) as ret, torch.no_grad():
            logits = model(x.to(device))
            labels = labels.to(device)
            r = ret.output
            p = torch.nn.functional.softmax(logits, dim=1)
            y = torch.zeros_like(p)
            y.scatter_(1, labels[:,None], 1)
            return r, p, y
    def compute_samples(batch, labels, *args):
        r, p, y = run(batch, labels)
        err = p-y
        # Columns: error per class, one-hot label per class, and a bias term.
        sep_t = torch.cat((err, y, torch.ones(err.shape[0], 1, device=device)), dim=1)
        # Spatial mean of each unit's activation.
        flat_r = r.view(r.shape[0], r.shape[1], -1).mean(2)[:,:,None]
        r_times_sep_t = flat_r * sep_t[:,None,:]
        # Number of stats to track is units * (classes + 1)
        sep_data = r_times_sep_t.view(len(batch), -1)
        return sep_data
    sepmv = tally.tally_mean(
        compute_samples, dataset,
        batch_size=batch_size,
        num_workers=num_workers, pin_memory=pin_memory,
        sample_size=sample_size,
        cachefile=f'{cachedir}/sep_stats.npz' if cachedir else None)
    return sepmv
| 39.447496 | 104 | 0.576788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,176 | 0.130068 |
038e7cca624d292ed41cd443faa6f20564a62fd4 | 303 | py | Python | abct/abct.py | hornc/abctag | d2abd7384e0e155fbee35c639dd1afe6336f0bd9 | [
"MIT"
] | 2 | 2020-07-25T07:22:50.000Z | 2022-02-06T16:09:27.000Z | abct/abct.py | hornc/abctag | d2abd7384e0e155fbee35c639dd1afe6336f0bd9 | [
"MIT"
] | 2 | 2021-08-21T09:24:14.000Z | 2021-08-24T08:08:57.000Z | abct/abct.py | hornc/abctag | d2abd7384e0e155fbee35c639dd1afe6336f0bd9 | [
"MIT"
] | null | null | null | from math import floor, log
from math import floor, log


def n(x):
    """Parity selector: return 2 when x is even, 1 when x is odd."""
    return (x + 1) % 2 + 1


def s(x):
    """Return floor(log2(x + 1))."""
    return floor(log(x + 1, 2))


def r(x):
    """Index transform used by the abct tag system.

    Domain semantics are not documented in SOURCE; the arithmetic is
    preserved exactly from the original lambda.
    """
    return x // 2 - n(x) + 1 + n(x) * 2**(s(x) - 1)


def pn(p):
    """Apply r once when p is odd, twice (r(r(p))) when p is even."""
    return r(p) + (n(p) == 2) * (r(r(p)) - r(p))


def dn(p, d):
    """Companion transform of d relative to p.

    Converted from assigned lambdas to named functions (PEP 8 E731);
    behavior is unchanged.
    """
    return (d + (n(p) == n(d) == 2) * (n(r(p)) * 2**s(d)) - (n(p) == 1)) // (3 - n(p))
| 43.285714 | 94 | 0.392739 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
038ef106b20c259dc5c6a88c1f1d3f5f223b4129 | 289 | py | Python | src/evidently/profile_sections/__init__.py | jenoOvchi/evidently | 6ca36d633ee258442410ef47a219ff40b8a5097b | [
"Apache-2.0"
] | null | null | null | src/evidently/profile_sections/__init__.py | jenoOvchi/evidently | 6ca36d633ee258442410ef47a219ff40b8a5097b | [
"Apache-2.0"
] | null | null | null | src/evidently/profile_sections/__init__.py | jenoOvchi/evidently | 6ca36d633ee258442410ef47a219ff40b8a5097b | [
"Apache-2.0"
] | null | null | null | import warnings
import evidently.model_profile.sections
from evidently.model_profile.sections import *

# Re-export the new package's search path so submodule imports keep working.
__path__ = evidently.model_profile.sections.__path__  # type: ignore

# Fix: pass DeprecationWarning explicitly; warnings.warn defaults to
# UserWarning, so this deprecation was not filtered like other deprecations.
warnings.warn("'import evidently.profile_sections' is deprecated, use 'import evidently.model_profile.sections'",
              DeprecationWarning)
| 32.111111 | 113 | 0.83045 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.387543 |
038fae421f26bf3cba7bbf5e1d0142783c1ea9e8 | 52,839 | py | Python | openEPhys_DACQ/NWBio.py | Barry-lab/SpatialAutoDACQ | f39341ea5c1a51c328ec43dba8e4d9a8f7d49a48 | [
"MIT"
] | null | null | null | openEPhys_DACQ/NWBio.py | Barry-lab/SpatialAutoDACQ | f39341ea5c1a51c328ec43dba8e4d9a8f7d49a48 | [
"MIT"
] | null | null | null | openEPhys_DACQ/NWBio.py | Barry-lab/SpatialAutoDACQ | f39341ea5c1a51c328ec43dba8e4d9a8f7d49a48 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import h5py
import numpy as np
import os
import sys
from openEPhys_DACQ.HelperFunctions import tetrode_channels, channels_tetrode, closest_argmin
from pprint import pprint
from copy import copy
import argparse
import importlib
from tqdm import tqdm
def OpenEphys_SamplingRate():
    """Return the OpenEphys acquisition sampling rate in Hz."""
    return 30000
def bitVolts():
    """Return the scale factor for converting raw continuous samples to
    microvolts (raw int16 values must be multiplied by this)."""
    return 0.195
def spike_waveform_leftwards_shift():
    """Return how far (in seconds) spike waveforms are shifted leftwards
    relative to the detection point.

    :return: shift in seconds (6 samples at the acquisition rate)
    :rtype: float
    """
    sample_duration = 1.0 / OpenEphys_SamplingRate()
    return 6 * sample_duration
def get_filename(path):
    """Return `path` itself if it is an existing file; otherwise treat it as
    a recording directory and return the default 'experiment_1.nwb' in it."""
    if os.path.isfile(path):
        return path
    return os.path.join(path, 'experiment_1.nwb')
def delete_path_in_file(filename, path):
    """Remove the dataset or group at the given HDF5 path from the file."""
    with h5py.File(filename, 'r+') as nwb_file:
        del nwb_file[path]
def get_recordingKey(filename):
    """Return the name of the first recording group in the NWB file."""
    with h5py.File(filename, 'r') as nwb_file:
        recording_keys = list(nwb_file['acquisition']['timeseries'].keys())
    return recording_keys[0]
def get_all_processorKeys(filename):
    """Return the names of all processor groups under the first recording's
    continuous data in the NWB file."""
    recording_key = get_recordingKey(filename)
    with h5py.File(filename, 'r') as nwb_file:
        continuous_group = nwb_file['acquisition']['timeseries'][recording_key]['continuous']
        return list(continuous_group.keys())
def get_processorKey(filename):
    """Return the name of the first processor group in the NWB file."""
    processor_keys = get_all_processorKeys(filename)
    return processor_keys[0]
def get_all_processor_paths(filename):
return ['/acquisition/timeseries/' + get_recordingKey(filename)
+ '/continuous/' + processorKey
for processorKey in get_all_processorKeys(filename)]
def get_processor_path(filename):
    """Return the HDF5 path to the first processor's continuous data group."""
    recording_key = get_recordingKey(filename)
    processor_key = get_processorKey(filename)
    return '/acquisition/timeseries/{}/continuous/{}'.format(recording_key, processor_key)
def check_if_open_ephys_nwb_file(filename):
    """
    Returns True if processor path can be identified
    in the file and False otherwise.
    """
    try:
        processor_path = get_processor_path(filename)
        with h5py.File(filename, 'r') as h5file:
            return processor_path in h5file
    except Exception:
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are not swallowed. A missing file, a non-HDF5 file or
        # an unexpected layout all mean "not an OpenEphys NWB file".
        return False
def get_downsampled_data_paths(filename):
    """
    Returns paths to downsampled data in NWB file.

    :param filename: path to NWB file
    :type filename: str
    :return: paths
    :rtype: dict
    """
    processor_path = get_processor_path(filename)
    paths = {}
    paths['tetrode_data'] = processor_path + '/downsampled_tetrode_data/'
    paths['aux_data'] = processor_path + '/downsampled_AUX_data/'
    paths['timestamps'] = processor_path + '/downsampled_timestamps/'
    paths['info'] = processor_path + '/downsampling_info/'
    return paths
def check_if_downsampled_data_available(filename):
    """
    Checks if downsampled data is available in the NWB file.

    :param filename: path to NWB file
    :type filename: str
    :return: available
    :rtype: bool
    """
    paths = get_downsampled_data_paths(filename)
    with h5py.File(filename, 'r') as h5file:
        # START Workaround for older downsampled datasets
        if '/acquisition/timeseries/recording1/continuous/processor102_100/tetrode_lowpass' in h5file:
            return True
        # END Workaround for older downsampled datasets
        # All downsampled datasets must exist.
        for path in [paths[key] for key in paths]:
            if not (path in h5file):
                return False
        # Downsampled tetrode data must not be empty.
        if h5file[paths['tetrode_data']].shape[0] == 0:
            return False
        # Bug fix: the previous chained `a != b != c` check only failed when
        # BOTH adjacent comparisons differed, so a mismatch where
        # tetrode_data == timestamps but != aux_data slipped through.
        # Require all three sample counts to be equal.
        if not (h5file[paths['tetrode_data']].shape[0]
                == h5file[paths['timestamps']].shape[0]
                == h5file[paths['aux_data']].shape[0]):
            return False
    return True
def get_raw_data_paths(filename):
    """
    Returns paths to raw data in NWB file.

    :param filename: path to NWB file
    :type filename: str
    :return: paths
    :rtype: dict
    """
    processor_path = get_processor_path(filename)
    paths = {}
    paths['continuous'] = processor_path + '/data'
    paths['timestamps'] = processor_path + '/timestamps'
    return paths
def check_if_raw_data_available(filename):
    """
    Checks whether raw continuous data and its timestamps exist in the file.

    (Docstring fix: the previous docstring was copy-pasted from
    get_raw_data_paths and incorrectly claimed a dict of paths is returned.)

    :param filename: path to NWB file
    :type filename: str
    :return: True if both raw continuous data and timestamps are present
    :rtype: bool
    """
    paths = get_raw_data_paths(filename)
    # Return the boolean directly instead of the redundant if/else.
    return all(check_if_path_exists(filename, paths[key]) for key in paths)
def save_downsampling_info_to_disk(filename, info):
    """Write downsampling metadata into the NWB file.

    `info` must contain 'original_sampling_rate', 'downsampled_sampling_rate'
    and 'downsampled_channels'; values are coerced to int64 before saving.
    """
    # Get paths to respective dataset locations
    paths = get_downsampled_data_paths(filename)
    # Ensure dictionary fields are in correct format
    info = {'original_sampling_rate': np.int64(info['original_sampling_rate']),
            'downsampled_sampling_rate': np.int64(info['downsampled_sampling_rate']),
            'downsampled_channels': np.array(info['downsampled_channels'], dtype=np.int64)}
    # Write data to disk
    with h5py.File(filename, 'r+') as h5file:
        recursively_save_dict_contents_to_group(h5file, paths['info'], info)
def save_downsampled_data_to_disk(filename, tetrode_data, timestamps, aux_data, info):
    """Write downsampled tetrode data, timestamps, AUX data and the
    accompanying downsampling info dict into the NWB file."""
    # Get paths to respective dataset locations
    paths = get_downsampled_data_paths(filename)
    # Write data to disk
    save_downsampling_info_to_disk(filename, info)
    with h5py.File(filename, 'r+') as h5file:
        h5file[paths['tetrode_data']] = tetrode_data
        h5file[paths['timestamps']] = timestamps
        h5file[paths['aux_data']] = aux_data
def delete_raw_data(filename, only_if_downsampled_data_available=True):
    """Delete raw continuous data and timestamps from the NWB file.

    :param filename: path to NWB file
    :param only_if_downsampled_data_available: if True (default), abort
        deletion unless downsampled data is present in the file.

    Note: deleting HDF5 datasets does not shrink the file on its own;
    repack_NWB_file() is used elsewhere to reclaim the space.
    """
    if only_if_downsampled_data_available:
        if not check_if_downsampled_data_available(filename):
            print('Warning', 'Downsampled data not available in NWB file. Raw data deletion aborted.')
            return None
    if not check_if_raw_data_available(filename):
        print('Warning', 'Raw data not available to be deleted in: ' + filename)
    else:
        raw_data_paths = get_raw_data_paths(filename)
        with h5py.File(filename,'r+') as h5file:
            for path in [raw_data_paths[key] for key in raw_data_paths]:
                del h5file[path]
def repack_NWB_file(filename, replace_original=True, check_validity_with_downsampled_data=True):
    """Repack the NWB file with h5repack to reclaim space freed by deleted datasets.

    :param filename: path to NWB file
    :param replace_original: if True, the repacked copy replaces the original
    :param check_validity_with_downsampled_data: if True, verify the repacked
        copy still contains downsampled data before replacing the original
    """
    # Create a repacked copy of the file
    # NOTE(review): paths are interpolated unquoted into shell commands;
    # filenames containing spaces or shell metacharacters will break this.
    os.system('h5repack ' + filename + ' ' + (filename + '.repacked'))
    # Check that the new file is not corrupted
    if check_validity_with_downsampled_data:
        # Bug fix: validate the repacked copy, not the original file
        # (previously `filename` was checked, which says nothing about
        # whether the repacked output is intact).
        if not check_if_downsampled_data_available(filename + '.repacked'):
            raise Exception('Downsampled data cannot be found in repacked file. Original file not replaced.')
    # Replace original file with repacked file
    if replace_original:
        os.system('mv ' + (filename + '.repacked') + ' ' + filename)
def repack_all_nwb_files_in_directory_tree(folder_path, replace_original=True,
                                           check_validity_with_downsampled_data=True):
    """Recursively walk folder_path and repack every experiment_1.nwb found."""
    for current_dir, _subdirs, filenames in os.walk(folder_path):
        for name in filenames:
            if name != 'experiment_1.nwb':
                continue
            full_path = os.path.join(current_dir, name)
            print('Repacking file {}'.format(full_path))
            repack_NWB_file(full_path,
                            replace_original=replace_original,
                            check_validity_with_downsampled_data=check_validity_with_downsampled_data)
def list_AUX_channels(filename, n_tetrodes):
    """Return channel numbers of AUX (non-tetrode) channels in the raw data.

    :param filename: path to NWB file
    :param n_tetrodes: number of tetrodes recorded (4 channels each)
    :return: range of AUX channel indices
    """
    data = load_continuous(filename)
    n_channels = data['continuous'].shape[1]
    data['file_handle'].close()
    # NOTE(review): the range starts at n_tetrodes * 4 - 1, which is the last
    # tetrode channel rather than the first AUX channel (n_tetrodes * 4).
    # Looks like an off-by-one - confirm against callers before changing.
    aux_chan_list = range(n_tetrodes * 4 - 1, n_channels)
    return aux_chan_list
def load_continuous(filename):
    """Open the NWB file and return handles to raw continuous data.

    Returns a dict with 'continuous' (raw samples; multiply by bitVolts()
    to obtain microvolts), 'timestamps' and the open 'file_handle', or
    None if no raw data is present.  The caller is responsible for closing
    'file_handle' when data is returned.
    """
    # Load data file
    h5file = h5py.File(filename, 'r')
    # Load timestamps and continuous data
    recordingKey = get_recordingKey(filename)
    processorKey = get_processorKey(filename)
    path = '/acquisition/timeseries/' + recordingKey + '/continuous/' + processorKey
    if check_if_path_exists(filename, path + '/data'):
        continuous = h5file[path + '/data'] # not converted to microvolts!!!! need to multiply by 0.195
        timestamps = h5file[path + '/timestamps']
        data = {'continuous': continuous, 'timestamps': timestamps, 'file_handle': h5file}
    else:
        # Bug fix: close the file handle before returning None; previously
        # the handle leaked whenever no raw data was present.
        h5file.close()
        data = None
    return data
def load_raw_data_timestamps_as_array(filename):
    """Return the raw continuous data timestamps as a flat numpy array,
    closing the underlying file handle afterwards."""
    data = load_continuous(filename)
    try:
        return np.array(data['timestamps']).squeeze()
    finally:
        data['file_handle'].close()
def load_data_columns_as_array(filename, data_path, first_column, last_column):
    """
    Loads a contiguous range of columns efficiently from an HDF5 dataset.

    :param filename: full path to NWB file
    :param data_path: HDF5 path of the dataset
    :param first_column: first column to load (inclusive)
    :param last_column: end of the column range (exclusive)
    :return: the selected columns as an array
    """
    with h5py.File(filename, 'r') as h5file:
        # Slice directly on the h5py dataset so only the requested columns
        # are read from disk.  (A redundant whole-dataset reference that was
        # immediately overwritten has been removed.)
        data = h5file[data_path][:, first_column:last_column]
    return data
def load_data_as_array(filename, data_path, columns):
    """
    Fast way of reading a single column or a set of columns.

    filename - str - full path to file
    columns - list - column numbers to include (starting from 0).
        Single column can be given as a single list element or int.
        Columns in the list must be in sorted (ascending) order.
    """
    # Make columns variable into a list if int given
    if isinstance(columns, int):
        columns = [columns]
    # Check that all elements of columns are integers
    if isinstance(columns, list):
        for column in columns:
            if not isinstance(column, int):
                raise ValueError('columns argument must be a list of int values.')
    else:
        raise ValueError('columns argument must be list or int.')
    # Check that column number are sorted
    if sorted(columns) != columns:
        raise ValueError('columns was not in sorted (ascending) order.')
    # Check that data is available, otherwise raise an error
    if not check_if_path_exists(filename, data_path):
        raise ValueError('File ' + filename + '\n'
                         + 'Does not contain path ' + data_path)
    # Find contiguous column groups.
    # column_groups marks each requested column with the first column of the
    # contiguous run it belongs to, e.g. [0, 1, 3, 4] -> [0, 0, 3, 3].
    current_column = columns[0]
    column_groups = [current_column]
    for i in range(1, len(columns)):
        if (columns[i] - columns[i - 1]) == 1:
            column_groups.append(current_column)
        else:
            column_groups.append(columns[i])
            current_column = columns[i]
    # Find start and end column numbers for contiguous groups
    column_ranges = []
    for first_channel in sorted(set(column_groups)):
        # Each group's length equals how many columns carry its start marker.
        last_channel = first_channel + column_groups.count(first_channel)
        column_ranges.append((first_channel, last_channel))
    # Get contiguous column segments for each group
    column_group_data = []
    for column_range in column_ranges:
        column_group_data.append(
            load_data_columns_as_array(filename, data_path, *column_range))
    # Concatenate column groups
    data = np.concatenate(column_group_data, axis=1)
    return data
def load_continuous_as_array(filename, channels):
    """
    Fast way of reading a single channel or a set of channels.

    filename - str - full path to file
    channels - list - channel numbers to include (starting from 0).
        Single channel can be given as a single list element or int.
        Channels in the list must be in sorted (ascending) order.

    Returns a dict with 'continuous' (raw samples) and 'timestamps',
    or None if raw data is not present in the file.
    """
    # Generate path to raw continuous data
    root_path = '/acquisition/timeseries/' + get_recordingKey(filename) \
                + '/continuous/' + get_processorKey(filename)
    data_path = root_path + '/data'
    timestamps_path = root_path + '/timestamps'
    # Check that data is available, otherwise return None
    if not check_if_path_exists(filename, data_path):
        return None
    if not check_if_path_exists(filename, timestamps_path):
        return None
    # Load continuous data
    continuous = load_data_as_array(filename, data_path, channels)
    # Load timestamps for data
    with h5py.File(filename, 'r') as h5file:
        timestamps = np.array(h5file[timestamps_path])
    # Arrange output into a dictionary
    data = {'continuous': continuous, 'timestamps': timestamps}
    return data
def remove_surrounding_binary_markers(text):
    """Strip a leading b' and a trailing ' left over from converting a
    bytes repr to str (Python 3)."""
    text = text[2:] if text.startswith("b'") else text
    text = text[:-1] if text.endswith("'") else text
    return text
def get_downsampling_info_old(filename):
    """Parse downsampling info stored in the legacy format (a dataset of
    'key value' strings) and return it as a dict."""
    # Generate path to downsampling data info
    root_path = '/acquisition/timeseries/' + get_recordingKey(filename) \
                + '/continuous/' + get_processorKey(filename)
    data_path = root_path + '/downsampling_info'
    # Load info from file
    with h5py.File(filename, 'r') as h5file:
        data = h5file[data_path]
        data = [str(i) for i in data]
    # Remove b'x' markers from strings if present. Python 3 change.
    data = list(map(remove_surrounding_binary_markers, data))
    # Parse elements in loaded data
    info_dict = {}
    for x in data:
        key, value = x.split(' ')
        if key == 'original_sampling_rate':
            info_dict[key] = np.int64(value)
        elif key == 'downsampled_sampling_rate':
            info_dict[key] = np.int64(value)
        elif key == 'downsampled_channels':
            # Channel list is stored as a single comma-separated string.
            info_dict[key] = np.array(list(map(int, value.split(',')))).astype(np.int64)
    return info_dict
def get_downsampling_info(filename):
    """Read the downsampling_info group from the NWB file as a dict."""
    processor_path = ('/acquisition/timeseries/' + get_recordingKey(filename)
                      + '/continuous/' + get_processorKey(filename))
    info_path = processor_path + '/downsampling_info/'
    with h5py.File(filename, 'r') as h5file:
        return recursively_load_dict_contents_from_group(h5file, info_path)
def load_downsampled_tetrode_data_as_array(filename, tetrode_nrs):
    """
    Returns a dict with downsampled continuous data for requested tetrodes

    filename - str - full path to file
    tetrode_nrs - list - tetrode numbers to include (starting from 0).
        Single tetrode can be given as a single list element or int.
        Tetrode numbers in the list must be in sorted (ascending) order.
        If data is not available for a given tetrode number, error is raised.
    """
    # Generate path to raw continuous data
    root_path = '/acquisition/timeseries/' + get_recordingKey(filename) \
                + '/continuous/' + get_processorKey(filename)
    data_path = root_path + '/downsampled_tetrode_data'
    timestamps_path = root_path + '/downsampled_timestamps'
    # Check that data is available, otherwise return None
    if not check_if_path_exists(filename, data_path):
        return None
    if not check_if_path_exists(filename, timestamps_path):
        return None
    # Get info on downsampled data
    info = get_downsampling_info(filename)
    sampling_rate = int(info['downsampled_sampling_rate'])
    downsampled_channels = list(info['downsampled_channels'])
    # Map tetrode_nrs elements to columns in downsampled_tetrode_data.
    # Each tetrode is represented by the first of its four channels that is
    # present in the downsampled channel list.
    columns = []
    channels_used = []
    tetrode_nrs_remaining = copy(tetrode_nrs)
    for tetrode_nr in tetrode_nrs:
        for chan in tetrode_channels(tetrode_nr):
            if chan in downsampled_channels:
                columns.append(downsampled_channels.index(chan))
                channels_used.append(chan)
                tetrode_nrs_remaining.pop(tetrode_nrs_remaining.index(tetrode_nr))
                break
    # Check that all tetrode numbers were mapped
    if len(tetrode_nrs_remaining) > 0:
        raise Exception('The following tetrodes were not represented in downsampled data\n' \
                        + ','.join(list(map(str, tetrode_nrs_remaining))))
    # Load continuous data
    continuous = load_data_as_array(filename, data_path, columns)
    # Load timestamps for data
    with h5py.File(filename, 'r') as h5file:
        timestamps = np.array(h5file[timestamps_path])
    # Arrange output into a dictionary
    data = {'continuous': continuous, 'timestamps': timestamps,
            'tetrode_nrs': tetrode_nrs, 'channels': channels_used,
            'sampling_rate': sampling_rate}
    return data
def empty_spike_data():
    """Return placeholder spike data: one all-zero waveform at time 0.

    The dict holds 'waveforms' with shape (1, 4, 40) of int16 zeros and
    'timestamps' with a single float64 zero.
    """
    placeholder_waveforms = np.zeros((1, 4, 40), dtype=np.int16)
    placeholder_timestamps = np.zeros(1, dtype=np.float64)
    return {'waveforms': placeholder_waveforms,
            'timestamps': placeholder_timestamps}
def get_tetrode_nrs_if_spikes_available(filename, spike_name='spikes'):
    """
    Return sorted 0-based tetrode numbers that have spike data in the NWB
    file, or an empty list if the spike group or any tetrodes are missing.
    """
    spikes_path = ('/acquisition/timeseries/' + get_recordingKey(filename)
                   + '/' + spike_name + '/')
    with h5py.File(filename, 'r') as nwb_file:
        if spikes_path not in nwb_file:
            # No spike data group at all
            return []
        electrode_keys = list(nwb_file[spikes_path].keys())
    # Group names are 'electrode<N>' with N starting from 1; convert to
    # 0-based tetrode numbers and return them in ascending order.
    return sorted(int(key[len('electrode'):]) - 1 for key in electrode_keys)
def construct_paths_to_tetrode_spike_data(filename, tetrode_nrs, spike_name='spikes'):
    """Build the HDF5 group path for each tetrode's spike data (0-based
    tetrode numbers map to 'electrode<nr+1>' groups)."""
    base_path = ('/acquisition/timeseries/' + get_recordingKey(filename)
                 + '/' + spike_name + '/')
    paths = []
    for tetrode_nr in tetrode_nrs:
        paths.append(base_path + 'electrode' + str(tetrode_nr + 1) + '/')
    return paths
def count_spikes(filename, tetrode_nrs, spike_name='spikes', use_idx_keep=False):
    """
    :param filename: full path to NWB file
    :type filename: str
    :param tetrode_nrs: tetrode numbers to count spikes for
    :type tetrode_nrs: list
    :param spike_name: type of spikes to look for (field in NWB file)
    :type spike_name: str
    :param use_idx_keep: If False (default) all spikes are counted, otherwise only filtered spikes are counted
    :type use_idx_keep: bool
    :return: total number of spikes on each tetrode
    :rtype: list
    """
    tetrode_paths = construct_paths_to_tetrode_spike_data(filename, tetrode_nrs, spike_name=spike_name)
    count = []
    with h5py.File(filename, 'r') as h5file:
        for tetrode_path in tetrode_paths:
            if use_idx_keep:
                # Count only the spikes flagged True in the boolean idx_keep mask
                count.append(sum(np.array(h5file[tetrode_path + 'idx_keep'][()]).squeeze()))
            else:
                # Count all detected spikes via the timestamps dataset length
                count.append(h5file[tetrode_path + 'timestamps/'].shape[0])
    return count
def load_spikes(filename, spike_name='spikes', tetrode_nrs=None, use_idx_keep=False,
                use_badChan=False, no_waveforms=False, clustering_name=None, verbose=True):
    """
    Inputs:
        filename - pointer to NWB file to load
        tetrode_nrs [list] - can be a list of tetrodes to load (from 0)
        use_idx_keep [bool] - if True, only outputs spikes according to idx_keep of tetrode, if available
        use_badChan [bool] - if True, sets all spikes on badChannels to 0
        no_waveforms [bool] - if True, waveforms are not loaded
        clustering_name [str] - if specified, clusterID will be loaded from:
            electrode[nr]/clustering/clustering_name
        verbose [bool] - prints out loading progress bar if True (default)
    Output:
        List of dictionaries for each tetrode in correct order where:
        List is empty, if no spike data detected
        'waveforms' is a list of tetrode waveforms in the order of channels
        'timestamps' is a list of spike detection timestamps corresponding to 'waveforms'
        If available, two more variables will be in the dictionary
        'idx_keep' is boolean index for 'waveforms' and 'timestamps' indicating the spikes
            that are to be used for further processing (based on filtering for artifacts etc)
        'clusterIDs' is the cluster identities of spikes in 'waveforms'['idx_keep',:,:]
    """
    # If not provided, get tetrode_nrs
    if tetrode_nrs is None:
        tetrode_nrs = get_tetrode_nrs_if_spikes_available(filename, spike_name=spike_name)
    tetrode_paths = construct_paths_to_tetrode_spike_data(filename, tetrode_nrs, spike_name=spike_name)
    with h5py.File(filename, 'r') as h5file:
        # Put waveforms and timestamps into a list of dictionaries in correct order
        data = []
        if verbose:
            print('Loading tetrodes from {}'.format(filename))
        iterable = zip(tetrode_nrs, tetrode_paths)
        for nr_tetrode, tetrode_path in (tqdm(iterable, total=len(tetrode_nrs)) if verbose else iterable):
            # Load waveforms and timestamps
            if no_waveforms:
                # Substitute a placeholder instead of reading waveforms from disk
                waveforms = empty_spike_data()['waveforms']
            else:
                waveforms = h5file[tetrode_path + 'data/'][()]
            timestamps = h5file[tetrode_path + 'timestamps/'][()]
            if not isinstance(timestamps, np.ndarray):
                # A single timestamp may be read back as a scalar
                timestamps = np.array([timestamps])
            if waveforms.shape[0] == 0:
                # If no waveforms are available, enter one waveform of zeros at timepoint zero
                waveforms = empty_spike_data()['waveforms']
                timestamps = empty_spike_data()['timestamps']
            # Arrange waveforms, timestamps and nr_tetrode into a dictionary
            tet_data = {'waveforms': waveforms,
                        'timestamps': timestamps,
                        'nr_tetrode': nr_tetrode}
            # Include idx_keep if available
            idx_keep_path = tetrode_path + 'idx_keep'
            if idx_keep_path in h5file:
                tet_data['idx_keep'] = np.array(h5file[idx_keep_path][()])
                if use_idx_keep:
                    # If requested, filter waveforms and timestamps based on idx_keep
                    if np.sum(tet_data['idx_keep']) == 0:
                        tet_data['waveforms'] = empty_spike_data()['waveforms']
                        tet_data['timestamps'] = empty_spike_data()['timestamps']
                    else:
                        if not no_waveforms:
                            tet_data['waveforms'] = tet_data['waveforms'][tet_data['idx_keep'], :, :]
                        tet_data['timestamps'] = tet_data['timestamps'][tet_data['idx_keep']]
            # Include clusterIDs if available
            if clustering_name is None:
                clusterIDs_path = tetrode_path + 'clusterIDs'
            else:
                clusterIDs_path = tetrode_path + '/clustering/' + clustering_name
            if clusterIDs_path in h5file:
                tet_data['clusterIDs'] = np.int16(h5file[clusterIDs_path][()]).squeeze()
            # Set spikes to zeros for channels in badChan list if requested
            if use_badChan and not no_waveforms:
                badChan = listBadChannels(filename)
                if len(badChan) > 0:
                    for nchan in tetrode_channels(nr_tetrode):
                        if nchan in badChan:
                            # Zero the within-tetrode channel (0-3) of every spike
                            tet_data['waveforms'][:, np.mod(nchan, 4), :] = 0
            data.append(tet_data)
    return data
def save_spikes(filename, tetrode_nr, data, timestamps, spike_name='spikes', overwrite=False):
    """
    Stores spike data in NWB file in the same format as with OpenEphysGUI.
    tetrode_nr=0 for first tetrode (stored as 'electrode1').

    :param str filename: full path to NWB file
    :param int tetrode_nr: 0-based tetrode number
    :param numpy.ndarray data: spike waveforms, must be int16
    :param numpy.ndarray timestamps: spike times, must be float64
    :param str spike_name: name of the spikes group in the NWB file
    :param bool overwrite: if True, existing data at the target path is deleted first
    :raises ValueError: if data or timestamps have the wrong dtype
    :raises Exception: if spikes already exist at the path and overwrite is False
    """
    if data.dtype != np.int16:
        raise ValueError('Waveforms are not int16.')
    if timestamps.dtype != np.float64:
        raise ValueError('Timestamps are not float64.')
    # Fix: reuse the already-fetched recording key instead of querying the file again
    recordingKey = get_recordingKey(filename)
    path = '/acquisition/timeseries/' + recordingKey + '/' + spike_name + '/' + \
           'electrode' + str(tetrode_nr + 1) + '/'
    if check_if_path_exists(filename, path):
        if overwrite:
            # If overwrite is true, path is first cleared
            with h5py.File(filename, 'r+') as h5file:
                del h5file[path]
        else:
            raise Exception('Spikes already in file and overwrite not requested.\n' \
                            + 'File: ' + filename + '\n' \
                            + 'path: ' + path)
    with h5py.File(filename, 'r+') as h5file:
        h5file[path + 'data'] = data
        h5file[path + 'timestamps'] = np.float64(timestamps).squeeze()
def processing_method_and_spike_name_combinations():
    """
    Outputs a list of potential processing_method and spike_name combinations.

    The two returned lists are index-aligned: element i of the first list
    corresponds to element i of the second list.
    """
    combinations = (('klustakwik', 'spikes'),
                    ('klustakwik_raw', 'spikes_raw'),
                    ('kilosort', 'spikes_kilosort'))
    method_names = [method for method, _ in combinations]
    spike_names = [spike for _, spike in combinations]
    return method_names, spike_names
def get_spike_name_for_processing_method(processing_method):
    """Return the spike group name paired with *processing_method*.

    Raises ValueError (via list.index) if the method is unknown.
    """
    methods, spike_names = processing_method_and_spike_name_combinations()
    position = methods.index(processing_method)
    return spike_names[position]
def load_events(filename, internally_generated=False):
    """Load TTL events from the NWB file.

    Returns a dictionary with keys 'timestamps' and 'eventID' for TTL
    signals received. Timestamps are in seconds, aligned to timestamps of
    the continuous recording. eventIDs indicate TTL channel number
    (starting from 1) and are positive for rising signals.
    """
    ttl_type = 'ttl2' if internally_generated else 'ttl1'
    recordingKey = get_recordingKey(filename)
    with h5py.File(filename, 'r') as h5file:
        event_group = h5file['acquisition']['timeseries'][recordingKey]['events'][ttl_type]
        timestamps = event_group['timestamps'][()]
        eventID = event_group['data'][()]
    return {'eventID': eventID, 'timestamps': timestamps}
def load_GlobalClock_timestamps(filename, GlobalClock_TTL_channel=1):
    """
    Returns timestamps of GlobalClock TTL pulses.
    """
    events = load_events(filename)
    channel_match = events['eventID'] == GlobalClock_TTL_channel
    return events['timestamps'][channel_match]
def load_open_ephys_generated_ttl_events(filename):
    """Returns Open Ephys generated TTL pulse events with channel numbers and timestamps

    :param str filename: full path to NWB file
    :return: channel_event, timestamps
    """
    events = load_events(filename, internally_generated=True)
    return events['eventID'], events['timestamps']
def load_network_events(filename):
    """Extract the list of network messages from an NWB file.

    Returns the messages along with their corresponding timestamps in a
    dictionary with keys ['messages', 'timestamps']
        'messages' - list of str
        'timestamps' - list of float

    :param filename: full path to NWB file
    :type filename: str
    :return: network_events_data
    :rtype: dict
    """
    recordingKey = get_recordingKey(filename)
    with h5py.File(filename, 'r') as h5file:
        text_group = h5file['acquisition']['timeseries'][recordingKey]['events']['text1']
        raw_timestamps = text_group['timestamps'][()]
        raw_messages = text_group['data'][()]
    return {'messages': [m.decode('utf-8') for m in raw_messages],
            'timestamps': [float(t) for t in raw_timestamps]}
def check_if_path_exists(filename, path):
    """Return True if *path* exists inside the HDF5 file *filename*."""
    with h5py.File(filename, 'r') as h5file:
        path_found = path in h5file
    return path_found
def save_list_of_dicts_to_group(h5file, path, dlist, overwrite=False, list_suffix='_NWBLIST'):
    """Write a list of dictionaries to *path*, one numbered subgroup per element."""
    # Validate the whole list before writing anything
    for element in dlist:
        if not isinstance(element, dict):
            raise Exception('List elements must be dictionaries')
    # Each element is stored under a subgroup named by its list position
    for position, element in enumerate(dlist):
        recursively_save_dict_contents_to_group(h5file, path + str(position) + '/', element,
                                                overwrite=overwrite, list_suffix=list_suffix)
def recursively_save_dict_contents_to_group(h5file, path, dic, overwrite=False, list_suffix='_NWBLIST', verbose=False):
    """
    Recursively writes the contents of a (possibly nested) dictionary into an HDF5 group.

    h5file - h5py.File
    path - str - path to group in h5file. Must end with '/'
    overwrite - bool - any dictionary elements or lists that already exist are overwritten.
        Default is False, if elements already exist in NWB file, error is raised.
    list_suffix - str - suffix used to highlight paths created from lists of dictionaries.
        Must be consistent when saving and loading data.
    verbose - bool - If True (default is False), h5file path used is printed for each recursion
    Only works with: numpy arrays, numpy int64 or float64, strings, bytes, lists of strings and dictionaries these are contained in.
    Also works with lists dictionaries as part of the hierachy.
    Long lists of dictionaries are discouraged, as individual groups are created for each element.
    """
    if verbose:
        print(path)
    # An empty dictionary is represented by an empty group (recreated if it exists)
    if len(dic) == 0:
        if path in h5file:
            del h5file[path]
        h5file.create_group(path)
    for key, item in dic.items():
        # Plain Python numbers are converted to numpy scalars before writing
        if isinstance(item, (int, float)):
            item = np.array(item)
        if isinstance(item, (np.ndarray, np.int64, np.float64, str, bytes)):
            if overwrite:
                if path + key in h5file:
                    del h5file[path + key]
            h5file[path + key] = item
        elif isinstance(item, dict):
            # Nested dictionary: recurse into a subgroup
            recursively_save_dict_contents_to_group(h5file, path + key + '/', item,
                                                    overwrite=overwrite, list_suffix=list_suffix,
                                                    verbose=verbose)
        elif isinstance(item, list):
            if all(isinstance(i, str) for i in item):
                # List of strings: stored as a fixed-length (S100) ASCII dataset
                if overwrite:
                    if path + key in h5file:
                        del h5file[path + key]
                asciiList = [n.encode("ascii", "ignore") for n in item]
                h5file[path + key] = h5file.create_dataset(None, (len(asciiList),),'S100', asciiList)
            else:
                # Any other list is treated as a list of dictionaries and is
                # stored under a path marked with list_suffix
                if overwrite:
                    if path + key + list_suffix in h5file:
                        del h5file[path + key + list_suffix]
                save_list_of_dicts_to_group(h5file, path + key + list_suffix + '/', item,
                                            overwrite=overwrite, list_suffix=list_suffix)
        elif item is None:
            # None is represented by an empty group
            h5file.create_group(path + key)
        else:
            raise ValueError('Cannot save %s type'%type(item) + ' from ' + path + key)
def convert_bytes_to_string(b):
    """
    If input is bytes, returns str decoded with utf-8

    :param b:
    :type b: bytes
    :return: string decoded with utf-8 if input is bytes object, otherwise returns unchanged input
    :rtype: str
    """
    # The former Python 2 fallback branch was dead code: this module already
    # relies on Python 3-only features (e.g. catching ModuleNotFoundError),
    # so a plain bytes.decode is sufficient.
    if isinstance(b, bytes):
        return b.decode('utf-8')
    return b
def load_list_of_dicts_from_group(h5file, path, list_suffix='_NWBLIST', ignore=()):
    """Load numbered subgroups at *path* back into an ordered list of dictionaries."""
    indexed_items = []
    for key in list(h5file[path].keys()):
        loaded = recursively_load_dict_contents_from_group(h5file, path + key + '/',
                                                           list_suffix=list_suffix,
                                                           ignore=ignore)
        indexed_items.append((int(key), loaded))
    # Group names encode list positions; sort on them to restore original order
    indexed_items.sort(key=lambda pair: pair[0])
    return [value for _, value in indexed_items]
def recursively_load_dict_contents_from_group(h5file, path, list_suffix='_NWBLIST', ignore=()):
    """
    Returns value at path if it has no further items

    h5file - h5py.File
    path - str - path to group in h5file. Must end with '/'
    list_suffix - str - suffix used to highlight paths created from lists of dictionaries.
        Must be consistent when saving and loading data.
    ignore - tuple - paths including elements matching any element in this tuple return None
    """
    if not path.endswith('/'):
        raise ValueError('Input path must end with "/"')
    if path.split('/')[-2] in ignore or path.split('/')[-2][:-len(list_suffix)] in ignore:
        # Caller asked for this element to be skipped
        ans = None
    elif path[:-1].endswith(list_suffix):
        # Path was created from a list of dictionaries; rebuild the list
        ans = load_list_of_dicts_from_group(h5file, path, list_suffix=list_suffix,
                                            ignore=ignore)
    elif hasattr(h5file[path], 'items'):
        # Path is a group; load each child recursively into a dictionary
        ans = {}
        for key, item in h5file[path].items():
            if key.endswith(list_suffix):
                ans[str(key)[:-len(list_suffix)]] = load_list_of_dicts_from_group(
                    h5file, path + key + '/', list_suffix=list_suffix,
                    ignore=ignore
                )
            elif isinstance(item, h5py._hl.dataset.Dataset):
                if 'S100' == item.dtype:
                    # Fixed-length ASCII string list; decode each element
                    tmp = list(item[()])
                    ans[str(key)] = [convert_bytes_to_string(i) for i in tmp]
                elif item.dtype == 'bool' and item.ndim == 0:
                    ans[str(key)] = np.array(bool(item[()]))
                else:
                    ans[str(key)] = convert_bytes_to_string(item[()])
            elif isinstance(item, h5py._hl.group.Group):
                # Bug fix: propagate list_suffix into the recursion so nested
                # lists of dictionaries are detected with a non-default suffix.
                ans[str(key)] = recursively_load_dict_contents_from_group(h5file, path + key + '/',
                                                                          list_suffix=list_suffix,
                                                                          ignore=ignore)
    else:
        ans = convert_bytes_to_string(h5file[path][()])
    return ans
def save_settings(filename, Settings, path='/'):
    """
    Writes into an existing file if path is not yet used.
    Creates a new file if filename does not exist.
    Only works with: numpy arrays, numpy int64 or float64, strings, bytes, lists of strings and dictionaries these are contained in.
    To save specific subsetting, e.g. TaskSettings, use:
        Settings=TaskSetttings, path='/TaskSettings/'
    """
    settings_path = '/general/data_collection/Settings' + path
    # Append to an existing file, otherwise create a new one
    write_method = 'r+' if os.path.isfile(filename) else 'w'
    with h5py.File(filename, write_method) as h5file:
        recursively_save_dict_contents_to_group(h5file, settings_path, Settings)
def load_settings(filename, path='/', ignore=()):
    """
    By default loads all settings from path
        '/general/data_collection/Settings/'
    or for example to load animal ID, use:
        path='/General/animal/'
    ignore - tuple - any paths including any element of ignore are returned as None
    """
    settings_path = '/general/data_collection/Settings' + path
    with h5py.File(filename, 'r') as h5file:
        loaded = recursively_load_dict_contents_from_group(h5file, settings_path, ignore=ignore)
    return loaded
def check_if_settings_available(filename, path='/'):
    """
    Returns whether settings information exists in NWB file
    Specify path='/General/badChan/' to check for specific settings
    """
    settings_path = '/general/data_collection/Settings' + path
    with h5py.File(filename, 'r') as h5file:
        available = settings_path in h5file
    return available
def save_analysis(filename, data, overwrite=False, complete_overwrite=False, verbose=False):
    """Stores analysis results from nested dictionary to /analysis path in NWB file.

    See :py:func:`NWBio.recursively_save_dict_contents_to_group` for details on supported data structures.

    :param str filename: path to NWB file
    :param dict data: analysis data to be stored in NWB file
    :param bool overwrite: if True, any existing data at same dictionary keys
        as in previously saved data is overwritten.
        Default is False.
    :param bool complete_overwrite: if True, all previous analysis data is discarded before writing.
        Default is False.
    :param bool verbose: if True (default is False), the path in file for each element is printed.
    """
    with h5py.File(filename, 'r+') as h5file:
        # Guard the deletion: deleting a non-existent path would raise KeyError
        # on files that do not yet contain any analysis data.
        if complete_overwrite and '/analysis' in h5file:
            del h5file['/analysis']
        recursively_save_dict_contents_to_group(h5file, '/analysis/', data, overwrite=overwrite, verbose=verbose)
def load_analysis(filename, ignore=()):
    """Loads analysis results from /analysis path in NWB file into a dictionary.

    :param str filename: path to NWB file
    :param tuple ignore: paths containing any element of ignore are terminated with None.
        In the output dictionary any elements downstream of a key matching any element of ignore
        is not loaded and dictionary tree is terminated at that point with value None.
    """
    with h5py.File(filename, 'r') as h5file:
        analysis_data = recursively_load_dict_contents_from_group(h5file, '/analysis/', ignore=ignore)
    return analysis_data
def listBadChannels(filename):
    """Return 0-based indices of channels marked bad in the recording settings.

    The badChan setting is a comma separated string of 1-based channel
    numbers, where ranges may be given with a dash (e.g. '1-4,7').
    Returns an empty list if the setting is not present.
    """
    if not check_if_settings_available(filename, '/General/badChan/'):
        return []
    badChanString = load_settings(filename, '/General/badChan/')
    # Separate input string into a list using ',' as delimiter
    if badChanString.find(',') > -1:  # If more than one channel specified
        badChanStringList = badChanString.split(',')
    else:
        badChanStringList = [badChanString]
    # Expand any ranges specified with '-' into individual channel numbers.
    # Bug fix: iterate over a snapshot of the list. The previous code mutated
    # badChanStringList (append/remove) while iterating it, which caused the
    # element following a removed range token to be skipped entirely.
    for chanString in list(badChanStringList):
        if chanString.find('-') > -1:
            chan_from = chanString[:chanString.find('-')]
            chan_to = chanString[chanString.find('-') + 1:]
            for nchan in range(int(chan_to) - int(chan_from) + 1):
                badChanStringList.append(str(nchan + int(chan_from)))
            badChanStringList.remove(chanString)  # Remove the '-' containing list element
    # Reorder list of bad channels
    badChanStringList.sort(key=int)
    # Convert 1-based channel numbers to 0-based integer indices
    badChan = list(np.array(list(map(int, badChanStringList))) - 1)
    return badChan
def save_tracking_data(filename, TrackingData, ProcessedPos=False, overwrite=False):
    """
    Writes tracking data into the NWB file under .../tracking/.

    TrackingData is expected as dictionary with keys for each source ID
    If saving processed data, TrackingData is expected to be numpy array
    Use ProcessedPos=True to store processed data
    Use overwrite=True to force overwriting existing processed data
    """
    # Append to an existing file, otherwise create a new one
    if os.path.isfile(filename):
        write_method = 'r+'
    else:
        write_method = 'w'
    recordingKey = get_recordingKey(filename)
    with h5py.File(filename, write_method) as h5file:
        full_path = '/acquisition/timeseries/' + recordingKey + '/tracking/'
        if not ProcessedPos:
            # Raw tracking data: write the dictionary tree as-is
            recursively_save_dict_contents_to_group(h5file, full_path, TrackingData)
        elif ProcessedPos:
            processed_pos_path = full_path + 'ProcessedPos/'
            # If overwrite is true, path is first cleared
            if overwrite:
                if full_path in h5file and 'ProcessedPos' in list(h5file[full_path].keys()):
                    del h5file[processed_pos_path]
            h5file[processed_pos_path] = TrackingData
def get_recording_cameraIDs(filename):
    """Return the list of camera IDs in settings, or None if none are stored."""
    camera_settings_path = '/general/data_collection/Settings/CameraSettings/CameraSpecific'
    with h5py.File(filename, 'r') as h5file:
        if camera_settings_path in h5file:
            return list(h5file[camera_settings_path].keys())
def load_raw_tracking_data(filename, cameraID, specific_path=None):
    """Load raw tracking data of one camera; optionally only *specific_path* below it.

    Returns None if the path is not present in the file.
    """
    camera_path = '/acquisition/timeseries/' + get_recordingKey(filename) + '/tracking/' + cameraID + '/'
    if specific_path is not None:
        camera_path = camera_path + '/' + specific_path + '/'
    with h5py.File(filename, 'r') as h5file:
        if camera_path in h5file:
            return recursively_load_dict_contents_from_group(h5file, camera_path)
def load_processed_tracking_data(filename, subset='ProcessedPos'):
    """Load processed tracking data array *subset* from the NWB file."""
    tracking_path = ('/acquisition/timeseries/' + get_recordingKey(filename)
                     + '/tracking/' + subset)
    with h5py.File(filename, 'r') as h5file:
        return np.array(h5file[tracking_path][()])
def get_processed_tracking_data_timestamp_edges(filename, subset='ProcessedPos'):
    """Return [first, last] timestamps of processed position data.

    Falls back to the continuous data timestamps if processed position data
    is not available.
    """
    if check_if_processed_position_data_available(filename):
        data = load_processed_tracking_data(filename, subset=subset)
        edges = [data[0, 0], data[-1, 0]]
    else:
        print('Warning! ProcessedPos not available. Using continuous data timestamps')
        recordingKey = get_recordingKey(filename)
        processorKey = get_processorKey(filename)
        path = '/acquisition/timeseries/' + recordingKey + '/continuous/' + processorKey + '/timestamps'
        # Use a context manager so the file handle is closed even if an
        # exception is raised while reading (the previous manual open/close
        # leaked the handle on error).
        with h5py.File(filename, 'r') as h5file:
            edges = [h5file[path][0], h5file[path][-1]]
    return edges
def check_if_tracking_data_available(filename):
    """Return the Tracking settings if present, otherwise False."""
    if not check_if_settings_available(filename, path='/General/Tracking/'):
        return False
    return load_settings(filename, path='/General/Tracking/')
def check_if_processed_position_data_available(filename, subset='ProcessedPos'):
    """Return True if processed position data *subset* exists in the file."""
    tracking_path = ('/acquisition/timeseries/' + get_recordingKey(filename)
                     + '/tracking/' + subset)
    return check_if_path_exists(filename, tracking_path)
def check_if_binary_pos(filename):
    """Return True if binary position data exists in the NWB file."""
    binary_path = '/acquisition/timeseries/' + get_recordingKey(filename) + '/events/binary1/'
    return check_if_path_exists(filename, binary_path)
def save_tetrode_idx_keep(filename, ntet, idx_keep, spike_name='spikes', overwrite=False):
    """Store the idx_keep array for one tetrode (ntet is 0-based)."""
    target_path = ('/acquisition/timeseries/' + get_recordingKey(filename) + '/'
                   + spike_name + '/' + 'electrode' + str(ntet + 1) + '/idx_keep')
    with h5py.File(filename, 'r+') as h5file:
        if target_path in h5file:
            if not overwrite:
                raise ValueError('Tetrode ' + str(ntet + 1) + ' idx_keep already exists in ' + filename)
            del h5file[target_path]
        h5file[target_path] = idx_keep
def save_tetrode_clusterIDs(filename, ntet, clusterIDs, spike_name='spikes', overwrite=False):
    """Store clusterIDs for one tetrode (ntet is 0-based) as int16."""
    target_path = ('/acquisition/timeseries/' + get_recordingKey(filename) + '/'
                   + spike_name + '/' + 'electrode' + str(ntet + 1) + '/clusterIDs')
    with h5py.File(filename, 'r+') as h5file:
        if target_path in h5file:
            if not overwrite:
                raise ValueError('Tetrode ' + str(ntet + 1) + ' clusterIDs already exists in ' + filename)
            del h5file[target_path]
        h5file[target_path] = np.int16(clusterIDs).squeeze()
def fill_empty_dictionary_from_source(selection, src_dict):
    """
    Populates a dictionary with None values with values from a source
    dictionary with identical structure.

    :param dict selection: dictionary whose leaf values are all None (nested dicts allowed)
    :param dict src_dict: source dictionary with identical structure providing values
    :return: new dictionary with None values replaced by values from src_dict
    :rtype: dict
    :raises ValueError: if selection contains a leaf value that is not None
    """
    dst_dict = copy(selection)
    for key, item in dst_dict.items():
        if isinstance(item, dict):
            dst_dict[key] = fill_empty_dictionary_from_source(item, src_dict[key])
        elif item is None:
            dst_dict[key] = src_dict[key]
        else:
            # Fixed previously truncated error message
            raise ValueError('Destination dictionary has incorrect structure.')
    return dst_dict
def get_recording_start_timestamp_offset(filename):
    """Returns the first timestamp of raw or downsampled continuous data.

    :param str filename: path to NWB file
    :return: first timestamp of continuous data
    :rtype: float
    """
    if check_if_raw_data_available(filename):
        path = get_raw_data_paths(filename)['timestamps']
    elif check_if_downsampled_data_available(filename):
        path = get_downsampled_data_paths(filename)['timestamps']
    else:
        raise Exception('NWB file does not contain raw or downsampled data ' + filename)
    with h5py.File(filename, 'r') as h5file:
        # Index the scalar element directly; calling float() on a 1-element
        # slice is deprecated in recent NumPy versions.
        return float(h5file[path][0])
def get_recording_full_duration(filename):
    """Returns the total duration from first to last timestamp of
    raw or downsampled continuous data.

    :param str filename: path to NWB file
    :return: total duration from first to last timestamp of continuous data
    :rtype: float
    """
    if check_if_raw_data_available(filename):
        path = get_raw_data_paths(filename)['timestamps']
    elif check_if_downsampled_data_available(filename):
        path = get_downsampled_data_paths(filename)['timestamps']
    else:
        raise Exception('NWB file does not contain raw or downsampled data ' + filename)
    with h5py.File(filename, 'r') as h5file:
        # Index scalar elements directly; calling float() on a 1-element
        # slice is deprecated in recent NumPy versions.
        return float(h5file[path][-1]) - float(h5file[path][0])
def import_task_specific_log_parser(task_name):
    """
    Returns LogParser module for the specific task.

    :param task_name: name of the task
    :type task_name: str
    :return: TaskLogParser
    :rtype: module
    """
    # Temporary workaround to function with older files
    if task_name == 'Pellets_and_Rep_Milk_Task':
        task_name = 'Pellets_and_Rep_Milk'
    module_path = '.Tasks.' + task_name + '.LogParser'
    try:
        return importlib.import_module(module_path, package='openEPhys_DACQ')
    except ModuleNotFoundError:
        print('Task {} LogParser not found. Returning None.'.format(task_name))
        return None
def load_task_name(filename):
    """
    Returns the name of the task active in the recording.

    Reads the '/TaskSettings/name/' entry of the stored recording settings.

    :param filename: absolute path to NWB recording file
    :type filename: str
    :return: task_name
    :rtype: str
    """
    return load_settings(filename, path='/TaskSettings/name/')
def get_recording_log_parser(filename, final_timestamp=None):
    """Finds task specific LogParser class and returns it initialized
    with network events from that recording.

    :param str filename:
    :return: Task specific log parser initialized with network events
    :rtype: LogParser class
    """
    log_parser_module = import_task_specific_log_parser(load_task_name(filename))
    if log_parser_module is None:
        return None
    task_settings = load_settings(filename, path='/TaskSettings/')
    return log_parser_module.LogParser(task_settings=task_settings,
                                       final_timestamp=final_timestamp,
                                       **load_network_events(filename))
def get_channel_map(filename):
    """Return the channel map dictionary stored in the recording settings."""
    return load_settings(filename, '/General/channel_map/')
def list_tetrode_nrs_for_area_channel_map(area_channel_map):
    """Return the (unique) tetrode numbers covered by the channels of one area."""
    unique_tetrode_nrs = {channels_tetrode(chan) for chan in list(area_channel_map['list'])}
    return list(unique_tetrode_nrs)
def get_channel_map_with_tetrode_nrs(filename):
    """Return the channel map with a 'tetrode_nrs' entry added per area."""
    channel_map = get_channel_map(filename)
    for area_name in channel_map:
        channel_map[area_name]['tetrode_nrs'] = \
            list_tetrode_nrs_for_area_channel_map(channel_map[area_name])
    return channel_map
def check_if_channel_maps_are_same(channel_map_1, channel_map_2):
    """
    Determines if two channel maps are identical

    :param dict channel_map_1: mapping of area name to {'list': channels}
    :param dict channel_map_2: mapping of area name to {'list': channels}
    :return: True if both maps have the same areas with the same channel lists
    :rtype: bool
    """
    # Check that there are same number of areas in the dictionary
    if len(channel_map_1) != len(channel_map_2):
        return False
    # Sort the area names because dictionary is not ordered
    channel_map_1_keys = sorted(channel_map_1.keys())
    channel_map_2_keys = sorted(channel_map_2.keys())
    # Check that the areas have the same name
    if channel_map_1_keys != channel_map_2_keys:
        return False
    # Check that the channel lists are the same.
    # Bug fix: np.array_equal works for both plain lists and numpy arrays and
    # returns False on length mismatch. The previous all(list1 == list2)
    # raised TypeError for plain lists and failed on differing lengths.
    for area in channel_map_1_keys:
        if not np.array_equal(channel_map_1[area]['list'], channel_map_2[area]['list']):
            return False
    return True
def estimate_open_ephys_timestamps_from_other_timestamps(open_ephys_global_clock_times, other_global_clock_times,
                                                         other_times, other_times_divider=None):
    """Returns Open Ephys timestamps for each timestamp from another device by synchronising with global clock.

    Note, other times must be in same units as open_ephys_global_clock_times. Most likely seconds.
    For example, Raspberry Pi camera timestamps would need to be divided by 10 ** 6

    :param numpy.ndarray open_ephys_global_clock_times: shape (N,)
    :param numpy.ndarray other_global_clock_times: shape (M,)
    :param numpy.ndarray other_times: shape (K,)
    :param int other_times_divider: if provided, timestamps from the other devices are divided by this value
        before matching to Open Ephys time. This allows inputting timestamps from other device in original units.
        In case of Raspberry Pi camera timestamps, this value should be 10 ** 6.
        If this value is not provided, all provided timestamps must be in same units.
    :return: open_ephys_times
    :rtype: numpy.ndarray
    """
    # Crop data if more timestamps recorded on either system.
    if open_ephys_global_clock_times.size > other_global_clock_times.size:
        open_ephys_global_clock_times = open_ephys_global_clock_times[:other_global_clock_times.size]
        print('[ Warning ] OpenEphys recorded more GlobalClock TTL pulses than other system.\n' +
              'Dumping extra OpenEphys timestamps from the end.')
    elif open_ephys_global_clock_times.size < other_global_clock_times.size:
        other_global_clock_times = other_global_clock_times[:open_ephys_global_clock_times.size]
        print('[ Warning ] Other system recorded more GlobalClock TTL pulses than Open Ephys.\n' +
              'Dumping extra other system timestamps from the end.')
    # Find closest other_global_clock_times indices to each other_times
    # NOTE(review): assumes closest_argmin(a, b) returns, for each element of a,
    # the index of the nearest element in b - confirm against its definition
    # elsewhere in this module.
    other_times_gc_indices = closest_argmin(other_times, other_global_clock_times)
    # Compute difference from the other_global_clock_times for each value in other_times
    other_times_nearest_global_clock_times = other_global_clock_times[other_times_gc_indices]
    other_times_global_clock_delta = other_times - other_times_nearest_global_clock_times
    # Convert difference values to Open Ephys timestamp units
    if not (other_times_divider is None):
        other_times_global_clock_delta = other_times_global_clock_delta / float(other_times_divider)
    # Use other_times_global_clock_delta to estimate timestamps in OpenEphys time
    other_times_nearest_open_ephys_global_clock_times = open_ephys_global_clock_times[other_times_gc_indices]
    open_ephys_times = other_times_nearest_open_ephys_global_clock_times + other_times_global_clock_delta
    return open_ephys_times
def extract_recording_info(filename, selection='default'):
    """
    Returns recording info for the recording file.

    selection - allows specifying which data return
        'default' - some hard-coded selection of data
        'all' - all of the recording settings
        dict - a dictionary with the same exact keys and structure
               as the recording settings, with None for item values
               and missing keys for unwanted elements. The dictionary
               will be returned with None values populated by values
               from recording settings.
    """
    recording_info = {}
    if isinstance(selection, str) and selection == 'default':
        recording_info.update(load_settings(filename, '/General/'))
        # Drop entries that are machine/file-system specific and not informative
        del recording_info['experimenter']
        del recording_info['rec_file_path']
        del recording_info['root_folder']
        if recording_info['TaskActive']:
            recording_info.update({'TaskName': load_settings(filename, '/TaskSettings/name/')})
        # Keep channel map areas but drop their verbose channel lists
        for key in list(recording_info['channel_map'].keys()):
            del recording_info['channel_map'][key]['list']
        # Duration is derived from the processed position timestamp edges
        pos_edges = get_processed_tracking_data_timestamp_edges(filename)
        recording_info['duration'] = pos_edges[1] - pos_edges[0]
        recording_info['duration (min)'] = int(round((pos_edges[1] - pos_edges[0]) / 60))
        recording_info['time'] = load_settings(filename, '/Time/')
    elif isinstance(selection, str) and selection == 'all':
        recording_info = load_settings(filename)
    elif isinstance(selection, dict):
        # Use the selection dictionary as a template to be filled from settings
        full_recording_info = load_settings(filename)
        recording_info = fill_empty_dictionary_from_source(selection, full_recording_info)
    return recording_info
def display_recording_data(root_path, selection='default'):
    """
    Prints recording info for the whole directory tree.
    """
    for dir_name, _, file_names in os.walk(root_path):
        for file_name in file_names:
            if file_name != 'experiment_1.nwb':
                continue
            nwb_path = os.path.join(dir_name, file_name)
            info = extract_recording_info(nwb_path, selection)
            print('Data on path: ' + dir_name)
            pprint(info)
if __name__ == '__main__':
    # Script entry point: print recording info for every recording found
    # under the given root directory.
    # Input argument handling and help info
    parser = argparse.ArgumentParser(description='Extract info from Open Ephys.')
    parser.add_argument('root_path', type=str, nargs=1,
                        help='Root directory for recording(s)')
    args = parser.parse_args()
    # Get paths to recording files
    display_recording_data(args.root_path[0])
| 41.409875 | 132 | 0.66203 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20,204 | 0.382369 |
0390c2fe78227e37b4043e6ad937f0c6cdda546d | 10,339 | py | Python | computersimulator/hardware/SimulatedCPU.py | jatgam/Computer-Simulator | a6f496679b16738e74663f092f61e758df9ce6f8 | [
"MIT"
] | 1 | 2021-05-02T12:30:31.000Z | 2021-05-02T12:30:31.000Z | computersimulator/hardware/SimulatedCPU.py | jatgam/Computer-Simulator | a6f496679b16738e74663f092f61e758df9ce6f8 | [
"MIT"
] | null | null | null | computersimulator/hardware/SimulatedCPU.py | jatgam/Computer-Simulator | a6f496679b16738e74663f092f61e758df9ce6f8 | [
"MIT"
] | null | null | null | import logging
import sys
from computersimulator.hardware.SimulatedRAM import SimulatedRAM
from computersimulator.hardware.SimulatedDisk import SimulatedDisk
from computersimulator.utils.bitutils import *
import computersimulator.constants as constants
CONST = constants.Constants
logger = logging.getLogger(__name__)
class SimulatedCPU:
    """Simulated CPU running a fetch/decode/execute loop over SimulatedRAM.

    Instructions are decoded from the instruction register with the opcode
    in the high bits; each of the two operands is described by a 4-bit
    addressing mode and a 4-bit register field (decoded with extractBits;
    see bitutils for the exact bit positions).
    """
    def __init__(self):
        """Create CPU registers and attach simulated RAM and disk.

        Exits the process if the backing disk image cannot be found.
        """
        logger.info("Initializing CPU")
        ### CPU Hardware Variables ###
        self.gpr = [0]*8 # General Purpose Registers
        self.sp = None # Stack Pointer
        self.pc = None # Program Counter
        self.ir = None # Instruction Register
        self.psr = None # Processor Status Register
        self.clock = None # Clock
        ### Other Hardware Accessed by CPU ###
        self.sram = SimulatedRAM()
        self.sdisk = SimulatedDisk("computersimulator/hardware/disks/disk.dsk")
        if (self.sdisk.disk == -1):
            print("Fatal Error! Disk not found!")
            sys.exit()

    def executeProgram(self, systemCallCallback, timeslice=200):
        """
        Runs through the ram and grabs the next IR and decodes it. Then
        performs the correct operation.

        :param systemCallCallback: callable invoked with the operand value of
            OP_SYSTEM instructions; its return value controls whether
            execution continues, waits (CONST.WAITING) or halts (CONST.HALT).
        :param timeslice: number of clock ticks this invocation may consume
            before returning CONST.TIMESLICE. NOTE(review): assumes self.pc
            and self.clock have been set to numbers before calling - confirm
            against the scheduler code that drives this method.
        :return: a CONST status code (CONST.OK, CONST.TIMESLICE,
            CONST.WAITING or an error code).
        """
        status = 0
        clock_start = self.clock
        while (status >= 0):
            if (self.pc < 0) or (self.pc > 9999): # Check to see if PC valid
                return CONST.ER_PC
            if (self.clock - clock_start >= timeslice):
                return CONST.TIMESLICE
            # Fetch the next instruction and advance the program counter
            self.ir = self.sram.ram[self.pc]
            self.pc += 1
            # Decode IR
            op_code = self.ir >> 16
            op1_mode = extractBits(self.ir, 4, 13)
            op1_reg = extractBits(self.ir, 4, 9)
            op2_mode = extractBits(self.ir, 4, 5)
            op2_reg = extractBits(self.ir, 4, 1)
            # Bug fix: format string previously repeated op1_mode/op1_reg
            # labels for the operand-2 values.
            logger.debug("IR:%s op_code:%s op1_mode:%s op1_reg:%s op2_mode:%s op2_reg:%s",
                         hex(self.ir), hex(op_code), hex(op1_mode), hex(op1_reg), hex(op2_mode), hex(op2_reg))
            if (op_code == CONST.OP_HALT): # Halt Opcode
                self.clock += 12
                return CONST.OK
            elif (op_code == CONST.OP_ADD): # Add Opcode
                status, op1_addr, op1_value = self._fetchOperand(op1_mode, op1_reg)
                if (status != 0):
                    return CONST.ER_INVALIDMODE
                status, op2_addr, op2_value = self._fetchOperand(op2_mode, op2_reg)
                if (status != 0):
                    return CONST.ER_INVALIDMODE
                result = op1_value + op2_value # ALU
                # Result is written back to operand 2 (register or memory)
                if (op2_mode == CONST.MODE_REGISTER):
                    self.gpr[op2_reg] = result
                else:
                    self.sram.ram[op2_addr] = result
                self.clock += 3
                continue
            elif (op_code == CONST.OP_SUB): # Subtract Opcode
                status, op1_addr, op1_value = self._fetchOperand(op1_mode, op1_reg)
                if (status != 0):
                    return CONST.ER_INVALIDMODE
                status, op2_addr, op2_value = self._fetchOperand(op2_mode, op2_reg)
                if (status != 0):
                    return CONST.ER_INVALIDMODE
                result = op2_value - op1_value # ALU
                if (op2_mode == CONST.MODE_REGISTER):
                    self.gpr[op2_reg] = result
                else:
                    self.sram.ram[op2_addr] = result
                self.clock += 3
                continue
            elif (op_code == CONST.OP_MULT): # Multiply Opcode
                status, op1_addr, op1_value = self._fetchOperand(op1_mode, op1_reg)
                if (status != 0):
                    return CONST.ER_INVALIDMODE
                status, op2_addr, op2_value = self._fetchOperand(op2_mode, op2_reg)
                if (status != 0):
                    return CONST.ER_INVALIDMODE
                result = op1_value * op2_value # ALU
                if (op2_mode == CONST.MODE_REGISTER):
                    self.gpr[op2_reg] = result
                else:
                    self.sram.ram[op2_addr] = result
                self.clock += 6
                continue
            elif (op_code == CONST.OP_DIV): # Divide Opcode
                status, op1_addr, op1_value = self._fetchOperand(op1_mode, op1_reg)
                if (status != 0):
                    return CONST.ER_INVALIDMODE
                status, op2_addr, op2_value = self._fetchOperand(op2_mode, op2_reg)
                if (status != 0):
                    return CONST.ER_INVALIDMODE
                # NOTE(review): '/' is true division in Python 3 and may
                # produce floats in RAM/registers; confirm whether integer
                # division ('//') was intended.
                result = op2_value / op1_value # ALU
                if (op2_mode == CONST.MODE_REGISTER):
                    self.gpr[op2_reg] = result
                else:
                    self.sram.ram[op2_addr] = result
                self.clock += 6
                continue
            elif (op_code == CONST.OP_MOVE): # Move Opcode
                status, op1_addr, op1_value = self._fetchOperand(op1_mode, op1_reg)
                if (status != 0):
                    return CONST.ER_INVALIDMODE
                status, op2_addr, op2_value = self._fetchOperand(op2_mode, op2_reg)
                if (status != 0):
                    return CONST.ER_INVALIDMODE
                result = op1_value
                if (op2_mode == CONST.MODE_REGISTER):
                    self.gpr[op2_reg] = result
                else:
                    self.sram.ram[op2_addr] = result
                self.clock += 2
                continue
            elif (op_code == CONST.OP_BRANCH): # Branch Opcode
                # Branch target is stored in the word following the instruction
                if (self.pc >= 0) and (self.pc <= 9999):
                    self.pc = self.sram.ram[self.pc]
                    self.clock += 2
                else:
                    return CONST.ER_INVALIDADDR
                continue
            elif (op_code == CONST.OP_BRANCHM): # Branch on Minus Opcode
                status, op1_addr, op1_value = self._fetchOperand(op1_mode, op1_reg)
                if (status != 0):
                    return CONST.ER_INVALIDMODE
                # NOTE(review): unlike OP_BRANCH, the conditional branches do
                # not bounds-check self.pc before reading the branch target.
                if (op1_value < 0):
                    self.pc = self.sram.ram[self.pc]
                else:
                    self.pc += 1
                self.clock += 4
                continue
            elif (op_code == CONST.OP_SYSTEM): # System Call Opcode
                status, op1_addr, op1_value = self._fetchOperand(op1_mode, op1_reg)
                if (status != 0):
                    return CONST.ER_INVALIDMODE
                # Delegate system call handling to the OS layer via callback
                status = systemCallCallback(op1_value)
                self.clock += 12
                if (status == CONST.WAITING):
                    return CONST.WAITING
                if (status == CONST.HALT):
                    return 0
                continue
            elif (op_code == CONST.OP_BRANCHP): # Branch on Plus Opcode
                status, op1_addr, op1_value = self._fetchOperand(op1_mode, op1_reg)
                if (status != 0):
                    return CONST.ER_INVALIDMODE
                if (op1_value > 0):
                    self.pc = self.sram.ram[self.pc]
                else:
                    self.pc += 1
                self.clock += 4
                continue
            elif (op_code == CONST.OP_BRANCHZ): # Branch on Zero Opcode
                status, op1_addr, op1_value = self._fetchOperand(op1_mode, op1_reg)
                if (status != 0):
                    return CONST.ER_INVALIDMODE
                if (op1_value == 0):
                    self.pc = self.sram.ram[self.pc]
                else:
                    self.pc += 1
                self.clock += 4
                continue
            elif (op_code == CONST.OP_PUSH): # Push Opcode
                return CONST.ER_OPNOTIMP
            elif (op_code == CONST.OP_POP): # Pop Opcode
                return CONST.ER_OPNOTIMP
            else:
                # Bug fix: Logger.warn is deprecated and removed in
                # Python 3.13; use Logger.warning.
                logger.warning("Invalid Op Code")
                return CONST.ER_INVALIDOP
        return CONST.OK

    def _fetchOperand(self, mode, reg):
        """
        Takes input of a mode and register and returns the values of the
        operands and a status.

        :param mode: addressing mode (one of the CONST.MODE_* values)
        :param reg: general purpose register index for register-based modes
        :return: status, opAddr, opValue. opAddr is -1 for register mode,
            and (None, None) are returned for the address/value on error.
        """
        if (mode == CONST.MODE_DIRECT): # Direct Mode
            if (self.pc >= 0) and (self.pc <= 9999):
                opAddr = self.sram.ram[self.pc] # get opAddr using PC
                self.pc += 1
                if (opAddr >= 0) and (opAddr <= 9999):
                    opValue = self.sram.ram[opAddr] # get opValue
                else:
                    return CONST.ER_INVALIDADDR, None, None
            else:
                return CONST.ER_INVALIDADDR, None, None
            return CONST.OK, opAddr, opValue
        elif (mode == CONST.MODE_REGISTER): # Register Mode
            opAddr = -1 # Not in Memory
            opValue = self.gpr[reg]
            return CONST.OK, opAddr, opValue
        elif (mode == CONST.MODE_REGDEFERRED): # Register Deferred Mode
            opAddr = self.gpr[reg]
            if (opAddr >= 0) and (opAddr <= 9999):
                opValue = self.sram.ram[opAddr]
            else:
                return CONST.ER_INVALIDADDR, None, None
            return CONST.OK, opAddr, opValue
        elif (mode == CONST.MODE_AUTOINC): # Auto Increment Mode
            opAddr = self.gpr[reg]
            if (opAddr >= 0) and (opAddr <= 9999):
                opValue = self.sram.ram[opAddr]
            else:
                return CONST.ER_INVALIDADDR, None, None
            # Register is incremented after a successful fetch
            self.gpr[reg] += 1
            return CONST.OK, opAddr, opValue
        elif (mode == CONST.MODE_AUTODEC): # Auto Decrement Mode
            # Register is decremented before the fetch
            self.gpr[reg] -= 1
            opAddr = self.gpr[reg]
            if (opAddr >= 0) and (opAddr <= 9999):
                opValue = self.sram.ram[opAddr]
            else:
                return CONST.ER_INVALIDADDR, None, None
            return CONST.OK, opAddr, opValue
        elif (mode == CONST.MODE_IMMEDIATE): # Immediate Mode
            # Operand value is the word following the instruction
            opAddr = self.pc
            self.pc += 1
            if (opAddr >= 0) and (opAddr <= 9999):
                opValue = self.sram.ram[opAddr]
            else:
                return CONST.ER_INVALIDADDR, None, None
            return CONST.OK, opAddr, opValue
        else:
            return CONST.ER_INVALIDMODE, None, None
| 42.372951 | 113 | 0.510978 | 10,019 | 0.969049 | 0 | 0 | 0 | 0 | 0 | 0 | 1,076 | 0.104072 |
039115099b0acd0dd43e432d85e08424f1e0930e | 1,493 | py | Python | dom/metadata.py | Starwort/domgen | 598f57c2d365cdef353ed1b373a274715c896867 | [
"MIT"
] | null | null | null | dom/metadata.py | Starwort/domgen | 598f57c2d365cdef353ed1b373a274715c896867 | [
"MIT"
] | null | null | null | dom/metadata.py | Starwort/domgen | 598f57c2d365cdef353ed1b373a274715c896867 | [
"MIT"
] | null | null | null | import functools
from .base_classes import Container, Void
class BaseURL(Void):
"""The HTML `<base>` element specifies the base URL to use for *all*
relative URLs in a document. There can be only one `<base>` element in a
document.
"""
__slots__ = ()
tag = "base"
Base = BaseURL
class ExternalResourceLink(Void):
"""The HTML External Resource Link element (`<link>`) specifies
relationships between the current document and an external resource.
This element is most commonly used to link to stylesheets, but is
also used to establish site icons (both "favicon" style icons and
icons for the home screen and apps on mobile devices) among other
things.
"""
__slots__ = ()
tag = "link"
Link = ExternalResourceLink
ExternalStyleSheet = functools.partial(ExternalResourceLink, rel="stylesheet")
class Meta(Void):
"""The HTML `<meta>` element represents metadata that cannot be
represented by other HTML meta-related elements, like `<base>`,
`<link>`, `<script>`, `<style>` or `<title>`.
"""
__slots__ = ()
tag = "meta"
class Style(Container):
"""The HTML `<style>` element contains style information for a
document, or part of a document.
"""
__slots__ = ()
tag = "style"
class Title(Container):
"""The HTML Title element (`<title>`) defines the document's title
that is shown in a browser's title bar or a page's tab.
"""
__slots__ = ()
tag = "title"
| 23.698413 | 78 | 0.663094 | 1,291 | 0.864702 | 0 | 0 | 0 | 0 | 0 | 0 | 1,007 | 0.674481 |
0391350ff5403c977fa0fbdb326f594770fc8943 | 53 | py | Python | catcher_rl/__main__.py | sohnryang/catcher-rl | 8a45080f2be528be8abb94c3a4eea0dc700ab505 | [
"MIT"
] | null | null | null | catcher_rl/__main__.py | sohnryang/catcher-rl | 8a45080f2be528be8abb94c3a4eea0dc700ab505 | [
"MIT"
] | null | null | null | catcher_rl/__main__.py | sohnryang/catcher-rl | 8a45080f2be528be8abb94c3a4eea0dc700ab505 | [
"MIT"
] | null | null | null | """__main__.py"""
import catcher_rl
catcher_rl.main() | 17.666667 | 17 | 0.754717 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.320755 |
039214ce3d01a32e6fe030aadf9b9ebee3ca3114 | 2,575 | py | Python | helper/downloader/downloadRequest.py | REX-BOTZ/MegaUploaderbot-1 | 025fd97344da388fe607f5db73ad9f4435f51baa | [
"Apache-2.0"
] | 2 | 2021-11-12T13:15:03.000Z | 2021-11-13T12:17:33.000Z | helper/downloader/downloadRequest.py | REX-BOTZ/MegaUploaderbot-1 | 025fd97344da388fe607f5db73ad9f4435f51baa | [
"Apache-2.0"
] | null | null | null | helper/downloader/downloadRequest.py | REX-BOTZ/MegaUploaderbot-1 | 025fd97344da388fe607f5db73ad9f4435f51baa | [
"Apache-2.0"
] | 1 | 2022-01-07T09:55:53.000Z | 2022-01-07T09:55:53.000Z | # !/usr/bin/env python3
"""Importing"""
# Importing Inbuilt packages
from re import match
from shutil import rmtree
from uuid import uuid4
from os import makedirs
# Importing Credentials & Developer defined modules
from helper.downloader.urlDL import UrlDown
from helper.downloader.tgDL import TgDown
from helper.downloader.ytDL import YTDown
from botModule.botMSG import BotMessage
# Importing Credentials & Required Data
try:
from testexp.config import Config
except ModuleNotFoundError:
from config import Config
"""Downloader Class"""
class Downloader:
def __init__(self, bot, msg, log_obj):
self.bot = bot
self.msg = msg
self.log_obj = log_obj
slash = '//' if '/'in Config.DOWNLOAD_LOCATION else '\\'
self.Downloadfolder = Config.DOWNLOAD_LOCATION + slash + str(uuid4()) + slash
makedirs(self.Downloadfolder)
async def start(self):
if self.msg.media: #For Telegram File/media
self.process_msg = await self.msg.reply_text(BotMessage.processing_file, parse_mode = 'html')
await self.file_downloader()
else:
self.url = self.msg.text
self.process_msg = await self.msg.reply_text(BotMessage.processing_url, parse_mode = 'html')
if match('^https://(www.)?youtu(.)?be(.com)?/(.*)', self.url): #For Youtube Video
await self.msg.reply_text("Currently not supporting Youtube Videos.")
await self.process_msg.delete()
# await self.youtube_downloader()
else: #Normal Url
await self.url_downloader()
return self
#Downloading Youtube Video
async def youtube_downloader(self):
rmtree(self.Downloadfolder, ignore_errors = True)
ytDl = YTDown(self.bot, self.msg, self.process_msg, self.url, self.log_obj)
await ytDl.start()
self.filename = None
return
#Downloading From url
async def url_downloader(self):
urlDl = UrlDown(self.bot, self.msg, self.process_msg, self.Downloadfolder, self.url)
await urlDl.start()
self.filename = urlDl.filename
if urlDl.filename:
self.n_msg = urlDl.n_msg
return
return
#Downloading From Telegram File/Media
async def file_downloader(self):
tgDl = TgDown(self.bot, self.msg, self.process_msg, self.Downloadfolder)
await tgDl.start()
self.filename = tgDl.filename
if self.filename:
self.n_msg = tgDl.n_msg
return
return
| 32.594937 | 105 | 0.650485 | 2,019 | 0.784078 | 0 | 0 | 0 | 0 | 1,565 | 0.607767 | 454 | 0.176311 |
03925cef2e841a3daf7a534758f7b4e0dfd688dc | 17,544 | py | Python | skyperious/wx_accel.py | tt9133github/Skyperious | 878957fa8e69b21b9c5465458a896a7008e0bcdc | [
"MIT"
] | null | null | null | skyperious/wx_accel.py | tt9133github/Skyperious | 878957fa8e69b21b9c5465458a896a7008e0bcdc | [
"MIT"
] | null | null | null | skyperious/wx_accel.py | tt9133github/Skyperious | 878957fa8e69b21b9c5465458a896a7008e0bcdc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Functionality for binding wx control label shortcut keys to events
automatically. In wx, a button with a label "E&xit" would be displayed as
having the label "Exit" with "x" underlined, indicating a keyboard shortcut,
but wx does not bind these shortcuts automatically, requiring constructing
the acceleration table piecemeal.
Supported controls:
- wx.Button click handler called
- wx.CheckBox value is reversed, control focused, change handler called
- wx.TextCtrl control focused, all text selected
- wx.RadioButton control focused, value selected
- wx.Control control focused
- wx.ToolBar tool event is called, if the tool shorthelp includes a
parseable shortcut key like (Alt-S)
- wx.ToggleButton ToggleButton handler called
Uses primitive heuristic analysis to detect connected label-control pairs:
- wx.StaticTexts whose next sibling is a focusable control
- wx.StaticTexts that have an Id one less from a focusable control (created
immediately before creating the control)
- wx.StaticTexts that have the same Name as a control with "label" appended or
prepended,
e.g. "iptext" and "iptext_label"|"iptext.label"|"iptext label"|"labeliptext"
------------------------------------------------------------------------------
This file is part of Skyperious - a Skype database viewer and merger.
Released under the MIT License.
@author Erki Suurjaak
@created 19.11.2011
@modified 09.03.2015
------------------------------------------------------------------------------
"""
import functools
import re
import wx
DEBUG = False
class AutoAcceleratorMixIn(object):
"""
A windowed control that assigns global keyboard shortcuts to all its
controls that have a shortcut key defined in their label (e.g. a button'
labeled "E&xit" gets assigned the shortcut Alt-X).
Accelerator table is autocreated on first showing; if changing controls
afterwards, call UpdateAccelerators().
@param use_heuristics whether to use heuristic analysis to detect
connected label-control pairs
"""
def __init__(self, use_heuristics=True):
"""
@param use_heuristics whether to use heuristic analysis to detect
connected label-control pairs
"""
self.__use_heuristics = use_heuristics
self.__shortcuts = None # {shortcut char: target control, }
def Show(self, *args, **kwargs):
"""
Initializes the shortcut keys from child controls, if not already
created, and calls parent.Show.
"""
if not hasattr(self, "__shortcuts"):
self.__shortcuts = None # {shortcut char: target control, }
if self.__shortcuts is None:
self.UpdateAccelerators()
super(AutoAcceleratorMixIn, self).Show(*args, **kwargs)
def UpdateAccelerators(self, use_heuristics=True):
"""
Rebuilds the control shortcut keys in this frame.
@param use_heuristics whether to use heuristic analysis to detect
connected label-control pairs (sticky)
"""
if not hasattr(self, "__shortcuts"):
self.__shortcuts = None # {shortcut char: target control, }
self.__use_heuristics = use_heuristics
self.__shortcuts = accelerate(self, self.__use_heuristics)
def collect_shortcuts(control, use_heuristics=True):
"""
Returns a map of detected shortcut keys and target controls under the
specified control.
@param control the control to start from
@param use_heuristics whether to use heuristic analysis to detect
connected label-control pairs
@return a map of detected shortcut chars and a list of
their target controls (there can be several
controls with one shortcut, e.g. controls on
different pages of a Notebook)
"""
result = {} # {char: control, }
nameds = {} # collected controls with Name {name: control, }
statics = {} # collected StaticTexts with a shortcut {control: char, }
def parse_shortcuts(ctrl):
"""
Parses the shortcut keys from the control label, if any.
@return [keys]
"""
result = []
# wx.TextCtrl.Label is the same as its value, so must not use that
if isinstance(ctrl, wx.ToolBar):
toolsmap = dict()
for i in range(ctrl.GetToolsCount() + 1):
# wx 2.8 has no functionality for getting tools by index, so
# need to gather them by layout position
try:
tool = ctrl.FindToolForPosition(i * ctrl.ToolSize[0], 0)
toolsmap[repr(tool)] = tool
except Exception: pass # FindTool not implemented in GTK
for tool in filter(None, toolsmap.values()):
text = ctrl.GetToolShortHelp(tool.GetId())
parts = re.split("\\(Alt-(.)\\)", text, maxsplit=1)
if len(parts) > 1:
result.append(parts[1].lower())
elif hasattr(ctrl, "Label") and not isinstance(ctrl, wx.TextCtrl):
for part in filter(len, ctrl.Label.split("&")[1:]):
# Labels have potentially multiple ampersands - find one that
# is usable (preceding a valid character. 32 and lower are
# spaces, punctuation, control characters, etc).
key = part[0].lower()
if ord(key) > 32:
result.append(key)
if (DEBUG) and key:
print("Parsed '%s' in label '%s'." % (key, ctrl.Label))
break # break for part in filter
return result
def collect_recurse(ctrl, result, nameds, statics):
"""
Goes through the control and all its children and collects accelerated
controls.
@return {key: control, }
"""
if hasattr(ctrl, "GetChildren"):
children = ctrl.GetChildren()
for i in range(len(children)):
collect_recurse(children[i], result, nameds, statics)
keys = parse_shortcuts(ctrl)
for key in keys:
if isinstance(ctrl, wx.StaticText):
statics[ctrl] = key
else:
if key not in result:
result[key] = []
if ctrl not in result[key]:
result[key].append(ctrl)
if (DEBUG): print("Selected '%s' for '%s' (%s.Id=%s)." %
(key, ctrl.Label, ctrl.ClassName,
ctrl.GetId()))
if ctrl.Name:
if DEBUG: print("Found named control %s %s." % (ctrl.Name, ctrl))
nameds[ctrl.Name] = ctrl
collect_recurse(control, result, nameds, statics)
result_values = [j for i in result.values() for j in i]
if use_heuristics:
for ctrl, key in statics.items():
# For wx.StaticTexts, see if the next sibling, or control with the
# next ID, or control sitting next in the sizer is focusable -
# shortcut will set focus to the control.
chosen = None
next_sibling = hasattr(ctrl, "GetNextSibling") \
and ctrl.GetNextSibling()
# Do not include buttons, as buttons have their own shortcut keys.
if next_sibling and not isinstance(next_sibling, wx.Button) \
and (not next_sibling.Enabled or next_sibling.AcceptsFocus()
or getattr(next_sibling, "CanAcceptFocus", lambda: False)()):
chosen = next_sibling
if (DEBUG):
print("Selected '%s' by previous sibling wxStaticText "
"'%s' (%s.ID=%s)." %
(key, ctrl.Label, chosen.ClassName, chosen.Id))
if not chosen:
# Try to see if the item with the next ID is focusable.
next_ctrl = wx.FindWindowById(ctrl.Id - 1)
# Disabled controls might return False for AcceptsFocus).
if next_ctrl and not isinstance(next_ctrl, wx.Button) \
and (not next_ctrl.Enabled or next_ctrl.AcceptsFocus()
or getattr(next_ctrl, "CanAcceptFocus", lambda: False)()):
chosen = next_ctrl
if (DEBUG):
print("Selected '%s' by previous ID wxStaticText "
"'%s' (%s.ID=%s)." %
(key, ctrl.Label, chosen.ClassName, chosen.Id))
if not chosen and ctrl.ContainingSizer:
# Try to see if the item next in the same sizer is focusable
sizer_items = []
while True:
try:
item = ctrl.ContainingSizer.GetItem(len(sizer_items))
sizer_items.append(item.Window)
except Exception:
break # Reached item limit
index = sizer_items.index(ctrl)
if index < len(sizer_items) - 1:
next_ctrl = sizer_items[index + 1]
if (next_ctrl and not isinstance(next_ctrl, wx.Button)
and (not next_ctrl.Enabled or next_ctrl.AcceptsFocus()
or getattr(next_ctrl, "CanAcceptFocus", lambda: False)())):
chosen = next_ctrl
if (DEBUG):
print("Selected '%s' by previous in sizer "
"wxStaticText '%s' (%s.ID=%s)." %
(key, ctrl.Label, chosen.ClassName, chosen.Id))
if chosen and chosen not in result_values:
if key not in result:
result[key] = []
result[key].append(chosen)
result_values.append(chosen)
for name, ctrl in nameds.items():
# For named controls, see if there is another control with the same
# name, but "label" appended or prepended.
if (DEBUG): print("Going through named %s '%s'." % (ctrl, name))
match_found = False
label_regex = re.compile("(^label[_ \\.]*%s$)|(^%s[_ \\.]*label$)"
% tuple([name] * 2), re.IGNORECASE)
for potential_name, potential in nameds.items():
if label_regex.match(potential_name):
keys = parse_shortcuts(potential)
for key in keys:
if (DEBUG):
print("Name %s matches potential %s, key=%s." % (
name, potential_name, key))
if key and (ctrl not in result_values):
match_found = True
if key not in result:
result[key] = []
if ctrl not in result[key]:
result[key].append(ctrl)
result_values.append(ctrl)
if (DEBUG):
print("Selected '%s' by named StaticText "
"'%s' (%s.ID=%s, %s.Name=%s, "
"wxStaticText.Name=%s)." %
(key, potential.Label, ctrl.ClassName,
ctrl.ClassName, ctrl.Id, ctrl.Name,
potential.Name))
break # break for key in keys
if match_found:
break # break for potential_name, potential in nameds
return result
def accelerate(window, use_heuristics=True):
"""
Assigns global keyboard shortcuts to all controls under the specified
wx.Window that have a shortcut key defined in their label (e.g. a button
labeled "E&xit" gets assigned the shortcut Alt-X). Resets previously
set accelerators, if any.
@param control the wx.Window instance to process, gets its
accelerator table reset
@param use_heuristics whether to use heuristic analysis to detect
connected label-control pairs
@return a map of detected shortcut chars and their target
controls
"""
def shortcut_handler(targets, key, shortcut_event):
"""
Shortcut event handler, calls the appropriate event on the target.
@param targets list of target controls. If there is more than
one target control, the first non-disabled
and visible is chosen.
@param key the event shortcut key, like 's'
@param shortcut_event menu event generated by the accelerator table
"""
if (DEBUG):
print("Handling target %s" %
[(type(t), t.Id, t.Label) for t in targets])
event = None
for target in targets:
if (isinstance(target, wx.Control) # has not been destroyed
and target.IsShownOnScreen() # visible on current panel
and target.Enabled):
if isinstance(target, wx.Button):
# Buttons do not get focus on shortcuts by convention
event = wx.CommandEvent(wx.EVT_BUTTON.typeId, target.Id)
event.SetEventObject(target)
elif isinstance(target, wx.ToggleButton):
# Buttons do not get focus on shortcuts by convention
event = wx.CommandEvent(wx.EVT_TOGGLEBUTTON.typeId,
target.Id)
event.SetEventObject(target)
# Need to change value, as event goes directly to handler
target.Value = not target.Value
elif isinstance(target, wx.CheckBox):
event = wx.CommandEvent(wx.EVT_CHECKBOX.typeId, target.Id)
# Need to change value, as event goes directly to handler
target.Value = not target.Value
target.SetFocus()
elif isinstance(target, wx.ToolBar):
# Toolbar shortcuts are defined in their shorthelp texts
toolsmap, tb = dict(), target
for i in range(tb.GetToolsCount() + 1):
try:
tool = tb.FindToolForPosition(i * tb.ToolSize[0], 0)
toolsmap[repr(tool)] = tool
except Exception: pass # FindTool not implemented in GTK
for tool in filter(None, toolsmap.values()):
id = tool.GetId()
text = tb.GetToolShortHelp(id)
parts = re.split("\\(Alt-(%s)\\)" % key, text,
maxsplit=1, flags=re.IGNORECASE)
if len(parts) > 1:
event = wx.CommandEvent(wx.EVT_TOOL.typeId, id)
event.SetEventObject(target)
target.ToggleTool(id, not target.GetToolState(id))
break # break for i in range(target.GetToolsCount)
else:
target.SetFocus()
if isinstance(target, wx.TextCtrl):
target.SelectAll()
break # break for target in targets
if event:
if (DEBUG): print("Chose target %s." % (target.Label or target))
wx.PostEvent(target.GetEventHandler(), event)
else:
shortcut_event.Skip(True) # Not handled by us: propagate
if hasattr(window, "__ampersand_shortcut_menu"):
# Remove previously created menu, if any
for menu_item in window.__ampersand_shortcut_menu.MenuItems:
if (DEBUG): print("Removing dummy menu item '%s'" % menu_item.Label)
window.Unbind(wx.EVT_MENU, menu_item)
del window.__ampersand_shortcut_menu
shortcuts = collect_shortcuts(window, use_heuristics)
if shortcuts:
accelerators = []
dummy_menu = wx.Menu()
for key, targets in shortcuts.items():
if (DEBUG): print("Binding %s to targets %s." %
(key, [type(t) for t in targets]))
menu_item = dummy_menu.Append(wx.ID_ANY, text="&%s" % key)
window.Bind(wx.EVT_MENU,
functools.partial(shortcut_handler, targets, key),
menu_item)
accelerators.append((wx.ACCEL_ALT, ord(key), menu_item.Id))
window.SetAcceleratorTable(wx.AcceleratorTable(accelerators))
window.__ampersand_shortcut_menu = dummy_menu
return shortcuts
| 48.197802 | 82 | 0.529127 | 1,827 | 0.104138 | 0 | 0 | 0 | 0 | 0 | 0 | 7,087 | 0.403956 |
0392a06f401816010aba9707153aeba037ae42bf | 217,226 | py | Python | pirates/leveleditor/worldData/CubaIsland.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | 3 | 2021-02-25T06:38:13.000Z | 2022-03-22T07:00:15.000Z | pirates/leveleditor/worldData/CubaIsland.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | null | null | null | pirates/leveleditor/worldData/CubaIsland.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | 1 | 2021-02-25T06:38:17.000Z | 2021-02-25T06:38:17.000Z | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.leveleditor.worldData.CubaIsland
from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
objectStruct = {'Locator Links': [['1161732578.11sdnaik', '1161732370.86sdnaik', 'Bi-directional'], ['1161732317.95sdnaik', '1161732370.88sdnaik', 'Bi-directional'], ['1161732322.52sdnaik', '1161732705.72sdnaik', 'Bi-directional'], ['1161732578.08sdnaik', '1161732705.7sdnaik', 'Bi-directional']], 'Objects': {'1160614528.73sdnaik': {'Type': 'Island', 'Name': 'CubaIsland', 'File': '', 'Environment': 'OpenSky', 'Footstep Sound': 'Sand', 'Minimap': False, 'Objects': {'1161732317.95sdnaik': {'Type': 'Locator Node', 'Name': 'portal_exterior_1', 'Hpr': VBase3(180.0, 0.0, 0.0), 'Pos': Point3(471.383, -559.794, -2.597), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (1.0, 1.0, 1.0, 1.0)}}, '1161732322.52sdnaik': {'Type': 'Locator Node', 'Name': 'portal_exterior_2', 'Hpr': VBase3(-101.237, 0.0, 0.0), 'Pos': Point3(107.301, -127.258, 0.205), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1161732370.84sdnaik': {'Type': 'Connector Tunnel', 'File': '', 'Hpr': Point3(0.0, 0.0, 0.0), 'Objects': {'1161732370.86sdnaik': {'Type': 'Locator Node', 'Name': 'portal_connector_1', 'GridPos': Point3(1127.779, -170.628, 33.329), 'Hpr': VBase3(-88.748, 0.0, 0.0), 'Pos': Point3(-3.613, 0.304, 4.651), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1161732370.88sdnaik': {'Type': 'Locator Node', 'Name': 'portal_connector_2', 'GridPos': Point3(1061.428, -327.097, 32.474), 'Hpr': VBase3(72.65, -1.426, -0.516), 'Pos': Point3(-103.188, 135.024, 3.777), 'Scale': VBase3(1.0, 1.0, 1.0)}}, 'Pos': Point3(95.277, -622.544, 241.267), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/tunnels/tunnel_swamp'}}, '1161732578.06sdnaik': {'Type': 'Island Game Area', 'File': 'cuba_area_swamp_1', 'Hpr': VBase3(83.644, 0.105, -0.94), 'Objects': {'1161732578.08sdnaik': {'Type': 'Locator Node', 'Name': 'portal_interior_1', 'GridPos': Point3(1533.649, 436.867, 94.327), 'Hpr': VBase3(-177.386, -0.684, -0.017), 'Pos': Point3(400.751, 192.485, 6.419), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1161732578.11sdnaik': {'Type': 'Locator Node', 
'Name': 'portal_interior_2', 'GridPos': Point3(900.096, 220.241, 102.291), 'Hpr': VBase3(2.192, 0.683, 0.039), 'Pos': Point3(-232.802, -24.141, 14.383), 'Scale': VBase3(1.0, 1.0, 1.0)}}, 'Pos': Point3(1132.898, 244.382, 597.635), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/swamps/pir_m_are_swm_a'}}, '1161732705.67sdnaik': {'Type': 'Connector Tunnel', 'File': '', 'Hpr': VBase3(-47.944, -3.89, 3.503), 'Objects': {'1161732705.72sdnaik': {'Type': 'Locator Node', 'Name': 'portal_connector_2', 'GridPos': Point3(708.83, 396.283, 89.205), 'Hpr': VBase3(72.65, -1.426, -0.516), 'Pos': Point3(-103.188, 135.024, 3.777), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1161732705.7sdnaik': {'Type': 'Locator Node', 'Name': 'portal_connector_1', 'GridPos': Point3(775.181, 552.752, 90.061), 'Hpr': VBase3(-88.748, 0.0, 0.0), 'Pos': Point3(-3.613, 0.304, 4.651), 'Scale': VBase3(1.0, 1.0, 1.0)}}, 'Pos': Point3(-163.185, 26.795, 316.996), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/tunnels/tunnel_swamp'}}, '1162496104.57dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-121.98, 5.318, 2.905), 'Pos': Point3(194.391, -145.836, 1.786), 'Scale': VBase3(1.14, 1.14, 1.14), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots'}}, '1162496561.59dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-174.43, 3.494, 3.134), 'Pos': Point3(248.807, -187.757, -1.425), 'Scale': VBase3(1.749, 1.749, 1.749), 'Visual': {'Color': (0.47999998927116394, 0.5699999928474426, 0.5600000023841858, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162496585.79dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(102.954, -3.649, 0.624), 'Pos': Point3(228.148, -194.805, -0.104), 'Scale': VBase3(1.749, 1.749, 1.749), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots'}}, '1162496638.89dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-178.512, 0.068, -5.979), 'Pos': Point3(221.706, -161.475, -3.687), 
'Scale': VBase3(1.212, 1.212, 1.212), 'Visual': {'Color': (0.800000011920929, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162496693.54dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-81.75, 5.236, 2.288), 'Pos': Point3(306.624, -244.912, 2.29), 'Scale': VBase3(1.846, 1.846, 1.846), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162496757.15dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-162.582, -1.433, 5.53), 'Pos': Point3(288.119, -213.242, 5.442), 'Scale': VBase3(1.846, 1.846, 1.846), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots'}}, '1162496818.98dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-95.42, 2.604, -0.358), 'Pos': Point3(262.002, -197.86, -1.237), 'Scale': VBase3(1.813, 1.813, 1.813), 'Visual': {'Color': (0.800000011920929, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162496857.71dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-49.89, 1.57, -2.109), 'Pos': Point3(290.286, -233.631, 1.056), 'Scale': VBase3(1.685, 1.685, 1.685), 'Visual': {'Color': (0.800000011920929, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162496880.34dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-49.89, 1.57, -2.109), 'Pos': Point3(203.311, -212.777, 2.077), 'Scale': VBase3(1.685, 1.685, 1.685), 'Visual': {'Color': (0.800000011920929, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162496889.81dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-132.466, 2.295, 1.283), 'Pos': Point3(159.066, -132.814, 2.534), 'Scale': VBase3(1.685, 1.685, 1.685), 'Visual': {'Color': (0.47999998927116394, 0.5699999928474426, 0.5600000023841858, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162496999.35dzlu': {'Type': 
'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-76.632, 2.351, -1.177), 'Pos': Point3(185.402, -156.567, -1.333), 'Scale': VBase3(1.181, 1.181, 1.181), 'Visual': {'Color': (0.8, 0.87, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162497015.78dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(175.664, -2.875, 4.677), 'Pos': Point3(132.299, -158.771, 0.088), 'Scale': VBase3(1.181, 1.181, 1.181), 'Visual': {'Color': (0.8, 0.87, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162497038.53dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-93.151, 1.894, 1.69), 'Pos': Point3(174.318, -174.436, -2.428), 'Scale': VBase3(1.477, 1.477, 1.477), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots'}}, '1162497249.64dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-27.472, 2.32, -1.029), 'Pos': Point3(61.752, -128.37, 0.0), 'Scale': VBase3(2.466, 2.466, 2.466), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots'}}, '1162497329.21dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-55.834, 1.221, 0.0), 'Pos': Point3(86.467, -131.751, -2.015), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0), 'Model': 'models/vegetation/swamp_tree_a'}}, '1162497460.96dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(158.083, -7.978, -0.962), 'Pos': Point3(28.162, -66.255, -8.54), 'Scale': VBase3(2.788, 2.788, 2.788), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots'}}, '1162497568.12dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(16.872, -119.363, -2.0), 'Scale': VBase3(0.895, 0.895, 0.895), 'Visual': {'Color': (1.0, 0.9900000095367432, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_a'}}, '1162497591.24dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-27.12, 0.0, 0.0), 'Pos': Point3(-9.666, 
-73.007, -2.0), 'Scale': VBase3(0.872, 0.872, 0.872), 'Visual': {'Model': 'models/vegetation/swamp_tree_b'}}, '1162497648.96dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-96.431, -90.501, -2.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.8500000238418579, 0.8199999928474426, 0.7300000190734863, 1.0), 'Model': 'models/vegetation/swamp_tree_thin'}}, '1162497681.26dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-6.462, -124.144, -2.0), 'Scale': VBase3(1.172, 1.172, 1.172), 'Visual': {'Color': (0.47999998927116394, 0.5699999928474426, 0.5600000023841858, 1.0), 'Model': 'models/vegetation/swamp_tree_thin'}}, '1162497693.48dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(11.047, -132.343, 1.061), 'Scale': VBase3(1.172, 1.172, 1.172), 'Visual': {'Model': 'models/vegetation/swamp_tree_thin'}}, '1162497709.17dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-7.714, -131.882, -4.475), 'Scale': VBase3(1.018, 1.018, 1.018), 'Visual': {'Color': (0.47999998927116394, 0.5699999928474426, 0.5600000023841858, 1.0), 'Model': 'models/vegetation/swamp_tree_thin'}}, '1162498231.46dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(92.824, 1.752, 1.371), 'Pos': Point3(289.328, -322.038, -1.973), 'Scale': VBase3(2.391, 2.391, 2.391), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162498233.67dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-81.75, 5.236, 2.288), 'Pos': Point3(311.02, -330.127, -2.0), 'Scale': VBase3(1.846, 1.846, 1.846), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162498236.93dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-81.75, 
5.236, 2.288), 'Pos': Point3(291.992, -284.872, -2.0), 'Scale': VBase3(1.846, 1.846, 1.846), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162498256.79dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-175.195, -2.608, 5.084), 'Pos': Point3(351.684, -338.435, 6.714), 'Scale': VBase3(2.484, 2.484, 2.484), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162498287.12dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-55.834, 0.0, 0.0), 'Pos': Point3(330.397, -323.928, -2.0), 'Scale': VBase3(0.868, 0.868, 0.868), 'Visual': {'Model': 'models/vegetation/swamp_tree_a'}}, '1162498321.2dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-163.404, -2.394, 1.296), 'Pos': Point3(-105.028, -386.378, -0.932), 'Scale': VBase3(1.729, 1.729, 1.729), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162498369.29dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-156.824, -2.23, 1.562), 'Pos': Point3(-39.408, -430.211, -4.527), 'Scale': VBase3(1.942, 1.942, 1.942), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162498390.34dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(56.286, 1.014, -2.526), 'Pos': Point3(-12.316, -534.81, -2.0), 'Scale': VBase3(2.072, 2.072, 2.072), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162498400.56dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-138.604, -1.63, 4.472), 'Pos': Point3(-6.568, -563.456, -2.0), 'Scale': VBase3(1.924, 1.924, 1.924), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 
'models/vegetation/swamp_tree_roots'}}, '1162498416.74dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-115.464, 0.262, -0.36), 'Pos': Point3(-32.335, -543.622, 0.636), 'Scale': VBase3(2.21, 2.21, 2.21), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162498428.64dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(177.628, -4.27, 2.103), 'Pos': Point3(-37.621, -560.769, 0.288), 'Scale': VBase3(1.591, 1.591, 1.591), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162498500.51dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-125.283, -101.993, -2.0), 'Scale': VBase3(0.779, 0.779, 0.779), 'Visual': {'Model': 'models/vegetation/swamp_tree_a'}}, '1162498514.14dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-96.751, 0.0, 0.0), 'Pos': Point3(-133.251, -114.765, -2.0), 'Scale': VBase3(0.7, 0.7, 0.7), 'Visual': {'Color': (0.47999998927116394, 0.5699999928474426, 0.5600000023841858, 1.0), 'Model': 'models/vegetation/swamp_tree_a'}}, '1162498585.56dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-131.038, -155.049, -2.0), 'Scale': VBase3(1.276, 1.276, 1.276), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots'}}, '1162498611.99dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-95.057, 0.0, 0.0), 'Pos': Point3(-122.674, -137.024, 3.248), 'Scale': VBase3(1.372, 1.372, 1.372), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots'}}, '1162498633.87dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-89.806, -151.497, -2.0), 'Scale': VBase3(0.745, 0.745, 0.745), 'Visual': {'Model': 'models/vegetation/swamp_tree_a'}}, '1162498653.28dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 
'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-106.137, -96.349, -1.129), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.47999998927116394, 0.5699999928474426, 0.5600000023841858, 1.0), 'Model': 'models/vegetation/swamp_tree_thin'}}, '1162501202.2dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-58.658, 0.0, 0.0), 'Pos': Point3(-257.241, -565.015, 15.245), 'Scale': VBase3(1.692, 1.692, 1.692), 'Visual': {'Color': (0.47999998927116394, 0.5699999928474426, 0.5600000023841858, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162501211.4dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-290.342, -852.048, 8.106), 'Scale': VBase3(1.364, 1.364, 1.364), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots'}}, '1162501216.51dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-283.447, -583.547, 20.569), 'Scale': VBase3(1.364, 1.364, 1.364), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots'}}, '1162501218.89dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-276.987, -1036.227, -2.0), 'Scale': VBase3(1.364, 1.364, 1.364), 'Visual': {'Color': (0.72, 0.79, 0.788235294117647, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162501221.32dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(86.035, 0.0, 0.0), 'Pos': Point3(-247.343, -951.268, -1.225), 'Scale': VBase3(1.364, 1.364, 1.364), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots'}}, '1162501223.18dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(31.708, -1.565, -0.47), 'Pos': Point3(-279.114, -872.037, 2.431), 'Scale': VBase3(1.278, 1.278, 1.278), 'Visual': {'Color': (0.75, 0.75, 0.7529411764705882, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162501264.03dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(174.015, -1.384, -1.516), 'Pos': 
Point3(-275.049, -895.262, 2.226), 'Scale': VBase3(1.324, 1.324, 1.324), 'Visual': {'Color': (0.699999988079071, 0.7300000190734863, 0.5799999833106995, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162501292.92dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(2.058, 1.989, 3.485), 'Pos': Point3(-269.343, -916.458, 2.4), 'Scale': VBase3(1.845, 1.845, 1.845), 'Visual': {'Color': (0.71, 0.82, 0.7019607843137254, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162501329.71dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-95.508, -3.718, 1.51), 'Pos': Point3(-269.887, -984.059, -1.318), 'Scale': VBase3(1.431, 1.431, 1.431), 'Visual': {'Color': (0.78, 0.77, 0.5058823529411764, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162501346.57dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(86.721, 3.656, -1.653), 'Pos': Point3(-23.837, -466.188, -7.102), 'Scale': VBase3(1.546, 1.546, 1.546), 'Visual': {'Color': (0.75, 0.98, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162501361.39dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(-103.846, -3.897, 0.955), 'Pos': Point3(-217.634, -540.937, 18.079), 'Scale': VBase3(1.479, 1.479, 1.479), 'VisSize': '', 'Visual': {'Color': (0.800000011920929, 0.8700000047683716, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162501378.34dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-96.283, -3.737, 1.459), 'Pos': Point3(-357.62, -161.217, -2.0), 'Scale': VBase3(1.624, 1.624, 1.624), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots'}}, '1162501380.67dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-96.283, -3.737, 1.459), 'Pos': Point3(-357.62, -161.217, -2.0), 'Scale': VBase3(1.624, 1.624, 1.624), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots'}}, '1162501506.98dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 
'Hpr': VBase3(-61.432, -3.526, 1.918), 'Pos': Point3(-230.972, -1023.899, -2.592), 'Scale': VBase3(1.479, 1.479, 1.479), 'Visual': {'Color': (0.62, 0.72, 0.7568627450980392, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162501515.84dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(88.974, 3.588, -1.795), 'Pos': Point3(-290.512, -1072.11, -8.037), 'Scale': VBase3(1.479, 1.479, 1.479), 'Visual': {'Color': (1.0, 0.9900000095367432, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162501551.93dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(28.831, 3.346, 2.215), 'Pos': Point3(-274.776, -943.157, 1.978), 'Scale': VBase3(1.479, 1.479, 1.479), 'Visual': {'Color': (0.87, 0.87, 0.6196078431372549, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162501577.87dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(13.203, -3.603, 3.052), 'Pos': Point3(-164.497, -476.962, 8.161), 'Scale': VBase3(1.489, 1.489, 1.489), 'Visual': {'Color': (1.0, 0.9900000095367432, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162501603.15dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-59.997, -3.967, -2.561), 'Pos': Point3(-188.222, -504.262, 14.045), 'Scale': VBase3(1.275, 1.275, 1.275), 'Visual': {'Color': (1.0, 0.9900000095367432, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162501641.98dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-90.357, -94.791, -2.0), 'Scale': VBase3(0.86, 0.86, 0.86), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/vegetation/swamp_tree_thin'}}, '1162501646.46dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-48.359, -71.927, -2.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_tree_thin'}}, '1162501650.07dzlu': {'Type': 
'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-38.622, 0.0, 0.0), 'Pos': Point3(-46.673, -61.255, -2.0), 'Scale': VBase3(0.93, 0.93, 0.93), 'Visual': {'Model': 'models/vegetation/swamp_tree_thin'}}, '1162501671.89dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-6.162, 0.0, 0.0), 'Pos': Point3(-109.827, -75.473, -1.193), 'Scale': VBase3(0.563, 0.563, 0.563), 'Visual': {'Model': 'models/vegetation/swamp_tree_a'}}, '1162501689.73dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-33.613, 0.0, 0.0), 'Pos': Point3(-44.023, -93.572, -2.0), 'Scale': VBase3(0.605, 0.605, 0.605), 'Visual': {'Color': (0.47999998927116394, 0.5699999928474426, 0.5600000023841858, 1.0), 'Model': 'models/vegetation/swamp_tree_a'}}, '1162501722.73dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-140.898, 0.105, 1.216), 'Pos': Point3(-40.76, -76.109, -2.0), 'Scale': VBase3(0.822, 0.822, 0.822), 'Visual': {'Model': 'models/vegetation/swamp_tree_a'}}, '1162501750.34dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-32.019, 0.0, 0.0), 'Pos': Point3(-117.391, -205.372, -2.0), 'Scale': VBase3(0.745, 0.745, 0.745), 'Visual': {'Color': (0.5, 0.5, 0.5, 1.0), 'Model': 'models/vegetation/swamp_tree_a'}}, '1162501776.62dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-13.269, 0.0, 0.0), 'Pos': Point3(-72.482, -272.37, -2.0), 'Scale': VBase3(0.745, 0.745, 0.745), 'Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0), 'Model': 'models/vegetation/swamp_tree_a'}}, '1162501796.43dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-13.269, 0.0, 0.0), 'Pos': Point3(56.456, -177.916, 0.0), 'Scale': VBase3(0.745, 0.745, 0.745), 'Visual': {'Model': 'models/vegetation/swamp_tree_a'}}, '1162501799.67dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-13.269, 0.0, 0.0), 'Pos': Point3(-49.421, -254.503, -2.0), 'Scale': VBase3(0.745, 
0.745, 0.745), 'Visual': {'Model': 'models/vegetation/swamp_tree_a'}}, '1162501880.84dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-55.834, 1.221, 0.0), 'Pos': Point3(64.544, -147.513, -2.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0), 'Model': 'models/vegetation/swamp_tree_a'}}, '1162502780.21dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-56.618, 0.0, 0.0), 'Pos': Point3(238.848, -441.555, 0.0), 'Scale': VBase3(0.868, 0.868, 0.868), 'Visual': {'Model': 'models/vegetation/swamp_tree_a'}}, '1162504044.29dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(80.734, -0.123, -2.72), 'Pos': Point3(-119.407, -428.205, 0.956), 'Scale': VBase3(1.729, 1.729, 1.729), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504062.54dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-18.895, 2.702, 0.334), 'Pos': Point3(-141.666, -446.35, 7.427), 'Scale': VBase3(1.488, 1.488, 1.488), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504090.14dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-13.269, 0.0, 0.0), 'Pos': Point3(-67.554, -342.621, -2.0), 'Scale': VBase3(0.745, 0.745, 0.745), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/vegetation/swamp_tree_a'}}, '1162504101.34dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-13.269, 0.0, 0.0), 'Pos': Point3(-71.952, -260.648, -2.0), 'Scale': VBase3(0.745, 0.745, 0.745), 'Visual': {'Color': (0.7900000214576721, 0.6499999761581421, 0.5299999713897705, 1.0), 'Model': 'models/vegetation/swamp_tree_a'}}, '1162504103.39dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-13.269, 0.0, 0.0), 'Pos': 
Point3(-83.681, -350.664, -2.0), 'Scale': VBase3(0.641, 0.641, 0.641), 'Visual': {'Color': (0.47999998927116394, 0.5699999928474426, 0.5600000023841858, 1.0), 'Model': 'models/vegetation/swamp_tree_a'}}, '1162504362.84dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-55.834, 1.221, 0.0), 'Pos': Point3(109.058, -288.79, -2.0), 'Scale': VBase3(0.877, 0.877, 0.877), 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0), 'Model': 'models/vegetation/swamp_tree_a'}}, '1162504374.53dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-134.441, 0.241, 1.197), 'Pos': Point3(-57.046, -228.005, 0.0), 'Scale': VBase3(0.804, 0.804, 0.804), 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0), 'Model': 'models/vegetation/swamp_tree_a'}}, '1162504384.23dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-142.166, 0.095, 1.217), 'Pos': Point3(227.3, -465.162, 0.0), 'Scale': VBase3(0.804, 0.804, 0.804), 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0), 'Model': 'models/vegetation/swamp_tree_a'}}, '1162504406.53dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-145.594, 0.251, 5.707), 'Pos': Point3(185.073, -205.108, -2.0), 'Scale': VBase3(2.21, 2.21, 2.21), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504452.48dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(175.153, -3.425, 4.575), 'Pos': Point3(-107.083, -370.161, -3.838), 'Scale': VBase3(2.408, 2.408, 2.408), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504463.24dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(55.708, -2.313, -5.225), 'Pos': Point3(-36.583, -340.483, -2.0), 'Scale': VBase3(2.228, 2.228, 2.228), 'Visual': {'Color': 
(0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504470.51dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(55.708, -2.313, -5.225), 'Pos': Point3(-108.99, -412.732, -0.098), 'Scale': VBase3(2.012, 2.012, 2.012), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504493.21dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-115.124, 3.117, 4.789), 'Pos': Point3(-112.092, -484.478, -9.504), 'Scale': VBase3(2.128, 2.128, 2.128), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504510.79dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-111.991, 3.374, 4.612), 'Pos': Point3(375.199, -363.599, -2.0), 'Scale': VBase3(2.128, 2.128, 2.128), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504512.71dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-111.991, 3.374, 4.612), 'Pos': Point3(379.228, -395.531, -2.0), 'Scale': VBase3(2.128, 2.128, 2.128), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504513.99dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-111.991, 3.374, 4.612), 'Pos': Point3(344.415, -416.725, -3.103), 'Scale': VBase3(2.128, 2.128, 2.128), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504515.7dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-111.991, 3.374, 4.612), 'Pos': Point3(392.737, -486.368, -2.0), 'Scale': VBase3(2.128, 2.128, 2.128), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504517.18dzlu': {'Type': 
'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-111.991, 3.374, 4.612), 'Pos': Point3(408.232, -487.761, -2.0), 'Scale': VBase3(2.128, 2.128, 2.128), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504518.45dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-156.061, -0.793, 5.657), 'Pos': Point3(420.874, -500.259, -2.0), 'Scale': VBase3(1.819, 1.819, 1.819), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504520.09dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-111.991, 3.374, 4.612), 'Pos': Point3(457.577, -504.001, -2.0), 'Scale': VBase3(2.128, 2.128, 2.128), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504563.18dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-174.648, -2.559, 5.108), 'Pos': Point3(380.532, -434.393, -1.918), 'Scale': VBase3(1.819, 1.819, 1.819), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504565.28dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(161.081, -4.434, 3.606), 'Pos': Point3(376.171, -469.964, -2.0), 'Scale': VBase3(2.094, 2.094, 2.094), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504579.32dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(161.081, -4.434, 3.606), 'Pos': Point3(412.314, -435.24, -2.0), 'Scale': VBase3(2.094, 2.094, 2.094), 'Visual': {'Color': (0.47999998927116394, 0.5699999928474426, 0.5600000023841858, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504581.82dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(161.081, -4.434, 3.606), 'Pos': Point3(470.863, -525.388, -2.0), 'Scale': 
VBase3(2.094, 2.094, 2.094), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504589.29dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(161.081, -4.434, 3.606), 'Pos': Point3(507.166, -510.536, -2.0), 'Scale': VBase3(2.094, 2.094, 2.094), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504613.95dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(53.269, 0.0, 0.0), 'Pos': Point3(473.617, -539.435, -0.625), 'Scale': VBase3(0.868, 0.868, 0.868), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/vegetation/swamp_tree_a'}}, '1162504615.65dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-55.834, 0.0, 0.0), 'Pos': Point3(470.252, -579.967, -2.0), 'Scale': VBase3(0.868, 0.868, 0.868), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/vegetation/swamp_tree_a'}}, '1162504622.87dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-111.991, 3.374, 4.612), 'Pos': Point3(460.295, -590.002, 0.0), 'Scale': VBase3(1.417, 1.417, 1.417), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504625.28dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-148.266, -6.577, -3.666), 'Pos': Point3(442.589, -600.058, 0.0), 'Scale': VBase3(2.128, 2.128, 2.128), 'Visual': {'Color': (0.8500000238418579, 0.8199999928474426, 0.7300000190734863, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504626.54dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-111.991, 3.374, 4.612), 'Pos': Point3(464.441, -643.422, -2.0), 'Scale': VBase3(2.128, 2.128, 2.128), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 
'models/vegetation/swamp_tree_roots'}}, '1162504628.01dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-111.991, 3.374, 4.612), 'Pos': Point3(513.888, -652.936, -2.0), 'Scale': VBase3(2.128, 2.128, 2.128), 'Visual': {'Color': (0.47999998927116394, 0.5699999928474426, 0.5600000023841858, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504629.42dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-111.991, 3.374, 4.612), 'Pos': Point3(493.592, -653.513, -2.0), 'Scale': VBase3(2.128, 2.128, 2.128), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504630.82dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-111.991, 3.374, 4.612), 'Pos': Point3(545.894, -678.517, -2.0), 'Scale': VBase3(2.128, 2.128, 2.128), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504635.54dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-55.834, 0.0, 0.0), 'Pos': Point3(471.228, -729.263, -2.0), 'Scale': VBase3(0.868, 0.868, 0.868), 'Visual': {'Model': 'models/vegetation/swamp_tree_a'}}, '1162504651.18dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-55.834, 0.0, 0.0), 'Pos': Point3(-131.543, -219.835, -2.0), 'Scale': VBase3(0.868, 0.868, 0.868), 'Visual': {'Model': 'models/vegetation/swamp_tree_a'}}, '1162504683.95dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-130.455, 16.842, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.47999998927116394, 0.5699999928474426, 0.5600000023841858, 1.0), 'Model': 'models/vegetation/swamp_tree_thin'}}, '1162504691.23dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-126.7, -27.748, -2.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 
'models/vegetation/swamp_tree_thin'}}, '1162504697.12dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-76.974, 58.572, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.47999998927116394, 0.5699999928474426, 0.5600000023841858, 1.0), 'Model': 'models/vegetation/swamp_tree_thin'}}, '1162504701.23dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-44.268, 51.753, -2.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_tree_thin'}}, '1162504709.56dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-45.704, 57.451, -2.0), 'Scale': VBase3(0.546, 0.546, 0.546), 'Visual': {'Model': 'models/vegetation/swamp_tree_a'}}, '1162504732.4dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-156.623, -31.079, -2.0), 'Scale': VBase3(0.546, 0.546, 0.546), 'Visual': {'Model': 'models/vegetation/swamp_tree_a'}}, '1162504754.37dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-76.495, -286.104, -2.0), 'Scale': VBase3(0.546, 0.546, 0.546), 'Visual': {'Model': 'models/vegetation/swamp_tree_a'}}, '1162504802.04dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-25.52, 27.068, -2.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504808.48dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-141.043, 0.0, 0.0), 'Pos': Point3(-21.642, 11.532, -2.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots'}}, '1162504824.68dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-31.21, 4.01, -2.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_tree_thin'}}, '1162505050.93dzlu': 
{'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(190.77, -487.635, -1.37), 'Scale': VBase3(1.454, 1.454, 1.454), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162505128.43dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(69.079, 0.0, 0.0), 'Pos': Point3(191.501, -487.204, -0.256), 'Scale': VBase3(1.114, 1.114, 1.114), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162505210.81dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(43.056, 0.0, 0.0), 'Pos': Point3(191.159, -481.078, -1.405), 'Scale': VBase3(0.935, 0.935, 0.935), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162505214.28dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(46.454, 0.0, 0.0), 'Pos': Point3(190.649, -479.107, -0.431), 'Scale': VBase3(0.893, 0.893, 0.893), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162505293.49dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-10.798, 0.0, 0.0), 'Pos': Point3(191.62, -486.729, -0.14), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162505296.51dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(46.454, 0.0, 0.0), 'Pos': Point3(190.472, -488.612, -0.203), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162505308.45dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(46.454, 0.0, 0.0), 'Pos': Point3(185.497, -478.535, -2.29), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162505330.64dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(46.454, 0.0, 0.0), 'Pos': Point3(294.657, -542.887, -0.842), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162505333.48dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(46.454, 0.0, 0.0), 'Pos': 
Point3(300.606, -545.852, -0.915), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162575738.25dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(92.147, 5.514, -7.297), 'Pos': Point3(-203.283, -515.135, 5.177), 'Scale': VBase3(1.794, 1.794, 1.794), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162575755.71dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(148.307, 4.157, -19.181), 'Pos': Point3(-249.184, -557.096, 12.971), 'Scale': VBase3(2.029, 2.029, 2.029), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162575900.62dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-17.49, 0.0, 0.0), 'Pos': Point3(-324.384, -1095.936, -3.407), 'Scale': VBase3(1.811, 1.811, 1.811), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots'}}, '1162576043.68dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(0.0, 0.0, -3.114), 'Pos': Point3(121.584, -137.254, -2.913), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.47999998927116394, 0.5699999928474426, 0.5600000023841858, 1.0), 'Model': 'models/vegetation/swamp_tree_a'}}, '1162576078.71dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(145.113, -134.736, 0.0), 'Scale': VBase3(0.525, 0.525, 0.525), 'Visual': {'Model': 'models/vegetation/swamp_tree_b'}}, '1162576335.14dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(315.575, -271.262, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1162576483.87dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(301.403, -323.476, 0.0), 'Scale': VBase3(1.869, 1.869, 1.869), 'Visual': {'Model': 
'models/vegetation/swamp_tree_roots_canopy'}}, '1162576530.06dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(482.967, -204.012, 0.0), 'Scale': VBase3(1.869, 1.869, 1.869), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1162576533.54dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(290.001, -326.597, -4.449), 'Scale': VBase3(1.869, 1.869, 1.869), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1162576597.46dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(324.318, -253.808, 54.758), 'Scale': VBase3(1.869, 1.869, 1.869), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1162576602.98dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(277.52, -268.28, -3.084), 'Scale': VBase3(1.869, 1.869, 1.869), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1162576619.82dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(0.0, -3.866, 0.0), 'Pos': Point3(354.38, -394.409, -4.883), 'Scale': VBase3(1.869, 1.869, 1.869), 'Visual': {'Color': (0.800000011920929, 0.6000000238418579, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1162576649.89dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(0.0, -3.866, 0.0), 'Pos': Point3(349.251, -444.049, 5.853), 'Scale': VBase3(1.869, 1.869, 1.869), 'Visual': {'Color': (0.75, 0.9300000071525574, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1162576682.79dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(0.0, 3.651, 3.614), 'Pos': Point3(386.805, -407.068, -26.618), 'Scale': VBase3(1.869, 1.869, 1.869), 'Visual': {'Color': (0.800000011920929, 0.6000000238418579, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1162576722.81dzlu': {'Type': 
'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(79.561, 4.221, -2.928), 'Pos': Point3(363.335, -465.418, -1.41), 'Scale': VBase3(1.34, 1.34, 1.34), 'Visual': {'Color': (0.800000011920929, 0.6000000238418579, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1162576805.98dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(0.0, -1.758, 0.0), 'Pos': Point3(-86.915, -90.111, -2.123), 'Scale': VBase3(0.797, 0.797, 0.797), 'Visual': {'Color': (0.47999998927116394, 0.5699999928474426, 0.5600000023841858, 1.0), 'Model': 'models/vegetation/swamp_tree_canopy'}}, '1162576816.26dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-108.078, -105.697, -62.869), 'Scale': VBase3(0.936, 0.936, 0.936), 'Visual': {'Model': 'models/vegetation/swamp_tree_canopy'}}, '1162576895.73dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(0.0, 0.06, 0.0), 'Pos': Point3(-41.678, -102.308, -8.363), 'Scale': VBase3(0.736, 0.736, 0.736), 'Visual': {'Model': 'models/vegetation/swamp_tree_canopy'}}, '1162576950.03dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(0.0, -1.758, 0.0), 'Pos': Point3(-116.174, -26.237, -3.135), 'Scale': VBase3(0.643, 0.643, 0.643), 'Visual': {'Model': 'models/vegetation/swamp_tree_canopy'}}, '1162576969.61dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-24.171, -1.604, -0.72), 'Pos': Point3(-29.692, 31.725, 0.102), 'Scale': VBase3(0.643, 0.643, 0.643), 'Visual': {'Color': (0.75, 0.9300000071525574, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_canopy'}}, '1162576986.5dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-24.171, -1.604, -0.72), 'Pos': Point3(-182.008, 71.806, -63.602), 'Scale': VBase3(0.643, 0.643, 0.643), 'Visual': {'Model': 'models/vegetation/swamp_tree_canopy'}}, '1162576998.25dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-24.171, -1.604, -0.72), 
'Pos': Point3(-181.717, 29.795, -70.83), 'Scale': VBase3(0.643, 0.643, 0.643), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_canopy'}}, '1162577044.42dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-41.182, -1.323, -1.157), 'Pos': Point3(-82.963, -195.76, 0.0), 'Scale': VBase3(0.732, 0.732, 0.732), 'Visual': {'Color': (0.47999998927116394, 0.5699999928474426, 0.5600000023841858, 1.0), 'Model': 'models/vegetation/swamp_tree_canopy'}}, '1162577094.82dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-39.645, -240.447, -20.215), 'Scale': VBase3(0.956, 0.956, 0.956), 'Visual': {'Model': 'models/vegetation/swamp_tree_canopy'}}, '1162577152.48dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-8.806, -279.214, -36.906), 'Scale': VBase3(0.854, 0.854, 0.854), 'Visual': {'Model': 'models/vegetation/swamp_tree_canopy'}}, '1162577320.84dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(108.215, 5.123, -3.235), 'Pos': Point3(449.692, -619.545, -0.489), 'Scale': VBase3(1.84, 1.84, 1.84), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162577355.15dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(437.004, -609.408, 20.708), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1162577566.42dzlu': {'Type': 'Swamp_props_small', 'DisableCollision': False, 'Hpr': VBase3(-158.68, 0.0, 0.0), 'Pos': Point3(379.469, -503.431, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/swamp_boat'}}, '1162577813.76dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(285.231, -543.089, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 
'models/vegetation/swamp_bush_a'}}, '1162577835.12dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(283.706, -542.153, -0.659), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162577839.92dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(286.137, -546.313, -1.246), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162577843.75dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(276.088, -544.595, -0.828), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162577867.18dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(370.702, -485.211, 0.0), 'Scale': VBase3(2.209, 2.209, 2.209), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162577870.71dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(367.014, -487.09, -2.071), 'Scale': VBase3(1.998, 1.998, 1.998), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162577884.96dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(370.107, -479.469, 0.0), 'Scale': VBase3(2.209, 2.209, 2.209), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162577982.15dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(276.339, -548.121, -1.189), 'Scale': VBase3(1.583, 1.583, 1.583), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162577996.29dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-72.138, -11.409, 0.0), 'Pos': Point3(280.375, -555.086, -1.704), 'Scale': VBase3(1.583, 1.583, 1.583), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578006.14dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 
'Hpr': VBase3(-40.563, 0.0, 0.0), 'Pos': Point3(276.393, -550.524, 0.0), 'Scale': VBase3(1.399, 1.399, 1.399), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578016.5dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-40.563, 0.0, 0.0), 'Pos': Point3(292.14, -570.623, -1.631), 'Scale': VBase3(1.399, 1.399, 1.399), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578019.04dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-40.563, 0.0, 0.0), 'Pos': Point3(294.999, -577.17, -0.541), 'Scale': VBase3(1.399, 1.399, 1.399), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578024.62dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-40.563, 0.0, 0.0), 'Pos': Point3(320.253, -629.48, 0.0), 'Scale': VBase3(1.399, 1.399, 1.399), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578025.75dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-40.563, 0.0, 0.0), 'Pos': Point3(323.649, -628.406, -1.866), 'Scale': VBase3(1.399, 1.399, 1.399), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578026.78dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-30.189, 0.0, 0.0), 'Pos': Point3(321.532, -628.769, -1.643), 'Scale': VBase3(2.112, 2.112, 2.112), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578038.0dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-38.112, 0.0, -13.852), 'Pos': Point3(316.948, -625.273, -1.35), 'Scale': VBase3(1.198, 1.198, 1.198), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578047.75dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-125.812, 0.0, 0.0), 'Pos': Point3(311.178, -627.427, -1.526), 'Scale': VBase3(1.198, 1.198, 1.198), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578056.12dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-38.112, 0.0, 0.0), 'Pos': Point3(309.187, -573.984, 
-1.051), 'Scale': VBase3(1.198, 1.198, 1.198), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578057.4dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-38.112, 0.0, 0.0), 'Pos': Point3(309.738, -573.679, -0.606), 'Scale': VBase3(1.198, 1.198, 1.198), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578070.86dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-38.112, 0.0, 0.0), 'Pos': Point3(283.348, -383.938, -1.192), 'Scale': VBase3(1.198, 1.198, 1.198), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578075.4dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-38.112, 0.0, 0.0), 'Pos': Point3(282.696, -382.364, -0.641), 'Scale': VBase3(1.198, 1.198, 1.198), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578081.51dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-40.563, 0.0, 0.0), 'Pos': Point3(332.729, -462.495, -1.052), 'Scale': VBase3(1.399, 1.399, 1.399), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578094.39dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-40.563, 0.0, 0.0), 'Pos': Point3(334.295, -461.528, -0.914), 'Scale': VBase3(1.399, 1.399, 1.399), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578096.17dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-106.705, 0.0, 0.0), 'Pos': Point3(333.465, -465.382, -0.852), 'Scale': VBase3(1.399, 1.399, 1.399), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578108.39dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-61.921, 0.0, 0.0), 'Pos': Point3(340.54, -464.982, -1.988), 'Scale': VBase3(1.931, 1.931, 1.506), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578113.9dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-124.097, 0.0, 0.0), 'Pos': Point3(331.864, -453.649, -1.143), 'Scale': VBase3(1.199, 1.199, 1.166), 'Visual': 
{'Model': 'models/vegetation/swamp_bush_a'}}, '1162578121.26dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-124.097, 0.0, 0.0), 'Pos': Point3(284.828, -383.294, 0.0), 'Scale': VBase3(1.152, 1.152, 1.152), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578126.61dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-74.895, 0.0, 0.0), 'Pos': Point3(289.473, -386.795, -1.012), 'Scale': VBase3(1.152, 1.152, 1.152), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162578259.32dzlu': {'Type': 'Rock', 'DisableCollision': True, 'Hpr': VBase3(164.725, 1.618, -0.376), 'Pos': Point3(437.692, -501.002, -4.014), 'Scale': VBase3(3.887, 3.887, 3.887), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/props/rock_group_4_sphere'}}, '1162578426.64dzlu': {'Type': 'Rock', 'DisableCollision': True, 'Hpr': VBase3(128.086, 0.0, 0.0), 'Pos': Point3(478.996, -632.527, -0.866), 'Scale': VBase3(1.523, 1.523, 1.523), 'Visual': {'Color': (0.45, 0.54, 0.82, 1.0), 'Model': 'models/props/zz_dont_use_rocks_Dk_group_2F'}}, '1162578793.03dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-78.746, 3.272, 3.365), 'Pos': Point3(510.015, -662.867, -2.009), 'Scale': VBase3(2.049, 2.049, 2.049), 'Visual': {'Color': (0.7900000214576721, 0.6499999761581421, 0.5299999713897705, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162578861.75dzlu': {'Type': 'Rock', 'DisableCollision': False, 'Hpr': VBase3(-11.629, -3.641, -0.749), 'Pos': Point3(525.404, -675.711, -0.473), 'Scale': VBase3(2.523, 2.523, 2.523), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/props/rock_group_2_sphere'}}, '1162578920.61dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(479.384, -666.297, 21.059), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.28999999165534973, 
0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1162578961.18dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(541.198, -708.802, 27.942), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1162578998.36dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-28.111, 4.951, 2.127), 'Pos': Point3(583.253, -677.403, 15.897), 'Scale': VBase3(1.911, 1.911, 1.911), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162579001.46dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-112.519, 9.832, 4.673), 'Pos': Point3(546.451, -711.086, -5.558), 'Scale': VBase3(2.128, 2.128, 2.128), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162579041.9dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-32.947, 5.173, -2.425), 'Pos': Point3(587.067, -740.43, 17.701), 'Scale': VBase3(2.128, 2.128, 2.128), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162579056.73dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-32.947, 5.173, -2.425), 'Pos': Point3(645.594, -605.485, 0.0), 'Scale': VBase3(2.128, 2.128, 2.128), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162579058.98dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-32.947, 5.173, -2.425), 'Pos': Point3(611.287, -573.076, -1.942), 'Scale': VBase3(2.128, 2.128, 2.128), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, 
'1162579060.73dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-28.111, 4.951, -2.852), 'Pos': Point3(636.011, -642.186, 0.0), 'Scale': VBase3(2.128, 2.128, 2.128), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162579199.21dzlu': {'Type': 'Rock', 'DisableCollision': False, 'Hpr': VBase3(47.744, -6.893, -24.723), 'Pos': Point3(580.916, -732.395, 8.128), 'Scale': VBase3(4.334, 4.334, 4.334), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/props/rock_group_1_sphere'}}, '1162579331.75dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-73.655, 5.504, 1.529), 'Pos': Point3(632.005, -818.121, 0.0), 'Scale': VBase3(2.296, 2.296, 2.296), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162579341.32dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-73.655, 5.504, 1.529), 'Pos': Point3(610.397, -807.52, 3.503), 'Scale': VBase3(2.296, 2.296, 2.296), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162579346.82dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-74.018, 3.433, -9.889), 'Pos': Point3(611.612, -780.956, 6.711), 'Scale': VBase3(2.296, 2.296, 2.296), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162579382.68dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-73.655, 5.504, 1.529), 'Pos': Point3(587.418, -775.26, 0.386), 'Scale': VBase3(2.758, 2.758, 2.758), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162579401.92dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-33.558, -9.216, -2.447), 'Pos': 
Point3(578.094, -713.307, 12.02), 'Scale': VBase3(3.5, 3.5, 3.5), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1162579450.07dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-32.512, 4.664, 0.0), 'Pos': Point3(593.209, -787.988, 3.967), 'Scale': VBase3(2.178, 2.178, 2.178), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1162579477.28dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(29.511, -3.952, 7.392), 'Pos': Point3(541.052, -765.265, 23.137), 'Scale': VBase3(2.178, 2.178, 2.178), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1162579495.23dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(29.511, -3.952, 7.392), 'Pos': Point3(586.636, -636.216, 0.0), 'Scale': VBase3(2.178, 2.178, 2.178), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1162579496.87dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-31.653, -8.376, 0.125), 'Pos': Point3(532.338, -673.668, 24.436), 'Scale': VBase3(2.178, 2.178, 2.178), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1162579552.36dzlu': {'Type': 'Rock', 'DisableCollision': False, 'Hpr': VBase3(-51.838, 0.0, 0.0), 'Pos': Point3(564.672, -764.757, 0.0), 'Scale': VBase3(4.098, 4.098, 4.098), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/props/rock_1_sphere'}}, '1162579651.42dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-32.512, -5.078, 0.0), 'Pos': Point3(497.166, -662.155, -17.54), 
'Scale': VBase3(2.178, 2.178, 2.178), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1162579990.07dzlu': {'Type': 'Rock', 'DisableCollision': False, 'Hpr': VBase3(66.911, -2.792, 6.518), 'Pos': Point3(207.844, -185.499, -2.0), 'Scale': VBase3(0.659, 0.659, 0.659), 'Visual': {'Color': (0.25999999046325684, 0.3499999940395355, 0.38999998569488525, 1.0), 'Model': 'models/props/rock_2_sphere'}}, '1162580050.34dzlu': {'Type': 'Rock', 'DisableCollision': True, 'Hpr': VBase3(66.911, -2.792, 6.518), 'Pos': Point3(243.967, -201.603, -3.383), 'Scale': VBase3(3.035, 3.035, 3.035), 'Visual': {'Color': (0.25999999046325684, 0.3499999940395355, 0.38999998569488525, 1.0), 'Model': 'models/props/rock_2_sphere'}}, '1162580134.26dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-160.943, 112.688, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.44999998807907104, 0.5400000214576721, 0.8199999928474426, 1.0), 'Model': 'models/vegetation/swamp_tree_thin'}}, '1162580160.11dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-44.49, 0.0, 0.0), 'Pos': Point3(-156.604, 163.007, 0.0), 'Scale': VBase3(0.8, 0.8, 0.8), 'Visual': {'Color': (0.44999998807907104, 0.5400000214576721, 0.8199999928474426, 1.0), 'Model': 'models/vegetation/swamp_tree_thin'}}, '1162580232.92dzlu': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(24.147, 0.0, 0.0), 'Pos': Point3(-18.696, -38.561, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.47999998927116394, 0.5699999928474426, 0.5600000023841858, 1.0), 'Model': 'models/vegetation/swamp_tree_thin'}}, '1162580882.43dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-141.382, 0.095, 1.217), 'Pos': Point3(356.264, -488.07, 0.0), 'Scale': VBase3(0.804, 0.804, 0.804), 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 
0.6000000238418579, 1.0), 'Model': 'models/vegetation/swamp_tree_a'}}, '1162580923.68dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-164.53, -0.392, 1.157), 'Pos': Point3(78.234, -365.675, 0.0), 'Scale': VBase3(0.804, 0.804, 0.804), 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0), 'Model': 'models/vegetation/swamp_tree_a'}}, '1162581117.26dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(398.317, -600.067, -2.702), 'Scale': VBase3(1.469, 1.469, 1.469), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581120.68dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(405.001, -605.904, -2.763), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581126.39dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-34.585, 0.0, 0.0), 'Pos': Point3(404.004, -604.363, -0.572), 'Scale': VBase3(0.795, 0.795, 0.795), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581133.0dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(395.308, -597.602, -1.009), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581141.04dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(384.9, -610.758, -1.807), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581144.36dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(389.232, -621.779, -1.571), 'Scale': VBase3(1.668, 1.668, 1.668), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581145.98dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(387.319, -620.824, 0.0), 'Scale': VBase3(1.149, 
1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581162.73dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(395.799, -619.491, -2.062), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581164.37dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(366.265, -648.756, 0.0), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581164.98dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(371.651, -649.648, -2.216), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581166.56dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(407.408, -608.957, 0.0), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581167.39dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(403.408, -607.635, -0.981), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581168.25dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(412.959, -610.534, 0.0), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581168.79dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(418.696, -611.483, -1.508), 'Scale': VBase3(0.86, 0.86, 0.86), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581169.65dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(406.897, -618.523, -1.178), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, 
'1162581170.36dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(409.577, -609.645, 0.0), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581171.11dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(411.358, -611.579, -1.948), 'Scale': VBase3(1.149, 1.149, 0.93), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581171.96dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(409.626, -611.292, 0.0), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581173.79dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(407.546, -610.948, 0.0), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581176.57dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(424.085, -612.375, 0.0), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581177.9dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(413.632, -616.473, -3.38), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581181.67dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(375.524, -597.658, -1.887), 'Scale': VBase3(0.906, 0.906, 0.906), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581182.32dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(380.798, -605.211, -1.342), 'Scale': VBase3(1.66, 1.66, 1.66), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581183.29dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': 
VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(390.45, -599.788, -0.824), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581183.73dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(390.136, -604.776, -1.205), 'Scale': VBase3(1.085, 1.085, 1.085), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581184.18dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(396.887, -598.459, 0.0), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581185.78dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(346.427, -619.499, -2.216), 'Scale': VBase3(2.001, 2.001, 2.001), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581187.11dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(290.641, -614.373, -1.295), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581188.48dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, -20.885), 'Pos': Point3(292.543, -612.486, -1.215), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581188.82dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(291.46, -614.799, 0.0), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581189.76dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, -12.712, 0.0), 'Pos': Point3(295.874, -614.075, -1.646), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581190.51dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 28.677), 'Pos': Point3(293.445, -615.906, 
-0.884), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581192.04dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(292.871, -599.964, -1.685), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581194.96dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(435.95, -631.628, 0.0), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581195.62dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(446.847, -628.558, 0.0), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581196.4dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(414.889, -679.682, 0.0), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581198.9dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-36.591, -4.63, 0.0), 'Pos': Point3(369.75, -647.131, -1.782), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581199.73dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-98.678, 0.0, 0.0), 'Pos': Point3(366.458, -646.173, -1.448), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581201.07dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 20.788, 0.0), 'Pos': Point3(364.402, -649.497, -1.48), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581201.68dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-75.303, 0.0, 0.0), 'Pos': Point3(366.828, -649.107, -1.439), 'Scale': VBase3(0.781, 0.781, 0.781), 'Visual': {'Model': 
'models/vegetation/swamp_bush_a'}}, '1162581202.34dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-53.777, 0.0, 0.0), 'Pos': Point3(364.625, -649.767, 0.0), 'Scale': VBase3(1.149, 1.149, 1.149), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581230.31dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-30.189, 0.0, 0.0), 'Pos': Point3(366.756, -488.935, 0.0), 'Scale': VBase3(1.273, 1.273, 1.273), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581230.92dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-30.189, 0.0, 0.0), 'Pos': Point3(368.096, -489.157, 0.0), 'Scale': VBase3(1.273, 1.273, 1.273), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162581234.92dzlu': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-30.189, 0.0, 0.0), 'Pos': Point3(375.018, -513.072, -1.199), 'Scale': VBase3(1.273, 1.273, 1.273), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1162600301.39sdnaik': {'Type': 'Port Collision Sphere', 'Name': '', 'Hpr': VBase3(-21.848, 0.0, 0.0), 'Pos': Point3(174.854, -853.185, -2.0), 'Scale': VBase3(1079.46, 1079.46, 1079.46), 'Visual': {'Color': (0.5, 0.5, 1.0, 0.2), 'Model': 'models/misc/smiley'}}, '1163119773.31sdnaik': {'Type': 'Locator Node', 'Name': 'portal_exterior_1', 'Hpr': VBase3(-180.0, 0.0, 0.0), 'Pos': Point3(474.323, -559.42, -0.943), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1163119776.08sdnaik': {'Type': 'Locator Node', 'Name': 'portal_exterior_2', 'Hpr': VBase3(-122.603, 0.0, 0.0), 'Pos': Point3(107.301, -127.258, 0.205), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1163130907.42sdnaik': {'Type': 'Locator Node', 'Name': 'portal_exterior_1', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(471.383, -559.794, -2.597), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1163130908.98sdnaik': {'Type': 'Locator Node', 'Name': 'portal_exterior_2', 'Hpr': VBase3(-101.237, 0.0, 0.0), 'Pos': Point3(103.631, -123.494, -2.529), 'Scale': VBase3(1.0, 1.0, 1.0)}, 
'1163462918.28sdnaik': {'Type': 'Player Spawn Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Index': -1, 'Pos': Point3(194.884, -615.123, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'All', 'Visual': {'Model': 'models/misc/smiley'}}, '1171314304.0dxschafe': {'Type': 'Building Exterior', 'File': 'Cuba_jail_interior', 'ExtUid': '1171314304.0dxschafe0', 'Holiday': '', 'Hpr': VBase3(58.558, 0.0, 1.06), 'Objects': {'1201041678.38dxschafe': {'Type': 'Door Locator Node', 'Name': 'door_locator', 'Hpr': VBase3(-180.0, 0.0, 0.0), 'Pos': Point3(12.899, -22.494, 0.283), 'Scale': VBase3(1.0, 1.0, 1.0)}}, 'Pos': Point3(-333.987, -619.709, 27.958), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Name': '', 'Color': (0.7, 0.7, 0.7, 1.0), 'Door': 'models/buildings/shanty_guildhall_door', 'Interior': 'models/buildings/navy_jail_interior', 'Model': 'models/buildings/jail_exterior', 'SignImage': 'models/buildings/sign1_eng_a_icon_doctor'}}, '1171315072.0dxschafe': {'Type': 'Tree - Animated', 'DisableCollision': False, 'Hpr': VBase3(-6.279, -0.862, 7.781), 'Pos': Point3(-199.537, -773.193, 2.416), 'Scale': VBase3(1.0, 1.0, 1.0), 'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle', 'Attach': ['trunk', 'def_trunk_attach'], 'Model': 'models/vegetation/palm_leaf_a_hi', 'PartName': 'leaf'}}}, 'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle', 'Model': 'models/vegetation/palm_trunk_a_hi', 'PartName': 'trunk'}}, '1171315200.0dxschafe': {'Type': 'Tree - Animated', 'DisableCollision': False, 'Hpr': VBase3(-76.672, 5.284, 1.846), 'Pos': Point3(-285.512, -606.503, 25.88), 'Scale': VBase3(0.829, 0.829, 0.829), 'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle', 'Attach': ['trunk', 'def_trunk_attach'], 'Model': 'models/vegetation/palm_leaf_a_hi', 'PartName': 'leaf'}}}, 'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle', 'Model': 'models/vegetation/palm_trunk_a_hi', 'PartName': 'trunk'}}, 
'1171315200.0dxschafe0': {'Type': 'Tree - Animated', 'DisableCollision': True, 'Hpr': VBase3(-35.024, 12.51, 17.522), 'Pos': Point3(-202.949, -789.058, 0.554), 'Scale': VBase3(1.0, 1.0, 1.0), 'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle', 'Attach': ['trunk', 'def_trunk_attach'], 'Model': 'models/vegetation/palm_leaf_a_hi', 'PartName': 'leaf'}}}, 'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle', 'Model': 'models/vegetation/palm_trunk_a_hi', 'PartName': 'trunk'}}, '1171315200.0dxschafe1': {'Type': 'Tree - Animated', 'DisableCollision': False, 'Hpr': VBase3(-17.26, 5.387, 7.542), 'Pos': Point3(-193.68, -788.419, 1.426), 'Scale': VBase3(0.744, 0.744, 0.744), 'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle', 'Attach': ['trunk', 'def_trunk_attach'], 'Model': 'models/vegetation/palm_leaf_a_hi', 'PartName': 'leaf'}}}, 'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle', 'Color': (1.0, 0.93, 0.8509803921568627, 1.0), 'Model': 'models/vegetation/palm_trunk_a_hi', 'PartName': 'trunk'}}, '1171315200.0dxschafe2': {'Type': 'Tree - Animated', 'DisableCollision': True, 'Hpr': VBase3(-20.254, -3.793, 13.121), 'Pos': Point3(-192.776, -633.153, 11.116), 'Scale': VBase3(1.0, 1.0, 1.0), 'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle', 'Attach': ['trunk', 'def_trunk_attach'], 'Model': 'models/vegetation/palm_leaf_a_hi', 'PartName': 'leaf'}}}, 'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle', 'Model': 'models/vegetation/palm_trunk_a_hi', 'PartName': 'trunk'}}, '1171315200.0dxschafe3': {'Type': 'Tree - Animated', 'DisableCollision': False, 'Hpr': VBase3(-36.173, -4.639, 19.768), 'Pos': Point3(-347.684, -752.909, 24.386), 'Scale': VBase3(1.0, 1.0, 1.0), 'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle', 'Attach': ['trunk', 'def_trunk_attach'], 'Model': 'models/vegetation/palm_leaf_a_hi', 'PartName': 'leaf'}}}, 
'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle', 'Color': (0.55, 0.67, 0.6901960784313725, 1.0), 'Model': 'models/vegetation/palm_trunk_a_hi', 'PartName': 'trunk'}}, '1171315200.0dxschafe4': {'Type': 'Tree - Animated', 'DisableCollision': True, 'Hpr': VBase3(74.217, 13.753, 4.957), 'Pos': Point3(-379.833, -728.54, 26.258), 'Scale': VBase3(1.176, 1.176, 1.176), 'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle', 'Attach': ['trunk', 'def_trunk_attach'], 'Model': 'models/vegetation/palm_leaf_a_hi', 'PartName': 'leaf'}}}, 'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle', 'Color': (0.71, 0.67, 0.5686274509803921, 1.0), 'Model': 'models/vegetation/palm_trunk_a_hi', 'PartName': 'trunk'}}, '1171315200.0dxschafe5': {'Type': 'Tree - Animated', 'DisableCollision': False, 'Hpr': VBase3(76.729, 7.622, 1.792), 'Pos': Point3(-339.218, -732.857, 23.816), 'Scale': VBase3(1.0, 1.0, 1.0), 'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle', 'Attach': ['trunk', 'def_trunk_attach'], 'Model': 'models/vegetation/palm_leaf_a_hi', 'PartName': 'leaf'}}}, 'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle', 'Color': (0.93, 0.87, 0.7764705882352941, 1.0), 'Model': 'models/vegetation/palm_trunk_a_hi', 'PartName': 'trunk'}}, '1171315200.0dxschafe6': {'Type': 'Tree - Animated', 'DisableCollision': False, 'Hpr': VBase3(-1.294, 5.793, 12.616), 'Pos': Point3(-334.943, -742.422, 22.706), 'Scale': VBase3(0.856, 0.856, 0.856), 'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle', 'Attach': ['trunk', 'def_trunk_attach'], 'Model': 'models/vegetation/palm_leaf_a_hi', 'PartName': 'leaf'}}}, 'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle', 'Model': 'models/vegetation/palm_trunk_a_hi', 'PartName': 'trunk'}}, '1171315200.0dxschafe7': {'Type': 'Tree - Animated', 'DisableCollision': True, 'Hpr': VBase3(0.0, 0.0, 7.829), 'Pos': Point3(-293.975, -605.243, 26.434), 'Scale': 
VBase3(1.0, 1.0, 1.0), 'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle', 'Attach': ['trunk', 'def_trunk_attach'], 'Model': 'models/vegetation/palm_leaf_a_hi', 'PartName': 'leaf'}}}, 'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle', 'Color': (1.0, 1.0, 0.8509803921568627, 1.0), 'Model': 'models/vegetation/palm_trunk_a_hi', 'PartName': 'trunk'}}, '1171315200.0dxschafe8': {'Type': 'Tree - Animated', 'DisableCollision': True, 'Hpr': VBase3(8.838, -2.369, 4.157), 'Pos': Point3(-372.098, -650.87, 27.918), 'Scale': VBase3(1.0, 1.0, 1.0), 'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle', 'Attach': ['trunk', 'def_trunk_attach'], 'Model': 'models/vegetation/palm_leaf_a_hi', 'PartName': 'leaf'}}}, 'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle', 'Model': 'models/vegetation/palm_trunk_a_hi', 'PartName': 'trunk'}}, '1171315200.0dxschafe9': {'Type': 'Tree - Animated', 'DisableCollision': False, 'Hpr': VBase3(-49.999, 9.933, 5.223), 'Pos': Point3(-374.047, -662.824, 27.26), 'Scale': VBase3(0.72, 0.72, 0.72), 'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle', 'Attach': ['trunk', 'def_trunk_attach'], 'Model': 'models/vegetation/palm_leaf_a_hi', 'PartName': 'leaf'}}}, 'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle', 'Color': (0.75, 0.72, 0.6352941176470588, 1.0), 'Model': 'models/vegetation/palm_trunk_a_hi', 'PartName': 'trunk'}}, '1171315200.0dxschafe:': {'Type': 'Tree - Animated', 'DisableCollision': True, 'Hpr': VBase3(-101.309, -4.414, -1.533), 'Pos': Point3(-206.707, -559.092, 18.329), 'Scale': VBase3(1.0, 1.0, 1.0), 'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle', 'Attach': ['trunk', 'def_trunk_attach'], 'Model': 'models/vegetation/palm_leaf_a_hi', 'PartName': 'leaf'}}}, 'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle', 'Model': 'models/vegetation/palm_trunk_a_hi', 'PartName': 'trunk'}}, 
'1171315200.0dxschafe;': {'Type': 'Tree - Animated', 'DisableCollision': True, 'Hpr': VBase3(0.0, 0.0, 13.303), 'Pos': Point3(-204.403, -564.043, 17.606), 'Scale': VBase3(1.0, 1.0, 1.0), 'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle', 'Attach': ['trunk', 'def_trunk_attach'], 'Model': 'models/vegetation/palm_leaf_a_hi', 'PartName': 'leaf'}}}, 'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle', 'Color': (1.0, 1.0, 0.8509803921568627, 1.0), 'Model': 'models/vegetation/palm_trunk_a_hi', 'PartName': 'trunk'}}, '1171315200.0dxschafe<': {'Type': 'Tree - Animated', 'DisableCollision': False, 'Hpr': VBase3(-48.156, -6.558, 4.285), 'Pos': Point3(-157.612, -591.007, 3.037), 'Scale': VBase3(0.74, 0.74, 0.74), 'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle', 'Attach': ['trunk', 'def_trunk_attach'], 'Model': 'models/vegetation/palm_leaf_a_hi', 'PartName': 'leaf'}}}, 'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle', 'Model': 'models/vegetation/palm_trunk_a_hi', 'PartName': 'trunk'}}, '1171315200.0dxschafe=': {'Type': 'Tree - Animated', 'DisableCollision': False, 'Hpr': VBase3(8.576, 0.0, 20.786), 'Pos': Point3(-159.203, -583.839, 3.061), 'Scale': VBase3(1.0, 1.0, 1.0), 'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle', 'Attach': ['trunk', 'def_trunk_attach'], 'Model': 'models/vegetation/palm_leaf_a_hi', 'PartName': 'leaf'}}}, 'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle', 'Color': (1.0, 1.0, 0.8509803921568627, 1.0), 'Model': 'models/vegetation/palm_trunk_a_hi', 'PartName': 'trunk'}}, '1171315712.0dxschafe': {'Type': 'Well', 'DisableCollision': False, 'Hpr': VBase3(48.58, 0.0, 0.0), 'Pos': Point3(-282.536, -681.893, 24.96), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/wellA'}}, '1171315840.0dxschafe': {'Type': 'Trellis', 'DisableCollision': True, 'Hpr': VBase3(-13.255, -8.917, 0.0), 'Pos': Point3(-340.755, -651.328, 
24.456), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/trellis_d'}}, '1171315968.0dxschafe': {'Type': 'Swamp_props_small', 'DisableCollision': False, 'Hpr': VBase3(44.133, -0.124, 0.95), 'Pos': Point3(-299.978, -615.327, 26.982), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/swamp_bench'}}, '1171315968.0dxschafe1': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-284.632, -885.184, -15.189), 'Scale': VBase3(2.831, 2.831, 2.831), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1171316096.0dxschafe': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(0.0, 0.0, 3.63), 'Pos': Point3(-264.811, -579.308, -4.364), 'Scale': VBase3(2.831, 2.831, 2.831), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1171316096.0dxschafe0': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(0.0, 0.0, -2.013), 'Pos': Point3(-175.505, -498.597, -37.061), 'Scale': VBase3(3.643, 3.643, 3.643), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1171316096.0dxschafe1': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(0.0, 0.0, -2.013), 'Pos': Point3(-82.649, -424.379, -34.183), 'Scale': VBase3(3.643, 3.643, 3.643), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1171316224.0dxschafe': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(0.0, 0.0, -2.013), 'Pos': Point3(-242.122, -980.327, -54.296), 'Scale': VBase3(3.643, 3.643, 3.643), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1171316224.0dxschafe0': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(0.0, 0.0, -2.013), 'Pos': Point3(-311.461, -827.956, -42.249), 'Scale': VBase3(3.643, 3.643, 3.643), 'Visual': {'Model': 'models/vegetation/swamp_tree_roots_canopy'}}, '1171316224.0dxschafe1': {'Type': 'Rock', 'DisableCollision': False, 'Hpr': VBase3(-33.626, 0.0, 0.0), 'Objects': {}, 'Pos': 
Point3(-201.285, -785.155, 1.43), 'Scale': VBase3(1.935, 1.935, 1.935), 'Visual': {'Model': 'models/props/rock_group_1_floor'}}, '1171316224.0dxschafe2': {'Type': 'Rock', 'DisableCollision': False, 'Hpr': VBase3(-33.626, 0.0, 0.0), 'Objects': {'1184718841.62kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'GridPos': Point3(-341.25, -726.337, 26.104), 'Hpr': VBase3(165.261, 0.0, 0.0), 'Pos': Point3(-5.829, 5.141, 1.051), 'Scale': VBase3(0.725, 0.616, 0.689), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}}, 'Pos': Point3(-337.368, -740.866, 24.07), 'Scale': VBase3(1.935, 1.935, 1.935), 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0), 'Model': 'models/props/rock_group_1_floor'}}, '1171316224.0dxschafe3': {'Type': 'Rock', 'DisableCollision': False, 'Hpr': VBase3(-25.05, 0.0, 0.0), 'Pos': Point3(-158.907, -596.686, 2.573), 'Scale': VBase3(1.935, 1.935, 1.935), 'Visual': {'Model': 'models/props/rock_group_1_floor'}}, '1171316224.0dxschafe4': {'Type': 'Rock', 'DisableCollision': True, 'Hpr': VBase3(154.877, 2.608, -3.7), 'Pos': Point3(-284.701, -836.427, 10.167), 'Scale': VBase3(1.222, 1.222, 1.222), 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0), 'Model': 'models/props/rock_group_1_sphere'}}, '1171316224.0dxschafe5': {'Type': 'Rock', 'DisableCollision': True, 'Hpr': VBase3(94.132, -0.065, -0.006), 'Pos': Point3(-265.121, -576.052, 25.379), 'Scale': VBase3(1.935, 1.935, 1.935), 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0), 'Model': 'models/props/rock_group_1_floor'}}, '1171316224.0dxschafe6': {'Type': 'Rock', 'DisableCollision': False, 'Hpr': VBase3(-44.627, 0.0, 0.0), 'Pos': Point3(-293.592, -601.992, 25.908), 'Scale': VBase3(1.165, 1.165, 1.165), 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0), 'Model': 'models/props/rock_group_1_sphere'}}, '1171316352.0dxschafe': {'Type': 
'Rock', 'DisableCollision': True, 'Hpr': VBase3(104.908, 0.301, -3.731), 'Pos': Point3(-374.373, -657.124, 27.514), 'Scale': VBase3(1.124, 1.124, 1.124), 'Visual': {'Color': (0.5, 0.5, 0.5, 1.0), 'Model': 'models/props/rock_group_2_sphere'}}, '1171316480.0dxschafe0': {'Type': 'Rock', 'DisableCollision': True, 'Hpr': VBase3(104.908, 0.301, -3.731), 'Pos': Point3(-349.236, -752.373, 25.045), 'Scale': VBase3(1.53, 1.53, 1.53), 'Visual': {'Color': (0.5, 0.5, 0.5, 1.0), 'Model': 'models/props/rock_group_2_sphere'}}, '1171316480.0dxschafe1': {'Type': 'Rock', 'DisableCollision': False, 'Hpr': VBase3(40.562, 5.236, 8.064), 'Pos': Point3(-194.106, -627.554, 13.373), 'Scale': VBase3(2.034, 2.034, 2.034), 'Visual': {'Model': 'models/props/rock_group_2_sphere'}}, '1171316480.0dxschafe2': {'Type': 'Rock', 'DisableCollision': True, 'Hpr': VBase3(-141.706, -6.318, -7.136), 'Pos': Point3(-177.118, -636.279, 11.289), 'Scale': VBase3(1.572, 1.572, 1.572), 'Visual': {'Model': 'models/props/rock_group_5_floor'}}, '1171316864.0dxschafe': {'Type': 'Building Exterior', 'File': 'cuba_building_int_tailor', 'ExtUid': '1171316864.0dxschafe0', 'Hpr': VBase3(88.811, 0.0, 0.0), 'Objects': {'1201041677.5dxschafe': {'Type': 'Door Locator Node', 'Name': 'door_locator', 'Hpr': VBase3(-180.0, 0.0, 0.0), 'Pos': Point3(0.162, -4.354, 0.599), 'Scale': VBase3(1.0, 1.0, 1.0)}}, 'Pos': Point3(-368.014, -694.339, 28.233), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Name': '', 'Color': (0.85, 0.83, 0.7411764705882353, 1.0), 'Door': 'models/buildings/shanty_guildhall_door', 'Interior': 'models/buildings/interior_shanty_guildhall', 'Model': 'models/buildings/shanty_npc_house_combo_G', 'SignFrame': 'models/buildings/sign1_shanty_a_frame', 'SignImage': 'models/buildings/sign1_eng_a_icon_tailor'}}, '1171316864.0dxschafe1': {'Type': 'Building Exterior', 'File': 'cuba_building_int_tattoo', 'ExtUid': '1171316864.0dxschafe2', 'Hpr': VBase3(67.089, 0.0, 4.171), 'Objects': {'1201041678.91dxschafe': {'Type': 'Door 
Locator Node', 'Name': 'door_locator', 'Hpr': VBase3(-180.0, 0.0, 0.0), 'Pos': Point3(0.313, -4.248, 1.444), 'Scale': VBase3(1.0, 1.0, 1.0)}}, 'Pos': Point3(-165.409, -499.223, 9.61), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Name': '', 'Color': (0.86, 0.86, 0.8588235294117647, 1.0), 'Door': 'models/buildings/shanty_guildhall_door', 'Interior': 'models/buildings/interior_shanty_guildhall', 'Model': 'models/buildings/shanty_npc_house_combo_J', 'SignFrame': 'models/buildings/sign1_shanty_a_frame', 'SignImage': 'models/buildings/sign1_eng_a_icon_tattoo'}}, '1171317248.0dxschafe': {'Type': 'Building Exterior', 'File': '', 'ExtUid': '1171317248.0dxschafe0', 'Holiday': '', 'Hpr': VBase3(110.324, 0.0, 0.0), 'Objects': {}, 'Pos': Point3(-289.272, -827.364, 11.794), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Name': '', 'Color': (0.7, 0.7, 0.7, 1.0), 'Door': 'models/buildings/shanty_guildhall_door', 'Interior': 'models/buildings/interior_shanty_guildhall', 'Model': 'models/buildings/shanty_npc_house_combo_D', 'SignFrame': '', 'SignImage': 'models/buildings/sign1_eng_a_icon_doctor'}}, '1171317760.0dxschafe': {'Type': 'Wall', 'DisableCollision': False, 'Hpr': VBase3(76.725, 0.0, 0.417), 'Pos': Point3(-364.738, -682.879, 27.909), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0), 'Model': 'models/props/pir_m_prp_fnc_wood60'}}, '1171317760.0dxschafe0': {'Type': 'Wall', 'DisableCollision': False, 'Hpr': VBase3(100.931, -0.147, -2.003), 'Pos': Point3(-354.916, -759.608, 25.737), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0), 'Model': 'models/props/pir_m_prp_fnc_wood60'}}, '1171318016.0dxschafe': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(174.015, -1.384, -1.516), 'Pos': Point3(-315.867, -807.895, 18.608), 'Scale': VBase3(1.324, 1.324, 1.324), 'Visual': {'Color': (0.7019607843137254, 0.84, 0.6509803921568628, 
1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1171318016.0dxschafe0': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(42.53, 2.053, 12.816), 'Pos': Point3(-332.123, -785.348, 18.625), 'Scale': VBase3(1.324, 1.324, 1.324), 'Visual': {'Color': (0.96, 0.96, 0.9372549019607843, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1171318016.0dxschafe1': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(174.015, -1.384, -12.349), 'Pos': Point3(-372.147, -615.81, 20.95), 'Scale': VBase3(1.739, 1.739, 1.739), 'Visual': {'Color': (0.699999988079071, 0.7300000190734863, 0.5799999833106995, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1171318016.0dxschafe2': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(-104.832, 6.689, -11.126), 'Pos': Point3(-342.548, -770.489, 23.537), 'Scale': VBase3(1.324, 1.324, 1.324), 'Visual': {'Color': (0.699999988079071, 0.7300000190734863, 0.5799999833106995, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1171318144.0dxschafe': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(174.015, -1.384, -1.516), 'Pos': Point3(-242.418, -547.412, 21.945), 'Scale': VBase3(1.324, 1.324, 1.324), 'Visual': {'Color': (0.699999988079071, 0.7300000190734863, 0.5799999833106995, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1171318144.0dxschafe0': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(-102.342, -1.661, 1.207), 'Pos': Point3(-308.624, -582.81, 23.457), 'Scale': VBase3(1.324, 1.324, 1.324), 'Visual': {'Color': (0.7, 0.73, 0.58, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1171319168.0dxschafe': {'Type': 'Wall', 'DisableCollision': False, 'Hpr': VBase3(73.631, 0.036, 359.332), 'Pos': Point3(-333.435, -684.617, 27.253), 'Scale': VBase3(1.0, 1.0, 1.146), 'VisSize': '', 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0), 'Model': 'models/props/pir_m_prp_fnc_wood60'}}, '1171319168.0dxschafe0': 
{'Type': 'Wall', 'DisableCollision': False, 'Hpr': VBase3(1.345, 0.862, 0.28), 'Pos': Point3(-393.734, -685.211, 27.965), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0), 'Model': 'models/props/pir_m_prp_fnc_wood60'}}, '1171319424.0dxschafe': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(134.89, -3.563, -3.569), 'Pos': Point3(-371.999, -713.504, 27.482), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/bush_a'}}, '1171319424.0dxschafe0': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(154.824, -4.565, -2.142), 'Pos': Point3(-370.918, -635.954, 28.577), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/bush_a'}}, '1171319424.0dxschafe1': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(-142.021, -3.978, 3.1), 'Pos': Point3(-374.971, -674.349, 28.074), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/bush_a'}}, '1171319424.0dxschafe2': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(-142.021, -3.978, 3.1), 'Pos': Point3(-364.271, -744.154, 26.403), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/bush_a'}}, '1171319552.0dxschafe0': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(89.587, 0.0, 0.0), 'Pos': Point3(-345.824, -763.628, 24.667), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/bush_b'}}, '1171319680.0dxschafe': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(70.418, 0.0, -5.372), 'Pos': Point3(-375.646, -731.158, 26.489), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/bush_b'}}, '1171319680.0dxschafe0': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(-82.382, 2.461, 4.776), 'Pos': Point3(-379.378, -723.104, 27.348), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/bush_b'}}, '1171319680.0dxschafe2': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(-133.989, -5.375, 
0.099), 'Pos': Point3(-341.151, -750.03, 25.761), 'Scale': VBase3(1.251, 1.251, 1.251), 'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0), 'Model': 'models/vegetation/bush_b'}}, '1171319808.0dxschafe': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(-155.254, -1.124, 5.253), 'Pos': Point3(-303.177, -809.415, 15.781), 'Scale': VBase3(1.935, 1.935, 1.935), 'Visual': {'Model': 'models/vegetation/bush_b'}}, '1171319808.0dxschafe2': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(175.717, -1.124, 5.253), 'Pos': Point3(592.907, -798.972, 0.159), 'Scale': VBase3(1.935, 1.935, 1.935), 'Visual': {'Model': 'models/vegetation/bush_b'}}, '1171319808.0dxschafe4': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(36.364, -1.124, 5.253), 'Pos': Point3(592.284, -797.455, -1.197), 'Scale': VBase3(1.935, 1.935, 1.935), 'Visual': {'Model': 'models/vegetation/bush_b'}}, '1171319808.0dxschafe5': {'Type': 'Bush', 'DisableCollision': True, 'GridPos': Point3(-280.878, -837.764, 10.574), 'Hpr': VBase3(30.143, 0.0, 0.0), 'Pos': Point3(-280.878, -837.764, 10.574), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/bush_b'}}, '1171319808.0dxschafe6': {'Type': 'Bush', 'DisableCollision': True, 'GridPos': Point3(-277.701, -841.101, 10.574), 'Hpr': VBase3(10.396, 0.0, 0.0), 'Pos': Point3(-264.55, -577.318, 24.378), 'Scale': VBase3(1.595, 1.595, 1.595), 'Visual': {'Model': 'models/vegetation/bush_b'}}, '1171319936.0dxschafe': {'Type': 'Bush', 'DisableCollision': True, 'GridPos': Point3(-277.701, -841.101, 10.574), 'Hpr': VBase3(10.396, 0.0, 4.167), 'Pos': Point3(-188.435, -515.677, 13.628), 'Scale': VBase3(1.595, 1.595, 1.595), 'Visual': {'Model': 'models/vegetation/bush_b'}}, '1171319936.0dxschafe0': {'Type': 'Bush', 'DisableCollision': True, 'GridPos': Point3(-277.701, -841.101, 10.574), 'Hpr': VBase3(10.396, 0.0, 4.167), 'Pos': Point3(-290.971, -585.907, 26.919), 'Scale': VBase3(1.775, 1.775, 1.775), 'Visual': 
{'Model': 'models/vegetation/bush_b'}}, '1171319936.0dxschafe1': {'Type': 'Bush', 'DisableCollision': True, 'GridPos': Point3(-277.701, -841.101, 10.574), 'Hpr': VBase3(10.396, 0.0, 4.167), 'Pos': Point3(-290.971, -864.503, 8.611), 'Scale': VBase3(1.775, 1.775, 1.775), 'Visual': {'Model': 'models/vegetation/bush_b'}}, '1171319936.0dxschafe2': {'Type': 'Bush', 'DisableCollision': True, 'GridPos': Point3(-277.701, -841.101, 10.574), 'Hpr': VBase3(103.439, 4.161, -0.221), 'Pos': Point3(-203.235, -560.818, 17.232), 'Scale': VBase3(1.775, 1.775, 1.775), 'Visual': {'Model': 'models/vegetation/bush_b'}}, '1171319936.0dxschafe3': {'Type': 'Bush', 'DisableCollision': True, 'GridPos': Point3(-277.701, -841.101, 10.574), 'Hpr': VBase3(24.204, 0.996, 4.046), 'Pos': Point3(-205.917, -569.068, 17.84), 'Scale': VBase3(1.775, 1.775, 1.775), 'Visual': {'Model': 'models/vegetation/bush_b'}}, '1171319936.0dxschafe4': {'Type': 'Bush', 'DisableCollision': False, 'GridPos': Point3(-277.701, -841.101, 10.574), 'Hpr': VBase3(100.939, 4.126, 0.582), 'Pos': Point3(-188.231, -638.274, 14.392), 'Scale': VBase3(1.775, 1.775, 1.775), 'Visual': {'Model': 'models/vegetation/bush_b'}}, '1171320064.0dxschafe': {'Type': 'Bush', 'DisableCollision': True, 'GridPos': Point3(-277.701, -841.101, 10.574), 'Hpr': VBase3(24.5, 1.023, 5.25), 'Pos': Point3(-160.085, -485.298, 9.374), 'Scale': VBase3(1.391, 1.391, 1.391), 'Visual': {'Model': 'models/vegetation/bush_b'}}, '1171320064.0dxschafe0': {'Type': 'Bush', 'DisableCollision': True, 'GridPos': Point3(-277.701, -841.101, 10.574), 'Hpr': VBase3(-153.042, -1.248, -5.202), 'Pos': Point3(-156.365, -467.801, 9.347), 'Scale': VBase3(1.391, 1.391, 1.391), 'Visual': {'Model': 'models/vegetation/bush_b'}}, '1171320832.0dxschafe': {'Type': 'Bush', 'DisableCollision': False, 'Hpr': VBase3(22.759, 10.417, -1.703), 'Pos': Point3(-206.729, -777.82, 5.521), 'Scale': VBase3(0.739, 0.739, 0.739), 'Visual': {'Model': 'models/vegetation/bush_d'}}, '1171320832.0dxschafe0': 
{'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(-1.712, 10.196, 2.741), 'Pos': Point3(-294.622, -833.425, 13.44), 'Scale': VBase3(1.16, 1.16, 1.16), 'Visual': {'Model': 'models/vegetation/bush_d'}}, '1171320832.0dxschafe1': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(132.892, 4.002, -1.679), 'Pos': Point3(606.193, -837.635, -3.384), 'Scale': VBase3(1.16, 1.16, 1.16), 'Visual': {'Model': 'models/vegetation/bush_d'}}, '1171320832.0dxschafe2': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(-116.594, 10.417, -1.703), 'Pos': Point3(620.037, -845.385, -0.281), 'Scale': VBase3(1.16, 1.16, 1.16), 'Visual': {'Model': 'models/vegetation/bush_d'}}, '1171320832.0dxschafe3': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(-157.241, -5.59, -6.894), 'Pos': Point3(-300.613, -827.068, 15.281), 'Scale': VBase3(1.16, 1.16, 1.16), 'Visual': {'Model': 'models/vegetation/bush_d'}}, '1171320832.0dxschafe4': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(-120.691, -8.586, -2.225), 'Pos': Point3(-319.293, -796.378, 20.273), 'Scale': VBase3(1.16, 1.16, 1.16), 'Visual': {'Model': 'models/vegetation/bush_d'}}, '1171320960.0dxschafe': {'Type': 'Bush', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(-13.349, 0.429, 8.857), 'Pos': Point3(-206.803, -526.927, 17.119), 'Scale': VBase3(1.16, 1.16, 1.16), 'VisSize': '', 'Visual': {'Model': 'models/vegetation/bush_d'}}, '1171320960.0dxschafe0': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(170.705, -1.059, -8.804), 'Pos': Point3(-151.201, -460.804, 9.186), 'Scale': VBase3(1.16, 1.16, 1.16), 'Visual': {'Model': 'models/vegetation/bush_d'}}, '1171321088.0dxschafe': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(-165.437, -4.549, -7.62), 'Pos': Point3(-342.28, -739.438, 26.037), 'Scale': VBase3(1.16, 1.16, 1.16), 'Visual': {'Model': 'models/vegetation/bush_d'}}, '1171321088.0dxschafe0': {'Type': 'Animal', 'Hpr': VBase3(-169.923, 0.0, 0.0), 'Patrol Radius': '5.1084', 'Pos': 
Point3(-342.453, -681.228, 28.032), 'PoseAnim': '', 'PoseFrame': '', 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Species': 'Pig', 'Start State': 'Idle', 'StartFrame': '0', 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1171321216.0dxschafe': {'Type': 'Animal', 'Hpr': VBase3(-101.132, 0.0, 0.0), 'Patrol Radius': '5.2410', 'Pos': Point3(-336.727, -661.427, 28.026), 'PoseAnim': '', 'PoseFrame': '', 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Species': 'Pig', 'Start State': 'Walk', 'StartFrame': '0', 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1171321216.0dxschafe0': {'Type': 'Animal', 'Hpr': VBase3(-101.132, 0.0, 0.0), 'Patrol Radius': '12.0000', 'Pos': Point3(-281.902, -692.238, 24.702), 'PoseAnim': '', 'PoseFrame': '', 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Species': 'Rooster', 'Start State': 'Walk', 'StartFrame': '0', 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1171321216.0dxschafe1': {'Type': 'Animal', 'Hpr': VBase3(-101.132, 0.0, 0.0), 'Patrol Radius': '12.0000', 'Pos': Point3(-266.781, -682.498, 23.137), 'PoseAnim': '', 'PoseFrame': '', 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Species': 'Chicken', 'Start State': 'Walk', 'StartFrame': '0', 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1171321216.0dxschafe2': {'Type': 'Animal', 'Hpr': VBase3(-101.132, 0.0, 0.0), 'Patrol Radius': 12, 'Pos': Point3(-261.136, -659.03, 23.744), 'PoseAnim': '', 'PoseFrame': '', 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Species': 'Rooster', 'Start State': 'Walk', 'StartFrame': '0', 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1172863796.36kmuller': {'Type': 'Rock', 'DisableCollision': False, 'Hpr': VBase3(66.911, -2.792, 6.518), 'Pos': Point3(326.362, 26.621, -2.0), 'Scale': VBase3(6.057, 6.057, 6.057), 'Visual': {'Color': (0.25999999046325684, 0.3499999940395355, 0.38999998569488525, 1.0), 'Model': 'models/props/rock_2_sphere'}}, '1172863850.45kmuller': {'Type': 'Rock', 'DisableCollision': False, 'Hpr': VBase3(3.368, -0.137, 4.704), 
'Pos': Point3(212.361, -229.269, -0.091), 'Scale': VBase3(5.606, 5.606, 5.606), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/props/rock_4_sphere'}}, '1176186151.42mike': {'Type': 'Townsperson', 'Category': 'Gypsy', 'AnimSet': 'bar_talk03', 'AuraFX': 'None', 'Boss': False, 'CustomModel': 'None', 'GhostColor': 'None', 'GhostFX': 0, 'Greeting Animation': '', 'HelpID': 'NONE', 'Holiday': '', 'Hpr': VBase3(194.702, 0.0, 0.0), 'Instanced World': 'None', 'Level': '37', 'Notice Animation 1': '', 'Notice Animation 2': '', 'Patrol Radius': '8.0904', 'Pos': Point3(-329.386, -675.124, 27.384), 'PoseAnim': '', 'PoseFrame': '', 'Private Status': 'All', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Requires Quest Interest': False, 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'ShopID': 'PORT_ROYAL_DEFAULTS', 'Start State': 'Idle', 'StartFrame': '0', 'Team': 'Villager', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'VisSize': '', 'Zombie': False, 'rolOffset': VBase3(0.0, 0.0, 0.0), 'spawnTimeAlt': '', 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1176258388.82kmuller': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(41.325, -124.235, -4.357), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_tree_a'}}, '1176258538.6kmuller': {'Type': 'Rock', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-27.599, -86.053, -4.727), 'Scale': VBase3(4.077, 4.077, 4.077), 'Visual': {'Color': (0.41999998688697815, 0.5799999833106995, 0.7200000286102295, 1.0), 'Model': 'models/props/rock_2_sphere'}}, '1176258603.27kmuller': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-49.053, -100.206, -2.976), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1176258613.43kmuller': {'Type': 'Swamp_props', 
'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-57.458, -105.108, -6.347), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1176258631.8kmuller': {'Type': 'Rock', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-67.927, -105.152, -1.229), 'Scale': VBase3(3.271, 3.271, 3.271), 'Visual': {'Color': (0.49000000953674316, 0.6100000143051147, 0.9200000166893005, 1.0), 'Model': 'models/props/rock_3_sphere'}}, '1176258756.88kmuller': {'Type': 'Swamp_props', 'DisableCollision': True, 'Hpr': VBase3(5.171, 41.577, -84.933), 'Pos': Point3(-57.645, -109.688, -1.483), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_tree_thin'}}, '1176258807.22kmuller': {'Type': 'Rock', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-122.855, -123.625, -1.68), 'Scale': VBase3(2.1, 2.1, 2.1), 'Visual': {'Model': 'models/props/rock_2_sphere'}}, '1176258867.93kmuller': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-40.483, -109.599, -2.4), 'Scale': VBase3(0.648, 0.648, 0.648), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1176258898.99kmuller': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(0.702, -87.743, -1.109), 'Scale': VBase3(1.0, 1.0, 1.383), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1176258906.33kmuller': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(3.118, -96.117, -1.037), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1176258927.07kmuller': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(17.995, -107.384, -0.878), 'Scale': VBase3(3.296, 3.296, 2.501), 'Visual': {'Model': 'models/vegetation/swamp_bush_a'}}, '1176258938.58kmuller': {'Type': 'Rock', 'DisableCollision': True, 'Hpr': VBase3(-92.166, 
0.0, 0.0), 'Pos': Point3(8.119, -104.756, -3.156), 'Scale': VBase3(7.184, 7.184, 7.184), 'Visual': {'Color': (0.5, 0.5799999833106995, 0.5899999737739563, 1.0), 'Model': 'models/props/rock_4_sphere'}}, '1179791064.29Aholdun': {'Type': 'Spawn Node', 'Aggro Radius': '12.0000', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': VBase3(-108.17, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(-319.939, -759.868, 22.895), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1179791120.18Aholdun': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(-272.299, -807.439, 12.475), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1179791134.46Aholdun': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(-262.084, -857.896, 6.657), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 
'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1179791701.57Aholdun': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': VBase3(-161.67, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '7.2952', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(-10.276, -477.541, -1.327), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1179793974.72Aholdun': {'Type': 'Spawn Node', 'Aggro Radius': '12.0000', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': VBase3(144.658, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '6.2651', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(-112.853, -459.391, -0.618), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1184716513.73kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(118.656, 0.0, 0.0), 'Pos': Point3(-282.475, -860.838, 6.787), 'Scale': VBase3(1.0, 1.0, 3.146), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184716531.9kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(116.072, 0.0, 0.0), 
'Pos': Point3(-274.672, -882.702, 3.365), 'Scale': VBase3(1.0, 1.0, 3.497), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184716542.28kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(118.023, 0.0, 0.0), 'Pos': Point3(-267.455, -902.252, -0.361), 'Scale': VBase3(1.0, 1.0, 3.619), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184717296.78kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'GridPos': Point3(-279.242, -841.955, 8.594), 'Hpr': VBase3(77.613, 0.0, 0.0), 'Pos': Point3(-279.242, -841.955, 8.594), 'Scale': VBase3(1.975, 1.807, 2.096), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184717358.28kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'GridPos': Point3(-286.708, -832.001, 11.316), 'Hpr': VBase3(-179.707, 0.0, 0.0), 'Pos': Point3(-286.708, -832.001, 11.316), 'Scale': VBase3(1.792, 1.792, 1.792), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184717384.58kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'GridPos': Point3(-296.947, -819.412, 12.195), 'Hpr': VBase3(119.548, 0.0, -4.688), 'Pos': Point3(-296.947, -819.412, 12.195), 'Scale': VBase3(2.894, 1.46, 2.73), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184717452.25kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(137.257, 0.0, -3.56), 'Pos': Point3(-312.435, -799.391, 16.132), 'Scale': VBase3(2.564, 1.997, 3.379), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184717774.17kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(38.538, 0.0, 0.0), 'Pos': Point3(-253.242, -575.139, 15.784), 'Scale': VBase3(8.433, 5.903, 5.903), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184717903.14kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-5.496, 0.0, 0.0), 
'Pos': Point3(-204.018, -562.348, 11.991), 'Scale': VBase3(2.392, 4.901, 2.809), 'VisSize': '', 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}}, '1184717982.65kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(31.759, 0.0, 0.0), 'Pos': Point3(-197.119, -523.265, 12.405), 'Scale': VBase3(3.446, 3.446, 1.862), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184718346.58kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(50.493, 0.0, 0.0), 'Pos': Point3(-143.146, -459.234, 4.424), 'Scale': VBase3(5.785, 2.186, 2.186), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184718408.0kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(80.042, 0.0, 0.0), 'Pos': Point3(-156.558, -481.447, 7.87), 'Scale': VBase3(1.647, 1.586, 1.586), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184718756.09kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(116.714, 0.0, 0.0), 'Pos': Point3(-336.226, -767.127, 23.465), 'Scale': VBase3(2.559, 1.888, 1.888), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184718783.67kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(81.452, 0.0, 0.0), 'Pos': Point3(-338.845, -744.705, 23.506), 'Scale': VBase3(2.877, 1.685, 1.685), 'VisSize': '', 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184718870.54kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-166.203, 0.0, 0.0), 'Pos': Point3(-348.336, -721.977, 26.051), 'Scale': VBase3(0.602, 1.0, 1.335), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184718946.59kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-85.948, 0.0, 0.0), 'Pos': Point3(-350.32, -732.694, 25.196), 'Scale': VBase3(1.995, 1.429, 1.429), 'Visual': {'Model': 
'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184719009.84kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-176.754, 0.0, 0.0), 'Pos': Point3(-352.852, -742.675, 25.386), 'Scale': VBase3(1.0, 1.0, 1.386), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184719101.83kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(11.051, 0.0, 0.0), 'Pos': Point3(-294.921, -607.314, 26.788), 'Scale': VBase3(0.801, 1.0, 1.0), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184719164.21kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-72.385, 0.0, 0.0), 'Pos': Point3(-46.593, -542.406, -18.838), 'Scale': VBase3(2.647, 2.647, 8.776), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184719190.71kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-22.245, -563.691, -4.271), 'Scale': VBase3(1.0, 1.0, 4.834), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184719277.87kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(100.696, 0.0, 0.0), 'Pos': Point3(-2.572, -548.712, -3.941), 'Scale': VBase3(0.488, 1.0, 2.697), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184719296.98kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(172.417, 0.0, 0.0), 'Pos': Point3(-36.554, -532.038, -0.105), 'Scale': VBase3(2.779, 2.476, 5.133), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184720985.21kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(35.205, 0.0, 0.0), 'Pos': Point3(79.502, -138.629, -6.594), 'Scale': VBase3(2.293, 2.293, 4.515), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184721040.78kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': 
Point3(243.699, -207.921, 0.0), 'Scale': VBase3(2.704, 2.704, 2.704), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184721161.87kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-20.497, 0.0, 0.0), 'Pos': Point3(395.657, -500.026, -12.135), 'Scale': VBase3(6.587, 4.342, 4.342), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184721215.28kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-26.999, 0.0, 0.0), 'Pos': Point3(445.877, -510.059, -7.858), 'Scale': VBase3(3.986, 3.696, 3.696), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184721233.95kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(41.742, 0.0, 0.0), 'Pos': Point3(430.517, -507.735, -2.914), 'Scale': VBase3(1.222, 1.0, 3.159), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184721291.89kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(165.907, 0.0, 0.0), 'Pos': Point3(418.533, -477.362, -11.395), 'Scale': VBase3(8.882, 8.419, 8.419), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184721406.87kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(132.152, 0.0, 0.0), 'Pos': Point3(489.104, -633.97, 0.0), 'Scale': VBase3(3.714, 2.777, 2.777), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184721426.18kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(164.262, 0.0, 0.0), 'Pos': Point3(469.196, -633.133, -1.26), 'Scale': VBase3(1.726, 2.464, 2.901), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184721812.36kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-11.304, 0.0, 0.0), 'Pos': Point3(465.739, -655.873, -4.272), 'Scale': VBase3(2.878, 2.878, 2.878), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184721823.17kmuller': 
{'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-71.597, 0.0, 0.0), 'Pos': Point3(445.567, -632.786, -1.355), 'Scale': VBase3(4.425, 2.405, 2.405), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184721880.11kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(96.012, 0.0, 0.0), 'Pos': Point3(461.205, -615.181, -3.647), 'Scale': VBase3(3.559, 3.159, 3.159), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184721907.79kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-123.568, 0.0, 0.0), 'Pos': Point3(471.884, -627.656, -9.972), 'Scale': VBase3(1.863, 1.863, 4.774), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184721953.51kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(45.775, 0.0, 0.0), 'Pos': Point3(466.141, -594.241, -3.326), 'Scale': VBase3(2.179, 2.179, 3.091), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184721971.46kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-151.881, 0.0, 0.0), 'Pos': Point3(454.64, -582.71, -8.004), 'Scale': VBase3(1.789, 1.405, 4.494), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184722024.71kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-35.565, 0.0, 0.0), 'Pos': Point3(500.403, -671.125, -8.22), 'Scale': VBase3(1.919, 2.373, 4.389), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1184722304.54kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(5.496, 0.0, 0.0), 'Pos': Point3(604.613, -839.789, -5.012), 'Scale': VBase3(1.288, 1.288, 1.288), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}}, '1184891648.0dxschafe': {'Type': 'Dinghy', 'Aggro Radius': 20, 'Hpr': VBase3(40.217, 5.058, 0.0), 'Location': 'Water', 'Pos': Point3(-144.25, -717.336, 1.117), 'Scale': VBase3(1.0, 1.0, 
1.0), 'Visual': {'Model': 'models/shipparts/dingy-geometry_High'}}, '1184891776.0dxschafe0': {'Type': 'Player Spawn Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Index': 1, 'Pos': Point3(-140.128, -651.976, 2.377), 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'All', 'Visual': {'Color': (0.5, 0.5, 0.5, 1), 'Model': 'models/misc/smiley'}}, '1184891776.0dxschafe1': {'Type': 'Player Spawn Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Index': 1, 'Pos': Point3(-184.722, -758.28, 2.699), 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'All', 'Visual': {'Color': (0.5, 0.5, 0.5, 1), 'Model': 'models/misc/smiley'}}, '1185921614.52kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(18.837, 0.0, 0.0), 'Pos': Point3(-305.391, -813.387, 26.631), 'Scale': VBase3(1.59, 1.59, 1.59), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1186006891.46kmuller': {'Type': 'Crate', 'DisableCollision': True, 'Hpr': VBase3(69.221, 0.0, 0.0), 'Pos': Point3(-306.308, -819.284, 27.342), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.72, 0.78, 0.7529411764705882, 1.0), 'Model': 'models/props/crate_04'}}, '1186007092.14kmuller': {'Type': 'Crate', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-304.305, -823.26, 29.649), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.71, 0.8, 0.7607843137254902, 1.0), 'Model': 'models/props/crate'}}, '1186007187.37kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(108.406, 0.0, 0.0), 'Pos': Point3(-301.8, -822.736, 26.72), 'Scale': VBase3(1.0, 1.0, 1.955), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1186007215.54kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(161.324, 0.0, 0.0), 'Pos': Point3(-307.586, -817.184, 26.407), 'Scale': VBase3(1.0, 1.0, 2.024), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1186007267.03kmuller': {'Type': 'Crate', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 
0.0), 'Objects': {}, 'Pos': Point3(-292.103, -841.622, 27.342), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.73, 0.78, 0.7568627450980392, 1.0), 'Model': 'models/props/crates_group_1'}}, '1186007415.7kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(98.834, 0.0, 0.0), 'Pos': Point3(-289.516, -840.404, 26.219), 'Scale': VBase3(1.0, 1.0, 1.651), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1187913949.14akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-128.874, 0.0, 0.0), 'Pos': Point3(454.734, -484.921, -11.51), 'Scale': VBase3(1.878, 1.878, 8.431), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1187914011.44akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-42.248, 0.0, 0.0), 'Pos': Point3(358.995, -356.361, -4.153), 'Scale': VBase3(1.0, 1.0, 2.585), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1187914045.25akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-67.838, 0.0, 0.0), 'Pos': Point3(369.831, -379.882, -2.0), 'Scale': VBase3(1.0, 1.0, 2.585), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1187914073.11akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(110.197, 0.0, 0.0), 'Pos': Point3(384.244, -377.965, -2.0), 'Scale': VBase3(1.0, 1.0, 2.585), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1187914104.39akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-42.248, 0.0, 0.0), 'Pos': Point3(347.259, -341.046, -2.0), 'Scale': VBase3(1.0, 1.0, 2.585), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1187914121.33akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-6.709, 0.0, 0.0), 'Pos': Point3(327.875, -338.618, -2.0), 'Scale': VBase3(1.796, 1.796, 3.425), 'Visual': {'Model': 
'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1187914162.75akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-26.96, 0.0, 0.0), 'Pos': Point3(271.414, -210.79, -2.0), 'Scale': VBase3(2.704, 2.704, 2.704), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1187914199.86akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-110.168, 0.0, 0.0), 'Pos': Point3(293.678, -262.519, 0.0), 'Scale': VBase3(2.101, 2.101, 2.101), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1187914236.7akelts': {'Type': 'Swamp_props', 'DisableCollision': False, 'Hpr': VBase3(178.851, -3.122, 4.786), 'Pos': Point3(301.413, -302.12, -1.385), 'Scale': VBase3(1.846, 1.846, 1.846), 'Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0), 'Model': 'models/vegetation/swamp_tree_roots'}}, '1187914264.97akelts': {'Type': 'Rock', 'DisableCollision': False, 'Hpr': VBase3(3.368, -0.137, 4.704), 'Pos': Point3(302.212, -264.111, -7.994), 'Scale': VBase3(10.072, 10.072, 10.072), 'Visual': {'Color': (0.28999999165534973, 0.4000000059604645, 0.46000000834465027, 1.0), 'Model': 'models/props/rock_4_sphere'}}, '1187914386.67akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(17.622, 0.0, 0.0), 'Pos': Point3(206.577, -145.923, -0.939), 'Scale': VBase3(1.346, 1.346, 1.346), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1187914411.34akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-64.455, 0.0, 0.0), 'Pos': Point3(184.517, -137.759, -1.635), 'Scale': VBase3(1.346, 1.346, 1.346), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1187914475.81akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-67.009, 0.0, 0.0), 'Pos': Point3(0.822, -105.142, -1.998), 'Scale': VBase3(3.006, 0.257, 1.385), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1187914641.95akelts': 
{'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-16.882, 0.0, 0.0), 'Pos': Point3(27.072, -124.314, -4.181), 'Scale': VBase3(0.678, 0.372, 2.225), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1187914830.53akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(77.413, 0.0, 0.0), 'Pos': Point3(-73.571, -407.45, 0.0), 'Scale': VBase3(0.525, 1.259, 1.259), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1187914874.69akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(4.408, 0.0, 0.0), 'Pos': Point3(-95.124, -422.22, -2.0), 'Scale': VBase3(0.468, 1.123, 1.123), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1187993555.97akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(0.0, 0.0, 0.0), 'Pos': Point3(-193.985, -788.725, 1.073), 'Scale': VBase3(0.711, 0.719, 1.871), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}}, '1187993700.22akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(39.363, 0.0, 0.0), 'Pos': Point3(-198.947, -773.149, 3.772), 'Scale': VBase3(1.0, 1.0, 1.854), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}}, '1187995354.27akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'GridPos': Point3(-199.38, -789.316, 2.818), 'Hpr': VBase3(0.0, 14.537, 5.684), 'Pos': Point3(-202.599, -789.944, 2.707), 'Scale': VBase3(0.598, 0.819, 1.733), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}}, '1189212260.31kmuller': {'Type': 'Building Exterior', 'File': '', 'ExtUid': '1189212260.31kmuller0', 'Hpr': VBase3(-128.508, 0.527, -1.189), 'Objects': {'1189456314.77kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'GridPos': Point3(-39.156, -453.783, -15.774), 'Holiday': '', 'Hpr': VBase3(-136.109, 2.634, 2.735), 'Pos': Point3(-41.913, 2.173, -14.63), 'Scale': VBase3(3.212, 1.0, 8.981), 'VisSize': '', 
'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1189456447.08kmuller': {'Type': 'Townsperson', 'Category': 'Shipwright', 'Aggro Radius': '12.0000', 'AnimSet': 'default', 'AuraFX': 'None', 'Boss': False, 'CustomModel': 'None', 'GhostColor': 'None', 'GhostFX': 0, 'Greeting Animation': '', 'GridPos': Point3(-72.355, -489.439, 0.343), 'Hpr': VBase3(-70.979, 3.589, -1.236), 'Instanced World': 'None', 'Level': '37', 'Notice Animation 1': '', 'Notice Animation 2': '', 'Patrol Radius': '1.0000', 'Pos': Point3(21.292, -1.475, 0.0), 'PoseAnim': '', 'PoseFrame': '', 'Private Status': 'All', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Requires Quest Interest': False, 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Start State': 'Idle', 'StartFrame': '0', 'Team': 'Villager', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Zombie': False, 'rolOffset': VBase3(0.0, 0.0, 0.0), 'spawnTimeAlt': '', 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1251247369.96caoconno': {'Type': 'Collision Barrier', 'DisableCollision': False, 'GridPos': Point3(-97.628, -474.345, -11.857), 'Holiday': '', 'Hpr': VBase3(-45.963, -1.221, 0.448), 'Pos': Point3(10.66, -30.742, -11.501), 'Scale': VBase3(2.409, 1.0, 5.4), 'VisSize': '', 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1251247425.66caoconno': {'Type': 'Collision Barrier', 'DisableCollision': False, 'GridPos': Point3(-30.615, -479.316, -3.936), 'Holiday': '', 'Hpr': VBase3(163.563, 0.842, -0.991), 'Pos': Point3(-27.012, 24.862, -3.307), 'Scale': VBase3(1.443, 1.0, 3.288), 'VisSize': '', 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}}, 'Pos': Point3(-66.869, -484.894, -0.298), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Door': 'models/buildings/shanty_guildhall_door', 'Model': 'models/buildings/shanty_repairshop_exterior', 'SignImage': 'models/buildings/sign1_eng_a_icon_blacksmith'}}, '1189456366.97kmuller': {'Type': 'Collision 
Barrier', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-126.219, 0.0, 0.0), 'Pos': Point3(-58.252, -443.342, -8.95), 'Scale': VBase3(3.042, 1.19, 5.679), 'VisSize': '', 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1190666496.0dxschafe': {'Type': 'Animated Avatar - Navy', 'Animation Track': 'sit_sleep', 'Hpr': VBase3(-35.554, 28.688, 2.895), 'Pos': Point3(-299.244, -614.407, 27.064), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1190666880.0dxschafe': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-31.482, 0.0, 0.0), 'Pos': Point3(-300.724, -614.498, 26.702), 'Scale': VBase3(1.242, 1.488, 1.205), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}}, '1190741504.0dxschafe': {'Type': 'Building Exterior', 'File': 'cuba_building_int_tavern', 'ExtUid': '1190741504.0dxschafe0', 'Hpr': VBase3(43.111, 0.0, 2.086), 'Objects': {'1201041675.97dxschafe': {'Type': 'Door Locator Node', 'Name': 'door_locator', 'Hpr': VBase3(-179.829, 0.0, 0.0), 'Pos': Point3(-0.498, -4.914, 0.952), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1201041677.36dxschafe': {'Type': 'Door Locator Node', 'Name': 'door_locator_2', 'Hpr': VBase3(0.0, 0.0, 0.0), 'Pos': Point3(-6.626, 20.947, 1.006), 'Scale': VBase3(1.0, 1.0, 1.0)}}, 'Pos': Point3(-166.222, -609.364, 14.521), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.65, 0.67, 0.7254901960784313, 1.0), 'Door': 'models/buildings/shanty_guildhall_door', 'Model': 'models/buildings/shanty_tavern_exterior', 'SignImage': 'models/buildings/sign1_eng_a_icon_blacksmith'}}, '1191629336.42kmuller': {'Type': 'Stairs', 'DisableCollision': False, 'Hpr': VBase3(41.465, 0.0, 0.0), 'Pos': Point3(-154.369, -622.269, 8.323), 'Scale': VBase3(0.973, 1.0, 1.0), 'Visual': {'Color': (0.36, 0.44, 0.5, 1.0), 'Model': 'models/buildings/stone_stairs_double'}}, '1191629553.64kmuller': {'Type': 'Stairs', 'DisableCollision': False, 'Hpr': VBase3(42.295, 0.0, 0.0), 'Pos': Point3(-146.961, -630.23, 1.915), 'Scale': VBase3(0.762, 
1.0, 1.0), 'Visual': {'Color': (0.36000001430511475, 0.4399999976158142, 0.5, 1.0), 'Model': 'models/buildings/stone_stairs_double'}}, '1191629599.67kmuller': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(36.897, 0.0, 0.0), 'Pos': Point3(-153.414, -608.288, 7.57), 'Scale': VBase3(0.76, 0.76, 0.76), 'Visual': {'Model': 'models/vegetation/bush_a'}}, '1191629616.42kmuller': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': VBase3(-18.577, 0.0, 0.0), 'Pos': Point3(-165.963, -624.1, 10.167), 'Scale': VBase3(0.659, 0.659, 0.659), 'Visual': {'Model': 'models/vegetation/bush_c'}}, '1191629698.66kmuller': {'Type': 'Bush', 'DisableCollision': True, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-158.788, -598.788, 8.404), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/bush_c'}}, '1191629817.2kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(126.054, 0.0, 0.0), 'Pos': Point3(-151.785, -598.595, 6.556), 'Scale': VBase3(2.507, 2.507, 2.507), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1191629838.92kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(43.161, 0.0, 0.0), 'Pos': Point3(-148.854, -612.866, 5.699), 'Scale': VBase3(1.225, 1.225, 2.864), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1191629854.09kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-43.63, 0.0, 0.0), 'Pos': Point3(-156.953, -613.242, 8.88), 'Scale': VBase3(1.09, 1.09, 3.751), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1191629933.37kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(45.217, 0.0, 0.0), 'Pos': Point3(-169.664, -631.311, 8.863), 'Scale': VBase3(2.621, 1.855, 2.096), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1191629959.37kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-7.406, 0.0, 0.0), 'Pos': Point3(-182.264, 
-640.102, 12.636), 'Scale': VBase3(0.724, 1.0, 1.333), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1191629982.17kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(125.106, 0.0, 0.0), 'Pos': Point3(-163.15, -618.235, 8.931), 'Scale': VBase3(1.0, 1.0, 3.513), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1191630016.62kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-106.537, 0.0, 0.0), 'Pos': Point3(-190.914, -616.698, 15.546), 'Scale': VBase3(1.0, 1.0, 2.315), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1192043971.0kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-79.363, 0.0, 0.0), 'Pos': Point3(175.154, -189.323, -2.684), 'Scale': VBase3(1.674, 1.674, 4.468), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1192044061.55kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-66.827, 0.0, 0.0), 'Pos': Point3(126.316, -150.761, -1.654), 'Scale': VBase3(1.0, 1.0, 3.962), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1192044102.02kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(53.799, 0.0, 0.0), 'Pos': Point3(143.19, -154.164, -2.369), 'Scale': VBase3(1.0, 1.0, 2.87), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1192228480.0dxschafe': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(26.869, -359.892, -2.0), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': (0, 0, 
0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1192228608.0dxschafe': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(148.457, -233.669, -2.0), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1192228608.0dxschafe0': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(-27.17, -156.317, -2.0), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Big Gator', 'Start State': 'Ambush', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1192228608.0dxschafe1': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(51.014, -214.437, -2.0), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': 
(0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1192228608.0dxschafe2': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(363.513, -547.292, -2.0), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1192228608.0dxschafe3': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(409.275, -595.765, -1.327), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1192228608.0dxschafe4': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(330.853, -437.092, -2.0), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': 
{'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1192228608.0dxschafe5': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(223.516, -546.544, -2.0), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1192228608.0dxschafe6': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(215.664, -309.744, -2.0), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1192228608.0dxschafe7': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(-69.322, -165.851, -2.0), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Big Gator', 'Start State': 'Ambush', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 
'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1192228608.0dxschafe8': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(-90.742, -210.405, -2.0), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Ambush', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1192228736.0dxschafe': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(255.22, -245.849, -2.0), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1192228736.0dxschafe0': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(325.088, -639.495, -2.0), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 
'None', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1192228736.0dxschafe1': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(265.244, -605.764, -1.327), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1192228736.0dxschafe2': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(310.809, -372.223, -2.0), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': (0.0, 0.0, 0.65, 1.0), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1192228736.0dxschafe3': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(-49.478, -297.461, -2.0), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 
'None', 'TrailRight': 'None', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1192228736.0dxschafe4': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(-22.994, -218.547, -2.0), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1192228736.0dxschafe5': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(76.849, -324.094, -1.327), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Alligator', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1192733824.0dxschafe': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(-212.788, -770.382, 7.013), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1192733952.0dxschafe': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(-225.931, -895.237, -5.479), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.65, 
0, 0, 1), 'Model': 'models/misc/smiley'}}, '1192836222.09kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-11.592, 0.0, 0.0), 'Pos': Point3(-337.636, -684.906, 27.34), 'Scale': VBase3(0.811, 1.0, 1.472), 'VisSize': '', 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1201041679.88dxschafe': {'Type': 'Door Locator Node', 'Name': 'door_locator', 'Hpr': VBase3(-180.0, 0.0, 0.0), 'Pos': Point3(0.313, -4.248, 1.444), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1201041679.89dxschafe': {'Type': 'Door Locator Node', 'Name': 'door_locator_2', 'Hpr': VBase3(0.0, 0.0, 0.0), 'Pos': Point3(-6.626, 20.947, 1.006), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1210704118.17kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-97.912, 0.0, 0.0), 'Pos': Point3(460.304, -573.978, -2.094), 'Scale': VBase3(1.86, 1.86, 3.402), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1210704165.86kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-82.933, 0.0, -2.169), 'Pos': Point3(457.725, -535.476, -3.064), 'Scale': VBase3(4.18, 3.824, 3.824), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1241909300.25ian': {'Type': 'Townsperson', 'Category': 'Stowaway', 'AnimSet': 'cargomaster', 'AuraFX': 'None', 'Boss': False, 'CustomModel': 'None', 'GhostColor': 'None', 'GhostFX': 0, 'Greeting Animation': '', 'HelpID': 'NONE', 'Holiday': '', 'Hpr': VBase3(347.905, 0.0, 0.0), 'Instanced World': 'None', 'Level': '37', 'Notice Animation 1': '', 'Notice Animation 2': '', 'Patrol Radius': '12.0000', 'Pos': Point3(-201.181, -765.469, 5.135), 'PoseAnim': '', 'PoseFrame': '', 'Private Status': 'All', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Requires Quest Interest': False, 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'ShopID': 'PORT_ROYAL_DEFAULTS', 'Start State': 'Idle', 'StartFrame': '0', 'Team': 'Player', 'TrailFX': 
'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'VisSize': '', 'Zombie': False, 'rolOffset': VBase3(0.0, 0.0, 0.0), 'spawnTimeAlt': '', 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1241909665.5ian': {'Type': 'Crate', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(100.024, 8.973, -8.978), 'Pos': Point3(-194.823, -767.801, 3.577), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0), 'Model': 'models/props/pir_m_prp_cnt_crateStowaway'}}, '1242332258.36kmuller': {'Type': 'Pier', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(154.739, 0.0, 0.0), 'Pos': Point3(-171.371, -750.487, -0.819), 'Scale': VBase3(0.375, 0.375, 0.375), 'VisSize': '', 'Visual': {'Model': 'models/islands/pier_walkway'}}, '1242332511.27kmuller': {'Type': 'Crate', 'DisableCollision': True, 'Holiday': '', 'Hpr': VBase3(31.05, 5.218, 8.59), 'Pos': Point3(-199.308, -764.029, 4.786), 'Scale': VBase3(0.961, 0.961, 0.961), 'VisSize': '', 'Visual': {'Color': (0.5, 0.5, 0.5, 1.0), 'Model': 'models/props/crate_04'}}, '1242332757.48kmuller': {'Type': 'Sack', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-73.023, -1.291, -68.071), 'Pos': Point3(-193.531, -769.122, 4.57), 'Scale': VBase3(0.621, 0.621, 0.621), 'VisSize': '', 'Visual': {'Model': 'models/props/Sack'}}, '1242332822.32kmuller': {'Type': 'Sack', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-0.452, 2.351, 10.886), 'Pos': Point3(-204.79, -768.917, 5.569), 'Scale': VBase3(0.323, 0.323, 0.323), 'VisSize': '', 'Visual': {'Model': 'models/props/sack_18stack'}}, '1242332871.57kmuller': {'Type': 'Sack', 'DisableCollision': False, 'Holiday': '', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-195.891, -768.73, 3.705), 'Scale': VBase3(1.326, 1.326, 1.326), 'VisSize': '', 'Visual': {'Model': 'models/props/package_sack'}}, '1242332937.68kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(25.533, 0.0, 
0.0), 'Pos': Point3(-198.89, -764.772, 3.188), 'Scale': VBase3(0.757, 1.171, 1.013), 'VisSize': '', 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}}, '1242332966.9kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-41.885, 0.0, 4.203), 'Pos': Point3(-198.653, -768.691, 3.361), 'Scale': VBase3(0.218, 0.604, 0.96), 'VisSize': '', 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1248385792.0jloehrle': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(-251.774, -584.307, 23.852), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Bat T1', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1248385792.0jloehrle0': {'Type': 'Spawn Node', 'AnimSet': 'default', 'AuraFX': 'None', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(-223.199, -558.898, 19.878), 'PoseAnim': '', 'PoseFrame': '', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Bat T1', 'Start State': 'Patrol', 'StartFrame': '0', 'Team': 'default', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'VisSize': '', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}, 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1257284346.42caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(56.033, 0.0, 0.0), 'Pos': Point3(-160.171, -638.435, 
7.858), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_snowman_jack_winter09'}}, '1257284362.29caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(72.895, 0.0, 0.0), 'Pos': Point3(-163.315, -639.753, 7.868), 'Scale': VBase3(1.852, 1.852, 1.852), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_sandpile01_winter09'}}, '1257284384.94caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-161.037, -635.779, 7.868), 'Scale': VBase3(1.852, 1.852, 1.852), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_sandpile01_winter09'}}, '1267728993.08Jared': {'Type': 'PotionTable', 'Hpr': VBase3(-20.072, 0.0, 3.1), 'Objects': {'1269464043.62akelts': {'Type': 'Jugs_and_Jars', 'DisableCollision': False, 'GridPos': Point3(-330.818, -694.55, 29.961), 'Holiday': '', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-2.92, 1.177, 2.977), 'Scale': VBase3(0.685, 0.685, 0.685), 'VisSize': '', 'Visual': {'Model': 'models/props/jar'}}, '1269464058.34akelts': {'Type': 'Jugs_and_Jars', 'DisableCollision': False, 'GridPos': Point3(-325.309, -696.252, 29.961), 'Holiday': '', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(2.589, -0.525, 2.977), 'Scale': VBase3(0.537, 0.537, 0.97), 'VisSize': '', 'Visual': {'Model': 'models/props/jar'}}, '1269464073.87akelts': {'Type': 'Jugs_and_Jars', 'DisableCollision': False, 'GridPos': Point3(-324.978, -696.137, 29.961), 'Holiday': '', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(2.92, -0.41, 2.977), 'Scale': VBase3(0.537, 0.537, 0.537), 'VisSize': '', 'Visual': {'Model': 'models/props/jar'}}, '1269464088.19akelts': {'Type': 'Jugs_and_Jars', 'DisableCollision': False, 'GridPos': Point3(-325.313, -696.812, 29.961), 'Holiday': '', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(2.585, -1.085, 2.977), 'Scale': VBase3(1.175, 1.175, 1.175), 'VisSize': '', 'Visual': {'Model': 
'models/props/winebottle_B'}}, '1269464139.18akelts': {'Type': 'Jugs_and_Jars', 'DisableCollision': False, 'GridPos': Point3(-325.824, -695.314, 29.962), 'Holiday': '', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(2.074, 0.413, 2.978), 'Scale': VBase3(0.79, 0.79, 0.79), 'VisSize': '', 'Visual': {'Model': 'models/props/bottle_tan'}}, '1269464174.37akelts': {'Type': 'Jugs_and_Jars', 'DisableCollision': False, 'GridPos': Point3(-330.382, -696.296, 29.962), 'Holiday': '', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-2.484, -0.569, 2.978), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/bottle_red'}}, '1269464413.98akelts': {'Type': 'Mortar_Pestle', 'DisableCollision': False, 'GridPos': Point3(-329.687, -695.9, 29.961), 'Holiday': '', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-1.789, -0.173, 2.977), 'Scale': VBase3(1.007, 1.007, 1.007), 'VisSize': '', 'Visual': {'Model': 'models/props/mortar_pestle_wood'}}, '1269464456.65akelts': {'Type': 'Mortar_Pestle', 'DisableCollision': False, 'GridPos': Point3(-326.578, -695.53, 29.961), 'Holiday': '', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(1.32, 0.197, 2.977), 'Scale': VBase3(0.676, 0.676, 0.676), 'VisSize': '', 'Visual': {'Model': 'models/props/mortar_pestle_stone'}}}, 'Pos': Point3(-328.594, -682.984, 27.173), 'Potion Zone': '3', 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/minigames/pir_m_gam_pot_table'}}, '1270764606.85akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(46.759, 0.0, 0.0), 'Pos': Point3(-416.63, -1300.635, -9.4), 'Scale': VBase3(19.924, 2.047, 6.536), 'VisSize': '', 'Visual': {'Model': 'models/misc/pir_m_prp_lev_barrier_plane'}}, '1270764673.88akelts': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(73.7, 0.0, 0.0), 'Pos': Point3(-332.041, -1171.865, -9.4), 'Scale': VBase3(11.981, 2.047, 6.536), 'VisSize': '', 'Visual': {'Model': 
'models/misc/pir_m_prp_lev_barrier_plane'}}, '1286984549.68gcarranza': {'Type': 'Townsperson', 'Category': 'CatalogRep', 'AnimSet': 'coin_flip', 'AuraFX': 'None', 'Boss': False, 'CustomModel': 'None', 'DNA': '1286984549.68gcarranza', 'GhostColor': 'None', 'GhostFX': 0, 'Greeting Animation': 'emote_yawn', 'HelpID': 'NONE', 'Holiday': '', 'Hpr': VBase3(-147.764, -2.811, -1.612), 'Instanced World': 'None', 'Level': '37', 'Notice Animation 1': 'emote_wink', 'Notice Animation 2': 'emote_smile', 'Patrol Radius': '12.0000', 'Pos': Point3(-186.314, -540.718, 14.402), 'PoseAnim': '', 'PoseFrame': '', 'Private Status': 'All', 'PropFXLeft': 'None', 'PropFXRight': 'None', 'PropLeft': 'None', 'PropRight': 'None', 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'ShopID': 'PORT_ROYAL_DEFAULTS', 'Start State': 'Walk', 'StartFrame': '0', 'Team': 'Player', 'TrailFX': 'None', 'TrailLeft': 'None', 'TrailRight': 'None', 'VisSize': '', 'Zombie': False, 'rolOffset': VBase3(0.0, 0.0, 0.0), 'spawnTimeAlt': '', 'spawnTimeBegin': 0.0, 'spawnTimeEnd': 0.0}, '1287611070.99kmuller': {'Type': 'Building Exterior', 'File': '', 'ExtUid': '1287611070.99kmuller0', 'Holiday': '', 'Hpr': VBase3(71.081, 0.0, 0.0), 'Objects': {'1287611234.14kmuller': {'Type': 'Prop_Groups', 'DisableCollision': True, 'GridPos': Point3(-194.793, -544.071, 16.961), 'Holiday': '', 'Hpr': VBase3(-41.155, 0.0, 0.0), 'Pos': Point3(1.017, -6.057, 0.101), 'Scale': VBase3(1.037, 1.037, 1.037), 'VisSize': '', 'Visual': {'Model': 'models/props/prop_group_B'}}, '1287612652.97kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'GridPos': Point3(-191.824, -546.233, 15.359), 'Holiday': '', 'Hpr': VBase3(-1.202, 0.0, 0.0), 'Pos': Point3(-0.383, -10.596, -1.971), 'Scale': VBase3(2.161, 2.145, 3.185), 'VisSize': '', 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}}, 'Pos': Point3(-199.479, -543.296, 16.883), 'Scale': VBase3(0.773, 0.773, 0.773), 'VisSize': '', 'Visual': {'Door': 
'models/buildings/shanty_guildhall_door', 'Model': 'models/buildings/shanty_leanto_A', 'SignFrame': '', 'SignImage': 'models/buildings/sign1_eng_a_icon_barber'}}, '1287612604.35kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-8.797, 0.0, 0.0), 'Pos': Point3(-216.769, -550.687, 17.258), 'Scale': VBase3(1.825, 1.825, 1.825), 'VisSize': '', 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1287612731.53kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(99.746, 0.0, 0.0), 'Pos': Point3(-199.918, -529.652, 14.94), 'Scale': VBase3(1.0, 1.0, 1.461), 'VisSize': '', 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}}, 'Phase': 1, 'Undockable': False, 'Visibility': 'Grid', 'Visual': {'Model': 'models/islands/pir_m_are_isl_cuba'}}}, 'TodSettings': {'AmbientColors': {0: Vec4(0.207843, 0.243137, 0.447059, 1), 2: Vec4(1, 1, 1, 1), 4: Vec4(0.721569, 0.611765, 0.619608, 1), 6: Vec4(0.207843, 0.243137, 0.447059, 1), 8: Vec4(0.388235, 0.423529, 0.568627, 1), 12: Vec4(0.34, 0.28, 0.41, 1), 13: Vec4(0.34, 0.28, 0.41, 1), 16: Vec4(0.25, 0.25, 0.25, 1), 17: Vec4(0.34, 0.28, 0.41, 1)}, 'DirectionalColors': {0: Vec4(0.956863, 0.909804, 0.894118, 1), 2: Vec4(1, 1, 1, 1), 4: Vec4(0.439216, 0.176471, 0, 1), 6: Vec4(0.513726, 0.482353, 0.639216, 1), 8: Vec4(0.447059, 0.439216, 0.537255, 1), 12: Vec4(0.66, 0.76, 0.05, 1), 13: Vec4(0.66, 0.76, 0.05, 1), 16: Vec4(0, 0, 0, 1), 17: Vec4(0.66, 0.76, 0.05, 1)}, 'FogColors': {0: Vec4(0.172549, 0.180392, 0.290196, 1), 2: Vec4(0.894118, 0.894118, 1, 1), 4: Vec4(0.231373, 0.203922, 0.184314, 1), 6: Vec4(0.172549, 0.180392, 0.290196, 1), 8: Vec4(0.129412, 0.137255, 0.203922, 1), 12: Vec4(0.1, 0.12, 0.03, 0), 13: Vec4(0.1, 0.12, 0.03, 0), 16: Vec4(0.25, 0.25, 0.25, 1), 17: Vec4(0.1, 0.12, 0.03, 0)}, 'FogRanges': {0: 0.000699999975040555, 2: 0.00019999999494757503, 4: 0.00039999998989515007, 6: 0.000699999975040555, 8: 0.0, 12: 
0.00025, 13: 0.00025, 16: 0.0001, 17: 0.005}, 'LinearFogRanges': {0: (0.0, 100.0), 2: (0.0, 100.0), 4: (0.0, 100.0), 6: (0.0, 100.0), 8: (0.0, 100.0), 12: (0.0, 100.0), 13: (0.0, 100.0), 16: (0.0, 100.0), 17: (0.0, 100.0)}}, 'Node Links': [['1192733952.0dxschafe', '1192733824.0dxschafe', 'Bi-directional'], ['1179791064.29Aholdun', '1192733824.0dxschafe', 'Bi-directional'], ['1179791064.29Aholdun', '1192733952.0dxschafe', 'Bi-directional']], 'Layers': {'Collisions': ['1184008208.59kmuller', '1184016064.62kmuller', '1184013852.84kmuller', '1185822696.06kmuller', '1184006140.32kmuller', '1184002350.98kmuller', '1184007573.29kmuller', '1184021176.59kmuller', '1184005963.59kmuller', '1188324241.31akelts', '1184006537.34kmuller', '1184006605.81kmuller', '1187139568.33kmuller', '1188324186.98akelts', '1184006730.66kmuller', '1184007538.51kmuller', '1184006188.41kmuller', '1184021084.27kmuller', '1185824396.94kmuller', '1185824250.16kmuller', '1185823630.52kmuller', '1185823760.23kmuller', '1185824497.83kmuller', '1185824751.45kmuller', '1187739103.34akelts', '1188323993.34akelts', '1184016538.29kmuller', '1185822200.97kmuller', '1184016225.99kmuller', '1195241421.34akelts', '1195242796.08akelts', '1184020642.13kmuller', '1195237994.63akelts', '1184020756.88kmuller', '1184020833.4kmuller', '1185820992.97kmuller', '1185821053.83kmuller', '1184015068.54kmuller', '1184014935.82kmuller', '1185821432.88kmuller', '1185821701.86kmuller', '1195240137.55akelts', '1195241539.38akelts', '1195238422.3akelts', '1195238473.22akelts', '1185821453.17kmuller', '1184021269.96kmuller', '1185821310.89kmuller', '1185821165.59kmuller', '1185821199.36kmuller', '1185822035.98kmuller', '1184015806.59kmuller', '1185822059.48kmuller', '1185920461.76kmuller', '1194984449.66akelts', '1185824206.22kmuller', '1184003446.23kmuller', '1184003254.85kmuller', '1184003218.74kmuller', '1184002700.44kmuller', '1186705073.11kmuller', '1187658531.86akelts', '1186705214.3kmuller', '1185824927.28kmuller', 
'1184014204.54kmuller', '1184014152.84kmuller']}, 'ObjectIds': {'1160614528.73sdnaik': '["Objects"]["1160614528.73sdnaik"]', '1161732317.95sdnaik': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1161732317.95sdnaik"]', '1161732322.52sdnaik': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1161732322.52sdnaik"]', '1161732370.84sdnaik': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1161732370.84sdnaik"]', '1161732370.86sdnaik': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1161732370.84sdnaik"]["Objects"]["1161732370.86sdnaik"]', '1161732370.88sdnaik': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1161732370.84sdnaik"]["Objects"]["1161732370.88sdnaik"]', '1161732578.06sdnaik': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1161732578.06sdnaik"]', '1161732578.08sdnaik': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1161732578.06sdnaik"]["Objects"]["1161732578.08sdnaik"]', '1161732578.11sdnaik': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1161732578.06sdnaik"]["Objects"]["1161732578.11sdnaik"]', '1161732705.67sdnaik': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1161732705.67sdnaik"]', '1161732705.72sdnaik': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1161732705.67sdnaik"]["Objects"]["1161732705.72sdnaik"]', '1161732705.7sdnaik': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1161732705.67sdnaik"]["Objects"]["1161732705.7sdnaik"]', '1162496104.57dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162496104.57dzlu"]', '1162496561.59dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162496561.59dzlu"]', '1162496585.79dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162496585.79dzlu"]', '1162496638.89dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162496638.89dzlu"]', '1162496693.54dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162496693.54dzlu"]', '1162496757.15dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162496757.15dzlu"]', '1162496818.98dzlu': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1162496818.98dzlu"]', '1162496857.71dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162496857.71dzlu"]', '1162496880.34dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162496880.34dzlu"]', '1162496889.81dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162496889.81dzlu"]', '1162496999.35dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162496999.35dzlu"]', '1162497015.78dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162497015.78dzlu"]', '1162497038.53dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162497038.53dzlu"]', '1162497249.64dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162497249.64dzlu"]', '1162497329.21dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162497329.21dzlu"]', '1162497460.96dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162497460.96dzlu"]', '1162497568.12dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162497568.12dzlu"]', '1162497591.24dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162497591.24dzlu"]', '1162497648.96dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162497648.96dzlu"]', '1162497681.26dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162497681.26dzlu"]', '1162497693.48dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162497693.48dzlu"]', '1162497709.17dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162497709.17dzlu"]', '1162498231.46dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162498231.46dzlu"]', '1162498233.67dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162498233.67dzlu"]', '1162498236.93dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162498236.93dzlu"]', '1162498256.79dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162498256.79dzlu"]', '1162498287.12dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162498287.12dzlu"]', '1162498321.2dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162498321.2dzlu"]', '1162498369.29dzlu': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1162498369.29dzlu"]', '1162498390.34dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162498390.34dzlu"]', '1162498400.56dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162498400.56dzlu"]', '1162498416.74dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162498416.74dzlu"]', '1162498428.64dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162498428.64dzlu"]', '1162498500.51dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162498500.51dzlu"]', '1162498514.14dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162498514.14dzlu"]', '1162498585.56dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162498585.56dzlu"]', '1162498611.99dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162498611.99dzlu"]', '1162498633.87dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162498633.87dzlu"]', '1162498653.28dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162498653.28dzlu"]', '1162501202.2dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501202.2dzlu"]', '1162501211.4dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501211.4dzlu"]', '1162501216.51dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501216.51dzlu"]', '1162501218.89dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501218.89dzlu"]', '1162501221.32dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501221.32dzlu"]', '1162501223.18dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501223.18dzlu"]', '1162501264.03dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501264.03dzlu"]', '1162501292.92dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501292.92dzlu"]', '1162501329.71dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501329.71dzlu"]', '1162501346.57dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501346.57dzlu"]', '1162501361.39dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501361.39dzlu"]', '1162501378.34dzlu': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501378.34dzlu"]', '1162501380.67dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501380.67dzlu"]', '1162501506.98dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501506.98dzlu"]', '1162501515.84dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501515.84dzlu"]', '1162501551.93dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501551.93dzlu"]', '1162501577.87dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501577.87dzlu"]', '1162501603.15dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501603.15dzlu"]', '1162501641.98dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501641.98dzlu"]', '1162501646.46dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501646.46dzlu"]', '1162501650.07dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501650.07dzlu"]', '1162501671.89dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501671.89dzlu"]', '1162501689.73dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501689.73dzlu"]', '1162501722.73dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501722.73dzlu"]', '1162501750.34dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501750.34dzlu"]', '1162501776.62dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501776.62dzlu"]', '1162501796.43dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501796.43dzlu"]', '1162501799.67dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501799.67dzlu"]', '1162501880.84dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162501880.84dzlu"]', '1162502780.21dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162502780.21dzlu"]', '1162504044.29dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504044.29dzlu"]', '1162504062.54dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504062.54dzlu"]', '1162504090.14dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504090.14dzlu"]', 
'1162504101.34dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504101.34dzlu"]', '1162504103.39dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504103.39dzlu"]', '1162504362.84dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504362.84dzlu"]', '1162504374.53dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504374.53dzlu"]', '1162504384.23dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504384.23dzlu"]', '1162504406.53dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504406.53dzlu"]', '1162504452.48dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504452.48dzlu"]', '1162504463.24dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504463.24dzlu"]', '1162504470.51dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504470.51dzlu"]', '1162504493.21dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504493.21dzlu"]', '1162504510.79dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504510.79dzlu"]', '1162504512.71dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504512.71dzlu"]', '1162504513.99dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504513.99dzlu"]', '1162504515.7dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504515.7dzlu"]', '1162504517.18dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504517.18dzlu"]', '1162504518.45dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504518.45dzlu"]', '1162504520.09dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504520.09dzlu"]', '1162504563.18dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504563.18dzlu"]', '1162504565.28dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504565.28dzlu"]', '1162504579.32dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504579.32dzlu"]', '1162504581.82dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504581.82dzlu"]', '1162504589.29dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504589.29dzlu"]', 
'1162504613.95dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504613.95dzlu"]', '1162504615.65dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504615.65dzlu"]', '1162504622.87dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504622.87dzlu"]', '1162504625.28dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504625.28dzlu"]', '1162504626.54dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504626.54dzlu"]', '1162504628.01dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504628.01dzlu"]', '1162504629.42dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504629.42dzlu"]', '1162504630.82dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504630.82dzlu"]', '1162504635.54dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504635.54dzlu"]', '1162504651.18dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504651.18dzlu"]', '1162504683.95dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504683.95dzlu"]', '1162504691.23dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504691.23dzlu"]', '1162504697.12dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504697.12dzlu"]', '1162504701.23dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504701.23dzlu"]', '1162504709.56dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504709.56dzlu"]', '1162504732.4dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504732.4dzlu"]', '1162504754.37dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504754.37dzlu"]', '1162504802.04dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504802.04dzlu"]', '1162504808.48dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504808.48dzlu"]', '1162504824.68dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162504824.68dzlu"]', '1162505050.93dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162505050.93dzlu"]', '1162505128.43dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162505128.43dzlu"]', 
'1162505210.81dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162505210.81dzlu"]', '1162505214.28dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162505214.28dzlu"]', '1162505293.49dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162505293.49dzlu"]', '1162505296.51dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162505296.51dzlu"]', '1162505308.45dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162505308.45dzlu"]', '1162505330.64dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162505330.64dzlu"]', '1162505333.48dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162505333.48dzlu"]', '1162575738.25dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162575738.25dzlu"]', '1162575755.71dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162575755.71dzlu"]', '1162575900.62dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162575900.62dzlu"]', '1162576043.68dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576043.68dzlu"]', '1162576078.71dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576078.71dzlu"]', '1162576335.14dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576335.14dzlu"]', '1162576483.87dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576483.87dzlu"]', '1162576530.06dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576530.06dzlu"]', '1162576533.54dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576533.54dzlu"]', '1162576597.46dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576597.46dzlu"]', '1162576602.98dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576602.98dzlu"]', '1162576619.82dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576619.82dzlu"]', '1162576649.89dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576649.89dzlu"]', '1162576682.79dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576682.79dzlu"]', '1162576722.81dzlu': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576722.81dzlu"]', '1162576805.98dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576805.98dzlu"]', '1162576816.26dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576816.26dzlu"]', '1162576895.73dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576895.73dzlu"]', '1162576950.03dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576950.03dzlu"]', '1162576969.61dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576969.61dzlu"]', '1162576986.5dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576986.5dzlu"]', '1162576998.25dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162576998.25dzlu"]', '1162577044.42dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162577044.42dzlu"]', '1162577094.82dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162577094.82dzlu"]', '1162577152.48dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162577152.48dzlu"]', '1162577320.84dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162577320.84dzlu"]', '1162577355.15dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162577355.15dzlu"]', '1162577566.42dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162577566.42dzlu"]', '1162577813.76dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162577813.76dzlu"]', '1162577835.12dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162577835.12dzlu"]', '1162577839.92dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162577839.92dzlu"]', '1162577843.75dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162577843.75dzlu"]', '1162577867.18dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162577867.18dzlu"]', '1162577870.71dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162577870.71dzlu"]', '1162577884.96dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162577884.96dzlu"]', '1162577982.15dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162577982.15dzlu"]', '1162577996.29dzlu': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1162577996.29dzlu"]', '1162578006.14dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578006.14dzlu"]', '1162578016.5dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578016.5dzlu"]', '1162578019.04dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578019.04dzlu"]', '1162578024.62dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578024.62dzlu"]', '1162578025.75dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578025.75dzlu"]', '1162578026.78dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578026.78dzlu"]', '1162578038.0dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578038.0dzlu"]', '1162578047.75dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578047.75dzlu"]', '1162578056.12dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578056.12dzlu"]', '1162578057.4dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578057.4dzlu"]', '1162578070.86dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578070.86dzlu"]', '1162578075.4dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578075.4dzlu"]', '1162578081.51dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578081.51dzlu"]', '1162578094.39dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578094.39dzlu"]', '1162578096.17dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578096.17dzlu"]', '1162578108.39dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578108.39dzlu"]', '1162578113.9dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578113.9dzlu"]', '1162578121.26dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578121.26dzlu"]', '1162578126.61dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578126.61dzlu"]', '1162578259.32dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578259.32dzlu"]', '1162578426.64dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578426.64dzlu"]', '1162578793.03dzlu': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578793.03dzlu"]', '1162578861.75dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578861.75dzlu"]', '1162578920.61dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578920.61dzlu"]', '1162578961.18dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578961.18dzlu"]', '1162578998.36dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162578998.36dzlu"]', '1162579001.46dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162579001.46dzlu"]', '1162579041.9dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162579041.9dzlu"]', '1162579056.73dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162579056.73dzlu"]', '1162579058.98dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162579058.98dzlu"]', '1162579060.73dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162579060.73dzlu"]', '1162579199.21dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162579199.21dzlu"]', '1162579331.75dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162579331.75dzlu"]', '1162579341.32dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162579341.32dzlu"]', '1162579346.82dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162579346.82dzlu"]', '1162579382.68dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162579382.68dzlu"]', '1162579401.92dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162579401.92dzlu"]', '1162579450.07dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162579450.07dzlu"]', '1162579477.28dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162579477.28dzlu"]', '1162579495.23dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162579495.23dzlu"]', '1162579496.87dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162579496.87dzlu"]', '1162579552.36dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162579552.36dzlu"]', '1162579651.42dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162579651.42dzlu"]', '1162579990.07dzlu': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1162579990.07dzlu"]', '1162580050.34dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162580050.34dzlu"]', '1162580134.26dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162580134.26dzlu"]', '1162580160.11dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162580160.11dzlu"]', '1162580232.92dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162580232.92dzlu"]', '1162580882.43dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162580882.43dzlu"]', '1162580923.68dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162580923.68dzlu"]', '1162581117.26dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581117.26dzlu"]', '1162581120.68dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581120.68dzlu"]', '1162581126.39dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581126.39dzlu"]', '1162581133.0dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581133.0dzlu"]', '1162581141.04dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581141.04dzlu"]', '1162581144.36dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581144.36dzlu"]', '1162581145.98dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581145.98dzlu"]', '1162581162.73dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581162.73dzlu"]', '1162581164.37dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581164.37dzlu"]', '1162581164.98dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581164.98dzlu"]', '1162581166.56dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581166.56dzlu"]', '1162581167.39dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581167.39dzlu"]', '1162581168.25dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581168.25dzlu"]', '1162581168.79dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581168.79dzlu"]', '1162581169.65dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581169.65dzlu"]', '1162581170.36dzlu': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581170.36dzlu"]', '1162581171.11dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581171.11dzlu"]', '1162581171.96dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581171.96dzlu"]', '1162581173.79dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581173.79dzlu"]', '1162581176.57dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581176.57dzlu"]', '1162581177.9dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581177.9dzlu"]', '1162581181.67dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581181.67dzlu"]', '1162581182.32dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581182.32dzlu"]', '1162581183.29dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581183.29dzlu"]', '1162581183.73dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581183.73dzlu"]', '1162581184.18dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581184.18dzlu"]', '1162581185.78dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581185.78dzlu"]', '1162581187.11dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581187.11dzlu"]', '1162581188.48dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581188.48dzlu"]', '1162581188.82dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581188.82dzlu"]', '1162581189.76dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581189.76dzlu"]', '1162581190.51dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581190.51dzlu"]', '1162581192.04dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581192.04dzlu"]', '1162581194.96dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581194.96dzlu"]', '1162581195.62dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581195.62dzlu"]', '1162581196.4dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581196.4dzlu"]', '1162581198.9dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581198.9dzlu"]', '1162581199.73dzlu': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581199.73dzlu"]', '1162581201.07dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581201.07dzlu"]', '1162581201.68dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581201.68dzlu"]', '1162581202.34dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581202.34dzlu"]', '1162581230.31dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581230.31dzlu"]', '1162581230.92dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581230.92dzlu"]', '1162581234.92dzlu': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162581234.92dzlu"]', '1162600301.39sdnaik': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1162600301.39sdnaik"]', '1163119773.31sdnaik': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1163119773.31sdnaik"]', '1163119776.08sdnaik': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1163119776.08sdnaik"]', '1163130907.42sdnaik': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1163130907.42sdnaik"]', '1163130908.98sdnaik': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1163130908.98sdnaik"]', '1163462918.28sdnaik': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1163462918.28sdnaik"]', '1171314304.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171314304.0dxschafe"]', '1171314304.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171314304.0dxschafe"]', '1171315072.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315072.0dxschafe"]', '1171315200.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315200.0dxschafe"]', '1171315200.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315200.0dxschafe0"]', '1171315200.0dxschafe1': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315200.0dxschafe1"]', '1171315200.0dxschafe2': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315200.0dxschafe2"]', '1171315200.0dxschafe3': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315200.0dxschafe3"]', '1171315200.0dxschafe4': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315200.0dxschafe4"]', '1171315200.0dxschafe5': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315200.0dxschafe5"]', '1171315200.0dxschafe6': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315200.0dxschafe6"]', '1171315200.0dxschafe7': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315200.0dxschafe7"]', '1171315200.0dxschafe8': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315200.0dxschafe8"]', '1171315200.0dxschafe9': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315200.0dxschafe9"]', '1171315200.0dxschafe:': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315200.0dxschafe:"]', '1171315200.0dxschafe;': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315200.0dxschafe;"]', '1171315200.0dxschafe<': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315200.0dxschafe<"]', '1171315200.0dxschafe=': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315200.0dxschafe="]', '1171315712.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315712.0dxschafe"]', '1171315840.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315840.0dxschafe"]', '1171315968.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315968.0dxschafe"]', '1171315968.0dxschafe1': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171315968.0dxschafe1"]', '1171316096.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316096.0dxschafe"]', '1171316096.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316096.0dxschafe0"]', '1171316096.0dxschafe1': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316096.0dxschafe1"]', '1171316224.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316224.0dxschafe"]', '1171316224.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316224.0dxschafe0"]', '1171316224.0dxschafe1': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316224.0dxschafe1"]', '1171316224.0dxschafe2': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316224.0dxschafe2"]', '1171316224.0dxschafe3': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316224.0dxschafe3"]', '1171316224.0dxschafe4': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316224.0dxschafe4"]', '1171316224.0dxschafe5': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316224.0dxschafe5"]', '1171316224.0dxschafe6': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316224.0dxschafe6"]', '1171316352.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316352.0dxschafe"]', '1171316480.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316480.0dxschafe0"]', '1171316480.0dxschafe1': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316480.0dxschafe1"]', '1171316480.0dxschafe2': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316480.0dxschafe2"]', '1171316864.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316864.0dxschafe"]', '1171316864.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316864.0dxschafe"]', '1171316864.0dxschafe1': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316864.0dxschafe1"]', '1171316864.0dxschafe2': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316864.0dxschafe1"]', '1171317248.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171317248.0dxschafe"]', '1171317248.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171317248.0dxschafe"]', '1171317760.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171317760.0dxschafe"]', '1171317760.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171317760.0dxschafe0"]', '1171318016.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171318016.0dxschafe"]', '1171318016.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171318016.0dxschafe0"]', '1171318016.0dxschafe1': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171318016.0dxschafe1"]', '1171318016.0dxschafe2': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1171318016.0dxschafe2"]', '1171318144.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171318144.0dxschafe"]', '1171318144.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171318144.0dxschafe0"]', '1171319168.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319168.0dxschafe"]', '1171319168.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319168.0dxschafe0"]', '1171319424.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319424.0dxschafe"]', '1171319424.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319424.0dxschafe0"]', '1171319424.0dxschafe1': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319424.0dxschafe1"]', '1171319424.0dxschafe2': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319424.0dxschafe2"]', '1171319552.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319552.0dxschafe0"]', '1171319680.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319680.0dxschafe"]', '1171319680.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319680.0dxschafe0"]', '1171319680.0dxschafe2': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319680.0dxschafe2"]', '1171319808.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319808.0dxschafe"]', '1171319808.0dxschafe2': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319808.0dxschafe2"]', '1171319808.0dxschafe4': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319808.0dxschafe4"]', '1171319808.0dxschafe5': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319808.0dxschafe5"]', '1171319808.0dxschafe6': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319808.0dxschafe6"]', '1171319936.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319936.0dxschafe"]', '1171319936.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319936.0dxschafe0"]', '1171319936.0dxschafe1': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319936.0dxschafe1"]', '1171319936.0dxschafe2': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319936.0dxschafe2"]', '1171319936.0dxschafe3': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319936.0dxschafe3"]', '1171319936.0dxschafe4': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171319936.0dxschafe4"]', '1171320064.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171320064.0dxschafe"]', '1171320064.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171320064.0dxschafe0"]', '1171320832.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171320832.0dxschafe"]', '1171320832.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171320832.0dxschafe0"]', '1171320832.0dxschafe1': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171320832.0dxschafe1"]', '1171320832.0dxschafe2': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171320832.0dxschafe2"]', '1171320832.0dxschafe3': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171320832.0dxschafe3"]', '1171320832.0dxschafe4': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171320832.0dxschafe4"]', '1171320960.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171320960.0dxschafe"]', '1171320960.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171320960.0dxschafe0"]', '1171321088.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171321088.0dxschafe"]', '1171321088.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171321088.0dxschafe0"]', '1171321216.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171321216.0dxschafe"]', '1171321216.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171321216.0dxschafe0"]', '1171321216.0dxschafe1': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171321216.0dxschafe1"]', '1171321216.0dxschafe2': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171321216.0dxschafe2"]', '1172863796.36kmuller': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1172863796.36kmuller"]', '1172863850.45kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1172863850.45kmuller"]', '1176186151.42mike': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1176186151.42mike"]', '1176258388.82kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1176258388.82kmuller"]', '1176258538.6kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1176258538.6kmuller"]', '1176258603.27kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1176258603.27kmuller"]', '1176258613.43kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1176258613.43kmuller"]', '1176258631.8kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1176258631.8kmuller"]', '1176258756.88kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1176258756.88kmuller"]', '1176258807.22kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1176258807.22kmuller"]', '1176258867.93kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1176258867.93kmuller"]', '1176258898.99kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1176258898.99kmuller"]', '1176258906.33kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1176258906.33kmuller"]', '1176258927.07kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1176258927.07kmuller"]', '1176258938.58kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1176258938.58kmuller"]', '1179791064.29Aholdun': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1179791064.29Aholdun"]', '1179791120.18Aholdun': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1179791120.18Aholdun"]', '1179791134.46Aholdun': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1179791134.46Aholdun"]', '1179791701.57Aholdun': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1179791701.57Aholdun"]', '1179793974.72Aholdun': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1179793974.72Aholdun"]', '1184716513.73kmuller': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1184716513.73kmuller"]', '1184716531.9kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184716531.9kmuller"]', '1184716542.28kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184716542.28kmuller"]', '1184717296.78kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184717296.78kmuller"]', '1184717358.28kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184717358.28kmuller"]', '1184717384.58kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184717384.58kmuller"]', '1184717452.25kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184717452.25kmuller"]', '1184717774.17kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184717774.17kmuller"]', '1184717903.14kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184717903.14kmuller"]', '1184717982.65kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184717982.65kmuller"]', '1184718346.58kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184718346.58kmuller"]', '1184718408.0kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184718408.0kmuller"]', '1184718756.09kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184718756.09kmuller"]', '1184718783.67kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184718783.67kmuller"]', '1184718841.62kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316224.0dxschafe2"]["Objects"]["1184718841.62kmuller"]', '1184718870.54kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184718870.54kmuller"]', '1184718946.59kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184718946.59kmuller"]', '1184719009.84kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184719009.84kmuller"]', '1184719101.83kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184719101.83kmuller"]', '1184719164.21kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184719164.21kmuller"]', '1184719190.71kmuller': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1184719190.71kmuller"]', '1184719277.87kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184719277.87kmuller"]', '1184719296.98kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184719296.98kmuller"]', '1184720985.21kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184720985.21kmuller"]', '1184721040.78kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184721040.78kmuller"]', '1184721161.87kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184721161.87kmuller"]', '1184721215.28kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184721215.28kmuller"]', '1184721233.95kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184721233.95kmuller"]', '1184721291.89kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184721291.89kmuller"]', '1184721406.87kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184721406.87kmuller"]', '1184721426.18kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184721426.18kmuller"]', '1184721812.36kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184721812.36kmuller"]', '1184721823.17kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184721823.17kmuller"]', '1184721880.11kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184721880.11kmuller"]', '1184721907.79kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184721907.79kmuller"]', '1184721953.51kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184721953.51kmuller"]', '1184721971.46kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184721971.46kmuller"]', '1184722024.71kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184722024.71kmuller"]', '1184722304.54kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184722304.54kmuller"]', '1184891648.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184891648.0dxschafe"]', '1184891776.0dxschafe0': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1184891776.0dxschafe0"]', '1184891776.0dxschafe1': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1184891776.0dxschafe1"]', '1185921614.52kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1185921614.52kmuller"]', '1186006891.46kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1186006891.46kmuller"]', '1186007092.14kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1186007092.14kmuller"]', '1186007187.37kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1186007187.37kmuller"]', '1186007215.54kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1186007215.54kmuller"]', '1186007267.03kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1186007267.03kmuller"]', '1186007415.7kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1186007415.7kmuller"]', '1187913949.14akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187913949.14akelts"]', '1187914011.44akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187914011.44akelts"]', '1187914045.25akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187914045.25akelts"]', '1187914073.11akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187914073.11akelts"]', '1187914104.39akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187914104.39akelts"]', '1187914121.33akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187914121.33akelts"]', '1187914162.75akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187914162.75akelts"]', '1187914199.86akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187914199.86akelts"]', '1187914236.7akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187914236.7akelts"]', '1187914264.97akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187914264.97akelts"]', '1187914386.67akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187914386.67akelts"]', '1187914411.34akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187914411.34akelts"]', 
'1187914475.81akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187914475.81akelts"]', '1187914641.95akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187914641.95akelts"]', '1187914830.53akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187914830.53akelts"]', '1187914874.69akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187914874.69akelts"]', '1187993555.97akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187993555.97akelts"]', '1187993700.22akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187993700.22akelts"]', '1187995354.27akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1187995354.27akelts"]', '1189212260.31kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1189212260.31kmuller"]', '1189212260.31kmuller0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1189212260.31kmuller"]', '1189456314.77kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1189212260.31kmuller"]["Objects"]["1189456314.77kmuller"]', '1189456366.97kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1189456366.97kmuller"]', '1189456447.08kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1189212260.31kmuller"]["Objects"]["1189456447.08kmuller"]', '1190666496.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1190666496.0dxschafe"]', '1190666880.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1190666880.0dxschafe"]', '1190741504.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1190741504.0dxschafe"]', '1190741504.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1190741504.0dxschafe"]', '1191629336.42kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1191629336.42kmuller"]', '1191629553.64kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1191629553.64kmuller"]', '1191629599.67kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1191629599.67kmuller"]', '1191629616.42kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1191629616.42kmuller"]', 
'1191629698.66kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1191629698.66kmuller"]', '1191629817.2kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1191629817.2kmuller"]', '1191629838.92kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1191629838.92kmuller"]', '1191629854.09kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1191629854.09kmuller"]', '1191629933.37kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1191629933.37kmuller"]', '1191629959.37kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1191629959.37kmuller"]', '1191629982.17kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1191629982.17kmuller"]', '1191630016.62kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1191630016.62kmuller"]', '1192043971.0kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192043971.0kmuller"]', '1192044061.55kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192044061.55kmuller"]', '1192044102.02kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192044102.02kmuller"]', '1192228480.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192228480.0dxschafe"]', '1192228608.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192228608.0dxschafe"]', '1192228608.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192228608.0dxschafe0"]', '1192228608.0dxschafe1': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192228608.0dxschafe1"]', '1192228608.0dxschafe2': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192228608.0dxschafe2"]', '1192228608.0dxschafe3': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192228608.0dxschafe3"]', '1192228608.0dxschafe4': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192228608.0dxschafe4"]', '1192228608.0dxschafe5': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192228608.0dxschafe5"]', '1192228608.0dxschafe6': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192228608.0dxschafe6"]', '1192228608.0dxschafe7': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1192228608.0dxschafe7"]', '1192228608.0dxschafe8': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192228608.0dxschafe8"]', '1192228736.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192228736.0dxschafe"]', '1192228736.0dxschafe0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192228736.0dxschafe0"]', '1192228736.0dxschafe1': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192228736.0dxschafe1"]', '1192228736.0dxschafe2': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192228736.0dxschafe2"]', '1192228736.0dxschafe3': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192228736.0dxschafe3"]', '1192228736.0dxschafe4': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192228736.0dxschafe4"]', '1192228736.0dxschafe5': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192228736.0dxschafe5"]', '1192733824.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192733824.0dxschafe"]', '1192733952.0dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192733952.0dxschafe"]', '1192836222.09kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1192836222.09kmuller"]', '1201041675.97dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1190741504.0dxschafe"]["Objects"]["1201041675.97dxschafe"]', '1201041677.36dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1190741504.0dxschafe"]["Objects"]["1201041677.36dxschafe"]', '1201041677.5dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316864.0dxschafe"]["Objects"]["1201041677.5dxschafe"]', '1201041678.38dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171314304.0dxschafe"]["Objects"]["1201041678.38dxschafe"]', '1201041678.91dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1171316864.0dxschafe1"]["Objects"]["1201041678.91dxschafe"]', '1201041679.88dxschafe': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1201041679.88dxschafe"]', '1201041679.89dxschafe': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1201041679.89dxschafe"]', '1210704118.17kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1210704118.17kmuller"]', '1210704165.86kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1210704165.86kmuller"]', '1241909300.25ian': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1241909300.25ian"]', '1241909665.5ian': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1241909665.5ian"]', '1242332258.36kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1242332258.36kmuller"]', '1242332511.27kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1242332511.27kmuller"]', '1242332757.48kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1242332757.48kmuller"]', '1242332822.32kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1242332822.32kmuller"]', '1242332871.57kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1242332871.57kmuller"]', '1242332937.68kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1242332937.68kmuller"]', '1242332966.9kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1242332966.9kmuller"]', '1248385792.0jloehrle': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1248385792.0jloehrle"]', '1248385792.0jloehrle0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1248385792.0jloehrle0"]', '1251247369.96caoconno': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1189212260.31kmuller"]["Objects"]["1251247369.96caoconno"]', '1251247425.66caoconno': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1189212260.31kmuller"]["Objects"]["1251247425.66caoconno"]', '1257284346.42caoconno': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1257284346.42caoconno"]', '1257284362.29caoconno': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1257284362.29caoconno"]', '1257284384.94caoconno': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1257284384.94caoconno"]', '1267728993.08Jared': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1267728993.08Jared"]', '1269464043.62akelts': 
'["Objects"]["1160614528.73sdnaik"]["Objects"]["1267728993.08Jared"]["Objects"]["1269464043.62akelts"]', '1269464058.34akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1267728993.08Jared"]["Objects"]["1269464058.34akelts"]', '1269464073.87akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1267728993.08Jared"]["Objects"]["1269464073.87akelts"]', '1269464088.19akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1267728993.08Jared"]["Objects"]["1269464088.19akelts"]', '1269464139.18akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1267728993.08Jared"]["Objects"]["1269464139.18akelts"]', '1269464174.37akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1267728993.08Jared"]["Objects"]["1269464174.37akelts"]', '1269464413.98akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1267728993.08Jared"]["Objects"]["1269464413.98akelts"]', '1269464456.65akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1267728993.08Jared"]["Objects"]["1269464456.65akelts"]', '1270764606.85akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1270764606.85akelts"]', '1270764673.88akelts': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1270764673.88akelts"]', '1286984549.68gcarranza': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1286984549.68gcarranza"]', '1287611070.99kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1287611070.99kmuller"]', '1287611070.99kmuller0': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1287611070.99kmuller"]', '1287611234.14kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1287611070.99kmuller"]["Objects"]["1287611234.14kmuller"]', '1287612604.35kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1287612604.35kmuller"]', '1287612652.97kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1287611070.99kmuller"]["Objects"]["1287612652.97kmuller"]', '1287612731.53kmuller': '["Objects"]["1160614528.73sdnaik"]["Objects"]["1287612731.53kmuller"]'}}
extraInfo = {'camPos': Point3(-122.569, -547.187, 17.3722), 'camHpr': VBase3(76.7509, -0.849717, 0), 'focalLength': 0.657999992371, 'skyState': 2, 'fog': 0} | 31,032.285714 | 216,785 | 0.666099 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 135,120 | 0.622025 |
03940eed9922e68c7391552d1a5597a7b61786aa | 429 | py | Python | src/waldur_slurm/migrations/0006_allocationusage_deposit_usage.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 26 | 2017-10-18T13:49:58.000Z | 2021-09-19T04:44:09.000Z | src/waldur_slurm/migrations/0006_allocationusage_deposit_usage.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 14 | 2018-12-10T14:14:51.000Z | 2021-06-07T10:33:39.000Z | src/waldur_slurm/migrations/0006_allocationusage_deposit_usage.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 32 | 2017-09-24T03:10:45.000Z | 2021-10-16T16:41:09.000Z | # Generated by Django 1.11.7 on 2018-03-05 22:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 1.11): adds a `deposit_usage`
    # decimal column (8 digits, 2 decimal places, default 0) to the
    # `allocationusage` model of the waldur_slurm app.
    # NOTE: migrations must stay replayable — edit with care.
    dependencies = [
        ('waldur_slurm', '0005_add_deposit'),
    ]
    operations = [
        migrations.AddField(
            model_name='allocationusage',
            name='deposit_usage',
            field=models.DecimalField(decimal_places=2, default=0, max_digits=8),
        ),
    ]
| 23.833333 | 81 | 0.62704 | 336 | 0.783217 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.261072 |
039656005db308c2cb41feae58290804aa2ef90a | 5,940 | py | Python | scripts/error_metrics_parser.py | PoonLab/gromstole | 8c8ec259c5a98fd2ef5aad8ec0293eddb0184432 | [
"MIT"
] | null | null | null | scripts/error_metrics_parser.py | PoonLab/gromstole | 8c8ec259c5a98fd2ef5aad8ec0293eddb0184432 | [
"MIT"
] | 27 | 2022-01-07T17:57:57.000Z | 2022-03-29T19:34:56.000Z | scripts/error_metrics_parser.py | PoonLab/gromstole | 8c8ec259c5a98fd2ef5aad8ec0293eddb0184432 | [
"MIT"
] | 1 | 2022-02-24T17:01:34.000Z | 2022-02-24T17:01:34.000Z | """
from https://github.com/PoonLab/MiCall-Lite, which was forked from
https://github.com/cfe-lab/MiCall.
MiCall is distributed under a dual AGPLv3 license.
"""
import sys
import argparse
from csv import DictWriter
from struct import unpack
import csv
import os
from operator import itemgetter
import sys
import math
from itertools import groupby
def read_records(data_file, min_version):
    """Yield fixed-length raw records from an Illumina InterOp file.

    The file starts with a two-byte header: file version, then the length
    of every following record.  The remainder of the file is a sequence of
    records of exactly that length.

    :param file data_file: an open binary file-like object positioned at
        the start of the InterOp header.
    :param int min_version: reject files whose version byte is below this.
    :return: an iterator over raw byte strings, one per record.
    :raises IOError: if the file version is too old, or the file ends in
        the middle of a record.
    """
    version, record_size = unpack('!BB', data_file.read(2))
    if version < min_version:
        raise IOError(
            'File version {} is less than minimum version {} in {}.'.format(
                version,
                min_version,
                data_file.name))
    # read() returns b'' at EOF, which terminates the sentinel iterator.
    for chunk in iter(lambda: data_file.read(record_size), b''):
        if len(chunk) < record_size:
            raise IOError('Partial record of length {} found in {}.'.format(
                len(chunk),
                data_file.name))
        yield chunk
def read_errors(data_file):
    """Yield phiX error-rate records from an InterOp error-metrics file.

    Each record is decoded into a dictionary with the keys:
    lane, tile, cycle (uint16); error_rate (float); and
    num_0_errors .. num_4_errors (uint32 counts of reads with that
    many mismatches).

    :param file data_file: an open binary ErrorMetricsOut.bin file.
    :return: an iterator of dicts, one per record.
    """
    PARSED_LENGTH = 30  # only the first 30 bytes of each record are decoded
    field_names = ('lane', 'tile', 'cycle', 'error_rate',
                   'num_0_errors', 'num_1_error', 'num_2_errors',
                   'num_3_errors', 'num_4_errors')
    for record in read_records(data_file, min_version=3):
        values = unpack('<HHHfLLLLL', record[:PARSED_LENGTH])
        yield dict(zip(field_names, values))
def _yield_cycles(records, read_lengths):
sorted_records = sorted(map(itemgetter('tile', 'cycle', 'error_rate'),
records))
max_forward_cycle = read_lengths and read_lengths[0] or sys.maxsize
min_reverse_cycle = read_lengths and sum(read_lengths[:-1])+1 or sys.maxsize
for record in sorted_records:
cycle = record[1]
if cycle >= min_reverse_cycle:
cycle = min_reverse_cycle - cycle - 1
elif cycle > max_forward_cycle:
continue
rate = round(record[2], 4)
yield record[0], cycle, rate
def _record_grouper(record):
# Group by tile and sign of cycle (forward or reverse).
return (record[0], int(math.copysign(1, record[1])))
def write_phix_csv(out_file, records, read_lengths=None, summary=None):
    """ Write phiX error rate data to a comma-separated-values file.
    Missing cycles are written with blank error rates, index reads are not
    written, and reverse reads are written with negative cycles.
    :param out_file: an open file to write to
    :param records: a sequence of dictionaries like those yielded from
    read_phix().
    :param read_lengths: a list of lengths for each type of read: forward,
    indexes, and reverse
    :param dict summary: a dictionary to hold the summary values:
    error_rate_fwd and error_rate_rev.
    """
    writer = csv.writer(out_file, lineterminator=os.linesep)
    writer.writerow(['tile', 'cycle', 'errorrate'])
    # Running totals for the summary averages: index 1 accumulates the
    # forward read, index 0 the reverse read (see summary_index below).
    error_sums = [0.0, 0.0]
    error_counts = [0, 0]
    # Records are grouped per tile and per direction; `sign` is +1 for the
    # forward read and -1 for the reverse read, so cycles advance by `sign`.
    for (_tile, sign), group in groupby(_yield_cycles(records, read_lengths),
                                        _record_grouper):
        previous_cycle = 0
        record = None
        for record in group:
            cycle = record[1]
            previous_cycle += sign
            # Emit blank-rate rows for cycles missing from the data.
            while previous_cycle*sign < cycle*sign:
                writer.writerow((record[0], previous_cycle, ''))
                previous_cycle += sign
            writer.writerow(record)
            # Map sign -1 -> index 0 (reverse) and +1 -> index 1 (forward).
            summary_index = (sign+1) // 2
            error_sums[summary_index] += record[2]
            error_counts[summary_index] += 1
        # Pad with blank rows out to the full length of this read.
        if read_lengths:
            read_length = read_lengths[0] if sign == 1 else -read_lengths[-1]
            while previous_cycle*sign < read_length*sign:
                previous_cycle += sign
                writer.writerow((record[0], previous_cycle, ''))
    if error_counts[1] > 0 and summary is not None:
        summary['error_rate_fwd'] = error_sums[1]/error_counts[1]
    if error_counts[0] > 0 and summary is not None:
        summary['error_rate_rev'] = error_sums[0]/error_counts[0]
def main():
    """Command-line entry point: read an Illumina ErrorMetricsOut.bin
    InterOp file and write the phiX error rates as a per-tile,
    per-cycle CSV."""
    aparser = argparse.ArgumentParser(description='Extract phiX174 error rates from InterOp file')
    aparser.add_argument('bin', type=argparse.FileType('rb'), help='ErrorMetricsOut.bin file from run')
    aparser.add_argument('output', type=argparse.FileType('w'), help='File to write CSV output')
    aparser.add_argument('-l', '--len', type=int, default=151, help='Read length')
    aparser.add_argument('-a', type=int, default=8, help='Adapter sequence length, defaults to 8')
    args = aparser.parse_args()
    #parse_interop(args.bin, args.output)
    records = read_errors(args.bin)
    # Read layout passed downstream: forward read, two index/adapter
    # reads, reverse read.
    write_phix_csv(args.output, records, [args.len, args.a, args.a, args.len])
if __name__ == '__main__':
    # Run the CLI only when executed as a script, not on import.
    main()
| 36.89441 | 103 | 0.640236 | 0 | 0 | 2,797 | 0.470875 | 0 | 0 | 0 | 0 | 2,199 | 0.370202 |
0396dd73ecef7ab82a3884a101c60a156ca888d8 | 3,754 | py | Python | Image Segmtation on COCO Dataset/keras_segmentation/data_utils/data_loader.py | joykour/COCO-Dataset-2018-Stuff-Segmentation-Challenge | 973ef2d75c1821c8348fd01a3f2b084c4243ebd2 | [
"MIT"
] | 1 | 2019-11-14T06:49:17.000Z | 2019-11-14T06:49:17.000Z | Image Segmtation on COCO Dataset/keras_segmentation/data_utils/data_loader.py | joykour/COCO-Dataset-2018-Stuff-Segmentation-Challenge | 973ef2d75c1821c8348fd01a3f2b084c4243ebd2 | [
"MIT"
] | null | null | null | Image Segmtation on COCO Dataset/keras_segmentation/data_utils/data_loader.py | joykour/COCO-Dataset-2018-Stuff-Segmentation-Challenge | 973ef2d75c1821c8348fd01a3f2b084c4243ebd2 | [
"MIT"
] | 1 | 2019-11-16T07:39:00.000Z | 2019-11-16T07:39:00.000Z |
import numpy as np
import cv2
import glob
import itertools
import os
from tqdm import tqdm
from ..models.config import IMAGE_ORDERING
from .augmentation import augment_seg
import random
random.seed(0)
class_colors = [ ( random.randint(0,255),random.randint(0,255),random.randint(0,255) ) for _ in range(5000) ]
def get_pairs_from_paths(images_path, segs_path):
	"""Pair every image in *images_path* with its annotation in *segs_path*.

	Images may be .png/.jpg/.jpeg; the matching annotation is expected to
	be a .png with the same basename.  Existence of the annotation file is
	NOT checked here (that validation was deliberately disabled; see
	verify_segmentation_dataset for the checked variant).

	:return: list of (image_path, segmentation_path) tuples.
	"""
	# Fix: the original also built a `segmentations_d` dict that was never
	# used (leftover support for a commented-out assert) — removed.
	images = []
	for pattern in ("*.png", "*.jpg", "*.jpeg"):
		images += glob.glob(os.path.join(images_path, pattern))
	ret = []
	for im in images:
		# Annotations are always stored as .png, whatever the image format.
		seg_bnme = os.path.basename(im).replace(".jpg", ".png").replace(".jpeg", ".png")
		ret.append((im, os.path.join(segs_path, seg_bnme)))
	return ret
def get_image_arr(path, width, height, imgNorm="sub_mean", odering='channels_first'):
	"""Load an image (file path or ndarray), resize and normalise it.

	imgNorm selects the normalisation:
	  - "sub_and_divide": scale pixels to [-1, 1]
	  - "sub_mean": subtract fixed per-channel means and flip BGR -> RGB
	  - "divide": scale pixels to [0, 1]
	With odering='channels_first' the channel axis is moved to the front.
	"""
	img = path if type(path) is np.ndarray else cv2.imread(path, 1)
	if imgNorm == "sub_and_divide":
		img = np.float32(cv2.resize(img, (width, height))) / 127.5 - 1
	elif imgNorm == "sub_mean":
		resized = cv2.resize(img, (width, height)).astype(np.float32)
		# Per-channel offsets on the B, G, R planes (cv2 loads BGR);
		# presumably the standard VGG/Caffe channel means — confirm.
		resized[:, :, 0] -= 103.939
		resized[:, :, 1] -= 116.779
		resized[:, :, 2] -= 123.68
		img = resized[:, :, ::-1]  # BGR -> RGB
	elif imgNorm == "divide":
		img = cv2.resize(img, (width, height)).astype(np.float32) / 255.0
	if odering == 'channels_first':
		img = np.rollaxis(img, 2, 0)
	return img
def get_segmentation_arr(path, nClasses, width, height, no_reshape=False):
	"""Load an annotation image and one-hot encode it per class.

	Pixel values in the first channel are interpreted as class ids.
	Returns an array of shape (height, width, nClasses) when no_reshape
	is True, otherwise flattened to (width*height, nClasses).
	"""
	ann = path if type(path) is np.ndarray else cv2.imread(path, 1)
	# Nearest-neighbour interpolation keeps class ids intact when resizing.
	ann = cv2.resize(ann, (width, height), interpolation=cv2.INTER_NEAREST)[:, :, 0]
	one_hot = np.zeros((height, width, nClasses))
	for class_id in range(nClasses):
		one_hot[:, :, class_id] = (ann == class_id).astype(int)
	if no_reshape:
		return one_hot
	return np.reshape(one_hot, (width*height, nClasses))
def verify_segmentation_dataset(images_path, segs_path, n_classes):
	"""Sanity-check that every image has a matching, valid annotation.

	Raises AssertionError when the dataset is empty, when an image and its
	annotation differ in size, or when an annotation contains a class id
	outside [0, n_classes).
	"""
	pairs = get_pairs_from_paths(images_path, segs_path)
	assert len(pairs) > 0, "Dataset looks empty or path is wrong "
	for im_fn, seg_fn in tqdm(pairs):
		img = cv2.imread(im_fn)
		seg = cv2.imread(seg_fn)
		same_size = img.shape[0] == seg.shape[0] and img.shape[1] == seg.shape[1]
		assert same_size, "The size of image and the annotation does not match or they are corrupt "+ im_fn + " " + seg_fn
		max_label = np.max(seg[:, :, 0])
		assert max_label < n_classes, "The pixel values of seg image should be from 0 to "+str(n_classes-1) + " . Found pixel value "+str(max_label)
	print("Dataset verified! ")
def image_segmentation_generator(images_path, segs_path, batch_size, n_classes, input_height, input_width, output_height, output_width, do_augment=False):
	"""Endless generator of (X, Y) training batches.

	X is a batch of preprocessed images (see get_image_arr), Y the matching
	batch of one-hot segmentation maps (see get_segmentation_arr).  The
	image/annotation pair list is shuffled once and then cycled forever.

	:param do_augment: when True, run augment_seg on each image/annotation
		pair before preprocessing.
	"""
	img_seg_pairs = get_pairs_from_paths(images_path, segs_path)
	random.shuffle(img_seg_pairs)
	zipped = itertools.cycle(img_seg_pairs)
	while True:
		X = []
		Y = []
		for _ in range(batch_size):
			im, seg = next(zipped)
			im = cv2.imread(im, 1)
			seg = cv2.imread(seg, 1)
			if do_augment:
				# BUG FIX: the original read an undefined name `img` here,
				# raising NameError whenever do_augment was True; the loaded
				# image `im` is what must be augmented and used below.
				im, seg[:, :, 0] = augment_seg(im, seg[:, :, 0])
			X.append(get_image_arr(im, input_width, input_height, odering=IMAGE_ORDERING))
			Y.append(get_segmentation_arr(seg, n_classes, output_width, output_height))
		yield np.array(X), np.array(Y)
| 27.40146 | 171 | 0.661161 | 0 | 0 | 747 | 0.198988 | 0 | 0 | 0 | 0 | 558 | 0.148641 |
03972df064d30776ab61ace4cb50e0a249055d77 | 6,032 | py | Python | tests/tests_basic.py | mehrdad-shokri/fluxcapacitor | b4e646e3048a317b33f5a84741b7962fd69cdc61 | [
"MIT"
] | 648 | 2015-01-08T18:05:33.000Z | 2022-03-30T04:35:08.000Z | tests/tests_basic.py | mehrdad-shokri/fluxcapacitor | b4e646e3048a317b33f5a84741b7962fd69cdc61 | [
"MIT"
] | 4 | 2017-08-24T18:05:01.000Z | 2018-01-19T09:06:06.000Z | tests/tests_basic.py | mehrdad-shokri/fluxcapacitor | b4e646e3048a317b33f5a84741b7962fd69cdc61 | [
"MIT"
] | 21 | 2015-05-12T13:03:15.000Z | 2022-03-12T15:12:26.000Z | import os
import tests
from tests import at_most, compile, savefile
import subprocess
# Feature detection at import time: skip the nodejs / erlang tests when
# the corresponding interpreters are not installed on this machine.
node_present = True
erlang_present = True
if os.system("node -v >/dev/null 2>/dev/null") != 0:
    print " [!] ignoring nodejs tests"
    node_present = False
if (os.system("erl -version >/dev/null 2>/dev/null") != 0 or
    os.system("which escript >/dev/null 2>/dev/null") != 0):
    print " [!] ignoring erlang tests"
    erlang_present = False
# Shell implementation of "sleep sort": sleep for each argument in
# parallel, so the values are printed in ascending order.
sleep_sort_script='''\
#!/bin/bash
echo "Unsorted: $*"
function f() {
    sleep "$1"
    echo -n "$1 "
}
while [ -n "$1" ]; do
    f "$1" &
    shift
done
wait
echo
'''
class SingleProcess(tests.TestCase):
    """Single-process runs under fluxcapacitor.

    Each test launches a program whose nominal runtime is tens of seconds
    to minutes; with virtualised time it must finish within the wall-clock
    bound enforced by the @at_most decorator.
    """

    @at_most(seconds=2)
    def test_bash_sleep(self):
        self.system("sleep 10")

    @at_most(seconds=2)
    def test_bash_bash_sleep(self):
        self.system("bash -c 'sleep 120;'")

    @at_most(seconds=2)
    def test_python2_sleep(self):
        self.system('python2 -c "import time; time.sleep(10)"')

    @at_most(seconds=2)
    def test_python2_select(self):
        self.system('python2 -c "import select; select.select([],[],[], 10)"')

    @at_most(seconds=2)
    def test_python2_poll(self):
        self.system('python2 -c "import select; select.poll().poll(10000)"')

    @at_most(seconds=2)
    def test_python2_epoll(self):
        self.system('python2 -c "import select; select.epoll().poll(10000)"')

    @at_most(seconds=2)
    def test_node_epoll(self):
        if node_present:
            self.system('node -e "setTimeout(function(){},10000);"')

    def test_bad_command(self):
        # The shell reports 127 for "command not found".
        self.system('command_that_doesnt exist',
                    returncode=127, ignore_stderr=True)

    def test_return_status(self):
        # Exit codes must pass through unchanged; -1 wraps to 255.
        self.system('python2 -c "import sys; sys.exit(188)"', returncode=188)
        self.system('python2 -c "import sys; sys.exit(-1)"', returncode=255)

    @at_most(seconds=2)
    @compile(code='''
#include <unistd.h>
int main() {
    sleep(10);
    return(0);
}''')
    def test_c_sleep(self, compiled=None):
        self.system(compiled)

    @at_most(seconds=2)
    @compile(code='''
#include <time.h>
int main() {
    struct timespec ts = {1, 0};
    nanosleep(&ts, NULL);
    return(0);
}''')
    def test_c_nanosleep(self, compiled=None):
        self.system(compiled)

    @at_most(seconds=5)
    @savefile(suffix="erl", text='''\
#!/usr/bin/env escript
%%! -smp disable +A1 +K true -noinput
-export([main/1]).
main(_) ->
    timer:sleep(10*1000),
    halt(0).
''')
    def test_erlang_sleep(self, filename=None):
        if erlang_present:
            self.system("escript %s" % (filename,))

    @at_most(seconds=5)
    @savefile(suffix="erl", text='''\
#!/usr/bin/env escript
%%! -smp enable +A30 +K true -noinput
-export([main/1]).
main(_) ->
    timer:sleep(10*1000),
    halt(0).
''')
    def test_erlang_sleep_smp(self, filename=None):
        if erlang_present:
            self.system("escript %s" % (filename,))

    @at_most(seconds=5)
    @savefile(suffix="erl", text='''\
#!/usr/bin/env escript
%%! -smp enable +A30 +K false -noinput
-export([main/1]).
main(_) ->
    timer:sleep(10*1000),
    halt(0).
''')
    def test_erlang_sleep_smp_no_epoll(self, filename=None):
        if erlang_present:
            self.system("escript %s" % (filename,))

    @at_most(seconds=5)
    @savefile(suffix="erl", text='''\
#!/usr/bin/env escript
%%! -smp disable +A1 +K true -noinput
-export([main/1]).
main(_) ->
    self() ! msg,
    proc(10),
    receive
        _ -> ok
    end.
proc(0) ->
    receive
        _ -> halt(0)
    end;
proc(N) ->
    Pid = spawn(fun () -> proc(N-1) end),
    receive
        _ -> timer:sleep(1000),
             Pid ! msg
    end.
''')
    def test_erlang_process_staircase(self, filename=None):
        if erlang_present:
            self.system("escript %s" % (filename,))

    @at_most(seconds=2)
    def test_perl_sleep(self):
        self.system("perl -e 'sleep 10'")

    @at_most(seconds=5)
    @savefile(suffix="sh", text=sleep_sort_script)
    def test_sleep_sort_large_values(self, filename=None):
        # BUG FIX: this method used to share the name `test_sleep_sort`
        # with the one below, so this definition was shadowed by the
        # later one and never ran.  Renamed so both tests execute.
        self.system("bash %s 1 12 1231 123213 13212 > /dev/null" % (filename,))

    @at_most(seconds=5)
    @savefile(suffix="sh", text=sleep_sort_script)
    def test_sleep_sort(self, filename=None):
        self.system("bash %s 5 3 6 3 6 3 1 4 7 > /dev/null" % (filename,))

    @at_most(seconds=10)
    def test_parallel_sleeps(self):
        for i in range(10):
            stdout = self.system(' -- '.join(['bash -c "date +%s"',
                                              'bash -c "sleep 60; date +%s"',
                                              'bash -c "sleep 120; date +%s"']),
                                 capture_stdout=True)
            a, b, c = [int(l) for l in stdout.split()]
            assert 55 < (b - a) < 65, str(b-a)
            assert 55 < (c - b) < 65, str(c-b)
            assert 110 < (c - a) < 130, str(c-a)

    @at_most(seconds=3)
    def test_file_descriptor_leak(self):
        out = subprocess.check_output("ls /proc/self/fd", shell=True)
        normal_fds = len(out.split('\n'))
        stdout = self.system(' -- '.join(['sleep 1',
                                          'sleep 60',
                                          'sleep 120',
                                          'bash -c "sleep 180; ls /proc/self/fd"']),
                             capture_stdout=True)
        after_fork_fds = len(stdout.split('\n'))
        assert normal_fds == after_fork_fds

    @at_most(seconds=4)
    def test_2546_wraparound(self):
        # 55 sleeps of 315360000s is centuries of virtual time; the year
        # printed at the end must land well past the 32-bit 2038 horizon.
        if os.uname()[4] == "x86_64":
            stdout = self.system("bash -c 'for i in `seq 1 55`; do sleep 315360000; done; date +%Y'",
                                 capture_stdout=True)
            assert int(stdout) > 2500
if __name__ == '__main__':
    # Allow running this file directly with the stock unittest runner.
    import unittest
    unittest.main()
| 27.418182 | 101 | 0.54443 | 5,353 | 0.887434 | 0 | 0 | 4,865 | 0.806532 | 0 | 0 | 2,314 | 0.383621 |
039b50ec23666881fdd70d72494a3f55144b2adf | 444 | py | Python | tests/test_root.py | oclyke-dev/blue-heron | 05d59b66ff1cb10a40e0fb01ee65f778a7c157a8 | [
"MIT"
] | null | null | null | tests/test_root.py | oclyke-dev/blue-heron | 05d59b66ff1cb10a40e0fb01ee65f778a7c157a8 | [
"MIT"
] | null | null | null | tests/test_root.py | oclyke-dev/blue-heron | 05d59b66ff1cb10a40e0fb01ee65f778a7c157a8 | [
"MIT"
] | null | null | null |
import blue_heron
import pytest
from pathlib import Path
from lxml import etree as ET
from blue_heron import Root, Drawing
@pytest.fixture(scope='module')
def test_board():
    # Module-scoped fixture: parse the sample board file once and share
    # its XML root element across all tests in this module.
    with open(Path(__file__).parent/'data/ArtemisDevKit.brd', 'r') as f:
        root = ET.parse(f).getroot()
    yield root
def test_get_drawing(test_board):
    """Root.drawing should expose the board's Drawing object."""
    root = Root(test_board)
    drawing = root.drawing
    # Fix: the original compared type() objects (flake8 E721) and built a
    # throwaway Drawing(None) just to obtain the class; isinstance is the
    # idiomatic check.
    assert isinstance(drawing, blue_heron.drawing.Drawing)
| 23.368421 | 70 | 0.747748 | 0 | 0 | 134 | 0.301802 | 166 | 0.373874 | 0 | 0 | 35 | 0.078829 |
039d7b8e1580ac94ac977880c70a567b910c07cd | 2,223 | py | Python | scripts/evidence/base.py | Oneledger/protocol | 1008fd12d384c9821be2a2ea34b3061cf24ae6bf | [
"Apache-2.0"
] | 38 | 2018-06-30T15:22:06.000Z | 2022-03-20T22:23:07.000Z | scripts/evidence/base.py | Oneledger/protocol | 1008fd12d384c9821be2a2ea34b3061cf24ae6bf | [
"Apache-2.0"
] | 27 | 2018-09-02T09:57:19.000Z | 2021-09-17T18:06:35.000Z | scripts/evidence/base.py | Oneledger/protocol | 1008fd12d384c9821be2a2ea34b3061cf24ae6bf | [
"Apache-2.0"
] | 13 | 2018-06-30T15:22:08.000Z | 2020-07-28T15:00:40.000Z | from __future__ import print_function
from sdk.actions import (
ListValidators,
NodeID,
)
from sdk.cmd_call import (
GetNodeCreds,
Account_Add,
Send,
GetNodeKey,
)
from sdk.rpc_call import (
node_0,
node_2,
node_3,
)
# Query the validator set once at import time and index it by node name.
validators = ListValidators()
valDict = {data['name']: data for data in validators}

# Addresses are used without their 3-character prefix (stripped via [3:]).
# Node 0 reports the evidence; node 1 is the accused ("malicious") validator;
# nodes 2 and 3 vote on it.
reporter = valDict['0']['address'][3:]
reporter_staking = valDict['0']['stakeAddress'][3:]
reporter_staking_key = GetNodeKey('0')
malicious = valDict['1']['address'][3:]
voter_2 = valDict['2']['address'][3:]
voter_3 = valDict['3']['address'][3:]

# Local wallet credentials for each participating node.
reporter_creads = GetNodeCreds('0')
voter_2_creds = GetNodeCreds('2')
voter_3_creds = GetNodeCreds('3')
# Human-readable labels for the numeric evidence status codes.
statuses = {
    1: 'Voting',
    2: 'Innocent',
    3: 'Guilty',
}


def get_status_display(status):
    """Return the display label for a numeric status code.

    Raises KeyError for unknown codes.
    """
    label = statuses[status]
    return label
def set_up():
    """Prepare accounts and balances for the evidence-voting scenario.

    Imports the credentials of nodes 0, 2 and 3 (plus node 0's staking key)
    into their local wallets, then funds each voting address with 10 OLT
    from the reporter's staking address. Asserts on every step.
    """
    # Import each node's account credentials into its local wallet
    # (password '1234') so the nodes can sign transactions.
    is_added = Account_Add(node_0, reporter_creads['pub'], reporter_creads['priv'], '1234')
    assert is_added is True, 'Failed to add account for %s' % reporter_creads['address']
    is_added = Account_Add(node_2, voter_2_creds['pub'], voter_2_creds['priv'], '1234')
    assert is_added is True, 'Failed to add account for %s' % voter_2_creds['address']
    is_added = Account_Add(node_3, voter_3_creds['pub'], voter_3_creds['priv'], '1234')
    assert is_added is True, 'Failed to add account for %s' % voter_3_creds['address']
    # The staking account's public key comes from NodeID(), not the creds dump.
    staking_pub_key = NodeID()
    is_added = Account_Add(node_0, staking_pub_key, reporter_staking_key['priv'], '1234')
    assert is_added is True, 'Failed to add staking account for %s' % reporter_creads['address']
    print("Accounts for nodes 0, 2, 3 and staking were created!")
    # Seed each participating address with 10 OLT from the reporter's staking address.
    is_sent = Send(node_0, reporter_staking, reporter, 10, '1234', currency='OLT', fee='0.0001')
    assert is_sent is True, 'Failed to send on %s' % reporter
    is_sent = Send(node_0, reporter_staking, voter_2, 10, '1234', currency='OLT', fee='0.0001')
    assert is_sent is True, 'Failed to send on %s' % voter_2
    is_sent = Send(node_0, reporter_staking, voter_3, 10, '1234', currency='OLT', fee='0.0001')
    assert is_sent is True, 'Failed to send on %s' % voter_3
    print("Validator balances for 0, 2, 3 ready!")
| 30.452055 | 96 | 0.68601 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 583 | 0.262258 |
039de2956a93e9fe91351642653c24e863f9b4ef | 219 | py | Python | py-data/plugin.video.arteplussept/problems/api-related/1/correct-usages/get_last7days.py | ualberta-smr/NFBugs | 65d9ef603e9527b3d83f53af0606b1ae240513f1 | [
"MIT"
] | 3 | 2019-10-01T19:58:24.000Z | 2021-09-17T04:03:21.000Z | py-data/plugin.video.arteplussept/problems/api-related/1/correct-usages/get_last7days.py | senseconcordia/NFBugsExtended | 60058ccbd64107018a92ede73056d08ecbdaaed2 | [
"MIT"
] | 22 | 2018-08-23T15:15:37.000Z | 2019-03-15T17:09:41.000Z | py-data/plugin.video.arteplussept/problems/api-related/1/correct-usages/get_last7days.py | senseconcordia/NFBugsExtended | 60058ccbd64107018a92ede73056d08ecbdaaed2 | [
"MIT"
] | 1 | 2019-02-11T18:26:36.000Z | 2019-02-11T18:26:36.000Z | from xbmcswift2 import Plugin
from xbmcswift2 import actions
import requests
import os
import urllib2
import time
import datetime
def get_last7days():
    # Flatten the per-day listings for every date returned by get_dates().
    # NOTE(review): relies on module-level helpers `flatten`, `get_day` and
    # `get_dates` defined elsewhere in this plugin; presumably get_dates()
    # yields (date, label) pairs for the last 7 days — confirm.
    return flatten([get_day(date) for (date, _) in get_dates()])
| 19.909091 | 64 | 0.780822 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
039fea1ecb4a2b365be7d8720f22d5ffa6995f74 | 864 | py | Python | pharma/utils.py | RishiMenon2004/med-bay | ed039b1bf3b10fb1b5097567df28fb4575c95b18 | [
"MIT"
] | null | null | null | pharma/utils.py | RishiMenon2004/med-bay | ed039b1bf3b10fb1b5097567df28fb4575c95b18 | [
"MIT"
] | null | null | null | pharma/utils.py | RishiMenon2004/med-bay | ed039b1bf3b10fb1b5097567df28fb4575c95b18 | [
"MIT"
] | 1 | 2021-09-17T07:01:28.000Z | 2021-09-17T07:01:28.000Z | from .models import Bill, BillUnit
# Getting cart total
def get_total(orders):
    """Return the cart total: sum of quantity * item price over all orders."""
    return sum(order.quantity * order.item.price for order in orders)
# Getting total for bill objects
def get_bill_total(bill_units):
    """Return the bill total: sum of quantity * unit price over all bill units."""
    return sum(unit.quantity * unit.price for unit in bill_units)
# Generating bills
def generate_bill(name, contact_num, date, orders):
    """Create and persist a Bill plus one BillUnit per order.

    Each BillUnit copies the order item's name, description and price at
    billing time, so the bill keeps its own snapshot of those values.

    Returns the saved Bill instance.
    """
    bill = Bill(name=name, contact_num=contact_num, date=date)
    bill.save()
    # Creating the bill_units
    # (an unused running `total` accumulator was removed here; bill totals
    # are computed on demand via get_bill_total)
    for order in orders:
        unit = BillUnit(name=order.item.name, quantity=order.quantity,
                        desc=order.item.desc, price=order.item.price,
                        bill=bill)
        unit.save()
    return bill
| 24.685714 | 70 | 0.643519 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.133102 |
03a03bedee87cbb3ff98b4472ee9a56d99d7f812 | 7,070 | py | Python | myconnectome/rsfmri/mk_connectome_figures.py | poldrack/myconnectome | 201f414b3165894d6fe0be0677c8a58f6d161948 | [
"MIT"
] | 28 | 2015-04-02T16:43:14.000Z | 2020-06-17T20:04:26.000Z | myconnectome/rsfmri/mk_connectome_figures.py | poldrack/myconnectome | 201f414b3165894d6fe0be0677c8a58f6d161948 | [
"MIT"
] | 11 | 2015-05-19T02:57:22.000Z | 2017-03-17T17:36:16.000Z | myconnectome/rsfmri/mk_connectome_figures.py | poldrack/myconnectome | 201f414b3165894d6fe0be0677c8a58f6d161948 | [
"MIT"
] | 10 | 2015-05-21T17:01:26.000Z | 2020-11-11T04:28:08.000Z | # -*- coding: utf-8 -*-
"""
make images for connnectivity adjmtx
also compute within/between hemisphere stats
Created on Sun Jun 21 09:19:06 2015
@author: poldrack
"""
import os
import numpy
import nilearn.plotting
import scipy.stats
from myconnectome.utils.get_parcel_coords import get_parcel_coords
import matplotlib.pyplot as plt
def get_mean_connection_distance(input):
    """Mean Euclidean distance (in parcel-coordinate space) over all
    suprathreshold entries in the upper triangle of the adjacency matrix."""
    from scipy.spatial.distance import euclidean
    adj = input.copy()
    # Only count each undirected edge once: blank out the lower triangle
    # (including the diagonal).
    adj[numpy.tril_indices(adj.shape[0])] = 0
    coords = get_parcel_coords()
    rows, cols = numpy.where(adj > 0)
    dist = [euclidean(coords[i], coords[j]) for i, j in zip(rows, cols)]
    return numpy.mean(dist)
def r_to_z(r):
    """Fisher r-to-z transform of a correlation array.

    Infinite or NaN results (e.g. from |r| == 1) are replaced with 0.
    """
    z = 0.5 * numpy.log((1.0 + r) / (1.0 - r))
    z[numpy.isinf(z)] = 0
    z[numpy.isnan(z)] = 0
    return z
def z_to_r(z):
    """Inverse Fisher transform: map z values back to correlations."""
    e2z = numpy.exp(2.0 * z)
    return (e2z - 1) / (e2z + 1)
# Root of the myconnectome dataset; all inputs/outputs are resolved below it.
basedir=os.environ['MYCONNECTOME_DIR']


def mk_connectome_figures(use_abs_corr=False,thresh=0.0025):
    """Generate connectome figures and within/between-hemisphere statistics.

    Loads DTI track counts, resting-state full-correlation data, task
    connectome data, and L1/L2 partial-correlation data; thresholds each at
    the given edge density; plots the proportion of interhemispheric
    connections across densities; prints mean connection distances; and
    renders nilearn connectome figures (DTI-matching edges highlighted).

    NOTE: this is Python 2 code (print statements).

    Parameters:
        use_abs_corr: if True, threshold on |r| for the full correlations.
        thresh: edge density used for the connectome figures (0.0025 = 0.25%).
    """
    # --- load DTI track counts (first column dropped, then symmetrized) ---
    dtidata=numpy.loadtxt(os.path.join(basedir,'diffusion/tracksumm_distcorr.txt'),skiprows=1)
    dtidata=dtidata[:,1:]
    dtidata=dtidata+dtidata.T
    dtibin=dtidata>0
    # --- mean resting-state correlation across sessions (via Fisher z) ---
    rsfmridata=numpy.load(os.path.join(basedir,'rsfmri/corrdata.npy'))
    rsfmridata=r_to_z(rsfmridata)
    meancorr_z=numpy.mean(rsfmridata,0)
    meancorr=z_to_r(meancorr_z)
    if use_abs_corr:
        meancorr=numpy.abs(meancorr)
    meancorr[numpy.isnan(meancorr)]=0
    # 630 parcels; work on the upper triangle only.
    adjsize=630
    utr=numpy.triu_indices(adjsize,1)
    meandti=dtidata[utr]
    task_connectome=numpy.loadtxt(os.path.join(basedir,'taskfmri/task_connectome.txt'))
    taskdata=task_connectome[utr]
    # L1/L2 partial correlations are stored as upper-triangle vectors already.
    l2data=numpy.load(os.path.join(basedir,'rsfmri/l2_utr_data.npy'))
    l2mean=z_to_r(numpy.mean(r_to_z(l2data),0))
    l1data=numpy.load(os.path.join(basedir,'rsfmri/quic_utr_data_0.1.npy'))
    l1mean=z_to_r(numpy.mean(r_to_z(l1data),0))
    # --- binarize each modality at the requested edge density ---
    rsthresh=meancorr > scipy.stats.scoreatpercentile(meancorr,100-100*thresh)
    dtithresh=meandti > scipy.stats.scoreatpercentile(meandti,100-100*thresh)
    taskthresh=taskdata > scipy.stats.scoreatpercentile(taskdata,100-100*thresh)
    l2thresh=l2mean > scipy.stats.scoreatpercentile(l2mean,100-100*thresh)
    l1thresh=l1mean > scipy.stats.scoreatpercentile(l1mean,100-100*thresh)
    # Expand the thresholded upper-triangle vectors back to symmetric matrices.
    rsadj=numpy.zeros((adjsize,adjsize))
    l2adj=numpy.zeros((adjsize,adjsize))
    l1adj=numpy.zeros((adjsize,adjsize))
    dtiadj=numpy.zeros((adjsize,adjsize))
    taskadj=numpy.zeros((adjsize,adjsize))
    rsadj[utr]=rsthresh
    l2adj[utr]=l2thresh
    l1adj[utr]=l1thresh
    dtiadj[utr]=dtithresh
    taskadj[utr]=taskthresh
    rsadj=rsadj+rsadj.T
    l2adj=l2adj+l2adj.T
    l1adj=l1adj+l1adj.T
    dtiadj=dtiadj+dtiadj.T
    taskadj=taskadj+taskadj.T
    coords=get_parcel_coords()
    hemis=numpy.zeros((630,630))
    # get inter/intrahemispheric marker - 1=intra, -1=inter
    # (hemisphere is determined by the sign of the x coordinate)
    for i in range(630):
        for j in range(i+1,630):
            if numpy.sign(coords[i,0])==numpy.sign(coords[j,0]):
                hemis[i,j]=1
            else:
                hemis[i,j]=-1
    hemisutr=hemis[utr]
    inter=numpy.where(hemisutr==-1)
    intra=numpy.where(hemisutr==1)
    # --- interhemispheric proportion as a function of edge density ---
    densities=[0.001,0.005,0.01,0.025,0.05,0.075,0.1]
    hemisdata=numpy.zeros((len(densities),5))
    for d in range(len(densities)):
        rsthresh=meancorr > scipy.stats.scoreatpercentile(meancorr,100-100*densities[d])
        hemisdata[d,0]=numpy.sum(rsthresh[inter])/float(numpy.sum(rsthresh))
        dtithresh=meandti > scipy.stats.scoreatpercentile(meandti,100-100*densities[d])
        hemisdata[d,1]=numpy.sum(dtithresh[inter])/float(numpy.sum(dtithresh))
        taskthresh=taskdata > scipy.stats.scoreatpercentile(taskdata,100-100*densities[d])
        hemisdata[d,2]=numpy.sum(taskthresh[inter])/float(numpy.sum(taskthresh))
        l2thresh=l2mean > scipy.stats.scoreatpercentile(l2mean,100-100*densities[d])
        hemisdata[d,3]=numpy.sum(l2thresh[inter])/float(numpy.sum(l2thresh))
        l1thresh=l1mean > scipy.stats.scoreatpercentile(l1mean,100-100*densities[d])
        hemisdata[d,4]=numpy.sum(l1thresh[inter])/float(numpy.sum(l1thresh))
    print hemisdata
    plt.plot(hemisdata,linewidth=2)
    plt.legend(['Full correlation','DTI','Task','L1 partial','L2 partial'],loc=5)
    # NOTE(review): `densities*100` repeats the *list* 100 times; the intent
    # was probably percentage labels, i.e. [d*100 for d in densities] — verify.
    plt.xticks(range(len(densities)),densities*100)
    plt.xlabel('Density (proportion of possible connections)',fontsize=14)
    plt.ylabel('Proportion of connections that are interhemispheric',fontsize=14)
    plt.savefig(os.path.join(basedir,'rsfmri/interhemispheric_connection_plot.pdf'))
    print 'mean connection distances (%0.04f density)'%thresh
    print 'fullcorr:',get_mean_connection_distance(rsadj)
    print 'l1 pcorr:',get_mean_connection_distance(l1adj)
    print 'l2 pcorr:',get_mean_connection_distance(l2adj)
    print 'task corr:',get_mean_connection_distance(taskadj)
    print 'dti:',get_mean_connection_distance(dtiadj)
    # --- connectome figures: drop isolated nodes, then render with nilearn ---
    dti_sum=numpy.sum(dtiadj,0)
    tmp=dtiadj[dti_sum>0,:]
    dtiadj_reduced=tmp[:,dti_sum>0]
    #dtiadj_reduced=dtiadj_reduced+dtiadj_reduced.T
    nilearn.plotting.plot_connectome(dtiadj_reduced,coords[dti_sum>0,:],node_size=2,
        output_file=os.path.join(basedir,'diffusion/dti_connectome_thresh%f.pdf'%thresh))
    rs_sum=numpy.sum(rsadj,0)
    # edges that also exist in DTI get weight 0.81 vs 0.01 so the 'seismic'
    # colormap renders DTI-matching edges in a different color
    rsadj_match=rsadj*0.01 + rsadj*dtibin*0.8
    tmp=rsadj_match[rs_sum>0,:]
    rsadj_reduced=tmp[:,rs_sum>0]
    #rsadj_reduced=rsadj_reduced+rsadj_reduced.T
    nilearn.plotting.plot_connectome(rsadj_reduced,coords[rs_sum>0,:],node_size=2,
        edge_vmin=0,edge_vmax=1,edge_cmap='seismic',edge_kwargs={'linewidth':1},
        output_file=os.path.join(basedir,'rsfmri/rsfmri_corr_connectome_thresh%f.pdf'%thresh))
    l2_sum=numpy.sum(l2adj,0)
    l2adj_match=l2adj*0.01 + l2adj*dtibin*0.8 # highlight edges matching DTI
    tmp=l2adj_match[l2_sum>0,:]
    l2adj_reduced=tmp[:,l2_sum>0]
    #l2adj_reduced=l2adj_reduced+l2adj_reduced.T
    nilearn.plotting.plot_connectome(l2adj_reduced,coords[l2_sum>0,:],node_size=2,
        edge_vmin=0,edge_vmax=1,edge_cmap='seismic',edge_kwargs={'linewidth':1},
        output_file=os.path.join(basedir,'rsfmri/rsfmri_l2_connectome_thresh%f.pdf'%thresh))
    task_sum=numpy.sum(taskadj,0)
    taskadj_match=taskadj*0.01 + taskadj*dtibin*0.8 # highlight edges matching DTI
    tmp=taskadj_match[task_sum>0,:]
    taskadj_reduced=tmp[:,task_sum>0]
    #taskadj_reduced=taskadj_reduced+taskadj_reduced.T
    nilearn.plotting.plot_connectome(taskadj_reduced,coords[task_sum>0,:],node_size=2,
        edge_vmin=0,edge_vmax=1,edge_cmap='seismic',edge_kwargs={'linewidth':1},
        output_file=os.path.join(basedir,'taskfmri/task_connectome_thresh%f.pdf'%thresh))


if __name__ == "__main__":
    mk_connectome_figures()
| 40.170455 | 123 | 0.698727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,251 | 0.176945 |
03a3b7e2b44fd96667892818b55fb97c799166f0 | 7,820 | py | Python | src/tools/python/shipment_report_3x.py | Justintime50/easypost-tools | b5118eec331cd9ec5502e617c73ead61fc322c94 | [
"MIT"
] | 1 | 2022-02-17T21:04:05.000Z | 2022-02-17T21:04:05.000Z | src/tools/python/shipment_report_3x.py | Justintime50/easypost-tools | b5118eec331cd9ec5502e617c73ead61fc322c94 | [
"MIT"
] | null | null | null | src/tools/python/shipment_report_3x.py | Justintime50/easypost-tools | b5118eec331cd9ec5502e617c73ead61fc322c94 | [
"MIT"
] | null | null | null | # Shipment Details Download script
# Outputs CSV text report for purchased production shipments
#
# Usage:
# python3 ShipmentReport_3x.py "optional API KEY" (if not using env vars)
#
# 0.2 Revised API key display 02 Jan 2020 joshua.biagio@easypost.com
# 0.1 Corrected handling of zero shipments returned 02 Jan 2020 joshua.biagio@easypost.com
# 0.0 Initial version 27 Dec 2019 joshua.biagio@easypost.com
#
# Note: this script makes raw endpoint queries instead of using the easypost
# API Python modules to limit the amount of dependencies that are required
#############################################################################
# Copyright (C) 2019 by EasyPost, Inc. <support@easypost.com>
#
# Permission to use, copy, modify, and/or distribute this software for
# any purpose with or without fee is hereby granted.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
# AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
# PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#############################################################################
import csv
import json
import os
import sys
from base64 import b64encode
from datetime import datetime
from http.client import HTTPSConnection
from urllib.parse import urlencode
# environmental var that stores our production API key;
# set to "" if not used
ENV_VAR_API_KEY = ""

# output folder for generated location
# defaults to ~/Documents (Linux/MacOS) or C:\\Users\\<CURRENT_USER_NAME>\\Documents (Windows)
# hard-code to some other path if desired
OUTPUT_FOLDER = os.path.join(os.path.expanduser('~'), 'Documents')

# report window: modify startDate below to suit; endDate is "now" in UTC
startDate = "2019-01-01T00:00:00Z"
endDate = datetime.utcnow().isoformat()

# EasyPost API version path prefix used by getURL()
URLBASE = "/v2/"
def getURL(api_key, url, list_data):
    """GET an EasyPost API endpoint and return the parsed JSON response.

    Parameters:
        api_key: EasyPost API key, used as the basic-auth username.
        url: endpoint path appended to URLBASE (e.g. 'shipments').
        list_data: list of 'key=value' strings, urlencoded into the request.

    Returns the decoded JSON dict, or {} on any failure (best-effort:
    callers treat an empty dict as "no data").

    inspired by https://stackoverflow.com/a/7000784
    """
    # create our connection
    conn = HTTPSConnection("api.easypost.com")
    # build our authentication header (basic auth: "<key>:" base64-encoded)
    b64userpassword = b64encode(bytes(api_key + ":", encoding='ascii')).decode("ascii")
    headers = {
        'Authorization': 'Basic %s' % b64userpassword,
        'Content-type': 'application/x-www-form-urlencoded',
        'Accept': 'text/plain',
        'User-Agent': 'python3 ShipmentReport_3x.py v0.2',
    }
    # build our urlecode parameters dictionary by iterating through the values passed in and
    # splitting on '='
    ueparams = dict(val.split('=') for val in list_data)
    params = urlencode(ueparams)
    try:
        conn.request('GET', f'{URLBASE}{url}', params, headers=headers)
        res = conn.getresponse()
        print(res.status, res.reason)
        res_str = res.read()
        data = json.loads(res_str)
    except Exception as exc:
        # Keep the best-effort contract ({} on failure) but surface the
        # cause instead of silently swallowing it.
        print(f'Request to {url} failed: {exc}')
        data = {}
    finally:
        # Fix: the connection was previously never closed (socket leak
        # across the paging loop in __main__).
        conn.close()
    return data
if __name__ == "__main__":
# first look for the API key passed in from the command line
if len(sys.argv) == 2:
API_KEY = sys.argv[1]
API_KEY = API_KEY.replace('"', '').replace("'", '')
# otherwise, try to load it from the environment
else:
try:
# attempt to read the key from the environment
# N.B. needs to be a production key
API_KEY = os.environ[ENV_VAR_API_KEY]
except Exception:
API_KEY = ''
print(f"Using API key: '{API_KEY[:5]}" + ("*" * (len(API_KEY) - 5)) + "'...")
# retrieve the shipments in pages
# on the first page, just use dates
# each subsequent page, pass in the last seen shipment ID, to force the next page
has_more = True
shipments = []
params = ['page_size=100', f'start_datetime={startDate}', f'end_datetime={endDate}']
while has_more:
data = getURL(API_KEY, 'shipments', params)
if 'shipments' in data and len(data['shipments']) > 0:
for s in data['shipments']:
shipments.append(s)
print(f'Shipments processed: {len(shipments)}')
has_more = data['has_more']
params = [
'page_size=100',
f'start_datetime={startDate}',
f'end_datetime={endDate}',
f'before_id={shipments[-1]["id"]}',
]
else:
has_more = False
# build file name
n = str(datetime.now())
n = n.replace('-', '').replace(' ', '_').replace(':', '')
outfile = os.path.join(OUTPUT_FOLDER, (n + '.csv'))
print(f"Creating file '{outfile}'...")
# format all the returned shipment data
rows = []
for shipment in shipments:
data = [
shipment['created_at'],
shipment['id'],
shipment['reference'],
shipment['mode'],
shipment['to_address']['id'],
shipment['from_address']['id'],
shipment['return_address']['id'],
shipment['buyer_address']['id'],
shipment['parcel']['id'],
]
data += [
shipment['customs_info']['id'] if shipment['customs_info'] else '',
shipment['scan_form']['id'] if shipment['scan_form'] else '',
]
fees = {f['type']: f['amount'] for f in shipment['fees']}
sign = '-' if str(shipment['refund_status']) == 'refunded' else ''
data += [
(sign if ('LabelFee' in fees and float(fees['LabelFee']) > 0.0) else '')
+ (fees['LabelFee'] if 'LabelFee' in fees else ''), # noqa
(sign if ('PostageFee' in fees and float(fees['PostageFee']) > 0.0) else '')
+ (fees['PostageFee'] if 'PostageFee' in fees else ''),
(sign if ('InsuranceFee' in fees and float(fees['InsuranceFee']) > 0.0) else '')
+ (fees['InsuranceFee'] if 'InsuranceFee' in fees else ''),
]
sr = shipment['selected_rate']
data += [shipment['insurance'], sr['id'], sr['carrier'], sr['service'], sr['rate']]
pl = shipment['postage_label']
data += [pl['id'], pl['label_url']]
data += [shipment['is_return'], shipment['tracking_code'], shipment['usps_zone'], shipment['status']]
data += [
shipment['tracker']['id'] if shipment['tracker'] else '',
shipment['tracker']['public_url'] if shipment['tracker'] else '',
]
data += [shipment['refund_status'], shipment['batch_id'], shipment['batch_status'], shipment['batch_message']]
data = [(i if i else '') for i in data]
rows.append(data)
cols = (
'created_at',
'id',
'reference',
'mode',
'to_address.id',
'from_address.id',
'return_address.id',
'buyer_address.id',
'parcel.id',
'customs_info.id',
'scan_form.id',
'label_fee',
'postage_fee',
'insurance_fee',
'insured_value',
'selected_rate.id',
'selected_rate.carrier',
'selected_rate.service',
'selected_rate.rate',
'postage_label.id',
'postage_label.url',
'is_return',
'tracking_code',
'usps_zone',
'status',
'tracker.id',
'tracker.public_url',
'refund_status',
'batch_id',
'batch_status',
'batch_message',
)
# store data in a CSV
with open(outfile, mode='w', encoding='utf-8', newline='\n') as f:
writer = csv.writer(f, dialect='excel', quoting=csv.QUOTE_MINIMAL)
writer.writerow(cols)
writer.writerows(rows)
print(f'Total number of shipments in file: {len(rows)}')
| 34.910714 | 118 | 0.592455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,105 | 0.524936 |
03a54ff191c236e664a934c1ff06719cccfcad11 | 44,640 | py | Python | backend/unpp_api/apps/project/views.py | unicef/un-partner-portal | 73afa193a5f6d626928cae0025c72a17f0ef8f61 | [
"Apache-2.0"
] | 6 | 2017-11-21T10:00:44.000Z | 2022-02-12T16:51:48.000Z | backend/unpp_api/apps/project/views.py | unicef/un-partner-portal | 73afa193a5f6d626928cae0025c72a17f0ef8f61 | [
"Apache-2.0"
] | 995 | 2017-07-31T02:08:36.000Z | 2022-03-08T22:44:03.000Z | backend/unpp_api/apps/project/views.py | unicef/un-partner-portal | 73afa193a5f6d626928cae0025c72a17f0ef8f61 | [
"Apache-2.0"
] | 1 | 2021-07-21T10:45:15.000Z | 2021-07-21T10:45:15.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import date
from django.db import transaction
from django.db.models import Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone
from rest_framework import status as statuses, serializers
from rest_framework.views import APIView
from rest_framework.generics import (
ListCreateAPIView,
ListAPIView,
CreateAPIView,
RetrieveUpdateAPIView,
RetrieveAPIView,
DestroyAPIView,
)
from rest_framework.response import Response
from rest_framework.filters import OrderingFilter
from rest_framework.exceptions import PermissionDenied
from django_filters.rest_framework import DjangoFilterBackend
from account.models import User
from agency.permissions import AgencyPermission
from common.consts import CFEI_TYPES, DIRECT_SELECTION_SOURCE, CFEI_STATUSES, APPLICATION_STATUSES
from common.pagination import SmallPagination
from common.permissions import HasUNPPPermission, check_unpp_permission, current_user_has_permission
from notification.consts import NotificationType
from notification.helpers import (
get_partner_users_for_application_queryset,
send_notification_cfei_completed,
send_agency_updated_application_notification,
send_notification_application_created,
send_notification,
send_cfei_review_required_notification,
user_received_notification_recently,
send_partner_made_decision_notification,
send_eoi_sent_for_decision_notification,
send_project_draft_sent_for_review_notification,
)
from partner.permissions import PartnerPermission
from project.exports.excel.application_compare import ApplicationCompareSpreadsheetGenerator
from project.exports.pdf.cfei import CFEIPDFExporter
from project.exports.pdf.cfei_questions import CFEIClarificationQuestionPDFExporter
from project.models import Assessment, Application, EOI, Pin, ClarificationRequestQuestion, \
ClarificationRequestAnswerFile
from project.serializers import (
BaseProjectSerializer,
DirectProjectSerializer,
CreateProjectSerializer,
PartnerProjectSerializer,
CreateDirectProjectSerializer,
ApplicationFullSerializer,
ApplicationFullEOISerializer,
AgencyUnsolicitedApplicationSerializer,
CreateDirectApplicationNoCNSerializer,
ApplicationsListSerializer,
ReviewersApplicationSerializer,
ReviewerAssessmentsSerializer,
ManageUCNSerializer,
ApplicationPartnerOpenSerializer,
ApplicationPartnerUnsolicitedDirectSerializer,
ApplicationPartnerDirectSerializer,
ApplicationFeedbackSerializer,
ConvertUnsolicitedSerializer,
ReviewSummarySerializer,
EOIReviewersAssessmentsSerializer,
AwardedPartnersSerializer,
CompareSelectedSerializer,
AgencyProjectSerializer,
ClarificationRequestQuestionSerializer,
ClarificationRequestAnswerFileSerializer,
PartnerApplicationSerializer,
)
from project.filters import (
BaseProjectFilter,
ApplicationsFilter,
ApplicationsEOIFilter,
ApplicationsUnsolicitedFilter,
)
class BaseProjectAPIView(ListCreateAPIView):
    """
    Base endpoint for Call of Expression of Interest.
    """
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[
                AgencyPermission.CFEI_VIEW,
            ],
            partner_permissions=[
                PartnerPermission.CFEI_VIEW
            ]
        ),
    )
    queryset = EOI.objects.select_related("agency").prefetch_related("specializations").distinct()
    serializer_class = BaseProjectSerializer
    pagination_class = SmallPagination
    filter_backends = (DjangoFilterBackend, OrderingFilter)
    filter_class = BaseProjectFilter
    ordering_fields = (
        'title', 'agency', 'specializations__name', 'deadline_date', 'created', 'start_date', 'completed_date'
    )

    def get_queryset(self):
        # Partners only ever see published CFEIs. Agency users see their own
        # agency's CFEIs for writes; for reads they additionally see all
        # published CFEIs from other agencies.
        base = super(BaseProjectAPIView, self).get_queryset()
        user = self.request.user
        if user.is_partner_user:
            return base.filter(is_published=True)
        if user.agency:
            if self.request.method == 'GET':
                return base.filter(Q(agency=user.agency) | Q(is_published=True))
            return base.filter(agency=user.agency)
        return base
class OpenProjectAPIView(BaseProjectAPIView):
    """
    Endpoint for getting OPEN Call of Expression of Interest.

    POST creates a new draft open CFEI (agency users with draft-create
    permission only).
    """

    def get_queryset(self):
        queryset = super(OpenProjectAPIView, self).get_queryset().filter(display_type=CFEI_TYPES.open)
        if self.request.active_partner:
            # Either active projects or ones CSO has won
            query = Q(deadline_date__gte=date.today(), is_completed=False) | Q(
                applications__partner=self.request.active_partner,
                applications__did_win=True,
                applications__did_accept=True,
            )
            queryset = queryset.filter(query)
        return queryset

    @check_unpp_permission(agency_permissions=[AgencyPermission.CFEI_DRAFT_CREATE])
    def post(self, request, *args, **kwargs):
        serializer = CreateProjectSerializer(data=request.data, context={'request': request})
        serializer.is_valid(raise_exception=True)
        instance = serializer.save()
        # Notify any reviewers assigned at creation time.
        if instance.reviewers.exists():
            send_notification(NotificationType.SELECTED_AS_CFEI_REVIEWER, instance, instance.reviewers.all())
        return Response(serializer.data, status=statuses.HTTP_201_CREATED)
class EOIAPIView(RetrieveUpdateAPIView, DestroyAPIView):
    """Retrieve, update or delete a single CFEI.

    Updates trigger notifications for newly invited partners, deadline
    changes, newly added reviewers and completion. Deletes map to
    cancel/delete permissions depending on published state.
    """
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[
                AgencyPermission.CFEI_VIEW,
            ],
            partner_permissions=[
                PartnerPermission.CFEI_VIEW
            ]
        ),
    )
    queryset = EOI.objects.all()

    def retrieve(self, request, *args, **kwargs):
        # ?export=pdf returns a rendered PDF instead of the JSON payload.
        if request.GET.get('export', '').lower() == 'pdf':
            return CFEIPDFExporter(self.get_object()).get_as_response()
        return super(EOIAPIView, self).retrieve(request, *args, **kwargs)

    def get_serializer_class(self, *args, **kwargs):
        # Agency users get the full agency-facing serializer.
        return AgencyProjectSerializer if self.request.user.is_agency_user else PartnerProjectSerializer

    def get_queryset(self):
        queryset = super(EOIAPIView, self).get_queryset()
        if not self.request.method == 'GET':
            # Writes are limited to non-completed CFEIs the user created or
            # is a focal point of.
            valid_ids = EOI.objects.filter(
                Q(created_by=self.request.user) | Q(focal_points=self.request.user)
            ).values_list('id', flat=True).distinct()
            queryset = queryset.filter(is_completed=False, id__in=valid_ids)
        if self.request.active_partner:
            queryset = queryset.filter(is_published=True)
        return queryset

    def perform_update(self, serializer):
        # Snapshot pre-save state so that only actual changes are notified.
        eoi = self.get_object()
        currently_invited_partners = list(eoi.invited_partners.all().values_list('id', flat=True))
        current_deadline = eoi.deadline_date
        current_reviewers = list(eoi.reviewers.all().values_list('id', flat=True))
        instance = serializer.save()

        # New partners added
        for partner in instance.invited_partners.exclude(id__in=currently_invited_partners):
            context = {
                'eoi_url': eoi.get_absolute_url()
            }
            send_notification(NotificationType.CFEI_INVITE, eoi, partner.get_users(), context=context)

        # Deadline Changed
        if current_deadline != instance.deadline_date:
            users = get_partner_users_for_application_queryset(instance.applications.all())
            context = {
                'initial_date': current_deadline,
                'revised_date': instance.deadline_date,
                'eoi_url': eoi.get_absolute_url()
            }
            send_notification(NotificationType.CFEI_DEADLINE_UPDATE, eoi, users, context=context)

        # New Reviewers Added
        new_reviewer_ids = []
        for reviewer in instance.reviewers.all():
            if reviewer.id not in current_reviewers:
                new_reviewer_ids.append(reviewer.id)
        if new_reviewer_ids:
            send_notification(
                NotificationType.SELECTED_AS_CFEI_REVIEWER, eoi, User.objects.filter(id__in=new_reviewer_ids)
            )

        # Completed
        if instance.is_completed:
            send_notification_cfei_completed(instance)

    def perform_destroy(self, cfei):
        # Published CFEIs are "cancelled", drafts are deleted; the required
        # permission differs for direct vs open CFEIs.
        if cfei.is_direct:
            if cfei.is_published:
                required_permissions = [AgencyPermission.CFEI_DIRECT_CANCEL]
            else:
                required_permissions = [AgencyPermission.CFEI_DIRECT_DELETE_DRAFT]
        else:
            if cfei.is_published:
                required_permissions = [AgencyPermission.CFEI_PUBLISHED_CANCEL]
            else:
                required_permissions = [AgencyPermission.CFEI_DRAFT_MANAGE]

        current_user_has_permission(self.request, agency_permissions=required_permissions)
        return super(EOIAPIView, self).perform_destroy(cfei)
class DirectProjectAPIView(BaseProjectAPIView):
    """
    Endpoint for getting DIRECT Call of Expression of Interest.

    POST creates a draft direct-selection CFEI; the creator and selection
    source are stamped onto the nested 'eoi' payload server-side.
    """
    serializer_class = DirectProjectSerializer

    def get_serializer_class(self):
        # Reads use the display serializer; writes use the create serializer.
        if self.request.method == 'GET':
            return self.serializer_class
        return CreateDirectProjectSerializer

    def get_queryset(self):
        return super(DirectProjectAPIView, self).get_queryset().filter(display_type=CFEI_TYPES.direct).distinct()

    @check_unpp_permission(agency_permissions=[AgencyPermission.CFEI_DIRECT_CREATE_DRAFT_MANAGE_FOCAL_POINTS])
    def post(self, request, *args, **kwargs):
        data = request.data
        try:
            data['eoi']['created_by'] = request.user.id
            data['eoi']['selected_source'] = DIRECT_SELECTION_SOURCE.un
        except (KeyError, TypeError):
            # Fix: previously a bare `except Exception: pass` — narrowed to
            # the failures a malformed payload can actually produce, so
            # unexpected errors are no longer silently hidden. A payload
            # without an 'eoi' mapping falls through to serializer validation.
            pass
        return super(DirectProjectAPIView, self).post(request, *args, **kwargs)
class PinProjectAPIView(BaseProjectAPIView):
    """
    Endpoint for getting PINNED Call of Expression of Interest for User Partner.

    PATCH bulk pins (pin=True) or unpins (pin=False) the given CFEI ids for
    the active partner.
    """
    permission_classes = (
        HasUNPPPermission(
            partner_permissions=[
                PartnerPermission.CFEI_VIEW
            ]
        ),
    )

    ERROR_MSG_WRONG_EOI_PKS = "At least one of given CFEIs could not be found."
    ERROR_MSG_WRONG_PARAMS = "Couldn't properly identify input parameters like 'eoi_ids' and 'pin'."

    def get_queryset(self):
        # Only CFEIs this partner pinned, and only while the deadline is open.
        return super(PinProjectAPIView, self).get_queryset().filter(
            pins__partner_id=self.request.active_partner.id, deadline_date__gte=date.today()
        ).distinct()

    @check_unpp_permission(partner_permissions=[PartnerPermission.CFEI_PINNING])
    def patch(self, request, *args, **kwargs):
        eoi_ids = request.data.get("eoi_ids", [])
        pin = request.data.get("pin")
        # Reject the whole request if any referenced CFEI does not exist.
        if EOI.objects.filter(id__in=eoi_ids).count() != len(eoi_ids):
            raise serializers.ValidationError({
                'non_field_errors': self.ERROR_MSG_WRONG_EOI_PKS
            })
        partner_id = self.request.active_partner.id
        if pin and eoi_ids:
            Pin.objects.bulk_create([
                Pin(eoi_id=eoi_id, partner_id=partner_id, pinned_by=request.user) for eoi_id in eoi_ids
            ])
            return Response({"eoi_ids": eoi_ids}, status=statuses.HTTP_201_CREATED)
        elif pin is False and eoi_ids:
            # Only removes pins created by this user for this partner.
            Pin.objects.filter(eoi_id__in=eoi_ids, partner_id=partner_id, pinned_by=request.user).delete()
            return Response(status=statuses.HTTP_204_NO_CONTENT)
        else:
            raise serializers.ValidationError({
                'non_field_errors': self.ERROR_MSG_WRONG_PARAMS
            })
class AgencyApplicationListAPIView(ListAPIView):
    """
    Endpoint to allow agencies to get applications.

    Returns applications on CFEIs the requesting user created or is a focal
    point for, plus applications with no CFEI (eoi=None).
    """
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[
                AgencyPermission.CFEI_VIEW_APPLICATIONS,
            ]
        ),
    )
    filter_backends = (DjangoFilterBackend, OrderingFilter)
    filter_class = ApplicationsFilter
    serializer_class = ApplicationFullEOISerializer
    pagination_class = SmallPagination

    def get_queryset(self):
        valid_eoi_ids = EOI.objects.filter(
            Q(created_by=self.request.user) | Q(focal_points=self.request.user)
        ).values_list('id', flat=True).distinct()
        # Fix: local variable was misspelled 'quesryset'.
        queryset = Application.objects.filter(
            Q(eoi_id__in=valid_eoi_ids) | Q(eoi=None)
        )
        return queryset
class PartnerEOIApplicationCreateAPIView(CreateAPIView):
    """
    Create Application for open EOI by partner.

    Rejects submissions from HQ profiles and from partners with incomplete
    profiles; enforces one application per partner per CFEI, and only while
    the deadline is open.
    """
    permission_classes = (
        HasUNPPPermission(
            partner_permissions=[
                PartnerPermission.CFEI_SUBMIT_CONCEPT_NOTE,
            ]
        ),
    )
    serializer_class = PartnerApplicationSerializer

    def create(self, request, *args, **kwargs):
        if self.request.active_partner.is_hq:
            raise serializers.ValidationError(
                "You don't have the ability to submit an application if "
                "you are currently toggled under the HQ profile."
            )
        if not self.request.active_partner.profile_is_complete:
            raise serializers.ValidationError(
                "You don't have the ability to submit an application if Your profile is not completed."
            )
        return super(PartnerEOIApplicationCreateAPIView, self).create(request, *args, **kwargs)

    @transaction.atomic
    def perform_create(self, serializer):
        # 404 if the CFEI does not exist or its deadline has passed.
        eoi = get_object_or_404(
            EOI, deadline_date__gte=date.today(), id=self.kwargs.get('pk')
        )
        if eoi.applications.filter(partner=self.request.active_partner).exists():
            raise serializers.ValidationError("You already applied for this project.")
        save_kwargs = {
            'eoi': eoi,
            'agency': eoi.agency,
            'partner': self.request.active_partner,
            'submitter': self.request.user,
        }
        # Pre-selected partners skip straight to 'preselected' status.
        if self.request.active_partner.pk in eoi.preselected_partners:
            save_kwargs['status'] = APPLICATION_STATUSES.preselected
        serializer.save(**save_kwargs)
        send_notification_application_created(serializer.instance)
class PartnerEOIApplicationRetrieveAPIView(RetrieveAPIView):
    """Retrieve the active partner's own application for a given CFEI."""

    permission_classes = (
        HasUNPPPermission(
            partner_permissions=[
                PartnerPermission.CFEI_VIEW,
            ]
        ),
    )
    queryset = Application.objects.all()
    serializer_class = PartnerApplicationSerializer

    def get_object(self):
        # The URL pk identifies the CFEI, not the application: the
        # application is looked up by the (active partner, CFEI) pair.
        return get_object_or_404(
            self.get_queryset(),
            partner_id=self.request.active_partner.id,
            eoi_id=self.kwargs.get('pk'),
        )
class AgencyEOIApplicationCreateAPIView(CreateAPIView):
    """
    Create Application for direct EOI by agency.

    The application is created pre-won (did_win=True) on behalf of the
    indicated CSO, without a concept note.
    """
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[
                AgencyPermission.CFEI_DIRECT_INDICATE_CSO,
            ]
        ),
    )
    queryset = Application.objects.all()
    serializer_class = CreateDirectApplicationNoCNSerializer

    def perform_create(self, serializer):
        # 404 unless the CFEI belongs to the requesting user's agency.
        eoi = get_object_or_404(EOI, id=self.kwargs['pk'], agency=self.request.user.agency)
        instance = serializer.save(
            did_win=True, eoi=eoi, submitter=self.request.user, agency=eoi.agency
        )
        send_notification_application_created(instance)
class AgencyEOIApplicationDestroyAPIView(DestroyAPIView):
    """Delete an application an agency indicated on its own direct EOI."""
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[
                AgencyPermission.CFEI_DIRECT_INDICATE_CSO,
            ],
        ),
    )
    queryset = Application.objects.all()
    def get_queryset(self):
        # Restrict deletions to applications on the given EOI owned by the
        # requesting user's agency.
        return super(AgencyEOIApplicationDestroyAPIView, self).get_queryset().filter(
            eoi__agency=self.request.user.agency, eoi_id=self.kwargs['eoi_id']
        )
class PartnerEOIApplicationDestroyAPIView(DestroyAPIView):
    """Allow a partner to withdraw (delete) one of its own applications
    before the EOI submission deadline has passed.
    """
    permission_classes = (
        HasUNPPPermission(
            partner_permissions=[
                PartnerPermission.CFEI_SUBMIT_CONCEPT_NOTE,
            ],
        ),
    )
    queryset = Application.objects.all()
    def get_object(self):
        # Only applications belonging to one of the user's partners.
        return get_object_or_404(
            self.get_queryset(), pk=self.kwargs['pk'], partner_id__in=self.request.user.partner_ids
        )
    def perform_destroy(self, instance: Application):
        # Withdrawal is blocked once the submission deadline has passed.
        if instance.eoi and instance.eoi.deadline_passed:
            raise PermissionDenied('You cannot delete application past submission deadline.')
        return super(PartnerEOIApplicationDestroyAPIView, self).perform_destroy(instance)
class ApplicationAPIView(RetrieveUpdateAPIView):
    """Retrieve / update a single application, for both partners and agency
    members.  Serializer and queryset scoping depend on the caller's role.
    """
    permission_classes = (
        HasUNPPPermission(
            partner_permissions=[
                PartnerPermission.CFEI_VIEW,
            ],
            agency_permissions=[]
        ),
    )
    queryset = Application.objects.select_related("partner", "eoi", "cn").prefetch_related("eoi__reviewers").all()
    def get_serializer_class(self):
        # Agency members see the full application; partners a reduced view.
        if self.request.agency_member:
            return ApplicationFullSerializer
        else:
            return PartnerApplicationSerializer
    def check_object_permissions(self, request, obj: Application):
        # NOTE(review): deliberately does not call super(), so the default
        # object-permission checks are bypassed here — confirm intentional.
        # Blocks agency users from other agencies once the EOI is completed.
        if self.request.user.agency and not obj.agency == self.request.user.agency and obj.eoi and obj.eoi.is_completed:
            raise PermissionDenied
    def get_queryset(self):
        """Scope the queryset by role.

        Partners: only their own applications.  Agency members: published
        unsolicited + all solicited applications; for write methods, further
        restricted to the member's own agency.  Anyone else: nothing.
        """
        queryset = super(ApplicationAPIView, self).get_queryset()
        if self.request.active_partner:
            return queryset.filter(partner_id__in=self.request.user.partner_ids)
        elif self.request.agency_member:
            queryset = queryset.filter(Q(is_unsolicited=True, is_published=True) | Q(is_unsolicited=False))
            if not self.request.method == 'GET':
                queryset = queryset.filter(eoi__agency=self.request.user.agency)
            return queryset
        return queryset.none()
    @check_unpp_permission(
        partner_permissions=[
            PartnerPermission.CFEI_ANSWER_SELECTION,
        ],
        agency_permissions=[
            AgencyPermission.CFEI_PRESELECT_APPLICATIONS,
        ]
    )
    @transaction.atomic
    def perform_update(self, serializer):
        """Record agency / partner selection decisions with decision metadata.

        ``did_win`` (agency side) requires the extra
        CFEI_SELECT_RECOMMENDED_PARTNER permission; accept/decline
        (partner side) stamps the partner decision fields.  Notifies the
        other side of the decision after saving.
        """
        data = serializer.validated_data
        agency_decision = data.get('did_win')
        partner_decision = data.get('did_accept', False) or data.get('did_decline', False)
        save_kwargs = {}
        if agency_decision:
            current_user_has_permission(self.request, agency_permissions=[
                AgencyPermission.CFEI_SELECT_RECOMMENDED_PARTNER
            ], raise_exception=True)
            save_kwargs['agency_decision_date'] = timezone.now().date()
            save_kwargs['agency_decision_maker'] = self.request.user
        if partner_decision:
            save_kwargs['partner_decision_date'] = timezone.now().date()
            save_kwargs['partner_decision_maker'] = self.request.user
        instance = serializer.save(**save_kwargs)
        if self.request.agency_member:
            send_agency_updated_application_notification(instance)
        elif self.request.active_partner and partner_decision:
            send_partner_made_decision_notification(instance)
class EOIApplicationsListAPIView(ListAPIView):
    """List all applications of a single EOI (agency side), with filtering,
    ordering and pagination.  Permission requirements differ depending on
    whether the EOI is already completed.
    """
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[]
        ),
    )
    queryset = Application.objects.select_related(
        "partner", "eoi", "cn"
    ).prefetch_related("assessments", "eoi__reviewers").all()
    serializer_class = ApplicationsListSerializer
    pagination_class = SmallPagination
    filter_backends = (
        DjangoFilterBackend,
        OrderingFilter,
    )
    filter_class = ApplicationsEOIFilter
    ordering_fields = ('status', )
    lookup_field = lookup_url_kwarg = 'pk'
    def get_queryset(self, *args, **kwargs):
        eoi = get_object_or_404(EOI, pk=self.kwargs['pk'])
        if eoi.is_completed:
            # Completed EOIs: only same-agency users with the finalized-view
            # permission may see the applications.
            if eoi.agency == self.request.user.agency:
                current_user_has_permission(self.request, agency_permissions=[
                    AgencyPermission.CFEI_FINALIZED_VIEW_WINNER_AND_CN,
                ], raise_exception=True)
                queryset = super(EOIApplicationsListAPIView, self).get_queryset()
            else:
                raise PermissionDenied
        else:
            # Open EOIs: viewer must hold CFEI_VIEW_APPLICATIONS and be the
            # creator, a focal point or a reviewer of the EOI.
            current_user_has_permission(self.request, agency_permissions=[
                AgencyPermission.CFEI_VIEW_APPLICATIONS,
            ], raise_exception=True)
            valid_eoi_ids = EOI.objects.filter(
                Q(created_by=self.request.user) | Q(focal_points=self.request.user) | Q(reviewers=self.request.user)
            ).values_list('id', flat=True).distinct()
            queryset = super(EOIApplicationsListAPIView, self).get_queryset().filter(eoi_id__in=valid_eoi_ids)
        return queryset.filter(eoi_id=self.kwargs.get(self.lookup_field))
class ReviewersStatusAPIView(ListAPIView):
    """List the reviewers of the EOI behind a given application, together
    with their assessment status (via ReviewersApplicationSerializer).
    """
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[
                AgencyPermission.CFEI_VIEW_ALL_REVIEWS,
            ]
        ),
    )
    serializer_class = ReviewersApplicationSerializer
    lookup_url_kwarg = 'application_id'
    def get_object(self):
        # Only applications on EOIs the user created, is a focal point of,
        # or reviews are visible at all.
        valid_eoi_ids = EOI.objects.filter(
            Q(created_by=self.request.user) | Q(focal_points=self.request.user) | Q(reviewers=self.request.user)
        ).values_list('id', flat=True).distinct()
        return get_object_or_404(
            Application.objects.filter(
                eoi_id__in=valid_eoi_ids
            ).select_related('eoi').prefetch_related('eoi__reviewers'),
            pk=self.kwargs.get(self.lookup_url_kwarg)
        )
    def get_queryset(self, *args, **kwargs):
        """Return the reviewers visible to the current user.

        Finalized EOI: requires the finalized-reviews permission, then all
        reviewers.  Creator / focal point: all reviewers.  A reviewer: only
        themselves.  Everyone else: empty queryset.
        """
        eoi: EOI = self.get_object().eoi
        user = self.request.user
        if eoi.status == CFEI_STATUSES.finalized:
            current_user_has_permission(self.request, agency_permissions=[
                AgencyPermission.CFEI_FINALIZED_VIEW_ALL_REVIEWS,
            ], raise_exception=True)
        elif eoi.created_by == user or eoi.focal_points.filter(pk=user.pk).exists():
            pass
        elif eoi.reviewers.filter(pk=user.pk).exists():
            return eoi.reviewers.filter(pk=user.pk)
        else:
            return eoi.reviewers.none()
        return eoi.reviewers.all()
class ReviewerAssessmentsAPIView(ListCreateAPIView, RetrieveUpdateAPIView):
    """List / create / retrieve / update a reviewer's assessments for one
    application.  URL kwargs carry both the application id and reviewer id.
    """
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[
                AgencyPermission.CFEI_VIEW_ALL_REVIEWS,
            ]
        ),
    )
    serializer_class = ReviewerAssessmentsSerializer
    reviewer_url_kwarg = 'reviewer_id'
    application_url_kwarg = 'application_id'
    def check_permissions(self, request):
        # The requesting user must be a reviewer on the application's EOI and
        # the application must be in a reviewable (preselected/recommended)
        # status.
        super(ReviewerAssessmentsAPIView, self).check_permissions(request)
        if not Application.objects.filter(
            status__in=[
                APPLICATION_STATUSES.preselected, APPLICATION_STATUSES.recommended,
            ],
            id=self.kwargs.get(self.application_url_kwarg),
            eoi__reviewers=self.request.user,
        ).exists():
            raise PermissionDenied
    def get_queryset(self, *args, **kwargs):
        return Assessment.objects.filter(application_id=self.kwargs.get(self.application_url_kwarg))
    def get_object(self):
        obj = get_object_or_404(
            self.get_queryset(),
            reviewer_id=self.kwargs.get(self.reviewer_url_kwarg),
            application_id=self.kwargs.get(self.application_url_kwarg),
        )
        self.check_object_permissions(self.request, obj)
        return obj
    def create(self, request, *args, **kwargs):
        # Inject application/reviewer ids from the URL so clients cannot
        # submit an assessment for a different application or reviewer.
        request.data['application'] = self.kwargs.get(self.application_url_kwarg)
        request.data['reviewer'] = self.kwargs.get(self.reviewer_url_kwarg)
        return super(ReviewerAssessmentsAPIView, self).create(request, *args, **kwargs)
    def perform_update(self, serializer):
        # Only the author may edit, and only while not marked completed.
        if not serializer.instance.created_by == self.request.user:
            raise PermissionDenied
        if serializer.instance.completed:
            raise serializers.ValidationError('You have marked this review as completed, It can no longer be edited')
        super(ReviewerAssessmentsAPIView, self).perform_update(serializer)
class UnsolicitedProjectListAPIView(ListAPIView):
    """Agency-side list of all published unsolicited concept notes."""
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[]
        ),
    )
    queryset = Application.objects.filter(is_unsolicited=True, is_published=True).distinct()
    pagination_class = SmallPagination
    filter_backends = (DjangoFilterBackend, )
    filter_class = ApplicationsUnsolicitedFilter
    serializer_class = AgencyUnsolicitedApplicationSerializer
class PartnerApplicationOpenListAPIView(ListAPIView):
    """Partner-side list of the active partner's applications to open CFEIs.

    An HQ profile additionally sees applications of its country offices.
    """
    permission_classes = (
        HasUNPPPermission(
            partner_permissions=[
                PartnerPermission.UCN_VIEW,
            ]
        ),
    )
    queryset = Application.objects.filter(eoi__display_type=CFEI_TYPES.open).distinct()
    serializer_class = ApplicationPartnerOpenSerializer
    pagination_class = SmallPagination
    filter_backends = (DjangoFilterBackend, )
    filter_class = ApplicationsFilter
    def get_queryset(self):
        query = Q(partner=self.request.active_partner)
        # HQ profiles also see their country offices' applications.
        if self.request.active_partner.is_hq:
            query |= Q(partner__hq=self.request.active_partner)
        return super(PartnerApplicationOpenListAPIView, self).get_queryset().filter(query)
class UCNListCreateAPIView(ListCreateAPIView):
    """List the active partner's unsolicited concept notes (UCNs) and create
    new UCN drafts.  Creating requires the UCN_DRAFT permission on top of
    UCN_VIEW.
    """
    permission_classes = (
        HasUNPPPermission(
            partner_permissions=[
                PartnerPermission.UCN_VIEW,
            ]
        ),
    )
    queryset = Application.objects.filter(is_unsolicited=True).distinct()
    filter_class = ApplicationsUnsolicitedFilter
    pagination_class = SmallPagination
    filter_backends = (DjangoFilterBackend, )
    def get_serializer_class(self, *args, **kwargs):
        # POST (draft creation) is gated by an extra permission check here
        # because the class-level permission only covers viewing.
        if self.request.method == 'POST':
            current_user_has_permission(
                self.request, partner_permissions=[PartnerPermission.UCN_DRAFT], raise_exception=True
            )
            return ManageUCNSerializer
        return ApplicationPartnerUnsolicitedDirectSerializer
    def get_queryset(self, *args, **kwargs):
        query = Q(partner=self.request.active_partner)
        # HQ profiles also see their country offices' UCNs.
        if self.request.active_partner.is_hq:
            query |= Q(partner__hq=self.request.active_partner)
        return super(UCNListCreateAPIView, self).get_queryset().filter(query)
class PartnerApplicationDirectListCreateAPIView(ListAPIView):
    """Partner-side list of the active partner's applications on published
    direct-selection CFEIs (despite the class name, this is list-only).
    """
    permission_classes = (
        HasUNPPPermission(
            partner_permissions=[
                PartnerPermission.DSR_VIEW,
            ]
        ),
    )
    queryset = Application.objects.filter(eoi__display_type=CFEI_TYPES.direct, eoi__is_published=True).distinct()
    filter_class = ApplicationsUnsolicitedFilter
    pagination_class = SmallPagination
    filter_backends = (DjangoFilterBackend, )
    serializer_class = ApplicationPartnerDirectSerializer
    def get_queryset(self, *args, **kwargs):
        query = Q(partner=self.request.active_partner)
        # HQ profiles also see their country offices' applications.
        if self.request.active_partner.is_hq:
            query |= Q(partner__hq=self.request.active_partner)
        return super(PartnerApplicationDirectListCreateAPIView, self).get_queryset().filter(query)
class ApplicationFeedbackListCreateAPIView(ListCreateAPIView):
    """List and create feedback entries on an application.

    Partners may only read feedback on their own application, and only once
    the CFEI is finalized.  Only the CFEI creator or a focal point may write
    feedback.
    """
    serializer_class = ApplicationFeedbackSerializer
    pagination_class = SmallPagination
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[],
            partner_permissions=[],
        ),
    )
    def get_queryset(self):
        application = get_object_or_404(Application, id=self.kwargs['pk'])
        if self.request.active_partner:
            if not application.partner == self.request.active_partner:
                raise PermissionDenied
            # Feedback stays hidden from the partner until finalization.
            if not application.eoi.status == CFEI_STATUSES.finalized:
                raise PermissionDenied('Partner Feedback is available after CFEI is finalized.')
        return application.application_feedbacks.all()
    def perform_create(self, serializer):
        application = get_object_or_404(Application, id=self.kwargs['pk'])
        if application.eoi:
            eoi = application.eoi
            if eoi.created_by == self.request.user or eoi.focal_points.filter(id=self.request.user.id).exists():
                return serializer.save(provider=self.request.user, application=application)
        # Falls through here both when the caller lacks the role and when the
        # application has no EOI (e.g. an unconverted unsolicited one).
        raise PermissionDenied(
            'Only CFEI creator or focal point can input comments in the “Feedback to partner” section'
        )
class ConvertUnsolicitedAPIView(CreateAPIView):
    """Convert an unsolicited concept note into a direct-selection (DSR)
    application; the conversion logic lives in the serializer.
    """
    serializer_class = ConvertUnsolicitedSerializer
    queryset = Application.objects.filter(is_unsolicited=True)
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[
                AgencyPermission.UCN_CONVERT_TO_DSR,
            ]
        ),
    )
    def perform_create(self, serializer):
        instance = serializer.save()
        send_notification_application_created(instance)
class ReviewSummaryAPIView(RetrieveUpdateAPIView):
    """
    Endpoint for review summary - comment & attachment
    """
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[]
        ),
    )
    serializer_class = ReviewSummarySerializer
    queryset = EOI.objects.all()
    def check_object_permissions(self, request, obj: EOI):
        """Layered access checks for reading/writing the review summary.

        Read access mirrors EOIApplicationsListAPIView: finalized EOIs need
        the finalized-view permission (same agency only); open EOIs need
        CFEI_VIEW_APPLICATIONS.  Writes are further restricted to the
        creator / focal points, and only before the EOI is sent for decision.
        """
        super(ReviewSummaryAPIView, self).check_object_permissions(request, obj)
        if obj.is_completed:
            if obj.agency == request.user.agency:
                current_user_has_permission(self.request, agency_permissions=[
                    AgencyPermission.CFEI_FINALIZED_VIEW_WINNER_AND_CN,
                ], raise_exception=True)
            else:
                raise PermissionDenied
        else:
            current_user_has_permission(self.request, agency_permissions=[
                AgencyPermission.CFEI_VIEW_APPLICATIONS,
            ], raise_exception=True)
        if request.method == 'GET':
            return
        if not obj.sent_for_decision and (
            obj.created_by == request.user or obj.focal_points.filter(id=request.user.id).exists()
        ):
            return
        self.permission_denied(request)
    @check_unpp_permission(agency_permissions=[AgencyPermission.CFEI_ADD_REVIEW_SUMMARY])
    def perform_update(self, serializer):
        super(ReviewSummaryAPIView, self).perform_update(serializer)
class EOIReviewersAssessmentsListAPIView(ListAPIView):
    """
    Reviewers with their assessments - summary
    """
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[
                AgencyPermission.CFEI_VIEW_ALL_REVIEWS,
            ]
        ),
    )
    serializer_class = EOIReviewersAssessmentsSerializer
    lookup_field = 'eoi_id'
    def get_queryset(self):
        eoi: EOI = get_object_or_404(EOI, id=self.kwargs['eoi_id'])
        # Cross-agency access is not allowed even with the permission.
        if not eoi.agency == self.request.user.agency:
            raise PermissionDenied
        return eoi.reviewers.all()
class EOIReviewersAssessmentsNotifyAPIView(APIView):
    """
    Create Notification to remind users
    """
    NOTIFICATION_MESSAGE_SENT = "Notification message sent successfully"
    NOTIFICATION_MESSAGE_WAIT = "Notification message sent recently. Need to wait 24 hours."
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[
                AgencyPermission.CFEI_VIEW_ALL_REVIEWS,
            ]
        ),
    )
    def post(self, request, *args, **kwargs):
        """Send a "review required" reminder to one reviewer of the EOI,
        rate-limited to once per 24 hours per user/EOI pair.
        Returns 201 when a notification was sent, 200 when throttled.
        """
        eoi = get_object_or_404(EOI, id=self.kwargs['eoi_id'])
        # 404s if the target user is not a reviewer of this EOI.
        user = get_object_or_404(eoi.reviewers.all(), id=self.kwargs['reviewer_id'])
        if not user_received_notification_recently(user, eoi, NotificationType.CFEI_REVIEW_REQUIRED):
            send_cfei_review_required_notification(eoi, [user])
            message = self.NOTIFICATION_MESSAGE_SENT
            status = statuses.HTTP_201_CREATED
        else:
            message = self.NOTIFICATION_MESSAGE_WAIT
            status = statuses.HTTP_200_OK
        return Response({"success": message}, status=status)
class AwardedPartnersListAPIView(ListAPIView):
    """List winning applications (awarded, not declined) of a given EOI."""
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[
                AgencyPermission.CFEI_VIEW,
            ]
        ),
    )
    serializer_class = AwardedPartnersSerializer
    lookup_field = 'eoi_id'
    def get_queryset(self):
        eoi_id = self.kwargs['eoi_id']
        # Winners who declined the award are excluded.
        return Application.objects.filter(did_win=True, did_decline=False, eoi_id=eoi_id)
class CompareSelectedListAPIView(ListAPIView):
    """Compare a chosen subset of an EOI's applications side by side.

    ``application_ids`` query param selects the applications (comma-separated
    ids); ``export=xlsx`` streams the comparison as a spreadsheet instead of
    returning JSON.
    """
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[
                AgencyPermission.CFEI_VIEW_ALL_REVIEWS,
            ]
        ),
    )
    serializer_class = CompareSelectedSerializer

    def get(self, request, *args, **kwargs):
        export = self.request.query_params.get("export")
        if export == 'xlsx':
            # Stream the spreadsheet directly into the HTTP response.
            response = HttpResponse(
                content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
            )
            generator = ApplicationCompareSpreadsheetGenerator(
                self.filter_queryset(self.get_queryset()), write_to=response
            )
            generator.generate()
            response['Content-Disposition'] = 'attachment; filename="{}"'.format(generator.filename)
            return response
        return super(CompareSelectedListAPIView, self).get(request, *args, **kwargs)

    def get_queryset(self):
        """Return only the applications listed in ``application_ids``.

        When the parameter is absent, return an empty queryset — there is
        nothing to compare.
        """
        queryset = Application.objects.select_related("partner").filter(eoi_id=self.kwargs['eoi_id'])
        application_ids = self.request.query_params.get("application_ids")
        if application_ids is not None:
            # Ignore non-numeric chunks rather than erroring out.
            ids = [chunk for chunk in application_ids.split(",") if chunk.isdigit()]
            queryset = queryset.filter(id__in=ids)
        else:
            # BUG FIX: previously `queryset.none()` was called but its return
            # value discarded, so ALL of the EOI's applications leaked into
            # the response when no ids were supplied.
            queryset = queryset.none()
        return queryset
class EOISendToPublishAPIView(RetrieveAPIView):
    """Let a CFEI draft's creator forward it to the focal point for
    publishing.  POST flips ``sent_for_publishing`` and notifies reviewers.
    """
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[
                AgencyPermission.CFEI_DRAFT_SEND_TO_FOCAL_POINT_TO_PUBLISH,
            ]
        ),
    )
    serializer_class = AgencyProjectSerializer
    queryset = EOI.objects.filter(sent_for_publishing=False, is_published=False)
    def check_object_permissions(self, request, obj):
        super(EOISendToPublishAPIView, self).check_object_permissions(request, obj)
        # Only the draft's creator may forward it for publishing.
        if obj.created_by == request.user:
            return
        self.permission_denied(request)
    @transaction.atomic
    def post(self, *args, **kwargs):
        project: EOI = self.get_object()
        if project.deadline_passed:
            raise serializers.ValidationError('Deadline date is set in the past, please update it before publishing.')
        project.sent_for_publishing = True
        project.save()
        send_project_draft_sent_for_review_notification(project)
        return Response(self.serializer_class(project).data)
class PublishCFEIAPIView(RetrieveAPIView):
    """Publish an unpublished CFEI (open or direct) via POST.

    Open CFEIs need CFEI_PUBLISH; direct ones need CFEI_DIRECT_PUBLISH plus
    extra validation (exactly one verified partner indicated).
    """
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[]
        ),
    )
    serializer_class = AgencyProjectSerializer
    queryset = EOI.objects.filter(is_published=False)
    def check_object_permissions(self, request, obj: EOI):
        super(PublishCFEIAPIView, self).check_object_permissions(request, obj)
        # Only the creator or a focal point may publish.
        if obj.created_by == request.user or obj.focal_points.filter(id=request.user.id).exists():
            return
        self.permission_denied(request)
    @transaction.atomic
    def post(self, *args, **kwargs):
        cfei = self.get_object()
        # Permission differs by CFEI type (open vs direct selection).
        if cfei.is_open:
            current_user_has_permission(self.request, agency_permissions=[
                AgencyPermission.CFEI_PUBLISH
            ], raise_exception=True)
        else:
            current_user_has_permission(self.request, agency_permissions=[
                AgencyPermission.CFEI_DIRECT_PUBLISH
            ], raise_exception=True)
        if cfei.deadline_passed:
            raise serializers.ValidationError('Deadline date is set in the past, please update it before publishing.')
        if cfei.is_direct:
            # Direct selection requires exactly one verified partner, and the
            # partner is notified about the indication at publish time.
            if not all(map(lambda a: a.partner.is_verified, cfei.applications.all())):
                raise serializers.ValidationError('All partners need to be verified before publishing.')
            if not cfei.applications.count() == 1:
                raise serializers.ValidationError('Only a single partner can be indicated.')
            list(map(send_notification_application_created, cfei.applications.all()))
        cfei.is_published = True
        cfei.published_timestamp = timezone.now()
        cfei.save()
        return Response(AgencyProjectSerializer(cfei).data)
class SendCFEIForDecisionAPIView(RetrieveAPIView):
    """Forward a published CFEI for partner selection via POST.

    Requires a filled review summary and at least one recommended
    application; flips ``sent_for_decision`` and notifies the decision maker.
    """
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[]
        ),
    )
    serializer_class = AgencyProjectSerializer
    queryset = EOI.objects.filter(is_published=True)
    def check_object_permissions(self, request, obj):
        super(SendCFEIForDecisionAPIView, self).check_object_permissions(request, obj)
        # Only the creator or a focal point may forward for decision.
        if obj.created_by == request.user or obj.focal_points.filter(id=request.user.id).exists():
            return
        self.permission_denied(request)
    @transaction.atomic
    def post(self, *args, **kwargs):
        cfei: EOI = self.get_object()
        # Either a comment or an attachment counts as a filled review summary.
        if not any((
            cfei.review_summary_comment,
            cfei.review_summary_attachment,
        )):
            raise serializers.ValidationError(
                'Review summary needs to be filled in before forwarding for partner selection.'
            )
        if not cfei.applications.filter(status=APPLICATION_STATUSES.recommended).exists():
            raise serializers.ValidationError(
                'You need to recommend at least one application before forwarding for partner selection.'
            )
        cfei.sent_for_decision = True
        cfei.save()
        send_eoi_sent_for_decision_notification(cfei)
        return Response(AgencyProjectSerializer(cfei).data)
class UCNManageAPIView(RetrieveUpdateAPIView, DestroyAPIView):
    """Manage an unpublished unsolicited concept note: retrieve, PATCH,
    delete, and publish (POST).  Scoped to the active partner (plus country
    offices for HQ profiles).
    """
    permission_classes = (
        HasUNPPPermission(
            partner_permissions=[]
        ),
    )
    serializer_class = ApplicationPartnerUnsolicitedDirectSerializer
    queryset = Application.objects.filter(is_published=False, is_unsolicited=True)
    def get_serializer_class(self):
        # Editing (PATCH) uses the write serializer; everything else the
        # read serializer.
        if self.request.method == 'PATCH':
            return ManageUCNSerializer
        return self.serializer_class
    def get_queryset(self):
        queryset = super(UCNManageAPIView, self).get_queryset()
        query = Q(partner=self.request.active_partner)
        # HQ profiles also manage their country offices' UCNs.
        if self.request.active_partner.is_hq:
            query |= Q(partner__hq=self.request.active_partner)
        return queryset.filter(query)
    @check_unpp_permission(partner_permissions=[PartnerPermission.UCN_SUBMIT])
    def post(self, *args, **kwargs):
        """Publish (submit) the UCN and notify the agency."""
        obj = self.get_object()
        obj.published_timestamp = timezone.now()
        obj.is_published = True
        obj.save()
        send_notification_application_created(obj)
        return Response(self.serializer_class(obj).data)
    @check_unpp_permission(partner_permissions=[PartnerPermission.UCN_DELETE])
    def perform_destroy(self, instance):
        return super(UCNManageAPIView, self).perform_destroy(instance)
class CompleteAssessmentsAPIView(ListAPIView):
    """List the current user's own assessments, and mark all of them for an
    EOI as completed (POST), once every preselected application is reviewed.
    """
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[]
        ),
    )
    serializer_class = ReviewerAssessmentsSerializer
    queryset = Assessment.objects.filter()
    def get_queryset(self):
        # Only assessments the user both authored and is the reviewer of.
        queryset = super(CompleteAssessmentsAPIView, self).get_queryset()
        return queryset.filter(created_by=self.request.user, reviewer=self.request.user)
    @transaction.atomic
    def post(self, *args, **kwargs):
        eoi = get_object_or_404(EOI, id=self.kwargs['eoi_id'])
        all_assessments = self.get_queryset().filter(
            application__eoi=eoi, application__status=APPLICATION_STATUSES.preselected
        )
        applications = eoi.applications.filter(status=APPLICATION_STATUSES.preselected)
        # Every preselected application must have an assessment before the
        # set can be completed as a whole.
        if not all_assessments.count() == applications.count():
            raise serializers.ValidationError('You need to review all applications before completing.')
        assessments = list(all_assessments.filter(completed=False))
        for ass in assessments:
            ass.completed = True
            ass.completed_date = timezone.now().date()
            ass.save()
        return Response(self.serializer_class(assessments, many=True).data)
class ClarificationRequestQuestionAPIView(ListCreateAPIView):
    """List / create clarification questions on a CFEI.

    Agency members can export the list as PDF (``?export=pdf``); partners
    only see their own questions and may only ask before the clarification
    request deadline.
    """
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[
                AgencyPermission.CFEI_PUBLISHED_VIEW_AND_ANSWER_CLARIFICATION_QUESTIONS,
            ],
            partner_permissions=[
                PartnerPermission.CFEI_VIEW,
            ]
        ),
    )
    serializer_class = ClarificationRequestQuestionSerializer
    pagination_class = SmallPagination
    def list(self, request, *args, **kwargs):
        # PDF export is agency-only.
        if request.GET.get('export', '').lower() == 'pdf' and request.agency_member:
            return CFEIClarificationQuestionPDFExporter(EOI.objects.get(pk=self.kwargs['eoi_id'])).get_as_response()
        return super(ClarificationRequestQuestionAPIView, self).list(request, *args, **kwargs)
    def get_queryset(self):
        queryset = ClarificationRequestQuestion.objects.filter(eoi_id=self.kwargs['eoi_id'])
        # Partners only see their own questions.
        if self.request.active_partner:
            queryset = queryset.filter(partner=self.request.active_partner)
        return queryset
    @check_unpp_permission(partner_permissions=[PartnerPermission.CFEI_SEND_CLARIFICATION_REQUEST])
    def perform_create(self, serializer):
        eoi: EOI = get_object_or_404(EOI, id=self.kwargs.get('eoi_id'))
        if eoi.clarification_request_deadline_date < timezone.now().date():
            raise PermissionDenied('Clarification Request Deadline has passed.')
        return serializer.save(eoi=eoi, partner=self.request.active_partner, created_by=self.request.user)
class ClarificationRequestAnswerFileAPIView(ListCreateAPIView):
    """List / upload answer files for a CFEI's clarification questions.

    Uploads: creators / focal points only, only AFTER the clarification
    deadline, capped at 3 files per project.
    """
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[
                AgencyPermission.CFEI_VIEW,
            ],
            partner_permissions=[
                PartnerPermission.CFEI_VIEW,
            ]
        ),
    )
    serializer_class = ClarificationRequestAnswerFileSerializer
    pagination_class = SmallPagination
    def get_queryset(self):
        return ClarificationRequestAnswerFile.objects.filter(eoi_id=self.kwargs.get('eoi_id'))
    @check_unpp_permission(agency_permissions=[AgencyPermission.CFEI_PUBLISHED_VIEW_AND_ANSWER_CLARIFICATION_QUESTIONS])
    def perform_create(self, serializer):
        eoi: EOI = get_object_or_404(EOI, id=self.kwargs.get('eoi_id'))
        if not eoi.created_by == self.request.user and not eoi.focal_points.filter(pk=self.request.user.pk).exists():
            raise PermissionDenied('Only creators / focal points can add answer files.')
        # Answers are published only after the question deadline closes.
        if eoi.clarification_request_deadline_date > timezone.now().date():
            raise PermissionDenied('Clarification Request Deadline has not passed yet.')
        if eoi.question_answers.count() >= 3:
            raise serializers.ValidationError(
                'A maximum of 3 Answer Files is allowed per project, remove some to upload new.'
            )
        return serializer.save(eoi=eoi)
class ClarificationRequestAnswerFileDestroyAPIView(DestroyAPIView):
    """Delete a clarification answer file; only its uploader may delete it."""
    permission_classes = (
        HasUNPPPermission(
            agency_permissions=[]
        ),
    )
    def get_queryset(self):
        # Scoping to created_by makes deleting others' files a 404.
        return ClarificationRequestAnswerFile.objects.filter(created_by=self.request.user)
| 37.014925 | 120 | 0.679256 | 41,448 | 0.928411 | 0 | 0 | 9,478 | 0.212302 | 0 | 0 | 3,256 | 0.072933 |
03a7b879acb5698f0c96f69d7464741824b42f9a | 1,422 | py | Python | older versions/older_version_1d_calculator.py | vishalbelsare/KramersMoyal | 6047cd303a474cd0411abf90ef7c81ec53500625 | [
"MIT"
] | 32 | 2019-11-26T06:45:56.000Z | 2022-03-15T18:47:07.000Z | older versions/older_version_1d_calculator.py | NikVard/KramersMoyal | 57e50278b0d31567054f763f3e0f3cc2c1e08315 | [
"MIT"
] | 9 | 2019-09-11T15:27:47.000Z | 2021-03-22T14:44:43.000Z | older versions/older_version_1d_calculator.py | NikVard/KramersMoyal | 57e50278b0d31567054f763f3e0f3cc2c1e08315 | [
"MIT"
] | 9 | 2019-08-23T16:55:24.000Z | 2022-02-10T14:08:02.000Z | # coding: utf-8
#! /usr/bin/env python
# FrequencyJumpLibrary
import numpy as np
from scipy import stats
import math as math
def KM(y, delta_t=1, Moments=(1, 2, 4, 6, 8), bandwidth=1.5, Lowerbound=False, Upperbound=False,
       Kernel='Epanechnikov'):
    """Kernel-based estimation of Kramers-Moyal coefficients of a 1-D series.

    Parameters
    ----------
    y : array_like
        1-D timeseries.
    delta_t : float
        Sampling interval used to normalise the coefficients.
    Moments : sequence of int
        Moment orders to estimate (a zeroth moment is prepended internally
        as the normalisation column).
    bandwidth : float
        Kernel bandwidth; also determines the kernel support width.
    Lowerbound, Upperbound : float or False
        Evaluation-range bounds; ``False`` (the default) means "use the
        data min/max".  NOTE: a bound of 0 is now honoured — previously
        ``Lowerbound == False`` treated 0 as "unset" because ``0 == False``
        is True in Python.
    Kernel : str
        Present for interface compatibility; only the Epanechnikov kernel is
        implemented and the parameter value is ignored.

    Returns
    -------
    res : ndarray, shape (n + Mn, len(Moments) + 1)
        Column 0 is the kernel-weighted count; column l+1 the l-th requested
        moment, normalised by count, factorial and delta_t.
    space : ndarray, shape (n + Mn,)
        The evaluation grid from Min to Max.
    """
    # Copy to a fresh list: the original used a mutable default argument.
    moments = [0] + list(Moments)
    length = len(moments)
    n = 5000
    Mn = int(n * bandwidth / 10)  # kernel support width in grid points ("minor n")
    res = np.zeros([n + Mn, length])
    # Epanechnikov kernel: 3/4 (1 - x^2) on x in [-1, 1], scaled to the bandwidth.
    Kernel = (3 * (1 - (np.linspace(-1 * bandwidth, 1 * bandwidth, Mn) / bandwidth) ** 2)) / (4 * bandwidth)
    yDist = y[1:] - y[:-1]
    # `is False` (not `== False`) so an explicit bound of 0 is respected.
    if Lowerbound is False:
        Min = min(y)
    else:
        Min = Lowerbound
    if Upperbound is False:
        Max = max(y)
    else:
        Max = Upperbound
    space = np.linspace(Min, Max, n + Mn)
    # Bin index of each sample on the evaluation grid.
    b = ((((y[:-1] - Min) / (abs(Max - Min))) * (n))).astype(int)
    trueb = np.unique(b[(b >= 0) * (b < n)])
    for i in trueb:
        r = yDist[b == i]
        for l in range(length):
            # Spread each bin's moment sum over the kernel support.
            res[i:i + Mn, l] += Kernel * (sum(r ** moments[l]))
    # Avoid division by zero in empty bins.
    res[:, 0][res[:, 0] == 0] = 1.
    for l in range(length - 1):
        res[:, l + 1] = np.divide(res[:, l + 1], (res[:, 0] * math.factorial(moments[l + 1]) * (delta_t)))
    return res, space
| 32.318182 | 161 | 0.510549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 241 | 0.169361 |
03aa122f7d46f999001e9b311a609665d78ad637 | 527 | py | Python | snoop/data/management/commands/migratecollections.py | liquidinvestigations/hoover-snoop2 | 28e328401609f53fb56abaa4817619085aa3fbee | [
"MIT"
] | null | null | null | snoop/data/management/commands/migratecollections.py | liquidinvestigations/hoover-snoop2 | 28e328401609f53fb56abaa4817619085aa3fbee | [
"MIT"
] | 168 | 2019-11-07T12:38:07.000Z | 2021-04-19T09:53:51.000Z | snoop/data/management/commands/migratecollections.py | liquidinvestigations/hoover-snoop2 | 28e328401609f53fb56abaa4817619085aa3fbee | [
"MIT"
] | null | null | null | """Creates and migrates databases and indexes.
"""
from django.core.management.base import BaseCommand
from ... import collections
from ...logs import logging_for_management_command
class Command(BaseCommand):
    """Management command that provisions all per-collection storage:
    databases (created + migrated), Elasticsearch indexes, and root records.
    """
    help = "Create and migrate the collection databases"
    def handle(self, *args, **options):
        # Honour -v/--verbosity for log output.
        logging_for_management_command(options['verbosity'])
        collections.create_databases()
        collections.migrate_databases()
        collections.create_es_indexes()
        collections.create_roots()
03aa7413f879d796eb4888768a9fac1efb6019e3 | 10,816 | py | Python | pm.py | chandrabhan-singh-98/pmpy | 8a34c7766a049b68f6c74e1085822e6aa3732c1e | [
"MIT"
] | null | null | null | pm.py | chandrabhan-singh-98/pmpy | 8a34c7766a049b68f6c74e1085822e6aa3732c1e | [
"MIT"
] | null | null | null | pm.py | chandrabhan-singh-98/pmpy | 8a34c7766a049b68f6c74e1085822e6aa3732c1e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
------------------------------------------------------------------------
A re-write of my original pm shell script in python
pm is a script that is meant to act as a status tracker
for my projects. It will
use VCS integration to provide in-depth information on all projects.
Cheers.
The quality of code is extremely bad. I'm not a python programmer
and this script is solely meant to be used by me but is extensible
for other users as well at your own risk obviously.
Author : canopeerus
License : MIT
------------------------------------------------------------------------
"""
import os,sys,json,getopt,configparser
# some global variable declarations for directory and file locations
# will need to clean this up to not make these options hardcoded
homedir = os.getenv("HOME")
config_dir = homedir + "/.config/pm"
config_fil = config_dir + "/config"
# This is run everytime to read configuration values like project locations
# Maybe this doesn't need to run everytime. We'll see later
# Load the user configuration and derive all file/dir locations from it.
config = configparser.ConfigParser()
config.read(config_fil)

# 'Default' keeps the database under ~/.cache/pm; any other value is taken
# as an explicit path to the db.json file.
if config['OPTIONS']['DatabaseFileLocation'] == 'Default':
    db_fil = homedir + "/.cache/pm/db.json"
else:
    db_fil = config['OPTIONS']['DatabaseFileLocation']

# BUG FIX: the original used db_fil.strip("db.json"), but str.strip removes
# any of the characters 'd','b','.','j','s','o','n' from BOTH ends of the
# string -- it only worked by accident. dirname() is the correct operation.
dbdir = os.path.dirname(db_fil)
db_fil_old = db_fil + ".old"
proj_dir = config['OPTIONS']['ProjectDirectory']
class color:
    """ANSI escape sequences used for terminal coloring throughout pm."""
    FG_BLUE = "\033[1;34m"
    FG_CYAN = "\033[1;36m"
    FG_GREEN = "\033[0;32m"
    FG_RESET = "\033[0;0m"
    FG_BOLD = "\033[;1m"
    FG_GREY = '\033[90m'
    FG_BLACK = '\033[30m'
    REVERSE = "\033[;7m"
    END = '\033[0m'
    FG_RED = '\033[31m'
    BG_RED = '\033[41m'
    BG_GREEN = '\033[42m'
    BG_BLUE = '\033[46m'
    BG_GREY = '\033[47m'
    ULINE = '\033[4m'
class pmpy_info_class:
    """Static program metadata displayed by `pm -v`."""
    version = '0.0.1'
    name = 'pmpy'
    license = 'MIT'
    author = 'canopeerus'
class misc_text_func:
    """Miscellaneous terminal-interaction helpers (prompts, help, version)."""
    def query_yes_no(self, question, default="yes") -> bool:
        """Ask *question* on stdout and return the user's yes/no answer.

        *default* ("yes"/"no"/None) is used when the user just presses
        Enter; any other value raises ValueError.  Re-prompts until a valid
        answer is given.
        """
        valid = {"yes": True, "y": True, "ye": True,
                 "no": False, "n": False}
        if default is None:
            prompt = " [y/n] "
        elif default == "yes":
            prompt = " [Y/n] "
        elif default == "no":
            prompt = " [y/N] "
        else:
            raise ValueError("invalid default answer: '%s'" % default)
        while True:
            sys.stdout.write(question + prompt)
            choice = input().lower()
            if default is not None and choice == '':
                return valid[default]
            elif choice in valid:
                return valid[choice]
            else:
                sys.stdout.write("Please respond with 'yes' or 'no' "
                                 "(or 'y' or 'n').\n")
    def print_help(self) -> None:
        """Print CLI usage help and exit with status 1."""
        sys.stdout.write("usage : pm [-ildhv] [ -m active,inactive,abandoned,complete]\n"+
                "Supported options:\n"+
                "\t-i : initialization process to populate project database\n"+
                "\t-d : Delete the central database json file\n"+
                "\t-h : Print usage help\n"+
                "\t-v : Print pmpy version info\n"+
                "\t-m : Set project status\n"+
                "\t-s <name> : Show detailed project information for one or all projects\n"+
                "\t-l : List the names of all projects\n"+
                "Status options : active,inactive,abandoned,complete\n"+
                "\nThis project is hosted at https://github.com/canopeerus/pmpy\n")
        sys.exit(1)
    def print_version(self) -> None:
        """Print version/license/author info from pmpy_info_class."""
        sys.stdout.write("pmpy version: "+pmpy_info_class.version+"\n"+
                "License: "+pmpy_info_class.license+"\n"+
                "Author: "+pmpy_info_class.author+"\n")
class pm_write_database:
    """Write-side operations on the pm project database (db.json):
    deletion, backup rotation, and interactive (re)initialization.
    """

    def delete_db_arg_func(self):
        """Interactively delete the database file (`pm -d`).

        Exits with status 1 when the user aborts; prints a hint when no
        database exists yet.
        """
        local_screen = misc_text_func()
        if os.path.isfile(db_fil):
            if local_screen.query_yes_no("Are you sure you want to delete the database?"):
                os.remove(db_fil)
                sys.stdout.write(color.FG_GREEN+"Project database successfully deleted\n"+color.END)
            else:
                sys.stdout.write(color.FG_RED+"Operation aborted\n"+color.END)
                sys.exit(1)
        else:
            sys.stdout.write("Database not found. Run pm -i to populate database.\n")

    def backup(self, db_option="current"):
        """Rotate the database into its .old backup.

        "current": move db.json -> db.json.old.
        "old": drop the existing backup first, then rotate.
        """
        if db_option == "old":
            os.remove(db_fil_old)
            os.rename(db_fil, db_fil_old)
            # BUG FIX: the original also called os.remove(db_fil) here, which
            # always raised FileNotFoundError because the rename above has
            # already moved the file away.
        elif db_option == "current":
            os.rename(db_fil, db_fil_old)

    def pm_init(self):
        """Interactively populate the database from the project directory.

        Backs up (or with user consent replaces) any existing database,
        scans proj_dir for subdirectories, prompts for a description and
        status per project, and writes the collected metadata as JSON.
        """
        if os.path.isfile(db_fil) and os.path.isfile(db_fil_old):
            local_screen = misc_text_func()
            sys.stdout.write("There is a database file and a backup file already available!!\n")
            user_choice_init = local_screen.query_yes_no("Delete old db and backup current db file?")
            if user_choice_init:
                self.backup("old")
            else:
                sys.stdout.write(color.FG_RED+"Operation aborted!\n"+color.END)
                sys.exit(2)
        elif os.path.isfile(db_fil):
            sys.stdout.write("Found existing database file. Backing it up to db.json.old\n")
            self.backup("current")
        if not os.path.isdir(dbdir):
            os.mkdir(dbdir)
        sys.stdout.write("Beginnning pm init process...\n")
        sys.stdout.write("Using projects location "+proj_dir+"\n")
        all_p_files = os.listdir(proj_dir)
        if len(all_p_files) == 0:
            sys.stdout.write(color.FG_RED+"No project directories found in central code directory!!\n"+color.END)
            sys.exit(3)
        else:
            db_file_out = open(db_fil, 'w+')
            proj_json_obj = {}
            proj_json_obj['project'] = []
            count = 0
            for i in all_p_files:
                # Only directories count as projects; stray files are skipped.
                if os.path.isdir(proj_dir+"/"+i):
                    count += 1
                    sys.stdout.write("\nShort description for "+i+" : ")
                    s_desc = input()
                    sys.stdout.write("Project status for "+i+" [active,inactive,complete,abandoned]: ")
                    p_status = input()
                    proj_json_obj['project'].append({
                        'name': i,
                        'status': p_status,
                        'short_desc': s_desc,
                        'author': 'canopeerus',
                        'location': proj_dir+"/"+i
                    })
            sys.stdout.write(color.FG_GREEN+"\nFound "+str(count)+" projects\n")
            json.dump(proj_json_obj, db_file_out)
            db_file_out.close()
            sys.stdout.write("Init process complete. Database created at "+db_fil+"\n"+color.END)
class pm_read_database:
    """Read-only queries against the pmpy project database."""

    def list_projects(self) -> None:
        """Print the name of every project in the database, one per line."""
        # BUGFIX: annotation was "-> bool" although nothing is ever returned.
        if not os.path.isfile(db_fil):
            sys.stdout.write("Project database not found. Run pmpy -i to populate the database\n")
        else:
            with open(db_fil, 'r') as p_file_in:
                data_dict = json.load(p_file_in)
            for pname in data_dict['project']:
                sys.stdout.write(pname['name']+"\n")

    def set_p_status_colour(self, pstatus) -> str:
        """Return *pstatus* wrapped in terminal colours matching the status."""
        if pstatus == "active":
            return color.BG_GREEN + color.FG_BLACK + pstatus + color.END
        elif pstatus == "abandoned":
            return color.BG_RED + color.FG_BLACK + pstatus + color.END
        elif pstatus == "inactive":
            return color.BG_GREY + color.FG_BLACK + pstatus + color.END
        elif pstatus == "complete":
            return color.BG_GREEN + color.FG_BLACK + pstatus + color.END
        # BUGFIX: unknown statuses used to fall through and return None, which
        # crashed the string concatenation in show_single_project; return the
        # status uncoloured instead.
        return pstatus

    def show_single_project(self, name):
        """
        despite the misleading name this function will print out all projects too if
        you pass the all argument
        """
        if not os.path.isfile(db_fil):
            sys.stdout.write("Project database not found. Run pmpy -i to populate the database\n")
        else:
            with open(db_fil, 'r') as p_file_in:
                data_dict = json.load(p_file_in)
            if name == "all":
                for pname in data_dict['project']:
                    sys.stdout.write(
                        "Name : "+pname['name']+"\n"+
                        "Author : "+pname['author']+"\n"+
                        "Short description : "+pname['short_desc']+"\n"+
                        "Status : "+self.set_p_status_colour(pname['status'])+"\n"+
                        "Location : "+color.ULINE+pname['location']+color.END+"\n\n")
                sys.exit(3)
            else:
                for pname in data_dict['project']:
                    if name == pname['name']:
                        sys.stdout.write(
                            "Name : "+pname['name']+"\n"+
                            "Author : "+pname['author']+"\n"+
                            "Short description : "+pname['short_desc']+"\n"+
                            "Status : "+self.set_p_status_colour(pname['status'])+"\n"+
                            "Location : "+color.ULINE+pname['location']+color.END+"\n")
                        sys.exit(3)
                sys.stdout.write("No matching project found for "+name+"\n")
def main_func(argv):
    """Parse command-line *argv* (sys.argv minus program name) and dispatch
    to the matching pmpy action."""
    screen = misc_text_func()
    write_db = pm_write_database()
    read_db = pm_read_database()
    try:
        options, args = getopt.getopt(
            argv, "hldivms:",
            ["help", "list", "delete", "init", "version", "show="])
    except getopt.GetoptError as err:
        sys.stdout.write(color.FG_RED + "pmpy : " + str(err) + color.END+"\n")
        screen.print_help()
    if len(argv) == 0:
        sys.stdout.write(color.FG_RED + "pmpy : No options specified\n\n" + color.END)
        screen.print_help()
    for opt, arg in options:
        if opt in ("-h", "--help"):
            screen.print_help()
        elif opt in ("-d", "--delete"):
            write_db.delete_db_arg_func()
            sys.exit(2)
        elif opt in ("-i", "--init"):
            write_db.pm_init()
        elif opt in ("-v", "--version"):
            screen.print_version()
        elif opt in ("-l", "--list"):
            read_db.list_projects()
        elif opt in ("-s", "--show"):
            read_db.show_single_project(arg)
        elif opt == "-m":
            sys.stdout.write("Updating is not supported at the moment.\nRun pmpy -di to reinitiate with changes.\n")
        else:
            # BUGFIX: was "assert False", which is silently stripped under
            # "python -O"; fail loudly for genuinely unhandled options.
            raise RuntimeError("unhandled option: " + opt)
# Script entry point: forward the CLI arguments (without the program name).
if __name__ == "__main__":
    main_func(sys.argv[1:])
| 41.282443 | 116 | 0.530788 | 8,148 | 0.753328 | 0 | 0 | 0 | 0 | 0 | 0 | 3,872 | 0.357988 |
03acdee8d03255dcc11e51424c9d56a4f5a10599 | 251 | py | Python | epikjjh/baekjoon/17413.py | 15ers/Solve_Naively | 23ee4a3aedbedb65b9040594b8c9c6d9cff77090 | [
"MIT"
] | 3 | 2019-05-19T13:44:39.000Z | 2019-07-03T11:15:20.000Z | epikjjh/baekjoon/17413.py | 15ers/Solve_Naively | 23ee4a3aedbedb65b9040594b8c9c6d9cff77090 | [
"MIT"
] | 7 | 2019-05-06T02:37:26.000Z | 2019-06-29T07:28:02.000Z | epikjjh/baekjoon/17413.py | 15ers/Solve_Naively | 23ee4a3aedbedb65b9040594b8c9c6d9cff77090 | [
"MIT"
] | 1 | 2019-07-28T06:24:54.000Z | 2019-07-28T06:24:54.000Z | import re
# Baekjoon 17413: reverse every word of the input line, leaving <...> tag
# spans untouched. The regex captures either a tag-like span or a run of
# plain text between tags.
stream = input()
token_re = re.compile("<?[^<>]+>?")
pieces = []
for token in token_re.findall(stream):
    if token.startswith("<"):
        # Tag span: copied through verbatim.
        pieces.append(token)
    else:
        # Plain text: reverse each whitespace-separated word.
        for word in token.split():
            pieces.append(word[::-1] + " ")
ans = "".join(pieces).rstrip()
print(ans)
03ada8be65d6325b4e0a4f2d0137005593cfbd56 | 396 | py | Python | day_09/test_solution.py | anguswilliams91/advent-of-code-2022 | 00cc08900fe5e50f0bf5d657e9dfc0691eccac48 | [
"MIT"
] | null | null | null | day_09/test_solution.py | anguswilliams91/advent-of-code-2022 | 00cc08900fe5e50f0bf5d657e9dfc0691eccac48 | [
"MIT"
] | null | null | null | day_09/test_solution.py | anguswilliams91/advent-of-code-2022 | 00cc08900fe5e50f0bf5d657e9dfc0691eccac48 | [
"MIT"
] | null | null | null | """Tests for day 9."""
from day_09.solution import sum_of_low_points, product_of_biggest_basins
_EXAMPLE_INPUT = """2199943210
3987894921
9856789892
8767896789
9899965678
"""
def test_part_one_example_solution_is_recovered():
    # Worked example from the puzzle statement: low-point risk levels sum to 15.
    assert sum_of_low_points(_EXAMPLE_INPUT) == 15
def test_part_two_example_solution_is_recovered():
    # Worked example from the puzzle statement: product of the three largest
    # basin sizes is 1134.
    assert product_of_biggest_basins(_EXAMPLE_INPUT) == 1134
| 19.8 | 72 | 0.813131 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 83 | 0.209596 |
03b0e27edf370cce2d7d3b2ec53ff51621fdc4ee | 6,605 | py | Python | hubspot/crm/extensions/accounting/models/create_user_account_request_external.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | null | null | null | hubspot/crm/extensions/accounting/models/create_user_account_request_external.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | null | null | null | hubspot/crm/extensions/accounting/models/create_user_account_request_external.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Accounting Extension
These APIs allow you to interact with HubSpot's Accounting Extension. It allows you to: * Specify the URLs that HubSpot will use when making webhook requests to your external accounting system. * Respond to webhook calls made to your external accounting system by HubSpot # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.extensions.accounting.configuration import Configuration
class CreateUserAccountRequestExternal(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared OpenAPI type (all strings here).
    openapi_types = {"account_id": "str", "account_name": "str", "currency_code": "str"}
    # Python attribute name -> JSON field name used on the wire.
    attribute_map = {
        "account_id": "accountId",
        "account_name": "accountName",
        "currency_code": "currencyCode",
    }
    def __init__(
        self,
        account_id=None,
        account_name=None,
        currency_code=None,
        local_vars_configuration=None,
    ):  # noqa: E501
        """CreateUserAccountRequestExternal - a model defined in OpenAPI"""  # noqa: E501
        # Default configuration controls whether the property setters below
        # perform client-side validation.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._account_id = None
        self._account_name = None
        self._currency_code = None
        self.discriminator = None
        # Assign through the property setters so required-field validation runs.
        self.account_id = account_id
        self.account_name = account_name
        self.currency_code = currency_code
    @property
    def account_id(self):
        """Gets the account_id of this CreateUserAccountRequestExternal.  # noqa: E501
        The id of the account in your system.  # noqa: E501
        :return: The account_id of this CreateUserAccountRequestExternal.  # noqa: E501
        :rtype: str
        """
        return self._account_id
    @account_id.setter
    def account_id(self, account_id):
        """Sets the account_id of this CreateUserAccountRequestExternal.
        The id of the account in your system.  # noqa: E501
        :param account_id: The account_id of this CreateUserAccountRequestExternal.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is enabled.
        if (
            self.local_vars_configuration.client_side_validation and account_id is None
        ):  # noqa: E501
            raise ValueError(
                "Invalid value for `account_id`, must not be `None`"
            )  # noqa: E501
        self._account_id = account_id
    @property
    def account_name(self):
        """Gets the account_name of this CreateUserAccountRequestExternal.  # noqa: E501
        The name of the account in your system. This is normally the name visible to your users.  # noqa: E501
        :return: The account_name of this CreateUserAccountRequestExternal.  # noqa: E501
        :rtype: str
        """
        return self._account_name
    @account_name.setter
    def account_name(self, account_name):
        """Sets the account_name of this CreateUserAccountRequestExternal.
        The name of the account in your system. This is normally the name visible to your users.  # noqa: E501
        :param account_name: The account_name of this CreateUserAccountRequestExternal.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is enabled.
        if (
            self.local_vars_configuration.client_side_validation
            and account_name is None
        ):  # noqa: E501
            raise ValueError(
                "Invalid value for `account_name`, must not be `None`"
            )  # noqa: E501
        self._account_name = account_name
    @property
    def currency_code(self):
        """Gets the currency_code of this CreateUserAccountRequestExternal.  # noqa: E501
        The default currency that this account uses.  # noqa: E501
        :return: The currency_code of this CreateUserAccountRequestExternal.  # noqa: E501
        :rtype: str
        """
        return self._currency_code
    @currency_code.setter
    def currency_code(self, currency_code):
        """Sets the currency_code of this CreateUserAccountRequestExternal.
        The default currency that this account uses.  # noqa: E501
        :param currency_code: The currency_code of this CreateUserAccountRequestExternal.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is enabled.
        if (
            self.local_vars_configuration.client_side_validation
            and currency_code is None
        ):  # noqa: E501
            raise ValueError(
                "Invalid value for `currency_code`, must not be `None`"
            )  # noqa: E501
        self._currency_code = currency_code
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialise any model instances held in lists.
                result[attr] = list(
                    map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
                )
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialise any model instances held as dict values.
                result[attr] = dict(
                    map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict")
                        else item,
                        value.items(),
                    )
                )
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateUserAccountRequestExternal):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, CreateUserAccountRequestExternal):
            return True
        return self.to_dict() != other.to_dict()
| 33.025 | 290 | 0.614232 | 6,039 | 0.914307 | 0 | 0 | 3,054 | 0.462377 | 0 | 0 | 3,244 | 0.491143 |
03b167654e95b104f240f4988e88ebf5e5a4f208 | 1,764 | py | Python | 03_spider_douyin/action_douyin.py | theThreeKingdom/python-exercises | fc08a7bbb9d6b53d5761b9e1017f293bff4e26db | [
"Apache-2.0"
] | null | null | null | 03_spider_douyin/action_douyin.py | theThreeKingdom/python-exercises | fc08a7bbb9d6b53d5761b9e1017f293bff4e26db | [
"Apache-2.0"
] | null | null | null | 03_spider_douyin/action_douyin.py | theThreeKingdom/python-exercises | fc08a7bbb9d6b53d5761b9e1017f293bff4e26db | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2020/4/2 22:55
# @Author : Nixin
# @Email : nixin@foxmail.com
# @File : action_douyin.py
# @Software: PyCharm
from appium import webdriver
from time import sleep
import random
class Action():
    """Drives the Douyin Lite Android app through Appium, endlessly swiping the feed."""
    def __init__(self):
        # Desired Capabilities describing the target device and app under test.
        self.desired_caps = {
            "platformName": "Android",
            "deviceName": "192.168.0.135:5555",
            "appPackage": "com.ss.android.ugc.aweme.lite",
            "appActivity": "com.ss.android.ugc.aweme.main.MainActivity",
            'newCommandTimeout': "36000",
            "noReset": True,
            "noSign": True
        }
        # Appium server endpoint.
        self.server = 'http://localhost:4723/wd/hub'
        # Open a new Appium session against the device.
        self.driver = webdriver.Remote(self.server, self.desired_caps)
        print(self.driver.get_window_size())
        # Record the screen size and derive the swipe start coordinates/distance.
        self.x = self.driver.get_window_size()['width']
        self.y = self.driver.get_window_size()['height']
        self.start_x = 1/2*self.x
        self.start_y = 1/2*self.y
        self.distance = 120
    def comments(self):
        # Give the app a moment to finish launching.
        sleep(3)
        # Tap the screen once after the app starts to make sure the page is shown.
        # self.driver.tap([(360, 604)], 500)
    def scroll(self):
        # Swipe forever.
        while True:
            # Random delay of 3-10 seconds between swipes.
            r = random.choice(range(3, 11))
            print("%d秒后再滑屏:%d,%d,%d,%d" % (r, self.start_x, int(1 / 2 * self.y), self.start_x, int(1 / 6 * self.y)))
            sleep(r)
            # Perform the upward swipe: mid-screen to the upper sixth, 300 ms.
            self.driver.swipe(self.start_x, int(1/2*self.y), self.start_x, int(1/6*self.y), 300)
    def start(self):
        self.comments()
        self.scroll()
# Script entry point: build the driver session and start the endless swipe loop.
if __name__ == '__main__':
    Action().start()
| 28 | 116 | 0.557823 | 1,602 | 0.841387 | 0 | 0 | 0 | 0 | 0 | 0 | 726 | 0.381303 |
03b25b5a5faef2e80acf0a941b25849bf40608d7 | 26 | py | Python | data/studio21_generated/interview/1624/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
] | null | null | null | data/studio21_generated/interview/1624/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
] | null | null | null | data/studio21_generated/interview/1624/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
] | null | null | null | def sq_cub_rev_prime(n):
| 13 | 24 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
03b31105b49366639294bcbe79b90c112f7393bb | 189 | py | Python | glitter_documents/apps.py | developersociety/django-glitter-documents | 8d13d6fc7133f7d6f595a4e780f291caf3ab4efa | [
"BSD-3-Clause"
] | null | null | null | glitter_documents/apps.py | developersociety/django-glitter-documents | 8d13d6fc7133f7d6f595a4e780f291caf3ab4efa | [
"BSD-3-Clause"
] | null | null | null | glitter_documents/apps.py | developersociety/django-glitter-documents | 8d13d6fc7133f7d6f595a4e780f291caf3ab4efa | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django.apps import AppConfig
class DocumentsConfig(AppConfig):
    """Django application configuration for the glitter_documents app."""
    name = 'glitter_documents'
    label = 'glitter_documents'
    verbose_name = 'Documents'
| 18.9 | 33 | 0.698413 | 127 | 0.671958 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.380952 |
03b414db96d625b3d6c44cb1055d545e44f688f9 | 1,332 | py | Python | my_web_project/authentication/forms.py | AlexYankoff/my_web_project | d7d2c26289c561bc39d713ad5a1adff7a01b6508 | [
"MIT"
] | null | null | null | my_web_project/authentication/forms.py | AlexYankoff/my_web_project | d7d2c26289c561bc39d713ad5a1adff7a01b6508 | [
"MIT"
] | null | null | null | my_web_project/authentication/forms.py | AlexYankoff/my_web_project | d7d2c26289c561bc39d713ad5a1adff7a01b6508 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from my_web_project.common.forms import BootstrapFormMixin
from my_web_project.main.models import Student, Teacher
class StudentForm(BootstrapFormMixin,forms.ModelForm):
    """ModelForm for a Student; the user link and completion flag are excluded
    from user editing."""
    class Meta:
        model = Student
        # fields = '__all__'
        exclude = ('user','is_complete')
class TeacherForm(BootstrapFormMixin,forms.ModelForm):
    """ModelForm for a Teacher; the user link and completion flag are excluded
    from user editing."""
    class Meta:
        model = Teacher
        exclude = ('user', 'is_complete')
class LoginForm(BootstrapFormMixin, forms.Form):
    """Username/password login form that authenticates during validation.

    On success the authenticated user is kept on ``self.user`` and returned
    by ``save()`` so views can treat login like a regular form.
    """
    user = None

    username = forms.CharField(max_length=30, )
    password = forms.CharField(
        max_length=15,
        widget=forms.PasswordInput(),
    )

    def clean(self):
        # Authenticate here so a bad credential pair surfaces as a
        # form-level (non-field) validation error.
        self.user = authenticate(
            username=self.cleaned_data['username'],
            password=self.cleaned_data['password'],
        )
        if not self.user:
            # BUGFIX: corrected the misspelt "passworord " (with trailing
            # space) in the user-facing error message.
            raise ValidationError('Incorrect username and/or password')
        # Django convention: Form.clean() returns the cleaned data.
        return self.cleaned_data

    def save(self):
        # Mirrors ModelForm.save() so the view code stays uniform.
        return self.user
class MyUserCreationForm(BootstrapFormMixin,UserCreationForm):
    """Stock Django user-creation form with bootstrap widget styling mixed in."""
    pass
    # class Meta:
    #     model = User
    #     fields = ("username","is_staff",)
| 25.132075 | 74 | 0.686186 | 901 | 0.676426 | 0 | 0 | 0 | 0 | 0 | 0 | 196 | 0.147147 |
03b4819121dac16a6891e3a0fa802981205674c3 | 9,024 | py | Python | experiments_pu/compute_prediction.py | 6Ulm/unbalanced_gromov_wasserstein | be23571f653dab16fd0722cb1ec2c3412a1e3f30 | [
"MIT"
] | 22 | 2020-09-10T21:57:02.000Z | 2022-03-16T14:42:47.000Z | experiments_pu/compute_prediction.py | 6Ulm/unbalanced_gromov_wasserstein | be23571f653dab16fd0722cb1ec2c3412a1e3f30 | [
"MIT"
] | null | null | null | experiments_pu/compute_prediction.py | 6Ulm/unbalanced_gromov_wasserstein | be23571f653dab16fd0722cb1ec2c3412a1e3f30 | [
"MIT"
] | 8 | 2020-09-11T00:59:31.000Z | 2022-03-29T22:19:08.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 10:58:27 2020
Experiments where one marginal is fixed
"""
import os
import numpy as np
from joblib import Parallel, delayed
import torch
import ot
from unbalancedgw.batch_stable_ugw_solver import log_batch_ugw_sinkhorn
from unbalancedgw._batch_utils import compute_batch_flb_plan
import utils
from partial_gw import compute_cost_matrices
# Output directory for the saved marginals: <cwd>/saved_plans/<folder>/.
# Both levels are created on import if missing.
folder = "marginals_without_rescaling"
path = os.getcwd() + "/saved_plans"
if not os.path.isdir(path):
    os.mkdir(path)
path = path + "/" + folder
if not os.path.isdir(path):
    os.mkdir(path)
def euclid_dist(x, y):
    """Compute all pairwise Euclidean distances between two point clouds.

    Parameters
    ----------
    x: torch.Tensor of size [size_X, dim]
        coordinates of the first group of vectors of R^d.
    y: torch.Tensor of size [size_Y, dim]
        coordinates of the second group of vectors of R^d.

    Returns
    -------
    torch.Tensor of size [size_X, size_Y]
        Matrix of all pairwise distances.
    """
    # Broadcast to [size_X, size_Y, dim], then reduce the coordinate axis.
    pairwise_diff = x.unsqueeze(1) - y.unsqueeze(0)
    return pairwise_diff.norm(p=2, dim=2)
def prepare_initialisation(dataset_p, dataset_u, n_pos, n_unl, prior, nb_try):
    """Build the stack of plans used to warm-start the UGW solver.

    Each fold's initialization is the partial-EMD plan as in Chapel et al.,
    valid when both domains live in the same feature space.

    Parameters
    ----------
    dataset_p: string
        name of the dataset used for positive data
    dataset_u: string
        name of the dataset used for unlabeled data
    n_pos: int
        number of positives samples
    n_unl: int
        number of unlabeled samples
    prior: float
        proportion of positive samples in the unlabeled dataset
    nb_try: int
        number of folds to perform PU learning

    Returns
    -------
    init_plan: torch.Tensor of size [nb_try, n_pos, n_unl]
        Set of initialization plans used to init UGW.
    """
    init_plan = torch.zeros([nb_try, n_pos, n_unl])
    for fold in range(nb_try):
        # Draw this fold's positive / unlabeled datasets and cost matrices.
        P, U, _ = utils.draw_p_u_dataset_scar(dataset_p, dataset_u, n_pos,
                                              n_unl, prior, seed_nb=fold)
        Ctot, C1, C2, mu, nu = compute_cost_matrices(P, U, prior,
                                                     nb_dummies=10)
        # Solve partial EMD; keep only the rows of the true positives
        # (the dummy rows added by compute_cost_matrices are discarded).
        init_plan[fold] = torch.tensor(ot.emd(mu, nu, Ctot)[:n_pos, :])
    return init_plan
def compute_plan_ugw(dataset_p, dataset_u, n_pos, n_unl, prior, eps, rho, rho2,
                     nb_try, device=0):
    """Solve batched UGW between positive/unlabeled datasets over nb_try folds
    and save the plans' second marginals as .npy files under `path`.

    Two files are written: the marginal of the initialization plan
    (ugw_init_*) and the marginal of the converged plan (ugw_plan_*).
    Raises Exception if the solver returns a NaN plan.
    """
    # Set default type and GPU device
    torch.cuda.set_device(device)
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    # keep constant to normalize cost, uniform over folds by taking first batch
    # P, U, _ = utils.draw_p_u_dataset_scar(dataset_p, dataset_u, n_pos, n_unl,
    #                                       prior, 0)
    # U = torch.tensor(U.values,dtype=torch.float)  # Convert to torch
    # cst_norm = euclid_dist(U, U).max()
    # Draw cost for all seeds as batch
    Cx = torch.zeros([nb_try, n_pos, n_pos])
    Cy = torch.zeros([nb_try, n_unl, n_unl])
    for i in range(nb_try):
        P, U, y_u = utils.draw_p_u_dataset_scar(dataset_p, dataset_u, n_pos,
                                                n_unl, prior, seed_nb=i)
        P, U = torch.tensor(P.values, dtype=torch.float), \
               torch.tensor(U.values, dtype=torch.float)
        cx, cy = euclid_dist(P, P), euclid_dist(U, U)
        Cx[i], Cy[i] = cx, cy
        # Cx[i], Cy[i] = cx / cst_norm, cy / cst_norm
        del cx, cy
    # Compute init and weights (uniform histograms on both sides)
    mu = (torch.ones([n_pos]) / n_pos).expand(nb_try, -1)
    nu = (torch.ones([n_unl]) / n_unl).expand(nb_try, -1)
    if P.shape[1] == U.shape[1]:  # If domains are the same
        # Same feature dimension: warm-start from the partial-EMD plan.
        init_plan = prepare_initialisation(dataset_p, dataset_u, n_pos, n_unl,
                                           prior, nb_try)
    else:
        # Different feature spaces: warm-start from the FLB plan instead.
        _, _, init_plan = compute_batch_flb_plan(
            mu, Cx, nu, Cy, eps=eps, rho=rho, rho2=rho2,
            nits_sinkhorn=50000, tol_sinkhorn=1e-5)
    # Compute the marginal of init and save as file
    pi_numpy = init_plan.sum(dim=1).cpu().data.numpy()
    fname = f'/ugw_init_{dataset_p}_{n_pos}_{dataset_u}_{n_unl}_' \
            f'prior{prior}_eps{eps}_rho{rho}_rho{rho2}_reps{nb_try}.npy'
    np.save(path + fname, pi_numpy)
    # Set params and start the grid wrt entropic param eps
    pi = log_batch_ugw_sinkhorn(mu, Cx, nu, Cy, init=init_plan,
                                eps=eps, rho=rho, rho2=rho2,
                                nits_plan=3000, tol_plan=1e-5,
                                nits_sinkhorn=3000, tol_sinkhorn=1e-6)
    if torch.any(torch.isnan(pi)):
        raise Exception(f"Solver got NaN plan with params (eps, rho) = "
                        f"{dataset_p, dataset_u, nb_try, eps, rho, rho2}")
    # Compute the marginal and save as file
    pi_numpy = pi.sum(dim=1).cpu().data.numpy()
    fname = f'/ugw_plan_{dataset_p}_{n_pos}_{dataset_u}_{n_unl}_' \
            f'prior{prior}_eps{eps}_rho{rho}_rho{rho2}_reps{nb_try}.npy'
    np.save(path + fname, pi_numpy)
    print(
        f"DONE = Dataset {dataset_p, dataset_u}, eps = {eps}, "
        f"rho = {rho, rho2}, reps = {nb_try}")
    return
if __name__ == '__main__':
    parallel_gpu = True
    # epsilon set to 2**-9 but can be optimized via grid-search
    grid_eps = [2. ** k for k in range(-9, -8, 1)]
    grid_rho = [2. ** k for k in range(-10, -4, 1)]
    nb_try = 40

    # List all tasks for the Caltech datasets: each task tuple is
    # (data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try).
    list_tasks = []

    # # Matching similar features - prior set to 10%
    n_pos, n_unl, prior = 100, 100, 0.1
    list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam', 'surf_dslr']
    list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam',
                  'decaf_dslr']
    list_data = [('surf_Caltech', d) for d in list_surf] + [
        ('decaf_caltech', d) for d in list_decaf]
    list_tasks = list_tasks + [
        (data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)
        for (data_pos, data_unl) in list_data for eps in grid_eps
        for rho in grid_rho for rho2 in grid_rho]

    # # Matching similar features - prior set to 20%
    n_pos, n_unl, prior = 100, 100, 0.2
    list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam']
    list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam']
    list_data = [('surf_Caltech', d) for d in list_surf] + [
        ('decaf_caltech', d) for d in list_decaf]
    list_tasks = list_tasks + [
        (data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)
        for (data_pos, data_unl) in list_data for eps in grid_eps
        for rho in grid_rho for rho2 in grid_rho]

    # Matching different features - prior set to 10%
    n_pos, n_unl, prior = 100, 100, 0.1
    list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam', 'surf_dslr']
    list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam',
                  'decaf_dslr']
    list_data = [('surf_Caltech', d) for d in list_decaf] + [
        ('decaf_caltech', d) for d in list_surf]
    list_tasks = list_tasks + [
        (data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)
        for (data_pos, data_unl) in list_data for eps in grid_eps
        for rho in grid_rho for rho2 in grid_rho]

    # # Matching different features - prior set to 20%
    n_pos, n_unl, prior = 100, 100, 0.2
    list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam']
    list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam']
    list_data = [('surf_Caltech', d) for d in list_decaf] + [
        ('decaf_caltech', d) for d in list_surf]
    list_tasks = list_tasks + [
        (data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)
        for (data_pos, data_unl) in list_data for eps in grid_eps
        for rho in grid_rho for rho2 in grid_rho]

    if parallel_gpu:
        # Fan the task list out over the available GPUs, one job per device.
        assert torch.cuda.is_available()
        list_device = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
        total_devices = torch.cuda.device_count()
        print(
            f"Parallel computation // Total GPUs available = {total_devices}")
        pll = Parallel(n_jobs=total_devices)
        iterator = (
            delayed(compute_plan_ugw)(data_pos, data_unl, n_pos, n_unl, prior,
                                      eps, rho, rho2, nb_try,
                                      device=list_device[k % total_devices])
            for
            k, (
                data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2,
                nb_try) in
            enumerate(list_tasks))
        pll(iterator)
    else:
        # Sequential fallback on the default device.
        print("Not Parallel")
        for (data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2,
             nb_try) in list_tasks:
            compute_plan_ugw(data_pos, data_unl, n_pos, n_unl, prior, eps, rho,
                             rho2, nb_try)
            print(f'{data_pos, data_unl} done.')
| 37.757322 | 79 | 0.610372 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,341 | 0.370235 |
03b7252c4a570f5045354d6d3a9bb828ebea09f4 | 3,340 | py | Python | source_code/ghc2018.py | nuno-chicoria/GHC_2018 | d3a19c4f6293dd24ca06d24fdde58da04800781b | [
"Unlicense"
] | null | null | null | source_code/ghc2018.py | nuno-chicoria/GHC_2018 | d3a19c4f6293dd24ca06d24fdde58da04800781b | [
"Unlicense"
] | null | null | null | source_code/ghc2018.py | nuno-chicoria/GHC_2018 | d3a19c4f6293dd24ca06d24fdde58da04800781b | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 1 18:44:04 2018
@author: JavaWizards
"""
import numpy as np
# Google Hash Code 2018 qualification ("self-driving rides") greedy solver.
# First input line: rows, cols, fleet size, ride count, bonus, sim length.
file = "/Users/nuno_chicoria/Downloads/b_should_be_easy.in"
handle = open(file)
R, C, F, N, B, T = handle.readline().split()
rides = []
index = []
for i in range(int(N)):
    index.append(i)
for line in handle:
    rides.append(line.split())
# BUGFIX: input handle was never closed.
handle.close()
# Keep each ride's original index as an extra column (col 6), then sort the
# rides by latest-finish time (col 5) so the most urgent rides come first.
rides_np = np.asarray(rides)
rides_np = np.column_stack([rides_np, index])
# BUGFIX: the np.int alias was removed in NumPy 1.24; the builtin int is the
# documented drop-in replacement and behaves identically here.
rides_np = rides_np.astype(int)
rides_np = rides_np[rides_np[:,5].argsort()]
# vehicles[i] = [state, position, pickup target, dropoff target, ride ids]
# state: "A" = available, "C" = driving to pickup, "D" = driving to dropoff.
vehicles = {}
for i in range(int(F)):
    vehicles[i] = ["A", [0, 0], [0, 0], [0, 0], []]
for i in range(int(T)):
    # Discard rides whose latest-finish time has already passed.
    rides_np = rides_np[rides_np[:,5] > i]
    # Phase 1: try to assign the most urgent remaining ride to each idle car.
    for item in range(len(vehicles)):
        if vehicles[item][0] == "A":
            if rides_np.size != 0:
                # NOTE(review): ">=" accepts a ride only when the car cannot
                # reach the pickup before its earliest start -- this looks
                # inverted, but is kept as-is to preserve submitted behaviour.
                if abs(vehicles[item][1][0] - rides_np[0, 0]) + abs(vehicles[item][1][1] - rides_np[0, 1]) + i >= rides_np[0, 4]:
                    # Accept only if pickup + ride distance still fits before
                    # the ride's latest-finish time.
                    if abs(vehicles[item][1][0] - rides_np[0, 0]) + abs(vehicles[item][1][1] - rides_np[0, 1]) + i + abs(rides_np[0,0] - rides_np[0,2]) + abs(rides_np[0,1] - rides_np[0,3]) <= rides_np[0, 5]:
                        vehicles[item][0] = "C"
                        vehicles[item][2] = [rides_np[0, 0], rides_np[0, 1]]
                        vehicles[item][3] = [rides_np[0, 2], rides_np[0, 3]]
                        vehicles[item][4].append(rides_np[0, 6])
                        rides_np = np.delete(rides_np, (0), axis=0)
                else:
                    # Ride can't be served by this car; drop it from the pool.
                    rides_np = np.delete(rides_np, (0), axis=0)
    # Phase 2: move cars heading to a pickup one step (rows first, then cols).
    for item in range(len(vehicles)):
        if vehicles[item][0] == "C":
            if vehicles[item][1][0] < vehicles[item][2][0]:
                vehicles[item][1][0] = vehicles[item][1][0] + 1
            elif vehicles[item][1][0] > vehicles[item][2][0]:
                vehicles[item][1][0] = vehicles[item][1][0] - 1
            elif vehicles[item][1][0] == vehicles[item][2][0]:
                if vehicles[item][1][1] < vehicles[item][2][1]:
                    vehicles[item][1][1] = vehicles[item][1][1] + 1
                elif vehicles[item][1][1] > vehicles[item][2][1]:
                    vehicles[item][1][1] = vehicles[item][1][1] - 1
                else:
                    # Reached the pickup cell: switch to driving the passenger.
                    vehicles[item][0] = "D"
    # Phase 3: move cars carrying a passenger one step towards the dropoff.
    for item in range(len(vehicles)):
        if vehicles[item][0] == "D":
            if vehicles[item][1][0] < vehicles[item][3][0]:
                vehicles[item][1][0] += 1
            elif vehicles[item][1][0] > vehicles[item][3][0]:
                vehicles[item][1][0] -= 1
            elif vehicles[item][1][0] == vehicles[item][3][0]:
                if vehicles[item][1][1] < vehicles[item][3][1]:
                    vehicles[item][1][1] += 1
                elif vehicles[item][1][1] > vehicles[item][3][1]:
                    vehicles[item][1][1] -= 1
                else:
                    # Dropoff reached: car becomes available again.
                    vehicles[item][0] = "A"
                    vehicles[item][2] = None
                    vehicles[item][3] = None
# Emit one line per vehicle with rides: "<count> <ride id> <ride id> ...".
results = open("ghc2018.txt", "w+")
for item in range(len(vehicles)):
    if len(vehicles[item][4]) !=0:
        results.write(str(len(vehicles[item][4])))
        for ride in vehicles[item][4]:
            results.write(" ")
            results.write(str(ride))
        results.write("\n")
results.close()
| 38.390805 | 208 | 0.498204 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 207 | 0.061976 |