content stringlengths 5 1.05M |
|---|
"""
CreateFloors
"""
__author__ = 'Danny Bentley - danny_bentley@hotmail.com'
__twitter__ = '@danbentley'
__Website__ = 'http://dannybentley.tech/'
__version__ = '1.0.0'
"""
Sample on how to create multiple floors.
Use this sample along with the Video on Youtube.
"""
import clr
# import ProtoGeometry
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
# import Revit API
clr.AddReference("RevitAPI")
import Autodesk
from Autodesk.Revit.DB import *
# import RevitServices
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
# import RevitNodes
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
clr.ImportExtensions(Revit.GeometryConversion)
# get current Revit document.
# get current Revit document.
doc = DocumentManager.Instance.CurrentDBDocument

# --- Dynamo inputs -------------------------------------------------------
ArrayCurves = IN[0]                 # nested lists of curves outlining each slab
floorType = UnwrapElement(IN[1])    # Revit FloorType to apply
levels = UnwrapElement(IN[2])       # levels on which to create the floors
structural = IN[3]                  # True if the floors are structural

curveList = []   # one Revit CurveArray per floor boundary
out = []         # wrapped Dynamo elements to return

# built-in parameter controlling the floor's offset from its level.
builtInParam = BuiltInParameter.FLOOR_HEIGHTABOVELEVEL_PARAM

# convert each Dynamo curve loop into a Revit CurveArray.
for curve in ArrayCurves:
    curveArray = CurveArray()
    for c in curve:
        curveArray.Append(c.ToRevitType())
    # BUG FIX: Python lists have append(), not the .NET List.Add();
    # calling .Add() here raised AttributeError.
    curveList.append(curveArray)

# create one floor per (level, boundary) combination.
for l in levels:
    for c in curveList:
        # start Revit transaction.
        TransactionManager.Instance.EnsureInTransaction(doc)
        # create new floor in Revit.
        newFloor = doc.Create.NewFloor(c, floorType, l, structural)
        # zero out the height-above-level offset.
        p = newFloor.get_Parameter(builtInParam)
        p.Set(0)
        # convert to Dynamo element to send out (BUG FIX: append, not Add).
        out.append(newFloor.ToDSType(False))
        TransactionManager.Instance.TransactionTaskDone()

OUT = out
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import mongoengine
from datetime import datetime
# RiBuild Modules
import delphin_6_automation.database_interactions.database_collections as collections
from delphin_6_automation.database_interactions.db_templates import delphin_entry
from delphin_6_automation.database_interactions.db_templates import time_model_entry
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
class Sample(mongoengine.Document):
    """Mongo document holding one sampling iteration and its statistics."""

    # Meta Data
    added_date = mongoengine.DateTimeField(default=datetime.now)

    # References
    # raw sample values for this iteration
    samples = mongoengine.DictField(required=True)
    # Delphin simulation documents generated from this sample set
    delphin_docs = mongoengine.ListField(field=mongoengine.ReferenceField(document_type=delphin_entry.Delphin))
    iteration = mongoengine.IntField(required=True)
    # per-quantity statistics of the sample set
    mean = mongoengine.DictField()
    standard_deviation = mongoengine.DictField()

    # collection settings defined centrally in database_collections
    meta = collections.sample_db

    def __str__(self):
        return f'ID: {self.id}'
class SampleRaw(mongoengine.Document):
    """Mongo document holding an unprocessed (raw) sample sequence."""

    # Meta Data
    added_date = mongoengine.DateTimeField(default=datetime.now)

    # References
    # raw sample rows as produced by the sampling scheme
    samples_raw = mongoengine.ListField(required=True)
    sequence_number = mongoengine.IntField(required=True)

    # collection settings defined centrally in database_collections
    meta = collections.sample_raw_db

    def __str__(self):
        return f'ID: {self.id}'
class Strategy(mongoengine.Document):
    """Mongo document describing a sampling strategy and its progress."""

    # Meta Data
    added_date = mongoengine.DateTimeField(default=datetime.now)
    current_iteration = mongoengine.IntField(default=0)
    used_samples_per_set = mongoengine.IntField(default=0)
    # collection settings defined centrally in database_collections
    meta = collections.strategy_db

    # References
    samples = mongoengine.ListField(field=mongoengine.ReferenceField(document_type=Sample))
    samples_raw = mongoengine.ListField(field=mongoengine.ReferenceField(document_type=SampleRaw))
    # convergence metric per quantity
    standard_error = mongoengine.DictField()
    strategy = mongoengine.DictField(required=True)
    time_prediction_model = mongoengine.ReferenceField(document_type=time_model_entry.TimeModel)

    def __str__(self):
        return f'ID: {self.id}'
|
from utils.alter import *
from utils.db import exec_return_query, exec_void_query, load_query
from utils.s3 import list_bucket_contents
from abc import *
class QueryBuilder(metaclass=ABCMeta):
    """Abstract base for SQL builders.

    Holds the query fragments for one statement in ``init_dict`` and the
    target schema name. Subclasses load a SQL template with
    ``store_query()`` and implement ``exec_query()`` to run it.
    """

    # def __init__(self, table_name):
    def __init__(self, schema_name='bobsim_schema', table_name=None, att_name=None,
                 value=None, where_clause: str = None,
                 group_by=None,
                 having=None, order_by: str = None,
                 limit: str = None, offset: int = None):
        # Fragments are kept in declaration order on purpose: the
        # utils.alter helpers slice this dict positionally (indices 3..
        # are the trailing WHERE/GROUP BY/HAVING/ORDER BY/LIMIT/OFFSET).
        self.init_dict = {"TABLE_NAME": table_name,
                          "ATT_NAME": att_name,
                          "VALUE": value,
                          "WHERE": where_clause,
                          "GROUP_BY": group_by,
                          "HAVING": having,
                          "ORDER_BY": order_by,
                          "LIMIT": limit,
                          "OFFSET": offset,
                          }
        """
        TODO : divide init_dict into 2~4 part
        and have to more functional programming
        ( init_dict , where_condition, dict, join etc..)
        """
        self.schema_name = schema_name
        self.query = None  # raw SQL template text, set by store_query()

    def store_query(self, sql_filename):
        # load the SQL template from the given file
        self.query = load_query(sql_filename)

    @abstractmethod
    def exec_query(self, query):
        pass
class VoidQueryBuilder(QueryBuilder):
    """Query builder for statements that produce no result set (DDL/DML)."""

    def exec_query(self, query, args):
        """Execute *query* with bound *args* against this builder's schema."""
        exec_void_query(query=query, args=args, schema_name=self.schema_name)

    def check_query(self):
        # no generic verification step for void statements
        pass
class ReturnQueryBuilder(QueryBuilder):
    """Query builder for statements that produce a result set (SELECT)."""

    def exec_query(self, query):
        """Run *query* and return its result set.

        BUG FIX: the result of ``exec_return_query`` was previously
        discarded, so callers (e.g. ``SelectBuilder.process``) always
        received ``None``.
        """
        return exec_return_query(query=query, schema_name=self.schema_name)
# QueryBuilder-VoidQueryBuilder-CreateBuilder
class CreateBuilder(VoidQueryBuilder):
    """Build and execute a CREATE TABLE statement from create_<table>.sql."""

    def execute(self):
        self.process()

    def process(self):
        # 1. Store sql 2. Exec stored sql 3. check
        self.store_query('create_{}.sql'.format(self.init_dict["TABLE_NAME"]))
        # BUG FIX: VoidQueryBuilder.exec_query requires an ``args``
        # parameter; calling it with the query alone raised TypeError.
        # CREATE has no bound values, so pass None.
        self.exec_query(self.query, None)
        self.check_create()

    def check_create(self):
        # sql = "SHOW FULL COLUMNS FROM %s " % self.init_dict["TABLE_NAME"]
        # BUG FIX: the positional arguments previously bound to
        # schema_name/table_name; pass them by keyword instead.
        check_select_builder = SelectBuilder(
            schema_name=self.schema_name,
            table_name=self.init_dict["TABLE_NAME"],
            att_name=' * ')
        print(check_select_builder.execute())
# QueryBuilder-VoidQueryBuilder-InsertBuilder
class InsertBuilder(VoidQueryBuilder):
    """Build and execute an INSERT statement from insert_<table>.sql."""

    # InsertBuilder('table_name', 'input_data')
    def execute(self):
        self.process()

    def process(self):
        # 1. Store sql 2. manipulate sql 3. exec sql 4. check
        self.store_query('insert_{}.sql'.format(self.init_dict["TABLE_NAME"]))  # 1
        # query = self.manipulate(self.query) # 2
        # VALUE is passed separately as bound query args (parameterised insert).
        self.exec_query(args=self.init_dict["VALUE"], query=self.query)
        # self.check_insert()

    def check_insert(self):
        # TODO: error for table that has no id(PK)
        a = SelectBuilder(
            schema_name=self.schema_name,
            table_name=self.init_dict["TABLE_NAME"],
            att_name=' * '
            # order_by='ORDER BY id DESC',
            # limit='LIMIT 1'
        )
        print(a.execute())

    def manipulate(self, query):
        # NOTE(review): currently unused — process() passes VALUE as bound
        # args instead of formatting it into the SQL text.
        # let's think about lots of input_values
        mani_query = query.format(self.init_dict["VALUE"])
        return mani_query
# QueryBuilder-VoidQueryBuilder-DeleteBuilder
class DeleteBuilder(VoidQueryBuilder):
    """Build and execute a DELETE statement.

    value = (v1, v2, ..., vn) tuple type
    """

    def execute(self):
        self.process()

    def process(self):
        # 1. Store sql 2. manipulate sql 3. exec sql 4. check
        self.store_query('delete.sql')        # 1
        query = self.manipulate(self.query)   # 2
        self.exec_query(query)                # 3
        return self.check_delete()            # 4

    def manipulate(self, query):
        # inject the table name, then append the cleaned trailing clauses
        # (WHERE etc., taken positionally from init_dict indices 3..)
        first_mani_query = query % self.init_dict["TABLE_NAME"]
        list_rest_data = alter_type_dict_to_list(self.init_dict, 3, len(self.init_dict))
        clean_rest_data = remove_none(list_rest_data)
        str_rest_data = alter_type_list_to_str(clean_rest_data)
        return combine_sentence(first_mani_query, str_rest_data)

    def exec_query(self, query):
        # DELETE is fully formatted; no bound args needed, so bypass the
        # parent (query, args) form.
        exec_void_query(query)

    def check_delete(self):
        # BUG FIX: positional arguments previously bound to
        # schema_name/table_name; pass them by keyword instead.
        check_delete_query = SelectBuilder(
            schema_name=self.schema_name,
            table_name=self.init_dict["TABLE_NAME"],
            att_name=' * ')
        print(check_delete_query.execute())
# QueryBuilder-VoidQueryBuilder-UpdateBuilder
class UpdateBuilder(VoidQueryBuilder):
    """Build and execute an UPDATE statement."""

    def execute(self):
        self.process()

    def process(self):
        """
        1. Store sql
        2. Manipulate sql
        3. exec sql
        4. check data in db
        """
        self.store_query('update.sql')                 # 1
        completed_query = self.manipulate(self.query)  # 2
        self.exec_query(completed_query)               # 3
        return self.check_update()                     # 4

    def manipulate(self, query):
        """
        TODO: 1. update {table_name} set {update_value} % merge
              2. clean
        """
        first_mani_query = query % (self.init_dict["TABLE_NAME"], self.init_dict["VALUE"])
        list_rest_data = alter_type_dict_to_list(self.init_dict, 3, len(self.init_dict))
        clean_rest_data = remove_none(list_rest_data)
        str_rest_data = alter_type_list_to_str(clean_rest_data)
        return combine_sentence(first_mani_query, str_rest_data)

    def exec_query(self, query):
        # UPDATE is fully formatted; no bound args needed.
        exec_void_query(query)

    def check_update(self):
        # Idiom fix: plain truthiness instead of ``bool(...) == 1``.
        if self.init_dict.get("WHERE"):
            where = self.init_dict["WHERE"] + " AND " + self.init_dict["VALUE"]
        else:
            where = "WHERE " + self.init_dict["VALUE"]
        # BUG FIX: positional arguments previously bound to
        # schema_name/table_name; pass them by keyword instead.
        check_update_query = SelectBuilder(
            schema_name=self.schema_name,
            table_name=self.init_dict["TABLE_NAME"],
            att_name='*',
            where_clause=where)
        print(check_update_query.execute())
class SelectBuilder(ReturnQueryBuilder):
    """Build and execute a SELECT statement."""

    def execute(self):
        return self.process()

    def process(self):
        """
        1. Store select sql
        2. manipulate sql
        3. exec sql
        """
        self.store_query('select.sql')       # 1
        query = self.manipulate(self.query)  # 2
        return self.exec_query(query)

    def manipulate(self, query):  # extract from where_clause to offset
        """
        1.dict -> list
        2.add att,table name to 'SELECT {} FROM {}' sql file
        3.remove None & list -> str
        4.combine
        """
        # init_dict indices 3.. hold the trailing WHERE/GROUP BY/HAVING/
        # ORDER BY/LIMIT/OFFSET fragments, in declaration order
        rest_data = alter_type_dict_to_list(self.init_dict, 3, len(self.init_dict))
        first_mani_query = query % (self.init_dict["ATT_NAME"], self.init_dict["TABLE_NAME"])
        clean_data = remove_none(rest_data)
        str_rest_data = alter_type_list_to_str(clean_data)
        second_mani_query = combine_sentence(first_mani_query, str_rest_data)
        return second_mani_query
class DropBuilder(VoidQueryBuilder):
    """Build and execute a DROP statement for the configured table."""

    def execute(self):
        self.process()

    def process(self):
        # load the generic drop template, inject the table name, run it
        self.store_query('drop.sql')  # 1
        statement = self.query % self.init_dict["TABLE_NAME"]
        exec_void_query(statement)
|
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class SurfaceCodesCode(GenericTypeCode):
    """
    SurfaceCodes
    From: http://terminology.hl7.org/CodeSystem/FDI-surface in valuesets.xml
    This value set includes a smattering of FDI tooth surface codes.
    """

    def __init__(self, value: AutoMapperTextInputType):
        # the wrapped code value (e.g. "M", "O", "MOD")
        super().__init__(value=value)

    """
    http://terminology.hl7.org/CodeSystem/FDI-surface
    """
    # URI of the backing FHIR code system, shared by all instances
    codeset: FhirUri = "http://terminology.hl7.org/CodeSystem/FDI-surface"
class SurfaceCodesCodeValues:
    """
    The surface of a tooth that is closest to the midline (middle) of the face.
    From: http://terminology.hl7.org/CodeSystem/FDI-surface in valuesets.xml
    """
    # Auto-generated constants: each docstring below describes the
    # attribute that immediately follows it.
    Mesial = SurfaceCodesCode("M")
    """
    The chewing surface of posterior teeth.
    From: http://terminology.hl7.org/CodeSystem/FDI-surface in valuesets.xml
    """
    Occlusal = SurfaceCodesCode("O")
    """
    The biting edge of anterior teeth.
    From: http://terminology.hl7.org/CodeSystem/FDI-surface in valuesets.xml
    """
    Incisal = SurfaceCodesCode("I")
    """
    The surface of a tooth that faces away from the midline of the face.
    From: http://terminology.hl7.org/CodeSystem/FDI-surface in valuesets.xml
    """
    Distal = SurfaceCodesCode("D")
    """
    The surface of a posterior tooth facing the cheeks.
    From: http://terminology.hl7.org/CodeSystem/FDI-surface in valuesets.xml
    """
    Buccal = SurfaceCodesCode("B")
    """
    The surface of a tooth facing the lips.
    From: http://terminology.hl7.org/CodeSystem/FDI-surface in valuesets.xml
    """
    Ventral = SurfaceCodesCode("V")
    """
    The surface of a tooth facing the tongue.
    From: http://terminology.hl7.org/CodeSystem/FDI-surface in valuesets.xml
    """
    Lingual = SurfaceCodesCode("L")
    """
    The Mesioclusal surfaces of a tooth.
    From: http://terminology.hl7.org/CodeSystem/FDI-surface in valuesets.xml
    """
    Mesioclusal = SurfaceCodesCode("MO")
    """
    The Distoclusal surfaces of a tooth.
    From: http://terminology.hl7.org/CodeSystem/FDI-surface in valuesets.xml
    """
    Distoclusal = SurfaceCodesCode("DO")
    """
    The Distoincisal surfaces of a tooth.
    From: http://terminology.hl7.org/CodeSystem/FDI-surface in valuesets.xml
    """
    Distoincisal = SurfaceCodesCode("DI")
    """
    The Mesioclusodistal surfaces of a tooth.
    From: http://terminology.hl7.org/CodeSystem/FDI-surface in valuesets.xml
    """
    Mesioclusodistal = SurfaceCodesCode("MOD")
|
import os
import psutil
_vmb_data = []  # [rss, vms] snapshot captured by mem_before()
_pid = 0        # pid recorded at snapshot time (informational only)
def _VmB():
    '''Private.

    Return the current process's memory usage as ``[rss, vms]`` bytes,
    via psutil (the old commented-out /proc/<pid>/status parsing was
    dead code and has been removed).
    '''
    mem_info = psutil.Process().memory_info()
    return [mem_info.rss, mem_info.vms]
def mem_before():
    """Snapshot this process's pid and memory usage before a measured call."""
    global _vmb_data, _pid
    _pid = os.getpid()
    _vmb_data = _VmB()
def mem_after(fcn_name):
    """Print the memory delta for *fcn_name* since the last mem_before().

    Idiom fix: ``abs(diff) > 1`` replaces ``diff > 1 or diff < -1``; the
    unnecessary ``global`` declaration (read-only access) was dropped.
    """
    new_data = _VmB()
    # total (rss + vms) change since the snapshot
    diff = sum(d - _vmb_data[i] for i, d in enumerate(new_data))
    # only report deltas outside the +/-1 byte noise band
    if abs(diff) > 1:
        print("Function " + fcn_name + " mem usage: " + str(diff))
    else:
        print("Function " + fcn_name + ": no memory usage seen")
#-------USING SMEM---------
# https://www.selenic.com/smem/
# https://www.selenic.com/smem/download/smem.pdf
import subprocess
def report_memory(pid):
    """Print smem's per-process output, flagging the line for *pid*.

    Relies on the external ``smem`` tool being installed (see
    https://www.selenic.com/smem/).
    """
    # NOTE(review): '-P test_' is passed as a single argv element; most
    # CLI parsers expect ['-P', 'test_'] — verify against smem's parser.
    proc = subprocess.Popen(['smem', '-P test_'], stdout = subprocess.PIPE)
    while True:
        line = proc.stdout.readline().decode('utf-8')
        if not line:
            # EOF: smem finished
            break
        print("test: " + str(line))
        # smem rows start with the pid column
        if line.strip().startswith(str(pid)):
            print("Found!")
|
"""
Test matching module.
"""
import cog.matching
def test_substr_ind():
    """Exercise cog.util.substr_ind over match and no-match cases."""
    # NOTE(review): this module imports cog.matching but references
    # cog.util — confirm the cog package makes util importable.
    assert cog.util.substr_ind('ale', 'alex') == [0, 3]
    assert cog.util.substr_ind('ALEX'.lower(), 'Alexander'.lower()) == [0, 4]
    assert cog.util.substr_ind('nde', 'Alexander') == [5, 8]
    # case-sensitive: mixed case must not match
    assert not cog.util.substr_ind('ALe', 'Alexander')
    assert not cog.util.substr_ind('not', 'alex')
    assert not cog.util.substr_ind('longneedle', 'alex')
    # skip_spaces ignores whitespace in the haystack
    assert cog.util.substr_ind('16 cyg', '16 c y gni', skip_spaces=True) == [0, 9]
|
import asyncio
import collections
import logging
from elasticsearch.connection_pool import RoundRobinSelector
logger = logging.getLogger('elasticsearch')
class ConnectionPool:
    """Async pool of elasticsearch connections with dead-node tracking.

    Failed connections are parked in a priority queue keyed by the time
    at which they become eligible for resurrection; repeated failures
    back off exponentially, capped at ``timeout_cutoff`` doublings.
    """

    def __init__(self, connections, *, dead_timeout=60, timeout_cutoff=5,
                 selector_factory=RoundRobinSelector,
                 loop):
        # base seconds a dead connection stays out, and the doubling cap
        self._dead_timeout = dead_timeout
        self._timeout_cutoff = timeout_cutoff
        self._selector = selector_factory(None)
        # (resurrection_time, connection) pairs, soonest first
        self._dead = asyncio.PriorityQueue(len(connections), loop=loop)
        # consecutive-failure count per connection
        self._dead_count = collections.Counter()
        self._connections = connections
        self._loop = loop

    def close(self):
        """Close every live and dead connection; return a completed Future."""
        for connection in self._connections:
            connection.close()
        while not self._dead.empty():
            _, connection = self._dead.get_nowait()
            connection.close()
        ret = asyncio.Future(loop=self._loop)
        ret.set_result(None)
        return ret

    @property
    def connections(self):
        # shallow copy so callers cannot mutate the live list
        return list(self._connections)

    async def mark_dead(self, connection):
        """
        Mark the connection as dead (failed). Remove it from the live pool and
        put it on a timeout.
        :arg connection: the failed instance
        """
        now = self._loop.time()
        try:
            self._connections.remove(connection)
        except ValueError:
            # connection not alive or another thread marked it already, ignore
            return
        else:
            self._dead_count[connection] += 1
            dead_count = self._dead_count[connection]
            # exponential backoff, capped after timeout_cutoff failures
            timeout = self._dead_timeout * 2 ** min(dead_count - 1,
                                                    self._timeout_cutoff)
            await self._dead.put((now + timeout, connection))
            logger.warning(
                "Connection %r has failed for %i times in a row, "
                "putting on %i second timeout.",
                connection, dead_count, timeout
            )

    async def mark_live(self, connection):
        """
        Mark connection as healthy after a resurrection. Resets the fail
        counter for the connection.
        :arg connection: the connection to redeem
        """
        # NOTE(review): raises KeyError if the connection never failed —
        # confirm callers only redeem previously dead connections.
        del self._dead_count[connection]

    async def resurrect(self, force=False):
        """
        Attempt to resurrect a connection from the dead pool. It will try to
        locate one (not all) eligible (its timeout is over) connection to
        return to the live pool.
        :arg force: resurrect a connection even if there is none eligible (used
        when we have no live connections)
        """
        if self._dead.empty():
            return
        timeout, connection = self._dead.get_nowait()
        if not force and timeout > self._loop.time():
            # return it back if not eligible and not forced
            await self._dead.put((timeout, connection))
            return
        # either we were forced or the connection is eligible to be retried
        self._connections.append(connection)
        logger.info('Resurrecting connection %r (force=%s).',
                    connection, force)

    async def get_connection(self):
        """
        Return a connection from the pool using the `ConnectionSelector`
        instance.
        It tries to resurrect eligible connections, forces a resurrection when
        no connections are available and passes the list of live connections to
        the selector instance to choose from.
        Returns a connection instance
        """
        await self.resurrect()
        # no live nodes, resurrect one by force
        if not self._connections:
            await self.resurrect(True)
        connection = self._selector.select(self._connections)
        return connection
|
from fastapi import APIRouter
from fastapi import Depends
from auth.auth import auth_wrapper
from schema import Transfer as TransferSchema
from db import SQLITE_DB
from db import Person
from db import Card
from db import BankStatement
from datetime import datetime
transfer = APIRouter()
@transfer.post("/transfer")
def make_transfer(transfer_schema: TransferSchema, username=Depends(auth_wrapper)):
    """Record a money transfer from the authenticated user to a friend.

    Looks up the sender (by username), the recipient and the billing
    card, then writes one BankStatement row inside a single SQLite
    transaction. Any lookup/write error propagates to FastAPI unchanged.

    Idiom fix: the previous ``try/except Exception as error: raise error``
    was a no-op wrapper and has been removed.
    """
    with SQLITE_DB.atomic():
        person = Person.select().where(Person.username == username).get()
        friend = Person.select().where(Person.user_id == transfer_schema.friend_id).get()
        card = Card.select().where(Card.card_id == transfer_schema.billing_card.card_id).get()
        BankStatement.create(
            user_id=person,
            friend_id=friend,
            value=transfer_schema.total_to_transfer,
            date=datetime.today().date(),
            from_card=card
        )
|
import sys,os
import subprocess
import time, glob
L=2000
sim_prefix = '/20140820_seqs'
dt=100
valdt = 1
jobcount = 0
# inner loops = 2*2*4*1*3*4 = 2**7 * 3 approx 400
n=10
i=0
D=0.5
istep=0
first_gen = 5000
last_gen = 25000 #min(5000 + istep*(i+1)*dt, 24800)
dir_list= glob.glob('../data_new/N_10000_L_2*_sdt_1*')
for year in xrange(first_gen, last_gen, n*dt):
for dir_name in dir_list:
#cmd = 'rm '+dir_name+sim_prefix+'*prediction_results*dt_?00.dat'
#os.system(cmd)
if True:
for dscale in [0.5]: #, 1.0,2.0,3.0]:
for sample_size in [200]:
args = ['--base_name',dir_name+sim_prefix, '--sample_size', sample_size, '--dscale', dscale, '--diffusion',D,'--gen', year '--valdt', valdt, n, dt]
cmd = 'qsub -cwd -l h_rt=0:59:59 -l h_vmem=8G analyze_multiple_toy_data.py '\
+ ' '.join(map(str, args))
print cmd
os.system(cmd)
jobcount+=1
if jobcount>3000:
exit()
|
import numpy as np

# lay out 3..8 as a 2x3 matrix; the element at row 1, col 1 is 7
z = np.arange(3, 9).reshape(2, 3)
print(z[1][1])

# double 1..4, then sum the first three entries (2 + 4 + 6 = 12)
x = np.arange(1, 5) * 2
print(x[:3].sum())
|
# -*- coding: utf-8 -*-#
# -------------------------------------------------------------------------------
# Name: backtrace
# Description:
# Author: zhengyiming
# Date: 2020/10/10
# -------------------------------------------------------------------------------
# from labuladong
# 回溯算法的框架
# 🛩其实可以看出来,同时这个也是DSF,就是深度优先算法。不停的去试路。遇到走不通才折回去,这就是撞到南墙才停下来。
# result = []
# def backtrack(路径, 选择列表):
# if 满⾜结束条件: result.add(路径) (1)结束条件
# return
# for 选择 in 选择列表: (2)做选择
# if 不能走:
# continue # 排除掉不合法的;1。不能走;2。走过了
# backtrack(路径, 选择列表) (3) 递归《此处可以在参数上做运算,增加或者减少,类似尾递归)
# 撤销选择 (4)撤销选择(回溯)
# def backtrack(...):
# for 选择 in 选择列表:
# 做选择
# backtrack(...)
# 撤销选择
# for循环内的递归,递归之前做选择,递归之后 撤销选择。
# 不重复的东西,树的节点,又是和二叉树相关,只要涉及到 递归,
import copy

track = []   # the choices made so far (scratch; default choice list 1,2,3)
res = []     # completed selections/solutions (explicit or implicit use both work)
nums = 3
result = []
# 1. Full permutation: a list stores the choices made and the results
#    (a BFS variant would use a queue instead).
def template(input):
    """Return all permutations of *input* via backtracking.

    Note: each permutation is wrapped in an extra list, i.e. the result
    has the shape [[perm1], [perm2], ...] (kept for compatibility).
    """
    result = []

    def trace(path, choices):
        # base case: the path has used every element exactly once
        if len(path) == len(input):
            # copy the path — it keeps being mutated (pop) as we backtrack
            result.append([path.copy()])
            return
        for candidate in choices:
            # skip elements already on the path (illegal choices)
            if candidate in path:
                continue
            path.append(candidate)   # choose
            trace(path, choices)     # explore deeper
            path.pop()               # un-choose (the backtracking step)

    trace([], input)  # start with an empty path: nothing chosen yet
    return result
# 2. N queens (board encoded as a 1-D list)
res = []


def N_queen(n):
    """Solve n-queens, printing each solution and appending the board
    (a list where index = row and value = column) to module-level res."""

    def check(board, row, col):
        '''
        Can a queen be placed at (row, col)?  board is a flattened 2-D
        board: the index is the row, the value is that row's queen column.
        :param board: list, index = row, value = col
        :param row: row to test
        :param col: column to test
        :return: True/False
        '''
        # print(board)
        # print(row)
        # print(col)
        # print()
        for i_row in range(row):
            # named distances kept for illustration; the conditions below
            # recompute the same expressions
            列重合 = abs(board[i_row] - col)
            左斜线或者右斜线的距离 = abs(i_row - row)
            if abs(board[i_row] - col) == 0:  # same column is not allowed
                return False
            if abs(board[i_row] - col) == abs(i_row - row):  # same diagonal
                return False
        return True

    def eightqueen(board, row):
        '''
        Place queens row by row by backtracking (any n, not just 8).
        :param board: the board (list of length n)
        :param row: the row currently being filled
        :return:
        '''
        border = len(board)
        if row >= border:  # 1. all rows filled: record and print the solution
            # print(row)
            # print(temp)
            res.append(copy.deepcopy(board))  # deep copy — board keeps mutating
            for i, col in enumerate(board):  # i is the row, col the queen column
                print('□ ' * col + '■ ' + '□ ' * (len(board) - 1 - col))
            print("")
        # 2. try every column of this row (col restarts at 0 each recursion)
        for col in range(border):
            if check(board, row, col):  # 3. skip invalid placements (backtrack)
                board[row] = col
                eightqueen(board, row + 1)  # 4. recurse into the next row
                # no explicit undo is needed: board[row] is simply
                # overwritten, since index=row/value=col stores exactly one
                # queen per row (a 2-D board[row][col] would need un-setting)

    board = [0 for i in range(n)]  # board with n rows
    eightqueen(board, 0)  # start from row 0; step through to follow the flow
    print("res 是什么")
    print(res)
def N_queen2(n):
    """Solve n-queens with an explicit 2-D board ("*" empty, "Q" queen);
    prints every solution found."""
    # create an n x n board
    board = [["*" for col in range(n)] for row in range(n)]
    # print(board)
    res = []

    def put_queen(board, row):  # fill the board starting from row 0
        # 1. stop condition: every row holds a queen
        if row >= len(board):
            # deep copy — the board below is mutated by the undo step
            res.append(copy.deepcopy(board))
            # no return here: the recursion unwinds via the loop below,
            # and there can be many solutions
        for col in range(len(board)):  # try every column of this row
            if not check_position(board, row, col):  # skip illegal spots
                continue
            # 2. make the choice
            board[row][col] = "Q"
            # 3. recurse into the next row
            put_queen(board, row + 1)
            # 4. undo the choice (backtrack)
            board[row][col] = "*"
            # the explicit undo is why the deep copy above is required;
            # it also keeps the standard backtracking structure visible

    def check_position(board, row, col):
        # can a queen go at (row, col)? check columns and diagonals of all
        # rows placed so far
        for row_ in range(len(board[:row + 1])):
            for col_ in range(len(board)):
                if board[row_][col_] == "Q":  # an existing queen constrains us
                    if col - col_ == 0:
                        return False
                    if abs(col - col_) == abs(row - row_):
                        return False  # abs() folds both diagonal directions
        return True

    put_queen(board, row=0)
    for u in res:
        print(u)
        print()
    # return res
# todo: randomly generate a maze, then use backtracking to find/create a path
# todo: flood-fill style path finding (name forgotten); could search in
#       several directions, possibly multithreaded
if __name__ == '__main__':
    # nums = 3
    # backtrack(3)
    # full permutation demo:
    # result = template([1, 2, 3])
    # print(result)
    # n-queens with the 1-D board encoding:
    # N_queen(4)
    # n-queens with an explicit 2-D board (clearer to follow):
    N_queen2(4)
|
sec = range(10)


def gen(sec):
    """Yield each value of *sec* in order (a trivial generator wrapper)."""
    yield from sec
|
# Distributed under the MIT License.
# See LICENSE.txt for details.
import numpy as np
# Test functions for normal dot fluxes
def spacetime_metric_normal_dot_flux(spacetime_metric, pi, phi, gamma1, gamma2,
                                     lapse, shift, inverse_spatial_metric,
                                     unit_normal):
    """Normal dot flux for the spacetime metric evolution variable."""
    shift_dot_normal = np.dot(shift, unit_normal)
    return -(1. + gamma1) * shift_dot_normal * spacetime_metric
def pi_normal_dot_flux(spacetime_metric, pi, phi, gamma1, gamma2, lapse, shift,
                       inverse_spatial_metric, unit_normal):
    """Normal dot flux for Pi."""
    shift_dot_normal = np.dot(shift, unit_normal)
    advective = -shift_dot_normal * pi
    normal_phi = lapse * np.einsum("ki,k,iab->ab",
                                   inverse_spatial_metric, unit_normal, phi)
    damping = -gamma1 * gamma2 * shift_dot_normal * spacetime_metric
    return advective + normal_phi + damping
def phi_dot_flux(spacetime_metric, pi, phi, gamma1, gamma2, lapse, shift,
                 inverse_spatial_metric, unit_normal):
    """Normal dot flux for Phi."""
    shift_dot_normal = np.dot(shift, unit_normal)
    normal_pi = np.einsum("i,ab->iab", unit_normal, pi)
    normal_metric = np.einsum("i,ab->iab", unit_normal, spacetime_metric)
    return -shift_dot_normal * phi + lapse * (normal_pi - gamma2 * normal_metric)
def gauge_constraint(gauge_function, spacetime_normal_one_form,
                     spacetime_normal_vector, inverse_spatial_metric,
                     inverse_spacetime_metric, pi, phi):
    """Return the gauge constraint C_a.

    Index convention: a,b,c run over spacetime components, i,j,k over
    spatial ones (phi's first index is spatial).
    """
    # Sums only go over spatial indices of second index of phi
    phi_ija = phi[:,1:,:]
    spacetime_normal_vector_I = spacetime_normal_vector[1:]
    # The np.insert(...) term prepends a zero time component so the
    # spatial-only einsum result lines up with the spacetime index a.
    constraint = gauge_function \
        + np.einsum("ij,ija->a", inverse_spatial_metric, phi_ija) \
        + np.einsum("b,ba->a", spacetime_normal_vector, pi) \
        - 0.5 * np.insert(np.einsum("bc,ibc->i", inverse_spacetime_metric, phi), \
                          0, np.zeros(phi[0][0][0].shape)) \
        - 0.5 * np.einsum("a,i,bc,ibc->a", spacetime_normal_one_form, \
                          spacetime_normal_vector_I, \
                          inverse_spacetime_metric, phi) \
        - 0.5 * np.einsum("a,bc,bc->a", spacetime_normal_one_form, \
                          inverse_spacetime_metric, pi)
    return constraint
|
import numpy as np
class Layer():
    """Base class for network layers.

    A layer is either a parameterised linear layer (weights W, bias b,
    momentum buffers) or a parameter-free activation layer; activation
    layers only cache the input between forward and backward.
    """

    def __init__(self, in_size, out_size, lam, name):
        self.in_size = in_size
        self.out_size = out_size
        self.name = name
        # BUG FIX: 'is' compares object identity and only worked because
        # CPython happens to intern short string literals; use '=='.
        self.isActivation = (False if name == "linear" else True)
        if not self.isActivation:
            # Weights
            self.W = np.random.normal(loc=0.0, scale=0.01, size=(out_size, in_size))
            # Bias
            self.b = np.random.normal(loc=0.0, scale=0.01, size=(out_size, 1))
            # Weight regularization
            self.lam = lam
            self.mom = {
                'W' : np.zeros(self.W.shape),
                'b' : np.zeros(self.b.shape)
            }
            self.resetGrad()
            # this is a memory variable between forward/backward
            self.x = np.empty(shape=(self.in_size, 1))

    def forward(self, x):
        """Cache *x* for the backward pass (subclasses compute the output)."""
        assert x is not None
        # sometimes we need to store input for backward
        self.x = x

    def backward(self):
        """Sanity hook: requires a prior forward() call."""
        assert self.x is not None

    def cost(self):
        # layers without a regularisation/loss term contribute nothing
        return 0

    def resetGrad(self):
        """Zero the accumulated weight and bias gradients."""
        self.gW = np.zeros(self.W.shape)
        self.gB = np.zeros(self.b.shape)

    # for non-activation layers to implement
    def update(self, l_rate=0.001):
        pass

    def updateMom(self, l_rate=0.001, momentum=0.0):
        pass
class Linear(Layer):
    """Fully connected layer: y = Wx + b with L2 weight regularisation."""

    def __init__(self, in_size, out_size, lam=0, name="linear"):
        super().__init__(in_size, out_size, lam, name)

    def forward(self, x):
        """Return W x + b; x has shape (in_size, N) and is cached."""
        Layer.forward(self, x)
        # Wx + b
        return np.dot(self.W, x) + self.b

    def backward(self, grad):
        """Accumulate parameter gradients; return gradient w.r.t. input.

        grad has shape (N, out_size). PERF: the per-sample outer-product
        loop was replaced by equivalent matrix products —
        sum_i outer(grad[i], x[:, i]) == grad.T @ x.T.
        """
        Layer.backward(self)
        N = self.x.shape[1]
        # here's the difference in (10) and (11): mean gradient + L2 term
        self.gW = np.dot(grad.T, self.x.T) / N + 2 * self.lam * self.W
        self.gB = grad.sum(axis=0).reshape(self.gB.shape) / N
        return np.dot(grad, self.W)

    def cost(self):
        # L2 regularisation contribution of this layer
        return self.lam * np.power(self.W, 2).sum()

    def update(self, l_rate=0.001):
        """Plain gradient-descent step."""
        self.W -= l_rate * self.gW
        self.b -= l_rate * self.gB

    def updateMom(self, l_rate=0.001, momentum=0.0):
        """Gradient-descent step with classical momentum."""
        self.mom['W'] = momentum * self.mom['W'] + l_rate * self.gW
        self.mom['b'] = momentum * self.mom['b'] + l_rate * self.gB
        self.W -= self.mom['W']
        self.b -= self.mom['b']
class ReLU(Layer):
    """Element-wise rectified linear activation."""

    def __init__(self, in_size, name="relu"):
        super().__init__(in_size, in_size, -1, name)

    def forward(self, x):
        Layer.forward(self, x)
        # multiply by the positive mask: max(0, x)
        positive = self.x > 0
        return self.x * positive

    def backward(self, grad):
        Layer.backward(self)
        # pass gradient through only where the cached input was positive
        positive = self.x.T > 0
        return np.multiply(grad, positive)
class Softmax(Layer):
    """Softmax output layer with cross-entropy cost."""

    def __init__(self, in_size, name="softmax"):
        super().__init__(in_size, in_size, -1, name)

    def forward(self, x):
        """Return column-wise softmax(x); caches the probabilities."""
        assert x is not None
        try:
            # shift by the max for numerical stability
            e = np.exp(x - x.max())
            res = e / np.sum(e, axis=0)
        except FloatingPointError:
            # Gradient explosion scenario
            print("jesus take the wheel")
            # BUG FIX: np.ones(x) interprets the array as a *shape* and
            # raises; np.ones_like(x) is the intended uniform fallback.
            res = np.ones_like(x)
        Layer.forward(self, res)
        return res

    def backward(self, truth):
        """Return the gradient rows, one per sample column of truth."""
        Layer.backward(self)
        assert self.x.shape[1] == truth.shape[1]
        N = truth.shape[1]
        cols = ((truth[:, i], self.x[:, i]) for i in range(N))
        grad = [self.softGrad(t, p) for (t, p) in cols]
        return np.vstack(grad)

    @staticmethod
    def softGrad(t, p):
        # Jacobian according for formulas in Ass1
        a = np.outer(p, p)
        b = np.dot(t, (np.diag(p) - a))
        c = np.dot(t, p)
        return -b / c

    def cost(self, truth, prob=None):
        """Mean cross-entropy of cached (or given) probabilities vs truth."""
        x = self.x if prob is None else prob
        assert x.shape[1] == truth.shape[1]
        N = x.shape[1]
        Py = np.multiply(truth, x).sum(axis=0)
        Py[Py == 0] = np.finfo(float).eps  # avoid log(0)
        return - np.log(Py).sum() / N
class BatchNorm(Layer):
    # https://wiseodd.github.io/techblog/2016/07/04/batchnorm/
    """Simplified batch-normalisation layer (no learned gamma/beta)."""

    def __init__(self, in_size, mu=None, s=None, name="batch_norm"):
        super().__init__(in_size, in_size, -1, name)
        # running mean and scale used at eval time
        # NOTE(review): s starts as an identity matrix but
        # forward(train=True) overwrites it with a variance *vector* —
        # confirm the intended shape/semantics of the np.dot below.
        self.mu = mu if mu is not None else np.zeros(shape=(in_size, 1), dtype=float)
        self.s = s if s is not None else np.eye(in_size, dtype=float)

    def forward(self, x, train=False):
        Layer.forward(self, x)
        # if mu, s is passed: then it's eval time not training
        self.mu = x.mean(axis=1) if train else self.mu
        self.s = x.var(axis=1) if train else self.s
        return np.dot(self.s, (x.T - self.mu.T).T)

    def backward(self, grad):
        Layer.backward(self)
        # Not implemented yet: gradient passes through unchanged
        return grad
|
#!/usr/bin/python3
import sys
from PySide2.QtWidgets import *
from PySide2.QtGui import *
def main():
    """Show a single bare QLineEdit with a large Arial font."""
    app = QApplication(sys.argv)
    line_edit = QLineEdit()
    line_edit.setFont(QFont("Arial", 24))
    line_edit.show()
    app.exec_()


if __name__ == '__main__':
    main()
|
from .. import app, cache
from ..dash_app_model import arima_model, arima_df, fare_arima_df, start_index, end_index, today
import plotly.graph_objs as go
import pandas as pd
from dash.dependencies import Input, Output, State
from datetime import datetime
import numpy as np
from .graph_utilities import *
@app.callback([Output('arima-buttons', 'style'), Output('arima-slider-container', 'style')],
              [Input('arima-graph', 'figure')])
@cache.memoize(timeout=timeout)
def show_buttons_arima(figure):
    """Reveal the ARIMA buttons and slider once the graph has rendered."""
    return {'visibility': 'visible'}, {'visibility': 'visible'}
@app.callback(Output('training-data-range-arima', 'children'), [Input('training-data-range-arima', 'id')])
def show_training_data_range(id):
    """Describe the date range covering the first 80% of the fare data."""
    training_df = pd.read_json(cache.get('fare-price-arima-data'))
    # the training split is the first 80% of the rows
    size = int(len(training_df) * 0.80)
    # keep only the date part of the timestamp index entries
    x = str(training_df.index[0]).split(' ')[0]
    y = str(training_df.index[size]).split(' ')[0]
    result_string = str(x) + ", " + str(y)
    return "The data used to train the model is the Average Fare Price in the range: " + result_string + " Which is 80% of the data"
@app.callback(Output('arima-train-button', 'disabled'),
              [Input('arima-checkbox', 'value')])
def train_with_button_arima(value):
    """Enable the train button only while the checkbox is ticked.

    Returns "" (falsy, button enabled) when 'button' is among the
    checkbox values, otherwise the string "true" (button disabled).
    """
    return "" if 'button' in value else "true"
@app.callback(Output('arima-data', 'data'),
              [Input('arima-train-button', 'n_clicks')])
def train_model_arima(clicks):
    """Refit the ARIMA model on the cached training data and cache the
    new predictions; returns a timestamp to trigger dependent callbacks."""
    training_df = pd.read_json(cache.get('fare-price-arima-data'))
    arima_model.fit(training_df)
    new_model_df = arima_model.predict(start_index, end_index)
    cache.set('arima-data', new_model_df.to_json())
    return str(datetime.now())
@app.callback(Output('arima-reset-button', 'name'),
              [Input('arima-reset-button', 'n_clicks')])
def reset_data_arima(n_clicks):
    """ Reset the graph on click. This function sets the data in the cache to the data coming from the inputhandler
    Parameters
    ----------
    n_clicks: int
        Number of times the reset button has been clicked (trigger only).
    Returns
    -------
    The current date and time and sets the name of the reset button
    """
    cache.set('fare-price-arima-data', fare_arima_df.to_json())
    cache.set('arima-data', arima_df.to_json())
    return str(datetime.now())
@app.callback([Output('arima-point-slider', 'disabled'),
               Output('arima-point-slider', 'max'),
               Output('arima-point-slider', 'min'),
               Output('arima-point-slider', 'value'),
               Output('arima-point-slider', 'marks')],
              [Input('arima-graph', 'selectedData'), Input('arima-reset-button', 'n_clicks')])
@cache.memoize(timeout=timeout)
def set_slider(selectedData, n_clicks):
    """ Enable the slider when the user clicks on a point in the graph. This function sets the disabled, min, max, and value
    property for the slider component.
    Parameters
    ----------
    selectedData: dict
        A dictionary containing the selected points. this dictionary can be None or it can be a dictionary containing
        the key "points" and the value for this key is a list of points
    Returns
    -------
    The value for the disable, max, min, and the value of the Slider object
    """
    # slider spans +/- r around the selection, with marks every 25
    r = 100
    indices = []
    for i in range(-r, r + 1):
        if i % 25 == 0:
            indices.append(i)
    # If the user has not selected any points, then disable the slider
    if selectedData is None:
        return True, r, -r, 0, {i: f"{i}" for i in range(-100, 100 + 1) if i % 25 == 0}
    # If the user selected some points then enable the slider
    points = []
    for i in selectedData['points']:
        # only consider points belonging to the fare-price curve
        if i['curveNumber'] == selected_curve_number:
            points.append(i['y'])
    # get the average of the selected points
    avg = get_average(points)
    if len(points) == 1:
        # single point: label marks with the absolute values around it
        my_marks = {}
        for m in indices:
            if m == 0:
                my_marks[int(m + avg)] = str(round(m + avg, 2))
            else:
                my_marks[int(m + avg)] = str(round(m + avg))
        return False, avg + r, avg - r, avg, my_marks
    # multiple points: label marks with relative offsets from the average
    return False, avg + r, avg - r, avg, {int(avg + i): f"{i}" for i in indices}
@app.callback(Output('fare-price-arima-data', 'data'),
              [Input('arima-point-slider', 'value'),
               Input('arima-reset-button', 'name')],
              [State('arima-graph', 'selectedData'), State('arima-checkbox', 'value')])
@cache.memoize(timeout=timeout)
def update_training_data_arima(value, update_on_reset, selectedData, disabled_checkbox):
    """ Update the fare price data for the visualization and for the training data when the user moves the slider.
    This function is also triggered when the user clicks on the reset button.
    Parameters
    ----------
    value: int
        The current value of the slider; the target level for the average of the selected points.
    update_on_reset: str
        The 'name' property of the reset button; it changes on reset so this callback re-runs.
    selectedData: dict
        A dictionary containing the selected points. This dictionary can be None or it can be a dictionary containing
        the key "points" and the value for this key is a list of points.
    disabled_checkbox: list
        The selected values of the 'arima-checkbox' component; presence of 'button' toggles whether the
        model is refit (see NOTE below).
    Returns
    -------
    The data that the visualizations are going to consume
    """
    # Get the data from the cache
    cached_data = [cache.get('fare-price-arima-data'), cache.get('arima-data')]
    training_df = pd.read_json(cached_data[0])
    new_model_df = pd.read_json(cached_data[1])
    # NOTE(review): the flag name reads inverted — the model is refit below
    # only when 'button' is NOT selected in the checkbox; confirm intent.
    disabled = True
    if 'button' in disabled_checkbox:
        disabled = False
    # Get the values of the selected points and the index of the points
    y_values = get_y_values(selectedData, selected_curve_number)
    point_indices = get_index_numbers(selectedData, selected_curve_number)
    # get the average to get the difference and update the real value
    if len(y_values) != 0:
        avg = sum(y_values) / len(y_values)
    else:
        avg = 0
    # Change the dataframe and put it in the cache
    if selectedData is not None:
        # Shift every selected point by the same offset so their average
        # lands on the slider value.
        updated_value = value - avg
        if updated_value != 0:
            # Update all the changed values
            for i in range(len(point_indices)):
                index = point_indices[i]
                training_df.iloc[index] = y_values[i] + updated_value
            if disabled:
                # Refit the model on the modified series and cache the new predictions.
                arima_model.fit(training_df)
                new_model_df = arima_model.predict(start_index, end_index)
                cache.set('arima-data', new_model_df.to_json())
            cache.set('fare-price-arima-data', training_df.to_json())
    # The timestamp lets downstream callbacks detect that the cached data changed.
    updated_on = str(datetime.now())
    return {'fare-price-arima-data': updated_on}
@app.callback(Output('arima-graph', 'figure'),
              [Input('fare-price-arima-data', 'data'),
               Input('arima-data', 'data'),
               Input('arima-graph', 'relayoutData'), Input('arima-graph', 'clickData'),
               Input('columns', 'value')],
              [State('arima-graph', 'selectedData'),
               State('arima-graph', 'figure')])
def make_arima_graph(fare_data_update, arima_data, relayout_data, clickData, columns, selectedData, figure):
    """Build the figure for the main ARIMA graph.

    Redrawn whenever the cached data changes; note the series are read
    straight from the cache rather than from the callback inputs.
    """
    train_df = pd.read_json(cache.get('fare-price-arima-data'))[:cutoff_date]
    model_df = pd.read_json(cache.get('arima-data'))
    return create_model_plot(model_xaxis=model_df.index,
                             model_yaxis=model_df['FARE'],
                             train_xaxis=train_df.index,
                             train_yaxis=train_df['AVG_FARE'],
                             relayout_data=relayout_data,
                             selectedData=selectedData,
                             model_df=model_df,
                             training_df=train_df,
                             model_name='Arima model (Predictive model)',
                             training_data_name='Average Fare Price (Actual price)',
                             title='Average Fare Price with ARIMA Model',
                             columns=columns)
@app.callback(Output("fare-price-arima-error", "figure"),
[Input("fare-price-arima-error", "id"),
Input('fare-price-arima-data', 'data'),
Input('arima-graph', 'figure'),
Input('columns', 'value')])
@cache.memoize(timeout=timeout)
def make_error_graph_arima(id, data, figure, columns):
# Get the difference between the fare_price data and the arima model
narima_df = pd.read_json(cache.get('arima-data'))
fare_df2 = pd.read_json(cache.get('fare-price-arima-data'))[:cutoff_date]
narima_df['AVG_FARE'] = narima_df.FARE
dif_df = narima_df.subtract(fare_df2)
dif_df = dif_df.dropna(how='all').dropna(axis=1, how='all')
dif_df
data = [
go.Scatter(
name="ARIMA Error",
x=dif_df.index,
y=dif_df['AVG_FARE'],
mode="lines+markers",
showlegend=True, line={'color': '#EFE031'}
)
]
layout = {
'title': {'text': "Error graph for the ARIMA model", 'font': {'color': '#ffffff'}},
'xaxis': dict(
# showline=True,
ticks="outside",
type='date',
color='#ffffff'
),
'yaxis': dict(
color='#ffffff'
),
'legend': dict(
font=dict(
color='#ffffff'
)
),
'autosize': True,
'paper_bgcolor': '#00004d',
'plot_bgcolor': '#00004d'
}
return {'data': data, 'layout': layout}
|
"""시뮬레이션에 사용되는 모듈들을 연동하여 시뮬레이션을 운영하는 SimulationOperator 클래스"""
import time
from .log_manager import LogManager
from .operator import Operator
class SimulationOperator(Operator):
    """Operator that runs a simulation by wiring the trading modules together."""
    def __init__(self):
        super().__init__()
        self.logger = LogManager.get_logger(__class__.__name__)
        # Number of completed simulation turns.
        self.turn = 0
        # Budget assigned to the simulation.
        self.budget = 0
    def _execute_trading(self, task):
        """Execute one round of automated trading, then restart the timer.

        The "simulation_terminated" state exists only in simulations: it means
        the simulation has finished but the Operator has not been stopped.
        Starting and stopping the Operator must be done externally.
        """
        del task
        self.logger.info(f"############# Simulation trading is started : {self.turn + 1}")
        self.is_timer_running = False
        try:
            # Feed the latest market info to the strategy and the analyzer.
            trading_info = self.data_provider.get_info()
            self.strategy.update_trading_info(trading_info)
            self.analyzer.put_trading_info(trading_info)
            def send_request_callback(result):
                # Invoked by the trader with the outcome of the submitted request.
                self.logger.debug("send_request_callback is called")
                if result == "pass":
                    return
                if result == "error!":
                    self.logger.error("request fail")
                    return
                if result["msg"] == "game-over":
                    # End of the simulated data: produce the final report and
                    # mark the simulation as terminated (Operator keeps running).
                    trading_info = self.data_provider.get_info()
                    self.analyzer.put_trading_info(trading_info)
                    self.last_report = self.analyzer.create_report(tag=self.tag)
                    self.state = "simulation_terminated"
                    return
                self.strategy.update_result(result)
                self.analyzer.put_result(result)
            target_request = self.strategy.get_request()
            if target_request is None:
                self.logger.error("request should be submitted at simulation!")
                return
            self.trader.send_request(target_request, send_request_callback)
            self.analyzer.put_requests(target_request)
        except AttributeError as err:
            self.logger.error(f"excuting fail: {err}")
        self.turn += 1
        self.logger.debug("############# Simulation trading is completed")
        self._start_timer()
    def get_score(self, callback, index_info=None):
        """Deliver the current return rate through the given callback.

        If the simulation has already terminated, the last report's summary
        is delivered instead.
        Returns (via callback):
            (
                start_budget: starting budget
                final_balance: final balance
                cumulative_return: cumulative return since the reference point
                price_change_ratio: dict of price change ratios per held asset
                graph: path of the generated graph file
                return_high: highest return within the period
                return_low: lowest return within the period
            )
        """
        if self.state != "running":
            self.logger.debug("already terminated return last report")
            callback(self.last_report["summary"])
            return
        def get_score_callback(task):
            # Runs on the worker thread: build the report and hand it to the caller.
            graph_filename = f"{self.OUTPUT_FOLDER}gs{round(time.time())}.jpg"
            try:
                index_info = task["index_info"]
                task["callback"](
                    self.analyzer.get_return_report(
                        graph_filename=graph_filename, index_info=index_info
                    )
                )
            except TypeError as err:
                self.logger.error(f"invalid callback: {err}")
        self.worker.post_task(
            {"runnable": get_score_callback, "callback": callback, "index_info": index_info}
        )
|
# Build a 5x5 board where every cell starts as "O", then display it.
board = [["O"] * 5 for _ in range(5)]
print(board)
|
from __future__ import annotations
from types import MethodType
from copy import copy
from mongoengine import ValidationError
__all__ = ('Validator',)
class ValidatorMeta(type):
    """Metaclass that turns a class-level `condition` predicate into a
    `check` method raising ValidationError.

    Note the trailing call in __new__: the metaclass returns an *instance*
    of the freshly created class, not the class itself — the `prepare`
    decorator later unwraps it back into a class.
    """
    def __new__(cls, name, bases, attrs):
        def wrap(f, m):
            # Build a check() that raises ValidationError(m) when the
            # predicate f rejects the value.
            def check(self, value):
                if not f(value):
                    raise ValidationError(m)
            return check
        # The error message is carried in via the `m` annotation on the class body.
        attrs['check'] = wrap(attrs['condition'], attrs['__annotations__']['m'])
        return super().__new__(cls, name, bases, attrs)()
def prepare(Validator):
    """Unwrap the instance produced by ValidatorMeta back into its class,
    stripping the construction-time `__annotations__` and `condition`
    attributes that the metaclass has already consumed."""
    cls = type(Validator)
    del cls.__annotations__
    del cls.condition
    return cls
@prepare
class Validator(metaclass=ValidatorMeta):
    """Composable validation rule.

    ValidatorMeta turns `condition` into a `check(value)` method that raises
    ValidationError with the message taken from the `m` annotation; @prepare
    then unwraps the metaclass-produced instance back into a class and strips
    the `m` / `condition` placeholders.  Instances compose with `&` (both
    operands must pass) and `|` (at least one must pass).
    """
    # Placeholders consumed by ValidatorMeta and removed by @prepare.
    # NOTE(review): the wrapped predicate is called as f(value) with a single
    # argument, so this two-argument lambda only makes sense as a placeholder
    # for subclasses — confirm against the subclass definitions.
    m: 'TEMP'
    condition = lambda self, value: False
    def __and__(self, other: Validator) -> Validator:
        """Return a validator that passes only when both operands pass."""
        this_check = self.check
        def check(self, value):
            # Run both checks; the first failure propagates unchanged.
            this_check(value)
            other.check(value)
        return self.with_new_check(check)
    def __or__(self, other: Validator) -> Validator:
        """Return a validator that passes when either operand passes."""
        this_check = self.check
        def check(self, value):
            try:
                this_check(value)
            except ValidationError as e1:
                try:
                    other.check(value)
                except ValidationError as e2:
                    # Both failed: report both messages joined with ' or '.
                    raise ValidationError(' or '.join((e1.args[0], e2.args[0])))
        return self.with_new_check(check)
    def __call__(self, value):
        """Allow a validator instance to be used as a plain callable."""
        return self.check(value)
    def with_new_check(self, check):
        # Return a shallow copy whose bound check() is replaced by `check`.
        self = copy(self)
        self.check = MethodType(check, self)
        return self
from impedance.models.circuits import BaseCircuit, CustomCircuit, Randles
import json
import matplotlib.pyplot as plt
import numpy as np
import os
def test_BaseCircuit():
    """The constructor must store the initial guess unchanged."""
    guess = [0.01, 0.02, 50]
    assert BaseCircuit(guess).initial_guess == guess
def test_Randles():
    """Fit a Randles circuit to the example data, then exercise plotting,
    prediction, and input validation (bad types, lengths, missing args)."""
    # check for proper functionality
    # get example data
    data = np.genfromtxt(os.path.join("./data/",
                                      "exampleData.csv"), delimiter=',')
    f = data[:, 0]
    Z = data[:, 1] + 1j * data[:, 2]
    randles = Randles(initial_guess=[.01, .005, .1, .0001, 200])
    # Fit only the capacitive (negative-imaginary) half of the spectrum.
    randles.fit(f[np.imag(Z) < 0], Z[np.imag(Z) < 0])
    np.testing.assert_almost_equal(randles.parameters_,
                                   np.array([1.86146620e-02, 1.15477171e-02,
                                             1.33331949e+00, 6.31473571e-02,
                                             2.22407275e+02]), decimal=2)
    # check that plotting returns a plt.Axes() object
    _, ax = plt.subplots()
    assert isinstance(randles.plot(ax, None, Z, kind='nyquist'), type(ax))
    assert isinstance(randles.plot(None, None, Z, kind='nyquist'), type(ax))
    _, axes = plt.subplots(nrows=2)
    assert isinstance(randles.plot(axes, f, Z, kind='bode')[0], type(ax))
    assert isinstance(randles.plot(None, f, Z, kind='bode')[0], type(ax))
    chart = randles.plot(f_data=f, Z_data=Z)
    datasets = json.loads(chart.to_json())['datasets']
    for dataset in datasets.keys():
        assert len(datasets[dataset]) == len(Z)
    # check that predicting impedance from fit works
    # (builtin complex: np.complex was deprecated in NumPy 1.20 and removed in 1.24)
    assert np.isclose(randles.predict(np.array([10.0])),
                      complex(0.02495749, -0.00614842))
    # check that it rejects improper inputs - enforcing initial guess types
    try:
        r = Randles(initial_guess=['hi', 0.1])
    except(AssertionError):
        pass
    else:
        raise Exception('unhandled error occurred')
    # check that it rejects improper inputs - enforcing data types
    try:
        r = Randles(initial_guess=[.01, .005, .1, .0001, 200])
        r.fit(['hi', 'hello'], [0.5, 0.2])
    except(AssertionError):
        pass
    else:
        raise Exception('unhandled error occurred')
    # check that it rejects improper inputs - enforcing data lengths
    try:
        r = Randles(initial_guess=[.01, .005, .1, .0001, 200])
        r.fit(f[np.imag(Z) < 0][:5], Z[np.imag(Z) < 0])
    except(AssertionError):
        pass
    else:
        raise Exception('unhandled error occurred')
    # check that it rejects improper inputs
    # enforcing the length of initial_guess
    try:
        r = Randles(initial_guess=[.01, .005, .1, .0001])
    except(AssertionError):
        pass
    else:
        raise Exception('unhandled error occurred')
    # check that it rejects missing input
    try:
        r = Randles()
    except(AssertionError):
        pass
    else:
        raise Exception('unhandled error occurred')
def test_CustomCircuit():
    """Check parameter-name extraction, fit state, string rendering, and
    initial-guess length validation of CustomCircuit."""
    guesses = [.01, .005, .1, .005, .1, .001, 200]
    circuit_str = 'R0-p(R1,C1)-p(R2,C2)-Wo1'
    circuit = CustomCircuit(initial_guess=guesses, circuit=circuit_str)
    # check get_param_names()
    names, units = circuit.get_param_names()
    assert names == ['R0', 'R1', 'C1', 'R2', 'C2', 'Wo1_0', 'Wo1_1']
    assert units == ['Ohm', 'Ohm', 'F', 'Ohm', 'F', 'Ohm', 'sec']
    # check _is_fit()
    assert not circuit._is_fit()
    circuit = CustomCircuit(initial_guess=[.01, .005, .1],
                            circuit='R0-p(R1,C1)', name='Test')
    expected = ('\nName: Test\n'
                'Circuit string: R0-p(R1,C1)\n'
                'Fit: False\n'
                '\nInitial guesses:\n'
                ' R0 = 1.00e-02 [Ohm]\n'
                ' R1 = 5.00e-03 [Ohm]\n'
                ' C1 = 1.00e-01 [F]\n')
    assert str(circuit) == expected
    # check that it rejects improper inputs
    # enforcing the length of initial_guess
    try:
        CustomCircuit(initial_guess=[.01, .005, .1, .005, .1, .001, 200],
                      circuit='R0-p(R1,CPE1)-p(R1,C1)-Wo1')
    except(AssertionError):
        pass
    else:
        raise Exception('unhandled error occurred')
    return
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from .models import WatsonComponents, WatsonAccess, WatsonLogs
@admin.register(WatsonComponents)
class AssistantComponentsAdmin(admin.ModelAdmin):
    """Admin list view for Watson components: shows the component description
    and audit dates, filterable by description and insert date.

    (Removed a dead, commented-out `fieldsets` definition.)
    """
    list_display = (
        'pk_watson_components', 'dsc_comp', 'insert_date', 'update_date',
    )
    list_filter = ('dsc_comp', 'insert_date')
@admin.register(WatsonAccess)
class AssistantAccessAdmin(admin.ModelAdmin):
    """Admin list view for Watson access records."""
    list_display = ('fk_watson_components', 'component_name', 'component_url',
                    'insert_date', 'update_date')
@admin.register(WatsonLogs)
class AssistantLogsAdmin(admin.ModelAdmin):
    """Admin list view for Watson conversation logs."""
    list_display = ('fk_user', 'fk_watson_components', 'sender_name',
                    'sender_message', 'flag_invalid_response', 'flag_resolve',
                    'insert_date', 'update_date')
|
import csv
'''
data = [[["Peter Lovett", "plovett@uoregon.edu"], ["Cathy Webster", "cwebster@uoregon.edu"]], [["Kathryn Lovett", "klovett2@uoregon.edu"], ["Wyatt Reed", "wyatt@uoregon.edu"], ["Bob Bogus", "bbogus@uoregon.edu"]]]
'''
def csv_writer(data, path):
    """Write *data* (an iterable of rows) to *path* as comma-separated CSV."""
    with open(path, "w", newline='') as csv_file:
        csv.writer(csv_file, delimiter=',').writerows(data)
#csv_writer(data, "results.csv") |
"""Convert from .npy to .js dictionaries.
Usage:
convert.py <input_path> [options]
convert.py <input_path> <label_path> [options]
convert.py dir <raw_dir> <label_dir> [options]
Options:
--transform Whether or not
--out=<out> Directory or path for output [default: ./data/js]
--variable=<v> Name of variable to assign all data [default: data]
"""
import docopt
import glob
import os
import os.path
import numpy as np
def main():
    """Parse docopt arguments and dispatch to the single- or multi-cloud writer."""
    args = docopt.docopt(__doc__)
    variable = args['--variable']
    if args['dir']:
        # Batch mode: process every drive/cloud subdirectory.
        write_clusters_to_js(
            args['<raw_dir>'],
            args['<label_dir>'],
            args['--out'],
            variable)
        return
    out_dir = args['--out']
    # Treat --out as a file path when it already names a .js file,
    # otherwise as a directory to drop output.js into.
    out_path = out_dir if out_dir.endswith('.js') else os.path.join(out_dir, 'output.js')
    write_cluster_to_js(args['<input_path>'], args['<label_path>'], out_path, variable)
def write_clusters_to_js(raw_dir: str, label_dir: str, out_dir: str, variable: str):
    """Write all clouds contained in subdirectories from cloud_dir/.
    Hardcoded to use the directory structure generated by label.py
        raw_dir/<drive>/<cloud>/*.npy

    os.listdir returns entries in arbitrary order, so both listings are
    sorted to keep the raw and label drive directories correctly paired.
    """
    for raw, label in zip(sorted(os.listdir(raw_dir)), sorted(os.listdir(label_dir))):
        drive_dir_raw = os.path.join(raw_dir, raw)
        drive_dir_label = os.path.join(label_dir, label)
        for subdirectory in os.listdir(drive_dir_raw):
            raw_path = os.path.join(drive_dir_raw, subdirectory, '*.npy')
            label_path = os.path.join(drive_dir_label, subdirectory, 'labels.npy')
            # Skip clouds that were never labeled.
            if not os.path.exists(label_path):
                continue
            out_path = os.path.join(out_dir, raw, subdirectory, 'output.js')
            write_cluster_to_js(raw_path, label_path, out_path, variable)
def write_cluster_to_js(raw_path: str, label_path: str, out_path: str, variable: str):
    """Write all clusters to js files. Each cloud has its own js file.

    raw_path is a glob matching per-object .npy point clouds; label_path is
    an optional labels.npy whose rows carry [label, ?, 4x4 transform (16),
    scale, ...] per object.  Output is one JS file assigning
    {object: {vertices[, label]}} to *variable*.
    """
    os.makedirs(os.path.dirname(out_path), exist_ok=True)
    data = {}
    # Sort once and reuse: the original sorted only the np.load() pass while
    # iterating an unsorted glob for paths, so clouds could pair with the
    # wrong file names (and wrong label rows).
    paths = sorted(glob.iglob(raw_path))
    pcs = [np.load(path) for path in paths]
    labels = np.load(label_path) if label_path is not None else paths
    if not len(paths):
        print('No files found at %s' % raw_path)
    for path, pc, label in zip(paths, pcs, labels):
        obj_name = os.path.basename(path).replace('.npy', '').replace('.stl', '')
        if label_path is not None:
            # Apply the stored homogeneous transform and scale to the cloud.
            M = np.ones((4, pc.shape[0]))
            M[:3, :] = pc.T
            T = label[2: 18].reshape((4, 4))
            s = label[18]
            print(s)
            pc = T.dot(M)[:3, :].T * s
        data[obj_name] = {
            'vertices': [{'x': x, 'y': y, 'z': z} for x, y, z in pc]}
        print(label[19:])
        if label_path is not None:
            data[obj_name]['label'] = int(label[0])
    with open(out_path, 'w') as f:
        f.write('var %s = %s' % (variable, str(data)))
    print(' * [INFO] Finished processing timestep. (saved to ', out_path, ')')
if __name__ == '__main__':
    # Run the docopt CLI only when executed as a script.
    main()
|
import sys
import os
# Make the game package importable whether the script is run from the
# project directory or from a frozen (PyInstaller) bundle.
dirpath = os.getcwd()
sys.path.append(dirpath)
# os.path.join keeps the parent-directory entry portable; the original
# hard-coded Windows "\\..\\" separators, breaking on other platforms.
sys.path.append(os.path.join(dirpath, os.pardir))
if getattr(sys, "frozen", False):
    # Inside a PyInstaller bundle, bundled resources live in the unpack dir.
    os.chdir(sys._MEIPASS)
import engine
import game.data.components
import game.data.buttonCallbacks as buttonCallbacks
def main():
    """Create the engine application, load the game definition, and run it."""
    application = engine.getApp()
    application.initGame("data/main.json", game.data.components, buttonCallbacks)
    application.run()
if __name__ == "__main__":
main() |
import glob
import logging
import os
import re
import shutil
import tempfile
import hydra
import numpy as np
import pyparsing as pp
from edalize import *
from hydra.core.hydra_config import HydraConfig
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from do_blink.edalize.vivado import Vivado
# A logger for this file
log = logging.getLogger(__name__)
from pathlib import Path
def build_do_blink_designs(cfg):
    """Stage the do_blink benchmark sources, run Vivado via edalize, and
    return the total build time in seconds.

    The HDL and overlay sources are rsynced into /tmp staging directories,
    synthesis and place-and-route are driven by hook scripts, and the
    per-stage runtimes are parsed out of the tool logs.  A failed run
    returns a sentinel total of 10000 seconds.
    """
    project_dir = get_original_cwd()
    # Repository source trees and their /tmp staging counterparts.
    verilog_common_src_dir = os.path.join(
        project_dir, f"do_blink/{cfg.do_blink.backend}_benchmark/hdl/common/"
    )
    verilog_common_out_dir = os.path.join(
        "/tmp", f"do_blink/{cfg.do_blink.backend}_benchmark/hdl/common"
    )
    verilog_src_dir = os.path.join(
        project_dir,
        f"do_blink/{cfg.do_blink.backend}_benchmark/hdl/{cfg.do_blink.num_luts}/src/",
    )
    verilog_out_dir = os.path.join(
        "/tmp",
        f"do_blink/{cfg.do_blink.backend}_benchmark/hdl/{cfg.do_blink.num_luts}/src",
    )
    overlay_dir = os.path.join(
        project_dir,
        f"do_blink/{cfg.do_blink.backend}_benchmark/overlay/{cfg.do_blink.figure}_{cfg.do_blink.sub_figure}/{cfg.do_blink.bft}/{cfg.do_blink.num_luts}/{cfg.do_blink.device}/",
    )
    overlay_out_dir = os.path.join(
        "/tmp",
        f"do_blink/{cfg.do_blink.backend}_benchmark/overlay/{cfg.do_blink.figure}_{cfg.do_blink.sub_figure}/{cfg.do_blink.bft}/{cfg.do_blink.num_luts}/{cfg.do_blink.device}",
    )
    name = "_".join(
        [
            cfg.do_blink.backend,
            cfg.do_blink.figure,
            cfg.do_blink.sub_figure,
            str(cfg.do_blink.device),
            str(cfg.do_blink.num_luts),
            cfg.do_blink.bft,
            # Digits of the hydra run directory make the name unique per run.
            # Raw string: "\D" in a plain literal is an invalid escape sequence.
            re.sub(r"\D", "", os.getcwd()),
        ]
    )
    work_root = tempfile.mkdtemp(prefix=name + "_")
    tool_options = {
        "common_src_dir": verilog_common_out_dir,
        "src_dir": verilog_out_dir,
        "overlay_dir": overlay_out_dir,
        "part": cfg.do_blink.part,
        "load_vivado": cfg.do_blink.load_vivado,
        "use_abs": cfg.do_blink.get("use_abs", False),
    }
    pnr_script = "pnr_abs.tcl" if cfg.do_blink.get("use_abs", False) else "pnr.tcl"
    # Hooks: stage the sources into /tmp, then run synthesis and PnR.
    hooks = {
        "pre_run": [
            {"cmd": ["mkdir", "-p", verilog_common_out_dir], "name": "mkdir"},
            {"cmd": ["mkdir", "-p", verilog_out_dir], "name": "mkdir"},
            {"cmd": ["mkdir", "-p", overlay_out_dir], "name": "mkdir"},
            {
                "cmd": ["rsync", "-a", verilog_common_src_dir, verilog_common_out_dir],
                "name": "get common verilog files",
            },
            {
                "cmd": ["rsync", "-a", verilog_src_dir, verilog_out_dir],
                "name": "get verilog files",
            },
            {
                "cmd": ["rsync", "-a", overlay_dir, overlay_out_dir],
                "name": "get overlay files",
            },
            {
                "cmd": ["sh", "vivado.sh", "-mode", "batch", "-source", "synth.tcl"],
                "name": "run synth.tcl",
            },
            {
                "cmd": ["sh", "vivado.sh", "-mode", "batch", "-source", pnr_script],
                "name": "run pnr.tcl",
            },
        ]
    }
    edam = {
        "name": name,
        "tool_options": {"vivado": tool_options},
        "toplevel": "top",
        "hooks": hooks,
    }
    backend = Vivado(edam=edam, work_root=work_root)
    backend.configure()
    try:
        backend.run()
        # Read the synthesis and implementation logs (with-blocks close the
        # file handles the original leaked via open(...).read()).
        with open(
            glob.glob("{}/**/run_synth.log".format(work_root), recursive=True)[0], "r"
        ) as log_file:
            run_synth_log = log_file.read()
        run_log_name = "run_abs" if cfg.do_blink.get("use_abs", False) else "run"
        with open(
            glob.glob("{}/**/{}.log".format(work_root, run_log_name), recursive=True)[
                0
            ],
            "r",
        ) as log_file:
            run_log = log_file.read()
        # "<stage>: <seconds> seconds" markers printed by the tcl scripts.
        syn_time_pattern = pp.Literal("syn:") + pp.Word(pp.nums) + pp.Literal("seconds")
        rdchk_time_pattern = (
            pp.Literal("read_checkpoint:") + pp.Word(pp.nums) + pp.Literal("seconds")
        )
        opt_time_pattern = pp.Literal("opt:") + pp.Word(pp.nums) + pp.Literal("seconds")
        place_time_pattern = (
            pp.Literal("place:") + pp.Word(pp.nums) + pp.Literal("seconds")
        )
        route_time_pattern = (
            pp.Literal("route:") + pp.Word(pp.nums) + pp.Literal("seconds")
        )
        bit_gen_time_pattern = (
            pp.Literal("bit_gen:") + pp.Word(pp.nums) + pp.Literal("seconds")
        )
        syn_time = float(syn_time_pattern.searchString(run_synth_log).asList()[0][1])
        rdchk_time = float(rdchk_time_pattern.searchString(run_log).asList()[0][1])
        opt_time = float(opt_time_pattern.searchString(run_log).asList()[0][1])
        place_time = float(place_time_pattern.searchString(run_log).asList()[0][1])
        route_time = float(route_time_pattern.searchString(run_log).asList()[0][1])
        bit_gen_time = float(bit_gen_time_pattern.searchString(run_log).asList()[0][1])
        total_time = (
            syn_time + rdchk_time + opt_time + place_time + route_time + bit_gen_time
        )
        # CSV-style summary line consumed by the experiment analysis.
        log.info(
            f"{cfg.do_blink.device},{int(syn_time)},{int(rdchk_time)},{int(opt_time)},{int(place_time)},{int(route_time)},{int(bit_gen_time)},{int(total_time)}"
        )
        return total_time
    except RuntimeError as e:
        log.info(e)
        log.info("Experiment failed!")
        # Sentinel so a sweeping optimizer strongly penalizes failed builds.
        total_time = 10000
        return total_time
@hydra.main(config_path="../../conf", config_name="config")
def parse_config(cfg: DictConfig) -> None:
return build_do_blink_designs(cfg)
if __name__ == "__main__":
parse_config()
|
import stactools.core
from stactools.bc_dem.stac import create_collection, create_item
__all__ = ['create_collection', 'create_item']
# Route stactools I/O through fsspec-backed file handling.
stactools.core.use_fsspec()
def register_plugin(registry):
    """Hook called by stactools to register the bc_dem CLI subcommand."""
    # Imported lazily so merely importing the package stays cheap.
    from stactools.bc_dem import commands as _commands
    registry.register_subcommand(_commands.create_bcdem_command)
__version__ = "0.1.0"
|
import requests
from bs4 import BeautifulSoup
from Card import Card
class Character:
    """A Fire Emblem Cipher character scraped from a wiki page.

    Attributes:
        name:  the character's title.
        link:  URL of the page listing the character's cards.
        cards: Card objects collected by fill_cards().
    """
    def __init__(self, title, link):
        self.name = title
        self.link = link
        # Per-instance list.  The original declared `cards = list()` at class
        # level, so every Character instance shared (and appended to) the
        # same list.
        self.cards = []
    def __str__(self):
        return "<" + self.name + " , " + self.link + ">"
    def print_cards(self):
        """Print every card collected so far."""
        for card in self.cards:
            print(card)
    def fill_cards(self):
        """Scrape self.link and append one Card per card table on the page.

        Returns self so calls can be chained.
        """
        URL = self.link
        gotlink = requests.get(URL)
        soup = BeautifulSoup(gotlink.text, 'lxml')
        tables = soup.find_all('table', class_='sf')
        for i in tables:
            text = i.find('th').get_text()  # name of card
            trxt = i.find_all('td')  # Rest of the text of the card
            listfortext = list()
            crd = Card(text)
            for j in trxt:
                txtarr = j.get_text()
                listfortext.append(txtarr)
            # Retrieve information about the card and store it in a temp variable #
            # NOTE: the lookup keys below (with/without trailing "\n" or space)
            # mirror the wiki table markup exactly — do not normalize them.
            clss = listfortext[listfortext.index("Class\n") + 1]  # Class
            rng = listfortext[listfortext.index("Range\n") + 1]  # Range
            insignia = listfortext[listfortext.index("Insignia\n") + 1]  # Insignia
            gender = listfortext[listfortext.index("Gender ") + 1]  # Gender
            weapon = listfortext[listfortext.index("Weapon ") + 1]  # Weapon
            other = listfortext[listfortext.index("Other") + 1]  # Other
            cost = listfortext[listfortext.index("Cost") + 1]  # Cost
            promotion = listfortext[listfortext.index("Promotion\n") + 1]  # Promotion
            attack = listfortext[listfortext.index("Attack\n") + 1]  # Attack
            support = listfortext[listfortext.index("Support\n") + 1]  # Support
            series = listfortext[listfortext.index("Series\n") + 1]  # Series
            illustrator = listfortext[listfortext.index("Illustrator\n") + 1]  # Illustrator
            quote = listfortext[listfortext.index("Quote\n") + 1]  # Quote
            # NEED ERROR HANDLING AS NOT ALL UNITS HAVE ALL SKILLS
            try:
                support_skill = listfortext[listfortext.index("Support Skill") + 1]  # Support Skill
            except ValueError:
                print("HAPPENED S")
                support_skill = ""
            try:
                skill_ichi = listfortext[listfortext.index("Skill 1\n") + 1]  # Skill one
            except ValueError:
                print("HAPPENED 1")
                skill_ichi = ""
            try:
                skill_ni = listfortext[listfortext.index("Skill 2\n") + 1]  # Skill two
            except ValueError:
                print("HAPPENED 2")
                skill_ni = ""
            try:
                skill_san = listfortext[listfortext.index("Skill 3\n") + 1]  # Skill three
            except ValueError:
                print("HAPPENED 3")
                skill_san = ""
            try:
                skill_yon = listfortext[listfortext.index("Skill 4\n") + 1]  # Skill four
            except ValueError:
                print("HAPPENED 4")
                skill_yon = ""
            try:
                skill_go = listfortext[listfortext.index("Skill 5\n") + 1]  # Skill five
            except ValueError:
                print("HAPPENED 5")
                skill_go = ""
            # Add the info of the card to the previously created card object
            crd.add_info([find_color(insignia), clss, rng, insignia, gender, weapon, other, cost, promotion,
                          attack, support, series, illustrator, quote, skill_ichi, skill_ni, skill_san, skill_yon,
                          skill_go, support_skill
                          ])
            self.cards.append(crd)  # Add card to the list
        return self
def find_color(x):
    """Map a card's insignia to its deck color; unknown insignias are Colorless."""
    insignia_colors = {
        "Blade of Light": "Red",
        "Mark of Naga": "Blue",
        "Hoshido": "White",
        "Nohr": "Black",
        "Hoshido / Nohr": "Black and White",
        "Medallion": "Green",
        "Divine Weapons (Purple)": "Purple",
        "Holy War Flag": "Yellow",
    }
    return insignia_colors.get(x, "Colorless")
|
#!/usr/bin/env python3
##########################################################################################
# Developers: Icaro Alzuru and Aditi Malladi
# Project: HuMaIN (http://humain.acis.ufl.edu)
# Description: Simulated version of the regular expression Event Date extraction.
##########################################################################################
# Copyright 2019 Advanced Computing and Information Systems (ACIS) Lab - UF
# (https://www.acis.ufl.edu/)
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
##########################################################################################
import argparse, shutil
import pandas as pd
from humain.constants import *
from humain.utils import *
if __name__ == '__main__':
    """ Simulated version of the regular expression Event Date extraction
    """
    # os and sys were only available implicitly through the wildcard imports
    # above; make them explicit so the script cannot break if those modules
    # stop re-exporting them.
    import os
    import sys
    parser = argparse.ArgumentParser("Run the simulated version of the regular expression Event Date extraction.")
    parser.add_argument('-d', '--fulltext_dir', action="store", required=True, help="Directory with the fulltext transcription files of the images.")
    parser.add_argument('-f', '--regexp_file', action="store", required=True, help="File with the correspondent Event Date extracted using the regular expresion algorithm.")
    parser.add_argument('-m', '--metric', action="append", required=False, help="One or more metrics that will be collected when running the regular expression extraction.")
    parser.add_argument('-o', '--output_dir', action="store", required=True, help="Directory where the accepted and rejected extractions will be stored.")
    args = parser.parse_args()
    # Usage example:
    # python3 ed_reg_expr.py -d ~/Summer2019/HuMaIN_Simulator/humain/selfie/results/event_date_001/ocr_ds -f ~/Summer2019/HuMaIN_Simulator/datasets/aocr_insects/reg_exp/gc-ocr/reg_expr.tsv -m duration -o ~/Summer2019/HuMaIN_Simulator/humain/selfie/results/event_date_001/reg_expr_ds
    ################################################################################################################################
    # ARGUMENTS VALIDATIONS
    ################################################################################################################################
    #### INPUTS
    # args.fulltext_dir
    verify_dir( args.fulltext_dir, 'The fulltext transcription directory (' + args.fulltext_dir + ') was not found: ', parser, 1 )
    # args.regexp_file
    verify_file( args.regexp_file, 'The event dates file (' + args.regexp_file + ') was not found: ', parser, 2 )
    # args.metric — None when no -m flag was given (action="append" has no
    # default), so the original `len(args.metric) > 0` crashed with TypeError.
    metrics_dir = os.path.dirname( args.regexp_file )
    if args.metric:
        # Metric directory
        metrics_dir = metrics_dir + "/metrics"
        verify_dir( metrics_dir, 'The metrics directory was not found.', parser, 3 )
        # Metric files
        for m_name in args.metric:
            metric_file = metrics_dir + "/" + m_name + ".csv"
            verify_file( metric_file, 'The file metric ' + metric_file + ' was not found in the metrics directory.', parser, 4 )
    else:
        # Normalize to an empty list so the metric loop below is a no-op.
        args.metric = []
    #### OUTPUTS
    # Output directory: args.output_dir
    verify_create_dir( args.output_dir, 'The output directory could not be created.', parser, 5 )
    # Output subdirectories for the accepted values and rejected specimens
    verify_create_dir( args.output_dir + "/accepted", 'The output directory for the accepted event date values could not be created.', parser, 6 )
    verify_create_dir( args.output_dir + "/rejected", 'The output directory for the rejected specimens could not be created.', parser, 7 )
    # Output files
    accepted_file = args.output_dir + "/accepted/accepted.tsv"
    rejected_file = args.output_dir + "/rejected/rejected.txt"
    verify_create_file( accepted_file, 'The output file, for the extracted event dates, could not be created.', parser, 8 )
    verify_create_file( rejected_file, 'The output file of rejected specimens, could not be created.', parser, 9 )
    # Metric folders
    verify_create_dir( args.output_dir + "/accepted/metrics", 'The output metrics directory for the accepted event date values could not be created.', parser, 10 )
    verify_create_dir( args.output_dir + "/rejected/metrics", 'The output metrics directory for the rejected specimens could not be created.', parser, 11 )
    ################################################################################################################################
    # BUILD A DATAFRAME WITH THE EXTRACTED EVENT DATE VALUES USING REGULAR EXPRESIONS
    df = pd.read_csv( args.regexp_file, sep='\t', names=['filename', 'value'] )
    df = df.fillna('')
    ################################################################################################################################
    # LOAD IN DIFFERENT STRUCTURES THE ACCEPTED (WITH EVENT DATE) AND REJECTED SPECIMENS
    # A specimen with an empty value means the regular expression found no date.
    accepted_dict = {}
    rejected_list = []
    for index, row in df.iterrows():
        if row['value'] == '':
            rejected_list.append( row['filename'] )
        else:
            accepted_dict[ row['filename'] ] = row['value']
    ################################################################################################################################
    # CREATE THE METRIC FILES
    # For each metric, divide the values in Accepted and Rejected
    for m_name in args.metric:
        # Loads the metric values in a dataframe
        metric_file = metrics_dir + "/" + m_name + ".csv"
        df_metric = pd.read_csv( metric_file, names=['filename', 'value'] )
        accepted_txt = ""
        rejected_txt = ""
        # Divide the metric value in Accepted and Rejected
        for index, row in df_metric.iterrows():
            if row['filename'] in accepted_dict.keys():
                accepted_txt += row['filename'] + "," + str(row['value']) + "\n"
            else:
                rejected_txt += row['filename'] + "," + str(row['value']) + "\n"
        # Create and fill the Accepted metric file
        new_metric_filename = args.output_dir + "/accepted/metrics/" + m_name + ".csv"
        with open(new_metric_filename, "w+") as f_m:
            f_m.write( accepted_txt )
        # Create and fill the Rejected metric file
        new_metric_filename = args.output_dir + "/rejected/metrics/" + m_name + ".csv"
        with open(new_metric_filename, "w+") as f_m:
            f_m.write( rejected_txt )
    ################################################################################################################################
    # SAVE THE ACCEPTED VALUES AND REJECTED SPECIMENS
    # Accepted Values
    accepted_txt = ""
    for filename, value in accepted_dict.items():
        accepted_txt += filename + "\t" + value + "\n"
    with open(accepted_file, "w+") as f_a:
        f_a.write( accepted_txt )
    # Rejected Specimens
    rejected_txt = ""
    for filename in rejected_list:
        rejected_txt += filename + "\n"
    with open(rejected_file, "w+") as f_r:
        f_r.write( rejected_txt )
    sys.exit(0)
|
import argparse
import asyncio
import datetime
import gzip
import json
import sys
import zlib
import aiohttp
import influxdb
import os
import time
# Where benchmark reports are written, and the name of this benchmark.
ANALYSIS_DIRECTORY = "../analysis/kairosdb"
BENCHMARK_TEST = "latency_meter"


def _build_query(aggregator):
    """Return a serialized KairosDB query for the given aggregate function.

    The start timestamp 1546300800000 (2019-01-01 00:00 UTC, ms) is a
    placeholder that run_test() replaces with the actual window start.
    """
    return json.dumps({
        "start_absolute": 1546300800000,
        "end_relative": {
            "value": "1",
            "unit": "days"
        },
        "metrics": [
            {
                "name": "sensor001",
                "aggregators": [
                    {
                        "name": aggregator,
                        "sampling": {
                            "value": 10000,
                            "unit": "minutes"
                        }
                    }
                ]
            }
        ]
    })


# One pre-serialized query per supported aggregate function.
# (The original repeated the same literal four times; only the aggregator
# name differed.)
QUERY_TYPE_LIST = {agg: _build_query(agg) for agg in ('count', 'max', 'sum', 'avg')}

# Optional network-emulation labels used to tag report filenames (set by main()).
LATENCY_TYPE = None
PACKETLOSS_TYPE = None
def setup_report():
    """Create the report output directory for this benchmark if it is missing."""
    report_dir = '{}/{}'.format(ANALYSIS_DIRECTORY, BENCHMARK_TEST)
    if not os.path.exists(report_dir):
        os.makedirs(report_dir)
async def run_test(number_of_day, total_number, type_request):
    """Bulk-insert CSV sensor data into KairosDB and time each insert/query round.

    :param number_of_day: which day-file to replay (../data/csv/csv_1sec_<n>d.dat)
    :param total_number: total number of concurrent test coroutines
        (used only in the report filename)
    :param type_request: aggregate function to query ('count'/'max'/'sum'/'avg')
    """
    # Shift the query window so it starts at the beginning of the replayed day.
    start_time = datetime.datetime(2019, 1, 1, 0, 0, 0) - datetime.timedelta(days=number_of_day - 1)
    query = QUERY_TYPE_LIST[type_request].replace('1546300800000',
                                                  '{}000'.format(
                                                      int(time.mktime(start_time.timetuple()))))
    sensor_list = ['sensor{:03d}'.format(i) for i in range(1, 11)]
    async with aiohttp.ClientSession() as session:
        try:
            with open('../data/csv/csv_1sec_{}d.dat'.format(number_of_day), 'rt') as f:
                f.readline()  # skip the CSV header line
                network_setup = ''
                if LATENCY_TYPE or PACKETLOSS_TYPE:
                    network_setup = '_{}_{}'.format(LATENCY_TYPE, PACKETLOSS_TYPE)
                with open('{}/{}/{}_{}_{}_{}{}.txt'.format(ANALYSIS_DIRECTORY, BENCHMARK_TEST, BENCHMARK_TEST, type_request,
                                                           total_number, number_of_day, network_setup),
                          'w') as fw:
                    i = 0
                    # 864 batches x 100 rows = 86400 rows, i.e. one day at 1 Hz.
                    for i in range(864):
                        bulk_data = {}
                        for sensor in sensor_list:
                            bulk_data[sensor] = []
                        for j in range(100):
                            l = f.readline()
                            d = l.strip().split(';')
                            cnt = 0
                            # Column 0 is a unix timestamp (s -> ms); columns
                            # 1..10 hold the ten sensors' values.
                            for sensor in sensor_list:
                                cnt += 1
                                bulk_data[sensor].append([int(d[0]) * 1000, float(d[cnt])])
                        data = []
                        for sensor in sensor_list:
                            data.append(
                                {
                                    "name": sensor,
                                    "datapoints": bulk_data[sensor],
                                    "tags": {
                                        "project": "benchmarkdb"
                                    }
                                }
                            )
                        # KairosDB accepts gzip-compressed JSON payloads.
                        gzipped = gzip.compress(bytes(json.dumps(data), 'UTF-8'))
                        headers = {'content-type': 'application/gzip'}
                        prev_time = time.time()
                        b = await session.post('http://localhost:8080/api/v1/datapoints', data=gzipped, headers=headers)
                        bulk_time = time.time()
                        a = await session.post('http://localhost:8080/api/v1/datapoints/query', data=query)
                        curr_time = time.time()
                        # Record: batch index, total round time, insert time,
                        # and (negated) query time.
                        fw.write('{}\t{}\t{}\t{}\n'.format(i, curr_time - prev_time, bulk_time - prev_time,
                                                           bulk_time - curr_time))
        except FileNotFoundError:
            print('You need to generate data first. Please use "data_generator.py" with csv data format.')
def main(argv):
    """Parse CLI options and launch one benchmark coroutine per day-window."""
    # Bug fix: these module-level settings are read by run_test(); without the
    # `global` declaration the assignments below only created function locals,
    # so --latency_type/--packetloss_type were silently ignored.
    global LATENCY_TYPE, PACKETLOSS_TYPE

    parser = argparse.ArgumentParser(description='Load data test')
    parser.add_argument('--thread', type=int, help='number of threads. default is 1')
    parser.add_argument('--aggregate', type=str, help='type of aggregate function [max, count, avg, sum]. default is "sum"')
    parser.add_argument('--latency_type', type=str, help='latency type')
    parser.add_argument('--packetloss_type', type=str, help='packet loss type')
    TOTAL_NUMBER = 1
    TYPE_REQUEST = 'sum'
    if len(argv) > 1:
        args = parser.parse_args(argv[1:])
        if args.thread:
            TOTAL_NUMBER = args.thread
        if args.aggregate:
            TYPE_REQUEST = args.aggregate
        if args.latency_type:
            LATENCY_TYPE = args.latency_type
        if args.packetloss_type:
            PACKETLOSS_TYPE = args.packetloss_type
    setup_report()
    # Run all day-windows concurrently on one event loop.
    several_futures = asyncio.gather(*[run_test(i, TOTAL_NUMBER, TYPE_REQUEST) for i in range(1, TOTAL_NUMBER + 1)])
    asyncio.get_event_loop().run_until_complete(several_futures)


if __name__ == '__main__':
    sys.exit(main(argv=sys.argv))
|
from unittest import TestCase
from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty
from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex1 import RemoveDupsSinglyLinkedList
class TestRemoveDupsSinglyLinkedList(TestCase):
    """Tests for RemoveDupsSinglyLinkedList.remove_dups_iterative (CTCI 2.1)."""

    def setUp(self):
        self.sll = RemoveDupsSinglyLinkedList()

    def _add_all(self, values):
        # Helper: append every value to the list under test.
        for v in values:
            self.sll.add(v)

    def test_remove_dups_empty(self):
        # Removing duplicates from an empty list must raise Empty.
        with self.assertRaises(Empty):
            self.sll.remove_dups_iterative()

    def test_remove_no_dupes(self):
        self._add_all(range(10))
        prev_len = len(self.sll)
        self.sll.remove_dups_iterative()
        # A list without duplicates must be left unchanged.
        # (assertEqual instead of assertTrue(a == b) for useful failure output.)
        self.assertEqual(len(self.sll), prev_len)

    def test_single_dupe_front(self):
        self._add_all([1, 1])
        prev_len = len(self.sll)
        self.sll.remove_dups_iterative()
        self.assertEqual(len(self.sll), prev_len - 1)

    def test_single_dupe_back(self):
        self._add_all([1, 2, 1])
        self.sll.remove_dups_iterative()
        self.assertEqual(self.sll.tail(), 2)

    def test_single_dupe_consecutive(self):
        self._add_all([1, 2, 1, 1])
        self.sll.remove_dups_iterative()
        self.assertEqual(self.sll.tail(), 2)

    def test_single_dupe_inside(self):
        self._add_all([1, 2, 3, 2])
        self.sll.remove_dups_iterative()
        self.assertEqual(self.sll.tail(), 3)

    def test_single_dupe_inside_consecutive(self):
        self._add_all([1, 2, 3, 2, 2])
        self.sll.remove_dups_iterative()
        self.assertEqual(self.sll.tail(), 3)

    def test_single_dupe_tail(self):
        self._add_all([1, 2, 2, 2])
        self.sll.remove_dups_iterative()
        self.assertEqual(len(self.sll), 2)

    def test_remove_multiple_dupes(self):
        self._add_all([1, 2, 1, 2, 1, 3, 3, 3, 3, 3, 4])
        self.sll.remove_dups_iterative()
        self.assertEqual(len(self.sll), 4)
|
import numpy as np
def calculate_distance(point1:tuple, point2:tuple) -> float:
    """
    Description:
        Calculates the Euclidean distance between two 2D points
    Arguments:
        - point1 : `tuple` first point as (x, y)
        - point2 : `tuple` second point as (x, y)
    Returns:
        - `float` : distance between the two points
    """
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return np.sqrt(dx * dx + dy * dy)
class Node:
    """A point in 3D space that can record which other nodes lie within a
    given XY-plane distance of it (the z coordinate is ignored for coverage).
    """

    def __init__(self, id:str, x:float, y:float, z:float) -> None:
        self.__id = id
        self.__x = x
        self.__y = y
        self.__z = z
        # Nodes found in range by find_covered_nodes().
        self.__covered_nodes = set()

    def find_covered_nodes(self, node_set:set, distance_threshold:float) -> None:
        """Add every node of node_set within distance_threshold (XY distance) to the covered set."""
        # Hoisted out of the loop: this node's position is loop-invariant
        # (the original rebuilt the tuple on every iteration).
        point = (self.__x, self.__y)
        for node in node_set:
            node_point = (node.get_x(), node.get_y())
            if calculate_distance(point, node_point) <= distance_threshold:
                self.__covered_nodes.add(node)

    def get_id(self) -> str:
        return self.__id

    def get_x(self) -> float:
        return self.__x

    def get_y(self) -> float:
        return self.__y

    def get_z(self) -> float:
        return self.__z

    def get_covered_nodes(self) -> set:
        return self.__covered_nodes

    def set_x(self, x:float) -> None:
        self.__x = x

    def set_y(self, y:float) -> None:
        self.__y = y

    def set_z(self, z:float) -> None:
        self.__z = z
class Sensor(Node):
    """A sensor node carrying a score; its covered nodes are the gateways in range."""

    def __init__(self, id:str, x:float, y:float, z:float, score:float=0.0) -> None:
        super().__init__(id, x, y, z)
        self.__score = score

    def find_covering_gateway(self, gateway_set:set, distance_threshold:float) -> None:
        """Record which gateways are within range of this sensor."""
        self.find_covered_nodes(gateway_set, distance_threshold)

    def get_covering_gateway(self) -> set:
        """Return one (arbitrary) covering gateway, leaving the covered set intact."""
        covering = self.get_covered_nodes()
        gateway = covering.pop()
        # pop() removed the element; put it back so the set is unchanged.
        covering.add(gateway)
        return gateway

    def get_score(self) -> float:
        return self.__score

    def set_score(self, score:float) -> None:
        self.__score = score
class Gateway(Node):
    """A gateway node; its covered nodes are the sensors it can reach."""

    def __init__(self, id:str, x:float, y:float, z:float) -> None:
        super().__init__(id, x, y, z)

    def __calc_scores(self) -> tuple:
        """Return (total, average) of the covered sensors' scores.

        Returns (0, 0.0) when no sensors are covered -- the original raised
        ZeroDivisionError in that case.
        """
        covered_sensors = self.get_covered_sensors()
        if not covered_sensors:
            return 0, 0.0
        total_score = sum(sensor.get_score() for sensor in covered_sensors)
        return total_score, total_score / len(covered_sensors)

    def find_covered_sensors(self, sensor_set:set, distance_threshold:float) -> None:
        """Record which sensors are within range of this gateway."""
        self.find_covered_nodes(sensor_set, distance_threshold)

    def get_covered_sensors(self) -> set:
        return self.get_covered_nodes()

    def get_total_score(self) -> float:
        return self.__calc_scores()[0]

    def get_average_score(self) -> float:
        return self.__calc_scores()[1]
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 3 20:50:16 2018
@author: jimmybow
"""
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import visdcc
# Dash app: a visdcc network graph plus two divs that echo the current
# node/edge selection (filled by the callbacks below).
app = dash.Dash()
app.layout = html.Div([
    visdcc.Network(id = 'net',
                   selection = {'nodes':[], 'edges':[]},
                   options = dict(height= '600px', width= '100%')),
    html.Div(id = 'nodes'),
    html.Div(id = 'edges')
])
@app.callback(
    Output('nodes', 'children'),
    [Input('net', 'selection')])
def myfun(x):
    """Show the id of the first selected node, if any."""
    s = 'Selected nodes : '
    nodes = x['nodes']
    if len(nodes) > 0:
        s += str(nodes[0])
    return s
@app.callback(
    Output('edges', 'children'),
    [Input('net', 'selection')])
def myfun(x):
    """Show every selected edge, one Div per edge id."""
    s = 'Selected edges : '
    edges = x['edges']
    if len(edges) > 0:
        s = [s] + [html.Div(i) for i in edges]
    return s
if __name__ == '__main__':
    # debug=True enables hot reload and the in-browser debugger; disable in production.
    app.run_server(debug=True)
|
import os
import base64
import posixpath
import logging
import requests
from constance import config
from urllib.parse import urljoin, quote, unquote
import xml.etree.ElementTree as ET
from django.http import HttpResponse
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import SessionAuthentication
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error
from seahub.base.templatetags.seahub_tags import email2nickname
from seahub.ocm_via_webdav.settings import ENABLE_OCM_VIA_WEBDAV, \
OCM_VIA_WEBDAV_OCM_ENDPOINT, OCM_VIA_WEBDAV_OCM_PROVIDER_URI, \
OCM_VIA_WEBDAV_NOTIFICATIONS_URI
from seahub.ocm_via_webdav.models import ReceivedShares
from seahub.ocm.settings import ENABLE_OCM, OCM_SEAFILE_PROTOCOL, \
OCM_RESOURCE_TYPE_LIBRARY, OCM_API_VERSION, OCM_SHARE_TYPES, \
OCM_ENDPOINT
logger = logging.getLogger(__name__)
def get_remote_domain_by_shared_by(shared_by):
    """Extract the remote server domain from a 'user@domain' style string.

    Splits on the LAST '@' so user ids that themselves contain '@'
    (e.g. 'lian@lian.com@https://demo.seafile.top') are handled.
    """
    _, _, domain = shared_by.rpartition('@')
    return domain if domain else shared_by
def get_remote_webdav_root_uri(remote_domain):
    """Query the remote ocm-provider endpoint and return its WebDAV root URI
    (e.g. '/public.php/webdav/'), or '' when the answer is unusable.
    """
    ocm_provider_url = urljoin(remote_domain, OCM_VIA_WEBDAV_OCM_PROVIDER_URI)
    resp = requests.get(ocm_provider_url)
    # Example ocm-provider response:
    # {
    #     'apiVersion': '1.0-proposal1',
    #     'enabled': True,
    #     'endPoint': 'https://nextcloud.seafile.top/index.php/ocm',
    #     'resourceTypes': [
    #         {
    #             'name': 'file',
    #             'protocols': {'webdav': '/public.php/webdav/'},
    #             'shareTypes': ['user', 'group']
    #         }
    #     ]
    # }
    resource_types = resp.json().get('resourceTypes', [])
    if not resource_types:
        logger.error('Can not get resource_types from {}'.format(ocm_provider_url))
        logger.error(resp.content)
        return ''
    # Only the first advertised resource type is considered.
    protocols = resource_types[0].get('protocols')
    if not protocols:
        logger.error('Can not get protocols from {}'.format(ocm_provider_url))
        logger.error(resp.content)
        return ''
    root_webdav_uri = protocols.get('webdav')
    if not root_webdav_uri:
        logger.error('Can not get webdav root uri from {}'.format(ocm_provider_url))
        logger.error(resp.content)
        return ''
    return root_webdav_uri
def get_remote_webdav_root_href(remote_domain):
    """Return the absolute WebDAV root URL on the remote server, or '' on failure."""
    uri = get_remote_webdav_root_uri(remote_domain)
    return urljoin(remote_domain, uri) if uri else ''
def get_webdav_auth_headers(shared_secret):
    """Build an HTTP Basic Authorization header from an OCM shared secret.

    The secret is suffixed with ':' up to the next multiple of four characters
    before base64-encoding. NOTE(review): presumably this mimics a
    'user:password' credential with an empty password -- confirm against the
    remote WebDAV implementation (a secret of length 4k gets FOUR colons).
    """
    padding = 4 - len(shared_secret) % 4
    credential = shared_secret + ':' * padding
    token = base64.b64encode(credential.encode('utf-8')).decode('utf-8')
    return {"Authorization": "Basic {}".format(token)}
def get_remote_ocm_endpoint(remote_domain):
    """Return the remote server's OCM endpoint URL (with trailing '/'), or '' on failure."""
    ocm_provider_url = urljoin(remote_domain, OCM_VIA_WEBDAV_OCM_PROVIDER_URI)
    response = requests.get(ocm_provider_url)
    endpoint = response.json().get('endPoint')
    if not endpoint:
        logger.error('Can not get endPoint from {}'.format(ocm_provider_url))
        logger.error(response.content)
        return ''
    # Normalize: later urljoin() calls need the trailing slash.
    if not endpoint.endswith('/'):
        endpoint += '/'
    return endpoint
class OCMProviderView(APIView):
    """Advertise this server's OCM capabilities at the ocm-provider endpoint."""

    throttle_classes = (UserRateThrottle,)

    def get(self, request):
        """
        Return ocm protocol info to remote server
        """
        result = {}
        if ENABLE_OCM:
            # NOTE(review): other servers return 'resourceTypes' as a LIST
            # (see the example response in get_remote_webdav_root_uri); here a
            # plain dict is returned -- confirm consumers accept this shape.
            result = {
                'enabled': True,
                'apiVersion': OCM_API_VERSION,
                'endPoint': config.SERVICE_URL + '/' + OCM_ENDPOINT,
                'resourceTypes': {
                    'name': OCM_RESOURCE_TYPE_LIBRARY,
                    'shareTypes': OCM_SHARE_TYPES,
                    'protocols': {
                        OCM_SEAFILE_PROTOCOL: OCM_SEAFILE_PROTOCOL,
                    }
                }
            }
        # OCM-via-webdav overwrites the result when both features are enabled.
        if ENABLE_OCM_VIA_WEBDAV:
            result = {
                'apiVersion': '1.0-proposal1',
                'enabled': True,
                'endPoint': urljoin(config.SERVICE_URL, OCM_VIA_WEBDAV_OCM_ENDPOINT),
                'resourceTypes': {
                    'name': 'file',
                    # The actual webdav URI is not advertised yet ('TODO' placeholder).
                    'protocols': {'webdav': 'TODO'},
                    'shareTypes': ['user'],
                }
            }
        return Response(result)
class SharesView(APIView):
    """OCM endpoint a remote server POSTs to when sharing an item with a local user."""

    throttle_classes = (UserRateThrottle,)

    def post(self, request):
        """
        Receive share from other service
        """
        if not ENABLE_OCM_VIA_WEBDAV:
            error_msg = 'OCM via webdav feature is not enabled.'
            return api_error(501, error_msg)
        # Example request body:
        # {'description': '',
        #  'name': 'file-3-in-nextcloud-folder.md',
        #  'owner': 'lian@https://nextcloud.seafile.top/',
        #  'ownerDisplayName': 'lian',
        #  'protocol': {'name': 'webdav',
        #               'options': {'permissions': '{http://open-cloud-mesh.org/ns}share-permissions',
        #                           'sharedSecret': 'HdjKpI4o6lamWwN'}},
        #  'providerId': 9,
        #  'resourceType': 'file',
        #  'shareType': 'user',
        #  'shareWith': 'lian@lian.com@https://demo.seafile.top',  # or 'lian@https://demo.seafile.top',
        #  'sharedBy': 'lian@https://nextcloud.seafile.top/',
        #  'sharedByDisplayName': 'lian'}
        protocol_dict = request.data.get('protocol', {})
        protocol_name = protocol_dict.get('name')
        shared_secret = protocol_dict.get('options').get('sharedSecret')
        permissions = protocol_dict.get('options').get('permissions')
        owner = request.data.get('owner')
        # NOTE(review): the OCM payload key is 'ownerDisplayName' (camelCase,
        # see example above) but 'owner_display_name' is read here, which
        # presumably always yields None -- confirm.
        owner_display_name = request.data.get('owner_display_name')
        name = request.data.get('name')
        description = request.data.get('description')
        provider_id = request.data.get('providerId')
        resource_type = request.data.get('resourceType')
        share_type = request.data.get('shareType')
        # Strip the trailing '@<server url>' part to keep only the local username.
        share_with = request.data.get('shareWith').split('http')[0].rstrip('@')
        shared_by = request.data.get('sharedBy')
        shared_by_display_name = request.data.get('sharedByDisplayName')
        share = ReceivedShares(description=description,
                               name=name,
                               owner=owner,
                               owner_display_name=owner_display_name,
                               protocol_name=protocol_name,
                               shared_secret=shared_secret,
                               permissions=permissions,
                               provider_id=provider_id,
                               resource_type=resource_type,
                               share_type=share_type,
                               share_with=share_with,
                               shared_by=shared_by,
                               shared_by_display_name=shared_by_display_name)
        share.save()
        # get webdav url, then PROPFIND the shared item to learn whether it is a folder
        remote_domain = get_remote_domain_by_shared_by(shared_by)
        webdav_root_href = get_remote_webdav_root_href(remote_domain)
        if not webdav_root_href:
            logger.error("Can't get remote webdav root href")
            error_msg = 'Internal Server Error'
            return api_error(501, error_msg)
        headers = get_webdav_auth_headers(shared_secret)
        prepared_request = requests.Request('propfind',
                                            webdav_root_href,
                                            headers=headers).prepare()
        request_session = requests.Session()
        resp = request_session.send(prepared_request)
        # A DAV 'collection' node inside resourcetype marks a directory.
        root = ET.fromstring(resp.content)
        if root[0].find('{DAV:}propstat') \
                  .find('{DAV:}prop') \
                  .find('{DAV:}resourcetype') \
                  .find('{DAV:}collection') is not None:
            share.is_dir = True
            share.save()
        result = {
            "recipientDisplayName": email2nickname(share_with)
        }
        return Response(result, status=status.HTTP_201_CREATED)
class ReceivedSharesView(APIView):
    """List the OCM shares received by the current user."""

    authentication_classes = (TokenAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated,)
    throttle_classes = (UserRateThrottle,)

    def get(self, request):
        """
        Get items shared from other service.
        """
        if not ENABLE_OCM_VIA_WEBDAV:
            error_msg = 'OCM via webdav feature is not enabled.'
            return api_error(501, error_msg)

        username = request.user.username
        # Every share is listed at its root path '/'.
        info_list = [
            {
                'id': share.id,
                'name': share.name,
                'ctime': share.ctime,
                'is_dir': share.is_dir,
                'shared_by': share.shared_by,
                'path': '/',
            }
            for share in ReceivedShares.objects.filter(share_with=username)
        ]
        return Response({'received_share_list': info_list})
class ReceivedShareView(APIView):
    """Browse (GET) or decline/remove (DELETE) a single received OCM share."""

    authentication_classes = (TokenAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated,)
    throttle_classes = (UserRateThrottle,)

    def get(self, request, share_id):
        """List the contents at `path` inside a received share via a remote WebDAV PROPFIND."""
        if not ENABLE_OCM_VIA_WEBDAV:
            error_msg = 'OCM via webdav feature is not enabled.'
            return api_error(501, error_msg)
        # parameter check
        path = request.GET.get('path')
        if not path:
            error_msg = 'path invalid.'
            return api_error(400, error_msg)
        # resource check
        try:
            share = ReceivedShares.objects.get(id=share_id)
        except ReceivedShares.DoesNotExist:
            error_msg = "OCM share {} not found.".format(share_id)
            return api_error(404, error_msg)
        # permission check: only the recipient may browse the share
        username = request.user.username
        if share.share_with != username:
            error_msg = 'Permission denied.'
            return api_error(403, error_msg)
        remote_domain = get_remote_domain_by_shared_by(share.shared_by)
        webdav_root_uri = get_remote_webdav_root_uri(remote_domain)
        if path == '/':
            webdav_uri = webdav_root_uri
        else:
            webdav_uri = posixpath.join(webdav_root_uri, path.lstrip('/'))
        webdav_href = urljoin(remote_domain, webdav_uri)
        headers = get_webdav_auth_headers(share.shared_secret)
        prepared_request = requests.Request('propfind',
                                            webdav_href,
                                            headers=headers).prepare()
        request_session = requests.Session()
        resp = request_session.send(prepared_request)
        # 207 Multi-Status is the expected PROPFIND success answer.
        if resp.status_code != 207:
            logger.error(resp.content)
            error_msg = 'Internal Server Error'
            return api_error(501, error_msg)
        info_list = []
        root = ET.fromstring(resp.content)
        for child in root:
            href_text = child.find('{DAV:}href').text
            href_text = unquote(href_text)
            # Skip the entry describing the requested folder itself.
            if href_text == webdav_uri:
                continue
            is_collection = child.find('{DAV:}propstat') \
                                 .find('{DAV:}prop') \
                                 .find('{DAV:}resourcetype') \
                                 .find('{DAV:}collection') is not None
            last_modified = child.find('{DAV:}propstat') \
                                 .find('{DAV:}prop') \
                                 .find('{DAV:}getlastmodified').text
            info = {}
            info['id'] = share_id
            info['name'] = unquote(os.path.basename(href_text.rstrip('/')))
            info['ctime'] = last_modified
            info['shared_by'] = share.shared_by
            if is_collection:
                info['is_dir'] = True
                # Strip the WebDAV root prefix to get a share-relative path.
                info['path'] = href_text.replace(webdav_root_uri.rstrip('/'), '')
            else:
                info['is_dir'] = False
                info['path'] = href_text.replace(webdav_root_uri, '')
            info_list.append(info)
        result = {
            'received_share_list': info_list,
            'parent_dir': posixpath.join('/', share.name, path.lstrip('/'))
        }
        return Response(result)

    def delete(self, request, share_id):
        """
        Delete item shared from other service.

        Sends a SHARE_DECLINED notification to the remote server, then removes
        the local record (best effort: the record is removed even if the
        notification fails).
        """
        if not ENABLE_OCM_VIA_WEBDAV:
            error_msg = 'OCM via webdav feature is not enabled.'
            return api_error(501, error_msg)
        # resource check
        try:
            share = ReceivedShares.objects.get(id=share_id)
        except ReceivedShares.DoesNotExist:
            error_msg = "OCM share {} not found.".format(share_id)
            return api_error(404, error_msg)
        # permission check
        username = request.user.username
        if share.share_with != username:
            error_msg = 'Permission denied.'
            return api_error(403, error_msg)
        # get remote server endpoint
        remote_domain = get_remote_domain_by_shared_by(share.shared_by)
        ocm_endpoint = get_remote_ocm_endpoint(remote_domain)
        if not ocm_endpoint:
            error_msg = 'Internal Server Error'
            return api_error(501, error_msg)
        notifications_url = urljoin(ocm_endpoint, OCM_VIA_WEBDAV_NOTIFICATIONS_URI.lstrip('/'))
        # send SHARE_DECLINED notification
        data = {
            "notification": {
                "message": "Recipient declined the share",
                "sharedSecret": ""
            },
            "notificationType": "SHARE_DECLINED",
            "providerId": "",
            "resourceType": ""
        }
        data['notification']['sharedSecret'] = share.shared_secret
        data['providerId'] = share.provider_id
        data['resourceType'] = share.resource_type
        resp = requests.post(notifications_url, json=data)
        if resp.status_code != 201:
            logger.error('Error occurred when send notification to {}'.format(notifications_url))
            logger.error(resp.content)
        share.delete()
        result = {
            'success': True
        }
        return Response(result)
class DownloadReceivedFileView(APIView):
    """Proxy-download a file from the remote server's WebDAV to the local user."""

    authentication_classes = (TokenAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated,)
    throttle_classes = (UserRateThrottle,)

    def get(self, request):
        """
        Download received file.
        """
        if not ENABLE_OCM_VIA_WEBDAV:
            error_msg = 'OCM via webdav feature is not enabled.'
            return api_error(501, error_msg)
        # parameter check
        share_id = request.GET.get('share_id')
        if not share_id:
            error_msg = 'share_id invalid.'
            return api_error(400, error_msg)
        try:
            share_id = int(share_id)
        except ValueError as e:
            logger.error(e)
            error_msg = 'share_id invalid.'
            return api_error(400, error_msg)
        path = request.GET.get('path')
        if not path:
            error_msg = 'path invalid.'
            return api_error(400, error_msg)
        # resource check
        try:
            share = ReceivedShares.objects.get(id=share_id)
        except ReceivedShares.DoesNotExist:
            error_msg = "OCM share {} not found.".format(share_id)
            return api_error(404, error_msg)
        # permission check: only the recipient may download
        username = request.user.username
        if share.share_with != username:
            error_msg = 'Permission denied.'
            return api_error(403, error_msg)
        # download file via webdav
        remote_domain = get_remote_domain_by_shared_by(share.shared_by)
        headers = get_webdav_auth_headers(share.shared_secret)
        if path == '/':
            # A single shared file is addressed by the WebDAV root itself.
            webdav_href = get_remote_webdav_root_href(remote_domain)
        else:
            webdav_href = urljoin(get_remote_webdav_root_href(remote_domain), quote(path))
        download_file_resp = requests.get(webdav_href, headers=headers)
        # NOTE(review): the whole remote file is buffered in memory before
        # being relayed -- consider a streaming response for large files.
        response = HttpResponse(download_file_resp.content, content_type="application/octet-stream")
        if path == '/':
            filename = share.name
        else:
            filename = os.path.basename(path)
        response['Content-Disposition'] = 'attachment; filename={}'.format(filename)
        return response
class NotificationsView(APIView):
    """Receive OCM notifications (e.g. SHARE_UNSHARED) from remote servers."""

    throttle_classes = (UserRateThrottle,)

    def post(self, request):
        """
        Receive notification from remote server.
        """
        if not ENABLE_OCM_VIA_WEBDAV:
            error_msg = 'OCM via webdav feature is not enabled.'
            return api_error(501, error_msg)
        # Example payload:
        # {'notification': {'messgage': 'file is no longer shared with you',
        #                   'sharedSecret': 'QoVQuBhqphvVYvz'},
        #  'notificationType': 'SHARE_UNSHARED',
        #  'providerId': '13',
        #  'resourceType': 'file'}
        error_result_not_found = {
            "message": "RESOURCE_NOT_FOUND",
            "validationErrors": [
                {
                    "name": "",
                    "message": "NOT_FOUND"
                }
            ]
        }
        provider_id = request.data.get('providerId')
        notification_type = request.data.get('notificationType')
        notification_dict = request.data.get('notification')
        shared_secret = notification_dict.get('sharedSecret')
        if notification_type == 'SHARE_UNSHARED':
            try:
                share = ReceivedShares.objects.get(shared_secret=shared_secret)
            except ReceivedShares.DoesNotExist:
                error_msg = "OCM share with secret {} not found.".format(shared_secret)
                logger.error(error_msg)
                # Bug fix: 'validationErrors' is a list -- the original indexed
                # it with a string key ( ['validationErrors']['name'] ), which
                # raised TypeError at runtime instead of returning the 400.
                error_result_not_found['validationErrors'][0]['name'] = 'sharedSecret'
                return Response(error_result_not_found, status=400)
            if share.provider_id != provider_id:
                error_msg = "OCM share with provider id {} not found.".format(provider_id)
                logger.error(error_msg)
                error_result_not_found['validationErrors'][0]['name'] = 'providerID'
                return Response(error_result_not_found, status=400)
            share.delete()
        return Response({'success': True}, status=status.HTTP_201_CREATED)
|
from oic.utils.time_util import in_a_while
__author__ = 'roland'
# Abbreviation -> response-type name used in profile strings.
RT = {"C": "code", "D": "client cred", "T": "token"}
# Keys used when rendering a profile as a dict (parallel to the profile list).
ATTR = ["profile"]


def to_profile(session, representation="list"):
    """Render the session's profile as a list of response-type names, or as a
    dict keyed by ATTR when representation == "dict"."""
    code = session["profile"].split(".")[0]
    prof = [RT[code]]
    if representation == "list":
        return prof
    if representation == "dict":
        return {ATTR[i]: prof[i] for i in range(len(prof))}
def get_profile_info(session, test_id=None):
    """Summarize the current test session.

    Returns {} when the session has no active conversation or no test id can
    be determined.
    """
    try:
        _conv = session["conv"]
    except KeyError:
        return {}

    try:
        iss = _conv.entity.provider_info["issuer"]
    except (TypeError, KeyError):
        iss = ""

    profile = to_profile(session, "dict")

    if test_id is None:
        try:
            test_id = session["testid"]
        except KeyError:
            return {}

    return {"Issuer": iss, "Profile": profile, "Test ID": test_id,
            "Test description": session.test_flows[test_id]['desc'],
            "Timestamp": in_a_while()}
def make_client(**kw_args):
    """
    Have to get own copy of keyjar

    Builds a client from keyword arguments, copying the keyjar so the new
    client does not share mutable key state with the caller.

    NOTE(review): `Client` and `CLIENT_AUTHN_METHOD` are not imported in this
    module -- confirm they are provided elsewhere, otherwise this raises
    NameError at call time.

    :param kw_args: must contain 'keyjar'; every other key is set as an
        attribute on the client
    :return: (client, dict of keyjar + the attributes that were set)
    """
    c_keyjar = kw_args["keyjar"].copy()
    _cli = Client(client_authn_method=CLIENT_AUTHN_METHOD, keyjar=c_keyjar)
    c_info = {'keyjar': c_keyjar}
    # Copy every remaining keyword onto the client instance and record it.
    for arg, val in list(kw_args.items()):
        if arg in ['keyjar']:
            continue
        setattr(_cli, arg, val)
        c_info[arg] = val
    return _cli, c_info
# def get_check(check_id):
# package = ocheck
# prefix = package.__name__ + "."
# for importer, modname, ispkg in pkgutil.iter_modules(package.__path__,
# prefix):
# module = __import__(modname, fromlist="dummy")
# chk = module.factory(check_id)
# if chk:
# return chk
#
# return aa_check.factory(check_id)
|
from abc import ABC
import grpc
from anilius.core.permission import Permission
from anilius.core.serializer import Serializer
from anilius.core.serializer_field import SerializerField
from anilius.utils.jwt import decode_jwt
from jwt import InvalidAlgorithmError, InvalidSignatureError
class Controller(ABC):
    """Base gRPC controller: deserializes the request, collects invocation
    metadata, validates permissions and extracts the JWT payload.
    """

    # Class-level defaults. The mutable ones (payload, meta) are replaced
    # with per-instance copies in __init__ -- see the bug-fix note there.
    permissions = ()
    payload = {}
    is_authorize = False
    request_serializer = None
    request_deserializer = None
    response_serializer = None
    response_deserializer = None
    _serialized_data = None
    meta = {}

    def __init__(self, request, context, request_deserializer, response_serializer):
        self.request_deserializer = request_deserializer
        self.response_serializer = response_serializer
        self.request = request
        self.context = context
        self.metadata = context.invocation_metadata()
        # Bug fix: parse() mutates self.meta / extract_payload() rebinds
        # self.payload. With only the class-level dicts, every instance
        # shared the same `meta` dict, so one request's metadata leaked
        # into all other controllers. Give each instance its own dicts.
        self.meta = {}
        self.payload = {}
        self.parse()

    def check_permissions(self):
        """Run every permission; report PERMISSION_DENIED on the first failure."""
        for permission in self.permissions:
            if not permission.has_perm(self):
                self.raise_permission()
                break

    def raise_permission(self):
        """Mark the gRPC context as permission-denied."""
        self.context.set_code(grpc.StatusCode.PERMISSION_DENIED)
        self.context.set_details("You have not permission for this action")

    def parse(self):
        """Deserialize the request, collect metadata, and decode the JWT if present."""
        self._serialized_data = self.get_request_serializer(self.request).to_dict()

        for data in self.metadata:
            self.meta[data.key] = data.value

        for permission in self.permissions:
            assert isinstance(
                permission, Permission
            ), "permissions should be type of Permission"

        if self.authorization is not None:
            self.extract_payload()

    def extract_payload(self):
        """Decode the bearer token; on failure the controller stays unauthorized."""
        try:
            self.payload = decode_jwt(self.authorization)
            self.is_authorize = True
        except (ValueError, InvalidAlgorithmError, InvalidSignatureError):
            pass

    def get_valid_data(self, key, default=None):
        """Return the value of a deserialized SerializerField, or `default`."""
        field = self._serialized_data.get(key, None)
        if not isinstance(field, SerializerField):
            return default
        return field.get_value()

    @property
    def get_request_serializer(self):
        # Fall back to the base Serializer when no custom one is configured.
        return (
            self.request_serializer
            if self.request_serializer is not None
            else Serializer
        )

    @property
    def client_id(self):
        return self.meta.get("client-id", None)

    @property
    def client_secret(self):
        return self.meta.get("client-secret", None)

    @property
    def sdk_id(self):
        return self.meta.get("sdk-id", None)

    @property
    def sdk_secret(self):
        return self.meta.get("sdk-secret", None)

    @property
    def authorization(self):
        return self.meta.get("authorization", None)

    def get_response(self):
        """Template hook: subclasses build the response after permission checks."""
        self.check_permissions()
|
import json
import yaml
from aatest import Unknown
from aatest.func import factory as aafactory
__author__ = 'roland'
class MissingParent(Exception):
    """Raised when a flow declares a 'super' that is not among the known flows."""
    pass
def _get_cls(name, factories, use=''):
    """Resolve a class name through the factory registered for `use`, falling
    back to the default ('') factory; raise when no factory knows the name."""
    if use:
        try:
            return factories[use](name)
        except Unknown:
            pass
    try:
        return factories[''](name)
    except Unknown:
        raise Exception("Unknown Class: '{}'".format(name))
def _get_func(dic, func_factory):
    """
    Convert function names into function references

    :param dic: A key, value dictionary where keys are function names
    :param func_factory: Factory function used to find functions
    :return: A dictionary with the keys replaced by references to functions
    """
    def _resolve(fname):
        # Prefer the caller-supplied factory; fall back to the aatest one.
        func = func_factory(fname)
        if func is None:
            func = aafactory(fname)
        if func is None:
            raise Exception("Unknown function: '{}'".format(fname))
        return func

    return {_resolve(fname): val for fname, val in dic.items()}
def parse_yaml_conf(cnf_file, cls_factories, func_factory, use=''):
    """Load a YAML flow configuration and resolve each flow's "sequence".

    Each sequence entry is either a class name (resolved via cls_factories) or
    a one-item {class_name: {func_name: args}} mapping whose functions are
    resolved via func_factory.

    :param cnf_file: path to the YAML configuration file
    :param cls_factories: mapping of factory id -> class factory
    :param func_factory: factory used to resolve function names
    :param use: preferred class-factory id ('' = default)
    :return: the configuration dict with sequences resolved in place
    """
    # Bug fix: `with` guarantees the handle is closed even when parsing
    # raises (the original open/close pair leaked the file on error).
    with open(cnf_file, 'r') as stream:
        yc = yaml.safe_load(stream)
    for tid, spec in yc['Flows'].items():
        seq = []
        for oper in spec["sequence"]:
            if isinstance(oper, dict):  # Must be only one key, value item
                if len(oper) > 1:
                    raise SyntaxError(tid)
                key, val = list(oper.items())[0]
                try:
                    seq.append((_get_cls(key, cls_factories, use),
                                _get_func(val, func_factory)))
                except Exception:
                    # Name the offending flow before re-raising.
                    print('tid:{}'.format(tid))
                    raise
            else:
                try:
                    seq.append(_get_cls(oper, cls_factories, use))
                except Exception:
                    print('tid:{}'.format(tid))
                    raise
        spec["sequence"] = seq
    return yc
def parse_json_conf(cnf_file, cls_factories, func_factory, use=''):
    """Load a JSON flow configuration and resolve each flow's "sequence".

    Mirrors parse_yaml_conf for JSON input; see that function for the
    sequence-entry format.

    :param cnf_file: path to the JSON configuration file
    :param cls_factories: mapping of factory id -> class factory
    :param func_factory: factory used to resolve function names
    :param use: preferred class-factory id ('' = default)
    :return: the configuration dict with sequences resolved in place
    """
    # Bug fix: `with` guarantees the handle is closed even when parsing
    # raises (the original open/close pair leaked the file on error).
    with open(cnf_file, 'r') as stream:
        js = json.load(stream)
    for tid, spec in js['Flows'].items():
        seq = []
        for oper in spec["sequence"]:
            if isinstance(oper, dict):  # Must be only one key, value item
                if len(oper) > 1:
                    raise SyntaxError(tid)
                key, val = list(oper.items())[0]
                try:
                    seq.append((_get_cls(key, cls_factories, use),
                                _get_func(val, func_factory)))
                except Exception:
                    # Name the offending flow before re-raising.
                    print('tid:{}'.format(tid))
                    raise
            else:
                try:
                    seq.append(_get_cls(oper, cls_factories, use))
                except Exception:
                    print('tid:{}'.format(tid))
                    raise
        spec["sequence"] = seq
    return js
class Item(object):
    """A flow-hierarchy node linking a flow description to its parent and children."""

    def __init__(self, parent, name, desc):
        self.parent = parent
        self.desc = desc
        self.name = name
        self.child = []


def build_hierarchy(flows):
    """Arrange flow descriptions into parent/child Items via their 'super' key.

    :param flows: mapping of flow id -> flow description dict
    :return: mapping of flow id -> Item with parent/child links filled in
    :raises MissingParent: when a 'super' refers to an unknown flow id
    """
    items = {fid: Item('', fid, desc) for fid, desc in flows.items()}
    for item in items.values():
        if 'super' not in item.desc:
            continue
        parent_id = item.desc['super']
        try:
            parent = items[parent_id]
        except KeyError:
            raise MissingParent(parent_id)
        parent.child.append(item)
        item.parent = parent
    return items
def flatten(interim):
    """Depth-first flatten of Item trees; children are visited in name order."""
    res = []
    for item in interim:
        res.append(item)
        if item.child:
            ordered_children = sorted(item.child, key=lambda c: c.name)
            res.extend(flatten(ordered_children))
    return res
def sort(display_order, flows):
    """Order flows for display: group top-level flows by '<prefix>-' from
    display_order, sort each group by name, then flatten their subtrees."""
    items = build_hierarchy(flows)
    # Only top-level items (no parent) are matched against the prefixes.
    top_level = [item for item in items.values() if not item.parent]
    interim = []
    for prefix in display_order:
        prefix += '-'
        matching = [item for item in top_level if item.name.startswith(prefix)]
        interim.extend(sorted(matching, key=lambda x: x.name))
    return flatten(interim)
|
################################
# ARIMA DEVELOP AND DIAGNOSTIC #
################################
# This code takes raw (and corrected if applicable) data, applies an ARIMA time series model, and identifies anomalies.
# Exploratory script: site / sensor / year are chosen by (un)commenting the lines below.
import rules_detect
import anomaly_utilities
import modeling_utilities
import matplotlib.pyplot as plt
import pandas as pd
print("ARIMA exploration script begin.")
# DEFINE SITE and VARIABLE #
#########################################
# site = "BlackSmithFork"
# site = "FranklinBasin"
site = "MainStreet"
# site = "Mendon"
# site = "TonyGrove"
# site = "WaterLab"
# sensor = ['temp']
sensor = ['cond']
# sensor = ['ph']
# sensor = ['do']
# sensor = ['turb']
# sensor = ['stage']
year = [2017]
# PARAMETER SELECTION #
#########################################
# Need to use an automated method to generalize getting p,d,q parameters
# These are the results of using auto.ARIMA to determine p,d,q parameters in R
sites = {'BlackSmithFork': 0,
         'FranklinBasin': 1,
         'MainStreet': 2,
         'Mendon': 3,
         'TonyGrove': 4,
         'WaterLab': 5}
sensors = {'cond': 0,
           'do': 1,
           'ph': 2,
           'temp': 3,
           'turb': 4}
# rows are sites, columns are sensors; each cell is an [p, d, q] order triple
pdqParams = [
    [[0, 0, 5], [0, 0, 5], [0, 1, 4], [1, 1, 0], [9, 1, 5]],  # BlackSmithFork
    [[10, 1, 3], [0, 1, 5], [10, 1, 1], [6, 1, 4], [0, 1, 5]],  # FranklinBasin
    [[1, 1, 5], [1, 1, 1], [3, 1, 1], [0, 0, 0], [1, 1, 5]],  # MainStreet
    [[9, 1, 4], [10, 1, 3], [0, 1, 2], [3, 1, 1], [9, 1, 1]],  # Mendon
    [[6, 1, 2], [10, 1, 0], [8, 1, 4], [10, 1, 0], [10, 1, 5]],  # TonyGrove
    [[7, 1, 0], [1, 1, 1], [10, 1, 0], [0, 1, 5], [1, 1, 3]]  # WaterLab
    ]
pdqParam = pd.DataFrame(pdqParams, columns=sensors, index=sites)
print(pdqParam)
p, d, q = pdqParam[sensor[0]][site]
print("p: " + str(p))
print("d: " + str(d))
print("q: " + str(q))
# GET DATA #
#########################################
df_full, sensor_array = anomaly_utilities.get_data(site, sensor, year, path="./LRO_data/")
df = sensor_array[sensor[0]]
# RULES BASED DETECTION #
#########################################
# General sensor ranges for LRO data:
# Temp min: -5, max: 30
# SpCond min: 100, max: 900
# pH min: 7.5, max: 9.0
# do min: 2, max: 16
# NOTE(review): bounds below are for 'cond'; min 150 is tighter than the 100 listed above — confirm intended.
maximum = 900
minimum = 150
df = rules_detect.range_check(df, maximum, minimum)
length = 6
df = rules_detect.persistence(df, length)
size = rules_detect.group_size(df)
df = rules_detect.interpolate(df)
# MODEL CREATION #
#########################################
model_fit, residuals, predictions = modeling_utilities.build_arima_model(df['observed'], p, d, q, summary=True)
# DETERMINE THRESHOLD AND DETECT ANOMALIES #
#########################################
threshold = anomaly_utilities.set_dynamic_threshold(residuals[0], 75, 0.01, 4)
threshold.index = residuals.index
plt.figure()
# plt.plot(df['raw'], 'b', label='original data')
plt.plot(residuals, 'b', label='residuals')
plt.plot(threshold['low'], 'c', label='thresh_low')
plt.plot(threshold['high'], 'm', mfc='none', label='thresh_high')
plt.legend()
plt.ylabel(sensor)
plt.show()
detections = anomaly_utilities.detect_anomalies(df['observed'], predictions, residuals, threshold, summary=True)
# Use events function to widen and number anomalous events
df['labeled_event'] = anomaly_utilities.anomaly_events(df['labeled_anomaly'], 1)
df['detected_anomaly'] = detections['anomaly']
# NOTE(review): 'anomaly' here presumably is the rules-based column added by
# rules_detect above (distinct from 'detected_anomaly') — verify the column name.
df['all_anomalies'] = df.eval('detected_anomaly or anomaly')
df['detected_event'] = anomaly_utilities.anomaly_events(df['all_anomalies'], 1)
# DETERMINE METRICS #
#########################################
anomaly_utilities.compare_events(df, 1)
metrics = anomaly_utilities.metrics(df)
# OUTPUT RESULTS #
#########################################
print('\n\n\nScript report:\n')
print('Sensor: ' + sensor[0])
print('Year: ' + str(year))
print('Parameters: ARIMA(%i, %i, %i)' % (p, d, q))
anomaly_utilities.print_metrics(metrics)
print("\nTime Series ARIMA script end.")
# GENERATE PLOTS #
#########################################
plt.figure()
plt.plot(df['raw'], 'b', label='original data')
plt.plot(predictions, 'c', label='predicted values')
plt.plot(df['raw'][df['labeled_anomaly']], 'mo', mfc='none', label='technician labeled anomalies')
plt.plot(predictions[df['detected_event'] > 0], 'r+', label='machine detected anomalies')
plt.legend()
plt.ylabel(sensor)
plt.show()
|
import asyncio
import base64
import json
import logging
import os
from time import sleep
from typing import Tuple
from aiohttp import (
web,
ClientRequest,
)
from qrcode import QRCode
from python.agent_backchannel import (
AgentBackchannel,
RUN_MODE,
)
from python.utils import (
log_msg,
prompt_loop,
)
from python.storage import (
push_resource,
pop_resource,
)
# from helpers.jsonmapper.json_mapper import JsonMapper
LOGGER = logging.getLogger(__name__)
# seconds to poll for a pending webhook message (see make_agent_GET_request_response)
MAX_TIMEOUT = 5
DEFAULT_BIN_PATH = "../venv/bin"
DEFAULT_PYTHON_PATH = ".."
if RUN_MODE == "docker":
    DEFAULT_BIN_PATH = "./bin"
    DEFAULT_PYTHON_PATH = "."
elif RUN_MODE == "pwd":
    # NOTE(review): identical to the "docker" branch; presumably kept separate
    # in case the two run modes diverge later.
    DEFAULT_BIN_PATH = "./bin"
    DEFAULT_PYTHON_PATH = "."
class MobileAgentBackchannel(AgentBackchannel):
    """Backchannel pairing the interop test harness with a mobile wallet.

    A phone wallet cannot be driven programmatically, so most protocol
    operations print prompts for a human operator (a QR code, "please
    respond" banners) and answer the harness with canned JSON states.
    """

    def __init__(
        self,
        ident: str,
        http_port: int,
        admin_port: int,
        genesis_data: str = None,
        params: dict = None,
    ):
        # params defaults to None (not {}) to avoid the shared mutable
        # default-argument pitfall; callers passing a dict are unaffected.
        super().__init__(ident, http_port, admin_port, genesis_data, params or {})
        # last connection-protocol state reported back to the harness
        self.connection_state = "n/a"

    def get_agent_args(self):
        """Assemble the aca-py command-line arguments for this agent.

        Entries are either bare flags (str) or (flag, value...) tuples.
        """
        result = [
            ("--endpoint", self.endpoint),
            ("--label", self.label),
            # "--auto-ping-connection",
            # "--auto-accept-invites",
            # "--auto-accept-requests",
            # "--auto-respond-messages",
            ("--inbound-transport", "http", "0.0.0.0", str(self.http_port)),
            ("--outbound-transport", "http"),
            ("--admin", "0.0.0.0", str(self.admin_port)),
            "--admin-insecure-mode",
            "--public-invites",
            ("--wallet-type", self.wallet_type),
            ("--wallet-name", self.wallet_name),
            ("--wallet-key", self.wallet_key),
        ]
        if self.get_acapy_version_as_float() > 56:
            # NOTE(review): appended as one tuple, so both entries are flags
            # grouped together; verify the arg flattening treats them as two flags.
            result.append(("--auto-provision", "--recreate-wallet"))
        if self.genesis_data:
            result.append(("--genesis-transactions", self.genesis_data))
        if self.seed:
            result.append(("--seed", self.seed))
        if self.storage_type:
            result.append(("--storage-type", self.storage_type))
        if self.postgres:
            result.extend(
                [
                    ("--wallet-storage-type", "postgres_storage"),
                    ("--wallet-storage-config", json.dumps(self.postgres_config)),
                    ("--wallet-storage-creds", json.dumps(self.postgres_creds)),
                ]
            )
        if self.webhook_url:
            result.append(("--webhook-url", self.webhook_url))
        # This code for Tails Server is included here because aca-py does not support the env var directly yet.
        # when it does (and there is talk of supporting YAML) then this code can be removed.
        if os.getenv("TAILS_SERVER_URL") is not None:
            # if the env var is set for tails server then use that.
            result.append(("--tails-server-base-url", os.getenv("TAILS_SERVER_URL")))
        else:
            # if the tails server env is not set use the gov.bc TEST tails server.
            result.append(
                (
                    "--tails-server-base-url",
                    "https://tails-server-test.pathfinder.gov.bc.ca",
                )
            )
        if os.getenv("EMIT-NEW-DIDCOMM-PREFIX") is not None:
            # opt into the new DIDComm message prefix when requested via env var
            result.append("--emit-new-didcomm-prefix")
        if os.getenv("EMIT-NEW-DIDCOMM-MIME-TYPE") is not None:
            # opt into the new DIDComm mime type when requested via env var
            result.append("--emit-new-didcomm-mime-type")
        # This code for log level is included here because aca-py does not support the env var directly yet.
        # when it does (and there is talk of supporting YAML) then this code can be removed.
        if os.getenv("LOG_LEVEL") is not None:
            result.append(("--log-level", os.getenv("LOG_LEVEL")))
        # if self.extra_args:
        #    result.extend(self.extra_args)
        return result

    async def listen_webhooks(self, webhook_port):
        """Start an aiohttp server receiving aca-py webhooks on webhook_port."""
        self.webhook_port = webhook_port
        if RUN_MODE == "pwd":
            self.webhook_url = f"http://localhost:{str(webhook_port)}/webhooks"
        else:
            self.webhook_url = (
                f"http://{self.external_host}:{str(webhook_port)}/webhooks"
            )
        app = web.Application()
        app.add_routes([web.post("/webhooks/topic/{topic}/", self._receive_webhook)])
        runner = web.AppRunner(app)
        await runner.setup()
        self.webhook_site = web.TCPSite(runner, "0.0.0.0", webhook_port)
        await self.webhook_site.start()
        print("Listening to web_hooks on port", webhook_port)

    async def _receive_webhook(self, request: ClientRequest):
        """aiohttp handler: route an incoming webhook to handle_webhook."""
        topic = request.match_info["topic"]
        payload = await request.json()
        await self.handle_webhook(topic, payload)
        # TODO web hooks don't require a response???
        return web.Response(text="")

    async def handle_webhook(self, topic: str, payload):
        """Dispatch a webhook payload to the handle_<topic> method, if any."""
        if topic != "webhook":  # would recurse
            handler = f"handle_{topic}"
            # Remove this handler change when bug is fixed.
            if handler == "handle_oob-invitation":
                handler = "handle_oob_invitation"
            method = getattr(self, handler, None)
            # put a log message here
            log_msg("Passing webhook payload to handler " + handler)
            if method:
                await method(payload)
            else:
                log_msg(
                    f"Error: agent {self.ident} "
                    f"has no method {handler} "
                    f"to handle webhook on topic {topic}"
                )
        else:
            log_msg(
                "in webhook, topic is: " + topic + " payload is: " + json.dumps(payload)
            )

    async def handle_connections(self, message):
        """Store a connection webhook, keyed by whichever id identifies the exchange."""
        if "invitation_msg_id" in message:
            # This is an did-exchange message based on a Non-Public DID invitation
            invitation_id = message["invitation_msg_id"]
            push_resource(invitation_id, "didexchange-msg", message)
        elif "request_id" in message:
            # This is a did-exchange message based on a Public DID non-invitation
            request_id = message["request_id"]
            push_resource(request_id, "didexchange-msg", message)
        else:
            connection_id = message["connection_id"]
            push_resource(connection_id, "connection-msg", message)
        log_msg("Received a Connection Webhook message: " + json.dumps(message))

    async def handle_issue_credential(self, message):
        """Store an issue-credential webhook keyed by thread id."""
        thread_id = message["thread_id"]
        push_resource(thread_id, "credential-msg", message)
        log_msg("Received Issue Credential Webhook message: " + json.dumps(message))
        if "revocation_id" in message:  # also push as a revocation message
            push_resource(thread_id, "revocation-registry-msg", message)
            log_msg("Issue Credential Webhook message contains revocation info")

    async def handle_present_proof(self, message):
        """Store a present-proof webhook keyed by thread id."""
        thread_id = message["thread_id"]
        push_resource(thread_id, "presentation-msg", message)
        log_msg("Received a Present Proof Webhook message: " + json.dumps(message))

    async def handle_revocation_registry(self, message):
        """Store a revocation-registry webhook keyed by cred def id."""
        # No thread id in the webhook for revocation registry messages
        cred_def_id = message["cred_def_id"]
        push_resource(cred_def_id, "revocation-registry-msg", message)
        log_msg("Received Revocation Registry Webhook message: " + json.dumps(message))

    async def handle_oob_invitation(self, message):
        """Store an out-of-band invitation webhook keyed by invitation id."""
        # No thread id in the webhook for revocation registry messages
        invitation_id = message["invitation_id"]
        # NOTE(review): "oob-inviation-msg" is misspelled, but the pop side must
        # use the identical key — fix both together, not just here.
        push_resource(invitation_id, "oob-inviation-msg", message)
        log_msg(
            "Received Out of Band Invitation Webhook message: " + json.dumps(message)
        )

    async def handle_problem_report(self, message):
        """Store a problem-report webhook keyed by thread id."""
        thread_id = message["thread_id"]
        push_resource(thread_id, "problem-report-msg", message)
        log_msg("Received Problem Report Webhook message: " + json.dumps(message))

    async def make_agent_POST_request(
        self, op, rec_id=None, data=None, text=False, params=None
    ) -> Tuple[int, str]:
        """Handle a POST from the harness; prompt the operator and return a canned state.

        Returns a (status, json-text) pair; unknown topics get 501.
        """
        print("make_agent_POST_request:", op)
        if op["topic"] == "connection":
            operation = op["operation"]
            if operation == "receive-invitation":
                self.connection_state = "invited"
                print(
                    "================================================================="
                )
                # render the invitation as a base64 url + QR code for the phone to scan
                message_bytes = json.dumps(data).encode("ascii")
                base64_bytes = base64.b64encode(message_bytes)
                base64_message = base64_bytes.decode("ascii")
                invitation_url = data["serviceEndpoint"] + "?c_i=" + base64_message
                qr = QRCode(border=1)
                qr.add_data(invitation_url)
                log_msg(
                    "Use the following JSON to accept the invite from another demo agent."
                    " Or use the QR code to connect from a mobile agent."
                )
                log_msg(json.dumps(data), label="Invitation Data:", color=None)
                qr.print_ascii(invert=True)
                log_msg("If you can't scan the QR code here is the url.")
                print("Invitation url:", invitation_url)
                print(
                    "================================================================="
                )
                return (
                    200,
                    '{"result": "ok", "connection_id": "1", "state": "'
                    + self.connection_state
                    + '"}',
                )
            elif (
                operation == "accept-invitation"
                or operation == "accept-request"
                or operation == "remove"
                or operation == "start-introduction"
                or operation == "send-ping"
            ):
                self.connection_state = "requested"
                return (
                    200,
                    '{"result": "ok", "connection_id": "1", "state": "'
                    + self.connection_state
                    + '"}',
                )
        elif op["topic"] == "issue-credential":
            operation = op["operation"]
            if operation == "send-request":
                print(
                    "================================================================="
                )
                print("Please respond to the Credential Offer!")
                print(
                    "================================================================="
                )
                return (
                    200,
                    '{"result": "ok", "thread_id": "1", "state": "request-sent"}',
                )
            elif operation == "store":
                return (
                    200,
                    '{"result": "ok", "thread_id": "1", "credential_id": "'
                    + rec_id
                    + '", "state": "done"}',
                )
            else:
                return (200, '{"result": "ok", "thread_id": "1", "state": "N/A"}')
        elif op["topic"] == "proof":
            operation = op["operation"]
            if operation == "send-presentation":
                print(
                    "================================================================="
                )
                print("Please respond to the Proof Request!")
                print(
                    "================================================================="
                )
                return (
                    200,
                    '{"result": "ok", "thread_id": "1", "state": "presentation-sent"}',
                )
            else:
                return (200, '{"result": "ok", "thread_id": "1", "state": "N/A"}')
        return (501, "501: Not Implemented\n\n".encode("utf8"))

    async def make_agent_GET_request(
        self, op, rec_id=None, text=False, params=None
    ) -> Tuple[int, str]:
        """Answer a harness GET with a canned state for the given topic."""
        print("make_agent_GET_request:", op)
        if op["topic"] == "status":
            status = 200 if self.ACTIVE else 418
            status_msg = "Active" if self.ACTIVE else "Inactive"
            return (status, json.dumps({"status": status_msg}))
        elif op["topic"] == "connection":
            return (200, '{"result": "ok", "connection_id": "1", "state": "N/A"}')
        elif op["topic"] == "issue-credential":
            # NOTE(review): rec_id is assumed non-None for these topics;
            # a None would raise TypeError on concatenation.
            return (
                200,
                '{"result": "ok", "credential_id": "' + rec_id + '", "state": "N/A"}',
            )
        elif op["topic"] == "credential":
            return (
                200,
                '{"result": "ok", "credential_id": "' + rec_id + '", "state": "N/A"}',
            )
        elif op["topic"] == "proof":
            return (
                200,
                '{"result": "ok", "thread_id": "' + rec_id + '", "state": "N/A"}',
            )
        if op["topic"] == "version":
            return (200, '{"result": "ok"}')
        return (501, "501: Not Implemented\n\n".encode("utf8"))

    async def make_agent_DELETE_request(
        self, op, rec_id=None, data=None, text=False, params=None
    ) -> Tuple[int, str]:
        """DELETE is not supported by the mobile backchannel."""
        return (501, "501: Not Implemented\n\n".encode("utf8"))

    async def make_agent_GET_request_response(
        self, topic, rec_id=None, text=False, params=None
    ) -> Tuple[int, str]:
        """Return a stored webhook message for topic/rec_id, polling up to MAX_TIMEOUT s."""
        if topic == "connection" and rec_id:
            connection_msg = pop_resource(rec_id, "connection-msg")
            i = 0
            while connection_msg is None and i < MAX_TIMEOUT:
                sleep(1)
                connection_msg = pop_resource(rec_id, "connection-msg")
                i = i + 1
            resp_status = 200
            if connection_msg:
                resp_text = json.dumps(connection_msg)
            else:
                resp_text = "{}"
            return (resp_status, resp_text)
        return (501, "501: Not Implemented\n\n".encode("utf8"))

    def map_test_json_to_admin_api_json(self, topic, operation, data):
        """Translate harness-shaped JSON into the aca-py admin API shape."""
        # If the translation of the json get complicated in the future we might want to consider a switch to JsonMapper or equivalent.
        # json_mapper = JsonMapper()
        # map_specification = {
        #     'name': ['person_name']
        # }
        # JsonMapper(test_json).map(map_specification)
        if topic == "proof":
            if operation == "send-request" or operation == "create-request":
                if operation == "send-proposal":
                    request_type = "presentation_proposal"
                    attachment = "presentations~attach"
                else:
                    request_type = "proof_request"
                    attachment = "request_presentations~attach"
                if (
                    data.get("presentation_proposal", {})
                    .get(attachment, {})
                    .get("data", {})
                    .get("requested_attributes")
                    is None
                ):
                    requested_attributes = {}
                else:
                    requested_attributes = data["presentation_proposal"][attachment][
                        "data"
                    ]["requested_attributes"]
                if (
                    data.get("presentation_proposal", {})
                    .get(attachment, {})
                    .get("data", {})
                    .get("requested_predicates")
                    is None
                ):
                    requested_predicates = {}
                else:
                    requested_predicates = data["presentation_proposal"][attachment][
                        "data"
                    ]["requested_predicates"]
                if (
                    data.get("presentation_proposal", {})
                    .get(attachment, {})
                    .get("data", {})
                    .get("name")
                    is None
                ):
                    proof_request_name = "test proof"
                else:
                    proof_request_name = data["presentation_proposal"][attachment][
                        "data"
                    ]["name"]
                if (
                    data.get("presentation_proposal", {})
                    .get(attachment, {})
                    .get("data", {})
                    .get("version")
                    is None
                ):
                    proof_request_version = "1.0"
                else:
                    proof_request_version = data["presentation_proposal"][attachment][
                        "data"
                    ]["version"]
                if (
                    data.get("presentation_proposal", {})
                    .get(attachment, {})
                    .get("data", {})
                    .get("non_revoked")
                    is None
                ):
                    non_revoked = None
                else:
                    non_revoked = data["presentation_proposal"][attachment]["data"][
                        "non_revoked"
                    ]
                if "connection_id" in data:
                    admin_data = {
                        "comment": data["presentation_proposal"]["comment"],
                        "trace": False,
                        "connection_id": data["connection_id"],
                        request_type: {
                            "name": proof_request_name,
                            "version": proof_request_version,
                            "requested_attributes": requested_attributes,
                            "requested_predicates": requested_predicates,
                        },
                    }
                else:
                    admin_data = {
                        "comment": data["presentation_proposal"]["comment"],
                        "trace": False,
                        request_type: {
                            "name": proof_request_name,
                            "version": proof_request_version,
                            "requested_attributes": requested_attributes,
                            "requested_predicates": requested_predicates,
                        },
                    }
                if non_revoked is not None:
                    admin_data[request_type]["non_revoked"] = non_revoked
            # Make special provisions for proposal. The names are changed in this operation. Should be consistent imo.
            # this whole condition can be removed for V2.0 of the protocol. It will look like more of a send-request in 2.0.
            elif operation == "send-proposal":
                request_type = "presentation_proposal"
                if (
                    data.get("presentation_proposal", {}).get("requested_attributes")
                    is None
                ):
                    requested_attributes = []
                else:
                    requested_attributes = data["presentation_proposal"][
                        "requested_attributes"
                    ]
                if (
                    data.get("presentation_proposal", {}).get("requested_predicates")
                    is None
                ):
                    requested_predicates = []
                else:
                    requested_predicates = data["presentation_proposal"][
                        "requested_predicates"
                    ]
                admin_data = {
                    "comment": data["presentation_proposal"]["comment"],
                    "trace": False,
                    request_type: {
                        "@type": data["presentation_proposal"]["@type"],
                        "attributes": requested_attributes,
                        "predicates": requested_predicates,
                    },
                }
                if "connection_id" in data:
                    admin_data["connection_id"] = data["connection_id"]
            elif operation == "send-presentation":
                if data.get("requested_attributes") is None:
                    requested_attributes = {}
                else:
                    requested_attributes = data["requested_attributes"]
                if data.get("requested_predicates") is None:
                    requested_predicates = {}
                else:
                    requested_predicates = data["requested_predicates"]
                if data.get("self_attested_attributes") is None:
                    self_attested_attributes = {}
                else:
                    self_attested_attributes = data["self_attested_attributes"]
                admin_data = {
                    "comment": data["comment"],
                    "requested_attributes": requested_attributes,
                    "requested_predicates": requested_predicates,
                    "self_attested_attributes": self_attested_attributes,
                }
            else:
                admin_data = data
            # Add on the service decorator if it exists.
            if "~service" in data:
                admin_data["~service"] = data["~service"]
            return admin_data

    def agent_state_translation(self, topic, operation, data):
        # This method is used to translate the agent states passes back in the responses of operations into the states the
        # test harness expects. The test harness expects states to be as they are written in the Protocol's RFC.
        # the following is what the tests/rfc expect vs what aca-py communicates
        # Connection Protocol:
        # Tests/RFC         |   Aca-py
        # invited           |   invitation
        # requested         |   request
        # responded         |   response
        # complete          |   active
        #
        # Issue Credential Protocol:
        # Tests/RFC         |   Aca-py
        # proposal-sent     |   proposal_sent
        # proposal-received |   proposal_received
        # offer-sent        |   offer_sent
        # offer_received    |   offer_received
        # request-sent      |   request_sent
        # request-received  |   request_received
        # credential-issued |   issued
        # credential-received | credential_received
        # done              |   credential_acked
        #
        # Present Proof Protocol:
        # Tests/RFC         |   Aca-py
        resp_json = json.loads(data)
        # Check to see if state is in the json
        if "state" in resp_json:
            agent_state = resp_json["state"]
            # if "did_exchange" in topic:
            #     if "rfc23_state" in resp_json:
            #         rfc_state = resp_json["rfc23_state"]
            #     else:
            #         rfc_state = resp_json["connection"]["rfc23_state"]
            #     data = data.replace('"state"' + ": " + '"' + agent_state + '"', '"state"' + ": " + '"' + rfc_state + '"')
            # else:
            # Default translation dict; the their_role check below may override it.
            # Initialising here also avoids an unbound-name error for
            # out-of-band / did-exchange payloads carrying no their_role field.
            de_state_trans_method = self.didExchangeResponderStateTranslationDict
            # Check the thier_role property in the data and set the calling method to swap states to the correct role for DID Exchange
            if "their_role" in data:
                # if resp_json["connection"]["their_role"] == "invitee":
                if "invitee" in data:
                    de_state_trans_method = (
                        self.didExchangeResponderStateTranslationDict
                    )
                elif "inviter" in data:
                    de_state_trans_method = (
                        self.didExchangeRequesterStateTranslationDict
                    )
            if topic == "connection":
                # if the response contains invitation id, swap out the connection states for the did exchange states
                if "invitation_msg_id" in data:
                    data = data.replace(
                        '"state"' + ": " + '"' + agent_state + '"',
                        '"state"'
                        + ": "
                        + '"'
                        + de_state_trans_method[agent_state]
                        + '"',
                    )
                else:
                    data = data.replace(
                        agent_state, self.connectionStateTranslationDict[agent_state]
                    )
            elif topic == "issue-credential":
                data = data.replace(
                    agent_state, self.issueCredentialStateTranslationDict[agent_state]
                )
            elif topic == "proof":
                data = data.replace(
                    '"state"' + ": " + '"' + agent_state + '"',
                    '"state"'
                    + ": "
                    + '"'
                    + self.presentProofStateTranslationDict[agent_state]
                    + '"',
                )
            elif topic == "out-of-band":
                data = data.replace(
                    '"state"' + ": " + '"' + agent_state + '"',
                    '"state"' + ": " + '"' + de_state_trans_method[agent_state] + '"',
                )
            elif topic == "did-exchange":
                data = data.replace(
                    '"state"' + ": " + '"' + agent_state + '"',
                    '"state"' + ": " + '"' + de_state_trans_method[agent_state] + '"',
                )
        return data

    async def get_agent_operation_acapy_version_based(
        self, topic, operation, rec_id=None, data=None
    ):
        """Build the admin API path (and adjusted payload) for the running aca-py version."""
        # Admin api calls may change with acapy releases. For example revocation related calls change
        # between 0.5.4 and 0.5.5. To be able to handle this the backchannel is made aware of the acapy version
        # and constructs the calls based off that version
        # construct some number to compare to with > or < instead of listing out the version number
        comparibleVersion = self.get_acapy_version_as_float()
        if topic == "revocation":
            if operation == "revoke":
                if comparibleVersion > 54:
                    agent_operation = "/revocation/" + operation
                    if "cred_ex_id" in data:
                        admindata = {
                            # fixed: was "cred_ex_ed" — the admin API expects "cred_ex_id"
                            "cred_ex_id": data["cred_ex_id"],
                        }
                    else:
                        admindata = {
                            "cred_rev_id": data["cred_rev_id"],
                            "rev_reg_id": data["rev_registry_id"],
                            "publish": str(data["publish_immediately"]).lower(),
                        }
                    data = admindata
                else:
                    agent_operation = "/issue-credential/" + operation
                    if (
                        data is not None
                    ):  # Data should be included with 0.5.4 or lower acapy. Then it takes them as inline parameters.
                        cred_rev_id = data["cred_rev_id"]
                        rev_reg_id = data["rev_registry_id"]
                        publish = data["publish_immediately"]
                        # fixed: rev_reg_id was concatenated into the publish
                        # value as well, corrupting the query string
                        agent_operation = (
                            agent_operation
                            + "?cred_rev_id="
                            + cred_rev_id
                            + "&rev_reg_id="
                            + rev_reg_id
                            + "&publish="
                            + str(publish).lower()
                        )
                        data = None
            elif operation == "credential-record":
                agent_operation = "/revocation/" + operation
                if "cred_ex_id" in data:
                    cred_ex_id = data["cred_ex_id"]
                    agent_operation = agent_operation + "?cred_ex_id=" + cred_ex_id
                else:
                    cred_rev_id = data["cred_rev_id"]
                    rev_reg_id = data["rev_registry_id"]
                    agent_operation = (
                        agent_operation
                        + "?cred_rev_id="
                        + cred_rev_id
                        + "&rev_reg_id="
                        + rev_reg_id
                    )
                    data = None
        # elif (topic == "credential"):
        return agent_operation, data
async def main(start_port: int, show_timing: bool = False, interactive: bool = True):
    """Start the mobile backchannel and wait for exit.

    :param start_port: base port; backchannel listens here, agent ports follow
    :param show_timing: accepted for interface compatibility (unused here)
    :param interactive: prompt for 'X' to exit; otherwise wait on pending tasks
    """
    genesis = None
    agent = None
    try:
        print("Starting mobile backchannel ...")
        agent = MobileAgentBackchannel(
            "mobile", start_port + 1, start_port + 2, genesis_data=genesis
        )
        # start backchannel (common across all types of agents)
        await agent.listen_backchannel(start_port)
        agent.activate()
        # now wait ...
        if interactive:
            async for option in prompt_loop("(X) Exit? [X] "):
                if option is None or option in "xX":
                    break
        else:
            print("Press Ctrl-C to exit ...")
            # asyncio.Task.all_tasks() was removed in Python 3.9;
            # asyncio.all_tasks() is the supported equivalent.
            remaining_tasks = asyncio.all_tasks()
            await asyncio.gather(*remaining_tasks)
    finally:
        terminated = True
        try:
            if agent:
                await agent.terminate()
        except Exception:
            LOGGER.exception("Error terminating agent:")
            terminated = False
        await asyncio.sleep(0.1)
        if not terminated:
            # hard-exit so a stuck agent process cannot keep us alive
            os._exit(1)
def str2bool(v):
    """Parse a boolean-ish command-line value.

    Accepts real bools unchanged and the usual yes/no spellings
    (case-insensitive).

    :raises argparse.ArgumentTypeError: if the value is not recognised
    """
    # Local import: at module level argparse is only imported inside the
    # __main__ guard, so calling this from an importing module used to
    # raise NameError instead of the intended ArgumentTypeError.
    import argparse

    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("Boolean value expected.")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Runs a Faber demo agent.")
parser.add_argument(
"-p",
"--port",
type=int,
default=8020,
metavar=("<port>"),
help="Choose the starting port number to listen on",
)
parser.add_argument(
"-i",
"--interactive",
type=str2bool,
default=True,
metavar=("<interactive>"),
help="Start agent interactively",
)
args = parser.parse_args()
try:
asyncio.get_event_loop().run_until_complete(
main(start_port=args.port, interactive=args.interactive)
)
except KeyboardInterrupt:
os._exit(1)
|
"""
Django settings for Twitter Scraper App project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import environ
# config/ is two levels below the project root
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
BASE_DIR = ROOT_DIR
APPS_DIR = ROOT_DIR / "twitter_scraper"
# SECURITY WARNING: don't run with debug turned on in production!
env = environ.Env()
DEBUG = env.bool("DJANGO_DEBUG", False)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# no default on purpose: startup fails fast if the env var is missing
SECRET_KEY = env.str("DJANGO_SECRET_KEY")
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS")
# Application definition
DJANGO_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
]
THIRD_PARTY_APPS = [
    "rest_framework",
    "health_check",  # required
    "health_check.db",  # stock Django health checkers
    "health_check.cache",
    "health_check.contrib.migrations",
]
LOCAL_APPS = [
    "twitter_scraper.users",
    "twitter_scraper.scraper",
    "twitter_scraper.healthcheck.apps.HealthcheckConfig",
    # Your stuff: custom apps go here
]
# dev-only apps (e.g. debug toolbar) injected via env so prod stays clean
LOCAL_DEV_APPS = env.list("DJANGO_LOCAL_DEV_APPS", default=[])
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS + LOCAL_DEV_APPS
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "config.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [str(APPS_DIR / "templates")],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
WSGI_APPLICATION = "config.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# DATABASE_URL is parsed by django-environ (e.g. postgres://user:pass@host/db)
DATABASES = {"default": env.db("DATABASE_URL")}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",  # noqa
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
    },
]
# console handler only when DEBUG; error.log file only in production
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "verbose": {
            "format": "{levelname} {asctime} {module} {process:d} {thread:d} {message}",
            "style": "{",
        },
        "simple": {
            "format": "{levelname} {message}",
            "style": "{",
        },
    },
    "filters": {
        "require_debug_false": {
            "()": "django.utils.log.RequireDebugFalse",
        },
        "require_debug_true": {
            "()": "django.utils.log.RequireDebugTrue",
        },
    },
    "handlers": {
        "console": {
            "level": "DEBUG",
            "filters": ["require_debug_true"],
            "class": "logging.StreamHandler",
            "formatter": "simple",
        },
        "file": {
            "level": "ERROR",
            "filters": ["require_debug_false"],
            "class": "logging.FileHandler",
            "filename": "error.log",
            "formatter": "verbose",
        },
    },
    "loggers": {
        "twitter_scraper": {
            "handlers": ["console", "file"],
            "level": "DEBUG",
        },
    },
}
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "Europe/Istanbul"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS  # noqa
STATICFILES_DIRS = [str(APPS_DIR / "static")]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
    "django.contrib.staticfiles.finders.FileSystemFinder",
    "django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
AUTH_USER_MODEL = "users.User"
# default page size for the tweet listing endpoint, overridable via env
TWEET_LISTING_DEFAULT_LIMIT = env.int("TWEET_LISTING_DEFAULT_LIMIT", 30)
NOSE_ARGS = [
    "--nocapture",
    "--nologcapture",
]
REST_FRAMEWORK = {
    "DEFAULT_RENDERER_CLASSES": [
        "rest_framework.renderers.JSONRenderer",
        "rest_framework.renderers.BrowsableAPIRenderer",
    ],
}
|
# Generated by Django 3.1.13 on 2021-10-15 23:30
from django.db import migrations, models
class Migration(migrations.Migration):
    # Tightens menus.Module.name (custom duplicate error message, max_length 15)
    # and backs uniqueness with a database-level constraint.
    dependencies = [
        ('menus', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='module',
            name='name',
            field=models.CharField(error_messages={'unique': 'The module already exists'}, max_length=15),
        ),
        migrations.AddConstraint(
            model_name='module',
            constraint=models.UniqueConstraint(fields=('name',), name='unique_module'),
        ),
    ]
|
"""
==================================================
Transport Metabolism and Gene Expression Composite
==================================================
"""
import os
import argparse
from vivarium.library.units import units
from vivarium.core.process import Generator
from vivarium.core.emitter import path_timeseries_from_embedded_timeseries
from vivarium.core.composition import (
simulate_compartment_in_experiment,
save_flat_timeseries,
load_timeseries,
assert_timeseries_close,
)
from vivarium.plots.simulation_output import plot_simulation_output
# processes
from vivarium.processes.meta_division import MetaDivision
from vivarium.processes.tree_mass import TreeMass
from vivarium_cell.processes.division_volume import DivisionVolume
from vivarium_cell.processes.metabolism import (
Metabolism,
get_minimal_media_iAF1260b,
)
from vivarium_cell.processes.metabolism import get_iAF1260b_config as get_iAF1260b_path_config
from vivarium_cell.processes.convenience_kinetics import ConvenienceKinetics
from vivarium_cell.processes.ode_expression import ODE_expression
# plots
from chemotaxis.plots.transport_metabolism import plot_glc_lcts_environment
# directories
from chemotaxis import COMPOSITE_OUT_DIR, REFERENCE_DATA_DIR
NAME = 'transport_metabolism'
def get_iAF1260b_config():
    """
    Build the :py:class:`Metabolism` configuration for the iAF1260b BiGG
    model, adding the initial mass and flux-bound tolerances for the
    glucose/lactose exchange reactions.
    """
    base_config = get_iAF1260b_path_config()
    base_config.update({
        'initial_mass': 1339.0,  # fg of metabolite pools
        # flux bound tolerance for reactions in glucose_lactose_transport_config
        'tolerance': {
            'EX_glc__D_e': [1.05, 1.0],
            'EX_lcts_e': [1.05, 1.0],
        },
    })
    return base_config
def get_lacY_expression_config():
    """
    Build the :py:class:`ODE_expression` configuration for expression of
    the glucose and lactose transporters: lacY transcription/translation
    rates, degradation, regulation, transcription leak, and initial state.
    """
    return {
        'transcription_rates': {'lacy_RNA': 5e-6},
        'translation_rates': {'LacY': 2e-4},
        'degradation_rates': {
            'lacy_RNA': 3e-3,  # a single RNA lasts about 5 minutes
            'LacY': 3e-5,      # proteins degrade ~100x slower
        },
        'protein_map': {'LacY': 'lacy_RNA'},
        # regulation: glc above its limiting concentration, or low internal
        # lcts (hypothesized to disinhibit lacY transcription)
        'regulators': [
            ('external', 'glc__D_e'),
            ('internal', 'lcts_p')],
        'regulation': {
            'lacy_RNA': 'if [(external, glc__D_e) > 0.05 '
                        'or (internal, lcts_p) < 0.05]'},
        'transcription_leak': {
            'rate': 5e-4,
            'magnitude': 5e-7},
        'initial_state': {
            'internal': {
                'lacy_RNA': 0.0,
                'LacY': 0.0},
            'external': {
                'glc__D_e': 8.0,
                'lcts_e': 8.0}},
    }
def get_glucose_lactose_transport_config():
    """
    Build the :py:class:`ConvenienceKinetics` configuration for simplified
    glucose and lactose transport. Glucose uptake simplifies the PTS/GalP
    system to a single uptake kinetic with ``glc__D_e_external`` as the
    only cofactor.
    """
    # stoichiometry and catalysts of the two uptake reactions
    reactions = {
        'EX_glc__D_e': {
            'stoichiometry': {
                ('internal', 'g6p_c'): 1.0,
                ('external', 'glc__D_e'): -1.0,
                ('internal', 'pep_c'): -1.0,
                ('internal', 'pyr_c'): 1.0},
            'is reversible': False,
            'catalyzed by': [('internal', 'EIIglc')]},
        'EX_lcts_e': {
            'stoichiometry': {
                ('external', 'lcts_e'): -1.0,
                ('internal', 'lcts_p'): 1.0},
            'is reversible': False,
            'catalyzed by': [('internal', 'LacY')]}}

    # Michaelis-Menten style parameters per reaction and enzyme
    kinetics = {
        'EX_glc__D_e': {
            ('internal', 'EIIglc'): {
                ('external', 'glc__D_e'): 1e-1,  # (mM) k_m for glc
                ('internal', 'pep_c'): None,  # k_m = None makes a reactant non-limiting
                'kcat_f': 1e2}},
        'EX_lcts_e': {
            ('internal', 'LacY'): {
                ('external', 'lcts_e'): 1e-1,  # (mM) k_m for lcts
                'kcat_f': 1e2}}}

    initial_state = {
        'internal': {
            'EIIglc': 1.0e-3,  # (mmol/L)
            'g6p_c': 0.0,
            'pep_c': 1.0e-1,
            'pyr_c': 0.0,
            'LacY': 0,
            'lcts_p': 0.0},
        'external': {
            'glc__D_e': 10.0,
            'lcts_e': 10.0}}

    ports = {
        'internal': ['g6p_c', 'pep_c', 'pyr_c', 'EIIglc', 'LacY', 'lcts_p'],
        'external': ['glc__D_e', 'lcts_e']}

    return {
        'reactions': reactions,
        'kinetic_parameters': kinetics,
        'initial_state': initial_state,
        'ports': ports}
def get_metabolism_initial_external_state(
        scale_concentration=1,
        override=None,
):
    """
    Return the initial external molecule concentrations used by the
    iAF1260b metabolism process.

    Args:
        scale_concentration: factor applied to every concentration taken
            from the metabolism process's default external state.
        override: optional mapping of molecule id -> concentration that
            replaces the scaled value for those molecules.

    Returns:
        dict mapping molecule id to concentration.
    """
    # Fix: the original used a mutable default argument (override={}),
    # which is shared across calls; use a None sentinel instead.
    if override is None:
        override = {}

    # get external state from iAF1260b metabolism
    config = get_iAF1260b_config()
    metabolism = Metabolism(config)
    molecules = {
        mol_id: conc * scale_concentration
        for mol_id, conc in metabolism.initial_state['external'].items()
    }
    molecules.update(override)
    return molecules
class TransportMetabolismExpression(Generator):
    """Transport/Metabolism/Expression composite.

    Metabolism is an FBA BiGG model, transport is a kinetic model with
    convenience kinetics, and gene expression is an ODE model.
    """
    name = NAME
    # NOTE(review): these nested config dicts are built once at class
    # definition and generate_processes mutates config['metabolism'] below;
    # this is safe only if Generator copies defaults per instance — confirm.
    defaults = {
        'boundary_path': ('boundary',),
        'agents_path': ('agents',),
        'daughter_path': tuple(),
        'fields_path': ('fields',),
        'dimensions_path': ('dimensions',),
        'division': {},
        'transport': get_glucose_lactose_transport_config(),
        'metabolism': get_iAF1260b_config(),
        'expression': get_lacY_expression_config(),
        'divide': True,
    }

    def __init__(self, config=None):
        super(TransportMetabolismExpression, self).__init__(config)

    def generate_processes(self, config):
        """Build the process instances, keyed by the names used in the topology."""
        daughter_path = config['daughter_path']
        agent_id = config['agent_id']

        # Transport
        transport = ConvenienceKinetics(config['transport'])

        # Metabolism
        # get target fluxes from transport, and update constrained_reaction_ids
        # so FBA bounds follow the kinetic transport rates
        metabolism_config = config['metabolism']
        target_fluxes = transport.kinetic_rate_laws.reaction_ids
        metabolism_config.update({'constrained_reaction_ids': target_fluxes})
        metabolism = Metabolism(metabolism_config)

        # Gene expression
        expression = ODE_expression(config['expression'])

        # Mass deriver
        mass_deriver = TreeMass({})

        # Division condition (volume-based)
        division_condition = DivisionVolume(config['division'])

        processes = {
            'transport': transport,
            'metabolism': metabolism,
            'expression': expression,
            'mass_deriver': mass_deriver,
            'division': division_condition,
        }

        # if divide is set to true, add the meta-division process
        if config['divide']:
            meta_division_config = dict(
                {},
                daughter_path=daughter_path,
                agent_id=agent_id,
                compartment=self)
            meta_division = MetaDivision(meta_division_config)
            processes['meta_division'] = meta_division

        return processes

    def generate_topology(self, config):
        """Map each process's ports onto store paths in the compartment tree."""
        boundary_path = config['boundary_path']
        agents_path = config['agents_path']
        fields_path = config['fields_path']
        dimensions_path = config['dimensions_path']
        external_path = boundary_path + ('external',)
        topology = {
            'transport': {
                'internal': ('cytoplasm',),
                'external': external_path,
                'fields': ('null',),  # metabolism's exchange is used
                'fluxes': ('flux_bounds',),
                'global': boundary_path,
                'dimensions': dimensions_path,
            },
            'metabolism': {
                'internal': ('cytoplasm',),
                'external': external_path,
                'reactions': ('reactions',),
                'fields': fields_path,
                'flux_bounds': ('flux_bounds',),
                'global': boundary_path,
                'dimensions': dimensions_path,
            },
            'expression': {
                'counts': ('cytoplasm_counts',),
                'internal': ('cytoplasm',),
                'external': external_path,
                'global': boundary_path,
            },
            'mass_deriver': {
                'global': boundary_path,
            },
            'division': {
                'global': boundary_path,
            },
        }
        if config['divide']:
            topology.update({
                'meta_division': {
                    'global': boundary_path,
                    'agents': agents_path,
                }})
        return topology
# simulate
def test_txp_mtb_ge(out_dir='out'):
    """Run a short simulation and compare it against the stored reference data."""
    embedded = run_txp_mtb_ge(
        env_volume=1e-12,
        total_time=10
    )
    flat = path_timeseries_from_embedded_timeseries(embedded)
    save_flat_timeseries(flat, out_dir)
    reference_file = os.path.join(REFERENCE_DATA_DIR, NAME + '.csv')
    assert_timeseries_close(flat, load_timeseries(reference_file))
def run_txp_mtb_ge(
        env_volume=1e-12,
        total_time=10,
        minimal_media=None,
):
    """
    Run the TransportMetabolismExpression compartment (without division)
    in a non-spatial environment and return the embedded timeseries.

    Args:
        env_volume: environment volume in liters.
        total_time: simulated time span.
        minimal_media: mapping of external molecule id -> concentration;
            when None, the iAF1260b minimal media is used.
    """
    # Fix: the original default was minimal_media=get_minimal_media_iAF1260b(),
    # which runs at import time and shares one dict across all calls;
    # resolve the default lazily instead.
    if minimal_media is None:
        minimal_media = get_minimal_media_iAF1260b()

    # make the compartment
    compartment = TransportMetabolismExpression({
        'agent_id': '0',
        'divide': False})

    # configure simulation
    default_test_setting = {
        'environment': {
            'volume': env_volume * units.L,
            'concentrations': minimal_media,
            'ports': {
                'fields': ('fields',),
                'external': ('boundary', 'external'),
                'dimensions': ('dimensions',),
                'global': ('boundary',)}},
        'total_time': total_time}

    # run simulation
    return simulate_compartment_in_experiment(compartment, default_test_setting)
if __name__ == '__main__':
    out_dir = os.path.join(COMPOSITE_OUT_DIR, NAME)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    parser = argparse.ArgumentParser(description='transport metabolism')
    # --shift: run in a tiny volume with scaled-up media and overridden
    # glc/lcts concentrations, writing plots to a 'shift' subdirectory
    parser.add_argument('--shift', '-s', action='store_true', default=False)
    args = parser.parse_args()

    if args.shift:
        out_dir = os.path.join(out_dir, 'shift')
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        minimal_media = get_metabolism_initial_external_state(
            scale_concentration=100,
            override={'glc__D_e': 1.0, 'lcts_e': 1.0})
        environment_volume = 1e-13
    else:
        minimal_media = get_minimal_media_iAF1260b()
        environment_volume = 1e-6

    # simulate
    total_time = 2500
    timeseries = run_txp_mtb_ge(
        env_volume=environment_volume,
        total_time=total_time,
        minimal_media=minimal_media)

    # print resulting growth
    # NOTE(review): the ratios use index 1 rather than 0 — presumably to
    # skip the initial emit; confirm this is intended
    volume_ts = timeseries['boundary']['volume']
    mass_ts = timeseries['boundary']['mass']
    print('volume growth: {}'.format(volume_ts[-1] / volume_ts[1]))
    print('mass growth: {}'.format(mass_ts[-1] / mass_ts[1]))

    # plot
    # simulation plot
    plot_settings = {
        'max_rows': 30,
        'remove_flat': True,
        'remove_zeros': True,
        'skip_ports': ['null', 'reactions']}
    plot_simulation_output(timeseries, plot_settings, out_dir)

    # glucose-lactose plot
    settings = {
        'internal_path': ('cytoplasm',),
        'external_path': ('boundary', 'external'),
        'global_path': ('boundary',),
        'environment_volume': environment_volume}
    plot_glc_lcts_environment(timeseries, settings, out_dir)
|
"""
Description:
Author: Jiaqi Gu (jqgu@utexas.edu)
Date: 2021-06-06 01:11:12
LastEditors: Jiaqi Gu (jqgu@utexas.edu)
LastEditTime: 2021-06-06 01:11:12
"""
from __future__ import print_function
import os
from os.path import join
import scipy.io
from torchvision import transforms
from torchvision.datasets import VisionDataset
from torchvision.datasets.folder import default_loader
from torchvision.datasets.utils import (
download_url,
extract_archive,
)
__all__ = ["StanfordCars"]
class StanfordCars(VisionDataset):
    """
    https://github.com/lvyilin/pytorch-fgvc-dataset/blob/master/cars.py
    `Stanford Cars <https://ai.stanford.edu/~jkrause/cars/car_dataset.html>`_ Dataset.
    Args:
        root (string): Root directory of the dataset.
        train (bool, optional): If True, creates dataset from training set, otherwise
            creates from test set.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    # sub-directory under *root* that holds the dataset files
    folder = "StanfordCars"
    # (url, local filename) pairs for the image archive and MATLAB annotations
    # NOTE(review): these ai.stanford.edu URLs may no longer be reachable — verify.
    file_list = {
        "imgs": ("http://ai.stanford.edu/~jkrause/car196/car_ims.tgz", "car_ims.tgz"),
        "annos": ("http://ai.stanford.edu/~jkrause/car196/cars_annos.mat", "cars_annos.mat"),
    }

    def __init__(self, root, train=True, transform=None, target_transform=None, download=False):
        root = join(os.path.expanduser(root), self.folder)
        # default transform: resize to 256x256 and convert to tensor
        if transform is None:
            transform = transforms.Compose([transforms.Resize(size=(256, 256)), transforms.ToTensor()])
        super(StanfordCars, self).__init__(root, transform=transform, target_transform=target_transform)
        self.loader = default_loader
        self.train = train
        if self._check_exists():
            print("Files already downloaded and verified.")
        elif download:
            self._download()
        else:
            raise RuntimeError("Dataset not found. You can use download=True to download it.")
        # parse the MATLAB annotation struct array
        loaded_mat = scipy.io.loadmat(os.path.join(self.root, self.file_list["annos"][1]))
        loaded_mat = loaded_mat["annotations"][0]
        # samples: list of (relative image path, 0-based class label)
        self.samples = []
        for item in loaded_mat:
            # item[-1][0] is presumably the is-test flag (0 = training image)
            # and item[-2][0] the 1-based class id — confirm against the
            # cars_annos.mat layout; this keeps entries of the requested split
            if self.train != bool(item[-1][0]):
                path = str(item[0][0])
                label = int(item[-2][0]) - 1
                self.samples.append((path, label))

    def __getitem__(self, index):
        """Return the (transformed image, target) pair at *index*."""
        path, target = self.samples[index]
        path = os.path.join(self.root, path)
        image = self.loader(path)
        image = self.transform(image)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return image, target

    def __len__(self):
        return len(self.samples)

    def _check_exists(self):
        # both the image archive and the annotation file must be present
        return os.path.exists(os.path.join(self.root, self.file_list["imgs"][1])) and os.path.exists(
            os.path.join(self.root, self.file_list["annos"][1])
        )

    def _download(self):
        # fetch both files, then unpack the image archive in place
        print("Downloading...")
        for url, filename in self.file_list.values():
            download_url(url, root=self.root, filename=filename)
        print("Extracting...")
        archive = os.path.join(self.root, self.file_list["imgs"][1])
        extract_archive(archive)
|
#!/usr/bin/env python3
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2012 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: Giangi Sacco
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import isce
import logging
import logging.config
from iscesys.Component.Application import Application
from iscesys.Component.Component import Component
import os
# Configurable parameter selecting which stitcher implementation the
# factory builds (old version 2 vs. new version 3 SRTM).
STITCHER = Component.Parameter(
    '_stitcher',
    public_name='stitcher',
    default='version3',
    type=str,
    mandatory=False,
    doc="Use as argument for the stitcher factory. Supported old version 2 or new version 3 SRTM",
)
class Stitcher(Application):
    """
    ISCE application driving a DEM stitcher facility: depending on the
    configured action it either stitches DEM tiles covering a bounding box
    into one output file ('stitch') or only downloads tiles ('download').
    """

    def main(self):
        # prevent from deleting local files
        if(self.demStitcher._useLocalDirectory):
            self.demStitcher._keepAfterFailed = True
            self.demStitcher._keepDems = True
        # if a metadata file is created set the right type (xml or rsc)
        if(self.demStitcher._meta == 'xml'):
            self.demStitcher.setCreateXmlMetadata(True)
        elif(self.demStitcher._meta == 'rsc'):
            self.demStitcher.setCreateRscMetadata(True)
        # check for the action to be performed
        if(self.demStitcher._action == 'stitch'):
            if(self.demStitcher._bbox):
                # bbox is [latMin, latMax, lonMin, lonMax]
                lat = self.demStitcher._bbox[0:2]
                lon = self.demStitcher._bbox[2:4]
                if (self.demStitcher._outputFile is None):
                    self.demStitcher._outputFile = self.demStitcher.defaultName(self.demStitcher._bbox)
                if not(self.demStitcher.stitchDems(lat,lon,self.demStitcher._source,self.demStitcher._outputFile,self.demStitcher._downloadDir, \
                        keep=self.demStitcher._keepDems)):
                    print('Could not create a stitched DEM. Some tiles are missing')
                else:
                    if(self.demStitcher._correct):
                        # NOTE(review): width is computed but only used by the
                        # commented-out correct() overload below — confirm
                        # whether the no-argument correct() is sufficient
                        width = self.demStitcher.getDemWidth(lon,self.demStitcher._source)
                        self.demStitcher.correct()
                        #self.demStitcher.correct(self.demStitcher._output,self.demStitcher._source,width,min(lat[0],lat[1]),min(lon[0],lon[1]))
            else:
                print('Error. The --bbox (or -b) option must be specified when --action stitch is used')
                raise ValueError
        elif(self.demStitcher._action == 'download'):
            if(self.demStitcher._bbox):
                lat = self.demStitcher._bbox[0:2]
                lon = self.demStitcher._bbox[2:4]
                self.demStitcher.getDemsInBox(lat,lon,self.demStitcher._source,self.demStitcher._downloadDir)
            #can make the bbox and pairs mutually esclusive if replace the if below with elif
            if(self.demStitcher._pairs):
                # pairs is a flat [lat, lon, lat, lon, ...] list
                self.demStitcher.downloadFilesFromList(self.demStitcher._pairs[::2],self.demStitcher._pairs[1::2],self.demStitcher._source,self.demStitcher._downloadDir)
            if(not (self.demStitcher._bbox or self.demStitcher._pairs)):
                print('Error. Either the --bbox (-b) or the --pairs (-p) options must be specified when --action download is used')
                raise ValueError
        else:
            print('Unrecognized action -a or --action',self.demStitcher._action)
            return
        # optionally print a per-file download report
        if(self.demStitcher._report):
            for k,v in list(self.demStitcher._downloadReport.items()):
                print(k,'=',v)

    def _facilities(self):
        """
        Define the user configurable facilities for this application.
        """
        self.demStitcher = self.facility(
            'demStitcher',
            public_name='demStitcher',
            module='contrib.demUtils',
            factory='createDemStitcher',
            args=(self.stitcher,'iscestitcher',),
            mandatory=False,
            doc=(
                "Object that based on the frame bounding boxes creates a DEM"
            )
        )

    def Usage(self):
        # command-line help text
        print("\nUsage: stitcher.py input.xml\n")
        print("NOTE: if you don't want to store your password in a file you can run it as\n" +\
              "'stitcher.py input.xml sticher.demStitcher.username=yourUsername\n" +\
              "sticher.demStitcher.password=yourPassword'\n\n" )

    # Application family name and configurable parameters
    family = 'stitcher'
    parameter_list = (STITCHER,)

    @property
    def stitcher(self):
        return self._stitcher

    @stitcher.setter
    def stitcher(self,stitcher):
        self._stitcher = stitcher

    def __init__(self,family = '', name = ''):
        super(Stitcher, self).__init__(family if family else self.__class__.family, name=name)
if __name__ == "__main__":
    # Build the application, let it parse its configuration (input.xml /
    # command line), and run it. (Removed an unused `import sys`.)
    ds = Stitcher()
    ds.configure()
    ds.run()
|
import feedparser
import sqlite3, psycopg2
import datetime, time
import os
import string
from random import choice, randint
import lxml.html
import uuid
import random
def fill_start_data_news():
    """
    Insert a single hard-coded test row into the ``news`` table.

    Side effects: opens a PostgreSQL connection, commits one INSERT,
    closes the connection.
    """
    # NOTE(review): credentials are hard-coded; move to config/env vars.
    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    cursor = db.cursor()
    # (Removed an unused `id = 1` local that shadowed the builtin.)
    news_title = "Test news"
    news_category_id = 1
    news_post_date = datetime.datetime.now()
    news_post_text = "Test news text"
    news_post_text_translate = "Test news text translate"
    news_portal_name_id = 1
    news_company_owner_id = 1
    news_author_id = 1
    news_main_cover = ""
    news_likes = 0
    news_dislikes = 0
    query = """INSERT INTO news(news_title, news_category_id, news_post_date, news_post_text, news_post_text_translate, news_portal_name_id, news_company_owner_id, news_author_id, news_main_cover, news_likes, news_dislikes) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
    data_query = (news_title, news_category_id, news_post_date, news_post_text, news_post_text_translate, news_portal_name_id, news_company_owner_id, news_author_id, news_main_cover, news_likes, news_dislikes)
    cursor.execute(query, data_query)
    db.commit()
    db.close()
def get_feed_urls():
    """
    Return the list of RSS feed URLs stored in the ``rss_channels`` table.

    Each fetched row is a 1-tuple ``(link,)``; the link strings are
    extracted and returned as a flat list.
    """
    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    cursor = db.cursor()
    cursor.execute("SELECT link FROM rss_channels")
    url_feeds = cursor.fetchall()
    db.close()
    print(url_feeds)
    # iterate rows directly instead of the original index-based loop
    urls = []
    for row in url_feeds:
        print(row)
        urls.append(row[0])
    return urls
def parse_current_url(url=''):
    """Fetch and parse the feed at *url*; return its entries as a list."""
    # list(...) replaces the original identity comprehension (PERF402)
    return list(feedparser.parse(url).entries)
def last_element(feed):
    """
    Flatten the newest entry (``feed[0]``) of a parsed feed into a dict
    with keys: title, link, main_cover, author, content, date, description.

    Fields the entry lacks default to the empty string; main_cover is
    always initialized empty and filled in later by the caller.
    """
    entry = feed[0]
    keys = entry.keys()
    args = {"title": entry.title, "link": entry.link, "main_cover": ""}
    # AUTHOR
    args["author"] = entry.author if "author" in keys else ""
    # CONTENT
    args["content"] = entry.content[0]["value"] if "content" in keys else ""
    # DATE: first matching key wins. Fix: the original's final else
    # duplicated the "updated" branch and raised KeyError when no date
    # field existed; fall back to "" instead.
    if "date" in keys:
        args["date"] = entry.published
    elif "published" in keys:
        args["date"] = entry["published"]
    elif "updated" in keys:
        args["date"] = entry["updated"]
    else:
        args["date"] = ""
    # DESCRIPTION
    if "description" in keys:
        args["description"] = entry.description
    elif "summary_detail" in keys:
        args["description"] = entry['summary_detail']['value']
    else:
        args["description"] = entry['summary']
    return args
def set_user_rss_read(user_id, rss_news_id, rss_portal_id):
    """
    Insert an unread marker (read=False) for *rss_news_id* / *user_id*
    into ``user_rss_news_read``. Returns 0.
    """
    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    try:
        cursor = db.cursor()
        query = "INSERT INTO user_rss_news_read(user_id, rss_news_id, rss_portal_id, read) VALUES(%s,%s,%s,%s)"
        data_query = (user_id, rss_news_id, rss_portal_id, False)
        cursor.execute(query, data_query)
        db.commit()
    finally:
        # fix: the original never closed the connection (leak)
        db.close()
    return 0
def get_amount_of_user_readers(portal_id):
    """
    Return ``[count, rows]`` for users subscribed (check=TRUE) to
    *portal_id* in ``user_rss_news``; each row is a 1-tuple ``(user_id,)``.
    """
    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    try:
        cursor = db.cursor()
        query = "SELECT user_id FROM user_rss_news UR WHERE portal_id=%s AND UR.check=TRUE"
        data_query = [portal_id]
        cursor.execute(query, data_query)
        amount = cursor.fetchall()
    finally:
        # fix: the original never closed the connection (leak)
        db.close()
    return [len(amount), amount]
def parse_img(url):
    """
    Fetch *url* and return its ``<img>`` elements, or False when the
    request fails with an HTTP error.
    """
    import urllib.request as r
    from urllib import error
    try:
        page = lxml.html.parse(r.urlopen(url))
    except error.HTTPError:
        return False
    return page.xpath('//img')
def result(url):
    """
    Return a list of ``{'size', 'src'}`` dicts for every <img> on *url*
    that declares both width and height (size = width * height as a
    string), or False when the page could not be fetched.
    """
    # fix: the original called parse_img(url) twice, fetching the page
    # over the network twice; also use `is False` rather than `== False`
    images = parse_img(url)
    if images is False:
        return False
    return [
        {'size': str(int(img.get('width')) * int(img.get('height'))),
         'src': img.get('src')}
        for img in images
        if img.get('width') and img.get('height')
    ]
def connect_to_db(urls):
    """
    For each feed URL: parse its newest entry, derive a post date and a
    cover image, normalize the text fields, and insert the entry into
    ``news_rss`` (plus cover/portal bookkeeping) unless its link already
    exists.
    """
    # db = sqlite3.connect(BASE_DIR+"\\db.sqlite3")
    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    import uuid
    cursor = db.cursor()
    num = 0
    for url in urls:
        num += 1
        print("#%s Current url: %s" % (num, url))
        data = last_element(parse_current_url(url=url))
        # Parse an RFC-822-ish date string ("Mon, 01 Jan 2024 12:00:00 ...")
        # into a datetime; on unexpected layouts keep the raw string.
        try:
            new_date = data["date"].split()
            time = new_date[4].split(":")
            # swap day/month tokens when they appear reversed
            if len(new_date[1]) > len(new_date[2]):
                tmp = new_date[1][:3]
                new_date[1] = new_date[2][:2]
                new_date[2] = tmp
            months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
            mon = months.index(new_date[2])+1
            date_posted = datetime.datetime(int(new_date[3][:4]), mon, int(new_date[1]), int(time[0]), int(time[1]), int(time[2]))
        except IndexError:
            date_posted = data["date"]
        # skip entries whose link is already stored
        query_0 = "SELECT ID FROM news_rss WHERE link=%s"
        data_query_0 = [data["link"]]
        cursor.execute(query_0, data_query_0)
        count = cursor.fetchall()
        import re
        # cover image: first <img src=...> found in the content, else in
        # the description; str([]) == '[]' marks "nothing found"
        match_2 = re.findall(r'src=\"(.*?)\"\s.*/>', data["content"])
        if len(match_2) >= 1:
            data["main_cover"] = str(match_2[0])#.replace(a, '')
        else:
            data["main_cover"] = str(match_2)#.replace(a, '')
        if len(match_2) == 0:
            match_3 = re.findall(r'src=\"(.*?)\"\s.*/>', data["description"])
            a = str(match_3)
            if len(match_3) >= 1:
                data["main_cover"] = str(match_3[0])#.replace(a, '')
            else:
                data["main_cover"] = str(match_3)#.replace(a, '')
        # normalize whitespace/entities and escape literal % for the SQL driver
        data["content"] = data["content"].replace("\xa0", " ").replace("%", "%%").replace("> ", "> ").replace(" </", "</").replace(" <", " <").replace("\n<", "<").replace("\n", " ").replace("'", "’")
        data["title"] = data["title"].replace('"', '').replace("\xa0", " ").replace("%", "%%").replace("> ", "> ").replace(" </", "</").replace(" <", " <").replace("\n<", "<").replace("\n", " ").replace("'", "’")
        data["description"] = data["description"].replace("\xa0", "").replace("%", "%%").replace("> ", "> ").replace(" </", "</").replace(" <", " <").replace("\n<", "<").replace("\n", " ").replace("'", "’")
        # fallback: when no cover was found in the markup, fetch the page
        # and pick its largest (width*height) image
        if data["main_cover"] == '[]':
            end = result(data['link'])
            if end != False:
                max_item = 0
                for i in range(len(end)):
                    if int(end[i]['size']) > max_item:
                        max_item = int(end[i]['size'])
                for i in range(len(end)):
                    if int(end[i]['size']) == max_item:
                        current_cover = end[i]['src']
                data["main_cover"] = current_cover
        # collapse runs of whitespace in the description
        match_tabs = re.findall(r'[\s]{2,}', data["description"])
        for i in match_tabs:
            data["description"] = data["description"].replace(i, " ")
        data["description"] = data["description"].replace("\n", "").replace("\t", "")
        # find the portal whose base link occurs in the entry link
        # NOTE(review): if no portal matches, current_rss_news_id is unbound
        # and the insert below raises NameError — confirm intended coverage
        query_for_rss = "SELECT * FROM rss_portals"
        cursor.execute(query_for_rss)
        portals_list = cursor.fetchall()
        for current_portal in portals_list:
            if current_portal[2] in data["link"]:
                current_rss_news_id = current_portal[0] # CURRENT PORTAL ID
                current_rss_news_cat_id = current_portal[7]
        if len(count) == 0:
            # insert the news row with a random 33-char nuid
            query = """INSERT INTO news_rss(title, date_posted, post_text, link, portal_name_id, category_id, content_value, author, nuid) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)"""
            data_query = (data["title"],
                          date_posted,
                          data["description"],
                          data["link"],
                          current_rss_news_id,
                          current_rss_news_cat_id,
                          data["content"],
                          data["author"],
                          ''.join(choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _
                                  in range(33)))
            cursor.execute(query, data_query)
            # fetch the id just assigned (looked up by title) for the cover row
            query_2 = "SELECT ID FROM news_rss WHERE title=%s"
            data_query_2 = [data["title"]]
            cursor.execute(query_2, data_query_2)
            current_rss_id = cursor.fetchone()[0]
            query_3 = "INSERT INTO rss_news_covers(rss_news_id, main_cover) VALUES (%s, %s)"
            data_query_3 = (int(current_rss_id), data["main_cover"])
            cursor.execute(query_3, data_query_3)
            # keep the portal's cover up to date with the newest item
            query_rss_portal = "UPDATE rss_portals SET cover=%s WHERE id=%s"
            query_rss_portal_data=(data["main_cover"], int(current_rss_news_id))
            cursor.execute(query_rss_portal, query_rss_portal_data)
            db.commit()
            # mark the item unread for every subscriber of this portal
            instance = get_amount_of_user_readers(current_rss_news_id)
            user_amount = instance[0]
            users = [i[0] for i in instance[1]]
            for i in range(len(users)):
                set_user_rss_read(users[i], current_rss_id, current_rss_news_id)
            print("Inserted from: ", url)
        else:
            print("Already exists: ", url)
        print("================END ONE MORE LOOP====================")
    db.close()
def fill_rss_table():
    """
    Re-link every ``news_rss`` row to its portal: whenever a portal's base
    link (column 2 of rss_portals) occurs in the news item's link (column 6
    of news_rss), set news_rss.portal_name_id to that portal's id.

    Prints a progress line for each updated row.
    """
    # Fixes: removed two unused JSON file loads of dictionary_portals.json,
    # removed the `id` local shadowing the builtin, and replaced the
    # index-based loops with direct row iteration.
    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    cursor = db.cursor()
    cursor.execute("SELECT * FROM rss_portals")
    portals = cursor.fetchall()
    cursor.execute("SELECT * FROM news_rss")
    rss_rows = cursor.fetchall()
    end = len(rss_rows) * len(portals)  # total comparisons, for progress output
    cur_iter = 0
    for news_row in rss_rows:
        for portal in portals:
            cur_iter += 1
            if str(portal[2]) in str(news_row[6]):
                cursor.execute(
                    "UPDATE news_rss SET portal_name_id=%s WHERE id=%s",
                    (str(portal[0]), str(news_row[0])))
                db.commit()
                print("Iter #", cur_iter, "Complete..........", cur_iter/end*100, "%", "When total end is ", end)
    db.close()
def fill_rss_portals():
    """
    Load portal definitions from ``dictionary_portals.json``; insert each
    portal into ``rss_portals`` if it is not already there (matched by
    name), and in both cases register the portal's feed URL in
    ``rss_channels``.
    """
    import json
    #db = sqlite3.connect(BASE_DIR+"\\db.sqlite3")
    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    cursor = db.cursor()
    # NOTE(review): the file is opened twice; both names hold the same
    # parsed JSON object
    with open("dictionary_portals.json", encoding="utf-8-sig") as file_list:
        file_list = json.load(file_list)
    with open("dictionary_portals.json", encoding="utf-8-sig") as file:
        portals = json.load(file)
    end = len(portals)  # total portal count, used for progress output
    print(end)
    cur_iter = 0
    # keys accessed are 'object-2' .. 'object-<len>' — 'object-1' is
    # skipped; presumably intentional, TODO confirm
    for i in range(1,len(file_list)):
        i = i+1
        # does this portal already exist (matched by name)?
        query_0 = "SELECT ID FROM rss_portals WHERE portal=%s"
        data_query_0 = [file_list['object-%s'%i]["name"]]
        cursor.execute(query_0, data_query_0)
        count = cursor.fetchall()
        if len(count) == 0:
            cur_iter += 1
            # JSON category name -> category_id
            categories = {"Technology": 1, "Entertainment": 2, "Auto": 3, "Space": 4, "BIO": 5}
            query = "INSERT INTO rss_portals(portal, portal_base_link, follows, description, cover, favicon, verbose_name, category_id, puid) VALUES(%s, %s, %s, %s, %s,%s,%s,%s, %s)"
            data_query = (file_list['object-%s'%i]["name"],
                          file_list['object-%s'%i]["base_link"],
                          0,
                          file_list['object-%s'%i]["description"],
                          file_list['object-%s'%i]["cover"],
                          file_list['object-%s'%i]["favicon"],
                          file_list['object-%s'%i]["verbose"],
                          categories[file_list['object-%s'%i]["category"]],
                          str(uuid.uuid4()),
                          )
            cursor.execute(query, data_query)
            db.commit()
            # Add feed to each portal
            query_test = "SELECT DISTINCT ON (ID) ID FROM rss_portals WHERE portal_base_link=%s"
            query_test_data = [file_list['object-%s'%i]['base_link']]
            cursor.execute(query_test, query_test_data)
            rss_id = cursor.fetchall()
            query_channel = "INSERT INTO rss_channels(portal_id, link) VALUES(%s, %s)"
            query_channel_data = (rss_id[0], file_list['object-%s'%i]["feed"])
            cursor.execute(query_channel, query_channel_data)
            db.commit()
            print("Iter #", cur_iter, "Complete..........", cur_iter/end*100, "%", "When total end is ", end)
        else:
            # Add feed to each portal (portal row already present)
            query_test = "SELECT DISTINCT ON (ID) ID FROM rss_portals WHERE portal=%s"
            query_test_data = [file_list['object-%s'%i]['name']]
            cursor.execute(query_test, query_test_data)
            rss_id = cursor.fetchall()
            query_channel = "INSERT INTO rss_channels(portal_id, link) VALUES(%s, %s)"
            query_channel_data = (rss_id[0], file_list['object-%s'%i]["feed"])
            cursor.execute(query_channel, query_channel_data)
            db.commit()
    db.close()
def fill_companies():
    """
    Load companies from ``c_2.json`` and insert any that are not already
    present (matched by site) into the ``companies`` table.

    Prints the existing-row check and a progress line per company.
    """
    import json
    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    cursor = db.cursor()
    # single read (the original opened the same file twice)
    with open("c_2.json", encoding="utf-8-sig") as fh:
        companies = json.load(fh)
    end = len(companies)
    # category name -> category_id; "bit" kept as an alias of "bio" as in
    # the original if-chain
    category_ids = {"technology": 1, "entertainment": 2, "auto": 3,
                    "space": 4, "bio": 5, "bit": 5}
    cur_iter = 0
    current_category_id = 0
    for key in list(companies):
        company = companies[key]
        # unknown categories keep the previous id (original carry-over behavior)
        current_category_id = category_ids.get(company['category'], current_category_id)
        description = ""
        cur_iter += 1
        cursor.execute("SELECT * FROM companies WHERE site=%s", [company['site']])
        check = cursor.fetchall()
        print(check)
        print(len(check))
        if not check:
            query = "INSERT INTO companies(name, verbose_name, site, category_id, logo, description) VALUES(%s, %s, %s, %s, %s, %s)"
            data_query = (company['name'], company['verbose'],
                          company['site'], current_category_id,
                          company['logo'], description)
            cursor.execute(query, data_query)
            db.commit()
        print("Iter #", cur_iter, "Complete..........", cur_iter/end*100, "%", "Dealed with ", company['name'])
    db.close()
def fill_portals():
    """Insert the single hard-coded Appleinsider row into ``news_portal``."""
    import json
    connection = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    cur = connection.cursor()
    cur.execute(
        "INSERT INTO news_portal(portal_name, portal_base_link) VALUES(%s,%s)",
        ("Appleinsider", "appleinsider.ru"),
    )
    connection.commit()
    connection.close()
def fill_news():
    """Load articles from news_zaharov_2.json and insert each article into
    the news table five times - once per category id 1-5.

    Articles missing an expected key ('title', 'date' or 'text') are
    printed and skipped.
    """
    import json
    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    cursor = db.cursor()
    # Parse the file once; the original opened and parsed it twice (once for
    # the key list, once for the data).
    with open("news_zaharov_2.json", encoding="utf-8-sig") as fh:
        news = json.load(fh)
    keys = list(news)
    end = len(news)
    cur_iter = 0
    # The INSERT statement is loop-invariant, so build it once.
    query_set = "INSERT INTO news(news_title_english, news_title_russian, news_title_chinese, news_category_id, news_post_date, news_post_text_english, " \
                "teaser_english, teaser_russian, teaser_chinese, news_post_text_russian, news_post_text_chinese, news_portal_name_id, news_company_owner_id, news_author_id, " \
                "news_main_cover, photo, news_likes, news_dislikes, news_tags, slug) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
    for i, key in enumerate(keys):
        item = news[key]
        try:
            for category_id in (1, 2, 3, 4, 5):
                cur_iter += 1
                news_title_english = "[eng]" + item["title"]
                news_title_russian = "[rus]" + item["title"]
                news_title_chinese = "[ch]" + item["title"]
                data_query_set = (news_title_english,
                                  news_title_russian,
                                  news_title_chinese,
                                  category_id,                        # news_category_id
                                  item["date"],                       # news_post_date
                                  item["text"],                       # news_post_text_english
                                  "[eng] Teaser",
                                  "[rus] Teaser",
                                  "[ch] Teaser",
                                  item["text"],                       # news_post_text_russian
                                  item["text"],                       # news_post_text_chinese
                                  1,                                  # news_portal_name_id (Insydia)
                                  1,                                  # news_company_owner_id (Insydia)
                                  1,                                  # news_author_id (Saqel)
                                  "",                                 # news_main_cover
                                  "",                                 # photo
                                  0,                                  # news_likes
                                  0,                                  # news_dislikes
                                  "{}",                               # news_tags
                                  "%s-b-a-a-%s" % (category_id, i))   # slug
                cursor.execute(query_set, data_query_set)
                db.commit()
                print("Iter #", cur_iter, "Complete..........", cur_iter/end*100, "%", "Dealed with ", news_title_english)
        except KeyError:
            # Report the malformed article and continue with the next one.
            print(item)
    db.close()
def save_rss_news():
    """Export every row of news_rss to save_rss.json as a JSON array.

    Bug fix: the previous implementation appended ``"object": {...},``
    pairs inside one object opened in "a+" mode, producing a file no JSON
    parser could read (duplicate keys, trailing comma, and repeated
    documents on re-runs).  Rows are now collected into a list and dumped
    once as valid JSON, overwriting the file.
    """
    import json
    db = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    cursor = db.cursor()
    cursor.execute("SELECT * FROM news_rss")
    data = cursor.fetchall()
    end = len(data)
    items = []
    # Column layout follows the news_rss table:
    # [1]=title, [2]=date_posted, [3]=post_text, [4]=portal_name,
    # [5]=category, [6]=link, [7]=author, [8]=content_value.
    for count, row in enumerate(data, start=1):
        items.append({
            "title": row[1],
            "date_posted": row[2].isoformat(),  # datetime -> ISO 8601 string
            "post_text": row[3],
            "portal_name": row[4],
            "category": row[5],
            "link": row[6],
            "author": row[7],
            "content_value": row[8],
        })
        print("Saving RSS # ", count, " success. In total - ", end, " items")
    with open("save_rss.json", "w", encoding="utf-8") as file:
        json.dump(items, file, ensure_ascii=False)
    db.close()
def create_categories():
    """Insert the five default category names into news_category."""
    connection = psycopg2.connect("dbname='test' user='testuser' host='' password='test'")
    cur = connection.cursor()
    insert_sql = "INSERT into news_category(category_name) VALUES(%s)"
    for category in ("Technology", "Entertainment", "Auto", "Space", "BIO"):
        cur.execute(insert_sql, [category])
        connection.commit()
        print("Category ", category, "added")
    connection.close()
def work_func():
    """Show the task menu, read the user's choice and run the matching
    maintenance routine.

    A choice outside 1-8 (including non-numeric input, which previously
    crashed with a ValueError traceback) prints a farewell and terminates
    the process.
    """
    urls_of_portals = get_feed_urls()
    print("1. Fill Rss Portals\n2. Syndicate news\n3. Fill Companies\n4. Fill news\n5. Save RSS\n6.User readers\n7. Create categories\n8. Fill portals")
    try:
        choice = int(input("What can I help you? Enter number: "))
    except ValueError:
        # Treat non-numeric input as "no valid choice" so it falls through
        # to the exit branch instead of raising.
        choice = 0
    if choice == 1:
        fill_rss_portals()
    elif choice == 2:
        # Syndicate forever, polling the portals once per second.
        while True:
            connect_to_db(urls=urls_of_portals)
            time.sleep(1)
    elif choice == 3:
        fill_companies()
    elif choice == 4:
        fill_news()
    elif choice == 5:
        save_rss_news()
    elif choice == 6:
        get_amount_of_user_readers(3)
    elif choice == 7:
        create_categories()
    elif choice == 8:
        fill_portals()
    else:
        import sys
        print("Good bye!")
        sys.exit(0)
#fill_start_data_news()
# Entry point: keep showing the task menu until work_func() terminates
# the process via sys.exit() on an unrecognized menu choice.
while True:
    work_func()
|
import requests
import sys
class API(object):
    """Thin client for the Vscale HTTP API (https://api.vscale.io/v1/).

    On construction, any missing cache fields (location, plan, image,
    ssh keys) are filled with detected or default values and the cache
    is persisted.
    """

    def __init__(self, token, cache):
        self.addr = 'https://api.vscale.io/v1/'
        self.token = token
        self.cache = cache
        # Prefer a freshly detected location, then the cached one, then spb0.
        self.cache.location = self._closest_location() or self.cache.location or 'spb0'
        self.cache.plan = self.cache.plan or 'small'
        self.cache.image = self.cache.image or self._best_image() or 'ubuntu_16.04_64_001_master'
        self.cache.keys = self.cache.keys or [k['id'] for k in self.call('sshkeys')]
        self.cache.save()

    def call(self, path, method='GET', data=None):
        """Perform an HTTP request against the API and return parsed JSON.

        path   -- endpoint relative to the API root, e.g. 'scalets'
        method -- one of GET/PUT/POST/PATCH/DELETE
        data   -- optional JSON-serializable request body

        Exits the process when the request fails or the body is not JSON.
        """
        methods_map = {
            'GET': requests.get,
            'PUT': requests.put,
            'POST': requests.post,
            'PATCH': requests.patch,
            'DELETE': requests.delete,
        }
        headers = {'X-Token': self.token}
        # Bug fix: keep `response` bound even when the request itself raises,
        # so the error report below cannot fail with NameError.
        response = None
        try:
            response = methods_map[method](self.addr + path, headers=headers, json=data)
            result = response.json()
        except (requests.RequestException, ValueError, KeyError):
            # RequestException: network/HTTP failure; ValueError: body was
            # not JSON; KeyError: unsupported HTTP method name.
            # (Previously a bare `except:` which also swallowed SystemExit.)
            print("Error occured: {}".format(sys.exc_info()[0]))
            result = False
        if result is False:
            print('Could not call API or parse output, exiting. Response:')
            print(data)
            print(response)
            sys.exit(1)
        return result

    def _best_image(self):
        """Pick the newest active Ubuntu LTS 'master' image available for
        the cached location and plan; return its id, or None when nothing
        matches."""
        images_all = self.call('images')
        # Narrow by state, OS, purpose, location and plan.
        candidates = [i for i in images_all
                      if i['active']
                      and 'ubuntu' in i['id']
                      and 'master' in i['id']
                      and self.cache.location in i['locations']
                      and self.cache.plan in i['rplans']]
        # Prefer LTS releases: version looks like xx.04 with an even year.
        # Image ids look like 'ubuntu_16.04_...', so the year is id[7:9].
        try:
            candidates = [i for i in candidates
                          if '.04' in i['id'] and int(i['id'][7:9]) % 2 == 0]
        except ValueError:
            # Unexpected id format - fall back to any xx.04 release.
            candidates = [i for i in candidates if '.04' in i['id']]
        ids = [i['id'] for i in candidates]
        return ids[-1] if ids else None

    def _closest_location(self):
        """Try to geolocate the user and return the nearest data-center id.
        Fall back to 'msk0' when geolocation or haversine is unavailable."""
        try:
            from haversine import haversine
            user_geo = requests.get('http://freegeoip.net/json/').json()
        except Exception:
            # haversine missing, network failure, or a non-JSON body.
            print("Warning, haversine could not detect geo info: {}".format(sys.exc_info()[0]))
            user_geo = False
        res = 'msk0'  # default when geolocation is unavailable or malformed
        if user_geo \
                and isinstance(user_geo.get('latitude'), float) \
                and isinstance(user_geo.get('longitude'), float):
            # Bug fix: the original also required both coordinates to be
            # positive, which rejected every user south of the equator or
            # west of Greenwich; the sign check is dropped on purpose.
            available_locations = {
                'msk0': (55.4521, 37.3704),
                'spb0': (59.5700, 30.1900)
            }
            user = (user_geo['latitude'], user_geo['longitude'])
            # Half the Earth's circumference: larger than any real distance.
            # (2 * pi * 6371) / 2
            min_distance = 20015.08
            for loc, coords in available_locations.items():
                distance = haversine(user, coords)
                if distance < min_distance:
                    min_distance = distance
                    res = loc
        return res

    def account(self):
        """Return account information."""
        return self.call('account')

    def images(self):
        """Return the list of available OS images."""
        return self.call('images')

    def locations(self):
        """Return the list of data-center locations."""
        return self.call('locations')

    def plans(self):
        """Return the list of resource plans."""
        return self.call('rplans')

    def servers_list(self):
        """Return all scalets (servers) in the account."""
        return self.call('scalets')

    def servers_one(self, ctid):
        """Return one scalet identified by its numeric id."""
        return self.call('scalets/' + str(ctid))
|
# coding: utf-8
"""
Upbit Open API
## REST API for Upbit Exchange - Base URL: [https://api.upbit.com] - Official Upbit API Documents: [https://docs.upbit.com] - Official Support email: [open-api@upbit.com] # noqa: E501
OpenAPI spec version: 1.0.0
Contact: ujhin942@gmail.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class WithdrawLimit(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'currency': 'str',
        'minimum': 'str',
        'onetime': 'str',
        'daily': 'str',
        'remaining_daily': 'str',
        'remaining_daily_krw': 'str',
        'fixed': 'float',
        'can_withdraw': 'bool'
    }
    attribute_map = {
        'currency': 'currency',
        'minimum': 'minimum',
        'onetime': 'onetime',
        'daily': 'daily',
        'remaining_daily': 'remaining_daily',
        'remaining_daily_krw': 'remaining_daily_krw',
        'fixed': 'fixed',
        'can_withdraw': 'can_withdraw'
    }
    def __init__(self, currency=None, minimum=None, onetime=None, daily=None, remaining_daily=None, remaining_daily_krw=None, fixed=None, can_withdraw=None):  # noqa: E501
        """WithdrawLimit - a model defined in Swagger"""  # noqa: E501
        self._currency = None
        self._minimum = None
        self._onetime = None
        self._daily = None
        self._remaining_daily = None
        self._remaining_daily_krw = None
        self._fixed = None
        self._can_withdraw = None
        self.discriminator = None
        # Only assign attributes the caller actually supplied, so unset
        # fields stay None.
        if currency is not None:
            self.currency = currency
        if minimum is not None:
            self.minimum = minimum
        if onetime is not None:
            self.onetime = onetime
        if daily is not None:
            self.daily = daily
        if remaining_daily is not None:
            self.remaining_daily = remaining_daily
        if remaining_daily_krw is not None:
            self.remaining_daily_krw = remaining_daily_krw
        if fixed is not None:
            self.fixed = fixed
        if can_withdraw is not None:
            self.can_withdraw = can_withdraw
    @property
    def currency(self):
        """Gets the currency of this WithdrawLimit.  # noqa: E501
        Upper-case letter code identifying the currency  # noqa: E501
        :return: The currency of this WithdrawLimit.  # noqa: E501
        :rtype: str
        """
        return self._currency
    @currency.setter
    def currency(self, currency):
        """Sets the currency of this WithdrawLimit.
        Upper-case letter code identifying the currency  # noqa: E501
        :param currency: The currency of this WithdrawLimit.  # noqa: E501
        :type: str
        """
        self._currency = currency
    @property
    def minimum(self):
        """Gets the minimum of this WithdrawLimit.  # noqa: E501
        Minimum withdrawal amount/quantity  # noqa: E501
        :return: The minimum of this WithdrawLimit.  # noqa: E501
        :rtype: str
        """
        return self._minimum
    @minimum.setter
    def minimum(self, minimum):
        """Sets the minimum of this WithdrawLimit.
        Minimum withdrawal amount/quantity  # noqa: E501
        :param minimum: The minimum of this WithdrawLimit.  # noqa: E501
        :type: str
        """
        self._minimum = minimum
    @property
    def onetime(self):
        """Gets the onetime of this WithdrawLimit.  # noqa: E501
        Per-transaction withdrawal limit  # noqa: E501
        :return: The onetime of this WithdrawLimit.  # noqa: E501
        :rtype: str
        """
        return self._onetime
    @onetime.setter
    def onetime(self, onetime):
        """Sets the onetime of this WithdrawLimit.
        Per-transaction withdrawal limit  # noqa: E501
        :param onetime: The onetime of this WithdrawLimit.  # noqa: E501
        :type: str
        """
        self._onetime = onetime
    @property
    def daily(self):
        """Gets the daily of this WithdrawLimit.  # noqa: E501
        Daily withdrawal limit  # noqa: E501
        :return: The daily of this WithdrawLimit.  # noqa: E501
        :rtype: str
        """
        return self._daily
    @daily.setter
    def daily(self, daily):
        """Sets the daily of this WithdrawLimit.
        Daily withdrawal limit  # noqa: E501
        :param daily: The daily of this WithdrawLimit.  # noqa: E501
        :type: str
        """
        self._daily = daily
    @property
    def remaining_daily(self):
        """Gets the remaining_daily of this WithdrawLimit.  # noqa: E501
        Remaining daily withdrawal limit  # noqa: E501
        :return: The remaining_daily of this WithdrawLimit.  # noqa: E501
        :rtype: str
        """
        return self._remaining_daily
    @remaining_daily.setter
    def remaining_daily(self, remaining_daily):
        """Sets the remaining_daily of this WithdrawLimit.
        Remaining daily withdrawal limit  # noqa: E501
        :param remaining_daily: The remaining_daily of this WithdrawLimit.  # noqa: E501
        :type: str
        """
        self._remaining_daily = remaining_daily
    @property
    def remaining_daily_krw(self):
        """Gets the remaining_daily_krw of this WithdrawLimit.  # noqa: E501
        Combined (KRW-equivalent) remaining daily withdrawal limit  # noqa: E501
        :return: The remaining_daily_krw of this WithdrawLimit.  # noqa: E501
        :rtype: str
        """
        return self._remaining_daily_krw
    @remaining_daily_krw.setter
    def remaining_daily_krw(self, remaining_daily_krw):
        """Sets the remaining_daily_krw of this WithdrawLimit.
        Combined (KRW-equivalent) remaining daily withdrawal limit  # noqa: E501
        :param remaining_daily_krw: The remaining_daily_krw of this WithdrawLimit.  # noqa: E501
        :type: str
        """
        self._remaining_daily_krw = remaining_daily_krw
    @property
    def fixed(self):
        """Gets the fixed of this WithdrawLimit.  # noqa: E501
        Number of decimal places for withdrawal amount/quantity  # noqa: E501
        :return: The fixed of this WithdrawLimit.  # noqa: E501
        :rtype: float
        """
        return self._fixed
    @fixed.setter
    def fixed(self, fixed):
        """Sets the fixed of this WithdrawLimit.
        Number of decimal places for withdrawal amount/quantity  # noqa: E501
        :param fixed: The fixed of this WithdrawLimit.  # noqa: E501
        :type: float
        """
        self._fixed = fixed
    @property
    def can_withdraw(self):
        """Gets the can_withdraw of this WithdrawLimit.  # noqa: E501
        Whether withdrawal is supported  # noqa: E501
        :return: The can_withdraw of this WithdrawLimit.  # noqa: E501
        :rtype: bool
        """
        return self._can_withdraw
    @can_withdraw.setter
    def can_withdraw(self, can_withdraw):
        """Sets the can_withdraw of this WithdrawLimit.
        Whether withdrawal is supported  # noqa: E501
        :param can_withdraw: The can_withdraw of this WithdrawLimit.  # noqa: E501
        :type: bool
        """
        self._can_withdraw = can_withdraw
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated guard: include dict items when the model subclasses dict.
        if issubclass(WithdrawLimit, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, WithdrawLimit):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Type
from audio_source.countable_pcm_volume_transformer import CountablePCMVolumeTransformer
class IPCMSource(ABC, CountablePCMVolumeTransformer):
    """Abstract base for playable songs.

    Concrete implementations must provide the ``title``, ``url`` and
    ``duration`` attributes, and should be created through
    :meth:`from_search` rather than instantiated directly.
    """

    title: str
    url: str
    duration: int

    @classmethod
    @abstractmethod
    async def from_search(cls: Type[IPCMSource], search: str) -> IPCMSource:
        """Build an instance for the song matched by ``search``, which may
        be either a direct URL or a free-text query."""
|
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
from imagekit.models import ProcessedImageField
from imagekit.processors import ResizeToFill
from .amazon import Amazon
class Continent(models.Model):
    """!
    Continents of the Earth

    This model stores the data of the continents of the Earth.

    @param name name of continent
    @param alpha2 2-character ISO code
    @param image image of the continent on the map
    """
    name = models.CharField(max_length=32, unique=True, verbose_name=_('Name'))
    alpha2 = models.CharField(max_length=2, unique=True, verbose_name=_('Alpha2'))
    image = ProcessedImageField(upload_to='data/continent/',
                                format='PNG',
                                options={'quality': 60},
                                null=True,
                                verbose_name=_('Image'))

    def __str__(self):
        return self.name

    class Meta:
        # Bug fix: the singular verbose_name was mistakenly set to the
        # plural form _('Continents').
        verbose_name = _('Continent')
        verbose_name_plural = _('Continents')
        ordering = ["name"]
class Country(models.Model):
    """!
    Countries of the Earth

    This model stores the data of the countries of the Earth.

    @param name simple name of country
    @param official_name official name of country
    @param alpha2 2-character ISO code
    @param alpha3 3-character ISO code
    @param iso_code digital ISO code
    @param area area of country
    @param population population of country
    @param independent dependent or independent country
    @param rating findstudent rating of country
    @param flag flag of country
    """
    name = models.CharField(max_length=64, unique=True, verbose_name=_('Name'))
    official_name = models.CharField(max_length=64, unique=True, verbose_name=_('Official name'))
    alpha2 = models.CharField(max_length=2, unique=True, verbose_name=_('Alpha2'))
    alpha3 = models.CharField(max_length=3, unique=True, verbose_name=_('Alpha3'))
    iso_code = models.IntegerField(unique=True, verbose_name=_('ISO code'))
    independent = models.BooleanField(default=True, verbose_name=_('Independent'))
    rating = models.IntegerField(default=0, verbose_name=_('Rating'))
    area = models.IntegerField(null=True, blank=True, verbose_name=_('Area'))
    population = models.IntegerField(null=True, blank=True, verbose_name=_('Population'))
    # Flags are normalized to a fixed 68x45 PNG thumbnail.
    flag = ProcessedImageField(processors=[ResizeToFill(68, 45)],
                               upload_to='data/country/',
                               format='PNG',
                               options={'quality': 60},
                               null=True,
                               verbose_name=_('Flag'))
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = _('Country')
        verbose_name_plural = _('Countries')
        # Highest-rated countries first, alphabetical within equal rating.
        ordering = ["-rating", "name"]
class Timezone(models.Model):
    """!
    Time zones referenced by cities.

    @param name canonical time-zone name
    @param abbr_short short abbreviation
    @param abbr_long long abbreviation
    @param raw_offset offset from GMT (unit not shown here - presumably seconds; TODO confirm against the data loader)
    @param dst_offset daylight-saving offset (same unit as raw_offset)
    @param rating findstudent rating used for default ordering
    """
    name = models.CharField(max_length=64, unique=True, verbose_name=_('Name'))
    abbr_short = models.CharField(max_length=64, unique=True, verbose_name=_('Short abbreviation'))
    abbr_long = models.CharField(max_length=64, unique=True, verbose_name=_('Long abbreviation'))
    raw_offset = models.IntegerField(default=0, verbose_name=_('GMT offset'))
    dst_offset = models.IntegerField(default=0, verbose_name=_('DST offset'))
    rating = models.IntegerField(default=0, verbose_name=_('Rating'))
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = _('Timezone')
        verbose_name_plural = _('Timezones')
        ordering = ["-rating", "name"]
class City(models.Model):
    """!
    Cities of the Earth.

    @param name name of city
    @param iata_code 3-character IATA code
    @param latitude latitude of city
    @param longitude longitude of city
    @param altitude altitude of city
    @param capital whether the city is a capital
    @param rating findstudent rating of city
    @param area area of city
    @param population population of city
    @param continent continent the city belongs to
    @param country country the city belongs to
    @param timezone time zone of the city
    """
    name = models.CharField(max_length=64, unique=True, verbose_name=_('Name'))
    iata_code = models.CharField(max_length=3, unique=True, verbose_name=_('IATA code'))
    latitude = models.FloatField(verbose_name=_('Latitude'))
    longitude = models.FloatField(verbose_name=_('Longitude'))
    altitude = models.IntegerField(verbose_name=_('Altitude'))
    capital = models.BooleanField(default=False, verbose_name=_('Capital'))
    rating = models.IntegerField(default=0, verbose_name=_('Rating'))
    area = models.IntegerField(null=True, blank=True, verbose_name=_('Area'))
    population = models.IntegerField(null=True, blank=True, verbose_name=_('Population'))
    continent = models.ForeignKey(Continent, on_delete=models.CASCADE,
                                  verbose_name=_('Continent'))
    country = models.ForeignKey(Country, on_delete=models.CASCADE, verbose_name=_('Country'))
    timezone = models.ForeignKey(Timezone, on_delete=models.CASCADE,
                                 verbose_name=_('Timezone'))
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = _('City')
        verbose_name_plural = _('Cities')
        ordering = ["-rating", "name"]
        # Two cities cannot share the exact same coordinates.
        unique_together = ("latitude", "longitude")
class University(models.Model):
    """!
    Universities located in cities.

    @param name full name of university
    @param abbr abbreviation of university
    @param latitude latitude of the campus
    @param longitude longitude of the campus
    @param student_count number of students
    @param rating findstudent rating of university
    @param slogan slogan of university
    @param founded founding date
    @param website official website
    @param logo logo image
    @param city city the university is located in
    """
    name = models.CharField(max_length=64, unique=True, verbose_name=_('Name'))
    abbr = models.CharField(max_length=16, unique=True, verbose_name=_('Abbreviation'))
    latitude = models.FloatField(verbose_name=_('Latitude'))
    longitude = models.FloatField(verbose_name=_('Longitude'))
    student_count = models.IntegerField(verbose_name=_('Student count'))
    rating = models.IntegerField(default=0, verbose_name=_('Rating'))
    slogan = models.CharField(max_length=128, null=True, blank=True, verbose_name=_('Slogan'))
    founded = models.DateField(null=True, blank=True, verbose_name=_('Founded'))
    website = models.URLField(null=True, blank=True, verbose_name=_('Website'))
    logo = ProcessedImageField(upload_to='data/university/',
                               format='PNG',
                               options={'quality': 60},
                               null=True,
                               verbose_name=_('Logo'))
    city = models.ForeignKey(City, on_delete=models.CASCADE, verbose_name=_('City'))
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = _('University')
        verbose_name_plural = _('Universities')
        ordering = ["-rating", "name"]
        # Two universities cannot share the exact same coordinates.
        unique_together = ("latitude", "longitude")
class Room(models.Model):
    """!
    Rooms inside a university, identified by campus letter and number.

    @param campus single-character campus identifier
    @param number room number (stored as text)
    @param rating findstudent rating of room
    @param university university the room belongs to
    @param location image showing where the room is located
    """
    campus = models.CharField(max_length=1, verbose_name=_('Campus'))
    number = models.CharField(max_length=64, verbose_name=_('Number'))
    rating = models.IntegerField(default=0, verbose_name=_('Rating'))
    university = models.ForeignKey(University, on_delete=models.CASCADE, verbose_name=_('University'))
    location = ProcessedImageField(upload_to='data/room/',
                                   format='PNG',
                                   options={'quality': 60},
                                   null=True,
                                   blank=True,
                                   verbose_name=_('Location'))
    def __str__(self):
        # Display as "<campus> <number>", e.g. "A 101".
        s = "{} {}".format(self.campus, self.number)
        return s
    class Meta:
        verbose_name = _('Room')
        verbose_name_plural = _('Rooms')
        ordering = ["campus", "number"]
class Institute(models.Model):
    """!
    Institutes (faculties) within a university.

    @param name full name of institute
    @param abbr abbreviation of institute
    @param rating findstudent rating of institute
    @param website official website
    @param logo logo image
    @param university university the institute belongs to
    """
    name = models.CharField(max_length=64, verbose_name=_('Name'))
    abbr = models.CharField(max_length=64, verbose_name=_('Abbreviation'))
    rating = models.IntegerField(default=0, verbose_name=_('Rating'))
    website = models.URLField(null=True, blank=True, verbose_name=_('Website'))
    logo = ProcessedImageField(upload_to='data/institute/',
                               format='PNG',
                               options={'quality': 60},
                               null=True,
                               verbose_name=_('Logo'))
    university = models.ForeignKey(University, on_delete=models.CASCADE, verbose_name=_('University'))
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = _('Institute')
        verbose_name_plural = _('Institutes')
        ordering = ["-rating", "name"]
class Department(models.Model):
    """!
    Departments within an institute.

    @param name full name of department
    @param abbr abbreviation of department
    @param rating findstudent rating of department
    @param website official website
    @param institute institute the department belongs to
    """
    name = models.CharField(max_length=128, verbose_name=_('Name'))
    abbr = models.CharField(max_length=64, verbose_name=_('Abbreviation'))
    rating = models.IntegerField(default=0, verbose_name=_('Rating'))
    website = models.URLField(null=True, blank=True, verbose_name=_('Website'))
    institute = models.ForeignKey(Institute, on_delete=models.CASCADE, verbose_name=_('Institute'))
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = _('Department')
        verbose_name_plural = _('Departments')
        ordering = ["-rating", "name"]
class Lecturer(models.Model):
    """!
    Lecturers belonging to a department.

    @param last_name last name of lecturer
    @param first_name first name of lecturer
    @param patronymic patronymic of lecturer (optional)
    @param photo photo of lecturer (optional)
    @param rating findstudent rating of lecturer
    @param department department the lecturer belongs to
    """
    last_name = models.CharField(max_length=64, verbose_name=_('Last name'))
    first_name = models.CharField(max_length=64, verbose_name=_('First name'))
    patronymic = models.CharField(max_length=64, null=True, blank=True, verbose_name=_('Patronymic'))
    photo = ProcessedImageField(upload_to='data/lecturer/',
                                format='PNG',
                                null=True,
                                blank=True,
                                verbose_name=_('Photo'))
    rating = models.IntegerField(default=0, verbose_name=_('Rating'))
    department = models.ForeignKey(Department, on_delete=models.CASCADE, verbose_name=_('Department'))
    def __str__(self):
        return self.last_name
    class Meta:
        verbose_name = _('Lecturer')
        verbose_name_plural = _('Lecturers')
        ordering = ["-rating", "last_name"]
class Period(models.Model):
    """!
    Lesson periods (time slots) of a university's schedule.

    @param number ordinal number of the period within a day
    @param start_time time the period starts
    @param end_time time the period ends
    @param university university the period belongs to
    """
    number = models.IntegerField(verbose_name=_('Number'))
    start_time = models.TimeField(verbose_name=_('Start time'))
    end_time = models.TimeField(verbose_name=_('End time'))
    university = models.ForeignKey(University, on_delete=models.CASCADE, verbose_name=_('University'))
    def __str__(self):
        return str(self.number)
    class Meta:
        verbose_name = _('Period')
        verbose_name_plural = _('Periods')
        ordering = ["number"]
class StudentGroup(models.Model):
    """!
    Student groups mirrored from a VK community.

    @param name name of the group
    @param group_id VK group id
    @param rating findstudent rating of the group
    @param avatar avatar image (downloaded from avatar_url on save)
    @param avatar_url source URL of the avatar
    @param institute institute the group belongs to
    @param department department the group belongs to (optional)
    """
    name = models.CharField(max_length=64, unique=True, verbose_name=_('Name'))
    group_id = models.IntegerField(unique=True, verbose_name=_('VK group id'))
    rating = models.IntegerField(default=0, verbose_name=_('Rating'))
    avatar = ProcessedImageField(upload_to='data/student_group/',
                                 format='PNG',
                                 null=True,
                                 blank=True,
                                 verbose_name=_('Avatar'))
    avatar_url = models.URLField(null=True, blank=True, verbose_name=_('Avatar URL'))
    institute = models.ForeignKey(Institute, on_delete=models.CASCADE, verbose_name=_('Institute'))
    department = models.ForeignKey(Department, on_delete=models.CASCADE, null=True, blank=True,
                                   verbose_name=_('Department'))
    def save(self, *args, **kwargs):
        """!
        Download the avatar from avatar_url into the avatar image field,
        then persist the model as usual.
        """
        from django.core.files.temp import NamedTemporaryFile
        import shutil
        import requests
        import uuid
        if self.avatar_url is not None:
            # Stream the remote image into a temp file, then hand it to the
            # image field under a random name (save=False defers the DB write
            # to the super().save() below).
            # NOTE(review): the avatar is re-downloaded on every save() while
            # avatar_url is set, and the HTTP status is never checked - TODO
            # consider response.raise_for_status() and a download-once guard.
            response = requests.get(self.avatar_url, stream=True)
            img_temp = NamedTemporaryFile()
            shutil.copyfileobj(response.raw, img_temp)
            random_name = uuid.uuid4().hex + ".png"
            self.avatar.save(random_name, img_temp, save=False)
        super(StudentGroup, self).save(*args, **kwargs)
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = _('StudentGroup')
        verbose_name_plural = _('StudentGroup')
        ordering = ["-rating", "name"]
class Sex(models.Model):
    """!
    Sex (gender) reference table.

    @param name name of the sex
    """
    name = models.CharField(max_length=64, unique=True, verbose_name=_('Name'))

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = _('Sex')
        # Bug fix: the plural verbose name was misspelled as 'Sexs'.
        verbose_name_plural = _('Sexes')
class EmotionType(models.Model):
    """!
    Reference table of emotion types.

    @param name human-readable name of the emotion type
    @param system_name machine name of the emotion type
    """
    name = models.CharField(max_length=64, unique=True, verbose_name=_('Name'))
    system_name = models.CharField(max_length=64, unique=True, verbose_name=_('System name'))
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = _('Emotion type')
        verbose_name_plural = _('Emotion types')
class LandmarkType(models.Model):
    """!
    Reference table of facial-landmark types.

    @param name human-readable name of the landmark type
    @param system_name machine name of the landmark type
    """
    name = models.CharField(max_length=64, unique=True, verbose_name=_('Name'))
    system_name = models.CharField(max_length=64, unique=True, verbose_name=_('System name'))
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = _('Landmark type')
        verbose_name_plural = _('Landmark types')
class Emotion(models.Model):
    """!
    A detected emotion together with its confidence score.

    @param emotion_type type of the detected emotion
    @param emotion_confidence detection confidence
    """
    emotion_type = models.ForeignKey(EmotionType, on_delete=models.CASCADE, verbose_name=_('Emotion type'))
    emotion_confidence = models.FloatField(verbose_name=_('Emotion confidence'))
    def __str__(self):
        return self.emotion_type.name
    class Meta:
        verbose_name = _('Emotion')
        verbose_name_plural = _('Emotions')
        # Most confident emotions first.
        ordering = ["-emotion_confidence"]
class Landmark(models.Model):
    """!
    A detected facial landmark with its position.

    @param landmark_type type of the landmark
    @param x_coordinate X position of the landmark
    @param y_coordinate Y position of the landmark
    """
    landmark_type = models.ForeignKey(LandmarkType, on_delete=models.CASCADE, verbose_name=_('Landmark type'))
    x_coordinate = models.FloatField(verbose_name=_('X coordinate'))
    y_coordinate = models.FloatField(verbose_name=_('Y coordinate'))
    def __str__(self):
        return self.landmark_type.name
    class Meta:
        verbose_name = _('Landmark')
        verbose_name_plural = _('Landmarks')
class DetectedFace(models.Model):
    """!
    A face detected on a student photo.

    The field set mirrors the per-face details returned by
    Amazon().detect_by_image() (see StudentPhoto.save): bounding box,
    age range, value/confidence attribute pairs, emotions, landmarks,
    pose angles and quality scores.
    """
    bounding_box_width = models.FloatField(verbose_name=_('Bounding box width'))
    bounding_box_height = models.FloatField(verbose_name=_('Bounding box height'))
    bounding_box_left = models.FloatField(verbose_name=_('Bounding box left'))
    bounding_box_top = models.FloatField(verbose_name=_('Bounding box top'))
    age_range_low = models.IntegerField(verbose_name=_('Age range low'))
    age_range_high = models.IntegerField(verbose_name=_('Age range high'))
    smile_value = models.BooleanField(verbose_name=_('Smile value'))
    smile_confidence = models.FloatField(verbose_name=_('Smile confidence'))
    eyeglasses_value = models.BooleanField(verbose_name=_('Eyeglasses value'))
    eyeglasses_confidence = models.FloatField(verbose_name=_('Eyeglasses confidence'))
    sunglasses_value = models.BooleanField(verbose_name=_('Sunglasses value'))
    sunglasses_confidence = models.FloatField(verbose_name=_('Sunglasses confidence'))
    gender_value = models.ForeignKey(Sex, on_delete=models.CASCADE, verbose_name=_('Gender value'))
    gender_confidence = models.FloatField(verbose_name=_('Gender confidence'))
    beard_value = models.BooleanField(verbose_name=_('Beard value'))
    beard_confidence = models.FloatField(verbose_name=_('Beard confidence'))
    mustache_value = models.BooleanField(verbose_name=_('Mustache value'))
    mustache_confidence = models.FloatField(verbose_name=_('Mustache confidence'))
    open_eyes_value = models.BooleanField(verbose_name=_('Eyes open value'))
    open_eyes_confidence = models.FloatField(verbose_name=_('Eyes open confidence'))
    open_mouth_value = models.BooleanField(verbose_name=_('Mouth open value'))
    open_mouth_confidence = models.FloatField(verbose_name=_('Mouth open confidence'))
    emotions = models.ManyToManyField(Emotion, verbose_name=_('Emotions'))
    landmarks = models.ManyToManyField(Landmark, verbose_name=_('Landmarks'))
    roll_pose = models.FloatField(verbose_name=_('Roll pose'))
    # NOTE(review): 'yam_pose' is a typo for the yaw angle (verbose name is
    # 'Yaw pose'); renaming the field would require a schema migration and
    # caller updates, so the name is kept as-is.
    yam_pose = models.FloatField(verbose_name=_('Yaw pose'))
    pitch_pose = models.FloatField(verbose_name=_('Pitch pose'))
    brightness_quality = models.FloatField(verbose_name=_('Brightness quality'))
    sharpness_quality = models.FloatField(verbose_name=_('Sharpness quality'))
    confidence = models.FloatField(verbose_name=_('Confidence'))
    face_id = models.CharField(max_length=64, null=True, blank=True, verbose_name=_('Face ID'))
    image_id = models.CharField(max_length=64, null=True, blank=True, verbose_name=_('Image ID'))
    def __str__(self):
        return "{}x{}".format(self.bounding_box_width, self.bounding_box_height)
    class Meta:
        verbose_name = _('Detected Face')
        verbose_name_plural = _('Detected Faces')
        ordering = ["id"]
class Student(models.Model):
last_name = models.CharField(max_length=64, verbose_name=_('Last name'))
first_name = models.CharField(max_length=64, verbose_name=_('First name'))
user_id = models.IntegerField(unique=True, verbose_name=_('User ID'))
nickname = models.CharField(max_length=64, verbose_name=_('Nickname'), null=True, blank=True)
birthday = models.DateField(null=True, blank=True, verbose_name=_('Birthday'))
sex = models.ForeignKey(Sex, on_delete=models.CASCADE, verbose_name=_('Sex'))
rating = models.IntegerField(default=0, verbose_name=_('Rating'))
real_last_name = models.CharField(max_length=64, null=True, blank=True, verbose_name=_('Real last name'))
real_first_name = models.CharField(max_length=64, null=True, blank=True, verbose_name=_('Real first name'))
patronymic = models.CharField(max_length=64, null=True, blank=True, verbose_name=_('Patronymic'))
city = models.ForeignKey(City, on_delete=models.CASCADE, null=True, blank=True, related_name="city",
verbose_name=_('City'))
home_town = models.ForeignKey(City, on_delete=models.CASCADE, null=True, blank=True, related_name="home_town",
verbose_name=_('Home town'))
status = models.CharField(max_length=128, null=True, blank=True, verbose_name=_('Status'))
has_photo = models.BooleanField(default=False, verbose_name=_('Has photo'))
is_closed = models.BooleanField(default=False, verbose_name=_('Is closed'))
is_deactivated = models.BooleanField(default=False, verbose_name=_('Is deactivated'))
record_create_date = models.DateTimeField(auto_now_add=True, verbose_name=_('Record create date'))
record_update_date = models.DateTimeField(auto_now=True, verbose_name=_('Record update date'))
avatar = ProcessedImageField(upload_to='data/student/avatar/',
format='PNG',
null=True,
blank=True,
verbose_name=_('Avatar'))
avatar_original = ProcessedImageField(upload_to='data/student/avatar_original/',
format='PNG',
null=True,
blank=True,
verbose_name=_('Original avatar'))
avatar_url = models.URLField(null=True, blank=True, verbose_name=_('Avatar URL'))
avatar_original_url = models.URLField(null=True, blank=True, verbose_name=_('Original avatar URL'))
university = models.ForeignKey(University, on_delete=models.CASCADE, null=True, blank=True,
verbose_name=_('University'))
student_group = models.ForeignKey(StudentGroup, on_delete=models.CASCADE, null=True, blank=True,
verbose_name=_('Student Group'))
def save(self, *args, **kwargs):
from django.core.files.temp import NamedTemporaryFile
import shutil
import requests
import uuid
if self.avatar_url is not None:
response = requests.get(self.avatar_url, stream=True)
img_temp = NamedTemporaryFile()
shutil.copyfileobj(response.raw, img_temp)
random_name = uuid.uuid4().hex + ".png"
self.avatar.save(random_name, img_temp, save=False)
if self.avatar_original_url is not None:
response = requests.get(self.avatar_original_url, stream=True)
img_temp = NamedTemporaryFile()
shutil.copyfileobj(response.raw, img_temp)
random_name = uuid.uuid4().hex + ".png"
self.avatar_original.save(random_name, img_temp, save=False)
# now image data are in img_temp, how to pass that to ProcessedImageField?
super(Student, self).save(*args, **kwargs)
def __str__(self):
return "{} {}".format(self.last_name, self.first_name)
    class Meta:
        # Admin display names; default ordering shows best-rated students
        # first, ties broken alphabetically by last name.
        verbose_name = _('Student')
        verbose_name_plural = _('Students')
        ordering = ["-rating", "last_name"]
class StudentPhoto(models.Model):
    """A photo of a student, analysed by the Amazon face-detection helper.

    On save the image is downloaded from ``photo_url``, stored locally,
    passed through face detection, and each detected face is persisted as a
    ``DetectedFace`` row and indexed under this record's id.
    """

    owner = models.ForeignKey(Student, on_delete=models.CASCADE, verbose_name=_('Student'))
    # When the photo itself was taken/posted (distinct from record dates).
    photo_create_date = models.DateTimeField(verbose_name=_('Photo create date'))
    record_create_date = models.DateTimeField(auto_now_add=True, verbose_name=_('Record create date'))
    record_update_date = models.DateTimeField(auto_now=True, verbose_name=_('Record update date'))
    detected_faces = models.ManyToManyField(DetectedFace, verbose_name=_('Faces details'))
    # Locally stored copy of the image fetched from photo_url.
    photo = ProcessedImageField(upload_to='data/student_photos/',
                                format='PNG',
                                verbose_name=_('Photo'))
    photo_url = models.URLField(unique=True, verbose_name=_('Photo URL'))

    def save(self, *args, **kwargs):
        """Download the photo, save the row, then detect and index faces.

        NOTE(review): the Amazon() calls below run on every save; there is
        no guard against re-processing an already-analysed record — confirm
        whether repeated saves are expected.
        """
        from django.core.files.temp import NamedTemporaryFile
        import shutil
        import requests
        import uuid
        if self.photo_url is not None:
            # Stream the remote image into a temp file and attach it to the
            # ProcessedImageField under a random PNG name (no DB write yet).
            response = requests.get(self.photo_url, stream=True)
            img_temp = NamedTemporaryFile()
            shutil.copyfileobj(response.raw, img_temp)
            random_code = uuid.uuid4().hex
            random_name = random_code + ".png"
            self.photo.save(random_name, img_temp, save=False)
        super(StudentPhoto, self).save(*args, **kwargs)
        picture_name = self.photo.name
        # Run face detection on the stored picture via the Amazon helper
        # (presumably AWS Rekognition — confirm against the Amazon class).
        detected_faces = Amazon().detect_by_image(picture_name=picture_name)['FaceDetails']
        for face in detected_faces:
            # Persist each FaceDetails structure as a DetectedFace row.
            # NOTE(review): 'yam_pose' looks like a typo for yaw, but the
            # kwarg must match the DetectedFace field name, so it is kept.
            details, created = DetectedFace.objects.get_or_create(
                bounding_box_width=face["BoundingBox"]["Width"],
                bounding_box_height=face["BoundingBox"]["Height"],
                bounding_box_left=face["BoundingBox"]["Left"],
                bounding_box_top=face["BoundingBox"]["Top"],
                age_range_high=face["AgeRange"]["High"],
                age_range_low=face["AgeRange"]["Low"],
                smile_value=face["Smile"]["Value"],
                smile_confidence=face["Smile"]["Confidence"],
                eyeglasses_value=face["Eyeglasses"]["Value"],
                eyeglasses_confidence=face["Eyeglasses"]["Confidence"],
                sunglasses_value=face["Sunglasses"]["Value"],
                sunglasses_confidence=face["Sunglasses"]["Confidence"],
                gender_value=Sex.objects.get(name_en=face["Gender"]["Value"]),
                gender_confidence=face["Gender"]["Confidence"],
                beard_value=face["Beard"]["Value"],
                beard_confidence=face["Beard"]["Confidence"],
                mustache_value=face["Mustache"]["Value"],
                mustache_confidence=face["Mustache"]["Confidence"],
                open_eyes_value=face["EyesOpen"]["Value"],
                open_eyes_confidence=face["EyesOpen"]["Confidence"],
                open_mouth_value=face["MouthOpen"]["Value"],
                open_mouth_confidence=face["MouthOpen"]["Confidence"],
                roll_pose=face["Pose"]["Roll"],
                yam_pose=face["Pose"]["Yaw"],
                pitch_pose=face["Pose"]["Pitch"],
                brightness_quality=face["Quality"]["Brightness"],
                sharpness_quality=face["Quality"]["Sharpness"],
                confidence=face["Confidence"]
            )
            self.detected_faces.add(details)
            details.save()
        # Register the stored photo in the face index, keyed by this row's pk.
        Amazon().index_faces(picture_name=self.photo.name,
                             external_image_id=str(self.id))

    def __str__(self):
        return "{} {}".format(self.owner.last_name, self.owner.first_name)

    class Meta:
        verbose_name = _('Student Photo')
        verbose_name_plural = _('Student Photos')
        ordering = ["-photo_create_date"]
class IdentifiedFace(models.Model):
    """A single face-match result linked to a Student.

    Stores the match's bounding box, detection confidence, similarity score
    against the query image, and the external face/image identifiers.
    """
    bounding_box_width = models.FloatField(verbose_name=_('Bounding box width'))
    bounding_box_height = models.FloatField(verbose_name=_('Bounding box height'))
    bounding_box_left = models.FloatField(verbose_name=_('Bounding box left'))
    bounding_box_top = models.FloatField(verbose_name=_('Bounding box top'))
    confidence = models.FloatField(verbose_name=_('Confidence'))
    similarity = models.FloatField(verbose_name=_('Similarity'))
    # External identifiers from the face-search backend.
    face_id = models.CharField(max_length=64, verbose_name=_('Face ID'))
    image_id = models.CharField(max_length=64, verbose_name=_('Image ID'))
    student = models.ForeignKey(Student, on_delete=models.CASCADE, verbose_name=_('Student'))

    def __str__(self):
        return self.face_id

    class Meta:
        verbose_name = _('Identified Face')
        verbose_name_plural = _('Identified Faces')
        # Best matches first.
        ordering = ["-similarity"]
class UniversalIdentifiedFace(models.Model):
    """Groups all IdentifiedFace matches that resolve to one student."""
    student = models.ForeignKey(Student, on_delete=models.CASCADE, verbose_name=_('Student'))
    identified_faces = models.ManyToManyField(IdentifiedFace, verbose_name=_('Identified faces'))

    def __str__(self):
        # NOTE(review): assumes Student defines a user_id field (declared
        # above this view) — confirm.
        return str(self.student.user_id)

    class Meta:
        verbose_name = _('Universal Identified Face')
        verbose_name_plural = _('Universal Identified Faces')
class StudentSearch(models.Model):
    """A face-search request: the query photo plus its detection results."""
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name=_('Owner'))
    # Random hex token identifying this search; regenerated on every save().
    code = models.CharField(max_length=64, unique=True, verbose_name=_('Code'))
    record_create_date = models.DateTimeField(auto_now_add=True, verbose_name=_('Record create date'))
    record_update_date = models.DateTimeField(auto_now=True, verbose_name=_('Record update date'))
    detected_faces = models.ManyToManyField(DetectedFace, verbose_name=_('Detected faces'))
    identified_faces = models.ManyToManyField(IdentifiedFace, verbose_name=_('Identified faces'))
    universal_identified_face = models.ManyToManyField(UniversalIdentifiedFace,
                                                      verbose_name=_('Universal identified faces'))
    # The query image: either downloaded from photo_url or uploaded directly.
    photo = ProcessedImageField(upload_to='data/student_search/',
                                format='PNG',
                                verbose_name=_('Photo'))
    photo_url = models.URLField(null=True, blank=True, verbose_name=_('Photo URL'))

    def __str__(self):
        return "{}".format(self.owner)

    class Meta:
        verbose_name = _('Student Search')
        verbose_name_plural = _('Student Searches')
        ordering = ["-owner", '-record_create_date']

    def save(self, *args, **kwargs):
        """Assign a fresh search code and store the query photo.

        When ``photo_url`` is set the image is downloaded from it;
        otherwise the directly uploaded photo is re-saved under a random
        name so stored file names never leak the original filename.
        """
        from django.core.files.temp import NamedTemporaryFile
        import shutil
        import requests
        import uuid
        # Both branches previously duplicated these two assignments;
        # the code/name generation is branch-independent, so hoist it.
        self.code = uuid.uuid4().hex
        random_name = uuid.uuid4().hex + ".png"
        # Truthiness check: photo_url is null=True/blank=True, so it may be
        # '' as well as None; neither should trigger a download.
        if self.photo_url:
            response = requests.get(self.photo_url, stream=True)
            img_temp = NamedTemporaryFile()
            shutil.copyfileobj(response.raw, img_temp)
            img_temp.flush()  # all bytes on disk before Django reads them back
            self.photo.save(random_name, img_temp, save=False)
        else:
            self.photo.save(random_name, self.photo, save=False)
        super(StudentSearch, self).save(*args, **kwargs)
|
# Generated by Django 2.0.9 on 2018-10-08 20:43
import colorfield.fields
from django.db import migrations
class Migration(migrations.Migration):
    # Adds a background colour picker field to the blog index page.

    dependencies = [
        ('blog', '0002_auto_20181008_2031'),
    ]

    operations = [
        migrations.AddField(
            model_name='blogindexpage',
            name='colour',
            # Default is the light blue '#bbcee5'.
            field=colorfield.fields.ColorField(default='#bbcee5', max_length=18),
        ),
    ]
|
import argparse
import pickle
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from matplotlib import pyplot as plt
import sys
import torch
from test_dl_model import get_predictions_from_FNC_1_Test
def parse_cmd_line_args():
    """Parse command-line arguments for the H-Vec / B-Vec visualizer.

    Returns:
        argparse.Namespace with test_pkl, weights_file, plots_dir,
        misclassified and apply_pos_filter attributes.
    """
    parser = argparse.ArgumentParser(description='Visualize H-Vec & B-Vec')
    # 'default=None' is redundant on required arguments, so it is omitted.
    parser.add_argument('-test_pkl', type=str, required=True,
                        help='Path to Test PKL File')
    parser.add_argument('-weights_file', type=str, required=True,
                        help='Path to Weights File')
    parser.add_argument('-plots_dir', type=str, required=True,
                        help='Path to the Plots Directory to Output')
    parser.add_argument('-misclassified', action='store_true', default=False,
                        help='Show plots only for misclassified points')
    parser.add_argument('-apply_pos_filter', action='store_true', default=False,
                        help='Apply POS Filters')
    return parser.parse_args()
def apply_pca_gen_plot(grouped_predictions, output_dir):
    """Project headline/body feature vectors to 2-D with PCA and plot them.

    One subplot per label; headline points in red, body points in blue.
    Writes the figure to <output_dir>/h_b_ft_visualization.png.
    """
    fig = plt.figure()
    fig.suptitle('PCA Visualization for Headline & Body FT Vectors', fontsize=9)
    n_groups = len(grouped_predictions)
    ylabel_done = False
    for subplot_idx, (label, vecs) in enumerate(grouped_predictions.items(), start=1):
        print(f'Processing label {label.upper()}')
        n_headlines = len(vecs['h_vec'])
        stacked = np.vstack([vecs['h_vec'], vecs['b_vec']])
        print('Standardizing Data')
        stacked = StandardScaler().fit_transform(stacked)
        print('Applying PCA')
        pca = PCA(n_components=2)
        components = pca.fit_transform(stacked)
        print(f'Explained Variance: {pca.explained_variance_}')
        print('Plotting Datapoints')
        ax = fig.add_subplot(1, n_groups, subplot_idx)
        ax.set_xlabel('Principal Component 1', fontsize=8)
        # Only the left-most subplot carries the y-axis label.
        if not ylabel_done:
            ax.set_ylabel('Principal Component 2', fontsize=8)
            ylabel_done = True
        ax.set_title(f'{label.upper()}', fontsize=8)
        # Rows [0, n_headlines) are headline vectors, the rest are body vectors.
        ax.scatter(components[:n_headlines, 0], components[:n_headlines, 1], c='r')
        ax.scatter(components[n_headlines:, 0], components[n_headlines:, 1], c='b')
        ax.legend(['Headline Features', 'Body Features'], loc='upper left')
    fig.subplots_adjust(wspace=0.5)
    plt.savefig(f'{output_dir}/h_b_ft_visualization.png', dpi=500)
def get_cosine_sim_distribution_plot(grouped_predictions, output_dir):
    """Plot per-label histograms of headline/body cosine similarity.

    Writes the figure to <output_dir>/h_b_cos_dis_visualization.png.
    """
    fig = plt.figure()
    fig.suptitle('Cosine Distance Distributions', fontsize=9)
    n_groups = len(grouped_predictions)
    ylabel_done = False
    for subplot_idx, (label, vecs) in enumerate(grouped_predictions.items(), start=1):
        print('Calculating Cosine Distances')
        numerator = np.sum(vecs['h_vec'] * vecs['b_vec'], axis=1)
        norm_product = np.linalg.norm(vecs['h_vec'], axis=1) * np.linalg.norm(vecs['b_vec'], axis=1)
        # Tiny epsilon guards against division by zero for zero vectors.
        cos_sim = numerator / (0.000000001 + norm_product)
        ax = fig.add_subplot(1, n_groups, subplot_idx)
        ax.hist(cos_sim, facecolor='blue', alpha=0.5)
        ax.set_xlabel('Cosine Distance', fontsize=8)
        # Only the left-most subplot carries the y-axis label.
        if not ylabel_done:
            ax.set_ylabel('Frequency', fontsize=8)
            ylabel_done = True
        ax.set_title(f'{label.upper()}', fontsize=8)
    fig.subplots_adjust(wspace=0.5)
    plt.savefig(f'{output_dir}/h_b_cos_dis_visualization.png', dpi=500)
if __name__ == '__main__':
    args = parse_cmd_line_args()
    DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('-------Start Visualize-------')
    print('Getting Predictions from Test Dataset')
    # Run the trained model over the FNC-1 test set to obtain predictions
    # plus the headline (h_vec) and body (b_vec) feature vectors.
    dl_model_pred, h_vec, b_vec = get_predictions_from_FNC_1_Test(
        args.weights_file, args.apply_pos_filter, DEVICE)
    print('Getting datapoints from PKL file')
    with open(args.test_pkl, 'rb') as test_pkl_fp:
        datapoints = pickle.load(test_pkl_fp)
    print('Organizing the data')
    dl_model_pred = np.array(dl_model_pred)
    gold_labels = np.array([datapoint['y'] for datapoint in datapoints])
    # Filter only related labels
    non_unrelated_indices = gold_labels != 'unrelated'
    gold_labels = gold_labels[non_unrelated_indices]
    dl_model_pred = dl_model_pred[non_unrelated_indices]
    h_vec = h_vec[non_unrelated_indices]
    b_vec = b_vec[non_unrelated_indices]
    labels = np.unique(gold_labels)
    # Optionally keep only the points the model got wrong.
    if args.misclassified:
        misclassified_indices = gold_labels != dl_model_pred
        gold_labels = gold_labels[misclassified_indices]
        h_vec = h_vec[misclassified_indices]
        b_vec = b_vec[misclassified_indices]
        dl_model_pred = dl_model_pred[misclassified_indices]
    # Bucket the feature vectors by gold label for per-label plotting.
    grouped_predictions = {
        label: {
            'h_vec': h_vec[gold_labels == label],
            'b_vec': b_vec[gold_labels == label]
        }
        for label in labels
    }
    apply_pca_gen_plot(grouped_predictions, args.plots_dir)
    get_cosine_sim_distribution_plot(grouped_predictions, args.plots_dir)
    print('--------End Visualize--------')
|
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
from src.types.blockchain_format.coin import Coin
from src.types.blockchain_format.sized_bytes import bytes32
from src.util.ints import uint32, uint64
from src.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import puzzle_for_pk
def create_puzzlehash_for_pk(pub_key: G1Element) -> bytes32:
    """Return the tree hash of the standard puzzle for the given public key."""
    return puzzle_for_pk(bytes(pub_key)).get_tree_hash()
def signature_for_coinbase(coin: Coin, pool_private_key: PrivateKey):
    """Sign the serialized coin with the pool key using BLS AugScheme."""
    # noinspection PyTypeChecker
    return G2Element.from_bytes(bytes(AugSchemeMPL.sign(pool_private_key, bytes(coin))))
def sign_coinbase_coin(coin: Coin, private_key: PrivateKey):
    """Sign *coin* with *private_key*; raise ValueError if the key is missing."""
    if private_key is not None:
        return signature_for_coinbase(coin, private_key)
    raise ValueError("unknown private key")
def pool_parent_id(block_height: uint32, genesis_challenge: bytes32) -> bytes32:
    """Deterministic parent coin id for a pool reward coin.

    Built from the first half of the genesis challenge followed by the
    block height as 16 big-endian bytes; the return value is a bytes32
    (the annotation previously claimed uint32, but a bytes32 is constructed).
    """
    return bytes32(genesis_challenge[:16] + block_height.to_bytes(16, "big"))
def farmer_parent_id(block_height: uint32, genesis_challenge: bytes32) -> bytes32:
    """Deterministic parent coin id for a farmer reward coin.

    Built from the second half of the genesis challenge followed by the
    block height as 16 big-endian bytes; the return value is a bytes32
    (the annotation previously claimed uint32, but a bytes32 is constructed).
    """
    return bytes32(genesis_challenge[16:] + block_height.to_bytes(16, "big"))
def create_pool_coin(block_height: uint32, puzzle_hash: bytes32, reward: uint64, genesis_challenge: bytes32):
    """Build the pool reward coin for a block with its deterministic parent id."""
    parent_id = pool_parent_id(block_height, genesis_challenge)
    return Coin(parent_id, puzzle_hash, reward)
def create_farmer_coin(block_height: uint32, puzzle_hash: bytes32, reward: uint64, genesis_challenge: bytes32):
    """Build the farmer reward coin for a block with its deterministic parent id."""
    parent_id = farmer_parent_id(block_height, genesis_challenge)
    return Coin(parent_id, puzzle_hash, reward)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007,2009,2011 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module represents the Arabic language.
.. seealso:: http://en.wikipedia.org/wiki/Arabic_language
"""
import re
from translate.lang import common
def reverse_quotes(text):
    """Flip each “…” quotation in *text* to the ”…“ orientation."""
    def _flip(match):
        return u"”%s“" % match.group(1)
    return re.sub(u'“([^”]+)”', _flip, text)
class ar(common.Common):
    """This class represents Arabic."""

    # Arabic lists are joined with the Arabic comma.
    listseperator = u"، "

    # Western punctuation mapped to its Arabic equivalent on translation.
    puncdict = {
        u",": u"،",
        u";": u"؛",
        u"?": u"؟",
        # This causes problems with variables, so commented out for now:
        # u"%": u"٪",
    }

    # Quality checks that do not apply to Arabic (the script has no capitals).
    ignoretests = ["startcaps", "simplecaps", "acronyms"]

    def punctranslate(cls, text):
        """Convert punctuation via the base class, then flip quote direction."""
        text = super(cls, cls).punctranslate(text)
        return reverse_quotes(text)
    # Pre-decorator classmethod idiom, kept to match the module's style.
    punctranslate = classmethod(punctranslate)
|
import datetime
import io
import discord
from discord.ext import commands, tasks
import constants
# Messages authored by members holding any of these role ids are never
# auto-deleted (see role_delete_check in AutoDelete.deleter); pinned
# messages are also always kept.
NO_DELETE_ROLES = (
    516369428615528459,  # DDB Staff
    # 516370028053004306  # Moderator
)

# data schema:
# autodelete.json: dict int->int channel->days
class AutoDelete(commands.Cog):
    """Daily purge of old messages from configured channels.

    Rules are persisted in the bot db under "autodelete" as a mapping of
    channel id -> retention in days.  JSON serialization turns integer keys
    into strings, so keys are normalized to str everywhere; otherwise a rule
    added in one session could not be removed after a bot restart.
    """

    def __init__(self, bot):
        self.bot = bot
        # Normalize keys to str: entries loaded from JSON already have str
        # keys, while entries added at runtime come from int channel ids.
        self.autodelete_channels = {str(channel_id): days
                                    for channel_id, days in bot.db.jget("autodelete", {}).items()}
        self.deleter.start()

    def cog_unload(self):
        self.deleter.cancel()

    # ==== tasks ====
    @tasks.loop(hours=24)
    async def deleter(self):
        """Once a day, purge messages older than each channel's retention."""
        def role_delete_check(msg):
            # don't delete messages by anyone with any of these roles, or pinned messages
            return not (msg.pinned
                        or (isinstance(msg.author, discord.Member)
                            and set(r.id for r in msg.author.roles).intersection(NO_DELETE_ROLES)))

        await self.bot.wait_until_ready()
        log_channel = self.bot.get_channel(constants.OUTPUT_CHANNEL_ID)
        for channel_id, days in self.autodelete_channels.items():
            # Skip channels the bot can no longer see.
            channel = self.bot.get_channel(int(channel_id))
            if channel is None:
                continue
            print(f"Starting autodelete on #{channel.name}...")
            # Delete eligible messages and post a log file with their contents.
            try:
                delete_log = io.StringIO()
                deleted = await channel.purge(limit=None,
                                              check=role_delete_check,
                                              before=datetime.datetime.utcnow() - datetime.timedelta(days=days))
                for message in deleted:
                    delete_log.write(f"[{message.created_at.isoformat()}] {message.author} ({message.author.id})\n"
                                     f"{message.content}\n\n")
                if not deleted:
                    continue
                delete_log.seek(0)
                date = datetime.date.today()
                await log_channel.send(f"Deleted {len(deleted)} messages from {channel.mention}.",
                                       file=discord.File(delete_log, filename=f"{channel.name}-{date}.log"))
            except discord.HTTPException as e:
                print(e)
                await log_channel.send(f"Unable to delete messages from {channel.mention}: {e}")

    # ==== commands ====
    @commands.group(invoke_without_command=True)
    @commands.has_role(constants.MOD_ROLE_ID)
    async def autodelete(self, ctx):
        """Shows all channels with active autodelete."""
        embed = discord.Embed(colour=0x60AFFF, title="Active Autodelete Channels")
        embed.description = '\n'.join(f"<#{channel}>: {days} days"
                                      for channel, days in self.autodelete_channels.items()) \
                            or "No active channels."
        embed.set_footer(text="Use \".autodelete add #channel #\" to add a channel rule, "
                              "or \".autodelete remove #channel\" to remove one.")
        await ctx.send(embed=embed)

    @autodelete.command(name='add')
    @commands.has_role(constants.MOD_ROLE_ID)
    async def autodelete_add(self, ctx, channel: discord.TextChannel, days: int):
        """Adds or updates an autodelete rule for the given channel."""
        if days < 1:
            return await ctx.send("Days must be at least 1.")
        # str key keeps in-memory state consistent with the JSON round-trip,
        # so remove() still finds the rule after a bot restart.
        self.autodelete_channels[str(channel.id)] = days
        self.bot.db.jset("autodelete", self.autodelete_channels)
        await ctx.send(f"Okay, added autodelete rule to delete messages older than {days} days from {channel.mention}.")

    @autodelete.command(name='remove')
    @commands.has_role(constants.MOD_ROLE_ID)
    async def autodelete_remove(self, ctx, channel: discord.TextChannel):
        """Removes an autodelete rule from a channel."""
        if str(channel.id) not in self.autodelete_channels:
            return await ctx.send(f"{channel.mention} has no autodelete rule.")
        del self.autodelete_channels[str(channel.id)]
        self.bot.db.jset("autodelete", self.autodelete_channels)
        await ctx.send(f"Okay, removed autodelete rule from {channel.mention}.")
def setup(bot):
    # discord.py extension entry point: register the cog on load.
    bot.add_cog(AutoDelete(bot))
|
"""
PFReader output module
Uses tablib
"""
import tablib
from pfreader.pfreader import data_classess
from . import exceptions
def get_output(lox_data):
    """Yield one (label, headers, rows) triple per known data class.

    For each class in data_classess, look up its payload in *lox_data*,
    instantiate it, and normalize its rows to the header width.  Missing
    payloads or headers are skipped for optional classes and raised as
    DataNotFound / HeaderNotFound for required ones.
    """
    for elem in data_classess:
        try:
            # Some classes read their payload under a separate data_label key.
            if hasattr(elem, "data_label"):
                data = lox_data[elem.data_label]
            else:
                data = lox_data[elem.label]
        except KeyError:
            if not elem.required:
                continue
            raise exceptions.DataNotFound(elem.label)
        d = elem(data)
        # NOTE(review): headers and rowlen are only bound when the instance
        # has get_header; a class without it would hit a NameError below.
        # Presumably every data class defines get_header — confirm.
        if hasattr(d, "get_header"):
            headers = d.get_header()
            if headers is None:
                if not elem.required:
                    continue
                raise exceptions.HeaderNotFound(elem.label)
            rowlen = len(headers)
        data = []
        if hasattr(d, "get_data"):
            for row in d.get_data():
                # Pad short rows with "" and clip long rows to the header width.
                while len(row) < rowlen:
                    row += [""]
                data.append(row[:rowlen])
        yield (elem.label, headers, data)
def get_databook(lox_data):
    """Collect every dataset produced by get_output into a tablib Databook."""
    book = tablib.Databook()
    for title, headers, rows in get_output(lox_data):
        sheet = tablib.Dataset(title=title)
        sheet.headers = headers
        for row in rows:
            sheet.append(row)
        book.add_sheet(sheet)
    return book
|
# -*- coding:gb2312 -*-
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
import os, hashlib,shutil,codecs
import numpy as np
from collections import defaultdict
from collections import OrderedDict
def VisitDir(arg, dirname, names):
    """os.path.walk visitor: append each regular file's full path to *arg*.

    *arg* is the accumulator list passed to os.path.walk (fileList below).
    Appending to the parameter instead of the module-level global makes the
    visitor self-contained; at the one call site both are the same object.
    """
    for filespath in names:
        name = os.path.join(dirname, filespath)
        if not os.path.isdir(name):
            arg.append(name)
# ---- duplicate-file finder (Python 2 script body) ----
# Walks a user-supplied directory, hashes every file with MD5, and writes
# groups of identical files to repeatFileList.txt inside that directory.
#if __name__=="__main__":
sep = os.sep
dirName = raw_input("Please input dir name.")  # e.g. u"/Users/zhouhang/Pictures"
dirPath = unicode(dirName,"utf8") + sep
if not os.path.exists(dirPath):
    print "%s is invalid!"%(dirPath)
    exit(0)
fileList=[]
# Collect every file path below dirPath (os.path.walk is Python 2 only).
os.path.walk(dirPath, VisitDir, fileList)
print "complete"
# Group file paths by MD5 digest; keys with more than one path are duplicates.
md5list=defaultdict(list)
tenPercent = len(fileList) / 10
count = 0
for i in fileList:
    count += 1
    md5file=open(i,"rb")
    md5=hashlib.md5(md5file.read()).hexdigest()
    md5file.close()
    md5list[md5].append(i)
    # Progress report every 10% of the files.
    if np.mod(count, tenPercent) == 0:
        print "%d percent md5 complete"%(count * 100 / len(fileList))
print "md5 complete"
#trashDir = dirName + sep + "tmpTrash/"
repeatFileName = dirName + sep + "repeatFileList.txt"
tenPercent = len(md5list) / 10
count = 0
with codecs.open(repeatFileName, 'w') as f:
    for i in md5list:
        count += 1
        if np.mod(count, tenPercent) == 0:
            print "%d percent md5 complete" % (count * 100 / len(fileList))
        if len(md5list[i]) > 1:
            # More than one path shares this digest: record the group,
            # separated by a dashed line.
            f.writelines("---------------\n")
            #if not os.path.exists(trashDir):
            #    os.mkdir(trashDir)
            ind = 1
            for j in md5list[i]:
                print ind,":",j
                ind += 1
                f.write(j)
                f.write("\n")
            # Interactive removal of duplicates, kept disabled:
            # flag = int(raw_input("what file will you want to remove?"))
            # if flag not in range(1,len(md5list[i])+1):
            #     continue
            # else:
            #     rfile = md5list[i][flag-1]
            #     print "remove ", rfile," to %s"%(trashDir)
            #     try:
            #         shutil.move(rfile, trashDir)
            #     except Exception, e:
            #         print Exception,":",e
|
import pytest
import requests
from bs4 import BeautifulSoup
def test_templates_loaded(hub):
    """
    Tests if every hub has loaded their custom login template.

    It checks that each hub's login page has their configured institutional logo.
    """
    domains = hub.spec['domain'] if isinstance(hub.spec['domain'], list) else [hub.spec['domain']]
    for domain in domains:
        response = requests.get(f'https://{domain}/hub/login')
        # The logo URL lives at a different config depth depending on template.
        if hub.spec["template"] == "base-hub":
            expected_logo_img = hub.spec["config"]["jupyterhub"]["homepage"]["templateVars"]["org"]["logo_url"]
        else:
            expected_logo_img = hub.spec["config"]["base-hub"]["jupyterhub"]["homepage"]["templateVars"]["org"]["logo_url"]
        logo_found = False
        page = BeautifulSoup(response.text, 'html.parser')
        for image in page.find_all('img'):
            if image.has_attr("class") and image["class"] == ["hub-logo"]:
                logo_found = True
                assert image['src'] == expected_logo_img
        # Ephemeral hubs are allowed to render without a logo.
        if hub.spec["template"] != "ephemeral-hub":
            assert logo_found
|
from rest_framework import status
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
from .renderers import UserJSONRenderer
from .serializers import (
LoginSerializer
)
class LoginAPIView(APIView):
    """Authenticate a user from posted credentials and return their data."""
    permission_classes = (AllowAny,)
    renderer_classes = (UserJSONRenderer,)
    serializer_class = LoginSerializer

    def post(self, request):
        """Validate the 'user' payload; 400 on invalid credentials, else 200."""
        serializer = self.serializer_class(data=request.data.get('user', {}))
        serializer.is_valid(raise_exception=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
|
from PIL import Image
import cv2
import time
# Grab one frame from the default webcam, convert it to a 60x60 grayscale
# image, and save it for the hand-check step.
camera = cv2.VideoCapture(0)
time.sleep(0.2)  # give the camera a moment to warm up
return_value, image = camera.read()
camera.release()  # release the device explicitly instead of relying on del
if not return_value:
    # Fail loudly instead of crashing later inside cvtColor with a cryptic error.
    raise RuntimeError("Could not read a frame from the camera")
# OpenCV delivers BGR; PIL expects RGB.
cv2im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
pilim = Image.fromarray(cv2im)
bmp = pilim.convert('L').resize([60, 60])
# Raw string: the original "Check\phand.jpg" only worked because \p is not
# a recognized escape; r"..." makes the backslash explicit and warning-free.
bmp.save(r"Check\phand.jpg")
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for continue_statements module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import continue_statements
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class ContinueCanonicalizationTest(converter_testing.TestCase):
  """Equivalence tests for the continue_statements converter.

  Each test defines a small fixture function using `continue` in some
  context, transforms it with the converter, and asserts the transformed
  function returns the same value as the original for several inputs.
  The fixture bodies are the test data and must not be altered.
  """

  def assertTransformedEquivalent(self, f, *inputs):
    """Assert f and its converted form agree on *inputs."""
    tr = self.transform(f, continue_statements)
    self.assertEqual(f(*inputs), tr(*inputs))

  def test_basic(self):
    # Single guarded continue in a while loop.
    def f(x):
      v = []
      while x > 0:
        x -= 1
        if x % 2 == 0:
          continue
        v.append(x)
      return v

    self.assertTransformedEquivalent(f, 0)
    self.assertTransformedEquivalent(f, 1)
    self.assertTransformedEquivalent(f, 3)
    self.assertTransformedEquivalent(f, 4)

  def test_multiple_continues(self):
    # Two consecutive guarded continues in the same loop body.
    def f(x):
      v = []
      while x > 0:
        x -= 1
        if x > 1:
          continue
        if x > 2:
          continue
        v.append(x)
      return v

    self.assertTransformedEquivalent(f, 0)
    self.assertTransformedEquivalent(f, 1)
    self.assertTransformedEquivalent(f, 3)
    self.assertTransformedEquivalent(f, 4)

  def test_multiple_continues_in_nested_scope(self):
    # A continue inside an except handler, nested in a for loop.
    def f(a):
      v = []
      for x in a:
        x -= 1
        if x > 100:
          continue
        try:
          raise ValueError('intentional')
        except ValueError:
          continue
        v.append(x)
      return v

    self.assertTransformedEquivalent(f, [])
    self.assertTransformedEquivalent(f, [1])
    self.assertTransformedEquivalent(f, [2])
    self.assertTransformedEquivalent(f, [1, 2, 3])

  def test_for_loop(self):
    # Guarded continue in a for loop instead of while.
    def f(a):
      v = []
      for x in a:
        x -= 1
        if x % 2 == 0:
          continue
        v.append(x)
      return v

    self.assertTransformedEquivalent(f, [])
    self.assertTransformedEquivalent(f, [1])
    self.assertTransformedEquivalent(f, [2])
    self.assertTransformedEquivalent(f, [1, 2, 3])

  def test_nested_with(self):
    # Continue from inside a with block.
    def f(x):
      v = []
      while x > 0:
        x -= 1
        with ops.name_scope(''):
          if x % 2 == 0:
            continue
        v.append(x)
      return v

    self.assertTransformedEquivalent(f, 0)
    self.assertTransformedEquivalent(f, 1)
    self.assertTransformedEquivalent(f, 3)
    self.assertTransformedEquivalent(f, 4)

  def test_nested_multiple_withs(self):
    # Continue in the first of two sibling with blocks.
    def f(x):
      v = []
      while x > 0:
        x -= 1
        with ops.name_scope(''):
          if x % 2 == 0:
            continue
        with ops.name_scope(''):
          v.append(x)
        v.append(x)
      return v

    self.assertTransformedEquivalent(f, 0)
    self.assertTransformedEquivalent(f, 1)
    self.assertTransformedEquivalent(f, 3)
    self.assertTransformedEquivalent(f, 4)

  def test_nested_multiple_withs_and_statements(self):
    # Statements interleaved between the guarded with and a later with.
    def f(x):
      v = []
      while x > 0:
        x -= 1
        with ops.name_scope(''):
          if x % 2 == 0:
            continue
          v.append(x)
        v.append(x)
        with ops.name_scope(''):
          v.append(x)
          v.append(x)
      return v

    self.assertTransformedEquivalent(f, 0)
    self.assertTransformedEquivalent(f, 1)
    self.assertTransformedEquivalent(f, 3)
    self.assertTransformedEquivalent(f, 4)

  def test_nested_multiple_withs_and_nested_withs(self):
    # A with block nested inside the guarded with block.
    def f(x):
      v = []
      while x > 0:
        x -= 1
        with ops.name_scope(''):
          if x % 2 == 0:
            continue
          with ops.name_scope(''):
            v.append(x)
        v.append(x)
        with ops.name_scope(''):
          v.append(x)
          v.append(x)
      return v

    self.assertTransformedEquivalent(f, 0)
    self.assertTransformedEquivalent(f, 1)
    self.assertTransformedEquivalent(f, 3)
    self.assertTransformedEquivalent(f, 4)

  def test_nested(self):
    # Continue guarded by a nested if/else with side effects on other lists.
    def f(x):
      v = []
      u = []
      w = []
      while x > 0:
        x -= 1
        if x % 2 == 0:
          if x % 3 != 0:
            u.append(x)
          else:
            w.append(x)
            continue
        v.append(x)
      return v, u, w

    self.assertTransformedEquivalent(f, 0)
    self.assertTransformedEquivalent(f, 1)
    self.assertTransformedEquivalent(f, 3)
    self.assertTransformedEquivalent(f, 4)

  def test_multiple_guarded_continues_with_side_effects(self):
    # Guard conditions themselves have side effects; ordering must survive.
    def f(x):
      def track(u, x):
        u.append(x)
        return x

      u = []
      v = []
      while x > 0:
        x -= 1
        if track(u, x) > 1:
          continue
        if track(u, x) > 2:
          continue
        v.append(x)
      return u, v

    self.assertTransformedEquivalent(f, 3)
    self.assertTransformedEquivalent(f, 2)
# Run under the TensorFlow test runner when executed directly.
if __name__ == '__main__':
  test.main()
|
from collections import namedtuple
from serial.tools import list_ports
from unittest.mock import MagicMock
from mpf.exceptions.runtime_error import MpfRuntimeError
from mpf.platforms import autodetect
from mpf.tests.MpfTestCase import MpfTestCase
WINDOWS_PORTS = ['TEST', 'FOO', 'COM0', 'COM1', 'COM2', 'COM3', 'tty']
MAC_PORTS = ['/dev/tty', '/dev/cu.usb', '/dev/tty.usbmodem0', '/dev/tty.usbmodem1', '/dev/tty.usbmodem2', '/dev/tty.usbmodem3']
LINUX_PORTS = ['/dev/Bluetooth', '/dev/foo', '/dev/cu.ACM0', '/dev/cu.ACM1', '/dev/cu.ACM2', '/dev/cu.ACM3']
NO_PORTS = ['/dev/tty', 'COMX', '/dev/tty.usbmodem', 'foo']
class TestAutoDetect(MpfTestCase):
    """Serial-port autodetection tests across Windows/Mac/Linux port lists."""

    def mock_ports(self, ports):
        """Wrap raw device names in objects mimicking serial's ListPortInfo."""
        MockPort = namedtuple('MockPort', ['device'])
        return [MockPort(port) for port in ports]

    def test_smartmatrix(self):
        """The first detected FAST quad port is used for the SmartMatrix DMD."""
        autodetect._find_fast_quad = MagicMock(return_value=["/a", "/b", "/c"])
        result = autodetect.autodetect_smartmatrix_dmd_port()
        self.assertEqual(result, "/a")

    def test_retro(self):
        """Retro detection returns exactly one port (the third candidate)."""
        for ports in (WINDOWS_PORTS, MAC_PORTS, LINUX_PORTS):
            list_ports.comports = MagicMock(return_value=self.mock_ports(ports))
            result = autodetect.autodetect_fast_ports(is_retro=True)
            self.assertEqual(len(result), 1)
            self.assertEqual(result[0], ports[2])
        # No matching ports at all must raise.
        list_ports.comports = MagicMock(return_value=self.mock_ports(NO_PORTS))
        with self.assertRaises(MpfRuntimeError):
            autodetect.autodetect_fast_ports(is_retro=True)

    def test_quad(self):
        """Quad (non-retro) detection returns four consecutive ports."""
        for ports in (WINDOWS_PORTS, MAC_PORTS, LINUX_PORTS):
            list_ports.comports = MagicMock(return_value=self.mock_ports(ports))
            result = autodetect.autodetect_fast_ports()
            self.assertEqual(len(result), 4)
            self.assertEqual(result, ports[2:6])
        list_ports.comports = MagicMock(return_value=self.mock_ports(NO_PORTS))
        with self.assertRaises(MpfRuntimeError):
            # This test covers the non-retro path, so the call must not pass
            # is_retro=True (that was a copy-paste from test_retro).
            autodetect.autodetect_fast_ports()
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Post-processes HTML generated by pdoc
"""
import re
import datetime
import os
import sys
import shutil
import tempfile
import logging
logger = logging.getLogger(__file__)
def pre_process_py(path):
    """Run the doc-examples preprocessor over every .py file under *path*."""
    def _rewrite(full_name, reader, writer):
        processed = str(DocExamplesPreprocessor(reader.read(), mode='doc', file_name=full_name))
        writer.write(processed)
    walk_path(path, '.py', _rewrite)
def post_process_html(path):
    """Post-process every generated .html file under *path*, line by line."""
    def _rewrite(full_name, reader, writer):
        for raw_line in reader.readlines():
            writer.write(process_html_line(raw_line, full_name))
    walk_path(path, '.html', _rewrite)
def walk_path(path, suffixes, processor):
    """Apply *processor* in place to every file under *path* matching *suffixes*.

    For each matching file the processor is called with (full_name, reader,
    writer); it reads the original content and writes the replacement to a
    temporary file, which then replaces the original.

    :param suffixes: a string or tuple of strings accepted by str.endswith
    """
    logger.debug("walk_path(path='%s', suffixes=%s)", path, suffixes)
    for root, dir_names, file_names in os.walk(path):
        logger.debug("walk_path: file_names=%s", file_names)
        for file_name in file_names:
            if file_name.endswith(suffixes):
                full_name = os.path.join(root, file_name)
                logger.debug("walk_path: processing file %s", full_name)
                with open(full_name, 'r') as r:
                    # mode='w': processors write str, so the temp file must be
                    # opened in text mode (the default 'w+b' rejects str on
                    # Python 3).
                    with tempfile.NamedTemporaryFile(mode='w', delete=False) as w:
                        tmp_name = w.name
                        logger.debug("walk_path: tmp_name=%s", tmp_name)
                        processor(full_name, r, w)
                os.remove(full_name)
                shutil.move(tmp_name, full_name)
def process_html_line(line, full_name):
    """Return *line* with the "Up" navigation link repaired when needed.

    Only lines containing the fixed_top_left anchor in files named
    index.html are rewritten (this needs to match doc/templates/css.mako);
    the root-level daaltk index gets a placeholder comment instead.
    """
    up_anchor = '<a href="index.html" id="fixed_top_left">Up</a>'
    if not full_name.endswith("/index.html") or up_anchor not in line:
        return line
    if full_name.endswith("/daaltk/index.html"):
        # The root index has nowhere to go "up" to.
        return ' <!-- No Up for root level index.html -->\n'
    return '<a href="../index.html" id="fixed_top_left">Up</a>\n'
def parse_for_doc(text, file_name=None):
    """Process example text for human-readable documentation output."""
    return str(DocExamplesPreprocessor(text, mode='doc', file_name=file_name))
def parse_for_doctest(text, file_name=None):
    """Process example text for doctest execution (line numbers preserved)."""
    return str(DocExamplesPreprocessor(text, mode='doctest', file_name=file_name))
class DocExamplesException(Exception):
    """Exception specific to processing documentation examples"""
    pass
class DocExamplesPreprocessor(object):
    """
    Processes text (intended for Documentation Examples) and applies daal-tk doc markup, mostly to enable doctest testing
    """

    # Override for the doctest ELLIPSIS_MARKER.
    doctest_ellipsis = '-etc-'

    # multi-line tags delimiting regions hidden from docs / skipped in doctests
    hide_start_tag = '<hide>'
    hide_stop_tag = '</hide>'
    skip_start_tag = '<skip>'
    skip_stop_tag = '</skip>'

    # replacement tags: substitutions applied in 'doc' mode...
    doc_replacements = [('<progress>', '[===Job Progress===]'),
                        ('<connect>', 'Connected ...'),
                        ('<datetime.datetime>', repr(datetime.datetime.now())),
                        ('<blankline>', '<BLANKLINE>')]  # sphinx will ignore this for us

    # ...and the doctest-mode equivalents, which mostly become the ellipsis.
    doctest_replacements = [('<progress>', doctest_ellipsis),
                            ('<connect>', doctest_ellipsis),
                            ('<datetime.datetime>', doctest_ellipsis),
                            ('<blankline>', '<BLANKLINE>')]

    # Two simple fsms, each with 2 states: Keep, Drop
    keep = 0
    drop = 1
def __init__(self, text, mode='doc', file_name=None):
"""
:param text: str of text to process
:param mode: preprocess mode, like 'doc' or 'doctest'
:return: object whose __str__ is the processed example text
"""
if mode == 'doc':
# process for human-consumable documentation
self.replacements = self.doc_replacements
self.is_state_keep = self._is_hide_state_keep
self._disappear = '' # in documentation, we need complete disappearance
elif mode == 'doctest':
# process for doctest execution
self.replacements = self.doctest_replacements
self.is_state_keep = self._is_skip_state_keep
self._disappear = '\n' # disappear means blank line for doctests, to preserve line numbers for error report
else:
raise DocExamplesException('Invalid mode "%s" given to %s. Must be in %s' %
(mode, self.__class__, ", ".join(['doc', 'doctest'])))
self.skip_state = self.keep
self.hide_state = self.keep
self.processed = ''
self._file_name = file_name
if text:
lines = text.splitlines(True)
self.processed = ''.join(self._process_line(line) for line in lines)
if self.hide_state != self.keep:
raise DocExamplesException("unclosed tag %s found%s" % (self.hide_start_tag, self._in_file()))
if self.skip_state != self.keep:
raise DocExamplesException("unclosed tag %s found" % self.skip_start_tag, self._in_file())
def _in_file(self):
return (" in file %s" % self._file_name) if self._file_name else ''
def _is_skip_state_keep(self):
return self.skip_state == self.keep
def _is_hide_state_keep(self):
return self.hide_state == self.keep
def _process_line(self, line):
"""processes line and advances fsms as necessary, returns processed line text"""
stripped = line.lstrip()
if stripped:
# Repair the "Up" link for certain files (this needs to match the doc/templates/css.mako)
if self._file_name and self._file_name.endswith("/index.html") and '<a href="index.html" id="fixed_top_left">Up</a>' in line:
if self._file_name.endswith("/daaltk/index.html"):
return ' <!-- No Up for root level index.html -->\n'
return '<a href="../index.html" id="fixed_top_left">Up</a>\n'
stripped = DocExamplesPreprocessor._strip_markdown_comment(stripped)
if stripped[0] == '<':
if self._process_if_tag_pair_tag(stripped):
return self._disappear # tag-pair markup should disappear appropriately
# check for keyword replacement
for keyword, replacement in self.replacements:
if stripped.startswith(keyword):
line = line.replace(keyword, replacement, 1)
break
return line if self.is_state_keep() else self._disappear
def _process_if_tag_pair_tag(self, stripped):
"""determines if the stripped line is a tag pair start or stop, advances fsms accordingly"""
if stripped.startswith(self.skip_start_tag):
if self.skip_state == self.drop:
raise DocExamplesException("nested tag %s found%s" % (self.skip_start_tag, self._in_file()))
self.skip_state = self.drop
return True
elif stripped.startswith(self.skip_stop_tag):
if self.skip_state == self.keep:
raise DocExamplesException("unexpected tag %s found%s" % (self.skip_stop_tag, self._in_file()))
self.skip_state = self.keep
return True
elif stripped.startswith(self.hide_start_tag):
if self.hide_state == self.drop:
raise DocExamplesException("nested tag %s found%s" % (self.hide_start_tag, self._in_file()))
self.hide_state = self.drop
return True
elif stripped.startswith(self.hide_stop_tag):
if self.hide_state == self.keep:
raise DocExamplesException("unexpected tag %s found%s" % (self.hide_stop_tag, self._in_file()))
self.hide_state = self.keep
return True
return False
markdown_comment_tell = r'[//]:'
markdown_comment_re = r'^\[//\]:\s*#\s*\"(.+)\"$'
markdown_comment_pattern = re.compile(markdown_comment_re)
@staticmethod
def _strip_markdown_comment(s):
"""
Checks if the given string is formatted as a Markdown comment per Magnus' response here:
http://stackoverflow.com/questions/4823468/comments-in-markdown/32190021#32190021
If it is, the formatting is stripped and only the comment's content is returned
If not, the string is returned untouched
"""
if s.startswith(DocExamplesPreprocessor.markdown_comment_tell):
m = DocExamplesPreprocessor.markdown_comment_pattern.match(s)
if m:
return m.group(1)
return s
def __str__(self):
return self.processed
##########################################################
def main():
    """Command-line driver: post-process HTML docs or pre-process .py examples.

    Usage: <script> -html=HTML_DIR | -py=PY_DIR

    :raises RuntimeError: when the argument is missing or unrecognized.
    """
    script_name = os.path.basename(__file__)
    usage = "Usage: %s <-html=HTML_DIR|-py=PY_DIR>" % script_name
    if len(sys.argv) < 2:
        raise RuntimeError(usage)
    option = sys.argv[1]
    html_flag = '-html='
    py_flag = '-py='
    if option.startswith(html_flag):
        html_dir = os.path.abspath(option[len(html_flag):])
        # BUG FIX: `print "..."` statements were py2-only syntax; the function
        # form with a single argument behaves identically on py2 and py3.
        print("[%s] processing HTML at %s" % (script_name, html_dir))
        post_process_html(html_dir)
    elif option.startswith(py_flag):
        py_dir = os.path.abspath(option[len(py_flag):])
        print("[%s] processing Python at %s" % (script_name, py_dir))
        pre_process_py(py_dir)
    else:
        raise RuntimeError(usage)


if __name__ == "__main__":
    main()
|
"""
Create `Grabber` instances for list of resources we need to grab information
from.
"""
import importlib
import yaml
import settings
from utils import get_logger
from crawler.models.resource import Resource
from crawler.scheduled_task import ScheduledTask
from crawler.proxy import Proxy
from crawler.cache import Cache
from crawler.db import get_engine
# NOTE(review): this pulls the trader package into the crawler factory --
# confirm this cross-package dependency is intended (see create_traders below).
from trader.shift_trader import ShiftTrader_v0
class Factory(object):
    """
    Wires up `Grabber` instances (fetcher + parser + cache + db engine) for
    every resource declared in the resources YAML file, and builds the
    scheduled trader tasks.
    """

    def __init__(self, resources=None):
        # resources may be injected (e.g. for tests) or loaded lazily via
        # load_resources()
        self.resources = resources or []
        self.cache = None
        self.logger = get_logger(self.__class__.__name__.lower())

    def load_resources(self):
        """Load resource definitions from the YAML file into `Resource` objects."""
        self.logger.debug('Loading resources..')
        with open(settings.RESOURCES_FILEPATH) as f:
            # safe_load instead of bare yaml.load: the resources file is plain
            # data, and yaml.load without an explicit Loader can construct
            # arbitrary objects (and is deprecated since PyYAML 5.1).
            resources = yaml.safe_load(f)
        self.resources = [Resource(**r) for r in resources]
        return self.resources

    async def init_cache(self):
        """
        One shared instance for cache, but also may be implemented in the same
        way as database engine.
        """
        self.logger.debug('Initializing cache...')
        self.cache = Cache()
        await self.cache._create_pool()

    async def init(self):
        """Load resources and initialize the shared cache."""
        self.load_resources()
        await self.init_cache()

    async def cleanup(self):
        """Release resources owned by the factory (currently only the cache)."""
        self.logger.debug('Closing factory resources...')
        await self.cache.close()

    def _load_cls_from_module(self, subpackage, module_name):
        """
        Load class from module name which follows our naming conventions
        (module `foo_bar` in subpackage `parser` -> class `FooBarParser`).

        :raises ValueError: when the module or the expected class is missing.
        """
        full_module_name = f'{__package__}.{subpackage}.{module_name}'
        try:
            module = importlib.import_module(full_module_name)
        except ModuleNotFoundError:
            raise ValueError(
                f'No such {subpackage}: {full_module_name}. '
                f'Check resources file syntax.'
            )
        class_name = f'{module_name}_{subpackage}'.title().replace('_', '')
        cls_obj = getattr(module, class_name, None)
        if cls_obj is None:
            raise ValueError(
                f'No such class {class_name} '
                f'within module {full_module_name}.'
            )
        return cls_obj

    def get_parser(self, parser_name):
        """Instantiate the parser class named in the resource config."""
        parser_cls = self._load_cls_from_module('parser', parser_name)
        return parser_cls()

    def get_fetcher(self, resource):
        """Build the fetcher for `resource`, with optional proxy and driver."""
        fetcher_cfg = resource.fetcher
        proxy_cfg = resource.proxy
        fetcher_name = fetcher_cfg.instance
        driver_name = fetcher_cfg.driver
        proxy = None
        if proxy_cfg.use:
            proxy = Proxy(ip=resource.proxy.ip, port=resource.proxy.port)
        driver_cls = None
        if driver_name:
            driver_cls = self._load_cls_from_module('driver', driver_name)
        fetcher_cls = self._load_cls_from_module('fetcher', fetcher_name)
        return fetcher_cls(
            base_url=None,
            proxy=proxy,
            driver_cls=driver_cls,
        )

    def get_grabber(self, resource, *, fetcher, parser, cache, engine):
        """Build the grabber tying together all collaborators for `resource`."""
        grabber_cls = self._load_cls_from_module('grabber', resource.grabber)
        return grabber_cls(
            resource=resource,
            fetcher=fetcher,
            parser=parser,
            cache=cache,
            engine=engine,
        )

    async def create_grabbers(self):
        """Create one fully-wired grabber per loaded resource."""
        grabbers = []
        # Each grabber is responsible for closing resources within itself
        for res in self.resources:
            fetcher = self.get_fetcher(res)
            parser = self.get_parser(res.parser)
            # NOTE(review): get_engine() is awaited once per resource --
            # confirm it returns a shared engine rather than opening a new
            # connection pool each time.
            engine = await get_engine()
            grabber = self.get_grabber(
                resource=res,
                fetcher=fetcher,
                parser=parser,
                cache=self.cache,
                engine=engine,
            )
            grabbers.append(grabber)
        return grabbers

    async def create_traders(self):
        """Create scheduled trading tasks, one per trading algorithm."""
        # todo: reuse cache from here
        trader = ShiftTrader_v0(
            starting_amount=10000,
        )
        await trader.init()
        return [
            ScheduledTask(
                task=trader.daily,
                scheduled_time='08:00',
            )
        ]

    async def create_daily(self):
        """Placeholder for daily maintenance tasks; currently none."""
        return []
|
from jenkinsapi import jenkins

# Jenkins credentials; fill in locally, never commit real values.
username = ""  # add username
password = ""  # add your password
URL = "http://ccp-tests.xenrt.xs.citrite.net/jenkins"

j = jenkins.Jenkins(URL, username, password)
jobclient = j.get_job('Greenfield-Regression-Multi')

# Pull the raw result set of build #8 (use get_last_build() for the newest).
p = jobclient.get_build(8).get_resultset()._data
# NOTE(review): childReports index 3 targets one specific sub-report of this
# multi-job -- confirm the index is stable across builds.
cases = p["childReports"][3]["result"]["suites"][0]["cases"]

# Write one CSV row per test case. The context manager guarantees the file is
# closed even if a case record is malformed (the file was previously left
# open on any exception).
with open('./lxc_adv.csv', 'w') as f:
    # NOTE(review): the header declares an 'error' column, but 'errorDetails'
    # is not always present so only name/status are written per row.
    f.write("%s, %s, %s\n" % ('Test Case', 'Status', 'error'))
    for case in cases:
        f.write("%s, %s\n" % (case['name'], case['status']))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-05-31 23:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add round bookkeeping fields: accumulated_probability, completed, started."""
    # NOTE: auto-generated Django migration -- do not edit field definitions by
    # hand; create a follow-up migration instead.

    dependencies = [
        ('breach', '0022_auto_20170203_2117'),
    ]
    operations = [
        migrations.AddField(
            model_name='round',
            name='accumulated_probability',
            field=models.FloatField(default=1.0, help_text="Accumulated probability of current round's given knownsecret. "),
        ),
        migrations.AddField(
            model_name='round',
            name='completed',
            field=models.DateTimeField(blank=True, default=None, help_text='When we stopped round, successfully or not', null=True),
        ),
        migrations.AddField(
            model_name='round',
            name='started',
            field=models.DateTimeField(blank=True, default=None, help_text='Date and time since round was started', null=True),
        ),
    ]
|
from flask import Flask, render_template, make_response, request, send_file
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
import os, shutil

# One headless Chrome instance shared by every request.
# NOTE(review): a single module-level driver is not safe under concurrent
# requests -- confirm the server is only run single-threaded.
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
driver = webdriver.Chrome('driver/chromedriver',chrome_options=chrome_options)

app = Flask(__name__, static_url_path="")

# where screenshots are written, where YOLO writes its output, and what to wipe
screen_folder = 'screen/'
result_folder = 'runs/detect/exp/'
delete_folder = 'runs'
@app.route("/")
def index():
    """Render the landing page with no result image yet."""
    landing_page = render_template("index.html", path="")
    return landing_page
@app.route('/url', methods=['POST'])
def url():
    """Screenshot the POSTed URL, run YOLO element detection, and return the
    relative path of the annotated image.

    Form params:
        url: absolute URL to capture (must include the scheme).

    Returns 400 when no url was supplied, 503 when the page cannot be opened
    or the screenshot cannot be written.
    """
    import subprocess  # local import keeps the module's import block untouched

    url = request.form.get('url')
    if url is None:
        return make_response("Url is null", 400)
    print(url)
    # derive a flat file name from the URL, e.g. http://a/b -> a-b.png
    file_name = url.split("//")[1] + ".png"
    file_name = file_name.replace("/", "-")
    try:
        driver.get(url)
    except Exception:
        return make_response("Cannot open website", 503)
    width = 1920
    height = 1080
    driver.set_window_size(width, height)
    # wipe previous detection output so the new run starts clean
    if os.path.isdir(delete_folder):
        shutil.rmtree(delete_folder)
    path = screen_folder + file_name
    try:
        driver.save_screenshot(path)
        print("Saved png for " + url)
    except Exception:
        return make_response('Error with save picture', 503)
    # BUG FIX: the old os.system() call interpolated the URL-derived path into
    # a shell string, allowing shell command injection; an argument list with
    # shell=False cannot be injected.
    subprocess.run(
        ["python3", "YOLO/detect.py",
         "--weights", "YOLO/elements-model/best.pt",
         "--img", "1920",
         "--source", path],
        check=False,
    )
    os.remove(path)
    proccessed_image = result_folder + file_name
    new_result_folder = "static/results/"
    os.makedirs(new_result_folder, exist_ok=True)  # rename fails if dir is missing
    os.rename(proccessed_image, new_result_folder + file_name)
    return "/results/" + file_name


if __name__ == "__main__":
    # BUG FIX: this guard originally sat *above* the /url route definition, so
    # running the module as a script started the blocking app.run() before the
    # route was registered; the guard must come last.
    app.run(host="0.0.0.0", port=5000, debug=True)
|
import json
from collections import namedtuple
# Simple (name, version) record for one installed library.
LibraryDs = namedtuple('LibraryDs', ['name', 'version'])


class InstalledLibrariesOutputLinesPostProcessor(object):
    """Collects the final library list (name==version pairs) from output lines.

    Lines are fed one at a time; the line following the task header carries a
    JSON payload, after which processing stops.
    """

    INSTALLED_LIBRARIES_AFTER_MODIFICATIONS_TASK = 'Final list of all installed libraries in the venv'

    def __init__(self):
        self.stop_line_processing = False
        self.next_line_contains_installed_libraries_after_modifications = False
        self.installed_libraries_after_modifications = []
        self.installed_libraries_before_modifications = []

    def post_process_line(self, output_line):
        """Consume one line; no-op once the payload has been captured."""
        if self.stop_line_processing:
            return
        if not self.next_line_contains_installed_libraries_after_modifications:
            marker = InstalledLibrariesOutputLinesPostProcessor.INSTALLED_LIBRARIES_AFTER_MODIFICATIONS_TASK
            if marker in output_line:
                self.next_line_contains_installed_libraries_after_modifications = True
            return
        # Payload format: "<ip> => {json}" with stdout_lines like "name==version".
        payload = json.loads(output_line.split(' => ')[1])
        for spec in payload['stdout_lines']:
            parts = spec.split('==')
            if parts[0] != 'pkg-resources':  # ubuntu pip artifact, not a real package
                self.installed_libraries_after_modifications.append(
                    LibraryDs(name=parts[0], version=parts[1]))
        self.stop_line_processing = True
class InstalledVirtualEnvironmentsOutputLinesPostProcessor(object):
    """Collects installed virtual environment names from output lines.

    The line following the task header carries a JSON payload, after which
    processing stops.
    """

    INSTALLED_VIRTUAL_ENVIRONMENTS_TASK = 'list all installed virtual environments'

    def __init__(self):
        self.stop_line_processing = False
        self.next_line_contains_installed_virtual_environments = False
        self.installed_virtual_environments = []

    def post_process_line(self, output_line):
        """Consume one line; no-op once the payload has been captured."""
        if self.stop_line_processing:
            return
        if not self.next_line_contains_installed_virtual_environments:
            marker = InstalledVirtualEnvironmentsOutputLinesPostProcessor.INSTALLED_VIRTUAL_ENVIRONMENTS_TASK
            if marker in output_line:
                self.next_line_contains_installed_virtual_environments = True
            return
        # Payload format: "<ip> => {json}" with stdout_lines of env names.
        payload = json.loads(output_line.split(' => ')[1])
        self.installed_virtual_environments.extend(payload['stdout_lines'])
        self.stop_line_processing = True
class InitializationOutputLinesPostProcessor(object):
    """Extracts the installed Python version from output lines.

    The line following the task header carries a JSON payload whose first
    stdout line looks like "Python 2.7.12"; processing stops after that.
    """

    PYTHON_VERSION_IDENTIFYING_TASK = 'Identify installed python version'

    def __init__(self):
        self.stop_line_processing = False
        self.next_line_contains_python_version = False
        # BUG FIX: previously initialized to [] although it is always assigned
        # a str; start from the empty string for a consistent type.
        self.python_version = ''

    def post_process_line(self, output_line):
        """Consume one line; no-op once the version has been captured."""
        if self.stop_line_processing:
            return
        if not self.next_line_contains_python_version:
            if InitializationOutputLinesPostProcessor.PYTHON_VERSION_IDENTIFYING_TASK in output_line:
                self.next_line_contains_python_version = True
            return
        # Payload format: "<ip> => {json}".
        command_info = json.loads(output_line.split(' => ')[1])
        # NOTE(review): replace() keeps the leading space (" 2.7.12"); callers
        # may depend on the exact value, so it is preserved here.
        self.python_version = command_info['stdout_lines'][0].replace('Python', '')
        self.stop_line_processing = True
class NullOutputLinesPostProcessor(object):
    """Do-nothing post-processor used when no output parsing is required."""

    def post_process_line(self, output_line):
        """Ignore the line entirely."""
        return None
|
class MapperTransform(object):
    """
    Abstract base class for all mapper transforms.

    Subclasses should implement the `__call__` method.

    The internal Meta class supports the following properties:

    - fields: mapper fields to be given as arguments to the __call__ method,
      defaults to all the fields.
    """

    def __init__(self, *args, **kwargs):
        # Missing Meta or missing Meta.fields both fall back to '__all__'.
        meta = getattr(self, 'Meta', None)
        self.fields = getattr(meta, 'fields', '__all__')

    def __call__(self, **kwargs):
        """
        Perform the transformation.

        :param **kwargs: mapper field values declared in Meta.
        :return: a dictionary which will be added to the mapper output (existing keys will be rewritten).
        """
        raise NotImplementedError("You should subclass MapperTransform and implement the __call__ method.")
class SplitMapperTransform(MapperTransform):
    """
    Map a single string-based field to multiple by splitting its value.

    :param from_field: source field name.
    :param to_fields: target fields names.
    :param sep: separator (default ',').
    """

    def __init__(self, from_field, to_fields, sep=','):
        super(SplitMapperTransform, self).__init__(from_field, to_fields, sep)
        self.from_field = from_field
        self.to_fields = to_fields
        self.sep = sep

    def __call__(self, **kwargs):
        """Split the source value and pair the pieces with the target names."""
        pieces = kwargs[self.from_field].split(self.sep)
        # zip truncates, so mismatches in length are silently ignored
        return {field: piece for field, piece in zip(self.to_fields, pieces)}
class StackMapperTransform(MapperTransform):
    """
    Map multiple string-based fields to a single one by joining their values.

    :param from_fields: source fields names.
    :param to_field: target field name.
    :param sep: separator (default ',').
    """

    def __init__(self, from_fields, to_field, sep=','):
        super(StackMapperTransform, self).__init__(from_fields, to_field, sep)
        self.from_fields = from_fields
        self.to_field = to_field
        self.sep = sep

    def __call__(self, **kwargs):
        """Join the source values, in declaration order, into the target field."""
        joined = self.sep.join(kwargs[name] for name in self.from_fields)
        return {self.to_field: joined}
|
# -*- coding: utf-8 -*-
"""
Module: rushhour_solver
File: parser.py
Creator: Nick Geense
Date: 23-11-2016
Rush Hour file parser for analysing board text representations.
Rush Hour Parser should receive a valid filename
and after all checks have passed return a Board object.
"""
import os.path
import logging
from rushhour_solver.game_components import Board, RedCar, Car, Truck
Lumberjack = logging.getLogger(__name__)
class RushHourParser(object):
    """Parse a Rush Hour board file into a `Board` with its vehicles.

    The file must be at most 6x6 characters: '.' marks an empty tile, 'r' is
    the red car, and any other character identifies a car (length 2) or a
    truck (length 3).
    """

    def __init__(self, filename):
        self.filename = None
        self.board = None
        self.set_filename(filename)
        self.start_parser()

    def start_parser(self):
        """Read, validate and rasterize the file, then locate all vehicles."""
        board_object = None
        file_content = self._read_file(self.filename)
        if not self._check_dimensions(file_content):
            raise ValueError('File Content not valid: Dimensions do not match 6x6 tiles.')
        board = self._rasterize_file_content(file_content)
        vehicles = self._find_vehicles(board)
        if len(vehicles) != 0:
            board_object = Board()
            board_object.set_board(board)
            board_object.set_vehicles(vehicles)
        else:
            Lumberjack.warning('Warning, could not find any vehicles on the board.')
        if board_object is None:
            print('Board_object is None, this is getting ridiculous..')
        self.board = board_object

    def set_filename(self, filename):
        """Filename should be a path; raises ValueError when it does not exist."""
        if os.path.exists(filename):
            self.filename = filename
            # TODO - Restart function call when filename changes
        else:
            raise ValueError('File path does not exist.')

    def get_filename(self):
        return self.filename

    def _read_file(self, filename):
        """Return the file's lines with trailing newlines stripped."""
        with open(filename, 'r') as f:
            file_content = [line.rstrip('\n') for line in f]
        return file_content

    def _check_dimensions(self, file_content):
        """True when the content fits within a 6x6 grid."""
        if len(file_content) > 6:
            return False
        for line in file_content:
            if len(line) > 6:
                return False
        return True

    def _rasterize_file_content(self, file_content):
        """Convert lines of text into a 2D list of single characters."""
        return [list(line) for line in file_content]

    def _find_vehicles(self, board):
        """Scan the board and build one vehicle object per distinct id.

        Each id is handled at its first (top-left-most) cell; orientation and
        length are probed from there.
        """
        seen_ids = []
        vehicles = []
        # BUG FIX: enumerate() yields the true coordinates directly; the old
        # board.index(row)/row.index(cell) lookups returned the *first equal*
        # row/cell, which can be wrong when rows compare equal.
        for y, row in enumerate(board):
            for x, cell in enumerate(row):
                if cell == '.' or cell in seen_ids:
                    continue
                vehicle_id = cell  # renamed from `id` (shadowed the builtin)
                seen_ids.append(vehicle_id)
                length = 0
                orientation = None
                if x + 1 < len(row) and row[x + 1] == cell:
                    orientation = 'H'
                    length = 3 if x + 2 < len(row) and row[x + 2] == cell else 2
                elif y + 1 < len(board) and board[y + 1][x] == cell:
                    orientation = 'V'
                    length = 3 if y + 2 < len(board) and board[y + 2][x] == cell else 2
                if length == 2:
                    if vehicle_id == 'r':
                        vehicles.append(RedCar(vehicle_id, x, y, orientation))
                    else:
                        vehicles.append(Car(vehicle_id, x, y, orientation))
                elif length == 3:
                    # the red car is never a truck; a 3-long 'r' is ignored
                    if vehicle_id != 'r':
                        vehicles.append(Truck(vehicle_id, x, y, orientation))
                elif length == 0:
                    Lumberjack.warning('Found a piece of information that does not behave like a vehicle!')
        return vehicles

    def get_board(self):
        """Return the parsed `Board`, or None when parsing found nothing."""
        if self.board is None:
            print('Parsed board is None..')
        return self.board
|
"""
Demonstrates similarities between pcolor, pcolormesh, imshow and pcolorfast
for drawing quadrilateral grids.
"""
import matplotlib.pyplot as plt
import numpy as np

# make these smaller to increase the resolution
dx, dy = 0.15, 0.05

# generate 2 2d grids for the x & y bounds
y, x = np.mgrid[slice(-3, 3 + dy, dy),
                slice(-3, 3 + dx, dx)]

# sample scalar field evaluated on the grid
z = (1 - x / 2. + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)

# x and y are bounds, so z should be the value *inside* those bounds.
# Therefore, remove the last value from the z array.
z = z[:-1, :-1]
# symmetric color limits so the diverging colormap is centered on zero
z_min, z_max = -np.abs(z).max(), np.abs(z).max()

plt.subplot(2, 2, 1)
plt.pcolor(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
plt.title('pcolor')
# set the limits of the plot to the limits of the data
plt.axis([x.min(), x.max(), y.min(), y.max()])
plt.colorbar()

plt.subplot(2, 2, 2)
plt.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
plt.title('pcolormesh')
# set the limits of the plot to the limits of the data
plt.axis([x.min(), x.max(), y.min(), y.max()])
plt.colorbar()

plt.subplot(2, 2, 3)
# imshow needs an explicit extent since it plots in pixel space by default
plt.imshow(z, cmap='RdBu', vmin=z_min, vmax=z_max,
           extent=[x.min(), x.max(), y.min(), y.max()],
           interpolation='nearest', origin='lower')
plt.title('image (interp. nearest)')
plt.colorbar()

# pcolorfast is an Axes (not pyplot) method, hence the explicit axes handle
ax = plt.subplot(2, 2, 4)
ax.pcolorfast(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
plt.title('pcolorfast')
plt.colorbar()

plt.show()
from ParallelCoordinates import ParallelCoordinates
import pandas as pd
import plotly.plotly as py
import os

# NOTE(review): hard-coded plotly cloud credentials; these look like the public
# documentation placeholders -- replace with real credentials locally.
py.sign_in('DemoAccount', '2qdyfjyr7o')
# Iris dataset used as the shared fixture for the plotting tests below.
df = pd.read_csv('../data/iris.csv')
def test_ParallelCoordinates_plot():
    """Smoke-test plotting with a custom layout, labels and axis ranges."""
    chart = ParallelCoordinates(df)
    chart.set_layout(plot_bgcolor='#E5E5E5',
                     paper_bgcolor='#E5E5E5')
    chart.plot('species',
               labels=['Sepal length', 'Sepal width', 'Petal length', 'Petal width'],
               ranges=[[0, 8], [0, 8], [0, 8], [0, 8]])
def test_ParallelCoordinates_save():
    """Plot then save to disk; assert the file exists and clean it up."""
    output_path = '../data/save_test.png'
    chart = ParallelCoordinates(df)
    chart.plot('species')
    chart.save(output_filename=output_path)
    assert os.path.isfile(output_path)
    os.remove(output_path)
|
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This package provides various operations users can ask ARMI to do with their inputs.
An Entry Point might run a simulation, migrate inputs, build a suite of related inputs
and submit them in a parameter sweep, validate inputs, open the GUI, run a test suite,
or other similar things. There are built-in entry points, and additional ones may
be specified by custom plugins.
See Also
--------
armi.cases : Individual collections of tasks that may run one or more entry points.
These allow one entry point to create a sequence of events that may call one
or more additional entry points. For example, the ``submitSuite`` entry point builds
a case suite with many related cases that will all call the ``run`` entry point from
a HPC cluster.
armi.operators : Operations that ARMI will perform on a reactor model.
These may be created by ``Case`` objects created by certain entry points (e.g. ``run``).
armi : Fundamental entry point that calls this package.
"""
# Importing each module causes any EntryPoints defined in it (and decorated
# with @armi.command) to be added to the collection of registered classes.
from __future__ import print_function
import argparse
import textwrap
import re
from typing import Optional
import armi
from armi import plugins
class EntryPointsPlugin(plugins.ArmiPlugin):
    """Plugin providing ARMI's built-in command-line entry points."""

    @staticmethod
    @plugins.HOOKIMPL
    def defineEntryPoints():
        """Return the built-in CLI entry point classes, in registration order."""
        from armi.cli import (
            checkInputs,
            clone,
            compareCases,
            migrateInputs,
            modify,
            run,
            # testing
            backupDatabases,
            cleanTemps,
            convertDB,
        )

        entryPoints = [
            checkInputs.CheckInputEntryPoint,
            clone.CloneArmiRunCommandBatch,
            clone.CloneArmiRunCommandInteractive,
            clone.CloneSuiteCommand,
            compareCases.CompareCases,
            compareCases.CompareSuites,
            migrateInputs.MigrateInputs,
            modify.ModifyCaseSettingsCommand,
            run.RunEntryPoint,
            # testing
            backupDatabases.BackUpInUseTestDabases,
            backupDatabases.CopyDB,
            cleanTemps.CleanTemps,
            convertDB.ConvertDB,
        ]
        return entryPoints
class ArmiCLI:
    """
    ARMI CLI -- The main entry point into ARMI. There are various commands
    available, to get help for the individual commands, run again with
    `<command> --help`. Generically, the CLI implements functions that already
    exists within ARMI
    """

    # NOTE: the docstring above doubles as the argparse description, so its
    # wording is user-visible help text.

    def __init__(self):
        # Collect entry points from all plugins, rejecting duplicate names.
        self._entryPoints = dict()
        for pluginPoints in armi.getPluginManager().hook.defineEntryPoints():
            for point in pluginPoints:
                if point.name in self._entryPoints:
                    raise KeyError(
                        "Duplicate entry points defined for `{}`: {} and {}".format(
                            point.name,
                            self._entryPoints[point.name],
                            point,
                        )
                    )
                self._entryPoints[point.name] = point

        parser = argparse.ArgumentParser(
            prog="armi",
            description=self.__doc__,
            usage="%(prog)s [-h] [-l | command [args]]",
        )
        group = parser.add_mutually_exclusive_group()
        group.add_argument(
            "-v", "--version", action="version", version="%(prog)s " + armi.__version__
        )
        group.add_argument(
            "-l", "--list-commands", action="store_true", help="list commands"
        )
        group.add_argument("command", nargs="?", default="help", help=argparse.SUPPRESS)
        parser.add_argument("args", nargs=argparse.REMAINDER, help=argparse.SUPPRESS)
        self.parser = parser

    def listCommands(self):
        """List commands with a short description."""
        indent = 22
        initial_indent = " "
        subsequent_indent = initial_indent + " " * indent
        wrapper = textwrap.TextWrapper(
            initial_indent=initial_indent, subsequent_indent=subsequent_indent, width=79
        )
        whitespace_sub = re.compile(r"\s+").sub

        def condense(text):
            # collapse every run of whitespace into a single space
            return whitespace_sub(" ", text.strip())

        formatter = "{name:<{width}}{desc}".format
        print("\ncommands:")
        for cmd in sorted(self._entryPoints.values(), key=lambda entry: entry.name):
            # A command documents itself via its optional `description` class
            # attribute (default None, inherited from EntryPoint), falling back
            # to its docstring, then to the empty string.
            desc = condense(cmd.description or cmd.__doc__ or "")
            print(wrapper.fill(formatter(width=indent, name=cmd.name, desc=desc)))

    def run(self) -> Optional[int]:
        """Parse argv and dispatch; return the command's optional exit code."""
        args = self.parser.parse_args()
        if args.list_commands:
            self.listCommands()
            raise SystemExit(0)
        if args.command == "help":
            self.parser.print_help()
            raise SystemExit(0)
        return self.executeCommand(args.command, args.args)

    def executeCommand(self, command, args) -> Optional[int]:
        r"""execute `command` with arguments `args`, return optional exit code."""
        command = command.lower()
        if command not in self._entryPoints:
            print(
                'Unrecognized command "{}". Valid commands are listed below.'.format(
                    command
                )
            )
            self.listCommands()
            raise SystemExit(1)
        commandClass = self._entryPoints[command]
        cmd = commandClass()
        # each command parses its own remaining argument list
        cmd.parse(args)
        if cmd.args.batch:
            armi.Mode.setMode(armi.Mode.Batch)
        elif cmd.mode is not None:
            armi.Mode.setMode(cmd.mode)
        # do whatever there is to be done!
        return cmd.invoke()
|
# Generated by Django 3.0.4 on 2020-03-18 22:17
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: create the Ingreso and Detalle_Ingreso tables."""
    # NOTE: auto-generated Django migration -- do not edit field definitions by
    # hand; create a follow-up migration instead.

    initial = True
    dependencies = [
        ('producto', '0001_initial'),
        ('trabajador', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Ingreso',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('no_ingreso', models.IntegerField(blank=True, null=True, verbose_name='No de ingreso')),
                ('fecha', models.DateField(default=datetime.datetime.now, verbose_name='Fecha emision')),
                ('vendedor', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='trabajador.Trabajador')),
            ],
            options={
                'verbose_name': 'Ingreso de mercaderia',
                'verbose_name_plural': 'Ingresos de mercaderia',
                'db_table': 'ingreso',
            },
        ),
        migrations.CreateModel(
            name='Detalle_Ingreso',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('canti', models.PositiveIntegerField(verbose_name='Cantidad')),
                ('id_ingreso', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='detalles', to='ingreso.Ingreso')),
                ('id_prod', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='producto.Producto')),
            ],
            options={
                'verbose_name': 'Detalle de ingreso de mercaderia',
                'verbose_name_plural': 'Detalle de ingresos de mercaderia',
                'db_table': 'detalle_ingreso',
            },
        ),
    ]
|
from lmsquery import const

# setuptools preferred; fall back to distutils on minimal environments
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Package metadata for LMSQuery; the version is sourced from lmsquery.const.
setup(
    name="LMSQuery",
    version=const.LMSQUERY_VERSION,
    author="Robert Einhaus",
    author_email="robert@einhaus.info",
    description=("Query library for Logitech Media Server"),
    license="MIT",
    keywords="logitech media server lms",
    url="https://github.com/roberteinhaus/lmsquery",
    packages=['lmsquery'],
    install_requires=['requests'],
    long_description="This library provides easy to use functions to send \
queries to a Logitech Media Server \
(https://github.com/Logitech/slimserver)",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Topic :: Software Development",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2018, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from setuptools import setup, find_packages
import versioneer

# Package metadata for the q2-deblur QIIME 2 plugin; version and build
# commands come from versioneer.
setup(
    name="q2-deblur",
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    license='BSD-3-Clause',
    packages=find_packages(),
    author="Daniel McDonald",
    author_email="wasade@gmail.com",
    description="Sequence quality control with deblur",
    # registers the plugin with QIIME 2's plugin manager
    entry_points={
        "qiime2.plugins":
        ["q2-deblur=q2_deblur.plugin_setup:plugin"]
    },
    # ship visualization assets and the test data trees with the package
    package_data={
        "q2_deblur": ["assets/*", "assets/js/*"],
        'q2_deblur.tests': ['data/*',
                            'data/expected/*',
                            'data/expected/util/*',
                            'data/sample_seqs_16S/*',
                            'data/sample_seqs_other/*'],
    },
    zip_safe=False,
)
|
import numpy as np
import pytest
from procgen import ProcgenGym3Env
from .env import ENV_NAMES
import gym3
import multiprocessing as mp
NUM_STEPS = 10000
def gather_rollouts(
    env_kwargs, actions, state=None, get_state=False, set_state_every_step=False
):
    """Run an environment through `actions` and record one entry per step.

    :param env_kwargs: kwargs for constructing ProcgenGym3Env
    :param actions: sequence of actions fed to env.act()
    :param state: optional saved state to restore before stepping
    :param get_state: also record the env state in each entry
    :param set_state_every_step: immediately restore the just-saved state
        (exercises the set_state path; requires get_state=True)
    :return: list of dicts with keys 'ob', 'info' and optionally 'state'
    """
    env = ProcgenGym3Env(**env_kwargs)
    if state is not None:
        env.callmethod("set_state", state)

    def _record():
        # Snapshot the current observation (and optionally state); this was
        # previously duplicated for the initial entry and every step entry.
        entry = dict(ob=env.observe(), info=env.get_info())
        if get_state:
            entry["state"] = env.callmethod("get_state")
        if set_state_every_step:
            env.callmethod("set_state", entry["state"])
        return entry

    result = [_record()]
    for act in actions:
        env.act(act)
        result.append(_record())
    return result
def fn_wrapper(fn, result_queue, **kwargs):
    """Call `fn(**kwargs)` and push its return value onto `result_queue`."""
    result_queue.put(fn(**kwargs))
def run_in_subproc(fn, **kwargs):
    """Execute `fn(**kwargs)` in a fresh spawn-context process and return its result.

    The spawn context guarantees the child shares no interpreter state with
    the parent, so each call is fully isolated.
    """
    ctx = mp.get_context("spawn")
    results = ctx.Queue()
    proc = ctx.Process(
        target=fn_wrapper, kwargs=dict(fn=fn, result_queue=results, **kwargs)
    )
    proc.start()
    value = results.get()
    proc.join()
    return value
def assert_rollouts_identical(a_rollout, b_rollout):
    """Assert two rollouts (lists of ob/info[/state] entries) match exactly.

    States are only compared when both entries recorded one.
    """
    assert len(a_rollout) == len(b_rollout)
    for entry_a, entry_b in zip(a_rollout, b_rollout):
        assert entry_a["info"] == entry_b["info"]
        rew_a, ob_a, first_a = entry_a["ob"]
        rew_b, ob_b, first_b = entry_b["ob"]
        assert np.array_equal(rew_a, rew_b)
        assert np.array_equal(first_a, first_b)
        assert sorted(ob_a.keys()) == sorted(ob_b.keys())
        for key in sorted(ob_a.keys()):
            assert np.array_equal(ob_a[key], ob_b[key])
        if "state" in entry_a and "state" in entry_b:
            assert entry_a["state"] == entry_b["state"]
@pytest.mark.skip(reason="slow")
@pytest.mark.parametrize("env_name", ENV_NAMES)
def test_state(env_name):
    """Exercise get/set_state determinism for every procgen env (skipped by default: slow)."""
    run_state_test(env_name)
def run_state_test(env_name):
    """Check that get_state/set_state round-trips deterministically for *env_name*.

    The same action sequence is replayed several ways — plain, saving states,
    save+restore at every step, and restore from the midpoint under a different
    seed — each in its own spawned process, and all rollouts must match.
    """
    env_kwargs = dict(num=2, env_name=env_name, rand_seed=0)
    env = ProcgenGym3Env(**env_kwargs)
    rng = np.random.RandomState(0)
    actions = [
        gym3.types_np.sample(env.ac_space, bshape=(env.num,), rng=rng)
        for _ in range(NUM_STEPS)
    ]
    ref_rollouts = run_in_subproc(
        gather_rollouts, env_kwargs=env_kwargs, actions=actions
    )
    assert len(ref_rollouts) == NUM_STEPS + 1
    # run the same thing a second time
    basic_rollouts = run_in_subproc(
        gather_rollouts, env_kwargs=env_kwargs, actions=actions
    )
    assert_rollouts_identical(ref_rollouts, basic_rollouts)
    # run but save states
    state_rollouts = run_in_subproc(
        gather_rollouts, env_kwargs=env_kwargs, actions=actions, get_state=True
    )
    assert_rollouts_identical(ref_rollouts, state_rollouts)
    # make sure states are the same
    state_rollouts_2 = run_in_subproc(
        gather_rollouts, env_kwargs=env_kwargs, actions=actions, get_state=True
    )
    assert_rollouts_identical(ref_rollouts, state_rollouts_2)
    assert_rollouts_identical(state_rollouts, state_rollouts_2)
    # save and restore at each timestep
    state_rollouts_3 = run_in_subproc(
        gather_rollouts,
        env_kwargs=env_kwargs,
        actions=actions,
        get_state=True,
        set_state_every_step=True,
    )
    assert_rollouts_identical(ref_rollouts, state_rollouts_3)
    assert_rollouts_identical(state_rollouts, state_rollouts_3)
    # restore a point in the middle of the rollout and make sure that the remainder of the data looks the same
    offset = NUM_STEPS // 2
    state_restore_rollouts = run_in_subproc(
        gather_rollouts,
        # a different seed proves the restored state, not the seed, drives behavior
        env_kwargs={**env_kwargs, "rand_seed": 1},
        actions=actions[offset:],
        state=state_rollouts[offset]["state"],
        get_state=True,
    )
    assert_rollouts_identical(ref_rollouts[offset:], state_restore_rollouts)
    assert_rollouts_identical(state_rollouts[offset:], state_restore_rollouts)
|
from decimal import Decimal as D
from django.test import TestCase
from django.test.utils import override_settings
from mock import Mock
from oscar.apps.basket.models import Basket
from oscar.apps.order.models import Order
from oscar_testsupport.factories import create_product
from oscar.apps.order.utils import OrderCreator
class TestOrderCreatorErrorCases(TestCase):
    """Error paths of OrderCreator.place_order."""

    def setUp(self):
        self.creator = OrderCreator()
        # Deliberately unsaved and empty, to trigger the error paths below.
        self.basket = Basket()

    def test_raises_exception_when_empty_basket_passed(self):
        with self.assertRaises(ValueError):
            self.creator.place_order(basket=self.basket)

    def test_raises_exception_if_duplicate_order_number_passed(self):
        self.basket.add_product(create_product(price=D('12.00')))
        self.creator.place_order(basket=self.basket, order_number='1234')
        with self.assertRaises(ValueError):
            self.creator.place_order(basket=self.basket, order_number='1234')
class TestSuccessfulOrderCreation(TestCase):
    """Happy-path behaviour of OrderCreator.place_order."""

    def setUp(self):
        self.creator = OrderCreator()
        self.basket = Basket.objects.create()

    def tearDown(self):
        Order.objects.all().delete()

    def test_creates_order_and_line_models(self):
        self.basket.add_product(create_product(price=D('12.00')))
        self.creator.place_order(basket=self.basket, order_number='1234')
        order = Order.objects.get(number='1234')
        lines = order.lines.all()
        self.assertEqual(1, len(lines))

    def test_sets_correct_order_status(self):
        self.basket.add_product(create_product(price=D('12.00')))
        self.creator.place_order(basket=self.basket, order_number='1234', status='Active')
        order = Order.objects.get(number='1234')
        self.assertEqual('Active', order.status)

    def test_defaults_to_using_free_shipping(self):
        # With no shipping method passed, order totals equal the basket totals.
        self.basket.add_product(create_product(price=D('12.00')))
        self.creator.place_order(basket=self.basket, order_number='1234')
        order = Order.objects.get(number='1234')
        self.assertEqual(order.total_incl_tax, self.basket.total_incl_tax)
        self.assertEqual(order.total_excl_tax, self.basket.total_excl_tax)

    def test_adds_shipping_charges_to_basket_totals(self):
        # Renamed from test_defaults_to_setting_totals_to_basket_totals: this
        # case passes an explicit charging shipping method, so it verifies the
        # shipping charge is added — it does not test a default.
        self.basket.add_product(create_product(price=D('12.00')))
        method = Mock()
        method.is_discounted = False
        method.basket_charge_incl_tax = Mock(return_value=D('2.00'))
        method.basket_charge_excl_tax = Mock(return_value=D('2.00'))
        self.creator.place_order(basket=self.basket, order_number='1234', shipping_method=method)
        order = Order.objects.get(number='1234')
        self.assertEqual(order.total_incl_tax, self.basket.total_incl_tax + D('2.00'))
        self.assertEqual(order.total_excl_tax, self.basket.total_excl_tax + D('2.00'))

    def test_uses_default_order_status_from_settings(self):
        self.basket.add_product(create_product(price=D('12.00')))
        with override_settings(OSCAR_INITIAL_ORDER_STATUS='A'):
            self.creator.place_order(basket=self.basket, order_number='1234')
        order = Order.objects.get(number='1234')
        self.assertEqual('A', order.status)

    def test_uses_default_line_status_from_settings(self):
        self.basket.add_product(create_product(price=D('12.00')))
        with override_settings(OSCAR_INITIAL_LINE_STATUS='A'):
            self.creator.place_order(basket=self.basket, order_number='1234')
        order = Order.objects.get(number='1234')
        line = order.lines.all()[0]
        self.assertEqual('A', line.status)
|
# Sum of the even integers in [0, 100].
a = sum(value for value in range(101) if value % 2 == 0)
print(a)

# Largest value in (0, 1000] with value % 6 == 2 and value**3 % 5 == 3.
i = next(value for value in range(1000, 0, -1)
         if value % 6 == 2 and value ** 3 % 5 == 3)
print(i)
|
import policy.firewall
import policy.removable_devices_perms
import policy.printers
import policy.shortcuts
import policy.applications
from policy.common import Perms
# Merge every policy module's data_roots into one registry (later modules
# overwrite earlier ones on key collisions, same as sequential update calls).
data_roots = {}
for policy_module in (
    policy.firewall,
    policy.removable_devices_perms,
    policy.printers,
    policy.shortcuts,
    policy.applications,
):
    data_roots.update(policy_module.data_roots)
import zipfile
import re
from book import Book
from cStringIO import StringIO
import logging
class Archive(object):
    """Random access to books stored in a single zip archive.

    The archive holds one ``<code>_metadata.xml`` per book plus per-page
    ``ALTO/<code>_<page>.xml`` files.  The whole file is slurped into memory
    before being handed to ZipFile.

    NOTE(review): this is Python-2-only code — ``cStringIO``, list-returning
    ``filter``, and ``dict.keys()[index]`` in ``__getitem__`` all break on
    Python 3.
    """
    def __init__(self, path):
        # path: filesystem location of the zip archive.
        self.path = path
        self.logger=logging.getLogger('performance')
        self.logger.info("Opening archive " + path)
        with open(path) as finfo:
            self.logger.debug("Opened archive")
            # Read the entire archive into an in-memory buffer.
            mmap=StringIO(finfo.read())
            self.logger.info("Slurped archive")
        self.zip = zipfile.ZipFile(mmap)
        self.logger.debug("Examining books in archive")
        self.filenames = [entry.filename for entry in self.zip.infolist()]
        # Book codes come from metadata files; pages from the ALTO folder.
        book_pattern = re.compile('([0-9]*)_metadata\.xml')
        page_pattern = re.compile('ALTO\/([0-9]*?)_([0-9_]*)\.xml')
        self.logger.debug("Enumerating books")
        bookmatches=filter(None, [ book_pattern.match(name) for name in self.filenames ])
        pagematches=filter(None, [ page_pattern.match(name) for name in self.filenames ])
        # book code -> list of page identifiers
        self.book_codes={ match.group(1) : [] for match in bookmatches }
        for match in pagematches:
            self.book_codes[ match.group(1) ].append(match.group(2))
        self.logger.info("Enumerated books")

    def zip_info_for_book(self, book_code):
        """ZipInfo for a book's metadata entry."""
        return self.zip.getinfo(book_code + '_metadata.xml')

    def zip_info_for_page(self, book_code, page):
        """ZipInfo for one ALTO page entry of a book."""
        return self.zip.getinfo('ALTO/' + book_code + '_' + page + '.xml')

    def metadata_file(self, book_code):
        """Open (file-like) the metadata XML for *book_code*."""
        return self.zip.open(book_code + '_metadata.xml')

    def page_file(self, book_code, page):
        """Open (file-like) one ALTO page XML for *book_code*."""
        return self.zip.open('ALTO/' + book_code + '_' + page + '.xml')

    def __getitem__(self, index):
        # NOTE(review): keys() indexing is Python-2-only and dict order there
        # is arbitrary, so the index/book mapping is not stable.
        self.logger.debug("Creating book")
        return Book(self.book_codes.keys()[index],self)

    def __iter__(self):
        for book in self.book_codes:
            self.logger.debug("Creating book")
            yield Book(book, self)

    def __len__(self):
        return len(self.book_codes)
|
import logging
from settings.settings import DEBUG
# Configure the root logger from the project's DEBUG flag.
logger = logging.getLogger()
if DEBUG:
    # logging.INFO == 20 — use the named constant instead of the magic number.
    # NOTE(review): the DEBUG flag only raises verbosity to INFO, not DEBUG
    # (10); confirm that is intentional before changing the level.
    logger.setLevel(logging.INFO)
|
# Read five integers, then report the largest/smallest values and every
# position where each occurs.
numeros = []
for posicao in range(5):
    numeros.append(int(input('Digite o {} termo: '.format(posicao))))

maior = max(numeros)
menor = min(numeros)

print('-=' * 15)
print('O Maior número: {} foi digitado na posição '.format(maior), end='')
for indice, valor in enumerate(numeros):
    if valor == maior:
        print(indice, end=' ')
print('\nO Menor número: {} foi digitado na posição '.format(menor), end='')
for indice, valor in enumerate(numeros):
    if valor == menor:
        print(indice, end=' ')
# Copyright (c) 2019 Nathan Juraj Michlo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import NamedTuple
import numpy as np
import matplotlib.pyplot as plt
import ray
from ray.tune.examples.mnist_pytorch_trainable import TrainMNIST
from tqdm import tqdm
from pbt.strategies import ExploitUcb, ExploitTruncationSelection
from pbt.pbt import Member, Population
# ========================================================================= #
# SYSTEM #
# ========================================================================= #
class ToyHyperParams(NamedTuple):
    """Hyperparameters of a toy PBT member; only the learning rate is tuned."""
    learning_rate: float
# ========================================================================= #
# Member - TOY #
# ========================================================================= #
# checkpoint id -> path returned by Trainable.save(); shared by all members.
CHECKPOINT_MAP = {}
class MemberMnist(Member):
    """PBT population member wrapping ray.tune's MNIST Trainable."""

    def __init__(self):
        super().__init__()
        # Randomly initialised hyperparameters for this member.
        self._trainable = TrainMNIST(config=dict(
            lr=np.random.uniform(0.001, 0.1),
            momentum=np.random.uniform(0.1, 0.9),
            use_gpu=True,
        ))
        self._last_eval = None

    def copy_h(self) -> dict:
        # NOTE(review): returns the live config dict, not a copy — mutations
        # by the caller would leak back into the trainable; confirm intended.
        return self._trainable.config

    def _save_theta(self, id):
        # Persist model weights and remember the checkpoint path by id.
        # (parameter name shadows the builtin `id`; kept for interface safety)
        CHECKPOINT_MAP[id] = self._trainable.save('./checkpoints')

    def _load_theta(self, id):
        # Restore model weights from a previously saved checkpoint.
        self._trainable.restore(CHECKPOINT_MAP[id])

    def _is_ready(self, population: 'Population') -> bool:
        # Ready to exploit/explore every `steps_till_ready` steps (default 3).
        return self._t % population.options.get('steps_till_ready', 3) == 0  # and (self != max(population, key=lambda m: m._p))

    def _step(self, options: dict) -> np.ndarray:
        # One training iteration; caches accuracy for _eval().
        result = self._trainable._train()
        self._last_eval = result['mean_accuracy']
        return result

    def _eval(self, options: dict) -> float:
        # Accuracy from the most recent _step(); None before the first step.
        return self._last_eval

    def _explore(self, population: 'Population') -> dict:
        # Resample hyperparameters uniformly (same ranges as __init__).
        return dict(
            lr=np.random.uniform(0.001, 0.1),
            momentum=np.random.uniform(0.1, 0.9),
        )
# ============================================================================ #
# PLOTTING #
# ============================================================================ #
#
# def make_subplots(h, w, figsize=None):
# fig, axs = plt.subplots(h, w, figsize=figsize)
# return fig, np.array(axs).reshape((h, w))
#
# def plot_performance(ax, population, steps, title):
# for member, color in zip(population, 'brgcmyk'):
# vals = [step.p for step in member]
# ax.plot(vals, color=color, lw=0.7)
# ax.axhline(y=1.2, linestyle='dotted', color='k')
# ax.set(xlim=[0, steps-1], ylim=[-0.5, 1.31], title=title, xlabel='Step', ylabel='Q')
#
# def plot_theta(ax, population, steps, title):
# for member, color in zip(population, 'brgcmyk'):
# x, y = np.array([step.theta[0] for step in member]), np.array([step.theta[1] for step in member])
# jumps = np.where([step.exploit_id is not None for step in member])[0]
# x, y = np.insert(x, jumps, np.nan), np.insert(y, jumps, np.nan)
# ax.plot(x, y, color=color, lw=0.5, zorder=1)
# ax.scatter(x, y, color=color, s=1, zorder=2)
#
# ax.set(xlim=[-0.1, 1], ylim=[-0.1, 1], title=title, xlabel='theta0', ylabel='theta1')
def experiment(options, exploiter, n=20, steps=200, exploit=True, explore=True, title=None):
    """Train a fresh population of *n* MNIST members for *steps* steps.

    Returns a tuple of (best score seen, per-step performance averaged over
    the population).
    """
    members = [MemberMnist() for _ in range(n)]
    population = Population(members, exploiter=exploiter, options=options)
    population.train(steps, exploit=exploit, explore=explore)
    step_history = np.array([[h.p for h in m] for m in population])
    return np.max(population.scores), np.average(step_history, axis=0)
if __name__ == '__main__':
    ray.init()
    options = {
        "steps": 50,
        "steps_till_ready": 1,
        "exploration_scale": 0.1,
    }
    # REPEAT EXPERIMENT N TIMES
    # n members per population, k strategies compared, averaged over `repeats` runs.
    n, k, repeats = 10, 2, 100
    score, scores = np.zeros(k), np.zeros((k, options['steps']))
    # fig, axs = make_subplots(2, len(scores))
    with tqdm(range(repeats)) as itr:
        for i in itr:
            # Run both exploit strategies on identical settings and accumulate.
            score_0, scores_0 = experiment(options, ExploitTruncationSelection(), n=n, steps=options["steps"], exploit=True, explore=True, title='PBT Trunc Sel')
            score_1, scores_1 = experiment(options, ExploitUcb(), n=n, steps=options["steps"], exploit=True, explore=True, title='PBT Ucb Sel')
            score += [score_0, score_1]
            scores += [scores_0, scores_1]
            print(score_0)
            print(scores_0)
            print(score_1)
            print(scores_1)
            # itr.set_description(f'{np.around(scores / (i + 1), 4)} | {np.around(converge_times / (i + 1), 2)}')
    # Convert accumulated totals into averages over all repeats.
    scores /= repeats
    score /= repeats
    print(f'T: {score[0]} | {scores[0]}')
    print(f'U: {score[1]} | {scores[1]}')
    fig, ax = plt.subplots(1, 1)
    ax.plot(scores[0], label='PBT Trunc Sel')
    ax.plot(scores[1], label='PBT Ucb Sel')
    ax.legend()
    ax.set(title=f'Trunc vs Ucb: {dict(n=n, r=options["steps_till_ready"])}', xlabel='Steps', ylabel='Ave Max Score')
    fig.show()
|
import pytest
from construct import Debugger, ValidationError
from bonfo.msp.fields.config import FeatureConfig, Features, SelectPID, SelectRate, SelectSetting
from .. import messages
def test_select_profile_out_of_range():
    """Profile indices outside the valid range must fail struct validation."""
    with pytest.raises(ValidationError):
        data = SelectPID(10)
        SelectSetting.get_struct().build(data)
    with pytest.raises(ValidationError):
        data = SelectRate(10)
        SelectSetting.get_struct().build(data)
def test_select_setting_rate_profile_build():
    """Selecting rate profile 3 via SelectSetting encodes to the expected byte."""
    data = SelectSetting(profile=dict(rate_profile=3))
    assert SelectSetting.get_struct().build(data) == b"\x82"
def test_select_rate_build():
    """SelectRate(3) must encode identically to rate_profile=3 above."""
    data = SelectRate(3)
    assert SelectSetting.get_struct().build(data) == b"\x82"
def test_select_setting_pid_profile_build():
    """Selecting PID profile 2 via SelectSetting encodes to the expected byte."""
    data = SelectSetting(profile=dict(pid_profile=2))
    assert SelectSetting.get_struct().build(data) == b"\x01"
def test_select_pid_build():
    """SelectPID(3) encodes to 0x02.

    NOTE(review): pid_profile=2 encodes to 0x01 while SelectPID(3) encodes to
    0x02 — the wire value appears offset by one from the argument; confirm the
    two constructors are meant to index differently.
    """
    data = SelectPID(3)
    assert SelectSetting.get_struct().build(data) == b"\x02"
def test_feature_config():
    """A captured feature-config response must parse without error."""
    # Bare attribute access: fails fast (AttributeError) if the fixture is missing.
    messages.feature_config_response
    # struct = Debugger(FeatureConfig.get_struct())
    struct = FeatureConfig.get_struct()
    struct.parse(messages.feature_config_response)
def test_feature_config_operations():
    """Scratch test for Features flag arithmetic (currently asserts nothing)."""
    pass
    # Test flag manipulation features?
    # |
    # &
    # ^
    # NOTE(review): the first rx_gps assignment is dead — it is overwritten on
    # the next line before use.
    rx_gps = sum([Features.RX_SERIAL, Features.GPS])
    rx_gps = Features.RX_SERIAL - Features.GPS
    print(rx_gps)
def xtest_rc_tuning_parse():
    """Disabled test (no ``test_`` prefix); kept as a reference RcTuning fixture."""
    # RcTuning(
    #     rc_rate=1.0,
    #     rc_expo=0.0,
    #     roll_pitch_rate=0,
    #     roll_rate=0.7,
    #     pitch_rate=0.7,
    #     yaw_rate=0.7,
    #     dynamic_thr_pid=0.65,
    #     throttle_mid=0.5,
    #     throttle_expo=0.0,
    #     dynamic_thr_breakpofloat=0,
    #     rc_yaw_expo=0.0,
    #     rcyawrate=1.0,
    #     rcpitchrate=1.0,
    #     rc_pitch_expo=0.0,
    #     roll_rate_limit=1998,
    #     pitch_rate_limit=1998,
    #     yaw_rate_limit=1998,
    # )
    pass
def xtest_rx_config():
    """Disabled test (no ``test_`` prefix); kept as a reference RxConfig fixture."""
    # RxConfig(
    #     serialrx_provider=2,
    #     stick_max=1900,
    #     stick_center=1500,
    #     stick_min=1050,
    #     spektrum_sat_bind=0,
    #     rx_min_usec=885,
    #     rx_max_usec=2115,
    #     rc_interpolation=2,
    #     rc_interpolation_interval=19,
    #     rc_interpolation_channels=2,
    #     air_mode_activate_threshold=1250,
    #     rx_spi_protocol=0,
    #     rx_spi_id=0,
    #     rx_spi_rf_channel_count=0,
    #     fpv_cam_angle_degrees=40,
    #     rc_smoothing_type=1,
    #     rc_smoothing_input_cutoff=0,
    #     rc_smoothing_derivative_cutoff=0,
    #     rc_smoothing_input_type=1,
    #     rc_smoothing_derivative_type=3,
    # )
    pass
|
# encoding: utf8
#
# retrieved from http://www.aminus.net/wiki/Dowser at 2015-03-18
# this document were placed in public domain by their author
#
# This file is part of the Neurons project.
# Copyright (c), Arskom Ltd. (arskom.com.tr),
# Burak Arslan <burak.arslan@arskom.com.tr>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the Arskom Ltd., the neurons project nor the names of
# its its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import logging
logger = logging.getLogger(__name__)
import cgi
import os
import gc
import sys
from types import FrameType, ModuleType
from datetime import datetime
from lxml.html.builder import E
from neurons.daemon.dowser.const import ASSETS_DIR
from spyne import rpc, Unicode, Service, ByteArray, AnyHtml
from spyne import Integer
from spyne.protocol.http import HttpPattern, HttpRpc
from spyne.util.six import BytesIO
from neurons.daemon.dowser import reftree
try:
    from PIL import Image
    from PIL import ImageDraw
except ImportError as e:
    # Python 3 unbinds `e` when the except block ends, so stash the exception
    # where the stub can still reach it at call time.
    _pil_import_error = e

    class Image:
        """Stub standing in for PIL.Image when Pillow is not installed."""
        @classmethod
        def new(cls, *args, **kwargs):
            # Match PIL's Image.new(mode, size, ...) call shape: the original
            # stub took no arguments, so callers got a TypeError instead of
            # the intended ImportError.
            raise _pil_import_error

    ImageDraw = None
def get_repr(obj, limit=250):
    """Return an HTML-escaped repr of *obj*, truncated to *limit* characters.

    NOTE(review): cgi.escape was removed in Python 3.8 (module removed in
    3.13); html.escape is the modern replacement.
    """
    return cgi.escape(reftree.get_repr(obj, limit))
# Throwaway class used only to capture the type of a class __dict__ proxy.
class _(object): pass
dictproxy = type(_.__dict__)
# Callable descriptor types filtered out when listing an object's data
# attributes in trace_one().
# NOTE(review): sys.getcheckinterval was removed in Python 3.9 and
# 'instancemethod' is a Python-2 concept — this list is py2-era.
method_types = [
    type(tuple.__le__),              # 'wrapper_descriptor'
    type([1].__le__),                # 'method-wrapper'
    type(sys.getcheckinterval),      # 'builtin_function_or_method'
    type(cgi.FieldStorage.getfirst), # 'instancemethod'
]
def template(name, **params):
    """Render asset *name* with old-style %-interpolation.

    *params* override the default context ('maincss', 'home').
    """
    context = {
        'maincss': "/assets/main.css",
        'home': "/",
    }
    context.update(params)
    # `with` closes the handle deterministically; the original leaked it.
    with open(os.path.join(ASSETS_DIR, name)) as f:
        return f.read() % context
"""
curl $(python -c \
"from neurons.daemon.ipc import get_dowser_address_for_pid; \
print '%s:%d' % get_dowser_address_for_pid( $(pgrep ink_) )" )
"""
class DowserServices(Service):
    """Spyne service exposing live object statistics for leak hunting.

    ``tick()`` must be called periodically by the host; it samples per-type
    instance counts into ``history``.  The rpc endpoints render charts,
    snapshots and referrer traces over that data.
    """

    period = 5            # intended seconds between external tick() calls
    maxhistory = 300      # max samples retained per type
    history = {}          # typename -> list of per-sample instance counts
    samples = 0           # number of samples accumulated so far
    id_history = []       # recent sets of all live object ids
    max_id_history = 200  # bound on len(id_history)

    @rpc(_returns=AnyHtml)
    def snapshot(ctx):
        """Record the current set of live object ids and render the objects
        that are new relative to the oldest retained snapshot."""
        ids = set()
        for obj in gc.get_objects():
            ids.add(id(obj))
        DowserServices.id_history.append(ids)
        if len(DowserServices.id_history) > DowserServices.max_id_history:
            DowserServices.id_history.pop(0)
        idhist = DowserServices.id_history
        retval = E.table()
        if len(idhist) > 1:
            new, old = idhist[-1], idhist[0]
            ids = sorted(new | old)
            for oid in ids:
                tr = E.tr()
                if oid in new and not (oid in old):
                    tr.append(E.td("New: {}".format(oid)))
                    try:
                        obj = next(ob for ob in gc.get_objects() if id(ob) == oid)
                        if obj in idhist:
                            continue
                        try:
                            # cells/frames churn constantly; skip the noise.
                            if obj.__name__ in ('cell', 'frame'):
                                continue
                            if obj.__class__.__name__ in ('cell', 'frame'):
                                continue
                        except Exception:
                            pass
                        reprobj = repr(obj)
                        if len(reprobj) > 1024:
                            reprobj = reprobj[:1024]
                        tr.append(E.td(E.pre(repr(type(obj)))))
                        tr.append(E.td(E.pre(reprobj)))
                    except StopIteration:
                        # object died between the id scan and the lookup
                        continue
                retval.append(tr)
        return E.html(
            E.head(E.style("td { vertical-align:top; }\n")),
            E.body(retval)
        )

    @classmethod
    def tick(cls):
        """Take one sample: count live instances of every type and append the
        counts to the bounded per-type history."""
        logger.debug("Dowser tick %s", datetime.now().isoformat(' '))
        gc.collect()
        typecounts = {}
        for obj in gc.get_objects():
            objtype = type(obj)
            if objtype in typecounts:
                typecounts[objtype] += 1
            else:
                typecounts[objtype] = 1
        for objtype, count in typecounts.items():
            typename = str(objtype.__module__) + "." + objtype.__name__
            if typename not in cls.history:
                cls.history[typename] = [0] * cls.samples
            cls.history[typename].append(count)
        samples = cls.samples + 1
        # Add dummy entries for any types which no longer exist
        for typename, hist in cls.history.items():
            diff = samples - len(hist)
            if diff > 0:
                hist.extend([0] * diff)
        # Truncate history to self.maxhistory
        if samples > cls.maxhistory:
            for typename, hist in cls.history.items():
                hist.pop(0)
        else:
            cls.samples = samples

    # Sort keys for index(); None means unsorted.
    # NOTE(review): the 'diff' entry indexes id_history (sets of ids) with a
    # typename — it cannot work as written; left untouched, see FIXME below.
    ORDER_MAP = {
        None: None,
        'min': lambda x: min(DowserServices.history[x]),
        'cur': lambda x: DowserServices.history[x][-1],
        'max': lambda x: max(DowserServices.history[x]),
        'diff': lambda x: len(DowserServices.id_history[-1][x]) -
                          len(DowserServices.id_history[0][x]),
    }

    @rpc(Unicode(values=['max', 'min', 'cur', 'diff']), Integer(default=0),
         Integer(default=0), Integer(default=0), Integer(default=0),
         _patterns=[HttpPattern('/', verb='GET')], _returns=AnyHtml)
    def index(ctx, order, cur_floor, min_floor, max_floor, diff_floor):
        """Render one sparkline row per tracked type, sorted by *order* and
        filtered by the various *_floor* thresholds."""
        rows = []
        # list() so .sort() works on Python 3 dict views too.
        typenames = list(ctx.descriptor.service_class.history.keys())
        key = ctx.descriptor.service_class.ORDER_MAP[order]
        typenames.sort(key=key, reverse=False if order is None else True)
        for typename in typenames:
            hist = ctx.descriptor.service_class.history[typename]
            # get numbers
            maxhist = max(hist)
            minhist = min(hist)  # was max(hist): Min column always equalled Max
            cur = hist[-1]
            # idhist = DowserServices.id_history
            # last = idhist[-1][typename]
            # first = idhist[0][typename]
            # diff = len(last) - len(first)
            diff = -1  # FIXME
            # check floors
            show_this = cur >= cur_floor and \
                minhist >= min_floor and \
                maxhist >= max_floor and \
                abs(diff) >= diff_floor
            if not show_this:
                continue
            row = (
                '<div class="typecount">%s<br />'
                '<img class="chart" src="%s" /><br />'
                'Min: %s Cur: %s Max: %s Diff: %s '
                '<a href="%s">TRACE</a> &middot; '
                '<a href="%s">DIFF TRACE</a>'
                '</div>' % (
                    cgi.escape(typename),
                    "/chart?typename=%s" % typename,
                    minhist, cur, maxhist, diff,
                    "/trace?typename=%s" % typename,
                    "/difftrace?typename=%s" % typename,
                )
            )
            rows.append(row)
        return template("graphs.html", output="\n".join(rows))

    @rpc(Unicode, _returns=ByteArray)
    def chart(ctx, typename):
        """Return a sparkline chart of the given type."""
        data = ctx.descriptor.service_class.history[typename]
        height = 20.0
        # `or 1` guards against an all-zero history (ZeroDivisionError).
        scale = height / (max(data) or 1)
        im = Image.new("RGB", (len(data), int(height)), 'white')
        draw = ImageDraw.Draw(im)
        draw.line([(i, int(height - (v * scale))) for i, v in enumerate(data)],
                  fill="#009900")
        del draw
        f = BytesIO()
        im.save(f, "PNG")
        result = f.getvalue()
        ctx.out_protocol = HttpRpc()
        ctx.transport.resp_headers["Content-Type"] = "image/png"
        return [result]

    @rpc(Unicode, Integer, _returns=AnyHtml)
    def trace(ctx, typename, objid):
        """Trace all instances of *typename*, or one instance when *objid*
        is given."""
        gc.collect()
        if objid is None:
            rows = DowserServices.trace_all(typename)
        else:
            rows = DowserServices.trace_one(typename, objid)
        return template("trace.html", output="\n".join(rows),
                        typename=cgi.escape(typename),
                        objid=str(objid or ''))

    @rpc(Unicode, _returns=AnyHtml)
    def difftrace(ctx, typename):
        """Trace only the instances of *typename* created since the oldest
        snapshot."""
        gc.collect()
        rows = DowserServices.trace_all(typename, difftrace=True)
        return template("trace.html", output="\n".join(rows),
                        typename=cgi.escape(typename), objid='')

    @classmethod
    def trace_all(cls, typename, difftrace=False):
        """Render a referrer line for every live instance of *typename*.

        With *difftrace*, restrict to objects present in the newest id
        snapshot but absent from the oldest.
        """
        rows = []
        for obj in gc.get_objects():
            objtype = type(obj)
            if objtype.__module__ + "." + objtype.__name__ == typename and \
                    ((not difftrace) or (
                        id(obj) in DowserServices.id_history[-1] and
                        (not id(obj) in DowserServices.id_history[0])
                    )):
                rows.append("<p class='obj'>%s</p>"
                            % ReferrerTree(obj).get_repr(obj))
        if not rows:
            rows = ["<h3>The type you requested was not found.</h3>"]
        return rows

    @classmethod
    def trace_one(cls, typename, objid):
        """Render attributes, referrers and referents of the single live
        object whose id() equals *objid*."""
        rows = []
        all_objs = gc.get_objects()
        for obj in all_objs:
            if id(obj) == objid:
                objtype = type(obj)
                if objtype.__module__ + "." + objtype.__name__ != typename:
                    rows = ["<h3>The object you requested is no longer "
                            "of the correct type.</h3>"]
                else:
                    # Attributes
                    rows.append('<div class="obj"><h3>Attributes</h3>')
                    for k in dir(obj):
                        v = getattr(obj, k)
                        if type(v) not in method_types:
                            rows.append('<p class="attr"><b>%s:</b> %s</p>' %
                                        (k, get_repr(v)))
                        del v
                    rows.append('</div>')
                    # Referrers
                    rows.append(
                        '<div class="refs"><h3>Referrers (Parents)</h3>')
                    rows.append('<p class="desc"><a href="%s">Show the '
                                'entire tree</a> of reachable objects</p>'
                                % (
                        "/tree?typename=%s&objid=%s" % (typename, objid)))
                    tree = ReferrerTree(obj)
                    tree.ignore(all_objs)
                    for depth, parentid, parentrepr in tree.walk(maxdepth=1):
                        if parentid:
                            rows.append("<p class='obj'>%s</p>" % parentrepr)
                    rows.append('</div>')
                    # Referents
                    rows.append(
                        '<div class="refs"><h3>Referents (Children)</h3>')
                    for child in gc.get_referents(obj):
                        rows.append(
                            "<p class='obj'>%s</p>" % tree.get_repr(child))
                    rows.append('</div>')
                break
        if not rows:
            rows = ["<h3>The object you requested was not found.</h3>"]
        return rows

    @rpc(Unicode, Integer, _returns=AnyHtml)
    def tree(self, typename, objid):
        """Render the full referrer tree of the object with id *objid*.

        NOTE: spyne passes the MethodContext as the first positional argument;
        the parameter is (mis)named ``self`` here but holds the ctx.
        """
        gc.collect()
        rows = []
        all_objs = gc.get_objects()
        for obj in all_objs:
            if id(obj) == objid:
                objtype = type(obj)
                if objtype.__module__ + "." + objtype.__name__ != typename:
                    rows = ["<h3>The object you requested is no longer "
                            "of the correct type.</h3>"]
                else:
                    rows.append('<div class="obj">')
                    tree = ReferrerTree(obj)
                    tree.ignore(all_objs)
                    for depth, parentid, parentrepr in tree.walk(
                            maxresults=1000):
                        rows.append(parentrepr)
                    rows.append('</div>')
                break
        if not rows:
            rows = ["<h3>The object you requested was not found.</h3>"]
        params = {
            'output': "\n".join(rows),
            'typename': cgi.escape(typename),
            'objid': str(objid),
        }
        return template("tree.html", **params)
class ReferrerTree(reftree.Tree):
    """reftree.Tree specialisation that walks gc referrers and renders each
    node as an HTML block, skipping frames/modules/dowser internals."""

    # Stop ascending once a module object is reached.
    ignore_modules = True

    def _gen(self, obj, depth=0):
        """Yield (depth, id, html) for *obj*'s referrers, depth-first."""
        if self.maxdepth and depth >= self.maxdepth:
            yield depth, 0, "---- Max depth reached ----"
            return
        if isinstance(obj, ModuleType) and self.ignore_modules:
            return
        refs = gc.get_referrers(obj)
        refiter = iter(refs)
        # The refs list and its iterator are themselves referrers; hide them.
        self.ignore(refs, refiter)
        thisfile = sys._getframe().f_code.co_filename
        for ref in refiter:
            # Exclude all frames that are from this module or reftree.
            if (isinstance(ref, FrameType)
                    and ref.f_code.co_filename in (thisfile, self.filename)):
                continue
            # Exclude all functions and classes from this module or reftree.
            mod = getattr(ref, "__module__", "")
            if mod is None:
                continue
            if "dowser" in mod or "reftree" in mod or mod == '__main__':
                continue
            # Exclude all parents in our ignore list.
            if id(ref) in self._ignore:
                continue
            # Yield the (depth, id, repr) of our object.
            yield depth, 0, '%s<div class="branch">' % (" " * depth)
            if id(ref) in self.seen:
                yield depth, id(ref), "see %s above" % id(ref)
            else:
                self.seen[id(ref)] = None
                yield depth, id(ref), self.get_repr(ref, obj)
                for parent in self._gen(ref, depth + 1):
                    yield parent
            yield depth, 0, '%s</div>' % (" " * depth)

    def get_repr(self, obj, referent=None):
        """Return an HTML tree block describing the given object."""
        objtype = type(obj)
        typename = objtype.__module__ + "." + objtype.__name__
        prettytype = typename.replace("__builtin__.", "")
        name = getattr(obj, "__name__", "")
        if name:
            prettytype = "%s %r" % (prettytype, name)
        key = ""
        if referent:
            # Annotate how obj points at the referent (dict key / attribute).
            key = self.get_refkey(obj, referent)
        return ('<a class="objectid" href="%s">%s</a> '
                '<span class="typename">%s</span>%s<br />'
                '<span class="repr">%s</span>'
                % (("/trace?typename=%s&objid=%s" % (typename, id(obj))),
                   id(obj), prettytype, key, get_repr(obj, 100))
                )

    def get_refkey(self, obj, referent):
        """Return the dict key or attribute name of obj which refers to
        referent."""
        if isinstance(obj, dict):
            for k, v in obj.items():
                if v is referent:
                    return " (via its %r key)" % (k,)
        for k in dir(obj) + ['__dict__']:
            if getattr(obj, k, None) is referent:
                return " (via its %r attribute)" % (k,)
        return ""
|
import json
# Package-relative folder holding the completion settings files.
JC_SETTINGS_FOLDER_NAME = "javascript_completions"
JC_SETTINGS_FOLDER = os.path.join(PACKAGE_PATH, JC_SETTINGS_FOLDER_NAME)
class JavaScriptCompletions():
    """Loads and caches the per-API completion settings for the plugin.

    ``init()`` must be called once sublime is ready; completions are then
    loaded asynchronously so the UI thread is never blocked.
    """

    def init(self):
        self.api = {}
        # List of API names the user enabled in the plugin settings.
        self.API_Setup = sublime.load_settings('JavaScript-Completions.sublime-settings').get('completion_active_list')
        sublime.set_timeout_async(self.load_api)

    def load_api(self):
        # Caching completions
        if self.API_Setup:
            for API_Keyword in self.API_Setup:
                self.api[API_Keyword] = sublime.load_settings( API_Keyword + '.sublime-settings' )
                # A settings object without "scope" means sublime found no
                # such settings file; fall back to the bundled JSON copy.
                if self.api[API_Keyword].get("scope") is None:
                    path_to_json = os.path.join(PACKAGE_PATH, "sublime-completions", API_Keyword + '.sublime-settings' )
                    if os.path.isfile(path_to_json):
                        with open(path_to_json) as json_file:
                            self.api[API_Keyword] = json.load(json_file)

    def get(self, key):
        """Read *key* from the plugin's main settings file."""
        return sublime.load_settings('JavaScript-Completions.sublime-settings').get(key)
from unittest import TestCase
from terraform_compliance.common.readable_dir import ReadableDir
import os
from mock import patch
class TestReadableDir(TestCase):
    """Unit tests for the ReadableDir argparse action."""

    def test_readable_dir_call_git_repository_success(self):
        # 'git:'-prefixed values bypass the local-directory checks entirely.
        class Namespace(object):
            pass
        resp = ReadableDir('parser', 'value').__call__('parser', Namespace, 'git:value')
        self.assertEqual(Namespace.value, 'value')
        self.assertTrue(resp)

    @patch.object(os.path, 'isdir', return_value=False)
    def test_readable_dir_call_not_a_local_directory(self, *args):
        # A non-directory argument must terminate the program.
        class Namespace(object):
            pass
        with self.assertRaises(SystemExit):
            ReadableDir('parser', 'value').__call__('parser', Namespace, 'non_existing_dir')

    @patch.object(os.path, 'isdir', return_value=True)
    @patch.object(os, 'access', return_value=True)
    def test_readable_dir_call_is_a_local_dir(self, *args):
        # Existing, readable directory: value is stored on the namespace.
        class Namespace(object):
            pass
        resp = ReadableDir('parser', 'value').__call__('parser', Namespace, 'value')
        self.assertEqual(Namespace.value, 'value')
        self.assertTrue(resp)

    @patch.object(os.path, 'isdir', return_value=True)
    @patch.object(os, 'access', return_value=False)
    def test_readable_dir_call_can_not_access_dir(self, *args):
        # Directory exists but is unreadable: must terminate the program.
        class Namespace(object):
            pass
        with self.assertRaises(SystemExit):
            ReadableDir('parser', 'value').__call__('parser', Namespace, 'non_accessible_dir')
|
from typing import Iterator
from torch.utils.data import Sampler
from lightning_rl.storage.IBuffer import Buffer
class EntireBufferSampler(Sampler):
    """Sampler that walks buffer indices sequentially, wrapping around, until
    *samples_per_epoch* indices have been produced."""

    def __init__(
        self,
        buffer: Buffer,
        samples_per_epoch: int,
    ) -> None:
        super().__init__(None)
        self.buffer = buffer
        self.samples_per_epoch = samples_per_epoch

    def __iter__(self) -> Iterator[int]:
        cursor = 0
        remaining = self.samples_per_epoch
        while remaining > 0:
            yield cursor
            # Re-read len() each step: the buffer may grow while sampling.
            cursor = (cursor + 1) % max(1, len(self.buffer))
            remaining -= 1
|
import discord
from config.globals import stable_client_id, testing_bot_prefix, testing_client_id, bot_prefix
async def get_default_prefix(bot):
    """Return the default command prefix for the running branch.

    Stable and testing bots have distinct client ids, which lets both branches
    coexist with their own prefixes.

    Raises:
        UserWarning: if the client id matches neither configured branch.
    """
    client_id = bot.user.id
    if client_id == stable_client_id:
        return bot_prefix
    if client_id == testing_client_id:
        return testing_bot_prefix
    raise UserWarning("Client ID does not match a valid branch configuration! Unable to return a default prefix.")
async def get_prefix(bot, message):
    """Checks if the bot has a configuration tag for the prefix. Otherwise, gets the default.

    A guild can override the prefix by giving the bot a role named
    ``fox_prefix:<prefix>``.
    """
    default_prefix = await get_default_prefix(bot)
    # DMs have no guild roles to carry an override.
    if isinstance(message.channel, discord.DMChannel):
        return default_prefix
    my_roles = [role.name for role in message.guild.me.roles]
    for role_name in my_roles:
        # startswith replaces the original slice-compare (role_name[:11] == ...).
        if role_name.startswith("fox_prefix:"):
            return role_name[len("fox_prefix:"):]
    return default_prefix
|
"""
Write a function to find the longest common prefix string amongst an array of strings.
"""
def longestCommonPrefix(strs):
    """Return the longest common prefix of all strings in *strs*.

    :type strs: List[str]
    :rtype: str  (empty string for an empty list or no common prefix)
    """
    if not strs:
        return ''
    common = strs[0]
    for candidate in strs[1:]:  # renamed from `str`, which shadowed the builtin
        # Shrink until candidate starts with it; '' always matches, so this terminates.
        while not candidate.startswith(common):
            common = common[:-1]
    return common
# Quick demo of the function above.
sample_prefix = longestCommonPrefix(['a', 'a'])
print(sample_prefix)
|
"""
"""
from multipledispatch import dispatch
import dask.dataframe as dd
import dask.array as da
@dispatch(dd._Frame)
def exp(A):
    """Element-wise exponential for dask dataframe collections (via da.exp)."""
    return da.exp(A)
@dispatch(dd._Frame)
def absolute(A):
    """Element-wise absolute value for dask dataframe collections (via da.absolute)."""
    return da.absolute(A)
@dispatch(dd._Frame)
def sign(A):
    """Element-wise sign for dask dataframe collections (via da.sign)."""
    return da.sign(A)
@dispatch(dd._Frame)
def log1p(A):
    """Element-wise log(1 + x) for dask dataframe collections (via da.log1p)."""
    return da.log1p(A)
# @dispatch(da.Array, da.Array)
# def dot(A, B):
# return da.dot(A, B)
@dispatch(dd.DataFrame)
def add_intercept(X):
    """Return *X* with a constant 'intercept' column of ones prepended.

    Raises ValueError if an 'intercept' column is already present.  The
    returned frame has 'intercept' first, then the original columns in
    their original order.
    """
    columns = X.columns
    if 'intercept' in columns:
        raise ValueError("'intercept' column already in 'X'")
    return X.assign(intercept=1)[['intercept'] + list(columns)]
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python3
import MySQLdb
import numpy as np
import re
import os
import json
import bibtexparser
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
class DocManager(object):
    """MySQL-backed manager for a personal bibliography database.

    ``Document`` holds papers and 'topic' pseudo-documents (serialized bib
    text included); ``Connection`` holds directed document-to-document
    relation edges.  ``Author``/``Possess`` are created but not populated by
    the helpers in this class.  Bib entries are parsed/re-serialized with
    bibtexparser.

    NOTE(review): SQL is built by string formatting with a doubled-quote
    escape.  That handles plain quotes but is NOT a real defense against SQL
    injection; prefer parameterized queries (cursor.execute(sql, params))
    wherever input can be untrusted.
    """
    DELETE_ALL_TABLES_SQL = \
    """DROP TABLE IF EXISTS Possess;
    DROP TABLE IF EXISTS Connection;
    DROP TABLE IF EXISTS Document;
    DROP TABLE IF EXISTS Author;"""
    CREATE_ALL_TABLES_SQL = \
    """CREATE TABLE IF NOT EXISTS Document(
    docID VARCHAR(255) PRIMARY KEY,
    title TEXT NOT NULL,
    year INTEGER,
    source TEXT,
    type VARCHAR(30),
    description TEXT,
    bib TEXT
    );
    -- create table for authors
    CREATE TABLE IF NOT EXISTS Author(
    authorID INTEGER PRIMARY KEY auto_increment,
    lastname VARCHAR(255) NOT NULL,
    firstname VARCHAR(255) NOT NULL,
    organization VARCHAR(255),
    description TEXT
    );
    -- create table for author-paper relation
    CREATE TABLE IF NOT EXISTS Possess(
    docID VARCHAR(255) NOT NULL,
    authorID INTEGER NOT NULL,
    description TEXT,
    FOREIGN KEY (docID) REFERENCES Document(docID) ON DELETE CASCADE,
    FOREIGN KEY (authorID) REFERENCES Author(authorID) ON DELETE CASCADE,
    PRIMARY KEY (docID, authorID)
    );
    -- create table for papers relation
    CREATE TABLE IF NOT EXISTS Connection(
    srcDocId VARCHAR(255) NOT NULL,
    dstDocId VARCHAR(255) NOT NULL,
    description TEXT,
    FOREIGN KEY (srcDocId) REFERENCES Document(docID) ON DELETE CASCADE,
    FOREIGN KEY (dstDocId) REFERENCES Document(docID) ON DELETE CASCADE,
    PRIMARY KEY (srcDocId, dstDocId)
    );"""

    def __init__(self):
        """Set up an unconnected manager and a configured BibTeX writer."""
        super(DocManager, self).__init__()
        self.dbUser = ""
        self.dbPwd = ""
        self.dbName = ""
        self.conn = -1    # -1 is the "not connected" sentinel checked by _isConnect()
        self.cursor = -1
        # instance for writing bib entries back out
        self.db = BibDatabase()
        self.writer = BibTexWriter()
        self.writer.indent = ' '*4       # indent entries with 4 spaces instead of one
        self.writer.comma_first = False  # do not place the comma at the beginning of the line

    def initConn(self, user, passwd, db):
        """Connect to MySQL, create database *db* if missing, then reconnect
        with *db* selected and remember the credentials."""
        self.conn = MySQLdb.connect(user=user, passwd=passwd, charset='utf8mb4')
        self.cursor = self.conn.cursor()
        sql = 'CREATE DATABASE IF NOT EXISTS %s CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;' %db
        self.cursor.execute(sql)
        self.conn.commit()
        self.conn.close()
        self.conn = MySQLdb.connect(user=user, passwd=passwd, charset='utf8mb4', db=db)
        self.cursor = self.conn.cursor()
        self.dbUser = user
        self.dbPwd = passwd
        self.dbName = db
        print("[+] initial connection finished")

    def closeConn(self):
        """Close the connection if one was ever opened."""
        if self.conn:
            self.conn.close()

    def createTbls(self):
        """Create all tables (idempotent: CREATE TABLE IF NOT EXISTS)."""
        SQL = DocManager.CREATE_ALL_TABLES_SQL
        if SQL:
            self._execSql(SQL)

    def deleteTbls(self):
        """Drop all tables (idempotent: DROP TABLE IF EXISTS)."""
        SQL = DocManager.DELETE_ALL_TABLES_SQL
        if SQL:
            self._execSql(SQL)

    def executeScriptsFromFile(self, filename):
        """Execute a .sql script statement by statement.

        Lines are accumulated until one ends in ';'; '--' comment lines are
        skipped.  Each completed statement is executed and committed on its
        own, with failures logged and skipped.
        """
        self._isConnect()
        statement = ""
        for line in open(filename):
            if re.match(r'--', line):  # ignore sql comment lines
                continue
            if not re.search(r'[^-;]+;', line):  # keep appending lines that don't end in ';'
                statement = statement + line
            else:  # line ends a statement: execute and reset the accumulator
                statement = statement + line
                try:
                    self.cursor.execute(statement)
                    self.conn.commit()
                    statement = ""
                except Exception as e:
                    print("[-] MySQLError during execute statement")
                    print(e)

    def _isConnect(self):
        """Raise unless initConn() has been called; returns True otherwise."""
        if self.conn == -1:
            raise Exception("[-] please connect the database using initConn(...) first")
            # (removed an unreachable `return False` that followed the raise)
        return True

    def _search(self, sql):
        """Run a SELECT and return the rows as a numpy array (None on error)."""
        self._isConnect()
        try:
            self.cursor.execute(sql)
            searchResult = np.array(self.cursor.fetchall())
        except Exception as e:
            print("[-] Cannot execute the query of:\n %s" % sql)
            print(e)
            return
        return searchResult

    def _execSql(self, sql):
        """Execute a single statement and commit; errors are logged, not raised."""
        self._isConnect()
        try:
            self.cursor.execute(sql)
            self.conn.commit()
        except Exception as e:
            print("[-] Cannot execute the sql:\n %s" % sql)
            print(e)
            return

    # --- insert into the DB system ---
    def _insertDoc(self, docId, title, type, year="NULL", source="NULL", description="NULL", bib="NULL"):
        """Insert one Document row.

        *year* is interpolated unquoted (so the default "NULL" becomes SQL
        NULL); the other fields are quoted with '"' doubled as the escape.
        Failures (e.g. duplicate docID) are logged and swallowed.
        """
        SQL = """INSERT INTO Document(docID, title, type, year, source, description, bib) VALUES ("{docId}", "{title}", "{type}", {year}, "{source}", "{description}", "{bib}")""".format(
            docId=docId.replace('"', '""'),
            title=title.replace('"', '""'),
            type=type.replace('"', '""'),
            year=year,
            source=source.replace('"', '""'),
            description=description.replace('"', '""'),
            bib=bib.replace('"', '""')
        )
        try:
            self.cursor.execute(SQL)
            self.conn.commit()
            print("[+] Insert %s %s" % (type, docId))
        except Exception as e:
            print("[-] Cannot insert %s %s" % (type, docId))
            print(e)

    def insertDocFromDict(self, docDic):
        """Insert a document from a plain dict with docId/title/type keys.

        Optional keys (year/source/description/bib) default to the string
        "NULL" (which _insertDoc turns into SQL NULL only for year).
        """
        requiredKeys = {'title', 'type', 'docId'}
        addupKeys = {'year', 'source', 'description', 'bib'}
        for checkKey in requiredKeys:
            if checkKey not in docDic:
                print("[-] input docDic instance must have %s" % checkKey)
                return
        for checkKey in addupKeys:
            if checkKey not in docDic:
                docDic[checkKey] = "NULL"
        print("%s description is %s" % (docDic["docId"], docDic["description"]))
        self._insertDoc(docId=docDic["docId"],
                        title=docDic["title"],
                        type=docDic["type"],
                        year=docDic["year"],
                        source=docDic["source"],
                        description=docDic["description"],
                        bib=docDic["bib"])

    def insertDocFromDictList(self, docDicList):
        """Insert a list of bib dicts (a bare dict is wrapped into a list).

        NOTE: despite the name, entries are routed through
        insertDocFromBibDic, i.e. they are expected to be bibtexparser
        entry dicts.
        """
        if isinstance(docDicList, list):
            pass
        elif isinstance(docDicList, dict):
            docDicList = [docDicList]
        for docDic in docDicList:
            self.insertDocFromBibDic(docDic)

    def insertDocFromBibDic(self, bibDic, description=""):
        """Insert one bibtexparser entry dict as a Document row.

        docID is ID+title; source is the first of journal/booktitle/publisher
        present; the full entry is re-serialized and stored in `bib`.
        Any missing required key aborts this entry with a log message.
        """
        try:
            doctype = bibDic['ENTRYTYPE']
            docId = bibDic['ID'] + bibDic['title']
            year = bibDic['year']
            title = bibDic['title']
            if "description" in bibDic:
                description = bibDic["description"]
            source = ""
            for possbleSource in ['journal', 'booktitle', 'publisher']:
                if possbleSource in bibDic:
                    source = bibDic[possbleSource]
            # Unused result, but the lookup intentionally-or-not enforces an
            # 'author' key: entries without it raise KeyError and are skipped.
            author = bibDic['author'].split('and')
            self.db.entries = [bibDic]
            bib = self.writer.write(self.db)
            self._insertDoc(docId=docId,
                            title=title,
                            type=doctype,
                            year=year,
                            source=source,
                            description=description,
                            bib=bib)
        except Exception as e:
            print("[-] Cannot insert current bib item")
            print(e)

    def insertDocFromBibFile(self, bibFileName, deleteAfterInsert=False):
        """Load a .bib file and insert every entry; optionally truncate the
        file afterwards (empty files are ignored)."""
        if not os.path.getsize(bibFileName):
            return
        with open(bibFileName) as bibfile:
            bibDicList = bibtexparser.load(bibfile).entries
            self.insertDocFromDictList(bibDicList)
        if deleteAfterInsert:
            with open(bibFileName, 'w') as bibfile:
                bibfile.write("")

    def insertTopic(self, topicId, name, year="NULL", description="NULL"):
        """Insert a 'topic' pseudo-document."""
        self._insertDoc(docId=topicId,
                        title=name,
                        type='topic',
                        # Bug fix: the year parameter used to be ignored and a
                        # literal 'NULL' was always inserted.
                        year=year,
                        source='NULL',
                        description=description,
                        bib='NULL')

    def insertTopicFromFile(self, jsonFilename, deleteAfterInsert=False):
        """Load topics from a JSON file of dicts; optionally truncate it after."""
        if not os.path.getsize(jsonFilename):
            return
        with open(jsonFilename) as topicFp:
            dicList = json.load(topicFp)
            self.insertTopicFromDicList(dicList=dicList)
        if deleteAfterInsert:
            with open(jsonFilename, 'w') as topicFp:
                topicFp.write("")

    def insertTopicFromDicList(self, dicList):
        """Insert topics from a list of dicts (a bare dict is wrapped)."""
        if isinstance(dicList, dict):
            # Bug fix: the wrapped list was previously bound to a typo'd name
            # ("dictList") and never used, so a bare dict was iterated by key.
            dicList = [dicList]
        for dic in dicList:
            self.insertTopicFromDic(dic)

    def insertTopicFromDic(self, dic):
        """Validate a topic dict (topicId/name required) and insert it."""
        requiredKeys = {"topicId", "name"}
        addupKeys = {"description", "year"}
        for checkKey in requiredKeys:
            if checkKey not in dic:
                print("[-] topic dictionary must contain %s" % checkKey)
                return
        for checkKey in addupKeys:
            if checkKey not in dic:
                dic[checkKey] = "NULL"
        self.insertTopic(
            topicId=dic["topicId"],
            name=dic["name"],
            year=dic["year"],
            description=dic["description"]
        )

    # --- delete a document ---
    def deleteDoc(self, docId):
        """Delete one document; Connection/Possess rows cascade via FK."""
        SQL = """DELETE FROM Document WHERE docID="{docId}"; """.format(docId=docId.replace('"', '""'))
        self._execSql(SQL)

    # --- get documents ---
    def getAllDocs(self):
        """Return (docId, title, type) for all rows, topics first, each group title-sorted."""
        SQL = """(SELECT docId, title, type FROM Document WHERE type="topic" ORDER BY title) UNION (SELECT docId, title, type FROM Document WHERE type<>"topic" ORDER BY title);"""
        return self._search(SQL)

    def getDocById(self, docId):
        """Return the full row for one document id."""
        SQL = """SELECT docId, title, type, year, source, description, bib FROM Document WHERE docID="{docId}";""".format(docId=docId.replace('"', '""'))
        return self._search(SQL)

    def getAllTopics(self):
        """Return (docId, title) for all topic pseudo-documents."""
        SQL = """SELECT docId, title FROM Document WHERE type="topic";"""
        return self._search(SQL)

    # --- add connection ---
    def addConnection(self, srcDocId, dstDocId, description=""):
        """Insert a directed src->dst edge; self-loops are rejected."""
        if dstDocId == srcDocId:
            print("srcDocId shouldn't equal dstDocId")
            return
        SQL = """INSERT INTO Connection(srcDocId, dstDocId, description) VALUE("{srcDocId}", "{dstDocId}", "{description}");""".format(
            srcDocId=srcDocId.replace('"', '""'),
            dstDocId=dstDocId.replace('"', '""'),
            description=description.replace('"', '""')
        )
        try:
            self.cursor.execute(SQL)
            self.conn.commit()
            print("[+] Insert connection pair %s - %s" % (srcDocId, dstDocId))
        except Exception as e:
            print("[-] Cannot insert connection %s - %s" % (srcDocId, dstDocId))
            print(e)

    def addConnectionFromFile(self, jsonFilename, deleteAfterInsert=False):
        """Load connection dicts from a JSON file; optionally truncate it after."""
        if not os.path.getsize(jsonFilename):
            return
        with open(jsonFilename) as connectionFp:
            dicList = json.load(connectionFp)
            self.addConnectionFromDicList(dicList=dicList)
        if deleteAfterInsert:
            with open(jsonFilename, 'w') as connectionFp:
                connectionFp.write("")

    def addConnectionFromDicList(self, dicList):
        """Insert each connection dict in the list."""
        for dic in dicList:
            self.addConnectionFromDic(dic)

    def addConnectionFromDic(self, dic):
        """Validate a connection dict (srcDocId/dstDocId required) and insert it."""
        requiredKeys = {"srcDocId", "dstDocId"}
        addupKeys = {"description"}
        for checkKey in requiredKeys:
            if checkKey not in dic:
                print("[-] connection dictionary must contain %s" % checkKey)
                return
        for checkKey in addupKeys:
            if checkKey not in dic:
                dic[checkKey] = "NULL"
        self.addConnection(
            srcDocId=dic["srcDocId"],
            dstDocId=dic["dstDocId"],
            description=dic["description"]
        )

    # --- delete connection ---
    def delConnection(self, srcDocId, dstDocId):
        """Delete one directed edge."""
        SQL = """DELETE FROM Connection WHERE srcDocId="{srcDocId}" and dstDocId="{dstDocId}"; """.format(srcDocId=srcDocId, dstDocId=dstDocId)
        self._execSql(SQL)

    # --- get connections ---
    def getAncestors(self, tgtDocId):
        """Return documents with an edge INTO tgtDocId."""
        SQL = """SELECT docId, title, type FROM Document WHERE docId in (SELECT srcDocId FROM Connection WHERE dstDocId="{tgtDocId}");""".format(tgtDocId=tgtDocId)
        return self._search(SQL)

    def getDescendants(self, tgtDocId):
        """Return documents with an edge OUT OF tgtDocId."""
        SQL = """SELECT docId, title, type FROM Document WHERE docId in (SELECT dstDocId FROM Connection WHERE srcDocId="{tgtDocId}");""".format(tgtDocId=tgtDocId)
        return self._search(SQL)

    def getConnectionInfo(self, srcDocId, dstDocId):
        """Return the description stored on one edge."""
        SQL = """SELECT description FROM Connection WHERE srcDocId="{srcDocId}" AND dstDocId="{dstDocId}";""".format(srcDocId=srcDocId, dstDocId=dstDocId)
        return self._search(SQL)

    # --- modify Document ---
    def modifyDocument(self, docId, category, newVal):
        """Set column *category* of one document to *newVal*.

        String values are quoted (with '"' doubled); other values are
        interpolated as-is so numbers/NULL work.
        """
        if isinstance(newVal, str):  # manually add double quotation marks
            SQL = """UPDATE Document SET {category}="{newVal}" WHERE docId="{docId}"; """.format(
                category=category,
                newVal=newVal.replace('"', '""'),
                docId=docId.replace('"', '""'),
            )
        else:
            SQL = """UPDATE Document SET {category}={newVal} WHERE docId="{docId}"; """.format(
                category=category,
                newVal=newVal,
                # Consistency fix: escape docId here as the str branch does.
                docId=docId.replace('"', '""')
            )
        print("update %s %s" % (docId, category))
        self._execSql(SQL)

    # --- modify Connection ---
    def modifyConnectionDescription(self, srcDocId, dstDocId, description):
        """Set the description on one edge (strings get quoted)."""
        if isinstance(description, str):  # manually add double quotation marks
            description = '"%s"' % description
        SQL = """UPDATE Connection SET description={description} WHERE srcDocId="{srcDocId}" and dstDocId="{dstDocId}"; """.format(
            description=description,
            srcDocId=srcDocId,
            dstDocId=dstDocId
        )
        self._execSql(SQL)

    # --- export docs to bib, export topics and connections to json files ---
    def exportDocs(self, filename="./mybib.bib.bk"):
        """Dump every non-topic document's stored bib text to a .bib file,
        re-attaching the DB description to each entry."""
        print("export documents to %s" % filename)
        SQL = "SELECT description, bib FROM Document WHERE type != 'topic';"
        docsInfo = self._search(SQL)
        bibDicList = []
        for description, bib in docsInfo:
            bibDicList += bibtexparser.loads(bib).entries
            # assumes each stored bib blob parses to at least one entry
            bibDicList[-1]['description'] = description
        self.db.entries = bibDicList
        with open(filename, 'w') as bibtexFp:
            bibtexparser.dump(self.db, bibtexFp)

    def exportTopics(self, filename="./topics.json.bk"):
        """Dump all topics to a JSON list of {topicId, name, description}."""
        print("export topics to %s" % filename)
        SQL = "SELECT docID, title, description FROM Document WHERE type = 'topic';"
        topicInfo = self._search(SQL)
        listForJson = []
        for docId, title, description in topicInfo:
            listForJson.append({"topicId":docId, "name":title, "description":description})
        with open(filename, 'w') as jsonFp:
            json.dump(listForJson, jsonFp, indent=4, separators=(',', ': '))

    def exportConnections(self, filename="./connections.json.bk"):
        """Dump all edges to a JSON list of {srcDocId, dstDocId, description}."""
        print("export connections to %s" % filename)
        SQL = "SELECT srcDocId, dstDocId, description FROM Connection;"
        connectionInfo = self._search(SQL)
        listForJson = []
        for srcDocId, dstDocId, description in connectionInfo:
            listForJson.append({"srcDocId":srcDocId, "dstDocId":dstDocId, "description":description})
        with open(filename, 'w') as jsonFp:
            json.dump(listForJson, jsonFp, indent=4, separators=(',', ': '))

    # --- search ---
    def searchDocWithKeyword(self, keyword):
        """LIKE-search docId/title/description for *keyword*."""
        SQL = """SELECT docId, title, type FROM Document WHERE docId LIKE "%{keyword}%" OR title LIKE "%{keyword}%" OR description LIKE "%{keyword}%";""".format(keyword=keyword)
        return self._search(SQL)
|
# Names of Apple system / Swift runtime dynamic libraries (.dylib).
# Kept as a set, so the intended use is O(1) membership tests —
# presumably to recognize or filter these libraries elsewhere; confirm
# against the callers.
Libs = {
    "libGSFont.dylib",
    "libwebrtc.dylib",
    "libsysdiagnose.dylib",
    "libsystem_trace.dylib",
    "libnetwork.dylib",
    "libAudioStatistics.dylib",
    "libswiftCore.dylib",
    "libxpc.dylib",
    "libAWDSupportFramework.dylib",
    "libswiftDispatch.dylib",
    "libAXSpeechManager.dylib",
    "libswiftCoreMIDI.dylib",
    "libtzupdate.dylib",
    "libmecabra.dylib",
    "libTelephonyUtilDynamic.dylib",
    "libAccessibility.dylib",
    "libAudioToolboxUtility.dylib",
    "libLinearAlgebra.dylib",
    "libate.dylib",
    "libswiftCoreAudio.dylib",
    "libboringssl.dylib",
    "libusrtcp.dylib",
    "libobjc.A.dylib",
    "libswiftSpriteKit.dylib",
    "libswiftAccelerate.dylib",
    "libswiftNetwork.dylib",
    "libprequelite.dylib",
    "libdispatch.dylib",
    "libIOAccessoryManager.dylib",
    "libAXSafeCategoryBundle.dylib",
    "libsystem_blocks.dylib",
    "libswiftFoundation.dylib",
    "libswiftCoreMedia.dylib",
    "libMobileGestaltExtensions.dylib",
    "libamsupport.dylib",
    "libhvf.dylib",
    "libnfshared.dylib",
}
|
import datetime as dt
import html
import json
import re
from copy import deepcopy
from urllib import parse as urlparse
from support import config
def prepend_domain(url, domain_url, ignore_domain_splice=False):
    """Return an absolute https URL for *url*, filling missing pieces from
    *domain_url*.

    Rules (in order):
      - no domain_url        -> url unchanged
      - empty url            -> None
      - "www.…"              -> "https://www.…"
      - already has a scheme -> unchanged
      - scheme-relative "//host/path" -> "https://host/path"
      - bare path            -> joined onto domain_url's scheme+host
        (or onto domain_url as-is when ignore_domain_splice is True)
    """
    if domain_url is None:
        return url
    if not url:
        return None
    url = url.strip()
    if url.startswith("www."):
        return "https://{}".format(url)
    split_url = urlparse.urlsplit(url)
    if split_url.scheme:
        return url
    if split_url.netloc:
        # Scheme-relative URL: only a scheme is missing.  (The previous code
        # did url.replace("//", ""), which also stripped legitimate double
        # slashes inside the path.)
        return "https:{}".format(url)
    if ignore_domain_splice:
        return urlparse.urljoin(domain_url, url)
    split_domain = urlparse.urlsplit(domain_url)
    domain = "{}://{}".format(split_domain.scheme, split_domain.netloc)
    return urlparse.urljoin(domain, url)
def safe_grab(data, keys, default=None):
    """Safely walk nested dicts: safe_grab(d, ["a", "b"]) -> d["a"]["b"].

    Returns *default* (when given) if the walk ends in None; returns None
    if a non-dict is hit while keys remain.  Unlike the previous
    implementation, the caller's *keys* list is NOT mutated (it used to be
    consumed via pop(0)).
    """
    if keys is None:
        if data is None and default is not None:
            return default
        return data
    remaining = list(keys)  # work on a copy; never mutate the argument
    while data is not None and remaining:
        if not isinstance(data, dict):
            return None
        data = data.get(remaining.pop(0))
    if data is None and default is not None:
        return default
    return data
def safe_json(data):
    """Coerce *data* into a parsed JSON value, or {} on any failure.

    Non-string inputs are round-tripped through json.dumps/json.loads,
    which both validates them and normalizes e.g. tuples to lists.
    """
    try:
        if not isinstance(data, str):
            data = json.dumps(data)
        return json.loads(data)
    except Exception:
        return {}
    # (removed an unreachable trailing `return {}`)
def safe_float(fl_str, safeon=True):
    """Parse *fl_str* as a float; on failure return 0 when *safeon*, else None."""
    try:
        return float(fl_str)
    except Exception:
        return 0 if safeon else None
def price_round(price_str, round_num):
    """Round a price string to *round_num* decimals and re-apply the '$' prefix.

    On any failure the current value of ``price_str`` is passed to
    format_price.  Note: because the local is rebound step by step, the
    except path may format the already-cleaned string (e.g. when round()
    fails on an unparseable number) rather than the original input.
    """
    try:
        price_str = (
            price_str.replace("$", "").replace("USD", "").replace("US", "").strip()
        )
        price_str = str(round(safe_float(price_str, safeon=False), round_num))
        return format_price(price_str)
    except Exception:
        return format_price(price_str)
def generate_result_meta(
    shop_link,
    searched_keyword,
    image_url,
    shop_name,
    price,
    title,
    content_description,
    date_searched=None,
):
    """Build the per-keyword result dict for one scraped product.

    Returns None when the record cannot be normalized: missing price/title,
    no numeric component in *price*, or an unroundable price.  The result
    is a single-key dict keyed by *searched_keyword*, with truncated
    title/description, a '$'-prefixed display price, and a 2-decimal
    numeric_price string.  *date_searched* defaults to str(now()).
    """
    if not validate_data(image_url, price, title):
        return None
    price = str(price)
    # Prefer a decimal match ("12.34"); fall back to a bare integer ("12").
    numeric_price = re.findall(r"\d+\.+\d+", price) or re.findall(r"\d+", price)
    if numeric_price is None or len(numeric_price) == 0:
        return None
    numeric_price = safe_float(numeric_price[0], safeon=False)
    if numeric_price is None:
        return None
    price = price_round(price, 2)
    if price is None:
        return None
    if date_searched is None:
        date_searched = str(dt.datetime.now())
    result_meta = {
        searched_keyword: {
            "image_url": prepend_domain(image_url, shop_link),
            "shop_name": shop_name,
            "shop_link": shop_link,
            "price": price,
            "title": truncate_data(title, 75),
            "searched_keyword": searched_keyword,
            "content_description": truncate_data(content_description, 250),
            "date_searched": date_searched,
            "numeric_price": str(round(numeric_price, 2)),
        }
    }
    return result_meta
def extract_items(items):
    """Join the whitespace-stripped items into one space-separated string."""
    joined = " ".join(entry.strip() for entry in items)
    return joined.strip()
def format_price(price):
    """Normalize a price string: ensure a '$' prefix and drop US/USD markers."""
    if "$" not in price:
        price = f"${price}"
    without_markers = price.replace("USD", "").replace("US", "")
    return without_markers.strip()
def validate_data(image_url, price, title):
    """Return True when both price and title are truthy.

    (image_url is accepted for signature compatibility but not checked.)
    """
    return bool(price) and bool(title)
def truncate_data(data, length_cont, html_escape=False):
    """Strip surrounding whitespace and cap *data* at *length_cont* chars.

    Truncated values get a '...' suffix; with html_escape=True the
    truncated result is HTML-escaped.  Falsy input is returned untouched,
    and any unexpected failure yields "".
    """
    if not data:
        return data
    data = data.rstrip().strip()
    if len(data) <= length_cont:
        return data
    try:
        shortened = "{}...".format(data[:length_cont])
        return html.escape(shortened) if html_escape else shortened
    except Exception:
        return ""  # data is not usable text
def save_shop_data(data):
    """Persist one scraped-result dict as a ShoppedData row.

    No-op unless *data* is truthy and config.SAVE_TO_DB is enabled.
    """
    if data and config.SAVE_TO_DB:
        # Imported lazily so the DB layer is only required when saving is on.
        from db.models import ShoppedData
        shop = ShoppedData(**data)
        shop.commit()
def save_job(spider_name, job_id, status="done"):
    """Record *spider_name*'s *status* in the job's meta dict.

    No-op unless *job_id* is truthy and config.SAVE_TO_DB is enabled, and
    a spider already marked "error" is never overwritten.
    """
    if job_id and config.SAVE_TO_DB:
        # Imported lazily so the DB layer is only required when saving is on.
        from db.models import Job
        job = Job().get_item(id=job_id)
        if job and job.meta:
            if job.meta.get(spider_name, "started") == "error":
                return
            # Reassign a fresh dict rather than mutating in place —
            # presumably so the ORM notices the attribute change; confirm.
            meta = deepcopy(job.meta)
            meta[spider_name] = status
            job.meta = meta
            job.commit()
|
import os
from enums.dataset_type import DatasetType
DATA_FOLDER = 'data'
CHECKPOINTS_FOLDER = 'checkpoints'
RUNS_FOLDER = 'runs'
FEATURES_FOLDER = 'features'
MESSAGES_FOLDER = 'messages'
STEP3_FOLDER = 'step3'
class FileHelper:
    """Central registry of data/checkpoint/feature/step3 file paths.

    Everything lives under ``<repo>/data`` (the parent of this file's
    directory).  The data, features, and checkpoints directories are
    created on construction; the messages and step3 directories are NOT
    created here — presumably produced elsewhere, confirm before writing
    to them.
    """
    def __init__(self):
        """Build (and partially create) the project directory layout."""
        # Anchor all paths relative to this module's parent directory.
        dir_path = os.path.dirname(os.path.realpath(__file__))
        self._data_path = os.path.join(os.path.dirname(dir_path), DATA_FOLDER)
        if not os.path.exists(self._data_path):
            os.mkdir(self._data_path)
        features_folder_path = os.path.join(self._data_path, FEATURES_FOLDER)
        if not os.path.exists(features_folder_path):
            os.mkdir(features_folder_path)
        self._checkpoints_folder_path = os.path.join(self._data_path, CHECKPOINTS_FOLDER)
        if not os.path.exists(self._checkpoints_folder_path):
            os.mkdir(self._checkpoints_folder_path)
        self._model_checkpoint_path = os.path.join(self._data_path, CHECKPOINTS_FOLDER, 'extractor.p')
        # per-split input / feature / metadata files
        self._train_input_path = os.path.join(self._data_path, FEATURES_FOLDER, 'train.input.npy')
        self._valid_input_path = os.path.join(self._data_path, FEATURES_FOLDER, 'valid.input.npy')
        self._test_input_path = os.path.join(self._data_path, FEATURES_FOLDER, 'test.input.npy')
        self._train_features_path = os.path.join(self._data_path, FEATURES_FOLDER, 'train_features.npy')
        self._valid_features_path = os.path.join(self._data_path, FEATURES_FOLDER, 'valid_features.npy')
        self._test_features_path = os.path.join(self._data_path, FEATURES_FOLDER, 'test_features.npy')
        self._train_metadata_path = os.path.join(self._data_path, FEATURES_FOLDER, 'train.metadata.p')
        self._valid_metadata_path = os.path.join(self._data_path, FEATURES_FOLDER, 'valid.metadata.p')
        self._test_metadata_path = os.path.join(self._data_path, FEATURES_FOLDER, 'test.metadata.p')
        self._messages_folder_path = os.path.join(self._data_path, MESSAGES_FOLDER)
        # step3 distractor/target dictionaries per split
        self._train_distractors_path = os.path.join(self._data_path, STEP3_FOLDER, 'distractor_dict.train.p')
        self._train_targets_path = os.path.join(self._data_path, STEP3_FOLDER, 'target_dict.train.p')
        self._valid_distractors_path = os.path.join(self._data_path, STEP3_FOLDER, 'distractor_dict.valid.p')
        self._valid_targets_path = os.path.join(self._data_path, STEP3_FOLDER, 'target_dict.valid.p')
        self._test_distractors_path = os.path.join(self._data_path, STEP3_FOLDER, 'distractor_dict.test.p')
        self._test_targets_path = os.path.join(self._data_path, STEP3_FOLDER, 'target_dict.test.p')
        # same, for the RANDOM-distractor variants
        self._train_distractors_RANDOM_path = os.path.join(self._data_path, STEP3_FOLDER, 'distractor_dict.trainRANDOM.p')
        self._train_targets_RANDOM_path = os.path.join(self._data_path, STEP3_FOLDER, 'target_dict.trainRANDOM.p')
        self._valid_distractors_RANDOM_path = os.path.join(self._data_path, STEP3_FOLDER, 'distractor_dict.validRANDOM.p')
        self._valid_targets_RANDOM_path = os.path.join(self._data_path, STEP3_FOLDER, 'target_dict.validRANDOM.p')
        self._test_distractors_RANDOM_path = os.path.join(self._data_path, STEP3_FOLDER, 'distractor_dict.testRANDOM.p')
        self._test_targets_RANDOM_path = os.path.join(self._data_path, STEP3_FOLDER, 'target_dict.testRANDOM.p')
        # three datasets for zero shot tests
        self._train_distractors_zs1_path = os.path.join(self._data_path, STEP3_FOLDER, 'distractor_dict.trainRANDOM.color2_size0_.p')#horizontal1_size1_size1_color2_.p')
        self._train_targets_zs1_path = os.path.join(self._data_path, STEP3_FOLDER, 'target_dict.trainRANDOM.color2_size0_.p')#horizontal1_size1_size1_color2_.p')
        self._valid_distractors_zs1_path = os.path.join(self._data_path, STEP3_FOLDER, 'distractor_dict.validRANDOM.color2_size0_.p')#horizontal1_size1_size1_color2_.p')
        self._valid_targets_zs1_path = os.path.join(self._data_path, STEP3_FOLDER, 'target_dict.validRANDOM.color2_size0_.p')#horizontal1_size1_size1_color2_.p')
        self._train_distractors_zs2_path = os.path.join(self._data_path, STEP3_FOLDER, 'distractor_dict.trainRANDOM.horizontal0_size1_vertical2_shape0_vertical1_color2_.p')
        self._train_targets_zs2_path = os.path.join(self._data_path, STEP3_FOLDER, 'target_dict.trainRANDOM.horizontal0_size1_vertical2_shape0_vertical1_color2_.p')
        self._train_distractors_zs3_path = os.path.join(self._data_path, STEP3_FOLDER, 'distractor_dict.trainRANDOM.horizontal0_vertical1_size1_shape2_color2_horizontal1_.p')
        self._train_targets_zs3_path = os.path.join(self._data_path, STEP3_FOLDER, 'target_dict.trainRANDOM.horizontal0_vertical1_size1_shape2_color2_horizontal1_.p')
    # read-only accessors for the paths computed in __init__
    @property
    def model_checkpoint_path(self):
        return self._model_checkpoint_path
    @property
    def train_input_path(self):
        return self._train_input_path
    @property
    def valid_input_path(self):
        return self._valid_input_path
    @property
    def test_input_path(self):
        return self._test_input_path
    @property
    def train_features_path(self):
        return self._train_features_path
    @property
    def valid_features_path(self):
        return self._valid_features_path
    @property
    def test_features_path(self):
        return self._test_features_path
    @property
    def train_metadata_path(self):
        return self._train_metadata_path
    @property
    def valid_metadata_path(self):
        return self._valid_metadata_path
    @property
    def test_metadata_path(self):
        return self._test_metadata_path
    @property
    def messages_folder_path(self):
        return self._messages_folder_path
    @property
    def train_distractors_path(self):
        return self._train_distractors_path
    @property
    def train_targets_path(self):
        return self._train_targets_path
    @property
    def valid_distractors_path(self):
        return self._valid_distractors_path
    @property
    def valid_targets_path(self):
        return self._valid_targets_path
    @property
    def test_distractors_path(self):
        return self._test_distractors_path
    @property
    def test_targets_path(self):
        return self._test_targets_path
    # RANDOM distractors
    @property
    def train_distractors_RANDOM_path(self):
        return self._train_distractors_RANDOM_path
    @property
    def train_targets_RANDOM_path(self):
        return self._train_targets_RANDOM_path
    @property
    def valid_distractors_RANDOM_path(self):
        return self._valid_distractors_RANDOM_path
    @property
    def valid_targets_RANDOM_path(self):
        return self._valid_targets_RANDOM_path
    @property
    def test_distractors_RANDOM_path(self):
        return self._test_distractors_RANDOM_path
    @property
    def test_targets_RANDOM_path(self):
        return self._test_targets_RANDOM_path
    # zero shot paths
    @property
    def train_distractors_zs1_path(self):
        return self._train_distractors_zs1_path
    @property
    def train_targets_zs1_path(self):
        return self._train_targets_zs1_path
    @property
    def valid_distractors_zs1_path(self):
        return self._valid_distractors_zs1_path
    @property
    def valid_targets_zs1_path(self):
        return self._valid_targets_zs1_path
    @property
    def train_distractors_zs2_path(self):
        return self._train_distractors_zs2_path
    @property
    def train_targets_zs2_path(self):
        return self._train_targets_zs2_path
    @property
    def train_distractors_zs3_path(self):
        return self._train_distractors_zs3_path
    @property
    def train_targets_zs3_path(self):
        return self._train_targets_zs3_path
    def get_run_folder(self, sub_folder, model_name):
        """Compose the run directory: runs/[sub_folder/]model_name."""
        if not sub_folder:
            run_folder = os.path.join(RUNS_FOLDER, model_name)
        else:
            run_folder = os.path.join(RUNS_FOLDER, sub_folder, model_name)
        return run_folder
    def get_sender_path(self, run_folder):
        """Path of the pickled sender model inside a run folder."""
        result = os.path.join(run_folder, 'sender.p')
        return result
    def get_receiver_path(self, run_folder):
        """Path of the pickled receiver model inside a run folder."""
        result = os.path.join(run_folder, 'receiver.p')
        return result
    def get_visual_module_path(self, run_folder):
        """Path of the pickled visual module inside a run folder."""
        result = os.path.join(run_folder, 'visual_module.p')
        return result
    def get_input_path(self, dataset_type: DatasetType):
        """Input path for a split; implicitly None for unrecognized types."""
        if dataset_type == DatasetType.Train:
            return self.train_input_path
        elif dataset_type == DatasetType.Valid:
            return self.valid_input_path
        elif dataset_type == DatasetType.Test:
            return self.test_input_path
    def get_features_path(self, dataset_type: DatasetType):
        """Features path for a split; implicitly None for unrecognized types."""
        if dataset_type == DatasetType.Train:
            return self.train_features_path
        elif dataset_type == DatasetType.Valid:
            return self.valid_features_path
        elif dataset_type == DatasetType.Test:
            return self.test_features_path
    def get_metadata_path(self, dataset_type: DatasetType):
        """Metadata path for a split; implicitly None for unrecognized types."""
        if dataset_type == DatasetType.Train:
            return self.train_metadata_path
        elif dataset_type == DatasetType.Valid:
            return self.valid_metadata_path
        elif dataset_type == DatasetType.Test:
            return self.test_metadata_path
    def get_vocabulary_path(self, vocabulary_size: int):
        """Path of the pickled vocabulary for a given size."""
        vocabulary_path = os.path.join(self._data_path, f'dict_{vocabulary_size}.pckl')
        return vocabulary_path
    def get_set_path(self, set_name: str):
        """Path of a raw '<name>.input' set file under the data folder."""
        set_path = os.path.join(self._data_path, f'{set_name}.input')
        return set_path
    def create_unique_model_path(self, model_name: str):
        """Checkpoint path for a model name (not guaranteed unique despite the name)."""
        receiver_path = os.path.join(self._checkpoints_folder_path, f'{model_name}.pt')
        return receiver_path
from __future__ import absolute_import, print_function
from abc import ABCMeta, abstractmethod
import numpy as np
import tensorflow as tf
class AbstractRecommender(metaclass=ABCMeta):
    """Abstract base class for TF-graph-based recommender models.

    Subclasses must create input placeholders, build the embedding graph,
    and attach an optimizer.  (The original docstring said "evaluator
    class", which appears to be a copy-paste slip.)
    """
    @abstractmethod
    def create_placeholders(self) -> None:
        """Create the placeholders to be used."""
        raise NotImplementedError()
    @abstractmethod
    def build_graph(self) -> None:
        """Build the main tensorflow graph with embedding layers."""
        raise NotImplementedError()
    @abstractmethod
    def add_optimizer(self) -> None:
        """Add the required optimiser to the graph."""
        raise NotImplementedError()
class PointwiseRecommender(AbstractRecommender):
    """Implicit Recommenders based on pointwise approach.

    TF1-style (placeholders, tf.contrib initializers) matrix-factorization
    model; the loss attached at construction time is selected by
    ``model_name`` ("mf", "rel-mf", or the mf-du fallback).
    """
    def __init__(self, model_name:str, num_users: int, num_items: int,
                 dim: int, lam: float, eta: float, weight: float = 1.0, clip: float = 0, num: int = 0) -> None:
        """Initialize Class.

        model_name: "mf" -> plain MSE; "rel-mf" -> propensity-weighted MSE;
            anything else -> the mf-du (self-normalized) loss.
        num_users/num_items: embedding table sizes.
        dim: embedding dimension.
        lam: L2 regularization strength.
        eta: Adam learning rate.
        weight/clip/num: stored but never read in this class — the rel-mf
            clip bound is hard-coded to 0.1 below, not taken from ``clip``;
            presumably consumed elsewhere, confirm.
        """
        self.num_users = num_users
        self.num_items = num_items
        self.dim = dim
        self.lam = lam
        self.eta = eta
        self.weight = weight
        self.clip = clip
        self.num = num
        # Build the graphs
        self.create_placeholders()
        self.build_graph()
        if model_name in ["mf"]:
            self.create_mf_losses()
        elif model_name in ["rel-mf"]:
            self.create_rel_mf_losses()
        else: # mf-du
            self.create_mf_du_losses()
        self.add_optimizer()
    def create_placeholders(self) -> None:
        """Create the placeholders to be used."""
        self.users = tf.placeholder(tf.int32, [None], name='user_placeholder')
        self.items = tf.placeholder(tf.int32, [None], name='item_placeholder')
        # scores / inv_scores: per-example weights fed at train time
        # (used as propensity-style divisors in the rel-mf and mf-du losses).
        self.scores = tf.placeholder(
            tf.float32, [None, 1], name='score_placeholder')
        self.inv_scores = tf.placeholder(
            tf.float32, [None, 1], name='inv_score_placeholder')
        # NOTE(review): the two *_normalization placeholders are defined but
        # never consumed by any loss in this class.
        self.scores_normalization = tf.placeholder(
            tf.float32, [None, 1], name='scores_normalization')
        self.inv_scores_normalization = tf.placeholder(
            tf.float32, [None, 1], name='inv_scores_normalization')
        self.labels = tf.placeholder(
            tf.float32, [None, 1], name='label_placeholder')
    def build_graph(self) -> None:
        """Build the main tensorflow graph with embedding layers."""
        with tf.name_scope('embedding_layer'):
            # initialize user-item matrices and biases
            # (f-strings below carry no placeholders; plain strings in effect)
            self.user_embeddings = tf.get_variable(
                f'user_embeddings', shape=[self.num_users, self.dim],
                initializer=tf.contrib.layers.xavier_initializer())
            self.item_embeddings = tf.get_variable(
                f'item_embeddings', shape=[self.num_items, self.dim],
                initializer=tf.contrib.layers.xavier_initializer())
            # lookup embeddings of current batch
            self.u_embed = tf.nn.embedding_lookup(
                self.user_embeddings, self.users)
            self.i_embed = tf.nn.embedding_lookup(
                self.item_embeddings, self.items)
        with tf.variable_scope('prediction'):
            # dot-product score per (user, item) pair, squashed to (0, 1)
            self.logits = tf.reduce_sum(
                tf.multiply(self.u_embed, self.i_embed), 1)
            self.preds = tf.sigmoid(tf.expand_dims(
                self.logits, 1))
    def create_mf_losses(self) -> None:
        """Create the losses (plain MF: unweighted MSE on binary labels)."""
        with tf.name_scope('mflosses'):
            # Denominator labels + (1 - labels) sums to 1 per row, i.e. the
            # batch size — this is a mean squared error.
            self.mse = tf.reduce_sum(
                (self.labels) * tf.square(1. - self.preds) +
                (1 - self.labels) * tf.square(self.preds) ) / \
                tf.reduce_sum(self.labels + (1 - self.labels))
            # add the L2-regularizer terms.
            reg_term_embeds = tf.nn.l2_loss(self.user_embeddings) \
                + tf.nn.l2_loss(self.item_embeddings)
            self.loss = self.mse + self.lam * reg_term_embeds
    def create_rel_mf_losses(self) -> None:
        """Create the losses (Rel-MF: labels inversely weighted by scores)."""
        with tf.name_scope('crmflosses'):
            # clipping: bound scores away from 0 so 1/scores stays finite
            # (fixed 0.1 floor; self.clip is NOT used here).
            scores = tf.clip_by_value(
                self.scores, clip_value_min=0.1, clip_value_max=1.0)
            self.mse = tf.reduce_sum(
                (self.labels / scores) * tf.square(1. - self.preds) +
                (1 - self.labels / scores) * tf.square(self.preds)) / \
                tf.reduce_sum(self.labels + (1 - self.labels))
            # add the L2-regularizer terms.
            reg_term_embeds = tf.nn.l2_loss(self.user_embeddings) \
                + tf.nn.l2_loss(self.item_embeddings)
            self.loss = self.mse + self.lam * reg_term_embeds
    def create_mf_du_losses(self) -> None:
        """Create the losses (mf-du: self-normalized weighting per the
        'SNIPS' comment below)."""
        with tf.name_scope('mf_du'):
            eps = 0.000001  # guards the normalization denominators against 0
            # clipping (same fixed [0.1, 1.0] bounds as rel-mf)
            scores = tf.clip_by_value(
                self.scores, clip_value_min=0.1, clip_value_max=1.0)
            inv_scores = tf.clip_by_value(
                self.inv_scores, clip_value_min=0.1, clip_value_max=1.0)
            # loss with SNIPS: each class term's 1/score weights are divided
            # by their own sum, i.e. self-normalized importance weighting.
            self.mse = tf.reduce_sum(
                (self.labels) * 1/(tf.reduce_sum((self.labels)*1/(scores))+eps) * (self.labels)/(scores) * tf.square(1. - self.preds) +
                (1 - self.labels)* 1/(tf.reduce_sum((1 - self.labels)*1/(inv_scores))+eps) * (1 - self.labels) / (inv_scores) * tf.square(self.preds)
                ) / \
                tf.reduce_sum(self.labels + (1 - self.labels))
            # add the L2-regularizer terms.
            reg_term_embeds = tf.nn.l2_loss(self.user_embeddings) \
                + tf.nn.l2_loss(self.item_embeddings)
            self.loss = self.mse + self.lam * reg_term_embeds
    def add_optimizer(self) -> None:
        """Add the required optimiser to the graph."""
        with tf.name_scope('optimizer'):
            # set Adam Optimizer.
            self.apply_grads = tf.train.AdamOptimizer(
                learning_rate=self.eta).minimize(self.loss)
|
"""
Signal models for fitting magnetization parameters from MR Images acquired with a Gradient Echo (GRE) Ultrashort TE (UTE) pulse sequence
"""
__author__ = "Dharshan Chandramohan"
import numpy as np
def T2strw_mag_simplified(K, TE, T2str, N):
    """Signal model of a T2*-weighted UTE GRE magnitude image.

    S = K * exp(-TE/T2*) + N

    parameters:
    K     :: constant (proportional to proton density)
    TE    :: sequence echo time
    T2str :: relaxation due to spin-spin effects and dephasing
    N     :: constant offset "noise" term
    @return expected (magnitude) signal
    """
    decay = np.exp((-1.0 * TE) / T2str)
    return K * decay + N
def T2strw_power(P_0, TE, T2str):
    """Signal model for the "power" (squared magnitude) of a complex signal
    from a T2*-weighted GRE image.

    P = M_c^2 = Re{S}^2 + Im{S}^2 = P_0 * exp(-2*TE/T2*)

    M_c is the "noise-corrected" signal magnitude, M_c^2 = M^2 - 2*sigma^2,
    where sigma is the standard deviation in a noise region (relevant when
    calculating residuals).

    parameters:
    P_0   :: constant (proportional to proton density)
    TE    :: sequence echo time
    T2str :: relaxation due to spin-spin effects and dephasing
    @return P :: predicted power of the signal
    """
    exponent = (-2.0 * TE) / T2str
    return P_0 * np.exp(exponent)
def T2strw_cplx(K, TE, T2str, df, phi):
    """Signal model of a T2*-weighted UTE GRE complex image.

    S = K * exp(-TE/T2* - i*2*pi*df*TE + i*phi)

    parameters:
    K     :: constant (proportional to proton density)
    TE    :: sequence echo time
    T2str :: relaxation due to spin-spin effects and dephasing
    df    :: frequency shift
    phi   :: phase
    @return expected complex signal
    """
    # T2* decay plus off-resonance phase accrual and a constant RF phase.
    exponent = (-1.0 * TE) / T2str - 1j * 2 * np.pi * df * TE + 1j * phi
    return K * np.exp(exponent)
def spgr_mag(PD, T1, T2str, TR, TE, alph, k=1.0):
    """Spoiled Gradient Recalled echo at steady state (SPGR) magnitude signal.

    S = k * PD * exp(-TE/T2*) * sin(a)*(1-E1) / (1 - cos(a)*E1),
    with E1 = exp(-TR/T1) and a the flip angle in radians.
    """
    e1 = np.exp(-TR / T1)          # longitudinal recovery factor
    t2_decay = np.exp(-TE / T2str)  # T2* decay at the echo time
    ernst_term = (np.sin(alph) * (1 - e1)) / (1 - (np.cos(alph) * e1))
    return k * PD * t2_decay * ernst_term
def spgr_complex(M0, T1, T2str, TR, TE, alph, c_shift, del_B0, phi):
    """Complex Spoiled Gradient Recalled echo (SPGR) steady-state signal.

    S = M0 * exp(-TE/T2* - i*2*pi*df*TE + i*phi)
           * sin(a)*(1 - exp(-TR/T1)) / (1 - cos(a)*exp(-TR/T1))

    parameters:
    M0      :: equilibrium magnetization (proportional to proton density)
    T1      :: longitudinal relaxation time
    T2str   :: T2* relaxation (spin-spin effects plus dephasing)
    TR      :: sequence repetition time
    TE      :: sequence echo time
    alph    :: flip angle in radians
    c_shift :: chemical-shift frequency offset in Hz
    del_B0  :: B0 field offset in tesla
    phi     :: RF phase in radians
    @return S :: predicted complex signal
    """
    # Total off-resonance frequency: chemical shift plus field offset scaled
    # by the proton gyromagnetic ratio (gamma/2pi = 42.577478518 MHz/T).
    df = c_shift + (42.577478518e6 * del_B0)  # del_B0 in T, c_shift in Hz
    # BUG FIX: the original exponent was 'np.exp(-TE/T2str(-1.0 * TE)/T2str - ...)',
    # which *calls* T2str as a function and raises TypeError. The intended
    # exponent matches T2strw_cplx above: -TE/T2* - i*2*pi*df*TE + i*phi.
    decay_and_phase = np.exp((-1.0 * TE) / T2str - 1j * 2 * np.pi * df * TE + 1j * phi)
    e1 = np.exp(-TR / T1)  # longitudinal recovery factor
    S = M0 * decay_and_phase * ((np.sin(alph) * (1 - e1)) / (1 - (np.cos(alph) * e1)))
    return S
def T1w_mag(K, T1, TR, alph):
    """Expected signal for T1-weighted UTE GRE magnitude images.

    S = K * sin(a)*(1-E1) / (1 - cos(a)*E1), with E1 = exp(-TR/T1).
    """
    recovery = np.exp((-1.0 * TR) / T1)
    numerator = np.sin(alph) * (1 - recovery)
    denominator = 1 - (np.cos(alph) * recovery)
    return K * (numerator / denominator)
def spgr_complex_flex_experimental(M0=None, T1=None, T2str=None, TR=None, TE=None, alph=None, c_shift=None, del_B0=None, phi=None):
    """Flexible complex SPGR signal: each factor is included only when its
    parameters are provided, so sub-models can be fitted with one function.

    S = M0 * T1_part * T2_part * rf_phase_part * freq_shift_part, where each
    factor defaults to 1.0 when its parameters are None:
      T1_part         = sin(a)*(1-exp(-TR/T1)) / (1-cos(a)*exp(-TR/T1))  (needs T1, TR, alph)
      T2_part         = exp(-TE/T2*)                                     (needs T2str, TE)
      rf_phase_part   = exp(i*phi)                                       (needs phi)
      freq_shift_part = exp(-i*2*pi*df*TE), df = c_shift + gbar*del_B0   (needs TE, and c_shift and/or del_B0)

    Fixes relative to the original:
      * the T2* factor was computed but never assigned (NameError on use)
        and called T2str as a function; it is now T2_part = exp(-TE/T2*).
      * gyromagnetic constant was 42577478518e6 (missing decimal point);
        now 42.577478518e6 Hz/T, matching spgr_complex.
      * presence checks use 'is None' instead of truthiness, so legitimate
        zero values (M0=0, phi=0, c_shift=0) are honoured, and a frequency
        shift is applied when EITHER c_shift or del_B0 is supplied.
    """
    if M0 is None:
        M0 = 1.0
    # Steady-state longitudinal (Ernst) factor.
    if T1 is None or TR is None or alph is None:
        T1_part = 1.0
    else:
        T1_part = ((np.sin(alph) * (1 - np.exp(-TR / T1))) / (1 - (np.cos(alph) * np.exp(-TR / T1))))
    # T2* decay factor.
    if T2str is None or TE is None:
        T2_part = 1.0
    else:
        T2_part = np.exp((-1.0 * TE) / T2str)
    # Constant RF phase factor.
    if phi is None:
        rf_phase_part = 1.0
    else:
        rf_phase_part = np.exp(1j * phi)
    # Off-resonance phase factor. NOTE: TE must be provided whenever a
    # frequency shift is requested (same requirement as the original).
    if c_shift is None and del_B0 is None:
        freq_shift_part = 1.0
    else:
        df = 0.0
        if c_shift is not None:
            df = df + c_shift
        if del_B0 is not None:
            df = df + (42.577478518e6 * del_B0)  # gamma/2pi in Hz/T, del_B0 in T
        freq_shift_part = np.exp(-1j * 2 * np.pi * df * TE)
    S = M0 * T1_part * T2_part * rf_phase_part * freq_shift_part
    return S
|
import io
import struct
from confluent_kafka.avro import loads as avro_loads
from .mock_schema_registry import MockSchemaRegistryClient
from winton_kafka_streams.processor.serialization.serdes import AvroSerde
import winton_kafka_streams.kafka_config as config
# Avro schema used by these tests: a bare Avro string type.
string_avro = '{"type": "string"}'
def create_serde(registry, schema):
    """Build an AvroSerde configured as a key serde and wired to the given
    mock schema registry and Avro schema."""
    serde = AvroSerde()
    # The registry URL just needs to be non-empty for configure(); the mock
    # registry injected below is what actually gets used.
    config.AVRO_SCHEMA_REGISTRY = 'nowhere'
    config.KEY_AVRO_SCHEMA = schema
    # Second argument presumably the is_key flag (KEY_AVRO_SCHEMA is what
    # was set above) -- TODO confirm against AvroSerde.configure.
    serde.configure(config, True)
    # Inject the mock registry into both helpers.
    # NOTE(review): _set_serializer is also called on the *deserializer's*
    # helper; presumably the helper uses one registry client for both
    # directions -- verify against the AvroSerde internals.
    serde.serializer._avro_helper._set_serializer(registry)
    serde.deserializer._avro_helper._set_serializer(registry)
    # Expose the mock registry so tests can inspect registered schemas.
    serde.test_registry = registry
    return serde
def test_serialize_avro():
    """Serialized output follows the Confluent wire format: one magic byte,
    a 4-byte big-endian schema id, then the Avro-encoded payload."""
    registry = MockSchemaRegistryClient()
    serde = create_serde(registry, string_avro)
    message = serde.serializer.serialize('topic', 'data')
    message_io = io.BytesIO(message)
    # '>bIb4s': magic byte, schema id (uint32, big-endian), Avro string
    # length byte, and the 4 payload bytes.
    magic, schema_id, length, string = struct.unpack('>bIb4s', message_io.read(10))
    assert(0 == magic)
    assert(schema_id in registry.id_to_schema)
    assert(8 == length)  # zig-zag varint: byte 8 encodes the length 4
    assert(b'data' == string)
    message_io.close()
def test_deserialize_avro():
    """Deserializer decodes a hand-built Confluent-framed Avro message."""
    registry = MockSchemaRegistryClient()
    serde = create_serde(registry, string_avro)
    schema_id = registry.register('topic-value', avro_loads(string_avro))
    # Frame: magic byte 0 + big-endian schema id + zig-zag length byte
    # (0x08 encodes 4) + the 4 payload bytes.
    serialized = b'\0' + schema_id.to_bytes(4, 'big') + b'\x08data'
    message = serde.deserializer.deserialize('ignored', serialized)
    assert('data' == message)
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Author: Shengjia Yan
# Date: 2017/5/10
# Email: sjyan@seu.edu.cn
# Manual annotation (人工标注)
import json
from table import *
class Mark(object):
    """Interactive human annotation (entity linking, "EL") of table-cell
    mentions against three knowledge bases: Baidu Baike, Hudong Baike and
    zhwiki. Written for Python 2 (print statements, input())."""
    # table_path: path to the tables file
    # baidubaike_candidates_path: path to the Baidu Baike candidate entities
    # hudongbaike_candidates_path: path to the Hudong Baike candidate entities
    # zhwiki_candidates_path: path to the zhwiki candidate entities
    # baidubaike_single_human_mark_path: path prefix of the per-table EL human-annotation results based on Baidu Baike
    # hudongbaike_single_human_mark_path: path prefix of the per-table EL human-annotation results based on Hudong Baike
    # zhwiki_single_human_mark_path: path prefix of the per-table EL human-annotation results based on zhwiki
    # baidubaike_total_human_mark_path: path of the whole-file EL human-annotation results based on Baidu Baike
    # hudongbaike_total_human_mark_path: path of the whole-file EL human-annotation results based on Hudong Baike
    # zhwiki_total_human_mark_path: path of the whole-file EL human-annotation results based on zhwiki
    def __init__(self, table_path, baidubaike_candidates_path, hudongbaike_candidates_path, zhwiki_candidates_path, baidubaike_single_human_mark_path, hudongbaike_single_human_mark_path, zhwiki_single_human_mark_path, baidubaike_total_human_mark_path, hudongbaike_total_human_mark_path, zhwiki_total_human_mark_path):
        self.table_path = table_path
        # Load all tables eagerly via the project's TableManager
        # (imported by `from table import *`).
        table_manager = TableManager(table_path)
        self.tables = table_manager.get_tables()
        self.table_quantity = table_manager.table_quantity
        self.baidubaike_candidates_path = baidubaike_candidates_path
        self.hudongbaike_candidates_path = hudongbaike_candidates_path
        self.zhwiki_candidates_path = zhwiki_candidates_path
        self.baidubaike_single_human_mark_path = baidubaike_single_human_mark_path
        self.hudongbaike_single_human_mark_path = hudongbaike_single_human_mark_path
        self.zhwiki_single_human_mark_path = zhwiki_single_human_mark_path
        self.baidubaike_total_human_mark_path = baidubaike_total_human_mark_path
        self.hudongbaike_total_human_mark_path = hudongbaike_total_human_mark_path
        self.zhwiki_total_human_mark_path = zhwiki_total_human_mark_path
    def mark(self):
        """Interactively annotate every table cell: for each mention, show
        the candidate entities from each knowledge base and ask the operator
        to pick one (-1 means "no referent entity"); one JSON file is written
        per table and per knowledge base."""
        try:
            baidubaike_candidates_file = open(self.baidubaike_candidates_path, 'r')
            baidubaike_candidates = baidubaike_candidates_file.read()
            baidubaike_candidates_json = json.loads(baidubaike_candidates, encoding='utf8')
            hudongbaike_candidates_file = open(self.hudongbaike_candidates_path, 'r')
            hudongbaike_candidates = hudongbaike_candidates_file.read()
            hudongbaike_candidates_json = json.loads(hudongbaike_candidates, encoding='utf8')
            zhwiki_candidates_file = open(self.zhwiki_candidates_path, 'r')
            zhwiki_candidates = zhwiki_candidates_file.read()
            zhwiki_candidates_json = json.loads(zhwiki_candidates, encoding='utf8')
            # for i in range(self.table_quantity):
            # NOTE(review): hard-coded sub-range -- presumably resuming a
            # partially finished annotation session; confirm before reuse.
            for i in range(28, 123):
                table = self.tables[i]
                row_num = table.row_num
                col_num = table.col_num
                # Per-table accumulators, one per knowledge base.
                t_baidubaike = []
                t_hudongbaike = []
                t_zhwiki = []
                try:
                    baidubaike_single_human_mark_path = self.baidubaike_single_human_mark_path + str(i) + '.txt'
                    baidubaike_single_human_mark_file = open(baidubaike_single_human_mark_path, 'w')
                    hudongbaike_single_human_mark_path = self.hudongbaike_single_human_mark_path + str(i) + '.txt'
                    hudongbaike_single_human_mark_file = open(hudongbaike_single_human_mark_path, 'w')
                    zhwiki_single_human_mark_path = self.zhwiki_single_human_mark_path + str(i) + '.txt'
                    zhwiki_single_human_mark_file = open(zhwiki_single_human_mark_path, 'w')
                    for r in range(row_num):
                        row_baidubaike = []
                        row_hudongbaike = []
                        row_zhwiki = []
                        for c in range(col_num):
                            dict_baidubaike = {}
                            dict_hudongbaike = {}
                            dict_zhwiki = {}
                            # Row 0 holds the table header; copy it through unchanged.
                            if r == 0:
                                dict_baidubaike['header'] = baidubaike_candidates_json[i][r][c]['header']
                                row_baidubaike.append(dict_baidubaike)
                                dict_hudongbaike['header'] = hudongbaike_candidates_json[i][r][c]['header']
                                row_hudongbaike.append(dict_hudongbaike)
                                dict_zhwiki['header'] = zhwiki_candidates_json[i][r][c]['header']
                                row_zhwiki.append(dict_zhwiki)
                                continue
                            # All three candidate files share the same mentions.
                            mention = baidubaike_candidates_json[i][r][c]['mention']
                            dict_baidubaike['mention'] = mention
                            dict_hudongbaike['mention'] = mention
                            dict_zhwiki['mention'] = mention
                            print '---------------------------------------------------------'
                            print 'Table ' + str(i) + ' Row ' + str(r) + ' Column ' + str(c)
                            print 'Mention: ' + mention
                            ############################
                            # NOTE(review): multiple_kb_el_result_json is not
                            # defined anywhere in this file, so this section
                            # raises NameError at runtime. It looks like
                            # leftover code for displaying a previous
                            # multi-KB EL result; confirm and either define
                            # the variable or remove this block.
                            print 'Result: '
                            result_list = multiple_kb_el_result_json[i][r][c]['entity']
                            result_str = ''
                            for p in range(len(result_list)):
                                result_str += result_list[p][0]
                                result_str += '<'
                                result_str += result_list[p][1]
                                result_str += '> '
                            print result_str
                            print
                            ###########################
                            # baidubaike
                            print 'Baidubaike Candidates List:'
                            baidubaike_candidates_list = baidubaike_candidates_json[i][r][c]['candidates']
                            baidubaike_candidates_str = ''
                            if len(baidubaike_candidates_list) == 0:
                                baidubaike_entity = 'Null'
                                print 'Null'
                                print
                            elif len(baidubaike_candidates_list) == 1:
                                # Single candidate: select it without prompting.
                                baidubaike_entity = baidubaike_candidates_json[i][r][c]['candidates'][0]
                                print 'Only 1 candidate <' + baidubaike_entity + '>. Auto selection.'
                                print
                            else:
                                for k in range(len(baidubaike_candidates_list)):
                                    baidubaike_candidates_str += str(k)
                                    baidubaike_candidates_str += '. '
                                    baidubaike_candidates_str += baidubaike_candidates_list[k]
                                    baidubaike_candidates_str += '\n'
                                print baidubaike_candidates_str
                                # NOTE(review): Python 2 input() eval()s the
                                # typed text; raw_input() would be safer.
                                # Applies to all three prompts below.
                                candidate_index = input('Input the number before the entity you choose(If there is no referent entity, just input -1): ')
                                print
                                if int(candidate_index) == -1:
                                    baidubaike_entity = 'Null'
                                else:
                                    baidubaike_entity = baidubaike_candidates_json[i][r][c]['candidates'][int(candidate_index)]
                            # hudongbaike
                            print 'Hudongbaike Candidates List:'
                            hudongbaike_candidates_list = hudongbaike_candidates_json[i][r][c]['candidates']
                            hudongbaike_candidates_str = ''
                            if len(hudongbaike_candidates_list) == 0:
                                hudongbaike_entity = 'Null'
                                print 'Null'
                                print
                            elif len(hudongbaike_candidates_list) == 1:
                                hudongbaike_entity = hudongbaike_candidates_json[i][r][c]['candidates'][0]
                                print 'Only 1 candidate <' + hudongbaike_entity + '>. Auto selection.'
                                print
                            else:
                                for k in range(len(hudongbaike_candidates_list)):
                                    hudongbaike_candidates_str += str(k)
                                    hudongbaike_candidates_str += '. '
                                    hudongbaike_candidates_str += hudongbaike_candidates_list[k]
                                    hudongbaike_candidates_str += '\n'
                                print hudongbaike_candidates_str
                                candidate_index = input('Input the number before the entity you choose(If there is no referent entity, just input -1): ')
                                print
                                if int(candidate_index) == -1:
                                    hudongbaike_entity = 'Null'
                                else:
                                    hudongbaike_entity = hudongbaike_candidates_json[i][r][c]['candidates'][int(candidate_index)]
                            # zhwiki
                            print 'Zhwiki Candidates List:'
                            zhwiki_candidates_list = zhwiki_candidates_json[i][r][c]['candidates']
                            zhwiki_candidates_str = ''
                            if len(zhwiki_candidates_list) == 0:
                                zhwiki_entity = 'Null'
                                print 'Null'
                                print
                            elif len(zhwiki_candidates_list) == 1:
                                zhwiki_entity = zhwiki_candidates_json[i][r][c]['candidates'][0]
                                print 'Only 1 candidate <' + zhwiki_entity + '>. Auto selection.'
                                print
                            else:
                                for k in range(len(zhwiki_candidates_list)):
                                    zhwiki_candidates_str += str(k)
                                    zhwiki_candidates_str += '. '
                                    zhwiki_candidates_str += zhwiki_candidates_list[k]
                                    zhwiki_candidates_str += '\n'
                                print zhwiki_candidates_str
                                candidate_index = input('Input the number before the entity you choose(If there is no referent entity, just input -1): ')
                                print
                                if int(candidate_index) == -1:
                                    zhwiki_entity = 'Null'
                                else:
                                    zhwiki_entity = zhwiki_candidates_json[i][r][c]['candidates'][int(candidate_index)]
                            dict_baidubaike['entity'] = baidubaike_entity
                            dict_hudongbaike['entity'] = hudongbaike_entity
                            dict_zhwiki['entity'] = zhwiki_entity
                            row_baidubaike.append(dict_baidubaike)
                            row_hudongbaike.append(dict_hudongbaike)
                            row_zhwiki.append(dict_zhwiki)
                        t_baidubaike.append(row_baidubaike)
                        t_hudongbaike.append(row_hudongbaike)
                        t_zhwiki.append(row_zhwiki)
                finally:
                    # Persist whatever was annotated for this table, even if
                    # the operator aborted mid-table.
                    baidubaike_human_mark_json = json.dumps(t_baidubaike, ensure_ascii=False)
                    baidubaike_single_human_mark_file.write(baidubaike_human_mark_json)
                    hudongbaike_human_mark_json = json.dumps(t_hudongbaike, ensure_ascii=False)
                    hudongbaike_single_human_mark_file.write(hudongbaike_human_mark_json)
                    zhwiki_human_mark_json = json.dumps(t_zhwiki, ensure_ascii=False)
                    zhwiki_single_human_mark_file.write(zhwiki_human_mark_json)
                    if baidubaike_single_human_mark_file:
                        baidubaike_single_human_mark_file.close()
                    if hudongbaike_single_human_mark_file:
                        hudongbaike_single_human_mark_file.close()
                    if zhwiki_single_human_mark_file:
                        zhwiki_single_human_mark_file.close()
        finally:
            if baidubaike_candidates_file:
                baidubaike_candidates_file.close()
            if hudongbaike_candidates_file:
                hudongbaike_candidates_file.close()
            if zhwiki_candidates_file:
                zhwiki_candidates_file.close()
    def conbine(self):
        """Combine the per-table annotation files into one whole-file JSON
        array per knowledge base. (Method name 'conbine' [sic] is kept
        unchanged because callers may rely on it.)"""
        try:
            # baidubaike
            baidubaike_human_mark_file = open(self.baidubaike_total_human_mark_path, 'w')
            baidubaike_whole = []
            for i in range(self.table_quantity):
                human_mark_single_file_path = self.baidubaike_single_human_mark_path + str(i) + '.txt'
                human_mark_single_file = open(human_mark_single_file_path, 'r').read()
                human_mark_single_json = json.loads(human_mark_single_file)
                baidubaike_whole.append(human_mark_single_json)
            # hudongbaike
            hudongbaike_human_mark_file = open(self.hudongbaike_total_human_mark_path, 'w')
            hudongbaike_whole = []
            for i in range(self.table_quantity):
                human_mark_single_file_path = self.hudongbaike_single_human_mark_path + str(i) + '.txt'
                human_mark_single_file = open(human_mark_single_file_path, 'r').read()
                human_mark_single_json = json.loads(human_mark_single_file)
                hudongbaike_whole.append(human_mark_single_json)
            # zhwiki
            zhwiki_human_mark_file = open(self.zhwiki_total_human_mark_path, 'w')
            zhwiki_whole = []
            for i in range(self.table_quantity):
                human_mark_single_file_path = self.zhwiki_single_human_mark_path + str(i) + '.txt'
                human_mark_single_file = open(human_mark_single_file_path, 'r').read()
                human_mark_single_json = json.loads(human_mark_single_file)
                zhwiki_whole.append(human_mark_single_json)
        finally:
            # Write out whatever was accumulated, then close the outputs.
            baidubaike_human_mark_json = json.dumps(baidubaike_whole, ensure_ascii=False)
            baidubaike_human_mark_file.write(baidubaike_human_mark_json)
            hudongbaike_human_mark_json = json.dumps(hudongbaike_whole, ensure_ascii=False)
            hudongbaike_human_mark_file.write(hudongbaike_human_mark_json)
            zhwiki_human_mark_json = json.dumps(zhwiki_whole, ensure_ascii=False)
            zhwiki_human_mark_file.write(zhwiki_human_mark_json)
            if baidubaike_human_mark_file:
                baidubaike_human_mark_file.close()
            if hudongbaike_human_mark_file:
                hudongbaike_human_mark_file.close()
            if zhwiki_human_mark_file:
                zhwiki_human_mark_file.close()
    def split(self):
        """Split each whole-file annotation JSON back into per-table files
        (inverse of conbine)."""
        try:
            # baidubaike
            baidubaike_human_mark_file = open(self.baidubaike_total_human_mark_path, 'r')
            baidubaike_human_mark = baidubaike_human_mark_file.read()
            baidubaike_human_mark_json = json.loads(baidubaike_human_mark, encoding='utf8')
            # hudongbaike
            hudongbaike_human_mark_file = open(self.hudongbaike_total_human_mark_path, 'r')
            hudongbaike_human_mark = hudongbaike_human_mark_file.read()
            hudongbaike_human_mark_json = json.loads(hudongbaike_human_mark, encoding='utf8')
            # zhwiki
            zhwiki_human_mark_file = open(self.zhwiki_total_human_mark_path, 'r')
            zhwiki_human_mark = zhwiki_human_mark_file.read()
            zhwiki_human_mark_json = json.loads(zhwiki_human_mark, encoding='utf8')
            for i in range(self.table_quantity):
                try:
                    baidubaike_single_human_mark_path = self.baidubaike_single_human_mark_path + str(i) + '.txt'
                    baidubaike_single_human_mark_file = open(baidubaike_single_human_mark_path, 'w')
                    baidubaike_single_human_mark_json = json.dumps(baidubaike_human_mark_json[i], ensure_ascii=False)
                    baidubaike_single_human_mark_file.write(baidubaike_single_human_mark_json)
                    hudongbaike_single_human_mark_path = self.hudongbaike_single_human_mark_path + str(i) + '.txt'
                    hudongbaike_single_human_mark_file = open(hudongbaike_single_human_mark_path, 'w')
                    hudongbaike_single_human_mark_json = json.dumps(hudongbaike_human_mark_json[i], ensure_ascii=False)
                    hudongbaike_single_human_mark_file.write(hudongbaike_single_human_mark_json)
                    zhwiki_single_human_mark_path = self.zhwiki_single_human_mark_path + str(i) + '.txt'
                    zhwiki_single_human_mark_file = open(zhwiki_single_human_mark_path, 'w')
                    zhwiki_single_human_mark_json = json.dumps(zhwiki_human_mark_json[i], ensure_ascii=False)
                    zhwiki_single_human_mark_file.write(zhwiki_single_human_mark_json)
                finally:
                    if baidubaike_single_human_mark_file:
                        baidubaike_single_human_mark_file.close()
                    if hudongbaike_single_human_mark_file:
                        hudongbaike_single_human_mark_file.close()
                    if zhwiki_single_human_mark_file:
                        zhwiki_single_human_mark_file.close()
        finally:
            if baidubaike_human_mark_file:
                baidubaike_human_mark_file.close()
            if hudongbaike_human_mark_file:
                hudongbaike_human_mark_file.close()
            if zhwiki_human_mark_file:
                zhwiki_human_mark_file.close()
|
#!/usr/bin/env python3
"""
Tools for unit tests to activate and deactivate "demo-extension"
"""
import importlib
import sys
from pathlib import Path
from typing import List
# Absolute path of the bundled demo-extension directory (sibling of this file).
demo_extension_path = str(Path(__file__).parent / 'demo-extension')
def get_namespace_package_path() -> List[str]:
    """Return the current ``__path__`` of the proxytest.backends namespace package."""
    # Import inside the function so each call sees the current module state
    # (sys.path may have been modified since the previous call).
    import proxytest.backends
    return proxytest.backends.__path__
def reload_namespace_package():
    """Reload ``proxytest`` so that proxytest.backends.__path__ is recomputed.

    Required to update proxytest.backends.__path__ after modifying sys.path.
    """
    # Import kept local so the reload acts on the freshly resolved module.
    import proxytest
    importlib.reload(proxytest)
def activate_demo_extension():
    """Add the demo-extension directory to sys.path and verify the
    proxytest.backends namespace package picked it up."""
    if demo_extension_path not in sys.path:
        sys.path.append(demo_extension_path)
    reload_namespace_package()  # required after modifying path
    # Verify the namespace package updated correctly: with the extension on
    # the path it should span at least two directories.
    ns_path = get_namespace_package_path()
    if len(ns_path) < 2:
        raise Exception('Namespace path less than 2 after adding demo extension: {}'.format(ns_path))
def deactivate_demo_extension():
    """Remove the demo-extension directory from sys.path and refresh the
    namespace package; a no-op when the extension is not active."""
    if demo_extension_path not in sys.path:
        return
    sys.path.remove(demo_extension_path)
    reload_namespace_package()  # required after modifying path
|
import os
import urllib
import urllib.request
import zipfile
def save_zip(url, loc):
    """Download *url* to the local path *loc*, skipping the download when
    the file already exists.

    BUG FIX: the module imported only ``urllib``, which does not guarantee
    the ``urllib.request`` submodule is loaded; ``import urllib.request``
    is now added at the top of the file.
    """
    if not os.path.exists(loc):
        urllib.request.urlretrieve(url, loc)
def unzip(file, loc):
    """Extract every member of the zip archive *file* into directory *loc*."""
    archive = zipfile.ZipFile(file, 'r')
    try:
        archive.extractall(loc)
    finally:
        archive.close()
def mkdir_if_not_exists(loc):
    """Create directory *loc* if it does not already exist.

    Uses EAFP instead of an exists()-then-mkdir check so a concurrent
    creation between the test and the mkdir cannot raise. Still raises
    FileNotFoundError when the parent directory is missing, matching the
    original behaviour.
    """
    try:
        os.mkdir(loc)
    except FileExistsError:
        pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.