import unittest
from app.models import Article
class ArticleTest(unittest.TestCase):
'''
Test Class to test the behaviour of the Article class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
        self.new_article = Article("Manish Singh","Indian fantasy sports app Dream11's parent firm raises $225M at over $2.5B valuation","Dream Sports, the headquartered firm builds what it calls…","https://techcrunch.com/2020/09/14/indian-fantasy-sports-app-dream11s-parent-firm-raises-225m-at-over-2-5b-valuation/","https://static.coindesk.com/wp-content/uploads/2020/09/claudio-schwarz-purzlbaum-htol9OLeW20-unsplash.jpg","2020-09-14T13:47:13Z","The folks building the next generation of digital money in Switzerland understand the need to collaborate. Stablecoins, digital tokens pegged one i")
def test_instance(self):
        self.assertIsInstance(self.new_article, Article)
|
import pytest
class TestRedirect:
def test_not_auth_redirect(self, client):
urls = [
'/new_words/',
'/get_words/',
'/familiar_words/',
'/get_familiar/',
'/translate/',
'/get_new/',
'/get_known/'
]
for url in urls:
try:
response = client.get(url)
except Exception as e:
assert False, (
                    f'Page `{url}` is not working correctly. Error: `{e}`'
)
assert response.status_code != 404, (
                f'Page `{url}` was not found; check this address '
                'in *urls.py*'
)
assert response.status_code in (301, 302), (
                'Check that you redirect the user from page '
                f'`{url}` to the login page if they are not authenticated'
)
assert response.url.startswith('/auth/login'), (
                'Check that you are redirecting to the login page '
                '`/auth/login/`'
)
@pytest.mark.django_db(transaction=True)
def test_auth_not_redirect(self, user_client, few_words_with_rating):
urls = [
'/new_words/',
'/familiar_words/',
'/translate/',
]
for url in urls:
try:
response = user_client.get(url)
except Exception as e:
assert False, (
                    f'Page `{url}` is not working correctly. Error: `{e}`'
)
assert response.status_code != 404, (
                f'Page `{url}` was not found; check this address '
                'in *urls.py*'
)
assert response.status_code not in (301, 302), (
                'Check that you do not redirect the user from page '
                f'`{url}` to the login page if they are authenticated'
)
@pytest.mark.django_db(transaction=True)
def test_auth_redirect(self, user_client):
urls = [
'/get_words/',
'/get_familiar/',
'/get_new/',
'/get_known/'
]
for url in urls:
try:
response = user_client.get(url)
except Exception as e:
assert False, (
                    f'Page `{url}` is not working correctly. Error: `{e}`'
)
assert response.status_code != 404, (
                f'Page `{url}` was not found; check this address '
                'in *urls.py*'
)
assert response.status_code in (301, 302), (
                'Check that you redirect the user from page '
                f'`{url}` to the task page if they are authenticated'
)
|
import nltk
import numpy as np
import tensorflow as tf
from nltk.stem import WordNetLemmatizer
import json
# from google.colab import files  # only needed when running inside Google Colab
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dropout, Activation, Dense, Flatten
import pickle
import random
#print(pickle.format_version)
nltk.download('punkt')
nltk.download('wordnet')
#uploaded=files.upload()
with open('chatbot_intents.json') as file:
data=json.load(file,strict=False)
print(data['intents'])
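# Build the vocabulary, label set, and (tokens, tag) training pairs from the intents file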
lemm=WordNetLemmatizer()
words=[]
labels=[]
x=[]
y=[]
for intent in data['intents']:
for pattern in intent['patterns']:
w=nltk.word_tokenize(pattern)
words.extend(w)
x.append((w,intent['tag']))
if intent['tag'] not in labels:
labels.append(intent['tag'])
words = [lemm.lemmatize(i.lower()) for i in words if i != '?']
words=sorted(list(set(words)))
labels=sorted(list(set(labels)))
print(len(words))
print(len(labels))
pickle.dump(words,open('words.pkl','wb'))
pickle.dump(labels,open('labels.pkl','wb'))
train=[]
output=[0]*len(labels)
print(len(x))
for doc in x:
bag=[]
pattern_w=doc[0]
pattern_w=[lemm.lemmatize(w.lower()) for w in pattern_w]
for w in words:
if w in pattern_w:
bag.append(1)
else:
bag.append(0)
output_row=list(output)
output_row[labels.index(doc[1])]=1
train.append((bag,output_row))
random.shuffle(train)
train = np.array(train, dtype=object)  # ragged (bag, output_row) pairs need an object array
train_x=list(train[:,0])
train_y=list(train[:,1])
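# Small feed-forward classifier: bag-of-words input, two ReLU hidden layers with dropout,
# softmax over the intent labels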
model=Sequential()
model.add(Dense(64,input_shape=(len(train_x[0]),),activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(64,activation='relu'))
model.add(Dropout(0.35))
model.add(Dense(len(train_y[0]),activation='softmax'))
model.compile(optimizer='adam',metrics=['accuracy'],loss='categorical_crossentropy')
train_x=np.asarray(train_x)
train_y=np.asarray(train_y)
#print(len(train_x))
model.fit(train_x,train_y,epochs=100,verbose=1,batch_size=5)
model.save('chatbot_model.h5')
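# A minimal inference sketch (hypothetical, assuming the artifacts saved above):
#   model = keras.models.load_model('chatbot_model.h5')
#   words = pickle.load(open('words.pkl', 'rb'))
#   labels = pickle.load(open('labels.pkl', 'rb'))
#   tokens = [lemm.lemmatize(t.lower()) for t in nltk.word_tokenize("hello there")]
#   bow = np.array([[1 if w in tokens else 0 for w in words]])
#   predicted_tag = labels[int(np.argmax(model.predict(bow)))]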
|
#!/usr/bin/env python
PKG = 'manhole_detector'
import roslib; roslib.load_manifest(PKG)
import rospy
import fileinput
from sensor_msgs.msg import Image
from std_msgs.msg import Bool
import numpy as np
import sys
from keras.models import load_model
import time
from cv_bridge import CvBridge, CvBridgeError
import cv2
from visualization_msgs.msg import Marker
# Profiling
from profilehooks import profile
class CNNDetector:
@profile
def depth_callback(self, img):
bool_msg = Bool(False)
depth_image = self.bridge.imgmsg_to_cv2(img, '32FC1')
#downsampled = cv2.resize(depth_image, None, fx=0.25,fy=0.25)
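        # Downsample the depth image by sampling every 4th pixel into the 60x80 network input;
        # NaN readings (no depth return) are clamped to 10 m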
        for i_ in range(60):  # cover the full 60x80 grid (range(0, 59) dropped the last row/column)
            for j_ in range(80):
if (np.isnan(depth_image[i_*4,j_*4])):
#print "NAN"
self.down_image[0,0,i_,j_] = 10.0
else:
#print depth_image[i_*4, j_*4]
self.down_image[0,0,i_,j_] = np.real(depth_image[i_*4,j_*4])
y = self.model.predict(self.down_image)
        print(y)
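        # Ignore fully saturated predictions; they typically indicate a degenerate input frame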
if y[0,0]>= 0.9999999 or y[0,0]<=0.00000000000000001:
return
if y[0,0]>= self.thres:
bool_msg.data = True
marker = Marker()
marker.header.frame_id = self.base_frame_id
marker.header.stamp = rospy.Time.now()
marker.type = marker.CYLINDER
marker.action = marker.ADD
marker.pose.orientation.w = 1
marker.pose.position.x = 2.0
marker.pose.position.y = 2.0
t = rospy.Duration(2)
marker.lifetime = t
marker.scale.x = 1.6
marker.scale.y = 1.6
marker.scale.z = 1.6
marker.color.a = 1.0
marker.color.r = 1.0
marker.color.g = 1.0
marker.color.b = 1.0
self.manhole_pub.publish(marker)
else:
bool_msg.data = False
self.bool_pub.publish(bool_msg)
def __init__(self, camera, filename):
self.bridge = CvBridge()
self.down_image = np.zeros((1,1,60,80), dtype=float)
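        # Network input layout: (batch=1, channels=1, height=60, width=80)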
self.load_cnn(filename)
np.set_printoptions(precision=3, threshold=10000, linewidth=10000)
depth_image = camera + "/depth_registered/image_raw"
depth_info = camera + "/depth_registered/camera_info"
# Set up your subscriber and define its callback
rospy.Subscriber(depth_image, Image, self.depth_callback)
# Setup publisher
self.bool_pub = rospy.Publisher('manhole',Bool, queue_size=2)
self.manhole_pub = rospy.Publisher('manhole_marker', Marker, queue_size = 10)
self.thres = 0.5
self.base_frame_id = 'base_link'
def load_cnn(self, filename):
self.model = load_model(filename)
print ("Loaded_model")
print (self.model.summary())
if __name__ == '__main__':
if len(sys.argv) > 2:
rospy.init_node("cnn_detector")
detector = CNNDetector(sys.argv[1], sys.argv[2])
if rospy.has_param('~thres'):
detector.thres = rospy.get_param('~thres')
print ("Threshold of the detector: %f"%detector.thres)
if rospy.has_param('base_frame_id'):
detector.base_frame_id = rospy.get_param('base_frame_id')
print( "Base frame id: ", detector.base_frame_id)
# Spin until ctrl + c
rospy.spin()
else:
print ("usage: %s <camera> <cnn_file>" % sys.argv[0])
        sys.exit()
|
from datetime import datetime
import timebomb.models as models
def test_Notification():
notif = models.Notification("message")
assert notif.content == "message"
assert notif.read is False
assert str(notif) == "message"
def test_Player():
player = models.Player("name", "id")
assert player.name == "name"
assert player.id == "id"
assert player.team is None
assert player.hand is None
player = models.Player("name", "id", "team", ("A", "B"), "roomid")
assert player.name == "name"
assert player.id == "id"
assert player.team == "team"
assert player.hand == ("A", "B")
assert player.roomId == "roomid"
def test_Message():
now = datetime.now()
message = models.Message("player", "message")
assert message.player_name == "player"
assert message.content == "message"
assert message.timestamp and isinstance(message.timestamp, datetime)
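    # note: the string comparison below can be flaky if the minute rolls over
    # between datetime.now() above and the Message construction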
assert str(message) == f"[{now:%H:%M}] player: message"
def test_Room():
player = models.Player("player", "player_id")
room = models.Room("room", "room_id", (player,))
assert room.name == "room" and room.id == "room_id"
assert len(room.players) == 1 and room.players[0] is player
assert room.cutter is None and room.winning_team is None and room.status == ""
assert isinstance(room.cards_found, dict) and isinstance(room.cards_left, dict)
assert not room.cards_found and not room.cards_left
def test_GameState():
state = models.GameState()
assert isinstance(state.messages, list) and not state.messages
assert state.room is None and state.me is None and state.notification is None
def test_GameState_new_message():
state = models.GameState()
assert isinstance(state.messages, list) and not state.messages
message = state.new_message({"player": "player", "message": "test_message"})
assert len(state.messages) == 1 and state.messages[0] is message
assert message.player_name == "player" and message.content == "test_message"
for i in range(99):
last = state.new_message(
{"player": f"player{i}", "message": f"test_message{i}"}
)
assert len(state.messages) == 100
assert state.messages[0] is message and state.messages[99] is last
assert last.player_name == "player98" and last.content == "test_message98"
last = state.new_message({"player": "player99", "message": "test_message99"})
assert len(state.messages) == 100
assert state.messages[0] is not message and state.messages[99] is last
assert (
state.messages[0].player_name == "player0"
and state.messages[0].content == "test_message0"
)
assert last.player_name == "player99" and last.content == "test_message99"
res = state.new_message({"message": "test_message100"})
assert res is None
assert state.messages[99] is last
def test_GameState_new_notification():
state = models.GameState()
assert state.notification is None
notif1 = state.new_notification({"message": "notif1"})
assert state.notification is notif1 and notif1.content == "notif1"
notif2 = state.new_notification({"message": "notif2"})
assert state.notification is notif2 and notif2.content == "notif2"
notif3 = state.new_notification({"unknown": "notif2"})
assert notif3 is None and state.notification is notif2
def test_GameState_update_room():
state = models.GameState()
assert state.room is None
players_data = [{"name": "player1", "id": "id1"}]
room_data = {"name": "roomname", "id": "roomid", "players": players_data}
room = state.update_room(room_data)
assert state.room is room and room.name == "roomname" and room.id == "roomid"
assert len(room.players) == 1
assert room.players[0].name == "player1" and room.players[0].id == "id1"
new_data = {"name": "newname", "cutter": {"name": "cutter", "id": "cutterid"}}
room = state.update_room(new_data)
assert state.room is room and room.name == "newname" and room.id == "roomid"
assert len(room.players) == 1
assert room.players[0].name == "player1" and room.players[0].id == "id1"
assert (
isinstance(room.cutter, models.Player)
and room.cutter.id == "cutterid"
and room.cutter.name == "cutter"
)
new_data = {
"players": [{"name": "player1", "id": "id1"}, {"name": "player2", "id": "id2"}]
}
room = state.update_room(new_data)
assert state.room is room and room.name == "newname" and room.id == "roomid"
assert len(room.players) == 2
def test_GameState_update_me():
state = models.GameState()
assert state.me is None
player = state.update_me({"name": "player1", "id": "id1"})
assert state.me is player and player.name == "player1" and player.id == "id1"
assert player.hand is None
player = state.update_me({"hand": ("A", "A", "B", "A")})
assert state.me is player and player.name == "player1" and player.id == "id1"
assert player.hand == ("A", "A", "B", "A")
def test_GameState_reset():
state = models.GameState()
assert isinstance(state.messages, list) and not state.messages
assert state.room is None and state.me is None and state.notification is None
state.messages = ["m1", "m2"]
state.room = "Room"
state.me = "Me"
state.notification = "Notification"
state.reset()
assert isinstance(state.messages, list) and not state.messages
assert state.room is None and state.me is None and state.notification is None
|
# -*- coding: utf-8 -*-
#
import numpy
def _z():
return numpy.array([[0, 0]])
def _symm_r_0(r):
return numpy.array([[+r, 0], [-r, 0], [0, +r], [0, -r]])
def _symm_r0(r):
z = numpy.zeros_like(r)
return numpy.array([[+r, z], [-r, z], [z, +r], [z, -r]])
def _symm_s(s):
return numpy.array([[+s, +s], [-s, +s], [+s, -s], [-s, -s]])
def _symm_s_t(s, t):
return numpy.array(
[[+s, +t], [-s, +t], [+s, -t], [-s, -t], [+t, +s], [-t, +s], [+t, -s], [-t, -s]]
)
def _pm(s, t):
return numpy.array([[+s, +t], [-s, -t]])
def _pm2(s, t):
return numpy.array([[+s, +t], [-s, +t], [+s, -t], [-s, -t]])
def _collapse0(a):
"""Collapse all dimensions of `a` except the first.
"""
return a.reshape(a.shape[0], -1)
def unroll(data, symbolic=False):
bary = []
weights = []
if "zero" in data:
d = numpy.array(data["zero"]).T
bary.append(numpy.zeros((1, 2)))
weights.append(numpy.tile(d[0], 1))
if "symm_r0" in data:
d = numpy.array(data["symm_r0"]).T
r0_data = _symm_r0(d[1])
r0_data = numpy.swapaxes(r0_data, 0, 1)
bary.append(_collapse0(r0_data).T)
weights.append(numpy.tile(d[0], 4))
if "symm_s" in data:
d = numpy.array(data["symm_s"]).T
s_data = _symm_s(d[1])
s_data = numpy.swapaxes(s_data, 0, 1)
bary.append(_collapse0(s_data).T)
weights.append(numpy.tile(d[0], 4))
if "symm_s_t" in data:
d = numpy.array(data["symm_s_t"]).T
s_data = _symm_s_t(*d[1:])
s_data = numpy.swapaxes(s_data, 0, 1)
bary.append(_collapse0(s_data).T)
weights.append(numpy.tile(d[0], 8))
if "pm" in data:
d = numpy.array(data["pm"]).T
s_data = _pm(*d[1:])
s_data = numpy.swapaxes(s_data, 0, 1)
bary.append(_collapse0(s_data).T)
weights.append(numpy.tile(d[0], 2))
if "pm2" in data:
d = numpy.array(data["pm2"]).T
s_data = _pm2(*d[1:])
s_data = numpy.swapaxes(s_data, 0, 1)
bary.append(_collapse0(s_data).T)
weights.append(numpy.tile(d[0], 4))
bary = numpy.concatenate(bary)
weights = numpy.concatenate(weights)
return bary, weights
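# A minimal usage sketch with hypothetical scheme data (weights in column 0,
# point parameters in the remaining columns):
#   bary, weights = unroll({
#       "zero": [[0.25]],            # one point at the origin, weight 0.25
#       "symm_s": [[0.1875, 0.5]],   # four points (+/-s, +/-s) with s = 0.5
#   })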
|
from Strategies.AbstractStrategies.scheduleStrategy import scheduleStrategy
from redis_client import redisClient
# This strategy has to keep a memory of the past variables and their use/call times
# In order to keep the consistency of multiple contexts, the use of a key value cache server was recommended
robinsQueues = {}
class roundRobin(scheduleStrategy):
cacheServer = redisClient()
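    # Class-level client: every roundRobin instance shares one connection to the cache server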
def addElementToList(self,varName,value):
        try:
            localList = robinsQueues[varName]
        except KeyError:
            localList = []
        remoteList = self.cacheServer.getValue(varName)
        if not isinstance(remoteList, list):
            remoteList = []
        # get the latest state of the queue
        robinsQueues[varName] = self.integrateLists(remoteList, localList)
        if varName in robinsQueues:
            if value not in robinsQueues[varName]:
                robinsQueues[varName].append(value)
            self.cacheServer.setValue(varName, robinsQueues[varName])
        else:
            robinsQueues[varName] = [value]
            self.cacheServer.setValue(varName, robinsQueues[varName])
def getFrom(self,varName,put_at_end= True):
# localList=[]
# try :
# localList=robinsQueues[varName]
# except:
# pass
# remoteList = self.cacheClient.getValue(varName)
# if not isinstance(remoteList, list):
# remoteList = []
# robinsQueues[varName] = self.integrateLists(localList,remoteList)
#get the latest state of the queue
# robinsQueues[varName] = self.integrateLists(remoteList,localList)
# print(robinsQueues)
        if varName in robinsQueues and robinsQueues[varName]:
            val = robinsQueues[varName].pop(0)
            if put_at_end:
                robinsQueues[varName].append(val)
            # sync the cache even when the element is removed for good
            self.cacheServer.setValue(varName, robinsQueues[varName])
            return val
        else:
            return False
# future work: fault tolerance, multi agent redis modification
def integrateLists(self,list1,list2):
in_first = set(list1)
in_second = set(list2)
in_second_but_not_in_first = in_second - in_first
return list1 + list(in_second_but_not_in_first)
if __name__=="__main__":
ruben = roundRobin()
ruben.addElementToList("Robert",lambda x,y:True)
ruben.addElementToList("Roben",67)
ruben.addElementToList("Roben",67)
ruben.addElementToList("Roben",67)
ruben.addElementToList("Roben",15)
ruben.addElementToList("Roben",98)
ruben.addElementToList("Strûdel",68)
print(ruben.getFrom("Roben"))
    print(robinsQueues)
|
def memo_fib(input_value, save_memo):
    if input_value == 0:  # error/base cases need to be handled here
return 0
elif input_value == 1:
return 1
elif input_value in save_memo:
return save_memo[input_value]
else:
res = memo_fib(input_value-2,save_memo) + memo_fib(input_value-1,save_memo)
save_memo[input_value] = res
return res
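# Memoization caches each Fibonacci value once, reducing the naive O(2^n)
# recursion to O(n) time and O(n) space.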
if __name__ == "__main__":
save_memo = {}
print(memo_fib(10, save_memo))
|
import functools
import generic
from abc import ABCMeta, abstractmethod
from typing import Tuple, List, Optional, Generator
from enum import Enum
class CheckStatus(Enum):
OK = 1
WARNING = 2
CRITICAL = 3
def combine_statuses_and(s1: CheckStatus, s2: CheckStatus):
if s1 == CheckStatus.CRITICAL or s2 == CheckStatus.CRITICAL:
return CheckStatus.CRITICAL
elif s1 == CheckStatus.WARNING or s2 == CheckStatus.WARNING:
return CheckStatus.WARNING
else:
return CheckStatus.OK
def combine_statuses_or(s1: CheckStatus, s2: CheckStatus):
if s1 == CheckStatus.OK or s2 == CheckStatus.OK:
return CheckStatus.OK
elif s1 == CheckStatus.WARNING or s2 == CheckStatus.WARNING:
return CheckStatus.WARNING
else:
return CheckStatus.CRITICAL
class ValidatorResult(object):
    def __init__(self, status: CheckStatus, errors: List[Tuple[str, Optional[generic.Location]]], total_checks: int):
        self.status = status
self.errors = errors
self.total_checks = total_checks
class AbstractValidator(metaclass=ABCMeta):
@abstractmethod
def satisfies(self, entity) -> ValidatorResult:
pass
def short_circuit_evaluator(validators, items, pred):
for validator in validators:
for item in items:
result = validator.satisfies(item)
yield result
if pred(result):
return
class AllSatisfy(AbstractValidator):
def __init__(self, get_items, validator, build_message, short_circuit=False):
self.get_items = get_items
self.validator = validator
self.build_message = build_message
self.short_circuit = short_circuit
def satisfies(self, entity) -> ValidatorResult:
results = short_circuit_evaluator([self.validator], self.get_items(entity),
(lambda r: r.status != CheckStatus.OK) if self.short_circuit
else lambda _: False)
final_val = combine_and(results)
aggregate_errors = [] if final_val.status == CheckStatus.OK \
else [(self.build_message(entity), entity.location)] + final_val.errors
return ValidatorResult(
final_val.status,
aggregate_errors,
final_val.total_checks)
def combine_and(results: Generator[ValidatorResult, None, None]) -> ValidatorResult:
return functools.reduce(lambda x, y: ValidatorResult(combine_statuses_and(x.status, y.status),
x.errors + y.errors,
x.total_checks + y.total_checks),
results, ValidatorResult(CheckStatus.OK, [], 0))
def combine_or(results: List[ValidatorResult]):
errors = []
total_checks = 0
status = CheckStatus.CRITICAL
for res in results:
if res.status == CheckStatus.OK:
return ValidatorResult(CheckStatus.OK, [], res.total_checks)
else:
errors.extend(res.errors)
total_checks += res.total_checks
status = combine_statuses_or(status, res.status)
return ValidatorResult(status, errors, total_checks)
class AllOf(AbstractValidator):
def __init__(self, validators: List[AbstractValidator], get_item=lambda e: e, short_circuit=False):
self.validators = validators
self.get_item = get_item
self.short_circuit = short_circuit
def satisfies(self, entity) -> ValidatorResult:
results = short_circuit_evaluator(self.validators, [self.get_item(entity)],
(lambda r: r.status != CheckStatus.OK) if self.short_circuit
else lambda _: False)
return combine_and(results)
class AllOfSC(AllOf):
def __init__(self, validators: List[AbstractValidator], get_item=lambda e: e):
super(AllOfSC, self).__init__(validators, get_item, True)
class OneSatisfies(AbstractValidator):
def __init__(self, get_items, validator, build_message):
self.get_items = get_items
self.validator = validator
self.build_message = build_message
def satisfies(self, entity) -> ValidatorResult:
        results = [self.validator.satisfies(item) for item in self.get_items(entity)]
final_val = combine_or(results)
aggregate_errors = [] if final_val.status == CheckStatus.OK \
else [(self.build_message(entity), entity.location)] + final_val.errors
return ValidatorResult(
final_val.status,
aggregate_errors,
final_val.total_checks)
class OneOf(AbstractValidator):
def __init__(self, validators: List[AbstractValidator], get_item=lambda e: e):
self.validators = validators
self.get_item = get_item
def satisfies(self, entity) -> ValidatorResult:
results = [validator.satisfies(self.get_item(entity)) for validator in self.validators]
return combine_or(results)
class SingleCheck(AbstractValidator):
def __init__(self, severity=CheckStatus.CRITICAL):
self.severity = severity
@abstractmethod
def predicate(self, e):
pass
@abstractmethod
def build_message(self, e):
pass
def satisfies(self, entity) -> ValidatorResult:
if self.predicate(entity):
return ValidatorResult(CheckStatus.OK, [], 1)
else:
return ValidatorResult(self.severity, [(self.build_message(entity), entity.location)], 1)
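# A small composition sketch (with a hypothetical `entity`): single checks combine
# through the aggregate validators defined in this module, e.g.
#   validator = AllOf([IsPublic(), IsConst()])
#   result = validator.satisfies(entity)   # ValidatorResult with combined status/errors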
class AdHocCheck(SingleCheck):
    def __init__(self, predicate, build_msg, severity=CheckStatus.CRITICAL):
        super(AdHocCheck, self).__init__(severity)
        # stored under private names so they don't shadow the predicate/build_message methods
        self._predicate = predicate
        self._build_msg = build_msg
    def predicate(self, e):
        return self._predicate(e)
    def build_message(self, e):
        return self._build_msg(e)
class Not(AbstractValidator):
def __init__(self, validator: AbstractValidator, build_msg, severity=CheckStatus.CRITICAL):
self.validator = validator
self.build_message = build_msg
self.severity = severity
def satisfies(self, e):
result = self.validator.satisfies(e)
if result.status == CheckStatus.OK:
return ValidatorResult(self.severity, [(self.build_message(e), e.location)], 1)
else:
return ValidatorResult(CheckStatus.OK, [], 1)
class Forbidden(SingleCheck):
def __init__(self, build_msg, severity=CheckStatus.CRITICAL):
super(Forbidden, self).__init__(severity)
self.build_msg = build_msg
def predicate(self, _):
return False
def build_message(self, e):
return self.build_msg(e)
class IsPrimitive(SingleCheck):
def predicate(self, e):
return isinstance(e.type_info, generic.PrimitiveType)
def build_message(self, e):
return '{} is not a primitive type'.format(e.name)
class IsDouble(SingleCheck):
def predicate(self, e):
return e.type_info.name == "double"
def build_message(self, e):
return '{} is not a double'.format(e.name)
class IsPublic(SingleCheck):
def predicate(self, e):
return e.access_specifier == generic.AccessSpecifier.PUBLIC
def build_message(self, e):
return '{} is not public'.format(e.name)
class IsVirtual(SingleCheck):
def predicate(self, e):
return e.traits.is_virtual
def build_message(self, e):
return '{} is not virtual'.format(e.name)
class IsPointer(SingleCheck):
def predicate(self, e):
return e.traits.ref_type == generic.RefType.POINTER
def build_message(self, e):
return '{} is not pointer'.format(e.name)
class IsReference(SingleCheck):
def predicate(self, e):
return e.traits.ref_type is not None
def build_message(self, e):
return '{} is not reference'.format(e.name)
class IsAbstract(SingleCheck):
def __init__(self):
super(IsAbstract, self).__init__(CheckStatus.WARNING)
def predicate(self, e):
return e.traits.is_abstract
def build_message(self, e):
return '{} is not abstract'.format(e.name)
class IsStaticMethod(SingleCheck):
def __init__(self):
super(IsStaticMethod, self).__init__(CheckStatus.WARNING)
def predicate(self, e):
return e.traits.is_static
def build_message(self, e):
return '{} is not static'.format(e.name)
class IsConst(SingleCheck):
def __init__(self):
super(IsConst, self).__init__(CheckStatus.WARNING)
def predicate(self, e):
return e.traits.is_const
def build_message(self, e):
return '{} is not const'.format(e.name)
class IsNotConst(SingleCheck):
def __init__(self):
super(IsNotConst, self).__init__(CheckStatus.WARNING)
def predicate(self, e):
return not e.traits.is_const
def build_message(self, e):
return '{} is const'.format(e.name)
class IsLvalueRef(SingleCheck):
def __init__(self):
super(IsLvalueRef, self).__init__(CheckStatus.WARNING)
def predicate(self, e):
return e.traits.ref_type == generic.RefType.LVALUE
def build_message(self, e):
return '{} is not an lvalue reference'.format(e.name)
class ReturnsVoid(SingleCheck):
def __init__(self):
super(ReturnsVoid, self).__init__(CheckStatus.WARNING)
def predicate(self, e):
return e.returns.type_info.name == 'void'
def build_message(self, e):
return '{} does not have a void return type'.format(e.name)
class IsNotCharPtr(AbstractValidator):
def satisfies(self, entity: generic.DefinedClass) -> ValidatorResult:
is_char = True if hasattr(entity.type_info, 'name') and entity.type_info.name == 'char' else False
is_ptr = True if hasattr(entity.traits, 'ref_type') and entity.traits.ref_type == generic.RefType.POINTER else False
return ValidatorResult(
CheckStatus.CRITICAL if is_char and is_ptr else CheckStatus.OK,
[('char* is not a supported type for member \'{}\''.format(entity.name), entity.location)] if is_char and is_ptr else [],
1)
class IsContainer(AbstractValidator):
def __init__(self, container_type, pointed_type):
self.container_type = container_type
self.pointed_type = pointed_type
@staticmethod
def make_full_name(type_info):
return '::'.join([type_info.namespace, type_info.name]) \
if hasattr(type_info, 'namespace') and type_info.namespace else type_info.name
def satisfies(self, entity) -> ValidatorResult: # TODO rewrite
if not self.make_full_name(entity.type_info) == self.container_type:
return ValidatorResult(
CheckStatus.WARNING,
[('{} is not of type {}'.format(entity.name, self.container_type), entity.location)],
1)
if not (len(entity.type_info.template_args) == 1
and entity.type_info.template_args[0].name == self.pointed_type):
return ValidatorResult(
CheckStatus.WARNING,
[('{} is not a {} to {}'.format(entity.name, self.container_type, self.pointed_type), entity.location)],
2)
        return ValidatorResult(CheckStatus.OK, [], 2)
|
# -*- coding: utf-8 -*-
#
# comparison_schemes.py
#
"""
Features extraction script.
"""
__author__ = "Ahmed Albuni, Ngoc Huynh"
__email__ = "ahmed.albuni@gmail.com, ngoc.huynh.bao@nmbu.no"
import argparse
import logging
from csv import DictWriter
from os import listdir
from os.path import isfile, join
import numpy as np
import pandas as pd
import SimpleITK as sitk
import six
from radiomics import (
firstorder,
glcm,
gldm,
glrlm,
glszm,
ngtdm,
shape,
shape2D,
)
from tqdm import tqdm
import click
from .LBP3d import LBPFeature
# List of features groups available in pyradiomics package
# This list match the input csv parameters file
FEATURES_LIST = (
"shape",
"first_order",
"glszm",
"glrlm",
"ngtdm",
"gldm",
"glcm",
"LBP"
)
def extract_radiomics_features(
features_list,
bin_count,
images_path,
masks_path=None,
glcm_distance=None,
ngtdm_distance=None,
gldm_distance=None,
gldm_a=0,
output_file_name="output",
label=1,
bin_setting_name='binCount'
):
"""
:param features_list: list of features to be extracted
    :param bin_count: number of bins (or bin width, depending on bin_setting_name)
        used for gray-level discretization
:param images_path: The path that contains the images
:param masks_path: The path of the masks, masks name should match the
images names
:param glcm_distance: A list of distances for GLCM calculations,
default is [1]
:param ngtdm_distance: List of integers. This specifies the distances
between the center voxel and the neighbor, for which angles should be
generated.
:param gldm_distance: List of integers. This specifies the distances
between the center voxel and the neighbor, for which angles should be
generated.
:param gldm_a: integer, α cutoff value for dependence.
A neighbouring voxel with gray level j is considered
dependent on center voxel with gray level i if |i−j|≤α
    :param output_file_name: Name of the output csv file
    :param label: mask label (int) or list of labels to merge before extraction
    :param bin_setting_name: 'binCount' or 'binWidth', controls how bin_count is interpreted
    :return:
"""
if glcm_distance is None:
glcm_distance = [1]
if ngtdm_distance is None:
ngtdm_distance = [1]
if gldm_distance is None:
gldm_distance = [1]
list_of_images = [
f for f in listdir(images_path) if isfile(join(images_path, f))
]
bin_settings = {
bin_setting_name: bin_count
}
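    # One CSV row of features per image; the column set is fixed by the first image's row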
for i, img in tqdm(
enumerate(list_of_images), total=len(list_of_images), unit="files"
):
image_name = images_path + img
image = sitk.ReadImage(image_name)
row = dict()
row["Name"] = img
# If the mask is not available we create a dummy mask here that
# covers the whole image
if masks_path is None:
mask = np.zeros((sitk.GetArrayFromImage(image)).shape, int) + 1
mask = sitk.GetImageFromArray(mask)
else:
mask_name = masks_path + img
mask = sitk.ReadImage(mask_name)
if type(label) == list:
# merge all labels
# label = 1
labels = label.copy()
label = 1
mask_data = sitk.GetArrayFromImage(mask)
for label_val in labels:
mask_data[mask_data==label_val] = 1
# regenerate the sitk image obj (for voxel size, etc..)
new_mask = sitk.GetImageFromArray(mask_data)
new_mask.CopyInformation(mask)
mask = new_mask
# Shape features applied only when the mask is provided
if "shape" in features_list:
if len((sitk.GetArrayFromImage(image)).shape) == 2:
shape_2d_f = shape2D.RadiomicsShape2D(
image, mask, label=label, **bin_settings
)
row.update(get_selected_features(shape_2d_f, "shape_2d"))
else:
shape_f = shape.RadiomicsShape(
image, mask, label=label, **bin_settings
)
row.update(get_selected_features(shape_f, "shape"))
if "first_order" in features_list:
f_o_f = firstorder.RadiomicsFirstOrder(
image, mask, label=label, **bin_settings
)
row.update(get_selected_features(f_o_f, "first_order"))
if "glszm" in features_list:
glszm_f = glszm.RadiomicsGLSZM(image, mask, label=label, **bin_settings)
row.update(get_selected_features(glszm_f, "glszm"))
if "glrlm" in features_list:
glrlm_f = glrlm.RadiomicsGLRLM(image, mask, label=label, **bin_settings)
row.update(get_selected_features(glrlm_f, "glrlm"))
if "ngtdm" in features_list:
for d in ngtdm_distance:
ngtdm_f = ngtdm.RadiomicsNGTDM(
image, mask, distances=[d], label=label, **bin_settings
)
row.update(
get_selected_features(
ngtdm_f, "ngtdm", additional_param="_d_" + str(d)
)
)
if "gldm" in features_list:
for d in gldm_distance:
gldm_f = gldm.RadiomicsGLDM(
image,
mask,
distances=[d],
gldm_a=gldm_a,
label=label,
**bin_settings
)
row.update(
get_selected_features(
gldm_f, "gldm", additional_param="_d_" + str(d)
)
)
if "glcm" in features_list:
for d in glcm_distance:
glcm_f = glcm.RadiomicsGLCM(
image, mask, distances=[d], label=label, **bin_settings
)
row.update(
get_selected_features(
glcm_f, "glcm", additional_param="_d_" + str(d)
)
)
if "LBP" in features_list:
lbp_f = LBPFeature(image_name=sitk.GetArrayFromImage(image), mask_name=sitk.GetArrayFromImage(mask), label=label).feature_vector()
row.update(
get_selected_features(
lbp_f,"LBP"
)
)
if i == 0:
create_output_file(output_file_name + ".csv", row.keys())
add_row_of_data(output_file_name + ".csv", row.keys(), row)
def create_output_file(file_name, columns):
with open(file_name, "w", newline="") as f:
writer = DictWriter(f, fieldnames=columns)
writer.writeheader()
def add_row_of_data(file_name, columns, row):
with open(file_name, "a", newline="") as f:
writer = DictWriter(f, fieldnames=columns)
writer.writerow(row)
def get_selected_features(selected_feature, category, additional_param=None):
data = {}
if category == 'LBP':
for (key, val) in six.iteritems(selected_feature):
key = category + "_" + str(key)
data[key] = val
else:
selected_feature.execute()
for (key, val) in six.iteritems(selected_feature.featureValues):
key = category + "_" + key
if additional_param is not None:
key = key + additional_param
data[key] = val
return data
def imskaper_feature_extract():
logging.disable(logging.CRITICAL)
parser = argparse.ArgumentParser(description="Features extraction")
    parser.add_argument(
        "-file", type=str, help="CSV parameters file name and path"
    )
    parser.add_argument(
        "-glcm_distance",
        type=str,
        help="list of distances, comma separated. default: 1",
    )
    parser.add_argument(
        "-ngtdm_distance",
        type=str,
        help="list of distances, comma separated. default: 1",
    )
    parser.add_argument(
        "-gldm_distance",
        type=str,
        help="list of distances, comma separated. default: 1",
    )
    parser.add_argument(
        "-gldm_a", type=int, help="Cutoff value for dependence, default: 0"
    )
args = parser.parse_args()
    glcm_d = args.glcm_distance
    if glcm_d is not None:
        glcm_d = [int(d) for d in glcm_d.split(",")]  # distances must be ints
    ngtdm_d = args.ngtdm_distance
    if ngtdm_d is not None:
        ngtdm_d = [int(d) for d in ngtdm_d.split(",")]
    gldm_d = args.gldm_distance
    if gldm_d is not None:
        gldm_d = [int(d) for d in gldm_d.split(",")]
gldm_a = args.gldm_a
if gldm_a is None:
gldm_a = 0
if not args.file:
print('A path to the template file must be specified using the -file argument.')
print('imskaper_feature_extraction -file <path_to_csv_template>')
return
f_list = pd.read_csv(args.file)
for index, row in f_list.iterrows():
print("Output file: ", row["output_file_name"])
feature = []
for f in FEATURES_LIST:
if row[f] == 1:
feature.append(f)
if type(row["mask_dir"]) is not str:
mask_path = None
else:
mask_path = row["mask_dir"]
label = row.get('label', 1)
if type(label) is not int:
try:
label = int(label)
            except ValueError:
label = list(map(int, label.split(',')))
if row.get('bin_count') is None or np.isnan(row.get('bin_count')):
bin_setting = 'bin_width'
bin_setting_name = 'binWidth'
else:
bin_setting = 'bin_count'
bin_setting_name = 'binCount'
extract_radiomics_features(
feature,
row[bin_setting],
row["image_dir"],
mask_path,
output_file_name=row["output_file_name"],
glcm_distance=glcm_d,
ngtdm_distance=ngtdm_d,
gldm_distance=gldm_d,
gldm_a=gldm_a,
label=label,
bin_setting_name=bin_setting_name
)
if __name__ == "__main__":
imskaper_feature_extract()
|
def format_response(response):
return {
        "url": response.url,
"status_code": response.status_code,
"response_time": f"{response.elapsed.total_seconds()}s."
}
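# Example usage (assuming a `requests` Response object, which provides
# .url, .status_code and .elapsed):
#   import requests
#   print(format_response(requests.get("https://example.com")))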
|
from ..IReg import IReg
class R1391(IReg):
def __init__(self):
self._header = ['REG',
'DT_REGISTRO',
'QTD_MOID',
'ESTOQ_INI',
'QTD_PRODUZ',
'ENT_ANID_HID',
'OUTR_ENTR',
'PERDA',
'CONS',
'SAI_ANI_HID',
'SAIDAS',
'ESTQ_FIN',
'ESTQ_INI_MEL',
'PROD_DIA_MEL',
'UTIL_MEL',
'PROD_ALC_MEL',
'OBS',
'COD_ITEM',
'TP_RESIDUO',
'QTD_RESIDUO']
self._hierarchy = "3"
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import sphinx_rtd_theme
# import foresight
# -- Project information -----------------------------------------------------
project = 'torch-foresight'
copyright = '2019, Eric J. Michaud'
author = 'Eric J. Michaud'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx_rtd_theme',
'sphinx.ext.imgmath',
'sphinx.ext.viewcode',
'autoapi.extension',
'sphinx.ext.napoleon'
]
autoapi_dirs = ['../foresight']
autoapi_add_toctree_entry = False
autoapi_generate_api_docs = False
# imgmath settings
imgmath_image_format = 'svg'
imgmath_font_size = 14
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
html_logo = 'figures/logo.png'
html_theme_options = {
'logo_only': True
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
master_doc = 'index'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
imgmath_latex_preamble = r'''
\usepackage{algorithm}
\usepackage{algorithmic}
\usepackage{cancel}
\usepackage[verbose=true,letterpaper]{geometry}
\geometry{
textheight=12in,
textwidth=6.5in,
top=1in,
headheight=12pt,
headsep=25pt,
footskip=30pt
}
\newcommand{\E}{{\mathrm E}}
\newcommand{\underE}[2]{\underset{\begin{subarray}{c}#1 \end{subarray}}{\E}\left[ #2 \right]}
\newcommand{\Epi}[1]{\underset{\begin{subarray}{c}\tau \sim \pi \end{subarray}}{\E}\left[ #1 \right]}
\newcommand{\norm}[1]{\left\lVert#1\right\rVert}
\newcommand{\C}{\mathbb{C}}
\newcommand{\F}{\mathbb{F}}
\newcommand{\N}{\mathbb{N}}
\newcommand{\Q}{\mathbb{Q}}
\newcommand{\R}{\mathbb{R}}
\newcommand{\Z}{\mathbb{Z}}
\newcommand{\B}{\mathcal{B}}
'''
def setup(app):
    app.add_css_file('css/modify.css')
|
from __future__ import absolute_import
import os
from .. import utils, platform
from netlib import http_auth, certutils
from .primitives import ConstUpstreamServerResolver, TransparentUpstreamServerResolver
TRANSPARENT_SSL_PORTS = [443, 8443]
CONF_BASENAME = "mitmproxy"
CONF_DIR = "~/.mitmproxy"
class ProxyConfig:
def __init__(self, confdir=CONF_DIR, clientcerts=None,
no_upstream_cert=False, body_size_limit=None, get_upstream_server=None,
http_form_in="absolute", http_form_out="relative", authenticator=None,
                 ciphers=None, certs=None, certforward=False
):
self.ciphers = ciphers
self.clientcerts = clientcerts
self.no_upstream_cert = no_upstream_cert
self.body_size_limit = body_size_limit
self.get_upstream_server = get_upstream_server
self.http_form_in = http_form_in
self.http_form_out = http_form_out
self.authenticator = authenticator
self.confdir = os.path.expanduser(confdir)
self.certstore = certutils.CertStore.from_store(self.confdir, CONF_BASENAME)
        for spec, cert in (certs or []):
self.certstore.add_cert_file(spec, cert)
self.certforward = certforward
def process_proxy_options(parser, options):
body_size_limit = utils.parse_size(options.body_size_limit)
c = 0
http_form_in, http_form_out = "absolute", "relative"
get_upstream_server = None
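    # Count the mutually exclusive proxy modes that were requested; more than one is an error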
if options.transparent_proxy:
c += 1
if not platform.resolver:
return parser.error("Transparent mode not supported on this platform.")
get_upstream_server = TransparentUpstreamServerResolver(platform.resolver(), TRANSPARENT_SSL_PORTS)
http_form_in, http_form_out = "relative", "relative"
if options.reverse_proxy:
c += 1
get_upstream_server = ConstUpstreamServerResolver(options.reverse_proxy)
http_form_in, http_form_out = "relative", "relative"
if options.upstream_proxy:
c += 1
get_upstream_server = ConstUpstreamServerResolver(options.upstream_proxy)
http_form_in, http_form_out = "absolute", "absolute"
if options.manual_destination_server:
c += 1
get_upstream_server = ConstUpstreamServerResolver(options.manual_destination_server)
if c > 1:
return parser.error("Transparent mode, reverse mode, upstream proxy mode and "
"specification of an upstream server are mutually exclusive.")
if options.http_form_in:
http_form_in = options.http_form_in
if options.http_form_out:
http_form_out = options.http_form_out
if options.clientcerts:
options.clientcerts = os.path.expanduser(options.clientcerts)
if not os.path.exists(options.clientcerts) or not os.path.isdir(options.clientcerts):
return parser.error(
"Client certificate directory does not exist or is not a directory: %s" % options.clientcerts
)
if (options.auth_nonanonymous or options.auth_singleuser or options.auth_htpasswd):
if options.auth_singleuser:
if len(options.auth_singleuser.split(':')) != 2:
return parser.error("Invalid single-user specification. Please use the format username:password")
username, password = options.auth_singleuser.split(':')
password_manager = http_auth.PassManSingleUser(username, password)
elif options.auth_nonanonymous:
password_manager = http_auth.PassManNonAnon()
elif options.auth_htpasswd:
try:
password_manager = http_auth.PassManHtpasswd(options.auth_htpasswd)
            except ValueError as v:
                return parser.error(str(v))
authenticator = http_auth.BasicProxyAuth(password_manager, "mitmproxy")
else:
authenticator = http_auth.NullProxyAuth(None)
certs = []
for i in options.certs:
parts = i.split("=", 1)
if len(parts) == 1:
parts = ["*", parts[0]]
parts[1] = os.path.expanduser(parts[1])
if not os.path.exists(parts[1]):
parser.error("Certificate file does not exist: %s"%parts[1])
certs.append(parts)
return ProxyConfig(
clientcerts = options.clientcerts,
body_size_limit = body_size_limit,
no_upstream_cert = options.no_upstream_cert,
get_upstream_server = get_upstream_server,
confdir = options.confdir,
http_form_in = http_form_in,
http_form_out = http_form_out,
authenticator = authenticator,
ciphers = options.ciphers,
certs = certs,
certforward = options.certforward,
)
def ssl_option_group(parser):
group = parser.add_argument_group("SSL")
group.add_argument(
"--cert", dest='certs', default=[], type=str,
metavar = "SPEC", action="append",
help='Add an SSL certificate. SPEC is of the form "[domain=]path". '\
'The domain may include a wildcard, and is equal to "*" if not specified. '\
'The file at path is a certificate in PEM format. If a private key is included in the PEM, '\
'it is used, else the default key in the conf dir is used. Can be passed multiple times.'
)
group.add_argument(
"--client-certs", action="store",
type=str, dest="clientcerts", default=None,
help="Client certificate directory."
)
group.add_argument(
"--ciphers", action="store",
type=str, dest="ciphers", default=None,
help="SSL cipher specification."
)
group.add_argument(
"--cert-forward", action="store_true",
dest="certforward", default=False,
help="Simply forward SSL certificates from upstream."
)
group.add_argument(
"--no-upstream-cert", default=False,
action="store_true", dest="no_upstream_cert",
help="Don't connect to upstream server to look up certificate details."
    )
|
from datetime import date
from staff.models import Staff
from django.views.generic import ListView
from django.views.generic import DeleteView
from survey.models import Surveyordermodel
from django.shortcuts import render, redirect
from django.urls import reverse
from collar.models import Customers
from .forms import LinkCreateForm
from .utils import createHash
from .models import LinkHash
from .utils import PDFView
from vertexLite.models import Vertexliteordermodel
from .utils import accept_order, the_search
from survey.forms import SurveyUpdateForm
from survey.forms import SalesInputSurveyCreateForm
from .forms import StaffChoiceForm
from .forms import SearchForm
from survey.models import SalesSalesinputsurvey
from .utils import ProdRecView
from prioListe.models import PriolisteAssignment
from vertexLite.forms import VertexLiteUpdateForm
from survey.forms import ProdRecCreateForm
from .utils import get_model_objects
from .utils import OriginPDFView
from miniFawn.models import Minifawnordermodel
from trapTransmitter.models import Traptransmitterordermodel
from vertexLite.models import SalesSalesinputvertexlite
from vertexLite.forms import SalesInputVertexLiteCreateForm
from vertexLite.forms import VertexLiteProdRecForm
from miniFawn.forms import MiniFawnUpdateForm
from miniFawn.forms import SalesInputMiniFawnCreateForm
from miniFawn.models import SalesSalesinputminifawn
from miniFawn.forms import MiniFawnProdRecForm
from trapTransmitter.forms import TrapTransmitterUpdateForm
from trapTransmitter.forms import SalesInputTrapTransCreateForm
from trapTransmitter.forms import TrapTransmitterProdRecForm
from trapTransmitter.models import SalesSalesinputtraptransmitter
from .extra import CollarDetailView
from .extra import CreatePrioEntryByOrder
from .extra import production_record_update
from .extra import ProductionRecordInitial
from .extra import CollarUpdateAllFieldsView
from .extra import CreateProductionRecord
def create_link(request):
context = {}
if request.method != 'POST':
        context = {
            'form': LinkCreateForm()
        }
else:
form = LinkCreateForm(request.POST)
customerMail = form['customerEmail'].value()
customerId = form['contact_person'].value()
staffid = form['staff'].value()
staff = Staff.objects.get(pk=staffid)
operation_Number = form['operation_Number'].value()
collarType = form['collarType'].value()
communicationType = form['communicationType'].value()
companyType = form['company'].value()
if form.is_valid():
linkHash = []
if collarType == 'Survey':
linkHash.append('survey/' + create_hash_and_link(Surveyordermodel, staff, staffid, customerId, operation_Number, communicationType, companyType))
elif collarType == 'VertexLite':
linkHash.append('vertex_lite/' + create_hash_and_link(Vertexliteordermodel, staff, staffid, customerId, operation_Number, communicationType, companyType))
elif collarType == 'MiniFawn':
linkHash.append('miniFawn/' + create_hash_and_link(Minifawnordermodel, staff, staffid, customerId, operation_Number, communicationType, companyType))
elif collarType == 'TrapTransmitter':
linkHash.append('trapTransmitter/' + create_hash_and_link(Traptransmitterordermodel, staff, staffid, customerId, operation_Number, communicationType, companyType))
else:
pass
#create_hash_and_link(Vertexordermodel, staff, staffid, customerId)
context = {
'link': linkHash,
'collarType': collarType,
'customerMail': customerMail
}
else:
context = {
'form': form,
}
return render(request, 'link_create.html', context)
def overview(request):
return render(request, 'overview.html')
def create_hash_and_link(model, staff, staffid, customerId, operation_Number, communicationType, companyType):
    orderId = pre_create_survey_order_model(model, staffid, customerId, operation_Number, communicationType, companyType)
linkHash = (createHash() + staff.initialies + str(orderId))
link = LinkHash.objects.create(hash=linkHash)
link.save(using='order_db')
return linkHash
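# The hash embeds the staff initials and the pre-created order id, so the customer
# link can later be resolved back to the right order record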
def pre_create_survey_order_model(model, staffid, customerId, operation_Number, communicationType, companyType):
    obj = model()
obj.as_post = False
obj.as_email = False
obj.customer_faktura_id = int(customerId)
obj.airtime_contract = ''
try:
customers = Customers.objects.get(id_customer=customerId)
obj.contacts_faktura_id = customers.cust_ref_number
    except Customers.DoesNotExist:
pass
obj.same_addr = True
obj.customer_staff = Staff.objects.get(pk=staffid).initialies
obj.external_dropoff = False
if communicationType == 'GL':
obj.globalstar = True
else:
obj.globalstar = False
if communicationType == 'IR':
obj.iridium = True
else:
obj.iridium = False
try:
if communicationType == 'GSM':
obj.gsm = True
obj.gsm_vectronic_sim = True
else:
obj.gsm = False
except:
pass
try:
if communicationType == 'SOB':
obj.store_on_board = True
else:
obj.store_on_board = False
except:
pass
obj.belt_labeling = False
obj.belt_plates = False
obj.created_at = date.today()
obj.order_acceptet = False
try:
obj.operation_number = operation_Number
except:
pass
obj.operation_Number = operation_Number
obj.inc_or_gmbh = companyType
obj.save(using='order_db')
return obj.pk
def accept_survey_order(request, pk):
return accept_order(request, pk, Surveyordermodel, 'sales:listsorders')
def accept_vertex_lite_order(request, pk):
return accept_order(request, pk, Vertexliteordermodel, success_url='sales:listsorders')
def accept_mini_fawn_order(request, pk):
return accept_order(request, pk, Minifawnordermodel, success_url='sales:listsorders')
def accept_trap_transmitter_order(request, pk):
return accept_order(request, pk, Traptransmitterordermodel, success_url='sales:listsorders')
def search(request, redirect_url):
print(request.path)
if 'Accepted' in request.path:
new_redirect_url = 'sales:listdoneorders'
title = 'Accepted Orders'
else:
new_redirect_url = 'sales:listsorders'
title = 'Open Orders'
try:
return the_search(request, [Surveyordermodel, Vertexliteordermodel,
Traptransmitterordermodel, Minifawnordermodel], new_redirect_url, title)
except ValueError:
return redirect(reverse(new_redirect_url))
def sales_input(request, pk):
redirect_url = 'sales:listvertexlites'
form = SalesInputSurveyCreateForm(request.POST)
    if form.is_valid():
        form.save()
    # redirect even when the form is invalid instead of returning None
    return redirect(reverse(redirect_url))
class AllOrders(ListView):
template_name = 'list_of_collars.html'
def get_queryset(self):
return Surveyordermodel.objects.filter(order_acceptet=False)
def get_context_data(self, **kwargs):
context = super(AllOrders, self).get_context_data(**kwargs)
context['form_staffs'] = StaffChoiceForm
context['form_search'] = SearchForm
context['title'] = 'Running Orders'
context['list_objects'] = get_model_objects()
return context
class AllDoneOrders(AllOrders):
template_name = 'list_of_collars.html'
def get_queryset(self):
return Surveyordermodel.objects.filter(order_acceptet=True)
def get_context_data(self, **kwargs):
context = super(AllOrders, self).get_context_data(**kwargs)
context['form_staffs'] = StaffChoiceForm
context['form_search'] = SearchForm
context['title'] = 'Accepted Orders'
context['list_objects'] = get_model_objects(True)
return context
class SurveyUpdateView(CollarUpdateAllFieldsView):
def __init__(self):
super(SurveyUpdateView, self).__init__(model=Surveyordermodel,
input_model=SalesSalesinputsurvey,
template_name='sellsSurveyForm.html',
form_class=SurveyUpdateForm,
)
class VertexLiteUpdateView(CollarUpdateAllFieldsView):
def __init__(self):
super(VertexLiteUpdateView, self).__init__(model=Vertexliteordermodel,
input_model=SalesSalesinputvertexlite,
template_name='sellsVertexLiteForm.html',
form_class=VertexLiteUpdateForm,
)
class MiniFawnUpdateView(CollarUpdateAllFieldsView):
def __init__(self):
super(MiniFawnUpdateView, self).__init__(model=Minifawnordermodel,
input_model=SalesSalesinputminifawn,
template_name='sellsMiniFawnForm.html',
form_class=MiniFawnUpdateForm,
)
class TrapTransmitterUpdateView(CollarUpdateAllFieldsView):
def __init__(self):
super(TrapTransmitterUpdateView, self).__init__(model=Traptransmitterordermodel,
input_model=SalesSalesinputtraptransmitter,
template_name='sellsTrapTransForm.html',
form_class=TrapTransmitterUpdateForm,
)
class SurveyDetailView(CollarDetailView):
def __init__(self):
super(SurveyDetailView, self).__init__(model=Surveyordermodel,
template_name='collarDetail.html')
class VertexLiteDetailView(CollarDetailView):
def __init__(self):
super(VertexLiteDetailView, self).__init__(model=Vertexliteordermodel,
template_name='collarDetail.html')
class MiniFawnDetailView(CollarDetailView):
def __init__(self):
super(MiniFawnDetailView, self).__init__(model=Minifawnordermodel,
template_name='collarDetail.html')
class TrapTransmitterDetailView(CollarDetailView):
def __init__(self):
super(TrapTransmitterDetailView, self).__init__(model=Traptransmitterordermodel,
template_name='collarDetail.html')
class SurveyPDFView(PDFView):
def __init__(self):
super(SurveyPDFView, self).__init__(obj=Surveyordermodel,
template_name='surveyHtml4Pdf.html')
class VertexLitePDFView(PDFView):
def __init__(self):
super(VertexLitePDFView, self).__init__(obj=Vertexliteordermodel,
template_name='vertexLiteFinalOrderPdf.html')
class MiniFawnPDFView(PDFView):
def __init__(self):
super(MiniFawnPDFView, self).__init__(obj=Minifawnordermodel,
template_name='miniFawnFinalOrderPdf.html')
class TrapTransPDFView(PDFView):
def __init__(self):
super(TrapTransPDFView, self).__init__(obj=Traptransmitterordermodel,
template_name='trapTransFinalOrderPdf.html')
class SurveyOriginPDFView(OriginPDFView):
def __init__(self):
super(SurveyOriginPDFView, self).__init__(model=Surveyordermodel,
template_name='surveyOriginHtml4Pdf.html')
class VertexLiteOriginPDFView(OriginPDFView):
def __init__(self):
super(VertexLiteOriginPDFView, self).__init__(model=Vertexliteordermodel,
template_name='vertexLiteOriginOrderPdf.html')
class MiniFawnOriginPDFView(OriginPDFView):
def __init__(self):
super(MiniFawnOriginPDFView, self).__init__(model=Minifawnordermodel,
template_name='miniFawnOriginOrderPdf.html')
class TrapTransOriginPDFView(OriginPDFView):
def __init__(self):
super(TrapTransOriginPDFView, self).__init__(model=Traptransmitterordermodel,
template_name='trapTransOriginOrderPdf.html')
class ProdRecPDFView(ProdRecView):
def __init__(self):
super(ProdRecPDFView, self).__init__(obj=Surveyordermodel,
template_name='prod_rec_main.html',
test=True,
order_obj=Surveyordermodel,
input_obj=SalesSalesinputsurvey,
from_prio=False,
)
class VtxlProdRecPDFView(ProdRecView):
def __init__(self):
super(VtxlProdRecPDFView, self).__init__(obj=Vertexliteordermodel,
template_name='prod_rec_vtxl.html',
test=True,
order_obj=Vertexliteordermodel,
input_obj=SalesSalesinputvertexlite,
from_prio=False,
)
class MfProdRecPDFView(ProdRecView):
def __init__(self):
super(MfProdRecPDFView, self).__init__(obj=Minifawnordermodel,
template_name='prod_rec_mf.html',
test=True,
order_obj=Minifawnordermodel,
input_obj=SalesSalesinputminifawn,
from_prio=False,
minifawn=True
)
class TrapTransProdRecPDFView(ProdRecView):
def __init__(self):
super(TrapTransProdRecPDFView, self).__init__(obj=Traptransmitterordermodel,
template_name='prod_rec_tt3.html',
test=True,
order_obj=Traptransmitterordermodel,
input_obj=SalesSalesinputtraptransmitter,
from_prio=False,
)
class ProdRecFromPrioListePDFView(ProdRecView):
def __init__(self):
super(ProdRecFromPrioListePDFView, self).__init__(obj=PriolisteAssignment,
template_name='prod_rec_main.html',
from_prio=True,
order_obj=Surveyordermodel,
input_obj=SalesSalesinputsurvey,
)
class SurveyProductionRecord(CreateProductionRecord):
def __init__(self):
super(SurveyProductionRecord, self).__init__(model=SalesSalesinputsurvey,
order_model=Surveyordermodel,
form_class=SalesInputSurveyCreateForm,
template_name='surveyProdRec.html')
class VertexLiteProductionRecord(CreateProductionRecord):
def __init__(self):
super(VertexLiteProductionRecord, self).__init__(model=SalesSalesinputvertexlite,
order_model=Vertexliteordermodel,
form_class=SalesInputVertexLiteCreateForm,
template_name='surveyProdRec.html')
class MiniFawnProductionRecord(CreateProductionRecord):
def __init__(self):
super(MiniFawnProductionRecord, self).__init__(model=SalesSalesinputminifawn,
order_model=Minifawnordermodel,
form_class=SalesInputMiniFawnCreateForm,
template_name='surveyProdRec.html')
class TrapTransmitterProductionRecord(CreateProductionRecord):
def __init__(self):
super(TrapTransmitterProductionRecord, self).__init__(model=SalesSalesinputtraptransmitter,
order_model=Traptransmitterordermodel,
form_class=SalesInputTrapTransCreateForm,
template_name='surveyProdRec.html')
class SurveyProductionRecordInitial(ProductionRecordInitial):
def __init__(self):
super(SurveyProductionRecordInitial, self).__init__(model=SalesSalesinputsurvey,
order_model=Surveyordermodel,
form_class=ProdRecCreateForm,
template='surveyProdRec.html')
class VertexLiteProductionRecordInitial(ProductionRecordInitial):
def __init__(self):
super(VertexLiteProductionRecordInitial, self).__init__(model=SalesSalesinputvertexlite,
order_model=Vertexliteordermodel,
form_class=VertexLiteProdRecForm,
template='surveyProdRec.html')
class MiniFawnProductionRecordInitial(ProductionRecordInitial):
def __init__(self):
super(MiniFawnProductionRecordInitial, self).__init__(model=SalesSalesinputminifawn,
order_model=Minifawnordermodel,
form_class=MiniFawnProdRecForm,
template='surveyProdRec.html')
class TrapTransmitterProductionRecordInitial(ProductionRecordInitial):
def __init__(self):
super(TrapTransmitterProductionRecordInitial, self).__init__(model=SalesSalesinputtraptransmitter,
order_model=Traptransmitterordermodel,
form_class=TrapTransmitterProdRecForm,
template='surveyProdRec.html')
# class SurveyProductionRecordUpdate(ProductionRecordUpdate):
# def __init__(self):
# super(SurveyProductionRecordUpdate, self).__init__(model=SalesSalesinputsurvey,
# order_model=Surveyordermodel,
# form_class=SalesInputSurveyCreateForm,
# template='sellsSurveyForm.html')
class SurveyModelDelete(DeleteView):
def __init__(self):
super(SurveyModelDelete, self).__init__(model=Surveyordermodel)
class VertexLiteModelDelete(DeleteView):
def __init__(self):
super(VertexLiteModelDelete, self).__init__(model=Vertexliteordermodel)
class MiniFawnModelDelete(DeleteView):
def __init__(self):
super(MiniFawnModelDelete, self).__init__(model=Minifawnordermodel)
class TrapTransmitterModelDelete(DeleteView):
def __init__(self):
super(TrapTransmitterModelDelete, self).__init__(model=Traptransmitterordermodel)
class CreateSurveyPrioEntryByOrder(CreatePrioEntryByOrder):
def get_hardware_string(self):
order = self.order
try:
numbers_of_collars = order.number_of_collars.split('$')
battery_size = order.battery_size.split('$')
hardware_string = ''
for i in range(len(numbers_of_collars) - 1):
if order.iridium:
if order.external_dropoff:
hardware_string += f'{numbers_of_collars[i]}x Survey IR {battery_size[i]} mit DO<br>'
else:
hardware_string += f'{numbers_of_collars[i]}x Survey IR {battery_size[i]} <br>'
else:
if order.external_dropoff:
hardware_string += f'{numbers_of_collars[i]}x Survey GL {battery_size[i]} mit DO<br>'
else:
hardware_string += f'{numbers_of_collars[i]}x Survey GL {battery_size[i]} <br>'
except AttributeError:
hardware_string = ''
return hardware_string
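# Sketch of the string built above, assuming number_of_collars and battery_size
# are '$'-separated with a trailing '$' (values below are hypothetical):
#   '5x Survey IR 2D mit DO<br>3x Survey IR 1C mit DO<br>'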
def __init__(self):
super(CreateSurveyPrioEntryByOrder, self).__init__(order_model=Surveyordermodel,
hardware_string=self.get_hardware_string)
class CreateVertexLitePrioEntryByOrder(CreatePrioEntryByOrder):
def get_hardware_string(self):
order = self.order
try:
numbers_of_collars = order.number_of_collars.split('$')
battery_size = order.battery_size.split('$')
hardware_string = ''
for i in range(len(numbers_of_collars) - 1):
if order.iridium:
if order.external_dropoff:
hardware_string += f'{numbers_of_collars[i]}x Vertex Lite IR {battery_size[i]} mit ext DO<br>'
elif order.internal_dropoff:
hardware_string += f'{numbers_of_collars[i]}x Vertex Lite IR {battery_size[i]} mit DO<br>'
elif order.store_on_board:
hardware_string += f'{numbers_of_collars[i]}x Vertex Lite IR {battery_size[i]} mit store on board DO<br>'
else:
hardware_string += f'{numbers_of_collars[i]}x Vertex Lite IR {battery_size[i]} <br>'
elif order.gsm:
if order.external_dropoff:
hardware_string += f'{numbers_of_collars[i]}x Vertex Lite GSM {battery_size[i]} mit ext DO<br>'
elif order.internal_dropoff:
hardware_string += f'{numbers_of_collars[i]}x Vertex Lite GSM {battery_size[i]} mit DO<br>'
elif order.store_on_board:
hardware_string += f'{numbers_of_collars[i]}x Vertex Lite GSM {battery_size[i]} mit store on board DO<br>'
else:
hardware_string += f'{numbers_of_collars[i]}x Vertex Lite GSM {battery_size[i]} <br>'
else:
if order.external_dropoff:
hardware_string += f'{numbers_of_collars[i]}x Vertex Lite GL {battery_size[i]} mit ext DO<br>'
elif order.internal_dropoff:
hardware_string += f'{numbers_of_collars[i]}x Vertex Lite GL {battery_size[i]} mit DO<br>'
elif order.store_on_board:
hardware_string += f'{numbers_of_collars[i]}x Vertex Lite GL {battery_size[i]} mit store on board DO<br>'
else:
hardware_string += f'{numbers_of_collars[i]}x Vertex Lite GL {battery_size[i]} <br>'
except AttributeError:
hardware_string = ''
return hardware_string
def __init__(self):
super(CreateVertexLitePrioEntryByOrder, self).__init__(order_model=Vertexliteordermodel,
hardware_string=self.get_hardware_string)
class CreateMiniFawnPrioEntryByOrder(CreatePrioEntryByOrder):
def get_hardware_string(self):
order = self.order
try:
numbers_of_collars = order.number_of_collars.split('$')
battery_size = order.battery_size.split('$')
hardware_string = ''
for i in range(len(numbers_of_collars) - 1):
hardware_string += f'{numbers_of_collars[i]}x Mini Fawn GL {battery_size[i]} <br>'
except AttributeError:
hardware_string = ''
return hardware_string
def __init__(self):
super(CreateMiniFawnPrioEntryByOrder, self).__init__(order_model=Minifawnordermodel,
hardware_string=self.get_hardware_string)
class CreateTrapTransmitterPrioEntryByOrder(CreatePrioEntryByOrder):
def get_hardware_string(self):
order = self.order
try:
numbers_of_collars = order.number_of_collars.split('$')
battery_size = order.battery_size.split('$')
hardware_string = ''
for i in range(len(numbers_of_collars) - 1):
if order.iridium:
hardware_string += f'{numbers_of_collars[i]}x TT3 IR {battery_size[i]} <br>'
else:
hardware_string += f'{numbers_of_collars[i]}x TT3 GS {battery_size[i]} <br>'
except AttributeError:
hardware_string = ''
return hardware_string
def __init__(self):
super(CreateTrapTransmitterPrioEntryByOrder, self).__init__(order_model=Traptransmitterordermodel,
hardware_string=self.get_hardware_string)
def survey_production_record_update(request, pk):
return production_record_update(request, pk, SalesSalesinputsurvey, 'updatesurveys')
def vertex_lite_production_record_update(request, pk):
return production_record_update(request, pk, SalesSalesinputvertexlite, 'updatesurveys')
def mini_fawn_production_record_update(request, pk):
return production_record_update(request, pk, SalesSalesinputminifawn, 'updatesurveys')
def trap_trans_production_record_update(request, pk):
return production_record_update(request, pk, SalesSalesinputtraptransmitter, 'updatesurveys')
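# Hypothetical urls.py wiring for the update helpers above (paths and names assumed):
#   path('prodrec/survey/<int:pk>/update/', survey_production_record_update, name='survey_prodrec_update'),
#   path('prodrec/vertexlite/<int:pk>/update/', vertex_lite_production_record_update, name='vtxl_prodrec_update'),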
|
import types
import torch
import torch.nn as nn
from torchvision import models
from torchvision.models.mnasnet import MNASNet
from .build import META_ARCH_REGISTRY
__all__ = [
'MNASNet',
'MNASNet0_5',
'MNASNet0_75',
'MNASNet1_0',
'MNASNet1_3',
]
class MNASNet(nn.Module):
# Wrapper that deliberately shadows the torchvision MNASNet imported above,
# re-exposing the model's attributes and splitting its layers into named stages
def __init__(self, model):
super(MNASNet, self).__init__()
for key, val in model.__dict__.items():
self.__dict__[key] = val
self.stem = model.layers[:8]
self.layer1 = model.layers[8]
self.layer2 = model.layers[9]
self.layer3 = model.layers[10]
self.layer4 = model.layers[11]
self.layer5 = model.layers[12]
self.layer6 = model.layers[13]
self.layer7 = model.layers[14:]
self.g_avg_pool = nn.AdaptiveAvgPool2d(1)
self.last_linear = model.classifier
def features(self, x): # b*3*64*64
out = self.stem(x) # b*16*32*32
out = self.layer1(out) # b*16*16*16
out = self.layer2(out) # b*24*8*8
out = self.layer3(out) # b*40*4*4
out = self.layer4(out) # b*48*4*4
out = self.layer5(out) # b*96*2*2
out = self.layer6(out) # b*160*2*2
out = self.layer7(out) # b*1280*2*2
return out
def logits(self, x):
out = x.mean([2, 3]) # global average pooling over spatial dims (equivalent to g_avg_pool)
out = self.last_linear(out)
return out
def forward(self, x):
out = self.features(x)
out = self.logits(out)
return out
def generate_model(cfg, name):
pretrained=cfg.model.pretrained
classes = cfg.model.classes
if 'dropout' in cfg.model:
dropout = cfg.model.dropout
else:
dropout = 0.2
model = eval(f"models.{name}(pretrained={pretrained})")
if classes != 1000:
in_features = model.classifier[1].in_features
model.classifier = nn.Sequential(
nn.Dropout(p=dropout, inplace=True),
nn.Linear(in_features, classes, bias=False))
return MNASNet(model)
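# A minimal smoke-test sketch (hypothetical config object; the real cfg is expected
# to come from the project's config system, where `'dropout' in cfg.model` works):
#   class _Cfg(dict):
#       __getattr__ = dict.__getitem__
#   cfg = _Cfg(model=_Cfg(pretrained=False, classes=10, dropout=0.2))
#   net = generate_model(cfg, 'mnasnet1_0')
#   net(torch.randn(2, 3, 64, 64)).shape  # torch.Size([2, 10])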
@META_ARCH_REGISTRY.register()
def MNASNet0_5(cfg):
return generate_model(cfg, 'mnasnet0_5')
@META_ARCH_REGISTRY.register()
def MNASNet0_75(cfg):
return generate_model(cfg, 'mnasnet0_75')
@META_ARCH_REGISTRY.register()
def MNASNet1_0(cfg):
return generate_model(cfg, 'mnasnet1_0')
@META_ARCH_REGISTRY.register()
def MNASNet1_3(cfg):
return generate_model(cfg, 'mnasnet1_3') |
import pytest
from protoactor.actor import PID
from protoactor.actor.message_envelope import MessageEnvelope
from protoactor.actor.message_header import MessageHeader
@pytest.fixture()
def message_envelope():
message = "test"
sender = PID()
sender.address = "test"
sender.id = "test"
header = MessageHeader()
return MessageEnvelope(message, sender, header)
def test_wrap(message_envelope):
envelope = MessageEnvelope.wrap(message_envelope.message)
assert message_envelope.message == envelope.message
def test_create_new_message_envelope_with_sender(message_envelope):
sender = PID()
sender.address = "test"
sender.id = "test"
envelope = message_envelope.with_sender(sender)
assert message_envelope.message == envelope.message
assert sender == envelope.sender
assert message_envelope.header == envelope.header
def test_create_new_message_envelope_with_message(message_envelope):
message = "test message"
envelope = message_envelope.with_message(message)
assert message == envelope.message
assert message_envelope.sender == envelope.sender
assert message_envelope.header == envelope.header
def test_create_new_message_envelope_with_header_based_on_key_value_pair_collection(message_envelope):
collection = {"Test Key": "Test Value", "Test Key 1": "Test Value 1"}
envelope = message_envelope.with_header(collection)
assert envelope.header["Test Key"] == "Test Value"
def test_create_new_message_envelope_with_header_based_on_message_header(message_envelope):
key = "Test Key"
value = "Test Value"
message_header = MessageHeader({key: value})
envelope = message_envelope.with_header(message_header)
assert envelope.header[key] == value
def test_create_new_message_envelope_with_header_based_on_key_value_pair(message_envelope):
key = "Test Key"
value = "Test Value"
envelope = message_envelope.with_header(key=key, value=value)
assert envelope.header[key] == value
def test_unwrap(message_envelope):
message, sender, header = MessageEnvelope.unwrap(message_envelope)
assert message == message_envelope.message
assert sender == message_envelope.sender
assert header == message_envelope.header
def test_unwrap_header(message_envelope):
assert 0 == len(message_envelope.header)
def test_unwrap_message(message_envelope):
message = MessageEnvelope.unwrap_message(message_envelope)
assert message == message_envelope.message
def test_unwrap_sender(message_envelope):
sender = MessageEnvelope.unwrap_sender(message_envelope)
assert sender == message_envelope.sender
|
import subprocess, re
import json
import itertools
from time import perf_counter
import numpy as np
with open("true_cardinality.json","r") as j:
true_cardinalities = json.load(j)
with open("queries.json","r") as j2:
queries = json.load(j2)
with open("attr_range.json","r") as j3:
attr_range = json.load(j3)
i = 43
query = queries[i]
order = dict()
print(f"Predicting cardinality for query {i}: {query}")
card_start_t = perf_counter()
for q in query.keys():
limit = attr_range[q]
# filter out-of-range values; removing from a list while iterating over it would skip elements
query[q] = [num for num in query[q] if 0 <= num < limit]
for q in list(query.keys()):
attr = query[q]
limit = attr_range[q]
if len(attr) == limit:
query.pop(q)
for q in list(query.keys()):
if query[q] == []:
query.pop(q) # dict has no remove(); pop over a snapshot of the keys
vals = list(query.values())
output = subprocess.getoutput("~/Desktop/dice/Dice.native bayescard_test.dice").split("\n")[1:-2]
probs = 0
for l in output:
line = l.split("\t")
prob = float(line[-1].strip())
combo = re.findall("[0-9]+", line[0])
combo = [int(n) for n in combo]
check = all(combo[i] in vals[i] for i in range(len(vals)))
if check:
probs += prob
card_end_t = perf_counter()
latency_ms = (card_end_t-card_start_t) * 1000
cardinality_predict = probs * 2458285
cardinality_true = true_cardinalities[i]
# print(f"cardinality predict: {cardinality_predict} and cardinality true: {cardinality_true}")
if cardinality_predict == 0 and cardinality_true == 0:
q_error = 1.0
elif np.isnan(cardinality_predict) or cardinality_predict == 0:
cardinality_predict = 1
q_error = max(cardinality_predict / cardinality_true, cardinality_true / cardinality_predict)
elif cardinality_true == 0:
cardinality_true = 1
q_error = max(cardinality_predict / cardinality_true, cardinality_true / cardinality_predict)
else:
q_error = max(cardinality_predict / cardinality_true, cardinality_true / cardinality_predict)
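# q-error is the standard cardinality-estimation metric:
#   q_error = max(est / true, true / est), with zero counts clamped to 1 above
# e.g. est = 50, true = 200 gives q_error = 4.0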
print(f"latency: {latency_ms} and error: {q_error}") |
import argparse
import re
import json
import matplotlib.pyplot as plt
import numpy as np
from scipy.io import savemat
keys = {}
metrics = {}
token_families = {}
line_structure = {}
notes = {}
song_token_counts = {}
def sort_by_label(data):
labels = []
values = []
for key, val in data.items():
labels.append(key)
values.append(val)
values = [v for _,v in sorted(zip(labels, values))]
labels = sorted(labels)
return labels, values
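# Example: sort_by_label({'b': 2, 'a': 1}) returns (['a', 'b'], [1, 2]),
# i.e. labels sorted alphabetically with values reordered to match.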
def plot_histogram(data, normalize, title):
labels, values = sort_by_label(data)
values = np.array(values).astype(float)
if normalize:
values /= np.sum(values)
plt.title(title)
plt.bar(np.arange(len(labels)), values)
plt.xticks(np.arange(len(labels)), labels, rotation=70)
plt.show()
def convert_to_mat(filename):
keys_labels, keys_values = sort_by_label(keys)
metrics_labels, metrics_values = sort_by_label(metrics)
token_labels, token_values = sort_by_label(token_families)
note_label, note_value = sort_by_label(notes)
count_label, count_value = sort_by_label(song_token_counts)
savemat(filename, {
'keys_l': keys_labels,
'keys_v': keys_values,
'metrics_l': metrics_labels,
'metrics_v': metrics_values,
'token_l': token_labels,
'token_v': token_values,
'note_l': note_label,
'note_v': note_value,
'count_l': count_label,
'count_v': count_value
})
def analyze(filename, structures):
# Taken from the parser
re_key = re.compile(r"\\?\[?K:\s?[ABCDEFG][#b]?\s?(major|maj|m|minor|min|mixolydian|mix|dorian|dor|phrygian|phr|lydian|lyd|locrian|loc)?\]?", re.IGNORECASE)
re_tempo = re.compile(r"\[?L\:\s?\d+\/\d+\s?\]?", re.IGNORECASE)
re_meter = re.compile(r"\[?M\:\s?\d+\/\d+\s?\]?", re.IGNORECASE)
re_duplets = re.compile(r"\([2-9]:?[2-9]?:?[2-9]?")
re_note = re.compile(r"\^{0,2}\_{0,2}=?[A-Ga-g]'?,?")
re_length = re.compile(r"[1-9]{0,2}\/{0,2}[1-9]{1,2}")
re_length_short_2 = re.compile(r"/")
re_length_short_4 = re.compile(r"//")
re_rest = re.compile(r"z")
re_repeat = re.compile(r":?\|\s?\[?\d")
re_bar = re.compile(r":?\|:?")
re_durations = re.compile(r"[<>]{1,2}")
re_grouping = re.compile(r"[\[\]]")
re_error = re.compile(r".+")
# Regexes should be added in priority order, since matching
# stops at the first one that hits
regex_dict = {
'key': re_key,
'tempo': re_tempo,
'meter': re_meter,
'length': re_length,
'duplets': re_duplets,
'note': re_note,
'repeat': re_repeat,
'rest': re_rest,
'bar': re_bar,
'duration': re_durations,
'grouping': re_grouping,
'length_2': re_length_short_2,
'length_4': re_length_short_4,
'error': re_error
}
with open(filename, 'r') as f:
token_count = 0
expected_token_count = 0
text = f.read()
songs = text.split('\n\n')
for line in songs:
tokens = line.split()
# Remove X:0 from bobs transcripts
if len(tokens) > 0:
if re.match(r"X:\d+", tokens[0]):
del tokens[0]
l = len(tokens)
if l not in song_token_counts:
song_token_counts[l] = 0
song_token_counts[l] += 1
structure = ''
struct_check = False
for token in tokens:
expected_token_count += 1
for token_family, reg in regex_dict.items():
match = reg.match(token)
if match is None:
continue
token_count += 1
if structures:
if token_family == 'bar' or token_family == 'repeat':
struct_check = True
structure += token + ' '
# group together all length-types
#if token_family == 'length_2' or token_family == 'length_4':
# token_family = 'length'
if token_family == 'error':
print(token)
if token_family not in token_families:
token_families[token_family] = 0
token_families[token_family] += 1
if token_family == 'key':
token = re.search(r'\[K:(.+?)\]', token).group(1)
if token not in keys:
keys[token] = 0
keys[token] += 1
if token_family == 'meter':
token = re.search(r'\[M:(.+?)\]', token).group(1)
if token not in metrics:
metrics[token] = 0
metrics[token] += 1
if token_family == 'note':
if token not in notes:
notes[token] = 0
notes[token] += 1
break
if len(structure) > 0:
if structure not in line_structure:
line_structure[structure] = 0
line_structure[structure] += 1
print(json.dumps(keys, sort_keys=True, indent=2))
print(json.dumps(metrics, sort_keys=True, indent=2))
print(json.dumps(token_families, sort_keys=True, indent=2))
print(json.dumps(song_token_counts, sort_keys=True, indent=2))
print(json.dumps(notes, sort_keys=True, indent=2))
print('Tokens found: {} of {} ({:.3f}%)'.format(token_count, expected_token_count, float(token_count/expected_token_count)*100))
if structures:
print('Top {} line structures'.format(structures))
print('ID\tCOUNT\tSTRUCTURE')
id = 1
for structure, count in reversed(sorted(line_structure.items(), key=lambda kv: kv[1])):
print(f'{id}\t{count}\t{structure}')
id += 1
if id > int(structures):
break
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-f',
'--file',
required=True,
help='target file'
)
parser.add_argument(
'-t',
'--title',
help='appends t to the title, as in \'Key distribution t\'',
default=''
)
parser.add_argument(
'-n',
'--normalize',
help='normalize values in graph',
action='store_true',
)
parser.add_argument(
'-om',
'--output_matlab',
required=False,
help='output summary to ./path/of/filename.mat',
default=None,
)
parser.add_argument(
'--line_structure',
help='parses for top number of measure line structures',
default=False
)
args = parser.parse_args()
analyze(args.file, args.line_structure)
if args.output_matlab:
convert_to_mat(args.output_matlab)
else:
#plot_histogram(keys, args.normalize, 'Key distribution %s' % args.title)
#plot_histogram(metrics, args.normalize, 'Metric distribution %s' % args.title)
#plot_histogram(token_families, args.normalize, 'Token distribution %s' % args.title)
pass
|
# This parameter file contains the parameters related to the primitives located
# in the primitives_gnirs.py file, in alphabetical order.
from geminidr.core import parameters_preprocess
class associateSkyConfig(parameters_preprocess.associateSkyConfig):
def setDefaults(self):
self.distance = 1.
|
import os
import streamlit as st
import pandas as pd
DATADIR = os.path.join(os.path.expanduser('~'), 'Desktop/DATA/')
CODEBOOK = "Big5_codebook.txt"
DATAFILE = "Big5_data_small.csv"
with open(os.path.join(DATADIR, DATAFILE)) as fh:
df = pd.read_csv(fh)
st.write(df)
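# Launch with: streamlit run <this_file>.py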
|
"""
*... iteration ...*
"""
from .iterator import Iterator
from .page import Page
from .redirect import Redirect
from .revision import Revision
from .comment import Comment
from .contributor import Contributor
from .text import Text
|
import numpy as np
lbl = [1, 1, 2, 3, 3, 3, 6, 7, 8, 8, 9, 10]
# a = np.asarray(a)
a = np.asarray(lbl)
print(a)
print()
np.random.shuffle(a)
print('a =', a)
sort_idx = np.argsort(a)
print(sort_idx.dtype, sort_idx.shape, sort_idx)
b = a[sort_idx]
print('b =', b)
_, unique_idx = np.unique(b, return_index=True)
print(unique_idx.dtype, unique_idx.shape, unique_idx)
c = b[unique_idx]
print('c =', c)
five_mask = c > 6
print(five_mask.dtype, five_mask.shape, five_mask)
d = c[five_mask]
print('d =', d)
eight_mask = d > 8
print(eight_mask.dtype, eight_mask.shape, eight_mask)
e = d[eight_mask]
print('e =', e)
eight_mask_ = np.nonzero(eight_mask)[0]
print(eight_mask_)
five_mask_ = np.nonzero(five_mask)[0]
print(five_mask_)
for i, num in enumerate(e):
print(num)
print('e idx = ', i)
d_idx = eight_mask_[i]
print('d idx = ', d_idx)
assert d[d_idx] == num
c_idx = five_mask_[d_idx]
print('c idx = ', c_idx)
assert c[c_idx] == num
b_idx = unique_idx[c_idx]
print('b idx = ', b_idx)
assert b[b_idx] == num
a_idx = sort_idx[b_idx]
print('a idx = ', a_idx)
assert a[a_idx] == num
print()
'''
pred = np.full_like(a, -1)
# print(sort_idx[unique_idx[five_mask_[eight_mask_[range(len(e))]]]])
_ = sort_idx[unique_idx[five_mask_[eight_mask_[range(len(e))]]]]
# pred[_] = 1
# pred[_] = [3, 4]
pred[_] = 3, 4
print(a)
print(pred)
'''
# pred = [[]] * len(a) # err
pred = [[] for _ in range(len(a))]
for i, num in enumerate(e):
# print(num)
# print('e idx = ', i)
raw_idx = sort_idx[unique_idx[five_mask_[eight_mask_[i]]]]
# pred[raw_idx].append(num)
import random
for _ in range(num):
pred[raw_idx].append(random.randint(0, 1))
print(pred)
from collections import Counter
for i, p in enumerate(pred):
if len(p):
pred[i] = Counter(p).most_common(1)[0][0]
else:
pred[i] = -1
print(pred)
|
import MySQLdb
def connectDB():
mysql = MySQLdb.connect('localhost', 'StockUser', 'StockPass', 'StockDB', charset="utf8", use_unicode=True)
cursor = mysql.cursor()
SQL = """
select Close from SpotValueOfNifty50 where Date= 12012014;
"""
cursor.execute(SQL)
print(int(cursor.fetchone()[0]))
mysql.close()
if __name__=='__main__':
connectDB()
|
# Copyright 2018 University of Basel, Center for medical Image Analysis and Navigation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import SimpleITK as sitk
import torch as th
import torch.nn.functional as F
import numpy as np
import sys
from . import kernelFunction
class Image:
"""
Class representing an image in airlab
"""
def __init__(self, *args, **kwargs):
"""
Constructor for an image object where two cases are distinguished:
- Construct airlab image from an array or tensor (4 arguments)
- Construct airlab image from a SimpleITK image (less than 4 arguments)
"""
if len(args) == 4:
self.initializeForTensors(*args)
elif len(args) < 4:
self.initializeForImages(*args)
def initializeForTensors(self, tensor_image, image_size, image_spacing, image_origin):
"""
Constructor for torch tensors and numpy ndarrays
Args:
tensor_image (np.ndarray | th.Tensor): n-dimensional tensor, where the last dimensions are the image dimensions while the preceding dimensions need to be empty
image_size (array | list | tuple): number of pixels in each space dimension
image_spacing (array | list | tuple): pixel size for each space dimension
image_origin (array | list | tuple): physical coordinate of the first pixel
:return (Image): an airlab image object
"""
# distinguish between numpy array and torch tensors
if type(tensor_image) == np.ndarray:
self.image = th.from_numpy(tensor_image).squeeze().unsqueeze(0).unsqueeze(0)
elif type(tensor_image) == th.Tensor:
self.image = tensor_image.squeeze().unsqueeze(0).unsqueeze(0)
else:
raise Exception("A numpy ndarray or a torch tensor was expected as argument. Got " + str(type(tensor_image)))
self.size = image_size
self.spacing = image_spacing
self.origin = image_origin
self.dtype = self.image.dtype
self.device = self.image.device
self.ndim = len(self.image.squeeze().shape) # take only non-empty dimensions to count space dimensions
def initializeForImages(self, sitk_image, dtype=None, device="cpu"):
"""
Constructor for SimpleITK image
Note: the order of axis are flipped in order to follow the convention of numpy and torch
sitk_image (sitk.SimpleITK.Image): SimpleITK image
dtype: pixel type
device ('cpu'|'cuda'): on which device the image should be allocated
return (Image): an airlab image object
"""
if type(sitk_image) == sitk.SimpleITK.Image:
self.image = th.from_numpy(sitk.GetArrayFromImage(sitk_image)).unsqueeze(0).unsqueeze(0)
self.size = sitk_image.GetSize()
self.spacing = sitk_image.GetSpacing()
self.origin = sitk_image.GetOrigin()
if dtype is not None:
self.to(dtype, device)
else:
self.to(self.image.dtype, device)
self.ndim = len(self.image.squeeze().shape)
self._reverse_axis()
else:
raise Exception("A SimpleITK image was expected as argument. Got " + str(type(sitk_image)))
def to(self, dtype=None, device="cpu"):
"""
Converts the image tensor to a specified dtype and moves it to the specified device
"""
if dtype is not None:
self.image = self.image.to(dtype=dtype, device=device)
else:
self.image = self.image.to(device=device)
self.dtype = self.image.dtype
self.device = self.image.device
return self
def itk(self):
"""
Returns a SimpleITK image
Note: the order of axis is flipped back to the convention of SimpleITK
"""
image = Image(self.image.cpu().clone(), self.size, self.spacing, self.origin)
image._reverse_axis()
image.image.squeeze_()
itk_image = sitk.GetImageFromArray(image.image.numpy())
itk_image.SetSpacing(spacing=self.spacing)
itk_image.SetOrigin(origin=self.origin)
return itk_image
def numpy(self):
"""
Returns a numpy array
"""
return self.image.cpu().squeeze().numpy()
@staticmethod
def read(filename, dtype=th.float32, device="cpu"):
"""
Static method to directly read an image through the Image class
filename (str): filename of the image
dtype: specific dtype for representing the tensor
device: on which device the image has to be allocated
return (Image): an airlab image
"""
return Image(sitk.ReadImage(filename, sitk.sitkFloat32), dtype, device)
def write(self, filename):
"""
Write an image to hard drive
Note: order of axis are flipped to have the representation of SimpleITK again
filename (str): filename where the image is written
"""
sitk.WriteImage(self.itk(), filename)
def _reverse_axis(self):
"""
Flips the order of the axis representing the space dimensions (preceding dimensions are ignored)
Note: the method is inplace
"""
# reverse order of axis to follow the convention of SimpleITK
self.image = self.image.squeeze().permute(tuple(reversed(range(self.ndim))))
self.image = self.image.unsqueeze(0).unsqueeze(0)
"""
Object representing a displacement image
"""
class Displacement(Image):
def __init__(self, *args, **kwargs):
"""
Constructor for a displacement field object where two cases are distinguished:
- Construct airlab displacement field from an array or tensor (4 arguments)
- Construct airlab displacement field from a SimpleITK image (less than 4 arguments)
"""
if len(args) == 4:
self.initializeForTensors(*args)
elif len(args) < 4:
self.initializeForImages(*args)
def itk(self):
# flip the axis order back to the SimpleITK convention
df = Displacement(self.image.clone(), self.size, self.spacing, self.origin)
df._reverse_axis()
df.image = df.image.squeeze()
df.image = df.image.cpu()
if len(self.size) == 2:
itk_displacement = sitk.GetImageFromArray(df.image.numpy(), isVector=True)
elif len(self.size) == 3:
itk_displacement = sitk.GetImageFromArray(df.image.numpy())
itk_displacement.SetSpacing(spacing=self.spacing)
itk_displacement.SetOrigin(origin=self.origin)
return itk_displacement
def magnitude(self):
return Image(th.sqrt(th.sum(self.image.pow(2), -1)).squeeze(), self.size, self.spacing, self.origin)
def numpy(self):
return self.image.cpu().numpy()
def _reverse_axis(self):
"""
Flips the order of the axis representing the space dimensions (preceding dimensions are ignored).
Additionally, the axis holding the vector components is flipped as well
Note: the method is inplace
"""
# reverse order of axis to follow the convention of SimpleITK
order = list(reversed(range(self.ndim - 1)))
order.append(len(order))
self.image = self.image.squeeze_().permute(tuple(order))
self.image = flip(self.image, self.ndim - 1)
self.image = self.image.unsqueeze(0).unsqueeze(0)
@staticmethod
def read(filename, dtype=th.float32, device="cpu"):
"""
Static method to directly read a displacement field through the Image class
filename (str): filename of the displacement field
dtype: specific dtype for representing the tensor
device: on which device the displacement field has to be allocated
return (Displacement): an airlab displacement field
"""
return Displacement(sitk.ReadImage(filename, sitk.sitkVectorFloat32), dtype, device)
def flip(x, dim):
"""
Flip order of a specific dimension dim
x (Tensor): input tensor
dim (int): axis which should be flipped
return (Tensor): returns the tensor with the specified axis flipped
"""
indices = [slice(None)] * x.dim()
indices[dim] = th.arange(x.size(dim) - 1, -1, -1, dtype=th.long, device=x.device)
return x[tuple(indices)]
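# Example: flip(th.tensor([[1, 2], [3, 4]]), 0) -> tensor([[3, 4], [1, 2]])
# (newer PyTorch versions provide th.flip with the same effect)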
"""
Convert an image to tensor representation
"""
def read_image_as_tensor(filename, dtype=th.float32, device="cpu"):
itk_image = sitk.ReadImage(filename, sitk.sitkFloat32)
return create_tensor_image_from_itk_image(itk_image, dtype=dtype, device=device)
"""
Convert an image to tensor representation
"""
def create_image_from_image(tensor_image, image):
return Image(tensor_image, image.size, image.spacing, image.origin)
"""
Convert numpy image to AirlLab image format
"""
def image_from_numpy(image, pixel_spacing, image_origin, dtype=th.float32, device="cpu"):
tensor_image = th.from_numpy(image).unsqueeze(0).unsqueeze(0)
if th.is_complex(tensor_image):
dtype = th.complex64
tensor_image = tensor_image.to(dtype=dtype, device=device)
return Image(tensor_image, image.shape, pixel_spacing, image_origin)
"""
Convert an image to tensor representation
"""
def create_displacement_image_from_image(tensor_displacement, image):
return Displacement(tensor_displacement, image.size, image.spacing, image.origin)
"""
Create tensor image representation
"""
def create_tensor_image_from_itk_image(itk_image, dtype=th.float32, device="cpu"):
# transform image in a unit direction
image_dim = itk_image.GetDimension()
if image_dim == 2:
itk_image.SetDirection(sitk.VectorDouble([1, 0, 0, 1]))
else:
itk_image.SetDirection(sitk.VectorDouble([1, 0, 0, 0, 1, 0, 0, 0, 1]))
image_spacing = itk_image.GetSpacing()
image_origin = itk_image.GetOrigin()
np_image = np.squeeze(sitk.GetArrayFromImage(itk_image))
image_size = np_image.shape
# adjust image spacing vector size if image contains empty dimension
if len(image_size) != image_dim:
image_spacing = image_spacing[0 : len(image_size)]
tensor_image = th.tensor(np_image, dtype=dtype, device=device).unsqueeze(0).unsqueeze(0)
return Image(tensor_image, image_size, image_spacing, image_origin)
"""
Create an image pyramide
"""
def create_image_pyramid(image, down_sample_factor):
image_dim = len(image.size)
image_pyramide = []
if image_dim == 2:
for level in down_sample_factor:
sigma = (th.tensor(level) / 2).to(dtype=th.float32)
kernel = kernelFunction.gaussian_kernel_2d(sigma.numpy(), asTensor=True)
padding = np.array([(x - 1) / 2 for x in kernel.size()], dtype=int).tolist()
kernel = kernel.unsqueeze(0).unsqueeze(0)
kernel = kernel.to(dtype=image.dtype, device=image.device)
image_sample = F.conv2d(image.image, kernel, stride=level, padding=padding)
image_size = image_sample.size()[-image_dim:]
image_spacing = [x * y for x, y in zip(image.spacing, level)]
image_origin = image.origin
image_pyramide.append(Image(image_sample, image_size, image_spacing, image_origin))
image_pyramide.append(image)
elif image_dim == 3:
for level in down_sample_factor:
sigma = (th.tensor(level) / 2).to(dtype=th.float32)
kernel = kernelFunction.gaussian_kernel_3d(sigma.numpy(), asTensor=True)
padding = np.array([(x - 1) / 2 for x in kernel.size()], dtype=int).tolist()
kernel = kernel.unsqueeze(0).unsqueeze(0)
kernel = kernel.to(dtype=image.dtype, device=image.device)
image_sample = F.conv3d(image.image, kernel, stride=level, padding=padding)
image_size = image_sample.size()[-image_dim:]
image_spacing = [x * y for x, y in zip(image.spacing, level)]
image_origin = image.origin
image_pyramide.append(Image(image_sample, image_size, image_spacing, image_origin))
image_pyramide.append(image)
else:
print("Error: ", image_dim, " is not supported with create_image_pyramide()")
sys.exit(-1)
return image_pyramide
def create_downsampled_image(image, down_sample_factor):
image_dim = len(image.size)
if down_sample_factor[0] == 1:
return image
if image_dim == 2:
level = down_sample_factor
sigma = (th.tensor(level) / 2).to(dtype=th.float32)
kernel = kernelFunction.gaussian_kernel_2d(sigma.numpy(), asTensor=True)
padding = np.array([(x - 1) / 2 for x in kernel.size()], dtype=int).tolist()
kernel = kernel.unsqueeze(0).unsqueeze(0)
kernel = kernel.to(dtype=image.dtype, device=image.device)
image_sample = F.conv2d(image.image, kernel, stride=level, padding=padding)
image_size = image_sample.size()[-image_dim:]
image_spacing = [x * y for x, y in zip(image.spacing, level)]
image_origin = image.origin
return Image(image_sample, image_size, image_spacing, image_origin)
elif image_dim == 3:
level = down_sample_factor
sigma = (th.tensor(level) / 2).to(dtype=th.float32)
kernel = kernelFunction.gaussian_kernel_3d(sigma.numpy(), asTensor=True)
padding = np.array([(x - 1) / 2 for x in kernel.size()], dtype=int).tolist()
kernel = kernel.unsqueeze(0).unsqueeze(0)
kernel = kernel.to(dtype=image.dtype, device=image.device)
image_sample = F.conv3d(image.image, kernel, stride=level, padding=padding)
image_size = image_sample.size()[-image_dim:]
image_spacing = [x * y for x, y in zip(image.spacing, level)]
image_origin = image.origin
return Image(image_sample, image_size, image_spacing, image_origin)
else:
print("Error: ", image_dim, " is not supported?")
sys.exit(-1)
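# A minimal usage sketch (hypothetical file paths):
#   fixed = Image.read('fixed.nii.gz', dtype=th.float32, device='cpu')
#   pyramid = create_image_pyramid(fixed, [[4, 4], [2, 2]])  # two coarse levels plus the original
#   fixed.write('fixed_copy.nii.gz')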
|
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import config
from install_package import InstallPackage
import os
import re
import shutil
import sys
import utils
BASENAME = "ExposureRender"
# Hosted on Google Code as a Mercurial repository
HG_REPO = "http://code.google.com/p/exposure-render"
#HG_TAG = "v5.6.1"
dependencies = ['CMake', 'Qt', 'VTK_QT_58']
class ExposureRender(InstallPackage):
def __init__(self):
self.source_dir = os.path.join(config.archive_dir, BASENAME)
self.build_dir = os.path.join(config.build_dir, '%s' %
(BASENAME,))
self.inst_dir = os.path.join(config.inst_dir, BASENAME)
def get(self):
if os.path.exists(self.source_dir):
utils.output("Exposure Render already checked out, skipping step.")
else:
utils.goto_archive()
ret = os.system("hg clone %s %s" % (GIT_REPO, BASENAME))
if ret != 0:
utils.error("Could not clone Exposure Render repository. Fix and try again.")
os.chdir(self.source_dir)
ret = os.system("hg update") #TODO: is this required?
if ret != 0:
utils.error("Could not update Exposure Render. Fix and try again.")
def configure(self):
if os.path.exists(
os.path.join(self.build_dir, 'CMakeFiles/cmake.check_cache')):
utils.output("Exposure Render build already configured.")
return
if not os.path.exists(self.build_dir):
os.mkdir(self.build_dir)
QT_MOC_EXECUTABLE = os.path.join(config.QT_BIN, 'moc.exe')
QT_QMAKE_EXECUTABLE = os.path.join(config.QT_BIN, 'qmake.exe')
QT_UIC_EXECUTABLE = os.path.join(config.QT_BIN, 'uic.exe')
#if not os.path.exists(QT_MOC_EXECUTABLE):
# print "Qt MOC executable not found, aborting!"
# return;
cmake_params = \
"-DBUILD_SHARED_LIBS=ON " \
"-DCMAKE_BUILD_TYPE=RelWithDebInfo " \
"-DCMAKE_INSTALL_PREFIX=%s " \
"-DPYTHON_INCLUDE_DIR=%s " \
"-DPYTHON_LIBRARY=%s " \
"-DQT_MOC_EXECUTABLE=%s " \
"-DQT_QMAKE_EXECUTABLE=%s " \
"-DQT_UIC_EXECUTABLE=%s " \
"-DVTK_DIR:PATH=%s" \
% (self.inst_dir,
config.PYTHON_INCLUDE_PATH,
config.PYTHON_LIBRARY,
QT_MOC_EXECUTABLE,
QT_QMAKE_EXECUTABLE,
QT_UIC_EXECUTABLE,
config.VTK_DIR)
ret = utils.cmake_command(self.build_dir, os.path.join(self.source_dir, 'Source'), cmake_params)
if ret != 0:
utils.error("Could not configure Exposure Render. Fix and try again.")
def build(self):
posix_file = os.path.join(self.build_dir, config.BUILD_TARGET,
'libvtkErCorePython.so') #TODO: check whether this is the correct file to test on
nt_file = os.path.join(self.build_dir, config.BUILD_TARGET,
'vtkErCorePythonD.dll')
if utils.file_exists(posix_file, nt_file):
utils.output("Exposure Render already built. Skipping build step.")
else:
os.chdir(self.build_dir)
ret = utils.make_command('ErGUI.sln')
if ret != 0:
utils.error("Error building Exposure Render. Fix and try again.")
def install(self):
posix_file = os.path.join(self.inst_dir, 'bin/ErGUI')
nt_file = os.path.join(self.inst_dir, 'bin', 'ErGUI.exe')
if utils.file_exists(posix_file, nt_file):
utils.output("Exposure Render already installed. Skipping install step.")
else:
ret = utils.make_command('ErGUI.sln', install=True)
if ret != 0:
utils.error("Could not install Exposure Render. Fix and try again.")
def clean_build(self):
utils.output("Removing build and installation directories.")
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
self.clean_install()
def clean_install(self):
utils.output("Removing installation directory.")
if os.path.exists(self.inst_dir):
shutil.rmtree(self.inst_dir)
def get_installed_version(self):
#TODO: implement
return ''
|
#!/usr/bin/env python
"""
Given a set of genomic coordinates in BED format:
chr start end
...
calculates a GC matched set of genomic coordinates.
Only first 3 columns of input file will be used- all other columns are ignored.
"""
### imports ###
import os
import sys
import numpy as np
import argparse
import inspect
def read_target_positions(file_path, filter_chromosomes):
"""
reads a bed file and returns a list of tuples containing genomic coordinates
"""
if filter_chromosomes is None:
filter_chromosomes = []
else:
print('filtering out: ' + ' '.join(filter_chromosomes))
with open(file_path) as f:
data = f.readlines()
filter_chromosomes = set(filter_chromosomes)
positions = []
if data[0].strip().startswith('#'): # skip a header line; also safe on an empty first line
data = data[1:]
for line in data:
tokens = line.strip().split()
chrom = tokens[0]
start = int(tokens[1])
end = int(tokens[2])
#name = tokens[3]
if chrom not in filter_chromosomes:
if 'chrUn' not in chrom and 'random' not in chrom and 'alt' not in chrom:
positions.append([chrom, start, end])
return positions
def calc_gc_content(sequence):
'''
sequence - a string, representing a DNA sequence in upper case
returns the GC content of sequence
'''
C_count = sequence.count('C')
G_count = sequence.count('G')
GC_count = C_count + G_count
GC_content = GC_count/len(sequence)
return GC_content
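# Example: calc_gc_content('ATGC') -> 0.5 (2 of the 4 bases are G or C)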
def get_random_background(target_positions,
size_ratio,
num_bins = 10,
n_threshold = 0.5,
genome = 'mm10',
filter_chromosomes = ['chrM', 'chrY']
):
"""
target_sequences: 2D numpy array, list of genomic coordinates for target
sequences [[chr,start,end],...]
size_ratio: float, ratio of background sequences to target sequences
num_bins: int, number of GC bins
n_threshold: proportion of background sequences that can be N
genome: genome from which to draw background sequences
"""
###load genome into memory
# index target positions
# {chr:[]}, value is chromosome length boolean array
# largest chromosome has 200 million bps
script_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
genome_path = script_path + '/' + genome + '/'
chromosomes = [x.split('.')[0] for x in os.listdir(genome_path)]
chromosomes = [chrom for chrom in chromosomes if 'chrUn' not in chrom and 'random' not in chrom and 'alt' not in chrom]
filter_chromosomes = set(filter_chromosomes)
chromosomes = [chrom for chrom in chromosomes if chrom not in filter_chromosomes]
chrom_size_dict = {}
chrom_seq_dict = {}
print('reading genome', genome)
for chrom in chromosomes:
with open(genome_path + chrom + '.fa') as f:
data = f.readlines()
seq = ''.join(x.upper().strip() for x in data[1:])
size = len(seq)
chrom_size_dict[chrom] = size
chrom_seq_dict[chrom] = seq
print('done reading genome')
### initialize target_chr_position_dict using target positions
target_chr_position_dict = {x:np.zeros(200000000) for x in chromosomes}
### retrieve target sequences and calculate GC content and mean length
target_length_count = 0
filtered_target_positions = []
for pos in target_positions:
chrom = pos[0]
start = int(pos[1])
end = int(pos[2])
# use 0 indexing of position, versus 1 indexing used in fasta
if chrom in chrom_seq_dict:
seq = chrom_seq_dict[chrom][start:(end)]
target_chr_position_dict[chrom][start-1:end] = 1
if len(seq) > 0:
gc_content = calc_gc_content(seq)
pos.append(seq)
pos.append(gc_content)
filtered_target_positions.append(pos)
target_length_count += len(seq)
else:
print(chrom, start, end, 'not found')
else:
print(chrom, start, end, 'not found')
# average length of target sequences
mean_target_length = target_length_count/len(filtered_target_positions)
mean_target_length = int(np.floor(mean_target_length))
# sort target_positions by gc_content and bin according to GC content
sorted_target_positions = sorted(filtered_target_positions, key=lambda x:x[-1])
sorted_target_positions = np.array(sorted_target_positions)
target_position_bins = np.array_split(sorted_target_positions, num_bins)
min_gc = float(sorted_target_positions[0][-1])
max_gc = float(sorted_target_positions[-1][-1])
gc_threshold = (max_gc - min_gc)/(num_bins*2)
background_positions = []
for target_pos_bin in target_position_bins:
current_random_pos = get_random_positions_with_gc(target_pos_bin,
size_ratio,
gc_threshold,
n_threshold,
chrom_seq_dict,
chrom_size_dict,
target_chr_position_dict,
mean_target_length)
background_positions = background_positions + current_random_pos
return background_positions
def get_random_positions_with_gc(target_positions,
size_ratio,
tolerance,
n_threshold,
chrom_seq_dict,
chrom_size_dict,
target_chr_position_dict,
mean_target_length
):
"""
target_positions: 2D numpy array, list of genomic coordinates for target
sequences [[chr,start,end, seq, gc_content],...]
size_ratio: float, ratio of background sequences to target sequences
tolerance: float, max difference in GC content between target and background
n_threshold: proportion of background sequences that can be N
genome: genome from which to draw background sequences
"""
chromosomes = sorted(chrom_seq_dict.keys())
numChromosomes = len(chrom_seq_dict.keys()) # number of chromosomes
### calculate GC content and average length of the target sequences
target_gc_count = 0
target_length_count = 0
for pos in target_positions:
seq = pos[-2]
if len(seq) >0:
target_gc_count += seq.count('G')
target_gc_count += seq.count('C')
target_length_count += len(seq)
target_gc_content = (target_gc_count + 0.1)/(target_length_count+0.1) # GC content of target sequences
### select random genomic loci such that they do no overlap target sequences
numSelected = 0
# candidate pool of background seqs is size_ratio X larger
numToSelect = len(target_positions) * size_ratio
candidate_positions = []
numNallowed = int(n_threshold * mean_target_length) # number of allowable Ns
counter = 0
while numSelected < numToSelect:
if counter % 100000 == 0:
print(counter, numSelected)
# select a random chromosome
chromIndex = np.random.randint(numChromosomes)
randChrom = chromosomes[chromIndex]
randChromSize = chrom_size_dict[randChrom]
# must find non overlapping segment on this chromosome before moving on
selectedSequence = False
while not selectedSequence:
counter += 1
randStart = np.random.randint(randChromSize)
randEnd = randStart + mean_target_length
overlap_sum = np.sum(target_chr_position_dict[randChrom][randStart:(randEnd)])
if not overlap_sum > 0:
randSeq = chrom_seq_dict[randChrom][randStart:(randEnd+1)]
numN = randSeq.count('N')
if numN <= numNallowed:
rand_gc_count = randSeq.count('G')+ randSeq.count('C')
rand_gc = rand_gc_count/mean_target_length
if abs(target_gc_content - rand_gc) <= tolerance:
selectedSequence = True
numSelected+=1
candidate_positions.append([randChrom, randStart, randEnd, randSeq])
if counter > 10000:
break # safety valve: abandon the search after too many attempts (exits only this inner retry loop)
# calculate GC content of background samples
background_gc_count = 0
background_length = 0
for cp in candidate_positions:
s = cp[3]
background_gc_count += s.count('G')
background_gc_count += s.count('C')
background_length += len(s)
background_gc_content = background_gc_count/(background_length+0.0000001)
print('target GC:', target_gc_content,
'background GC:', background_gc_content,
'target length:', mean_target_length,
'numTargetPositions',len(target_positions),
'backgroundPositions', len(candidate_positions))
return candidate_positions
def write_background_positions(background_positions, output_dir):
"""
converts background positions into a bed file and a
"""
bed_file = open(output_dir + '/background.bed', 'w')
fasta_file = open(output_dir + '/background.fasta', 'w')
counter = 0
for pos in background_positions:
chrom = pos[0]
start = str(pos[1])
end = str(pos[2])
seq = str(pos[3])
randID = 'bg_' + str(np.random.randint(100000)) + '_' + str(counter)
counter += 1
bed_file.write('\t'.join([chrom, start, end, randID, '\n']))
fasta_file.write('>' + randID + '\n')
fasta_file.write(seq + '\n')
bed_file.close()
fasta_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Constructs random GC matched '+
'background regions')
parser.add_argument("inputPath",
help="path to a bed file containing a chr, start, end, and strand column",
type = str)
parser.add_argument("genome",
help="genome from which to construct background regions",
type=str)
parser.add_argument("outputPath",
help="directory where output files should be written",
default="./", type=str)
parser.add_argument("-sizeRatio",
help="size of the background region with respect to the target region",
default = 1.0, type=float)
parser.add_argument("-numBins",
help="number of bins to use for GC normalization",
default = 10,
type=int)
parser.add_argument("-nThreshold",
help="maximum fraction of background sequences that can be N",
default = 0.1,
type=float)
parser.add_argument("-filterChromosomes",
help="chromosomes to ignore",
type=str,
default=['chrM', 'chrY'],
nargs='+')
# parse arguments
args = parser.parse_args()
input_path = args.inputPath
output_path = args.outputPath
size_ratio = args.sizeRatio
num_bins = args.numBins
n_threshold = args.nThreshold
genome = args.genome
filter_chromosomes = args.filterChromosomes
target_positions = read_target_positions(input_path, filter_chromosomes)
background_positions = get_random_background(target_positions,
size_ratio = size_ratio,
num_bins = num_bins,
n_threshold = n_threshold,
genome = genome,
filter_chromosomes=filter_chromosomes
)
write_background_positions(background_positions, output_path)
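# Example invocation (hypothetical paths; expects per-chromosome FASTA files
# under ./<genome>/ next to this script):
#   python make_gc_matched_background.py peaks.bed mm10 ./out -sizeRatio 1.0 -numBins 10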
|
from typing import Dict
from typing import TypedDict
class Lang(TypedDict):
title: str
description: str
LangData = Dict[
str, # two letter locale code
Lang,
]
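# A minimal usage sketch (hypothetical locale data):
#   SAMPLE_LANGS: LangData = {
#       'en': {'title': 'Home', 'description': 'Landing page'},
#       'de': {'title': 'Start', 'description': 'Startseite'},
#   }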
|
"""Miscellaneous utility classes and functions. """
import sys
from typing import Any, Optional
class Logger:
"""Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing
on both stdout and the file.
"""
def __init__(self, file_name: Optional[str] = None, file_mode: str = 'w', should_flush: bool = True):
self.file = None
if file_name is not None:
self.file = open(file_name, file_mode)
self.should_flush = should_flush
self.stdout = sys.stdout
self.stderr = sys.stderr
sys.stdout = self
sys.stderr = self
def __enter__(self) -> "Logger":
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.close()
def write(self, text: str) -> None:
"""Writing text to stdout (and a file) and optionally flush. """
if len(text) == 0:
return
if self.file is not None:
self.file.write(text)
self.stdout.write(text)
if self.should_flush:
self.flush()
def flush(self) -> None:
"""Flush written text to both stdout and a file, if open. """
if self.file is not None:
self.file.flush()
self.stdout.flush()
def close(self) -> None:
"""Flush, close possible files, and remove stdout/stderr mirroring. """
self.flush()
# if using multiple loggers, prevent closing in wrong order
if sys.stdout is self:
sys.stdout = self.stdout
if sys.stderr is self:
sys.stderr = self.stderr
if self.file is not None:
self.file.close()
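# A minimal usage sketch (hypothetical log path):
#   with Logger(file_name='run.log') as log:
#       print('mirrored to stdout and run.log')
#   # on exit, stdout/stderr are restored and the file is closed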
class EMA:
"""Weighted Moving Average. """
def __init__(self, model, decay=0.999):
self.model = model
self.decay = decay
self.shadow = {}
self.back_up = {}
self.register_shadow()
def register_shadow(self):
"""Registers model's parameters into shadow. """
for name, param in self.model.named_parameters():
if param.requires_grad:
self.shadow[name] = param.data.clone()
def update(self):
"""Update shadow from model's parameters. """
for name, param in self.model.named_parameters():
if param.requires_grad:
assert name in self.shadow
new_average = (1 - self.decay) * param.data + self.decay * self.shadow[name]
self.shadow[name] = new_average.clone()
def state_dict(self):
"""Returns a dictionary containing a whole state of the EMA weights. """
return self.shadow
def load_state_dict(self, state_dict):
"""Copies parameters and buffers from :attr:`state_dict` into shadow. """
for name, data in state_dict.items():
assert name in self.shadow
self.shadow[name] = data
def apply_shadow(self):
"""Loads shadow to model. """
for name, param in self.model.named_parameters():
if param.requires_grad:
assert name in self.shadow
self.back_up[name] = param.data
param.data = self.shadow[name]
def restore(self):
"""Restores model's parameters. """
for name, param in self.model.named_parameters():
if param.requires_grad:
assert name in self.back_up
param.data = self.back_up[name]
self.back_up = {}
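# A typical training-loop sketch (model/optimizer/loader are hypothetical):
#   ema = EMA(model, decay=0.999)
#   for batch in loader:
#       loss = model(batch).mean()
#       loss.backward(); optimizer.step(); optimizer.zero_grad()
#       ema.update()          # fold the new weights into the shadow average
#   ema.apply_shadow()        # evaluate with the averaged weights
#   evaluate(model)
#   ema.restore()             # swap the raw weights back for further training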
|
# -*- encoding:utf8 -*-
from fabric import colors
from fabric.api import put, env
from denim import paths, utils
SERVICE_NAME = 'supervisor'
def upload_config(name_prefix=None):
"""
Upload configuration file.
:param name_prefix: Prefix to append to service name to provide alternate
configuration (or support multiple services)
"""
put(
paths.local_config_file(SERVICE_NAME, name_prefix),
paths.remote_config_file('/etc/supervisor/conf.d', name_prefix),
use_sudo=True
)
def manager_start():
"""
Start service manager daemon.
"""
utils.run_as('/etc/init.d/supervisor start', use_sudo=True)
def manager_stop():
"""
Stop service manager daemon.
"""
utils.run_as('/etc/init.d/supervisor stop', use_sudo=True)
def manager_restart():
"""
Restart service manager daemon.
"""
# Use run_test here, as restart seems to return an exit status of 1 even on success
utils.run_test('/etc/init.d/supervisor restart', use_sudo=True)
def manager_reload():
"""
Reload service manager daemon.
"""
utils.run_as('supervisorctl reload', use_sudo=True)
def manager_status():
"""
Status of service manager daemon.
"""
if utils.run_test('/etc/init.d/supervisor status', use_sudo=True):
print colors.green("Service is Up")
else:
print colors.red("Service is Down")
def start(service_name=None):
"""
Start process.
:param service_name: name of the service to start in supervisor.
"""
if not service_name:
service_name = env.project_name
utils.run_as('supervisorctl start %s' % service_name, use_sudo=True)
def stop(service_name=None):
"""
Stop process.
:param service_name: name of the service to stop in supervisor.
"""
if not service_name:
service_name = env.project_name
utils.run_as('supervisorctl stop %s' % service_name, use_sudo=True)
def restart(service_name=None):
"""
Restart process.
:param service_name: name of the service to restart in supervisor.
"""
if not service_name:
service_name = env.project_name
utils.run_as('supervisorctl restart %s' % service_name, use_sudo=True)
def reload():
"""
Reload process configuration.
"""
utils.run_as('supervisorctl reload', use_sudo=True)
def status(service_name=None):
"""
Process status.
:param service_name: name of the service to get status of.
"""
if service_name is None:
service_name = env.project_name
utils.run_as('supervisorctl status %s' % service_name, use_sudo=True)
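# Example invocation (assuming these tasks are exposed via a Fabric 1.x fabfile):
#   fab -H deploy@web1 restart:service_name=myapp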
|
# -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : sentigan_instructor.py
# @Time : Created at 2019-07-09
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.optim as optim
import config as cfg
from instructor.real_data.instructor import BasicInstructor
from models.SentiGAN_D import SentiGAN_D, SentiGAN_C
from models.SentiGAN_G import SentiGAN_G
from utils import rollout
from utils.cat_data_loader import CatClasDataIter
from utils.data_loader import GenDataIter
from utils.text_process import tensor_to_tokens, write_tokens
class SentiGANInstructor(BasicInstructor):
def __init__(self, opt):
super(SentiGANInstructor, self).__init__(opt)
# generator, discriminator
self.gen_list = [SentiGAN_G(cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size, cfg.max_seq_len,
cfg.padding_idx, cfg.temperature, gpu=cfg.CUDA) for _ in range(cfg.k_label)]
self.dis = SentiGAN_D(cfg.k_label, cfg.dis_embed_dim, cfg.vocab_size, cfg.padding_idx, gpu=cfg.CUDA)
self.clas = SentiGAN_C(cfg.k_label, cfg.dis_embed_dim, cfg.max_seq_len, cfg.num_rep, cfg.extend_vocab_size,
cfg.padding_idx, gpu=cfg.CUDA)
self.init_model()
# Optimizer
self.gen_opt_list = [optim.Adam(gen.parameters(), lr=cfg.gen_lr) for gen in self.gen_list]
self.dis_opt = optim.Adam(self.dis.parameters(), lr=cfg.dis_lr)
self.clas_opt = optim.Adam(self.clas.parameters(), lr=cfg.clas_lr)
# Metrics
self.all_metrics.append(self.clas_acc)
def init_model(self):
if cfg.dis_pretrain:
self.log.info(
'Load pretrained discriminator: {}'.format(cfg.pretrained_dis_path))
self.dis.load_state_dict(torch.load(cfg.pretrained_dis_path))
if cfg.gen_pretrain:
for i in range(cfg.k_label):
self.log.info('Load MLE pretrained generator gen: {}'.format(cfg.pretrained_gen_path + '%d' % i))
self.gen_list[i].load_state_dict(torch.load(cfg.pretrained_gen_path + '%d' % i))
if cfg.clas_pretrain:
self.log.info('Load pretrained classifier: {}'.format(cfg.pretrained_clas_path))
self.clas.load_state_dict(torch.load(cfg.pretrained_clas_path, map_location='cuda:%d' % cfg.device))
if cfg.CUDA:
for i in range(cfg.k_label):
self.gen_list[i] = self.gen_list[i].cuda()
self.dis = self.dis.cuda()
self.clas = self.clas.cuda()
def _run(self):
# ===Pre-train Classifier with real data===
if cfg.use_clas_acc:
self.log.info('Start training Classifier...')
self.train_classifier(cfg.PRE_clas_epoch)
# ===PRE-TRAIN GENERATOR===
if not cfg.gen_pretrain:
self.log.info('Starting Generator MLE Training...')
self.pretrain_generator(cfg.MLE_train_epoch)
if cfg.if_save and not cfg.if_test:
for i in range(cfg.k_label):
torch.save(self.gen_list[i].state_dict(), cfg.pretrained_gen_path + '%d' % i)
print('Save pre-trained generator: {}'.format(cfg.pretrained_gen_path + '%d' % i))
# ===TRAIN DISCRIMINATOR====
if not cfg.dis_pretrain:
self.log.info('Starting Discriminator Training...')
self.train_discriminator(cfg.d_step, cfg.d_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.dis.state_dict(), cfg.pretrained_dis_path)
print('Save pre-trained discriminator: {}'.format(cfg.pretrained_dis_path))
# ===ADVERSARIAL TRAINING===
self.log.info('Starting Adversarial Training...')
self.log.info('Initial generator: %s', self.comb_metrics(fmt_str=True))
for adv_epoch in range(cfg.ADV_train_epoch):
self.log.info('-----\nADV EPOCH %d\n-----' % adv_epoch)
self.sig.update()
if self.sig.adv_sig:
self.adv_train_generator(cfg.ADV_g_step) # Generator
self.train_discriminator(cfg.ADV_d_step, cfg.ADV_d_epoch, 'ADV') # Discriminator
if adv_epoch % cfg.adv_log_step == 0:
if cfg.if_save and not cfg.if_test:
self._save('ADV', adv_epoch)
else:
self.log.info('>>> Stop by adv_signal! Finishing adversarial training...')
break
def _test(self):
print('>>> Begin test...')
self._run()
pass
def pretrain_generator(self, epochs):
"""
Max Likelihood Pre-training for the generator
"""
for epoch in range(epochs):
self.sig.update()
if self.sig.pre_sig:
for i in range(cfg.k_label):
pre_loss = self.train_gen_epoch(self.gen_list[i], self.train_data_list[i].loader,
self.mle_criterion, self.gen_opt_list[i])
# ===Test===
if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:
if i == cfg.k_label - 1:
self.log.info('[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (
epoch, pre_loss, self.comb_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('MLE', epoch)
else:
self.log.info('>>> Stop by pre signal, skip to adversarial training...')
break
def adv_train_generator(self, g_step):
"""
The gen is trained using policy gradients, using the reward from the discriminator.
Training is done for num_batches batches.
"""
for i in range(cfg.k_label):
rollout_func = rollout.ROLLOUT(self.gen_list[i], cfg.CUDA)
total_g_loss = 0
for step in range(g_step):
inp, target = GenDataIter.prepare(self.gen_list[i].sample(cfg.batch_size, cfg.batch_size), gpu=cfg.CUDA)
# ===Train===
rewards = rollout_func.get_reward(target, cfg.rollout_num, self.dis)
adv_loss = self.gen_list[i].batchPGLoss(inp, target, rewards)
self.optimize(self.gen_opt_list[i], adv_loss)
total_g_loss += adv_loss.item()
# ===Test===
self.log.info('[ADV-GEN]: %s', self.comb_metrics(fmt_str=True))
def train_discriminator(self, d_step, d_epoch, phase='MLE'):
"""
Training the discriminator on real_data_samples (positive) and generated samples from gen (negative).
Samples are drawn d_step times, and the discriminator is trained for d_epoch epochs each step.
"""
# prepare loader for validate
global d_loss, train_acc
for step in range(d_step):
# prepare loader for training
real_samples = []
fake_samples = []
for i in range(cfg.k_label):
real_samples.append(self.train_samples_list[i])
fake_samples.append(self.gen_list[i].sample(cfg.samples_num // cfg.k_label, 8 * cfg.batch_size))
dis_samples_list = [torch.cat(fake_samples, dim=0)] + real_samples
dis_data = CatClasDataIter(dis_samples_list)
for epoch in range(d_epoch):
# ===Train===
d_loss, train_acc = self.train_dis_epoch(self.dis, dis_data.loader, self.dis_criterion,
self.dis_opt)
# ===Test===
self.log.info('[%s-DIS] d_step %d: d_loss = %.4f, train_acc = %.4f' % (
phase, step, d_loss, train_acc))
if cfg.if_save and not cfg.if_test and phase == 'MLE':
torch.save(self.dis.state_dict(), cfg.pretrained_dis_path)
def cal_metrics_with_label(self, label_i):
assert isinstance(label_i, int), 'missing label'
with torch.no_grad():
# Prepare data for evaluation
eval_samples = self.gen_list[label_i].sample(cfg.samples_num, 8 * cfg.batch_size)
gen_data = GenDataIter(eval_samples)
gen_tokens = tensor_to_tokens(eval_samples, self.idx2word_dict)
gen_tokens_s = tensor_to_tokens(self.gen_list[label_i].sample(200, 200), self.idx2word_dict)
clas_data = CatClasDataIter([eval_samples], label_i)
# Reset metrics
self.bleu.reset(test_text=gen_tokens, real_text=self.test_data_list[label_i].tokens)
self.nll_gen.reset(self.gen_list[label_i], self.train_data_list[label_i].loader)
self.nll_div.reset(self.gen_list[label_i], gen_data.loader)
self.self_bleu.reset(test_text=gen_tokens_s, real_text=gen_tokens)
self.clas_acc.reset(self.clas, clas_data.loader)
self.ppl.reset(gen_tokens)
return [metric.get_score() for metric in self.all_metrics]
def _save(self, phase, epoch):
"""Save model state dict and generator's samples"""
for i in range(cfg.k_label):
if phase != 'ADV':
torch.save(self.gen_list[i].state_dict(),
cfg.save_model_root + 'gen{}_{}_{:05d}.pt'.format(i, phase, epoch))
save_sample_path = cfg.save_samples_root + 'samples_d{}_{}_{:05d}.txt'.format(i, phase, epoch)
samples = self.gen_list[i].sample(cfg.batch_size, cfg.batch_size)
write_tokens(save_sample_path, tensor_to_tokens(samples, self.idx2word_dict))
|
#!/usr/bin/env python3
import pytest
from networkpolicy_manager.policy import Singleton
class TestSingleton():
"""
    Make sure the Singleton class behaves appropriately.
"""
def test_singleton_is_none(self):
context = Singleton.get_instance()
        assert context.get() is None
def test_singleton_setter_function_works(self):
context = Singleton.get_instance()
context.set(True)
        assert context.get() is True
def test_singleton_truly_behaves_like_a_singleton(self):
context = Singleton.get_instance()
context1 = Singleton.get_instance()
context.set(True)
        assert context1.get() is True
|
# Generated by Django 2.2b1 on 2019-03-11 13:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('openbook_auth', '0026_auto_20190309_1527'),
]
operations = [
migrations.AlterField(
model_name='user',
name='connections_circle',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='openbook_circles.Circle'),
),
]
|
# ===========================================================================
# rsshow.py ---------------------------------------------------------------
# ===========================================================================
# import ------------------------------------------------------------------
# ---------------------------------------------------------------------------
import rsvis.utils.rsioobject
import rsvis.utils.general as gu
from rsvis.utils import opener, imgtools
import rsvis.utils.logger
import rsvis.utils.bbox
import rsvis.utils.patches_ordered_ext
import rsvis.utils.utils_gan
import cv2
import math
import numpy as np
import pathlib
from tqdm import tqdm
import torch
import os
import shutil
def bb_intersection_over_union(boxA, boxB):
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
# compute the area of intersection rectangle
    interArea = max(xB - xA, 0) * max(yB - yA, 0)
if interArea == 0:
return 0, 0, 0, 0, 0
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = abs((boxA[2] - boxA[0]) * (boxA[3] - boxA[1]))
boxBArea = abs((boxB[2] - boxB[0]) * (boxB[3] - boxB[1]))
    # compute the intersection over union by taking the intersection
    # area and dividing it by the sum of prediction + ground-truth
    # areas minus the intersection area
iou = interArea / float(boxAArea + boxBArea - interArea)
a = (min(boxAArea, boxBArea) + interArea)/ (2*min(boxAArea, boxBArea))
# return the intersection over union value
    return iou, a, interArea, boxAArea, boxBArea
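# Worked example (sanity check): for boxA = [0, 0, 2, 2] and boxB = [1, 1, 3, 3]
# the intersection is the unit square, so iou = 1 / (4 + 4 - 1) ~ 0.143 and
# a = (min(4, 4) + 1) / (2 * 4) = 0.625.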
def dist_med(boxA, boxB):
    # box centres (min corner plus half the extent, not the full extent)
    xA_m = min(boxA[0], boxA[2]) + abs(boxA[2] - boxA[0]) / 2
    yA_m = min(boxA[1], boxA[3]) + abs(boxA[3] - boxA[1]) / 2
    xB_m = min(boxB[0], boxB[2]) + abs(boxB[2] - boxB[0]) / 2
    yB_m = min(boxB[1], boxB[3]) + abs(boxB[3] - boxB[1]) / 2
return (xB_m - xA_m)**2+(yB_m - yA_m)**2
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def run(
files,
label,
param_in,
param_out=dict(),
param_classes=list(),
    param_exp=dict(),  # indexed by string keys below, so a dict default
param_cloud=dict(),
param_show=dict()
):
rsvis.utils.logger.Logger().get_logformat("Start RSExp with the following parameters:", param_label=label, param_in=param_in, param_out=param_out, param_classes=param_classes, param_cloud=param_cloud, param_show=param_show)
# settings ------------------------------------------------------------
# -----------------------------------------------------------------------
param_label = [c["label"] for c in param_classes]
param_label = [c["color"] for c in param_classes]
param_color = [c["color"] for c in param_classes]
rsio = rsvis.utils.rsioobject.RSIOObject(files, label, param_in, param_out, param_show, label=param_label, color=param_color
)
if os.path.exists(param_out["log"]["path_dir"]):
shutil.rmtree(param_out["log"]["path_dir"], ignore_errors=True)
images_in = rsio.get_img_in()
images_out = rsio.get_img_out(img_name="image")
images_log_out = rsio.get_log_out(**param_out["log"])
label_id = dict()
for c in param_classes:
label_id[str(c["id"])] = c["name"]
# print(label_id)
param = param_exp
filepaths = [f for f in os.listdir(param["results"]) if f.endswith(".txt")]
img_obj = [list() for i in range(0, len(images_in))]
for file in tqdm(filepaths):
filename, file_extension = os.path.splitext(file)
img_param = [int(f) for i, f in enumerate(filename.split("-")) if i > 0]
img_obj[img_param[4]-1].append({'box': img_param[0:4], "file": file})
thresh = 2 # 3
for img_idx, (img, obj) in tqdm(enumerate(zip(rsio.get_img_in(), img_obj))):
box_list = list()
for o in obj:
root=list()
name = o["file"].split("-")
with open(os.path.join(param["results"],o["file"]), "r") as f:
root = ([k.split(" ") for k in f.read().splitlines()])
for r in root:
if(r):
box = [
float(r[1])*255*param["factor"][0],
float(r[2])*255*param["factor"][1],
float(r[3])*255*param["factor"][0],
float(r[4])*255*param["factor"][1]
]
if (box[0]-box[2])/2<thresh and not int(name[3])==0:
continue
if (box[0]+box[2])/2>(param["img_size"][0]-2) and not o["box"][3]==param["img_size"][0]:
continue
if (box[1]+box[3])/2 <thresh and not int(name[1])==0:
continue
if (box[1]+box[3])/2>(param["img_size"][1]-2) and not o["box"][1]==param["img_size"][1]:
continue
box = rsvis.utils.bbox.BBox().get_minmax(box, dtype="cowc")
m = [o["box"][2], o["box"][0], o["box"][3], o["box"][1]]
box = [box[0]+o["box"][2], box[1]+o["box"][0], box[2]+o["box"][2], box[3]+o["box"][0]]
d = dist_med(box, m)
box_ap = True
for box_idx, box_d in enumerate(box_list):
iou, a, _, b, c = bb_intersection_over_union(box, box_d)
if iou > 0.35 or a>0.9:
box_ap = False
# box_list[box_idx][4][int(r[0])-1] += 1
if box_d[5]<float(r[5])*100:
box_list[box_idx][4]=r[0]
box_list[box_idx][5]=float(r[5])*100
if b > c:
box_list[box_idx] = [*box, box_list[box_idx][4], float(r[5])*100, d]
# # box = [float(r[1])*255*param["factor"][0], float(r[2])*255*param["factor"][1], float(r[3])*255*param["factor"][0], float(r[4])*255*param["factor"][1]]
if box_ap:
box_list.append([*box, r[0], float(r[5])*100, d])
#box_list.append([*box, np.zeros([10]), float(r[5])*100, d])
# box_list[-1][4][int(r[0])-1] += 1
msg = ""
for b in box_list:
msg = "{}{} : {} : [{}, {}, {}, {}]\n".format(
msg, label_id[b[4]], b[5], b[0], b[1], b[2], b[3])
# msg, label_id[str(np.argmax(b[4])+1)], b[5], b[0], b[1], b[2], b[3])
src_path = img.get_img_from_label(param["label"]).path
images_log_out(src_path, msg) |
# Write a function that accepts two input lists and returns a new list
# which contains only the unique elements from both lists.
def unique_both_lists(list1, list2):
    # iterate over a combined copy so that neither input list is mutated
    listaFinal = []
    for x in list1 + list2:
        if x not in listaFinal:
            listaFinal.append(x)
    return listaFinal
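# e.g. unique_both_lists([1, 2, 2], [2, 3]) returns [1, 2, 3], preserving order
# of first appearance. For hashable elements, list(dict.fromkeys(list1 + list2))
# is an equivalent one-liner.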
|
import pytest
import numpy as np
from edflow.debug import DebugDataset
from edflow.data.agnostics.subdataset import SubDataset
def test_sub():
D = DebugDataset(10)
I = np.array([9, 1, 2, 4, 3, 5, 7, 6, 8, 0])
S = SubDataset(D, I)
ref0 = {"val": 9, "other": 9, "index_": 0}
ref2 = {"val": 2, "other": 2, "index_": 2}
ref6 = {"val": 7, "other": 7, "index_": 6}
assert S[0] == ref0
assert S[2] == ref2
assert S[6] == ref6
assert all(S.labels["label1"] == I)
|
from django.utils.deprecation import MiddlewareMixin
from common import errors
from common.errors import LogicException, LogicError
from lib.http import render_json
from user.models import Users
class AuthMiddleware(MiddlewareMixin):
WHITE_LIST = [
'/api/user/verify-phone',
'/api/user/login',
]
    def process_request(self, request):
if request.path in self.WHITE_LIST:
return None
uid = request.session.get('uid')
if uid is None:
return render_json(code=errors.LOGIN_REQUIRED)
        try:
            request.user = Users.objects.get(id=uid)
        except Users.DoesNotExist:
            # stale session: the referenced user no longer exists
            return render_json(code=errors.LOGIN_REQUIRED)
class LogicExceptionMiddleware(MiddlewareMixin):
def process_exception(self, request, exception):
if isinstance(exception, (LogicException, LogicError)):
return render_json(code=exception.code) |
number = 2 ** 38
print(number) |
# Concord
#
# Copyright (c) 2019 VMware, Inc. All Rights Reserved.
#
# This product is licensed to you under the Apache 2.0 license (the "License").
# You may not use this product except in compliance with the Apache 2.0 License.
#
# This product may include a number of subcomponents with separate copyright
# notices and license terms. Your use of these subcomponents is subject to the
# terms and conditions of the subcomponent's license, as noted in the LICENSE
# file.
import os.path
import random
import unittest
from os import environ
import trio
from util import blinking_replica
from util import skvbc as kvbc
from util.bft import with_trio, with_bft_network, with_constant_load, KEY_FILE_PREFIX
def start_replica_cmd(builddir, replica_id):
"""
Return a command that starts an skvbc replica when passed to
subprocess.Popen.
    Note: each argument is an element in the list.
"""
statusTimerMilli = "500"
viewChangeTimeoutMilli = "10000"
path = os.path.join(builddir, "tests", "simpleKVBC", "TesterReplica", "skvbc_replica")
return [path,
"-k", KEY_FILE_PREFIX,
"-i", str(replica_id),
"-s", statusTimerMilli,
"-v", viewChangeTimeoutMilli,
"-p" if os.environ.get('BUILD_ROCKSDB_STORAGE', "").lower()
in set(["true", "on"])
else "",
"-t", os.environ.get('STORAGE_TYPE')]
class SkvbcCheckpointTest(unittest.TestCase):
__test__ = False # so that PyTest ignores this test scenario
@with_trio
@with_bft_network(start_replica_cmd)
async def test_checkpoint_creation(self, bft_network):
"""
Test the creation of checkpoints (independently of state transfer or view change)
Start all replicas, then send a sufficient number of client requests to trigger the
checkpoint protocol. Then make sure a checkpoint is created and agreed upon by all replicas.
"""
bft_network.start_all_replicas()
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
checkpoint_before = await bft_network.wait_for_checkpoint(replica_id=0)
await skvbc.fill_and_wait_for_checkpoint(
initial_nodes=bft_network.all_replicas(),
checkpoint_num=1,
verify_checkpoint_persistency=False
)
checkpoint_after = await bft_network.wait_for_checkpoint(replica_id=0)
self.assertEqual(checkpoint_after, 1 + checkpoint_before)
|
'''
Midpoint of Linked list
For a given singly linked list of integers, find and return the node present at the middle of the list.
Note :
If the length of the singly linked list is even, then return the first middle node.
Example: Consider 10 -> 20 -> 30 -> 40 as the given list; the nodes present at the middle have data values 20 and 30. We return the first node, with data 20.
Input format :
The first line contains an Integer 't' which denotes the number of test cases or queries to be run. Then the test cases follow.
The first and the only line of each test case or query contains the elements of the singly linked list separated by a single space.
Remember/Consider :
While specifying the list elements for input, -1 indicates the end of the singly linked list and hence, would never be a list element
Output Format :
For each test case/query, print the data value of the node at the middle of the given list.
Output for every test case will be printed in a separate line.
Constraints :
1 <= t <= 10^2
0 <= M <= 10^5
Where M is the size of the singly linked list.
Time Limit: 1sec
Sample Input 1 :
1
1 2 3 4 5 -1
Sample Output 1 :
3
Sample Input 2 :
2
-1
1 2 3 4 -1
Sample Output 2 :
2
'''
class Node:
def __init__(self, data):
self.data = data
self.next = None
def take_input():
inputList = [int(ele) for ele in input().split()]
head = None
tail = None
for currData in inputList:
if currData == -1:
break
newNode = Node(currData)
if head is None:
head = newNode
tail = newNode
else:
tail.next = newNode
tail = newNode
return head
def printLL(head):
while head is not None:
print(str(head.data) + ' -> ', end = '')
head = head.next
print('None')
return
def midPoint(head):
if head is None:
return
slow = head
fast = head
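    # Slow/fast pointer technique: fast advances two nodes per step while slow
    # advances one, so when fast can no longer take two full steps, slow rests
    # on the first middle node (as required for even-length lists)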
while fast.next is not None and fast.next.next is not None:
slow = slow.next
fast = fast.next.next
return slow
# Driver: read 't' test cases and print the middle node's data for each,
# matching the input/output format described above
t = int(input())
for _ in range(t):
    head = take_input()
    mid = midPoint(head)
    if mid is not None:
        print(mid.data)
#!/usr/bin/env python3
# Copyright (c) 2008-9 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
import math
import sys
from PyQt4.QtCore import (QDate, QRectF, QString, Qt, SIGNAL)
from PyQt4.QtGui import (QApplication, QDialog, QFont, QFontMetrics,
QHBoxLayout, QPainter, QPixmap, QPrintDialog, QPrinter,
QPushButton, QTableWidget, QTableWidgetItem, QTextBlockFormat,
QTextCharFormat, QTextCursor, QTextDocument, QTextFormat,
QTextOption, QTextTableFormat, QVBoxLayout)
import qrc_resources
DATE_FORMAT = "MMM d, yyyy"
class Statement(object):
def __init__(self, company, contact, address):
self.company = company
self.contact = contact
self.address = address
self.transactions = [] # List of (QDate, float) two-tuples
def balance(self):
return sum([amount for date, amount in self.transactions])
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.printer = QPrinter()
self.printer.setPageSize(QPrinter.Letter)
self.generateFakeStatements()
self.table = QTableWidget()
self.populateTable()
cursorButton = QPushButton("Print via Q&Cursor")
htmlButton = QPushButton("Print via &HTML")
painterButton = QPushButton("Print via Q&Painter")
quitButton = QPushButton("&Quit")
buttonLayout = QHBoxLayout()
buttonLayout.addWidget(cursorButton)
buttonLayout.addWidget(htmlButton)
buttonLayout.addWidget(painterButton)
buttonLayout.addStretch()
buttonLayout.addWidget(quitButton)
layout = QVBoxLayout()
layout.addWidget(self.table)
layout.addLayout(buttonLayout)
self.setLayout(layout)
self.connect(cursorButton, SIGNAL("clicked()"),
self.printViaQCursor)
self.connect(htmlButton, SIGNAL("clicked()"),
self.printViaHtml)
self.connect(painterButton, SIGNAL("clicked()"),
self.printViaQPainter)
self.connect(quitButton, SIGNAL("clicked()"), self.accept)
self.setWindowTitle("Printing")
def generateFakeStatements(self):
self.statements = []
statement = Statement("Consality", "Ms S. Royal",
"234 Rue Saint Hyacinthe, 750201, Paris")
statement.transactions.append((QDate(2007, 8, 11), 2342))
statement.transactions.append((QDate(2007, 9, 10), 2342))
statement.transactions.append((QDate(2007, 10, 9), 2352))
statement.transactions.append((QDate(2007, 10, 17), -1500))
statement.transactions.append((QDate(2007, 11, 12), 2352))
statement.transactions.append((QDate(2007, 12, 10), 2352))
statement.transactions.append((QDate(2007, 12, 20), -7500))
statement.transactions.append((QDate(2007, 12, 20), 250))
statement.transactions.append((QDate(2008, 1, 10), 2362))
self.statements.append(statement)
statement = Statement("Demamitur Plc", "Mr G. Brown",
"14 Tall Towers, Tower Hamlets, London, WC1 3BX")
statement.transactions.append((QDate(2007, 5, 21), 871))
statement.transactions.append((QDate(2007, 6, 20), 542))
statement.transactions.append((QDate(2007, 7, 20), 1123))
statement.transactions.append((QDate(2007, 7, 20), -1928))
statement.transactions.append((QDate(2007, 8, 13), -214))
statement.transactions.append((QDate(2007, 9, 15), -3924))
statement.transactions.append((QDate(2007, 9, 15), 2712))
statement.transactions.append((QDate(2007, 9, 15), -273))
#statement.transactions.append((QDate(2007, 11, 8), -728))
#statement.transactions.append((QDate(2008, 2, 7), 228))
#statement.transactions.append((QDate(2008, 3, 13), -508))
#statement.transactions.append((QDate(2008, 3, 22), -2481))
#statement.transactions.append((QDate(2008, 4, 5), 195))
self.statements.append(statement)
def populateTable(self):
headers = ["Company", "Contact", "Address", "Balance"]
self.table.setColumnCount(len(headers))
self.table.setHorizontalHeaderLabels(headers)
self.table.setRowCount(len(self.statements))
for row, statement in enumerate(self.statements):
self.table.setItem(row, 0, QTableWidgetItem(statement.company))
self.table.setItem(row, 1, QTableWidgetItem(statement.contact))
self.table.setItem(row, 2, QTableWidgetItem(statement.address))
item = QTableWidgetItem(QString("$ %L1").arg(
float(statement.balance()), 0, "f", 2))
item.setTextAlignment(Qt.AlignRight|Qt.AlignVCenter)
self.table.setItem(row, 3, item)
self.table.resizeColumnsToContents()
def printViaHtml(self):
html = ""
for statement in self.statements:
date = QDate.currentDate().toString(DATE_FORMAT)
address = Qt.escape(statement.address).replace(
",", "<br>")
contact = Qt.escape(statement.contact)
balance = statement.balance()
html += ("<p align=right><img src=':/logo.png'></p>"
"<p align=right>Greasy Hands Ltd."
"<br>New Lombard Street"
"<br>London<br>WC13 4PX<br>{0}</p>"
"<p>{1}</p><p>Dear {2},</p>"
"<p>The balance of your account is {3}.").format(
date, address, contact, QString("$ %L1").arg(
float(balance), 0, "f", 2))
if balance < 0:
html += (" <p><font color=red><b>Please remit the "
"amount owing immediately.</b></font>")
else:
html += (" We are delighted to have done business "
"with you.")
html += ("</p><p> </p><p>"
"<table border=1 cellpadding=2 "
"cellspacing=2><tr><td colspan=3>"
"Transactions</td></tr>")
for date, amount in statement.transactions:
color, status = "black", "Credit"
if amount < 0:
color, status = "red", "Debit"
html += ("<tr><td align=right>{0}</td>"
"<td>{1}</td><td align=right>"
"<font color={2}>{3}</font></td></tr>".format(
date.toString(DATE_FORMAT), status, color,
QString("$ %L1").arg(
float(abs(amount)), 0, "f", 2)))
html += ("</table></p><p style='page-break-after:always;'>"
"We hope to continue doing "
"business with you,<br>Yours sincerely,"
"<br><br>K. Longrey, Manager</p>")
dialog = QPrintDialog(self.printer, self)
if dialog.exec_():
document = QTextDocument()
document.setHtml(html)
document.print_(self.printer)
def printViaQCursor(self):
dialog = QPrintDialog(self.printer, self)
if not dialog.exec_():
return
logo = QPixmap(":/logo.png")
headFormat = QTextBlockFormat()
headFormat.setAlignment(Qt.AlignLeft)
headFormat.setTextIndent(
self.printer.pageRect().width() - logo.width() - 216)
bodyFormat = QTextBlockFormat()
bodyFormat.setAlignment(Qt.AlignJustify)
lastParaBodyFormat = QTextBlockFormat(bodyFormat)
lastParaBodyFormat.setPageBreakPolicy(
QTextFormat.PageBreak_AlwaysAfter)
rightBodyFormat = QTextBlockFormat()
rightBodyFormat.setAlignment(Qt.AlignRight)
headCharFormat = QTextCharFormat()
headCharFormat.setFont(QFont("Helvetica", 10))
bodyCharFormat = QTextCharFormat()
bodyCharFormat.setFont(QFont("Times", 11))
redBodyCharFormat = QTextCharFormat(bodyCharFormat)
redBodyCharFormat.setForeground(Qt.red)
tableFormat = QTextTableFormat()
tableFormat.setBorder(1)
tableFormat.setCellPadding(2)
document = QTextDocument()
cursor = QTextCursor(document)
mainFrame = cursor.currentFrame()
page = 1
for statement in self.statements:
cursor.insertBlock(headFormat, headCharFormat)
cursor.insertImage(":/logo.png")
for text in ("Greasy Hands Ltd.", "New Lombard Street",
"London", "WC13 4PX",
QDate.currentDate().toString(DATE_FORMAT)):
cursor.insertBlock(headFormat, headCharFormat)
cursor.insertText(text)
for line in statement.address.split(", "):
cursor.insertBlock(bodyFormat, bodyCharFormat)
cursor.insertText(line)
cursor.insertBlock(bodyFormat)
cursor.insertBlock(bodyFormat, bodyCharFormat)
cursor.insertText("Dear {0},".format(statement.contact))
cursor.insertBlock(bodyFormat)
cursor.insertBlock(bodyFormat, bodyCharFormat)
balance = statement.balance()
cursor.insertText(QString(
"The balance of your account is $ %L1.").arg(
float(balance), 0, "f", 2))
if balance < 0:
cursor.insertBlock(bodyFormat, redBodyCharFormat)
cursor.insertText("Please remit the amount owing "
"immediately.")
else:
cursor.insertBlock(bodyFormat, bodyCharFormat)
cursor.insertText("We are delighted to have done "
"business with you.")
cursor.insertBlock(bodyFormat, bodyCharFormat)
cursor.insertText("Transactions:")
table = cursor.insertTable(len(statement.transactions), 3,
tableFormat)
row = 0
for date, amount in statement.transactions:
cellCursor = table.cellAt(row, 0).firstCursorPosition()
cellCursor.setBlockFormat(rightBodyFormat)
cellCursor.insertText(date.toString(DATE_FORMAT),
bodyCharFormat)
cellCursor = table.cellAt(row, 1).firstCursorPosition()
if amount > 0:
cellCursor.insertText("Credit", bodyCharFormat)
else:
cellCursor.insertText("Debit", bodyCharFormat)
cellCursor = table.cellAt(row, 2).firstCursorPosition()
cellCursor.setBlockFormat(rightBodyFormat)
format = bodyCharFormat
if amount < 0:
format = redBodyCharFormat
cellCursor.insertText(QString("$ %L1").arg(
float(amount), 0, "f", 2), format)
row += 1
cursor.setPosition(mainFrame.lastPosition())
cursor.insertBlock(bodyFormat, bodyCharFormat)
cursor.insertText("We hope to continue doing business "
"with you,")
cursor.insertBlock(bodyFormat, bodyCharFormat)
cursor.insertText("Yours sincerely")
cursor.insertBlock(bodyFormat)
if page == len(self.statements):
cursor.insertBlock(bodyFormat, bodyCharFormat)
else:
cursor.insertBlock(lastParaBodyFormat, bodyCharFormat)
cursor.insertText("K. Longrey, Manager")
page += 1
document.print_(self.printer)
def printViaQPainter(self):
dialog = QPrintDialog(self.printer, self)
if not dialog.exec_():
return
LeftMargin = 72
sansFont = QFont("Helvetica", 10)
sansLineHeight = QFontMetrics(sansFont).height()
serifFont = QFont("Times", 11)
fm = QFontMetrics(serifFont)
DateWidth = fm.width(" September 99, 2999 ")
CreditWidth = fm.width(" Credit ")
AmountWidth = fm.width(" W999999.99 ")
serifLineHeight = fm.height()
logo = QPixmap(":/logo.png")
painter = QPainter(self.printer)
pageRect = self.printer.pageRect()
page = 1
for statement in self.statements:
painter.save()
y = 0
x = pageRect.width() - logo.width() - LeftMargin
painter.drawPixmap(x, 0, logo)
y += logo.height() + sansLineHeight
painter.setFont(sansFont)
painter.drawText(x, y, "Greasy Hands Ltd.")
y += sansLineHeight
painter.drawText(x, y, "New Lombard Street")
y += sansLineHeight
painter.drawText(x, y, "London")
y += sansLineHeight
painter.drawText(x, y, "WC13 4PX")
y += sansLineHeight
painter.drawText(x, y,
QDate.currentDate().toString(DATE_FORMAT))
y += sansLineHeight
painter.setFont(serifFont)
x = LeftMargin
for line in statement.address.split(", "):
painter.drawText(x, y, line)
y += serifLineHeight
y += serifLineHeight
painter.drawText(x, y, "Dear {0},".format(statement.contact))
y += serifLineHeight
balance = statement.balance()
painter.drawText(x, y, QString("The balance of your "
"account is $ %L1").arg(float(balance), 0, "f", 2))
y += serifLineHeight
if balance < 0:
painter.setPen(Qt.red)
text = "Please remit the amount owing immediately."
else:
text = ("We are delighted to have done business "
"with you.")
painter.drawText(x, y, text)
painter.setPen(Qt.black)
y += int(serifLineHeight * 1.5)
painter.drawText(x, y, "Transactions:")
y += serifLineHeight
option = QTextOption(Qt.AlignRight|Qt.AlignVCenter)
for date, amount in statement.transactions:
x = LeftMargin
h = int(fm.height() * 1.3)
painter.drawRect(x, y, DateWidth, h)
painter.drawText(
QRectF(x + 3, y + 3, DateWidth - 6, h - 6),
date.toString(DATE_FORMAT), option)
x += DateWidth
painter.drawRect(x, y, CreditWidth, h)
text = "Credit"
if amount < 0:
text = "Debit"
painter.drawText(
QRectF(x + 3, y + 3, CreditWidth - 6, h - 6),
text, option)
x += CreditWidth
painter.drawRect(x, y, AmountWidth, h)
if amount < 0:
painter.setPen(Qt.red)
painter.drawText(
QRectF(x + 3, y + 3, AmountWidth - 6, h - 6),
QString("$ %L1").arg(float(amount), 0, "f", 2),
option)
painter.setPen(Qt.black)
y += h
y += serifLineHeight
x = LeftMargin
painter.drawText(x, y, "We hope to continue doing "
"business with you,")
y += serifLineHeight
painter.drawText(x, y, "Yours sincerely")
y += serifLineHeight * 3
painter.drawText(x, y, "K. Longrey, Manager")
x = LeftMargin
y = pageRect.height() - 72
painter.drawLine(x, y, pageRect.width() - LeftMargin, y)
y += 2
font = QFont("Helvetica", 9)
font.setItalic(True)
painter.setFont(font)
option = QTextOption(Qt.AlignCenter)
option.setWrapMode(QTextOption.WordWrap)
painter.drawText(
QRectF(x, y, pageRect.width() - 2 * LeftMargin, 31),
"The contents of this letter are for information "
"only and do not form part of any contract.",
option)
page += 1
if page <= len(self.statements):
self.printer.newPage()
painter.restore()
if __name__ == "__main__":
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._assets_operations import AssetsOperations
from ._extensive_model_operations import ExtensiveModelOperations
from ._migration_operations import MigrationOperations
from ._models_operations import ModelsOperations
__all__ = [
'AssetsOperations',
'ExtensiveModelOperations',
'MigrationOperations',
'ModelsOperations',
]
|
import numpy as np
import rospy
import fasttext
class StaticWordEmbeddings(object):
""" Utility class to work with fasttext static word embeddings
"""
def __init__(self, pretrained_embeddings_file):
""" Static word embedding model contructor
"""
self.model = fasttext.load_model(pretrained_embeddings_file)
def compute_word_vector(self, word):
""" Compute a word vector (even for unknown words as it use subwords)
"""
return self.model.get_word_vector(word)
def compute_sentence_vector(self, sentence):
""" Compute fast sentence vector
"""
return np.average(np.array([self.compute_word_vector(w) for w in sentence.split()]), axis=0)
def get_similar_words(self, word):
""" Return the most similar words
"""
return self.model.get_nearest_neighbors(word)
def get_most_similar_word(self, word):
""" Return the most similar known word
"""
score, word = self.model.get_nearest_neighbors(word)[0]
return word
    def get_unknown_words(self, sentence):
""" Return the unknown words of the given sentence
"""
return [w for w in sentence.split() if w not in self.model]
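# A minimal usage sketch (illustrative: the model path below is hypothetical;
# any pretrained fasttext .bin file works):
# embeddings = StaticWordEmbeddings('cc.en.300.bin')
# vec = embeddings.compute_sentence_vector('hello world')
# print(vec.shape)  # (300,) for the 300-dimensional English vectors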
|
"""
Script to divide data into training, validation and testing datasets.
"""
import os
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit
DATA_DIR = "data/"
ORIG_DATA_DIR = os.path.join(DATA_DIR, "orig_data/nlp-getting-started")
TRAIN_DIR = os.path.join(DATA_DIR, "training_data")
VALID_DIR = os.path.join(DATA_DIR, "validation_data")
TEST_DIR = os.path.join(DATA_DIR, "testing_data")
LUCKY_SEED = 42
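# Ensure the output directories exist before writing (a small safeguard; the
# original script assumes they are already present)
for _split_dir in (TRAIN_DIR, VALID_DIR, TEST_DIR):
    os.makedirs(_split_dir, exist_ok=True)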
def read_csv_data(path):
return pd.read_csv(path)
def stratified_split(data, n_splits=1, split_ratio=0.2):
split = StratifiedShuffleSplit(n_splits=n_splits, test_size=split_ratio, random_state=LUCKY_SEED)
for train_index, test_index in split.split(data, data["target"]):
train_set = data.loc[train_index]
test_set = data.loc[test_index]
return train_set, test_set
def save_to_multiple_csv_files(data, save_path, name_prefix, header, n_parts=10):
path_format = os.path.join(save_path, "{}_{:02d}.csv")
filepaths = []
m = len(data)
for file_idx, row_indices in enumerate(np.array_split(np.arange(m), n_parts)):
part_csv = path_format.format(name_prefix, file_idx)
filepaths.append(part_csv)
with open(part_csv, "wt", encoding="utf-8") as f:
if header is not None:
f.write(header)
f.write("\n")
for row_idx in row_indices:
f.write(",".join([repr(col) for col in data[row_idx]]))
f.write("\n")
return filepaths
def perform_split():
data = read_csv_data(os.path.join(ORIG_DATA_DIR, "train.csv"))
data = data.sample(frac=1, random_state=LUCKY_SEED).reset_index(drop=True)[["text", "target"]]
train_data, val_data = stratified_split(data)
header_cols = ["text", "target"]
header = ",".join(header_cols)
print(save_to_multiple_csv_files(train_data.values, TRAIN_DIR, "train", header, n_parts=10))
print(save_to_multiple_csv_files(val_data.values, VALID_DIR, "val", header, n_parts=2))
test_data = read_csv_data(os.path.join(ORIG_DATA_DIR, "test.csv"))
test_data[["id", "text"]].to_csv(os.path.join(TEST_DIR, "test.csv"), index=None, encoding="utf-8")
if __name__ == "__main__":
perform_split()
|
import pytest
from tartiflette import Scalar, create_engine
from tartiflette.scalar.builtins.string import ScalarString
from tartiflette.types.exceptions.tartiflette import GraphQLSchemaError
@pytest.mark.asyncio
async def test_issue370_double_values():
with pytest.raises(
GraphQLSchemaError,
match="""
0: Enum < Invalid > is invalid, Value < VALUE_3 > is not unique
1: Enum < Invalid > is invalid, Value < VALUE_2 > is not unique""",
):
await create_engine(
"""
enum Invalid {
VALUE_1
VALUE_2
VALUE_3
VALUE_3
VALUE_2
}
type Query {
field1: Invalid
}
""",
schema_name="test_issue370_uniqueness",
)
@pytest.mark.asyncio
async def test_issue370_type_name_enum_value_mismatch():
@Scalar("aType", schema_name="test_issue370_type_name_enum_value_mismatch")
class _(ScalarString):
pass
assert (
await create_engine(
"""
scalar aType
enum Valid {
VALUE_1
VALUE_2
VALUE_3
aType
}
type Query {
field1: Valid
}
""",
schema_name="test_issue370_type_name_enum_value_mismatch",
)
is not None
)
|
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
def maxSubArray(nums):
n = len(nums)
    # Base case: stop dividing once only one element remains
if n == 1:
return nums[0]
left = maxSubArray(nums[:n // 2])
right = maxSubArray(nums[n // 2:])
max_l = nums[n // 2 - 1]
tmp = 0
for i in range(n // 2 - 1, -1, -1):
tmp += nums[i]
max_l = max(tmp, max_l)
max_r = nums[n // 2]
tmp = 0
for i in range(n // 2, n):
tmp += nums[i]
max_r = max(tmp, max_r)
return max(left, right, max_l + max_r)
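# Divide and conquer: the answer is the best of the left half, the right half,
# and the best subarray crossing the midpoint (max_l + max_r), giving the
# recurrence T(n) = 2T(n/2) + O(n), i.e. O(n log n) overall.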
print(maxSubArray(nums))
|
#!/usr/bin/env python
# Copyright (c) 2018, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from FINN.core.config import FINN_ROOT
import FINN.core.nn as nn
import FINN.core.device as device
import FINN.core.perf_model as pm
from FINN.frontend.caffeloader import CaffeLoader
from FINN.core.coverification import testOnMNIST
import FINN.transforms.transformations as transform
import FINN.backend.fpga.backend_fpga as fpga_backend
import copy
import numpy as np
import sys
import tempfile
import shutil
class TestHWGQLFCw1a1(unittest.TestCase):
"""Test HWGQ network import and streamlining using a small binarized FC net."""
def setUp(self):
nname = "lfc-w1a1"
proto = FINN_ROOT + "/inputs/%s.prototxt" % nname
weights = FINN_ROOT + "/inputs/%s.caffemodel" % nname
l = CaffeLoader(weights, proto)
self.net = nn.NN(l)
frequency = 300
self.dev = device.Device('XLNX:VU9P.json', frequency)
self.streamlined_net = copy.deepcopy(self.net)
print self.streamlined_net.layers
self.streamlined_net.layers = transform.makeCromulent(
self.streamlined_net.layers)
print self.streamlined_net.layers
# use the first numImagesToTest of the test set for verification
self.numImagesToTest = 1000
# expected number of successful predictions
self.ok_golden = 967
# expected number of unsuccessful predictions
self.nok_golden = 33
def test_fpgabackend_rawhls(self):
# resource allocation function to set number of PE/SIMD per layer
# the allocation is statically determined for this test case.
def res_alloc_predetermined(pipeline, net, dev):
ret_pipeline = copy.deepcopy(pipeline)
print "PIPELINE: ", ret_pipeline
net.layers = ret_pipeline
perfmodel = pm.PerfModel(net, dev)
fps = perfmodel.maximise_fps()
for i in range(len(ret_pipeline)):
ret_pipeline[i].simd = perfmodel.SIMD[i]
print "SIMD:", ret_pipeline[i].simd
ret_pipeline[i].pe = perfmodel.PE[i]
print "PE:", ret_pipeline[i].pe
ret_pipeline[i].mmv = perfmodel.MMV[i]
print "MMV:", ret_pipeline[i].mmv
return ret_pipeline
dirpath = tempfile.mkdtemp()
# pick all layers except first (input quantization) and last
# (final batchnorm) of the streamlined network
hlslayers = self.streamlined_net.layers[1:-1]
# call the FPGA backend to generate HLS and compile raw HLS sim
print "Synthesising"
ret = fpga_backend.synthesize(
hlslayers, self.net, self.dev, res_alloc_predetermined, dirpath, "sfcall-")
print "Synthesised"
print ret.getFPGAPerformanceModel().SIMD
print ret.getFPGAPerformanceModel().PE
print ret.getFPGAPerformanceModel().MMV
print ret.ir
print ret.getFPGAPerformanceModel().network_utilisation()
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestHWGQLFCw1a1)
unittest.TextTestRunner(verbosity=2).run(suite)
|
#!/usr/bin/env python
# license removed for brevity
# Directed Study, Spring 2020
# Charles DeLorey
# Department of Computer Science, Tufts University
# Example publisher for Arduino-based robotic hand
# Sends Adc message (6 uint16's), ignoring last, to hand to set joint angle
import sys
import rospy
import serial
from itertools import cycle, chain
from std_msgs.msg import UInt16
from rosserial_arduino.msg import Adc
#port = "dev/ttyACM0"
#baud = 9600
joint_msg = Adc() #using predefined message, as custom message is not working
joint_msg.adc0 = 30
joint_msg.adc1 = 30
joint_msg.adc2 = 30
joint_msg.adc3 = 30
joint_msg.adc4 = 30
joint_msg.adc5 = 0 #no 6th finger, so this part of the message is not used
def talker():
pub = rospy.Publisher('servo', Adc, queue_size=10)
rospy.init_node('talker', anonymous=True)
rate = rospy.Rate(10) # 10hz
while not rospy.is_shutdown():
pub.publish(joint_msg)
rate.sleep()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
|
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
from flask.ext.script import Manager, Command
from security_monkey import app, db
from security_monkey.datastore import Datastore
from security_monkey.common.route53 import Route53Service
from gunicorn.app.base import Application
from flask.ext.migrate import Migrate, MigrateCommand
from security_monkey import run_change_reporter as sm_run_change_reporter
from security_monkey import find_rds_changes as sm_find_rds_changes
from security_monkey import find_elb_changes as sm_find_elb_changes
from security_monkey import find_iamssl_changes as sm_find_iamssl_changes
from security_monkey import find_sg_changes as sm_find_sg_changes
from security_monkey import find_s3_changes as sm_find_s3_changes
from security_monkey import find_iamuser_changes as sm_find_iamuser_changes
from security_monkey import find_iamgroup_changes as sm_find_iamgroup_changes
from security_monkey import find_iamrole_changes as sm_find_iamrole_changes
from security_monkey import find_keypair_changes as sm_find_keypair_changes
from security_monkey import find_sqs_changes as sm_find_sqs_changes
from security_monkey import find_sns_changes as sm_find_sns_changes
from security_monkey import audit_sns as sm_audit_sns
from security_monkey import audit_sg as sm_audit_sg
from security_monkey import audit_rds as sm_audit_rds
from security_monkey import audit_s3 as sm_audit_s3
from security_monkey import audit_iamuser as sm_audit_iamuser
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
@manager.command
def create_db():
""" Creates a database with all of the tables defined in
your Alchemy models.
DEPRECATED. Use `python manage.py db upgrade`
"""
#db.create_all()
raise Exception("Received a call to create_db. Instead, please allow flask-migrate to create " +
"the database by calling `python manage.py db upgrade`.")
@manager.command
def drop_db():
""" Drops the database.
"""
db.drop_all()
@manager.command
def test_datastore():
""" Tries to save garbage data to the DB """
datastore = Datastore()
mydict = {"fingerprint": "9d:bc:c5:f3:a6:12:9e:0b:b5:f3:3c:93:0e:32:78:80:9c:a9:ce:8c"}
datastore.store("keypair", "us-east-1", "seg", "myname", True, mydict)
for itemrevision in datastore.get("keypair", "us-east-1", "seg", "myname"):
print itemrevision.__dict__
@manager.command
def run_change_reporter(accounts):
""" Runs Reporter """
sm_run_change_reporter(accounts)
#### CHANGE WATCHERS ####
@manager.command
def find_elb_changes(accounts):
""" Runs watchers/elb"""
sm_find_elb_changes(accounts)
@manager.command
def find_iamssl_changes(accounts):
""" Runs watchers/iam_ssl"""
sm_find_iamssl_changes(accounts)
@manager.command
def find_rds_changes(accounts):
""" Runs watchers/rds_security_group"""
sm_find_rds_changes(accounts)
@manager.command
def find_sg_changes(accounts):
""" Runs watchers/security_group"""
sm_find_sg_changes(accounts)
@manager.command
def find_s3_changes(accounts):
""" Runs watchers/s3"""
sm_find_s3_changes(accounts)
@manager.command
def find_iamuser_changes(accounts):
""" Runs watchers/iamuser"""
sm_find_iamuser_changes(accounts)
@manager.command
def find_iamgroup_changes(accounts):
""" Runs watchers/iamgroup"""
sm_find_iamgroup_changes(accounts)
@manager.command
def find_iamrole_changes(accounts):
""" Runs watchers/iamrole"""
sm_find_iamrole_changes(accounts)
@manager.command
def find_keypair_changes(accounts):
""" Runs watchers/keypair"""
sm_find_keypair_changes(accounts)
@manager.command
def find_sqs_changes(accounts):
""" Runs watchers/sqs"""
sm_find_sqs_changes(accounts)
@manager.command
def find_sns_changes(accounts):
""" Runs watchers/sns """
sm_find_sns_changes(accounts)
#### AUDITORS ####
@manager.option('-a', '--accounts', dest='accounts', type=unicode, default=u'all')
@manager.option('-r', '--send_report', dest='send_report', type=bool, default=False)
def audit_sns(accounts, send_report):
""" Runs auditors/sns """
sm_audit_sns(accounts, send_report)
@manager.option('-a', '--accounts', dest='accounts', type=unicode, default=u'all')
@manager.option('-r', '--send_report', dest='send_report', type=bool, default=False)
def audit_sg(accounts, send_report):
""" Runs auditors/security_group """
sm_audit_sg(accounts, send_report)
@manager.option('-a', '--accounts', dest='accounts', type=unicode, default=u'all')
@manager.option('-r', '--send_report', dest='send_report', type=bool, default=False)
def audit_rds(accounts, send_report):
""" Runs auditors/rds_security_group """
sm_audit_rds(accounts, send_report)
@manager.option('-a', '--accounts', dest='accounts', type=unicode, default=u'all')
@manager.option('-r', '--send_report', dest='send_report', type=bool, default=False)
def audit_s3(accounts, send_report):
""" Runs auditors/s3 """
sm_audit_s3(accounts, send_report)
@manager.option('-a', '--accounts', dest='accounts', type=unicode, default=u'all')
@manager.option('-r', '--send_report', dest='send_report', type=bool, default=False)
def audit_iamuser(accounts, send_report):
""" Runs auditors/iam_user """
sm_audit_iamuser(accounts, send_report)
@manager.command
def start_scheduler():
""" starts the python scheduler to run the watchers and auditors"""
from security_monkey import scheduler
import security_monkey
security_monkey.setup_scheduler()
scheduler.start()
class APIServer(Command):
def __init__(self, host='127.0.0.1', port=7102, workers=6):
self.host = host
self.port = port
self.workers = workers
def handle(self, app, *args, **kwargs):
if app.config.get('USE_ROUTE53'):
route53 = Route53Service()
route53.register(app.config.get('FQDN'), exclusive=True)
workers = self.workers
class FlaskApplication(Application):
def init(self, parser, opts, args):
return {
'bind': '{}:{}'.format(
'127.0.0.1',
app.config.get('API_PORT')
),
'workers': workers
}
def load(self):
return app
FlaskApplication().run()
if __name__ == "__main__":
manager.add_command("run_api_server", APIServer())
manager.run()
|
def strings(w):
    # Print the characters of w in reverse order, one per line
    for letter in reversed(w):
        print(letter)
w = input("Enter a word:\n")
strings(w)
|
'''
The match module allows for match routines to be run and determine target
specs.
'''
import salt.minion
__func_alias__ = {
'list_': 'list'
}
def compound(tgt):
'''
Return True if the minion matches the given compound target
CLI Example::
salt '*' match.compound 'L@cheese,foo and *'
'''
matcher = salt.minion.Matcher(__opts__, __salt__)
try:
return matcher.compound_match(tgt)
except Exception:
return False
def ipcidr(tgt):
'''
Return True if the minion matches the given ipcidr target
CLI Example::
salt '*' match.ipcidr '192.168.44.0/24'
'''
matcher = salt.minion.Matcher(__opts__, __salt__)
try:
return matcher.ipcidr_match(tgt)
except Exception:
return False
def pillar(tgt):
'''
Return True if the minion matches the given pillar target
CLI Example::
salt '*' match.pillar 'cheese:foo'
'''
matcher = salt.minion.Matcher(__opts__, __salt__)
try:
return matcher.pillar_match(tgt)
except Exception:
return False
def data(tgt):
'''
Return True if the minion matches the given data target
CLI Example::
salt '*' match.data 'spam:eggs'
'''
matcher = salt.minion.Matcher(__opts__, __salt__)
try:
return matcher.data_match(tgt)
except Exception:
return False
def grain_pcre(tgt):
'''
Return True if the minion matches the given grain_pcre target
CLI Example::
salt '*' match.grain_pcre 'os:Fedo.*'
'''
matcher = salt.minion.Matcher(__opts__, __salt__)
try:
return matcher.grain_pcre_match(tgt)
except Exception:
return False
def grain(tgt):
'''
Return True if the minion matches the given grain target
CLI Example::
salt '*' match.grain 'os:Ubuntu'
'''
matcher = salt.minion.Matcher(__opts__, __salt__)
try:
return matcher.grain_match(tgt)
except Exception:
return False
def list_(tgt):
'''
Return True if the minion matches the given list target
CLI Example::
salt '*' match.list 'server1,server2'
'''
matcher = salt.minion.Matcher(__opts__, __salt__)
try:
return matcher.list_match(tgt)
except Exception:
return False
def pcre(tgt):
'''
Return True if the minion matches the given pcre target
CLI Example::
salt '*' match.pcre '.*'
'''
matcher = salt.minion.Matcher(__opts__, __salt__)
try:
return matcher.pcre_match(tgt)
except Exception:
return False
def glob(tgt):
'''
Return True if the minion matches the given glob target
CLI Example::
salt '*' match.glob '*'
'''
matcher = salt.minion.Matcher(__opts__, __salt__)
try:
return matcher.glob_match(tgt)
except Exception:
return False
|
from enum import IntEnum
class SteamAppId(IntEnum):
HALF_LIFE_2 = 220
HALF_LIFE_2_EP_1 = 380
HALF_LIFE_2_EP_2 = 420
PORTAL = 400
PORTAL_2 = 620
LEFT_4_DEAD = 500
LEFT_4_DEAD_2 = 550
TEAM_FORTRESS_2 = 440
COUNTER_STRIKE_GO = 730
SOURCE_FILMMAKER = 1840
BLACK_MESA = 362890
|
import chancapmc
import numpy as np
import os
from itertools import product
def test_memory():
print("PID:", os.getpid())
input()
print("Created arrays in Python")
n = 6
a1 = np.arange(n**2, dtype=float).reshape([n]*2) * 1.3123
a2 = np.arange(n**3, dtype=float).reshape([n]*3)*1.5
a3 = np.arange(n*2, dtype=np.float64) + 1
a3 = a3[::2] # Check what happens when we use a non-contiguous view, does GETPTR still work?
for i in range(4):
input()
print("Launching chancapmc.ba_discretein_gaussout for a memory test")
chancapmc.ba_discretein_gaussout(a1, a2, a3, 20.) # captol > 10. for memory test
print("Final hold")
input()
return None
def test_blahut():
print("PID:", os.getpid())
input()
# Basically run the same two blahut-arimoto tests as in unittests.c
# but initialized from the Python interface.
# First, for a binary channel, two well-separated, symmetric gaussians
means = np.asarray([[6, 6], [-6, -6]], dtype=np.float64)
covs = np.asarray([[[1, 0], [0, 1]]]*2, dtype=np.float64)
inputs = np.asarray([1., 2.])
sd = 946373 # seed
cap_and_vec = chancapmc.ba_discretein_gaussout(means, covs, inputs, 5., sd)
print("Capacity found, for 2 gaussians:", cap_and_vec[0], "bits")
print("Optimal probability vector:", cap_and_vec[1])
print()
# Second, four symmetric gaussians with some overlap
means = np.asarray([[1, 1], [1, -1], [-1, 1], [-1, -1]], dtype=np.float64)
covs = np.asarray([[[1, 0], [0, 1]]]*4, dtype=np.float64)
inputs = np.asarray([1., 2., 3., 4.])
sd = 946373
cap_and_vec = chancapmc.ba_discretein_gaussout(means, covs, inputs, 5., sd)
print("Capacity found, for 4 gaussians:", cap_and_vec[0], "bits")
print("Optimal probability vector:", cap_and_vec[1])
def test_blahut_grabowski():
""" Code adapted from test cases in
https://github.com/pawel-czyz/channel-capacity-estimator
"""
meanvecs = np.zeros([8, 3])
covvecs = np.zeros([8, 3, 3])
corners = list(product(range(2),repeat=3))
for i in range(8):
means3, sigma = (corners[i], 0.25 + ((i + 1)/8 + sum(corners[i]))/10)
covar3 = sigma**2 * np.identity(3)
meanvecs[i] = np.asarray(means3)
covvecs[i] = np.asarray(covar3)
print(meanvecs)
print(covvecs)
cap_accur = 1.8035 # result from Mathematica, according to Grabowski 2019
rtol = 0.01
sd = 6823949
res = chancapmc.ba_discretein_gaussout(meanvecs, covvecs, np.arange(8), rtol, sd)
assert abs(res[0] - cap_accur)/cap_accur <= rtol, "Could not reproduce cap. for 8 gaussians on corners of a box"
print("Capacity for 8 gaussians on corners of a 3D box:", res[0])
print("Optimal input prob. distrib.:", res[1])
def gaussxw(n):
""" Fonction qui calcule les poids w_k et les positions x_k à utiliser pour la quadrature gaussienne à n points.
Les valeurs retournées sont normalisées pour un intervalle d'intégration [-1, 1].
Les positions x_k sont les zéros du polynôme de Legendre de degré n
Les poids sont donnés par:
(\frac{2}{1-x^2} (\frac{dP_n}{dx})^{-2})_{x = x_k}
Pour une intégrale sur l'intervalle [a, b], les bons x_k' et w_k' s'obtiennent à partir des valeurs pour [-1, 1]
avec la relation linéaire:
x_k' = 1/2 (b - a) x_k + 1/2 (b + a)
w_k' = 1/2 (b - a) w_k
Args:
n (int): le nombre de points à utiliser pour la quadrature gaussienne
Returns:
x (list): la liste des positions x_k pour [-1, 1]
w (list): la liste des poids w_k pour chaque x_k de [-1, 1]
"""
    # Initial estimates of the zeros (must be good enough for the method to converge)
    # For these estimates, use the Abramowitz and Stegun formula:
    # x_k = cos(pi*a_k + 1/(8n^2 tan(a_k))) with a_k = (4k-1)/(4n+2), k=1, 2, 3, ..., n
a = np.linspace(3, 4*n - 1, n)/(4*n + 2)
x = np.cos(np.pi*a + 1/(8*n*n*np.tan(a)))
    # Find the zeros with the Newton-Raphson method
epsilon = 1e-15
delta = 1.0
while delta > epsilon:
        # Polynomial P_0(x) = 1 everywhere
p0 = np.ones(n, dtype=float)
        # Polynomial P_1(x) = x, evaluated at the current estimates
p1 = x.copy()
        # Compute P_n(x) at each zero estimate
        # using Bonnet's recurrence formula, (m+1)P_{m+1}(x) = (2m+1)x P_m(x) - m P_{m-1}(x)
for m in range(1, n):
p0, p1 = p1, ((2*m + 1)*x*p1 - m*p0)/(m + 1)
        # Compute the correction to apply to the current estimates, i.e. P_n(x)/P_n'(x)
        # To compute the derivative of P_n(x), use the recurrence relation:
        # (x^2 - 1)/n dP_n(x)/dx = xP_n(x) - P_{n-1}(x)
        derivp = (n + 1)*(p0 - x*p1)/(1 - x*x)  # The exact formula would have n rather than n+1 as the factor,
        # but Newman uses this; the extra factor is cancelled in the weights below
varix = p1/derivp
x -= varix
        # Compute the size of the latest correction (convergence check)
delta = np.max(abs(varix))
    # Compute the weights from these zeros; the n+1 used in place of n earlier in derivp is cancelled here
w = 2*(n + 1)*(n + 1) / ((1 - x*x) * n*n*derivp*derivp)
return x, w
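# A quick sanity check (illustrative, not part of the original tests): n-point
# Gauss-Legendre quadrature is exact for polynomials up to degree 2n - 1, e.g.
#   x, w = gaussxw(3)
#   assert abs(np.sum(w * x**4) - 2/5) < 1e-12  # integral of x^4 over [-1, 1]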
def test_blahut_smallnoise():
""" Test case I computed analytically myself in a small-noise approx.
"""
import matplotlib.pyplot as plt
nvecs = 32
alph = 2**(-8)
qrange = np.linspace(0., 1., nvecs+1)
qrange = (qrange[1:] + qrange[:-1])/2
meanvecs = np.ones([nvecs, 2])*qrange.reshape(nvecs, -1) / np.sqrt(2)
sigma_mat = np.asarray([[5/8, -3/8], [-3/8, 5/8]])
covmats = alph * np.tile(sigma_mat, [nvecs, 1, 1]) * (qrange.reshape(nvecs, 1, 1) + 1)**2
cap_accur = -np.log2(alph) - np.log2(2 * np.pi * np.e)
rtol = 0.025
sd = 6823949
new = True
if new:
res = chancapmc.ba_discretein_gaussout(meanvecs, covmats, qrange, rtol, sd)
np.save("tests/optimal_distrib.npy", res[1])
print("Capacity found numerically, small noise approx, bits:", res[0])
# assert abs(res[0] - cap_accur)/cap_accur <= rtol*5, "Could not reproduce cap. for small noise approx."
else:
res = [1.6, 0]
res[1] = np.load("tests/optimal_distrib.npy")
print(res[1])
# Plot the prob distrib
fig, ax = plt.subplots()
width = np.concatenate([[0.], (qrange[1:] + qrange[:-1])/2, [1.]])
width = width[1:] - width[:-1]
ax.bar(qrange, res[1] / width, width=width)
# Compare to the theoretical one
theoretical = 2 / (1 + qrange)**2
ax.plot(qrange, theoretical, color="orange")
plt.show()
plt.close()
if __name__ == "__main__":
# test_memory()
# test_blahut()
# test_blahut_grabowski()
test_blahut_smallnoise()
|
from pygame import display,font
font.init()
police = font.Font('Roboto.ttf',50)
scr = display.get_surface()
scrrect = scr.get_rect()
class Levelmess(object):
t = 0
@staticmethod
def update(mess):
Levelmess.mess = police.render(mess,1,(200,200,200))
Levelmess.messrect = Levelmess.mess.get_rect(center=scrrect.center)
Levelmess.t = 0
@staticmethod
def render():
Levelmess.t += 1
if Levelmess.t == 125:
Levelmess.update('')
Levelmess.t = 0
return 0
scr.blit(Levelmess.mess,Levelmess.messrect)
return 1
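# Typical per-frame usage (a sketch inferred from the code above): call
# Levelmess.update(text) when a level starts, then Levelmess.render() once per
# frame; render() returns 0 once the message expires (125 frames) and 1 while
# it is still drawn.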
Levelmess.update('')
|
# author: Roy Kid
# contact: lijichen365@126.com
# date: 2021-09-11
# version: 0.0.1
from mollab.abc import Item, Template
import numpy as np
class Atom(Item):
serial = 1
def __init__(self):
self.serial = Atom.serial
Atom.serial += 1
self.bondAtoms = []
self.properties = self.__dict__
def __hash__(self) -> int:
return id(self)
def __eq__(self, o: object) -> bool:
return hash(self) == hash(o)
def __repr__(self):
return f' < Atom {self.serial} > '
def __lt__(self, o):
return self.serial < o.serial
def __gt__(self, o):
return self.serial > o.serial
def __le__(self, o):
return self.serial <= o.serial
def __ge__(self, o):
return self.serial >= o.serial
def bondto(self, o):
if o not in self.bondAtoms:
self.bondAtoms.append(o)
if self not in o.bondAtoms:
o.bondAtoms.append(self)
def debond(self, o):
self.bondAtoms.remove(o)
o.bondAtoms.remove(self)
def move(self, position: np.ndarray):
        # in-place shift; assumes self.position was set when the atom was rendered
        self.position += position
class TemplateAtom(Template):
def __init__(self, templateName=None):
super().__init__()
if isinstance(templateName, str):
self.name = templateName
elif isinstance(templateName, TemplateAtom):
self.inherit(templateName)
def render(self, **changes):
atom = Atom()
for k, v in self.properties.items():
setattr(atom, k, v)
for k, v in changes.items():
setattr(atom, k, v)
return atom
def __call__(self, **changes) -> Atom:
return self.render(**changes)
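# A minimal usage sketch (hedged: Template.inherit and the .properties handling
# live in the mollab Template base class, which is not shown here):
# carbon = TemplateAtom('C')
# a1, a2 = carbon(), carbon()
# a1.bondto(a2)  # a2 now appears in a1.bondAtoms and vice versa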
|
"""
parser_usda.py
Parses data from the USDA text files.
"""
from bs4 import BeautifulSoup
import urlparse
from urllib2 import urlopen
from urllib import urlretrieve
import os
import sys
import time
import glob
from random import randint
import codecs
import json
import inspect
from cheese import Cheese, CheeseLibrary
import parser_utils
def parseFdaLine(line):
return [field.strip('~') for field in line.split('^')]
def parseFdaFileUniqueDict(filename):
output = {}
lines = open(filename, 'r').readlines()
for line in lines:
fields = parseFdaLine(line)
output[fields[0]] = fields[1:]
return output
def parseFdaFileArray(filename):
output = {}
lines = open(filename, 'r').readlines()
# For debugging so that it doesn't parse forever
numLineLimit = 1000
lineIndex = 0
for line in lines:
fields = parseFdaLine(line)
        if fields[0] not in output:
output[fields[0]] = []
output[fields[0]].append(fields[1:])
lineIndex += 1
if lineIndex > numLineLimit:
break
return output
def parseUSDA(library):
source = 'USDA'
root = os.path.dirname(os.path.realpath(__file__))
sr22 = os.path.join(root, 'sr22')
food_descriptions_filename = os.path.join(sr22, 'FOOD_DES.txt')
food_descriptions = parseFdaFileUniqueDict(food_descriptions_filename)
food_groups_filename = os.path.join(sr22, 'FD_GROUP.txt')
food_groups = parseFdaFileUniqueDict(food_groups_filename)
nutritional_data_filename = os.path.join(sr22, 'NUT_DATA.txt')
nutritional_data = parseFdaFileArray(nutritional_data_filename)
nutritional_definitions_filename = os.path.join(sr22, 'NUTR_DEF.txt')
nutritional_definitions = parseFdaFileUniqueDict(
nutritional_definitions_filename)
food_descriptions_headers = ['FdGrp_Cd',
'Long_Desc', 'Shrt_Desc', 'ComName']
nutritional_data_headers = ['Nutr_No',
'Nutr_Val', 'Num_Data_Pts', 'ComName']
nutritional_definition_headers = ['Units', 'Tagname', 'NutrDesc']
for (ndb_no, food) in food_descriptions.iteritems():
if ndb_no in nutritional_data:
nutritions = nutritional_data[ndb_no]
short_food_names = food[food_descriptions_headers.index(
'Shrt_Desc')].split(',')
long_food_names = food[food_descriptions_headers.index(
'Long_Desc')].split(',')
common_food_names = food[food_descriptions_headers.index(
'ComName')].split(',')
if short_food_names[0].lower() == 'cheese':
for nutrition in nutritions:
nutritional_definition_index = nutrition[nutritional_data_headers.index(
'Nutr_No')]
nutritional_definition = nutritional_definitions[nutritional_definition_index]
value = nutrition[nutritional_data_headers.index(
'Nutr_Val')]
units = nutritional_definition[nutritional_definition_headers.index(
'Units')]
name = nutritional_definition[nutritional_definition_headers.index(
'NutrDesc')]
cheese = Cheese()
cheese.name = ' '.join(long_food_names[1:]).strip()
if not library.add(cheese, source):
break
return
|
# -*- coding: utf-8 -*-
"""Command line interface for Axonius API Client."""
from ...context import CONTEXT_SETTINGS, click
from ...options import AUTH, add_options
USER_NAME = click.option(
"--name",
"-n",
"name",
help="Name of user",
required=True,
show_envvar=True,
show_default=True,
)
OPTIONS = [*AUTH, USER_NAME]
@click.command(name="delete", context_settings=CONTEXT_SETTINGS)
@add_options(OPTIONS)
@click.pass_context
def cmd(ctx, url, key, secret, name, **kwargs):
"""Delete a user."""
client = ctx.obj.start_client(url=url, key=key, secret=secret)
with ctx.obj.exc_wrap(wraperror=ctx.obj.wraperror):
client.system_users.delete(name=name)
ctx.obj.echo_ok(f"Deleted user {name!r}")
|
# synth.__main__
import argparse
import configparser
import pathlib
import subprocess
import typing
import synth.metadata
import synth.config
class CommandlineParsingError(RuntimeError):
pass
class CustomFormatter(argparse.RawDescriptionHelpFormatter):
def _format_action(self, action: argparse.Action) -> str:
result = super()._format_action(action)
if action.nargs == argparse.PARSER:
# since we aren't showing the subcommand group, de-indent by 2
# spaces
lines = result.split('\n')
lines = [line[2:] for line in lines]
result = '\n'.join(lines)
return result
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(formatter_class=CustomFormatter)
subparsers = parser.add_subparsers(
dest='mode',
required=True,
metavar='<mode>',
title=argparse.SUPPRESS)
subparsers.add_parser(
'init',
help='Initialize a new `synth` repository')
add_parser = subparsers.add_parser(
'add',
help='Track the repository at <origin>')
add_parser.add_argument('origin', metavar='<origin>')
add_parser.add_argument('--ref', metavar='<ref>', default='HEAD')
add_parser.add_argument('--name', metavar='<name>')
compose_parser = subparsers.add_parser(
'compose',
help='Compose a directory structure at <target>')
compose_parser.add_argument('names', metavar='<name>', nargs='*')
compose_parser.add_argument('target', metavar='<target>', nargs='?')
extract_parser = subparsers.add_parser(
'extract',
        help='Extract modifications from <target>')
extract_parser.add_argument('names', metavar='<name>', nargs='*')
extract_parser.add_argument('target', metavar='<target>', nargs='?')
extract_parser.add_argument(
'--upstream',
metavar='<upstream>',
default='@{upstream}')
set_parser = subparsers.add_parser(
'config',
help='Update configuration <property> to <value>')
set_parser.add_argument(
'property',
metavar='<property>',
choices=synth.config.validators.keys())
set_parser.add_argument(dest='value', metavar='<value>')
return parser
def post_process_args(
args: argparse.Namespace,
config: configparser.ConfigParser) -> None:
if args.mode == 'compose' or args.mode == 'extract':
config_target_path = config.get('target', 'path', fallback=None)
if config_target_path:
args.target = config_target_path
else:
if len(args.names) == 0:
raise CommandlineParsingError('target not specified')
args.target = args.names.pop()
def git_cmd(
args: list[str],
working_dir: typing.Optional[pathlib.Path]=None) -> list[str]:
return subprocess.run(
['git'] + args,
capture_output=True,
check=True,
cwd=working_dir,
encoding='ascii').stdout.split('\n')
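# Usage sketch (illustrative): each element of the returned list is one line
# of git's stdout, e.g. git_cmd(['rev-parse', 'HEAD']) might yield
# ['<some-sha>', ''], since the trailing newline splits into an empty string.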
def synth_config(property: str, value: str) -> None:
synth.config.write({ property: value }.items())
def synth_init() -> None:
synth.metadata.initialize()
def synth_add(origin: str, ref: str, name: str) -> None:
if origin[-1] == '/':
origin = origin[:-1]
if name is None:
name = origin.split('/')[-1]
resolved_hash = None
for line in git_cmd(['ls-remote', origin, ref]):
if line == '':
continue
if resolved_hash:
raise RuntimeError(f'{ref} is ambiguous and lists multiple refs')
resolved_hash, _ = line.split()
if not resolved_hash:
raise RuntimeError(f'Could not find {ref} at {origin}')
synth.metadata.create_module(name, origin, resolved_hash)
def synth_compose(raw_target: str, raw_names: list[str]) -> None:
target = pathlib.Path(raw_target)
if len(raw_names) == 0:
names = synth.metadata.get_module_names()
else:
names = iter(raw_names)
for name in names:
module = synth.metadata.get_module(name)
print(f'Composing {name} from {module["origin"]} at '
f'{module["commit"]}')
dest = target/name
if not dest.exists():
git_cmd(['clone', module['origin'], str(dest)])
git_cmd(['fetch', 'origin', module['commit']], dest)
git_cmd(['reset', '--hard', module['commit']], dest)
patches = synth.metadata.get_patch_dir(name)
if patches.exists():
git_cmd(['am', '-3', str(patches)], dest)
def synth_extract(
raw_target: str,
raw_names: list[str],
upstream: str) -> None:
target = pathlib.Path(raw_target)
if len(raw_names) == 0:
names = synth.metadata.get_module_names()
else:
names = iter(raw_names)
for name in names:
src = target/name
if not src.is_dir():
raise RuntimeError(f'{name} has not been composed')
module = synth.metadata.get_module(name)
git_cmd(['fetch', module['origin'], upstream], src)
module['commit'] = git_cmd(['rev-parse', 'FETCH_HEAD'], src)[0]
synth.metadata.update_module(name, module)
print(f'Extracting patches from {name} since {upstream}')
synth.metadata.clear_patches(name)
git_cmd([
'format-patch',
'-N',
'FETCH_HEAD',
'-o',
str(synth.metadata.get_patch_dir(name).resolve())],
src)
def main() -> None:
args = get_parser().parse_args()
if args.mode == 'config':
synth_config(args.property, args.value)
if args.mode == 'init':
synth_init()
if args.mode == 'add':
synth_add(args.origin, args.ref, args.name)
post_process_args(args, synth.config.read())
if args.mode == 'compose':
synth_compose(args.target, args.names)
if args.mode == 'extract':
synth_extract(args.target, args.names, args.upstream)
if __name__ == '__main__':
main()
|
from .for_events import prompt_event_occurred, prompt_event_category, \
prompt_event_weight, edit_event_description, prompt_event_artifacts
from .for_init import prompt_init_config
|
'''-------------------------------------------------------------------------------
Tool Name: CreateInflowFileFromECMWFRunoff
Source Name: CreateInflowFileFromECMWFRunoff.py
Version: ArcGIS 10.3
Author: Environmental Systems Research Institute Inc.
Updated by: Alan D. Snow, US Army ERDC
 Description: Creates RAPID inflow file based on the ECMWF runoff output
and the weight table previously created.
History: Initial coding - 10/21/2014, version 1.0
Updated: Version 1.0, 10/23/2014, modified names of tool and parameters
Version 1.0, 10/28/2014, added data validation
Version 1.0, 10/30/2014, initial version completed
Version 1.1, 11/05/2014, modified the algorithm for extracting runoff
variable from the netcdf dataset to improve computation efficiency
Version 1.2, 02/03/2015, bug fixing - output netcdf3-classic instead
of netcdf4 as the format of RAPID inflow file
Version 1.2, 02/03/2015, bug fixing - calculate inflow assuming that
ECMWF runoff data is cumulative instead of incremental through time
Version 1.3, 02/28/2019, revised durations to use a variable temporal
range. CJB ENSCO, MJS ERDC CRREL
-------------------------------------------------------------------------------'''
import netCDF4 as NET
import numpy as NUM
import csv
from io import open
class CreateInflowFileFromECMWFRunoff(object):
def __init__(self):
"""Define the tool (tool name is the name of the class)."""
self.label = "Create Inflow File From ECMWF Runoff"
self.description = ("Creates RAPID NetCDF input of water inflow " +
"based on ECMWF runoff results and previously created weight table.")
self.canRunInBackground = False
#CJB self.header_wt = ['StreamID', 'area_sqm', 'lon_index', 'lat_index', 'npoints']
self.header_wt = ['rivid', 'area_sqm', 'lon_index', 'lat_index', 'npoints']
#SDR added new structure to fit new ecmwf ##.runoff.nc file order
#self.dims_oi = [['lon', 'lat', 'time'], ['longitude', 'latitude', 'time']]
self.dims_oi = [['lon', 'lat', 'time'], ['longitude', 'latitude', 'time'], ['time','lon','lat']] # Line Added/Modified CJB 20190108
#self.vars_oi = [["lon", "lat", "time", "RO"], ['longitude', 'latitude', 'time', 'ro']]
self.vars_oi = [["lon", "lat", "time", "RO"], ['longitude', 'latitude', 'time', 'ro'], ["time", "lon", "lat", "RO"]] # Line Added/Modified CJB 20190108
self.length_time = {"LowRes": 61, "Low3HrRes": 40, "LowResFull": 85,"HighRes": 125, "High3HrRes":3} # *** MJS What is High3HrRes for? Doesn't seem to be used.
#self.length_time = {"LowResFull": 85,"HighRes": 125}
self.length_time_opt = {"LowRes-6hr": 60, "LowRes-3hr": 40,
"LowResFull-3hr-Sub": 48, "LowResFull-6hr-Sub": 36,
"HighRes-1hr": 90, "HighRes-3hr": 48, "HighRes-6hr": 40, # *** MJS HighRes-3hr was changed to 40 before; why?
"HighRes-3hr-Sub": 18, "HighRes-6hr-Sub": 16}
self.errorMessages = ["Missing Variable 'time'",
"Incorrect dimensions in the input ECMWF runoff file.",
"Incorrect variables in the input ECMWF runoff file.",
"Incorrect time variable in the input ECMWF runoff file",
"Incorrect number of columns in the weight table",
"No or incorrect header in the weight table",
"Incorrect sequence of rows in the weight table"]
def dataValidation(self, in_nc):
"""Check the necessary dimensions and variables in the input netcdf data"""
vars_oi_index = None
data_nc = NET.Dataset(in_nc)
dims = list(data_nc.dimensions)
if dims not in self.dims_oi:
raise Exception(self.errorMessages[1])
vars = list(data_nc.variables)
if vars == self.vars_oi[0]:
vars_oi_index = 0
elif vars == self.vars_oi[1]:
vars_oi_index = 1
elif vars == self.vars_oi[2]: # Line Added/Modified CJB 20190108
vars_oi_index = 2 # Line Added/Modified CJB 20190108
else:
raise Exception(self.errorMessages[2])
return vars_oi_index
def dataIdentify(self, in_nc):
"""Check if the data is Ensemble 1-51 (low resolution) or 52 (high resolution)"""
data_nc = NET.Dataset(in_nc)
time = data_nc.variables['time'][:]
diff = NUM.unique(NUM.diff(time))
data_nc.close()
#time_interval_highres = NUM.array([1.0,3.0,6.0],dtype=float)
#time_interval_lowres_full = NUM.array([3.0, 6.0],dtype=float)
#time_interval_lowres = NUM.array([6.0],dtype=float)
#time_interval_lowres_3Hr = NUM.array([3.0],dtype=float)
time_interval_HRES1 = NUM.array([1.0],dtype=float) # Line Added/Modified CJB 20190108
time_interval_HRES13 = NUM.array([1.0,3.0],dtype=float) # Line Added/Modified CJB 20190108
time_interval_HRES136 = NUM.array([1.0,3.0,6.0],dtype=float) # Line Added/Modified CJB 20190108
time_interval_ENS3 = NUM.array([3.0],dtype=float) # Line Added/Modified CJB 20190108
time_interval_ENS36 = NUM.array([3.0,6.0],dtype=float) # Line Added/Modified CJB 20190108
time_interval_ENS6 = NUM.array([6.0],dtype=float) # Line Added/Modified CJB 20190108
#print "SDR - diff:", diff, time_interval_highres, time_interval_lowres_full, time_interval_lowres
#if NUM.array_equal(diff, time_interval_highres):
# return "HighRes"
#elif NUM.array_equal(diff, time_interval_lowres_full):
# return "LowResFull"
#elif NUM.array_equal(diff, time_interval_lowres):
# return "LowRes"
#elif NUM.array_equal(diff, time_interval_lowres_3Hr):
# return "Low3HrRes"
#else:
# return None
if NUM.array_equal(diff, time_interval_HRES1): # Line Added/Modified CJB 20190108
return "HRES1" # Line Added/Modified CJB 20190108
elif NUM.array_equal(diff, time_interval_HRES13): # Line Added/Modified CJB 20190108
return "HRES13" # Line Added/Modified CJB 20190108
elif NUM.array_equal(diff, time_interval_HRES136): # Line Added/Modified CJB 20190108
return "HRES136" # Line Added/Modified CJB 20190108
elif NUM.array_equal(diff, time_interval_ENS3): # Line Added/Modified CJB 20190108
return "ENS3" # Line Added/Modified CJB 20190108
elif NUM.array_equal(diff, time_interval_ENS36): # Line Added/Modified CJB 20190108
return "ENS36" # Line Added/Modified CJB 20190108
elif NUM.array_equal(diff, time_interval_ENS6): # Line Added/Modified MJS, CJB 20190108
return "ENS6" # Line Added/Modified CJB 20190108
else: # Line Added/Modified CJB 20190108
return None # Line Added/Modified CJB 20190108
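    # Illustrative mapping implied by the checks above: a time axis spaced
    # entirely at 6 h gives diff == [6.0] -> "ENS6"; mixed 1/3/6 h spacing
    # gives diff == [1.0, 3.0, 6.0] -> "HRES136".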
def getGridName(self, in_nc, high_res=False):
"""Return name of grid"""
if high_res:
return 'ecmwf_t1279'
return 'ecmwf_tco639'
#if high_res: # Line Added/Modified CJB 20190108
#return 'ecmwf_HRES_F' # Line Added/Modified CJB 20190108
#else: # MJS 20190108
#return 'ecmwf_ENS_F' # Line Added/Modified MJS, CJB 20190108
def getTimeSize(self, in_nc): # Line Added/Modified CJB 20190108
"""Return time size""" # Line Added/Modified MJS 20190108
data_in_nc = NET.Dataset(in_nc) # Line Added/Modified CJB 20190108
time = data_in_nc.variables['time'][:] # Line Added/Modified CJB 20190108
size_time = len(time) # Line Added/Modified CJB 20190108
data_in_nc.close() # Line Added/Modified CJB 20190108
return size_time # Line Added/Modified CJB 20190108
def execute(self, in_nc, in_weight_table, out_nc, grid_name, conversion_flag, in_time_interval="6hr"): # modified this line CJB 20190218
# MJS I might consider netCDF4.Dataset.variables['RO'].units
# and upstream correction of the cdo grid conversion units attribute.
"""The source code of the tool."""
# Validate the netcdf dataset
vars_oi_index = self.dataValidation(in_nc)
"""get conversion factor the flag is used to differentiate forecasts converted
to netCDF from GRIB and the original netCDF. They both use the same weight tables
but the original netCDF is in mm whereas the stock GRIB forecasts are in meters.
Set the conversion_flag in the run.py configuration file.
"""
if conversion_flag: # Line Added CJB 20190218
conversion_factor = 1.0 #Line Modified CJB 20190218
elif grid_name == 'ecmwf_t1279' or grid_name == 'ecmwf_tco639': # Line Modified CJB 20190218
#if grid_name == 'ecmwf_HRES_F' or grid_name == 'ecmwf_ENS_F': # Line Added/Modified CJB 20190108
#new grids in mm instead of m
conversion_factor = 0.001
else: #set the conversion factor to 1 for everything else (data is in m but legacy installations do not have a flag) Line Added CJB 20190218
conversion_factor = 1.0 # Line Added CJB 20190218
# MJS I might consider netCDF4.Dataset.variables['RO'].units
# and upstream correction of the cdo grid conversion units attribute.
# identify if the input netcdf data is the High Resolution data with three different time intervals
id_data = self.dataIdentify(in_nc)
if id_data is None:
raise Exception(self.errorMessages[3])
''' Read the netcdf dataset'''
data_in_nc = NET.Dataset(in_nc)
time = data_in_nc.variables['time'][:]
# Check the size of time variable in the netcdf data
        if len(time) == 0: # *** MJS This check seems too loose an error trap; should it also catch nc files whose time length differs from the id_data length?
raise Exception(self.errorMessages[3])
#if len(time) != self.length_time[id_data]:
# raise Exception(self.errorMessages[3])
''' Read the weight table '''
print("Reading the weight table...", in_weight_table)
dict_list = {self.header_wt[0]:[], self.header_wt[1]:[], self.header_wt[2]:[],
self.header_wt[3]:[], self.header_wt[4]:[]}
with open(in_weight_table, "r") as csvfile:
reader = csv.reader(csvfile)
count = 0
for row in reader:
if count == 0:
#check number of columns in the weight table
if len(row) < len(self.header_wt):
raise Exception(self.errorMessages[4])
#check header
if row[1:len(self.header_wt)] != self.header_wt[1:]:
raise Exception(self.errorMessages[5])
count += 1
else:
for i in range(len(self.header_wt)):
dict_list[self.header_wt[i]].append(row[i])
count += 1
''' Calculate water inflows
as a reminder, the first 91 time steps are T=0 to T=90 and are 1-hourly for HRES
the next 18 time steps for HRES are T=93 to T=144 at 3-hourly
then the final 16 time steps are T=150 to T=240 at 6-hourly for a total of 125 records
For ENS, the first 49 time steps are T=0 to T=144 at 3-hourly
the final 35 time steps are T=150 to T=360 at 6-hourly for a total of 84 records
'''
print("Calculating water inflows...")
'''
added the next section CJB 20180122
'''
# Get the overall number of time steps
size_time = self.getTimeSize(in_nc) #CJB 20180122
# Determine the size of time steps in each group (1-hourly, 3-hourly, and/or 6-hourly)
if id_data == "HRES1": # T <= 90
time_size = (size_time - 1)
elif id_data == "HRES13": # 93 <= T <= 144
if in_time_interval == "1hr":
time_size = self.length_time_opt["HighRes-1hr"]
else:
time_size = (size_time - self.length_time_opt["HighRes-1hr"] - 1)
elif id_data == "HRES136": # 150 <= T <= 240
if in_time_interval == "1hr":
time_size = self.length_time_opt["HighRes-1hr"]
elif in_time_interval == "3hr": # MJS Doesn't seem to be a case used currently, but added just in case later need.
                time_size = self.length_time_opt["HighRes-3hr-Sub"] # MJS This is HRES136, i.e., if for some reason in ecmwf_rapid_multi a 3 hr is asked for for this case, it should still have the 3hr_sub number of times
elif in_time_interval == "3hr_subset":
time_size = self.length_time_opt["HighRes-3hr-Sub"]
else:
time_size = (size_time - self.length_time_opt["HighRes-1hr"] - self.length_time_opt["HighRes-3hr-Sub"] - 1)
elif id_data == "ENS3": # T <= 144
time_size = (size_time - 1)
elif id_data == "ENS36": # 150 <= T <= 360
if in_time_interval == "3hr_subset":
time_size = self.length_time_opt["LowResFull-3hr-Sub"]
else:
time_size = (size_time - self.length_time_opt["LowResFull-3hr-Sub"] - 1)
else: # id_data == "ENS6": # T <= 360 but all 6-hourly
time_size = (size_time - 1)
#else: # something is wrong and need to throw an error message - likely a corrupt forecast file
# raise Exception(self.errorMessages[3])
#''' end of added section CJB 20180122
#'''
#if id_data == "LowRes":
# size_time = self.length_time_opt["LowRes-6hr"]
#elif id_data == "Low3HrRes":
# size_time = self.length_time_opt["LowRes-3hr"]
#elif id_data == "LowResFull":
# if in_time_interval == "3hr_subset":
# size_time = self.length_time_opt["LowResFull-3hr-Sub"]
# elif in_time_interval == "6hr_subset":
# size_time = self.length_time_opt["LowResFull-6hr-Sub"]
# else:
# size_time = self.length_time_opt["LowRes-6hr"]
#else: #HighRes
# if in_time_interval == "1hr":
# size_time = self.length_time_opt["HighRes-1hr"]
# elif in_time_interval == "3hr":
# size_time = self.length_time_opt["HighRes-3hr"]
# elif in_time_interval == "3hr_subset":
# size_time = self.length_time_opt["HighRes-3hr-Sub"]
# elif in_time_interval == "6hr_subset":
# size_time = self.length_time_opt["HighRes-6hr-Sub"]
# else:
# size_time = self.length_time_opt["HighRes-6hr"]
size_streamID = len(set(dict_list[self.header_wt[0]]))
# Create output inflow netcdf data
# data_out_nc = NET.Dataset(out_nc, "w") # by default format = "NETCDF4"
data_out_nc = NET.Dataset(out_nc, "w", format = "NETCDF3_CLASSIC")
#dim_Time = data_out_nc.createDimension('Time', size_time)
dim_Time = data_out_nc.createDimension('Time', time_size)
dim_RiverID = data_out_nc.createDimension('rivid', size_streamID)
var_m3_riv = data_out_nc.createVariable('m3_riv', 'f4',
('Time', 'rivid'),
fill_value=0)
#data_temp = NUM.empty(shape = [size_time, size_streamID])
data_temp = NUM.empty(shape = [time_size, size_streamID])
lon_ind_all = [int(i) for i in dict_list[self.header_wt[2]]]
lat_ind_all = [int(j) for j in dict_list[self.header_wt[3]]]
# Obtain a subset of runoff data based on the indices in the weight table
min_lon_ind_all = min(lon_ind_all)
max_lon_ind_all = max(lon_ind_all)
min_lat_ind_all = min(lat_ind_all)
max_lat_ind_all = max(lat_ind_all)
# self.vars_oi[vars_oi_index][3] = RO; get that variable's 3D structure (time, lat_index, lon_index) ready to reshape into 2D (time, lat_index x lon_index)
data_subset_all = data_in_nc.variables[self.vars_oi[vars_oi_index][3]][:, min_lat_ind_all:max_lat_ind_all+1, min_lon_ind_all:max_lon_ind_all+1]
len_time_subset_all = data_subset_all.shape[0]
len_lat_subset_all = data_subset_all.shape[1]
len_lon_subset_all = data_subset_all.shape[2]
data_subset_all = data_subset_all.reshape(len_time_subset_all, (len_lat_subset_all * len_lon_subset_all))
# compute new indices based on the data_subset_all
index_new = []
for r in range(0,count-1):
ind_lat_orig = lat_ind_all[r]
ind_lon_orig = lon_ind_all[r]
index_new.append((ind_lat_orig - min_lat_ind_all)*len_lon_subset_all + (ind_lon_orig - min_lon_ind_all))
# obtain a new subset of data
data_subset_new = data_subset_all[:,index_new]*conversion_factor
# start compute inflow
pointer = 0
for s in range(0, size_streamID):
npoints = int(dict_list[self.header_wt[4]][pointer])
# Check if all npoints points correspond to the same streamID
if len(set(dict_list[self.header_wt[0]][pointer : (pointer + npoints)])) != 1:
print("ROW INDEX {0}".format(pointer))
print("RIVID {0}".format(dict_list[self.header_wt[0]][pointer]))
                raise Exception(self.errorMessages[6])
area_sqm_npoints = [float(k) for k in dict_list[self.header_wt[1]][pointer : (pointer + npoints)]]
area_sqm_npoints = NUM.array(area_sqm_npoints)
area_sqm_npoints = area_sqm_npoints.reshape(1, npoints)
data_goal = data_subset_new[:, pointer:(pointer + npoints)]
#remove noise from data
data_goal[data_goal<=0.00001] = 0
''' IMPORTANT NOTE: runoff variable in ECMWF dataset is cumulative instead of incremental through time
'''
# For data with Low Resolution, there's only one time interval 6 hrs
if id_data == "ENS6": # Line Added/Modified CJB 20190108
#ro_stream = data_goal * area_sqm_npoints
ro_stream = NUM.subtract(data_goal[1:,],data_goal[:-1,]) * area_sqm_npoints
elif id_data == "ENS3": # there's only one time interval 3 hrs # Line Added/Modified CJB 20190108
#ro_stream = data_goal * area_sqm_npoints
ro_stream = NUM.subtract(data_goal[1:,],data_goal[:-1,]) * area_sqm_npoints # Line Added/Modified CJB 20190108
elif id_data == "HRES1": # there's only one time interval 1 hrs # Line Added/Modified CJB 20190108
#ro_stream = data_goal * area_sqm_npoints
ro_stream = NUM.subtract(data_goal[1:,],data_goal[:-1,]) * area_sqm_npoints # Line Added/Modified CJB 20190108
#For data with the full version of Low Resolution, from Hour 0 to 144 (the first 49 time points) are of 3 hr time interval,
# then from Hour 144 to 360 (36 time points) are of 6 hour time interval
elif id_data == "ENS36": # Line Added/Modified CJB 20190108
if in_time_interval == "3hr_subset":
#use only the 3hr time interval
ro_stream = NUM.subtract(data_goal[1:49,], data_goal[:48,]) * area_sqm_npoints
elif in_time_interval == "6hr_subset":
#use only the 6hr time interval
ro_stream = NUM.subtract(data_goal[49:,], data_goal[48:-1,]) * area_sqm_npoints
else: #"LowRes-6hr"
######################################################
# MJS Always assume this case will have a full ECMWF 240
# hour forecast to work with. It's actually never re-
# quested by ecmwf_rapid_multiprocess anyhow.
######################################################
#convert all to 6hr
# calculate time series of 6 hr data from 3 hr data
ro_6hr_a = NUM.subtract(data_goal[2:49:2,], data_goal[:48:2,])
# get the time series of 6 hr data
ro_6hr_b = NUM.subtract(data_goal[49:,], data_goal[48:-1,])
# concatenate all time series
ro_stream = NUM.concatenate([ro_6hr_a, ro_6hr_b]) * area_sqm_npoints
#For data with High Resolution, from Hour 0 to 90 (the first 91 time points) are of 1 hr time interval,
# then from Hour 90 to 144 (18 time points) are of 3 hour time interval, and from Hour 144 to 240 (16 time points)
# are of 6 hour time interval
##########################################################
# MJS The following should handle id_data = HRES13 and HRES136
##########################################################
else:
if in_time_interval == "1hr":
#ro_stream = NUM.subtract(data_goal[1:91,],data_goal[:90,]) * area_sqm_npoints
ro_stream = NUM.subtract(data_goal[1:1+time_size,],data_goal[:time_size,]) * area_sqm_npoints # Line Added/Modified CJB, MJS 20190108
elif in_time_interval == "3hr": # MJS HRES 3hr not currently used
# calculate time series of 3 hr data from 1 hr data
ro_3hr_a = NUM.subtract(data_goal[3:91:3,],data_goal[:88:3,])
# get the time series of 3 hr data
#ro_3hr_b = NUM.subtract(data_goal[91:109,], data_goal[90:108,])
ro_3hr_b = NUM.subtract(data_goal[91:91+time_size,], data_goal[90:90+time_size,]) # MJS modified again; seems no case for this, but just in case later... Line Added/Modified CJB 20190108
# concatenate all time series
ro_stream = NUM.concatenate([ro_3hr_a, ro_3hr_b]) * area_sqm_npoints
elif in_time_interval == "3hr_subset":
#use only the 3hr time interval
#ro_stream = NUM.subtract(data_goal[91:109,], data_goal[90:108,]) * area_sqm_npoints
ro_stream = NUM.subtract(data_goal[91:91+time_size,], data_goal[90:90+time_size,]) * area_sqm_npoints # MJS modified again; needs to handle HRES13 that might not have complete 3hr set... Line Added/Modified CJB 20190108
elif in_time_interval == "6hr_subset":
#use only the 6hr time interval
ro_stream = NUM.subtract(data_goal[109:,], data_goal[108:-1,]) * area_sqm_npoints
######################################################
# MJS Always assume this case will have a full ECMWF 240
# hour forecast to work with. It's actually never re-
# quested by ecmwf_rapid_multiprocess anyhow.
######################################################
else: # in_time_interval == "6hr"
#arcpy.AddMessage("6hr")
# calculate time series of 6 hr data from 1 hr data
ro_6hr_a = NUM.subtract(data_goal[6:91:6,], data_goal[:85:6,])
# calculate time series of 6 hr data from 3 hr data
ro_6hr_b = NUM.subtract(data_goal[92:109:2,], data_goal[90:107:2,])
# get the time series of 6 hr data
ro_6hr_c = NUM.subtract(data_goal[109:,], data_goal[108:-1,])
# concatenate all time series
ro_stream = NUM.concatenate([ro_6hr_a, ro_6hr_b, ro_6hr_c]) * area_sqm_npoints
#remove negative values
ro_stream[ro_stream<0] = 0
data_temp[:,s] = ro_stream.sum(axis = 1)
pointer += npoints
'''Write inflow data'''
print("Writing inflow data...")
var_m3_riv[:] = data_temp
# close the input and output netcdf datasets
data_in_nc.close()
data_out_nc.close()
|
#
# filter-build-log.py - Cuts out cruft to focus on build failure details.
#
# This script filters build logs down to only [WARNING] and [ERROR] lines,
# and consolidates lengthy duplicate class listings down to packages only.
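#
# Illustrative effect (hypothetical log lines):
#   '[INFO] Compiling module foo'            -> dropped
#   '[WARNING] Unused import in Bar.java'    -> kept
#   '  com/example/util/Thing.class' under a ' Duplicate classes:'
#   heading                                  -> collapsed to '  com/example/util'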
import sys
def print_filtered_log(log):
dups = []
parsingdups = False
atbeginning = True
for line in log:
line = line.rstrip('\n')
if line.startswith('[INFO]'):
# Filter out non-error build messages.
continue
if line.startswith('Download') or line.startswith('Progress'):
# Filter out details of remote resource queries.
continue
if atbeginning and not line.strip():
# Filter out leading blank lines.
continue
atbeginning = False
if parsingdups:
if line.startswith(' '):
if line.find('/') >= 0:
# Strip to containing package only.
line = line[:line.rindex('/')]
dups.append(line)
else:
parsingdups = False
for dup in sorted(set(dups)):
print(dup)
print('')
dups = []
else:
if line == ' Duplicate classes:':
print(' Duplicate packages:')
parsingdups = True
else:
print(line)
for arg in sys.argv[1:]:
with open(arg) as f:
print_filtered_log(f)
|
from gpiozero import Button, LED
from gpiozero.pins.pigpio import PiGPIOFactory
from gpiozero.tools import all_values
from signal import pause
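# Remote-GPIO sketch: two Buttons on separate Pis (reached through the pigpio
# daemons at the hosts below) drive one local LED; all_values() keeps the LED
# lit only while BOTH remote buttons are pressed.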
factory3 = PiGPIOFactory(host='192.168.1.3')
factory4 = PiGPIOFactory(host='192.168.1.4')
led = LED(17)
button_1 = Button(17, pin_factory=factory3)
button_2 = Button(17, pin_factory=factory4)
led.source = all_values(button_1, button_2)
pause()
|
from collections import namedtuple
import os
import azure.mgmt.keyvault
import azure.mgmt.batch
from azure_devtools.scenario_tests.preparers import (
AbstractPreparer,
SingleValueReplacer,
)
from azure_devtools.scenario_tests.exceptions import AzureTestError
from devtools_testutils import AzureMgmtPreparer, ResourceGroupPreparer, FakeResource
from devtools_testutils.resource_testcase import RESOURCE_GROUP_PARAM
class KeyVaultPreparer(AzureMgmtPreparer):
def __init__(self,
name_prefix='batch',
location='westus',
parameter_name='keyvault',
resource_group_parameter_name=RESOURCE_GROUP_PARAM,
disable_recording=True, playback_fake_resource=None,
client_kwargs=None):
super(KeyVaultPreparer, self).__init__(name_prefix, 24,
disable_recording=disable_recording,
playback_fake_resource=playback_fake_resource,
client_kwargs=client_kwargs)
self.location = location
self.resource_group_parameter_name = resource_group_parameter_name
self.parameter_name = parameter_name
self.parameter_name_for_location='location'
    def _get_resource_group(self, **kwargs):
        try:
            return kwargs[self.resource_group_parameter_name]
        except KeyError:
            template = 'To create a keyvault a resource group is required. Please add ' \
                       'decorator @{} in front of this keyvault preparer.'
            raise AzureTestError(template.format(ResourceGroupPreparer.__name__))
def create_resource(self, name, **kwargs):
name = name.replace('_', '-')
#raise Exception(name)
if self.is_live:
self.client = self.create_mgmt_client(
azure.mgmt.keyvault.KeyVaultManagementClient)
group = self._get_resource_group(**kwargs)
self.resource = self.client.vaults.create_or_update(
group.name,
name,
{
'location': self.location,
'properties': {
'sku': {'name': 'standard'},
'tenant_id': "72f988bf-86f1-41af-91ab-2d7cd011db47",
'enabled_for_deployment': True,
'enabled_for_disk_encryption': True,
'enabled_for_template_deployment': True,
'access_policies': [ {
'tenant_id': "72f988bf-86f1-41af-91ab-2d7cd011db47",
'object_id': "f520d84c-3fd3-4cc8-88d4-2ed25b00d27a",
'permissions': {
'keys': ['all'],
'secrets': ['all']
}
}]
}
}
)
else:
self.resource = FakeResource(name=name, id=name)
return {
self.parameter_name: self.resource,
}
def remove_resource(self, name, **kwargs):
name = name.replace('_', '-')
if self.is_live:
group = self._get_resource_group(**kwargs)
self.client.vaults.delete(group.name, name)
class SimpleBatchPreparer(AzureMgmtPreparer):
def __init__(self,
name_prefix='batch',
location='westus',
parameter_name='batch_account',
resource_group_parameter_name=RESOURCE_GROUP_PARAM,
disable_recording=True, playback_fake_resource=None,
client_kwargs=None):
super(SimpleBatchPreparer, self).__init__(name_prefix, 24,
disable_recording=disable_recording,
playback_fake_resource=playback_fake_resource,
client_kwargs=client_kwargs)
self.location = location
self.resource_group_parameter_name = resource_group_parameter_name
self.parameter_name = parameter_name
self.parameter_name_for_location='location'
    def _get_resource_group(self, **kwargs):
        try:
            return kwargs[self.resource_group_parameter_name]
        except KeyError:
            template = 'To create a batch account a resource group is required. Please add ' \
                       'decorator @{} in front of this batch account preparer.'
            raise AzureTestError(template.format(ResourceGroupPreparer.__name__))
def create_resource(self, name, **kwargs):
if self.is_live:
self.client = self.create_mgmt_client(
azure.mgmt.batch.BatchManagementClient)
group = self._get_resource_group(**kwargs)
batch_account = azure.mgmt.batch.models.BatchAccountCreateParameters(
location=self.location,
)
account_setup = self.client.batch_account.create(
group.name,
name,
batch_account)
self.resource = account_setup.result()
else:
self.resource = FakeResource(name=name, id=name)
return {
self.parameter_name: self.resource,
}
def remove_resource(self, name, **kwargs):
if self.is_live:
group = self._get_resource_group(**kwargs)
deleting = self.client.batch_account.delete(group.name, name)
            try:
                deleting.wait()
            except Exception:
                pass
|
#!/usr/bin/env python3.9
"""
Test the `zendesk_common.py` file under main/upstream.
"""
import os
import pytest
@pytest.fixture()
def variables():
"""
Retrieve environment variables for testing, and pass their values to the test
functions.
"""
    # get environment variable values for testing
    subdomain: str = os.getenv("ZENDESK_API_SUBDOMAIN")
    email: str = os.getenv("ZENDESK_API_EMAIL")
    token: str = os.getenv("ZENDESK_API_TOKEN")
# pass the values to the test function
yield subdomain, email, token
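# Hypothetical environment for a live run of these tests:
#   export ZENDESK_API_SUBDOMAIN=acme
#   export ZENDESK_API_EMAIL=agent@example.com
#   export ZENDESK_API_TOKEN=<api-token>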
def test_zendesk_common_url_root(variables):
"""
Import the relevant Zendesk configuration constants and see if it contains the correct
URL root.
"""
subdomain, email, token = variables
from main.upstream.zendesk_common import API_URL_ROOT
assert API_URL_ROOT == f"https://{subdomain}.zendesk.com/api/v2"
def test_zendesk_common_auth(variables):
"""
Import the relevant Zendesk configuration constants and see if it contains the correct
authentication tuple.
"""
subdomain, email, token = variables
from main.upstream.zendesk_common import AUTH_TUPLE
assert AUTH_TUPLE == (f"{email}/token", token)
|
#!/usr/bin/env python3
from functools import partial
import unittest
from unittest.mock import Mock, patch
from sap.rest.connection import Connection
from sap.rest.errors import UnauthorizedError
def stub_retrieve(response, session, method, url, params=None, headers=None, body=None):
req = Mock()
req.method = method
req.url = url
req.params = params
req.headers = headers
req.body = body
return (req, response)
class TestConnectionExecute(unittest.TestCase):
@patch('sap.rest.connection.Connection._retrieve')
def test_unauthorized_error(self, fake_retrieve):
icf_path = '/foo'
login_path = '/bar'
host = 'books.fr'
client = '69'
user = 'Arsan'
password = 'Emmanuelle'
method = 'GET'
url = '/all'
conn = Connection(icf_path, login_path, host, client, user, password)
res = Mock()
res.status_code = 401
fake_retrieve.side_effect = partial(stub_retrieve, res)
with self.assertRaises(UnauthorizedError) as caught:
conn._execute_with_session(conn._session, method, url)
self.assertEqual(str(caught.exception), f'Authorization for the user "{user}" has failed: {method} {url}')
|
from sparselayer_tensorflow.sparselayer_tensorflow import SparseLayerDense, SparseLayerConv2D
|
import json
class Paragraph:
def __init__(self, text, tickers):
self.text = text
self.tickers = tickers
def toJson(self):
return json.dumps({
"text": self.text,
"tickers": self.tickers
})
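# Usage sketch (illustrative values):
#   Paragraph("Apple beat estimates.", ["AAPL"]).toJson()
#   -> '{"text": "Apple beat estimates.", "tickers": ["AAPL"]}'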
|
import logging
import logging.config
import yaml
import os
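# Usage sketch, assuming a dictConfig-style logging.yaml lives under
# $LOGGINGDEMOHOME (or the working directory by default):
#   log = get_logger("demo-app")
#   log.info("configured via logging.yaml")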
def get_logger(name: str) -> logging.Logger:
    """
    get_logger function
    Parameters:
        name: the name of the logger to get. If it's not defined in the config,
            the root logger's configuration is used; pass a name that identifies
            the application that is logging.
    Returns:
        A logger with the given name, set up from the configuration
    """
    directoryPath = os.getenv("LOGGINGDEMOHOME", "./")
    with open(os.path.join(directoryPath, "logging.yaml"), "r") as f:
config = yaml.safe_load(f.read())
logging.config.dictConfig(config)
    return logging.getLogger(name)
|
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import *
from toontown.battle import BattlePlace
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.toonbase import ToontownGlobals
from toontown.building import Elevator
from panda3d.core import *
from toontown.coghq import CogHQExterior
from toontown.safezone import GolfKart
from direct.distributed.ClockDelta import *
import random
class BossbotHQExterior(CogHQExterior.CogHQExterior):
notify = DirectNotifyGlobal.directNotify.newCategory('BossbotHQExterior')
    GEAR_INDEX = range(12)
    GEYSER_INDEX = range(1, 5)
def __init__(self, loader, parentFSM, doneEvent):
CogHQExterior.CogHQExterior.__init__(self, loader, parentFSM, doneEvent)
self.elevatorDoneEvent = 'elevatorDone'
self.trains = None
self.fsm.addState(State.State('elevator', self.enterElevator, self.exitElevator, ['walk', 'stopped', 'golfKartBlock']))
self.fsm.addState(State.State('golfKartBlock', self.enterGolfKartBlock, self.exitGolfKartBlock, ['walk', 'stopped', 'elevator']))
state = self.fsm.getStateNamed('walk')
state.addTransition('elevator')
state.addTransition('golfKartBlock')
state = self.fsm.getStateNamed('stopped')
state.addTransition('elevator')
state.addTransition('golfKartBlock')
state = self.fsm.getStateNamed('stickerBook')
state.addTransition('elevator')
state.addTransition('golfKartBlock')
self.golfKartDoneEvent = 'golfKartDone'
self.golfKartBlockDoneEvent = 'golfKartBlockDone'
self.gearLerps = []
return
def enter(self, requestStatus):
CogHQExterior.CogHQExterior.enter(self, requestStatus)
self.loader.startCollisionDetection()
self.loader.startBushCollisionDetection()
def exit(self):
CogHQExterior.CogHQExterior.exit(self)
self.loader.stopCollisionDetection()
self.loader.stopBushCollisionDetection()
def enterElevator(self, distElevator, skipDFABoard=0):
self.accept(self.elevatorDoneEvent, self.handleElevatorDone)
self.elevator = Elevator.Elevator(self.fsm.getStateNamed('elevator'), self.elevatorDoneEvent, distElevator)
if skipDFABoard:
self.elevator.skipDFABoard = 1
distElevator.elevatorFSM = self.elevator
self.elevator.setReverseBoardingCamera(True)
self.elevator.load()
self.elevator.enter()
def exitElevator(self):
self.ignore(self.elevatorDoneEvent)
self.elevator.unload()
self.elevator.exit()
del self.elevator
def detectedElevatorCollision(self, distElevator):
if self.fsm.getCurrentState().getName() == 'walk':
self.fsm.request('elevator', [distElevator])
def handleElevatorDone(self, doneStatus):
self.notify.debug('handling elevator done event')
where = doneStatus['where']
if where == 'reject':
if hasattr(base.localAvatar, 'elevatorNotifier') and base.localAvatar.elevatorNotifier.isNotifierOpen():
pass
else:
self.fsm.request('walk')
else:
if where == 'exit':
self.fsm.request('walk')
else:
if where == 'countryClubInterior':
self.doneStatus = doneStatus
messenger.send(self.doneEvent)
else:
self.notify.error('Unknown mode: ' + where + ' in handleElevatorDone')
def __handleOnFloor(self, collision):
base.localAvatar.b_setParent(ToontownGlobals.SPDynamic + int(collision.getIntoNode().getName()[29:]))
def __handleOffFloor(self, collision):
base.localAvatar.b_setParent(ToontownGlobals.SPRender)
def __cleanupDialog(self, value):
if self.dialog:
self.dialog.cleanup()
self.dialog = None
if hasattr(self, 'fsm'):
self.fsm.request('walk', [1])
return
def enterGolfKartBlock(self, golfKart):
base.localAvatar.laffMeter.start()
base.localAvatar.b_setAnimState('off', 1)
self.accept(self.golfKartDoneEvent, self.handleGolfKartDone)
self.trolley = GolfKart.GolfKart(self, self.fsm, self.golfKartDoneEvent, golfKart.getDoId())
self.trolley.load()
self.trolley.enter()
def exitGolfKartBlock(self):
base.localAvatar.laffMeter.stop()
self.trolley.unload()
self.trolley.exit()
del self.trolley
def detectedGolfKartCollision(self, golfKart):
self.notify.debug('detectedGolfkartCollision()')
self.fsm.request('golfKartBlock', [golfKart])
def handleStartingBlockDone(self, doneStatus):
self.notify.debug('handling StartingBlock done event')
where = doneStatus['where']
if where == 'reject':
self.fsm.request('walk')
else:
if where == 'exit':
self.fsm.request('walk')
else:
if where == 'racetrack':
                    print('Entering Racetrack')
self.doneStatus = doneStatus
messenger.send(self.doneEvent)
else:
self.notify.error('Unknown mode: ' + where + ' in handleStartingBlockDone')
def handleGolfKartDone(self, doneStatus):
self.notify.debug('handling golf kart done event')
mode = doneStatus['mode']
if mode == 'reject':
self.fsm.request('walk')
else:
if mode == 'exit':
self.fsm.request('walk')
else:
if mode == 'golfcourse':
self.doneStatus = {'loader': 'golfcourse', 'where': 'golfcourse', 'hoodId': self.loader.hood.id,
'zoneId': doneStatus['zoneId'],
'shardId': None,
'courseId': doneStatus['courseId']}
messenger.send(self.doneEvent)
else:
self.notify.error('Unknown mode: ' + mode + ' in handleGolfKartDone')
        return
|
import os
import subprocess
import sys
from multiprocessing.pool import ThreadPool
from pathlib import Path
timeout = 20
cpus = 10
num_threads = 10
def call_ta2search(command):
print(command)
p = subprocess.Popen(command, shell=True)
try:
p.communicate(timeout=timeout * 60)
    except subprocess.TimeoutExpired:
        p.kill()
        print(command, "took too long and was terminated\n\n")
tp = ThreadPool(num_threads)
home = str(Path.home())
# config_dir = sys.argv[2]
config_dir = home + "/dsbox/runs2/config-ll0/"
for conf in os.listdir(config_dir):
# command = "python3 ta1-run-single-template --template " + sys.argv[1] + " " + os.path.join(config_dir, conf, 'search_config.json')
command = "python ta2-search " + config_dir + conf + " --timeout " + str(timeout) + " --cpus " + str(cpus)
tp.apply_async(call_ta2search, (command,))
tp.close()
tp.join()
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: kv.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='kv.proto',
package='kv',
syntax='proto3',
serialized_pb=_b('\n\x08kv.proto\x12\x02kv\"\x19\n\nGetRequest\x12\x0b\n\x03key\x18\x01 \x01(\t\"\x1c\n\x0bGetResponse\x12\r\n\x05value\x18\x01 \x01(\t\"(\n\nSetRequest\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\x19\n\x0bSetResponse\x12\n\n\x02ok\x18\x01 \x01(\x08\x32T\n\x02KV\x12&\n\x03Get\x12\x0e.kv.GetRequest\x1a\x0f.kv.GetResponse\x12&\n\x03Set\x12\x0e.kv.SetRequest\x1a\x0f.kv.SetResponseb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_GETREQUEST = _descriptor.Descriptor(
name='GetRequest',
full_name='kv.GetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='kv.GetRequest.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=16,
serialized_end=41,
)
_GETRESPONSE = _descriptor.Descriptor(
name='GetResponse',
full_name='kv.GetResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='kv.GetResponse.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=43,
serialized_end=71,
)
_SETREQUEST = _descriptor.Descriptor(
name='SetRequest',
full_name='kv.SetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='kv.SetRequest.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='kv.SetRequest.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=73,
serialized_end=113,
)
_SETRESPONSE = _descriptor.Descriptor(
name='SetResponse',
full_name='kv.SetResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ok', full_name='kv.SetResponse.ok', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=115,
serialized_end=140,
)
DESCRIPTOR.message_types_by_name['GetRequest'] = _GETREQUEST
DESCRIPTOR.message_types_by_name['GetResponse'] = _GETRESPONSE
DESCRIPTOR.message_types_by_name['SetRequest'] = _SETREQUEST
DESCRIPTOR.message_types_by_name['SetResponse'] = _SETRESPONSE
GetRequest = _reflection.GeneratedProtocolMessageType('GetRequest', (_message.Message,), dict(
DESCRIPTOR = _GETREQUEST,
__module__ = 'kv_pb2'
# @@protoc_insertion_point(class_scope:kv.GetRequest)
))
_sym_db.RegisterMessage(GetRequest)
GetResponse = _reflection.GeneratedProtocolMessageType('GetResponse', (_message.Message,), dict(
DESCRIPTOR = _GETRESPONSE,
__module__ = 'kv_pb2'
# @@protoc_insertion_point(class_scope:kv.GetResponse)
))
_sym_db.RegisterMessage(GetResponse)
SetRequest = _reflection.GeneratedProtocolMessageType('SetRequest', (_message.Message,), dict(
DESCRIPTOR = _SETREQUEST,
__module__ = 'kv_pb2'
# @@protoc_insertion_point(class_scope:kv.SetRequest)
))
_sym_db.RegisterMessage(SetRequest)
SetResponse = _reflection.GeneratedProtocolMessageType('SetResponse', (_message.Message,), dict(
DESCRIPTOR = _SETRESPONSE,
__module__ = 'kv_pb2'
# @@protoc_insertion_point(class_scope:kv.SetResponse)
))
_sym_db.RegisterMessage(SetResponse)
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class KVStub(object):
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Get = channel.unary_unary(
'/kv.KV/Get',
request_serializer=GetRequest.SerializeToString,
response_deserializer=GetResponse.FromString,
)
self.Set = channel.unary_unary(
'/kv.KV/Set',
request_serializer=SetRequest.SerializeToString,
response_deserializer=SetResponse.FromString,
)
class KVServicer(object):
def Get(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Set(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_KVServicer_to_server(servicer, server):
rpc_method_handlers = {
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=GetRequest.FromString,
response_serializer=GetResponse.SerializeToString,
),
'Set': grpc.unary_unary_rpc_method_handler(
servicer.Set,
request_deserializer=SetRequest.FromString,
response_serializer=SetResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'kv.KV', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaKVServicer(object):
def Get(self, request, context):
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def Set(self, request, context):
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaKVStub(object):
def Get(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
raise NotImplementedError()
Get.future = None
def Set(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
raise NotImplementedError()
Set.future = None
def beta_create_KV_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
request_deserializers = {
('kv.KV', 'Get'): GetRequest.FromString,
('kv.KV', 'Set'): SetRequest.FromString,
}
response_serializers = {
('kv.KV', 'Get'): GetResponse.SerializeToString,
('kv.KV', 'Set'): SetResponse.SerializeToString,
}
method_implementations = {
('kv.KV', 'Get'): face_utilities.unary_unary_inline(servicer.Get),
('kv.KV', 'Set'): face_utilities.unary_unary_inline(servicer.Set),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_KV_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
request_serializers = {
('kv.KV', 'Get'): GetRequest.SerializeToString,
('kv.KV', 'Set'): SetRequest.SerializeToString,
}
response_deserializers = {
('kv.KV', 'Get'): GetResponse.FromString,
('kv.KV', 'Set'): SetResponse.FromString,
}
cardinalities = {
'Get': cardinality.Cardinality.UNARY_UNARY,
'Set': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'kv.KV', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
|
#!/usr/bin/python
import sys
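# Reorders an HTCondor submit description read from stdin: every setting other
# than 'Arguments' is printed first, then each 'Arguments' line (in reverse
# input order) followed by its own 'queue' statement.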
pre = []
args = []
for l in sys.stdin.readlines():
l = l.strip()
if l.find('=') == -1:
continue
k = l.split('=')[0].strip()
if k == 'Arguments' or k == 'arguments': args.append(l)
else: pre.append(l)
for p in pre:
    print(p)
args.reverse()
for a in args:
    print(a)
    print("queue")
    print()
|
# Generated by Django 2.1.11 on 2019-08-13 06:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ActivityLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('modified_object_id', models.PositiveIntegerField()),
('action_type', models.CharField(choices=[('ADD_TAG_TO_DOCUMENT', 'Add tag to document')], max_length=255)),
('data', models.TextField()),
('modified_object_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
|
# coding: utf-8
import os
''' xcode clang compile command line:
/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang \
-c -x c++ -arch x86_64 -O0 -g -mmacosx-version-min=10.14 \
-std=c++11 -MMD -MT dependencies \
-MF /Users/nsw/src/frameflow/third_party/work/jsoncpp-arm64/src/test_lib_json/JSONCPP.build/Debug/jsoncpp_test.build/Objects-normal/x86_64/main.d \
-isysroot /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.14.sdk \
-fasm-blocks -fstrict-aliasing -fcolor-diagnostics \
--serialize-diagnostics /Users/nsw/src/frameflow/third_party/work/jsoncpp-arm64/src/test_lib_json/JSONCPP.build/Debug/jsoncpp_test.build/Objects-normal/x86_64/main.dia \
-fmessage-length=142 -fdiagnostics-show-note-include-stack -fmacro-backtrace-limit=0 \
-Wno-trigraphs -fpascal-strings -Wno-missing-field-initializers -Wno-missing-prototypes -Wno-return-type -Wno-non-virtual-dtor -Wno-overloaded-virtual -Wno-exit-time-destructors \
-Wno-missing-braces -Wparentheses -Wswitch -Wno-unused-function -Wno-unused-label -Wno-unused-parameter -Wno-unused-variable -Wunused-value -Wno-empty-body -Wno-uninitialized \
-Wno-unknown-pragmas -Wno-shadow -Wno-four-char-constants -Wno-conversion -Wno-constant-conversion -Wno-int-conversion -Wno-bool-conversion -Wno-enum-conversion -Wno-float-conversion \
-Wno-non-literal-null-conversion -Wno-objc-literal-conversion -Wno-shorten-64-to-32 -Wno-newline-eof -Wno-c++11-extensions -Wdeprecated-declarations -Winvalid-offsetof \
-Wno-sign-conversion -Wno-infinite-recursion -Wno-move -Wno-comma -Wno-block-capture-autoreleasing -Wno-strict-prototypes -Wno-range-loop-analysis -Wno-semicolon-before-method-body \
-Wmost -Wno-four-char-constants -Wno-unknown-pragmas -Wall -Wconversion -Wshadow -Werror=conversion -Werror=sign-compare \
-DCMAKE_INTDIR=\"Debug\" \
-I/include \
-I/Users/nsw/src/frameflow/third_party/work/jsoncpp-arm64/src/test_lib_json/Debug/include \
-I/Users/nsw/src/frameflow/third_party/repo/jsoncpp/src/lib_json/../../include \
-I/Users/nsw/src/frameflow/third_party/work/jsoncpp-arm64/src/test_lib_json/JSONCPP.build/Debug/jsoncpp_test.build/DerivedSources-normal/x86_64 \
-I/Users/nsw/src/frameflow/third_party/work/jsoncpp-arm64/src/test_lib_json/JSONCPP.build/Debug/jsoncpp_test.build/DerivedSources/x86_64 \
-I/Users/nsw/src/frameflow/third_party/work/jsoncpp-arm64/src/test_lib_json/JSONCPP.build/Debug/jsoncpp_test.build/DerivedSources \
-F/Users/nsw/src/frameflow/third_party/work/jsoncpp-arm64/src/test_lib_json/Debug \
/Users/nsw/src/frameflow/third_party/repo/jsoncpp/src/test_lib_json/main.cpp \
-o /Users/nsw/src/frameflow/third_party/work/jsoncpp-arm64/src/test_lib_json/JSONCPP.build/Debug/jsoncpp_test.build/Objects-normal/x86_64/main.o
'''
class Clang:
def __init__(self, **kwargs):
self.name = 'clang'
self.tags = ('clang', )
self.kwargs = kwargs
self.suffix = kwargs.get('suffix', '')
self.prefix = kwargs.get('prefix', '')
if os.path.isdir(self.prefix):
self.prefix += os.sep
self.target_triple = ''
self._cmds = {
'cc': (self.prefix + 'clang' + self.suffix, 'src', '-c', '-x', 'c'),
'cxx': (self.prefix + 'clang' + self.suffix, 'src', '-c', '-x', 'c++'),
'ar': (self.prefix + 'ar' + self.suffix, 'dst', '-rcs'),
'ld': (self.prefix + 'clang' + self.suffix, 'dst'),
#'ldd': (self.prefix + 'ld.bfd' + self.suffix, ),
}
self._compositors = {
'sysroot': lambda path, args: f'--sysroot={path}',
'includePath': lambda path, args: ['-I', path],
'libPath': lambda path, args: ['-L', path],
'lib': lambda path, args: f'-l{path}',
'define': lambda macro, args: f'-D{macro}',
}
def initByRequest(self, request):
self.target_triple = request.target_cpu + '-' + request.target_os.name
def asCmdProvider(self, kwargs):
return self._cmds
def asCmdInterpreter(self):
return self._compositors
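    # Illustrative compositor expansion (paths hypothetical):
    #   self._compositors['includePath']('/opt/sdk/include', None)
    #   -> ['-I', '/opt/sdk/include']
    #   self._compositors['define']('NDEBUG', None) -> '-DNDEBUG'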
def asCmdFilter(self, cmd, kwargs):
if cmd.name not in ('ar', 'cc', 'cxx', 'ld'):
return
if cmd.name == 'cc':
cmd.ccflags += ['-Wall', '--target=' + self.target_triple]
elif cmd.name == 'cxx':
cmd.cxxflags += ['-Wall', '--target=' + self.target_triple]
            cmd.defines += ['_LIBCPP_HAS_THREAD_API_PTHREAD']
elif cmd.name == 'ld':
            cmd.ldflags += ['--target=' + self.target_triple]
cmd.composeSources(
cmd.sources,
os.path.join(kwargs['request'].rootBuild, 'src_list.txt'))
if kwargs['target'].isSharedLib():
cmd.ldflags += ['-shared', '-fpic']
dst = kwargs['dst']
if cmd.name == 'ar':
cmd += dst
else:
cmd += ['-o', dst]
|
import torch
from torch import nn, autograd
from torch.utils.data import DataLoader, Dataset
import numpy as np
import random
from sklearn import metrics
import torch.nn.functional as F
import torchtext
def evaluate_validation(scores, loss_function, gold):
guesses = scores.argmax(dim=1)
n_correct = (guesses == gold).sum().item()
return n_correct, loss_function(scores, gold).item()
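# Reading of the helper above (inferred): `scores` is (batch, n_classes)
# log-probabilities (matching the nn.NLLLoss used below) and `gold` holds
# integer labels; it returns the batch's correct-prediction count and loss.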
class LanguageClientUpdate(object):
def __init__(self, args, train_set=None, test_set=None, idxs_train=None,
idxs_val=None,idxs_test=None, TEXT=None, LABEL=None):
self.args = args
self.loss_function = nn.NLLLoss()
#TEXT = torchtext.data.Field(sequential=True, tokenize=lambda x: x.split())
#LABEL = torchtext.data.LabelField(is_target=True)
datafields = [('text', TEXT), ('label', LABEL)]
train_examples = [train_set.examples[i] for i in idxs_train]
val_examples = [train_set.examples[i] for i in idxs_val]
test_examples = [test_set.examples[i] for i in idxs_test]
local_train_set = torchtext.data.Dataset(train_examples, datafields)
local_val_set = torchtext.data.Dataset(val_examples, datafields)
local_test_set = torchtext.data.Dataset(test_examples, datafields)
self.n_val = len(local_val_set)
self.n_test = len(local_test_set)
#TEXT.build_vocab(local_train_set, max_size=10000)
#LABEL.build_vocab(local_train_set)
train_iterator = torchtext.data.BucketIterator(
local_train_set,
device=args.device,
batch_size=args.local_bs,
sort_key=lambda x: len(x.text),
repeat=False,
train=True)
val_iterator = torchtext.data.BucketIterator(
local_val_set,
device=args.device,
batch_size=args.local_bs,
sort_key=lambda x: len(x.text),
repeat=False,
train=False,
sort=True)
test_iterator = torchtext.data.Iterator(
local_test_set,
device=args.device,
batch_size=args.local_bs,
repeat=False,
train=False,
sort=False)
self.train_batches = list(train_iterator)
self.val_batches = list(val_iterator)
self.test_batches = list(test_iterator)
def train(self, net, n_epochs, learning_rate):
net.train()
# train and update
optimizer = torch.optim.Adam(net.parameters(),lr=learning_rate)
epoch_loss = []
for iter in range(n_epochs):
net.train()
loss_sum = 0
n_batches = 0
for batch in self.train_batches:
scores = net(batch.text)
loss = self.loss_function(scores, batch.label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_sum += loss.item()
n_batches += 1
train_loss = loss_sum / n_batches
epoch_loss.append(train_loss)
#history['train_loss'].append(train_loss)
return net.state_dict(), epoch_loss[-1]
def train_finetune(self, net, n_epochs, learning_rate, val):
# train and update
optimizer = torch.optim.Adam(net.parameters(),lr=learning_rate)
epoch_loss = []
patience = 10
model_best = net.state_dict()
train_acc_best = np.inf
val_acc_best = -np.inf
val_loss_best = np.inf
counter = 0
for iter in range(n_epochs):
net.train()
loss_sum = 0
n_batches = 0
for batch in self.train_batches:
scores = net(batch.text)
loss = self.loss_function(scores, batch.label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_sum += loss.item()
n_batches += 1
train_loss = loss_sum / n_batches
epoch_loss.append(train_loss)
            if epoch % 5 == 0:
                val_acc, val_loss = self.validate(net, val)
                net.train()
                if val_loss < val_loss_best - 0.01:
counter = 0
model_best = net.state_dict()
val_acc_best = val_acc
val_loss_best = val_loss
print("Iter %d | %.2f" %(iter, val_acc_best))
else:
counter = counter + 1
if counter == patience:
                        return model_best, val_loss_best, val_acc_best
return model_best, val_loss_best, val_acc_best
def train_mix(self, net_local, net_global, gate, train_gate_only, n_epochs, early_stop, learning_rate, val):
# train and update
        if train_gate_only:
            optimizer = torch.optim.Adam(list(gate.parameters()), lr=learning_rate)
        else:
            optimizer = torch.optim.Adam(list(net_local.parameters()) + list(gate.parameters()), lr=learning_rate)
epoch_loss = []
patience = 10
gate_best = gate.state_dict()
local_best = net_local.state_dict()
global_best = net_global.state_dict()
val_acc_best = -np.inf
val_loss_best = np.inf
counter = 0
        for epoch in range(n_epochs):
net_local.train()
net_global.train()
gate.train()
loss_sum = 0
n_batches = 0
for batch in self.train_batches:
scores_l = net_local(batch.text)
scores_g = net_global(batch.text)
gate_weight = gate(batch.text)
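                # mix the two models' per-example scores: the gate's output
                # weights the local model against the global model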
scores = gate_weight * scores_l + (1-gate_weight) * scores_g
loss = self.loss_function(scores, batch.label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_sum += loss.item()
n_batches += 1
train_loss = loss_sum / n_batches
epoch_loss.append(train_loss)
            if epoch % 5 == 0:
                val_acc, val_loss = self.validate_mix(net_local, net_global, gate, val)
                net_local.train()
                net_global.train()
                gate.train()
                if val_loss < val_loss_best - 0.01:
counter = 0
gate_best = gate.state_dict()
local_best = net_local.state_dict()
global_best = net_global.state_dict()
val_acc_best = val_acc
val_loss_best = val_loss
print("Iter %d | %.2f" %(iter, val_acc_best))
else:
counter = counter + 1
if counter == patience:
return gate_best, local_best, global_best, val_loss_best, val_acc_best
return gate_best, local_best, global_best, val_loss_best, val_acc_best
def validate(self,net,val):
net.eval()
if(val):
batches = self.val_batches
dataset_size = self.n_val
else:
batches = self.test_batches
dataset_size = self.n_test
with torch.no_grad():
loss_sum = 0
n_correct = 0
n_batches = 0
for batch in batches:
scores = net(batch.text)
n_corr_batch, loss_batch = evaluate_validation(scores, self.loss_function, batch.label)
loss_sum += loss_batch
n_correct += n_corr_batch
n_batches += 1
val_acc = 100*n_correct / dataset_size
val_loss = loss_sum / n_batches
return val_acc, val_loss
def validate_mix(self,net_l, net_g, gate, val):
net_l.eval()
net_g.eval()
gate.eval()
if(val):
batches = self.val_batches
dataset_size = self.n_val
else:
batches = self.test_batches
dataset_size = self.n_test
with torch.no_grad():
loss_sum = 0
n_correct = 0
n_batches = 0
for batch in batches:
scores_l = net_l(batch.text)
scores_g = net_g(batch.text)
gate_weight = gate(batch.text)
scores = gate_weight * scores_l + (1-gate_weight) * scores_g
n_corr_batch, loss_batch = evaluate_validation(scores, self.loss_function, batch.label)
loss_sum += loss_batch
n_correct += n_corr_batch
n_batches += 1
val_acc = 100*n_correct / dataset_size
val_loss = loss_sum / n_batches
        return val_acc, val_loss
|
from setuptools import setup
setup(
name="geneutils",
version="0.0.6",
packages=["geneutils"],
data_files=[("", ["LICENSE"])],
url="https://github.com/samapriya/geneutils",
install_requires=[
"biopython>=1.77",
"pandas>=1.1.5",
"requests>=2.26.0",
"beautifulsoup4>=4.9.3",
],
license="MIT",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Operating System :: OS Independent",
],
author="Samapriya Roy",
author_email="samapriya.roy@gmail.com",
description="CLI and utilities for Genetic analysis and database interface",
entry_points={
"console_scripts": [
"geneutils=geneutils.geneutils:main",
],
},
)
|
contains_a = lambda word: "a" in word
print(contains_a("banana"))
print(contains_a("apple"))
print(contains_a("cherry"))
#Write your lambda function here
long_string = lambda s: len(s) > 12
print(long_string("short"))
print(long_string("photosynthesis"))
#Write your lambda function here
ends_in_a = lambda s: s[-1] == 'a'
print(ends_in_a("data"))
print(ends_in_a("aardvark"))
|
# -*- coding: utf-8 -*-
import torch
import torch.autograd as autograd
import torch.nn as nn
# hyper parameters
EMBEDDING_DIM = 50
LEARNING_RATE = 1e-3
EPOCH = 100
HIDDEN_DIM = 6
NUM_LAYERS = 1
# load data
training_data = [
("The dog ate the apple".split(), ["DET", "NN", "V", "DET", "NN"]),
("Everybody read that book".split(), ["NN", "V", "DET", "NN"])
]
word_to_idx = {}
tag_to_idx = {}
idx_to_tags = {}
for sent, tags in training_data:
for word in sent:
if word not in word_to_idx:
word_to_idx[word] = len(word_to_idx)
for label in tags:
if label not in tag_to_idx:
tag_to_idx[label] = len(tag_to_idx)
idx_to_tags[len(idx_to_tags)] = label
# print(word_to_idx)
# print(tag_to_idx)
def prepare_sequence(seq, to_idx):
idxs = [to_idx[w] for w in seq]
tensor = torch.LongTensor(idxs)
return autograd.Variable(tensor)
# model definition
class LSTMTagger(nn.Module):
def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
super(LSTMTagger, self).__init__()
self.hidden_dim = hidden_dim
self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(
input_size=embedding_dim,
hidden_size=hidden_dim,
num_layers=NUM_LAYERS,
)
self.hidden2tag = nn.Linear(hidden_dim, tagset_size)
def forward(self, sentence):
embeds = self.word_embeddings(sentence)
lstm_out, h = self.lstm(embeds.view(len(sentence), 1, -1), None)
out = self.hidden2tag(lstm_out.view(len(sentence), -1))
return out
model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_idx), len(tag_to_idx))
loss_func = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(
params=model.parameters(),
lr=LEARNING_RATE
)
# model training
def train(epoch):
model.train()
training_loss = 0
for sentence, tags in training_data:
sentence_in = prepare_sequence(sentence, word_to_idx)
targets = prepare_sequence(tags, tag_to_idx)
# forward
out = model(sentence_in)
loss = loss_func(out, targets)
        training_loss += loss.item()
# backward
model.zero_grad()
loss.backward()
optimizer.step()
print('Epoch: {0}, Loss: {1:.6f}'.format(epoch, training_loss / len(training_data)))
# model evaluation
def evaluate():
model.eval()
test_loss = 0
for sentence, tags in training_data:
sentence_in = prepare_sequence(sentence, word_to_idx)
targets = prepare_sequence(tags, tag_to_idx)
# forward
out = model(sentence_in)
loss = loss_func(out, targets)
        test_loss += loss.item()
print('Loss: {0:.6f}'.format(test_loss / len(training_data)))
# model inference
def inference(instance):
model.eval()
sentence_in = prepare_sequence(instance, word_to_idx)
out = model(sentence_in)
tags = [idx_to_tags[int(i)] for i in out.data.max(1, keepdim=True)[1]]
return ','.join(tags)
if __name__ == '__main__':
for i in range(0, EPOCH):
train(i)
evaluate()
# print(idx_to_tags)
result = inference(training_data[0][0])
label = ','.join(training_data[0][1])
print('real POS is `{0}`, predict POS is `{1}`'.format(label, result))
|
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import pytest
from datadog_checks.dev import run_command
from datadog_checks.dev.kind import kind_run
from datadog_checks.dev.kube_port_forward import port_forward
try:
from contextlib import ExitStack
except ImportError:
from contextlib2 import ExitStack
HERE = os.path.dirname(os.path.abspath(__file__))
PORT = 9402
def setup_cert_manager():
# Deploy Cert Manager
run_command(
[
"kubectl",
"apply",
"-f",
"https://github.com/jetstack/cert-manager/releases/download/v1.2.0/cert-manager.yaml",
]
)
run_command(
["kubectl", "wait", "deployments", "--all", "--for=condition=Available", "-n", "cert-manager", "--timeout=300s"]
)
run_command(["kubectl", "wait", "pods", "-n", "cert-manager", "--all", "--for=condition=Ready", "--timeout=300s"])
# Issue self-signed certs
config = os.path.join(HERE, 'kubernetes', 'selfsigned.yaml')
run_command(["kubectl", "create", "-f", config])
run_command(
[
"kubectl",
"wait",
"certificates",
"-n",
"cert-manager-test",
"--all",
"--for=condition=Ready",
"--timeout=300s",
]
)
# Deploy Pebble
config = os.path.join(HERE, 'kubernetes', 'pebble.yaml')
run_command(["kubectl", "create", "-f", config])
# Deploy Nginx
config = os.path.join(HERE, 'kubernetes', 'nginx.yaml')
run_command(["kubectl", "create", "-f", config])
# Wait for deployments
run_command(["kubectl", "wait", "deployments", "--all", "--for=condition=Available", "--timeout=300s"])
# Issue acme certs
config = os.path.join(HERE, 'kubernetes', 'acme.yaml')
run_command(["kubectl", "create", "-f", config])
run_command(
[
"kubectl",
"wait",
"certificates",
"-n",
"acme-test",
"--all",
"--for=condition=Ready",
"--timeout=300s",
]
)
@pytest.fixture(scope='session')
def dd_environment():
with kind_run(conditions=[setup_cert_manager]) as kubeconfig:
with ExitStack() as stack:
ip_ports = [stack.enter_context(port_forward(kubeconfig, 'cert-manager', 'cert-manager', PORT))]
instances = {
'instances': [
{'prometheus_url': 'http://{}:{}/metrics'.format(*ip_ports[0])},
]
}
yield instances
@pytest.fixture
def instance():
return {}
|
# from django.shortcuts import render
from django.views.generic.base import ContextMixin
from .models import PageHelp
class HelpContextMixin(ContextMixin):
def get_context_data(self, **kwargs):
context = super(HelpContextMixin, self).get_context_data(**kwargs)
page_help, create = PageHelp.objects.get_or_create(
page_name=self.page_help_name
)
context['page_help'] = page_help
return context
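# A minimal usage sketch (hypothetical view; any class using the mixin must
# define page_help_name, which get_context_data reads above):
# from django.views.generic import TemplateView
# class AboutView(HelpContextMixin, TemplateView):
#     template_name = 'about.html'
#     page_help_name = 'about'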
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Spotify AB
__all__ = ["RAMLLoader"]
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
from ordereddict import OrderedDict
import os
import yaml
from .errors import LoadRAMLError
class RAMLLoader(object):
"""
Extends YAML loader to load RAML files with ``!include`` tags.
"""
def _yaml_include(self, loader, node):
"""
Adds the ability to follow ``!include`` directives within
RAML Files.
"""
# Get the path out of the yaml file
file_name = os.path.join(os.path.dirname(loader.name), node.value)
file_ext = os.path.splitext(file_name)[1]
parsable_ext = [".yaml", ".yml", ".raml", ".json"]
if file_ext not in parsable_ext:
with open(file_name) as inputfile:
return inputfile.read()
with open(file_name) as inputfile:
return yaml.load(inputfile, self._ordered_loader)
def _ordered_load(self, stream, loader=yaml.SafeLoader):
"""
Preserves order set in RAML file.
"""
class OrderedLoader(loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return OrderedDict(loader.construct_pairs(node))
OrderedLoader.add_constructor("!include", self._yaml_include)
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping)
self._ordered_loader = OrderedLoader
return yaml.load(stream, OrderedLoader)
def load(self, raml):
"""
Loads the desired RAML file and returns data.
:param raml: Either a string/unicode path to RAML file, a file object,\
or string-representation of RAML.
:return: Data from RAML file
:rtype: ``dict``
"""
        try:
            return self._ordered_load(raml, yaml.SafeLoader)
        except (yaml.parser.ParserError, yaml.constructor.ConstructorError) as e:
            msg = "Error parsing RAML: {0}".format(e)
            raise LoadRAMLError(msg)
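# A minimal usage sketch (assumes 'api.raml' exists alongside any files its
# !include tags reference):
# loader = RAMLLoader()
# data = loader.load(open('api.raml'))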
|
# Credit to Lester Leong and his "Python Risk Management: Monte Carlo Simulations"
# article which helped with starting off and understanding the material.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import datetime as dt
import json
import numpy as np
import pandas as pd
import pandas_datareader as pdr
import mpld3
from scipy.stats import norm
class Monte:
def __init__(self, ticker, sim_amount, time_steps, width, height, dpi, start, end, data_source='yahoo'):
"""
Initialization function for the Monte object.
:param ticker: the ticker label associated with a stock
:param sim_amount: the amount of simulations to be done
        :param time_steps: the number of time steps into the future the simulation will go
:param width: width of the figure in pixels
:param height: height of the figure in pixels
:param dpi: the dpi for the browser display
:param start: the start datetime for the simulations
:param end: the end datetime for the simulations
        :param data_source: data source from which the stock data is derived; Yahoo Finance is the default
"""
self.ticker = ticker
self.sim_amount = sim_amount
self.time_steps = time_steps + 1
self.start = start
self.end = end
self.data_source = data_source
self.width = width / dpi
self.height = height / dpi
self.dpi = dpi
self.data = pd.DataFrame()
self.monte_sims = pd.DataFrame()
#self.figure = plt.figure(figsize=(self.width, self.height))
def create_DataFrame(self):
"""
Function that creates the DataFrame object where the stock data will be stored.
"""
self.data[self.ticker] = pdr.DataReader(self.ticker, data_source=self.data_source,
start=self.start, end=self.end)['Adj Close']
def simulate(self):
"""
Function that does the necessary calculations for the simulation data.
"""
# np.random.seed(8) this can be used to seed the simulation so you can repeat results
# Initial data values needed to set up the simulations.
log_returns = np.log(1 + self.data.pct_change()) # percentage change between current and prior element
mu = log_returns.mean() # average/mean
var = log_returns.var() # variance
drift = mu - (0.5 * var) # stochastic drift
sigma = log_returns.std() # standard deviation
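        # each daily return is exp(drift + sigma * Z) with Z ~ N(0, 1), i.e. a
        # geometric Brownian motion step; Z is sampled via the inverse CDF
        # (norm.ppf) of uniform random draws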
daily_returns = np.exp(drift.to_numpy() + sigma.to_numpy() * norm.ppf(np.random.rand(self.time_steps, self.sim_amount)))
        # Takes the last data point in the stock data as the starting point for the simulations
initial = self.data.iloc[-1]
self.monte_sims = np.zeros_like(daily_returns)
self.monte_sims[0] = initial
# Fills monte_sims with simulated prices which are pseudorandomized with daily_returns
for t in range(1, self.time_steps):
self.monte_sims[t] = self.monte_sims[t - 1] * daily_returns[t]
def plot_history(self):
"""
Function that plots the history of stock prices in the time frame set by the user.
        :returns: plot_history_str, a string containing the HTML for the graphical output.
:rtype: str
"""
stock_plot = self.data.plot(figsize=(self.width, self.height))
stock_plot.set_xlabel('Date')
stock_plot.set_ylabel('Adjusted Closing Price')
stock_plot.set_title("Historical Adjusted Closing Prices Over Time")
history = plt.gcf()
self.history = history
plot_history_str = mpld3.fig_to_html(self.history) # saves figure to string of html
#plot_history_dict = mpld3.fig_to_dict(self.history)
return plot_history_str
def plot_pdf(self):
"""
Function that plots the distribution of simulated prices of a given time step into the future.
This histogram is fit to a Probability Density Function with the mean and standard deviation
listed in the title.
        :returns: plot_pdf_str, a string containing the HTML for the graphical output.
:rtype: str
"""
        # Histogram for the price frequencies, number of bins can be adjusted
fig = plt.figure(figsize=(self.width, self.height))
plt.hist(self.monte_sims[self.time_steps - 2], bins=10, density=True)
# Probability Density Function
sim_mu, sim_sig = norm.fit(self.monte_sims[self.time_steps - 2]) # Simulation mean and standard deviation values
xmin, xmax = plt.xlim() # set the xmin and xmax along the x-axis for the pdf
x = np.linspace(xmin, xmax)
p = norm.pdf(x, sim_mu, sim_sig)
        # Plots frequencies of the Monte Carlo simulations fit to normal distribution
plt.plot(x, p, 'k') # normal distribution fit
plt.xlabel('Adjusted Closing Price')
plt.ylabel('Probability Density')
title = "Simulated Prices %d Days into the Future\n(PDF fit results: μ = %.4f, σ = %.4f)" % (self.time_steps - 1, sim_mu, sim_sig)
plt.title(title)
plot_pdf_str = mpld3.fig_to_html(fig) # saves figure to string of html
#plot_pdf_dict = mpld3.fig_to_dict(fig)
return plot_pdf_str
def plot_single(self):
"""
Function that plots the first element in each set of simulations after a given time step.
These elements are plotted to show a single simulated projection line.
        :returns: plot_single_str, a string containing the HTML for the graphical output.
:rtype: str
"""
single = []
for item in self.monte_sims:
single.append(item[0])
plt.figure(figsize=(self.width, self.height))
plt.plot(single)
plt.xlabel('Days into the Future')
plt.ylabel('Adjusted Closing Price')
plt.title('Simulated Adjusted Closing Prices Over Time')
single = plt.gcf()
plot_single_str = mpld3.fig_to_html(single) # saves figure to string of html
#plot_single_dict = mpld3.fig_to_dict(single)
return plot_single_str
def plot_multi(self):
"""
        Function that plots all of the price simulations at each time step into the future.
        :returns: plot_multi_str, a string containing the HTML for the graphical output.
:rtype: str
"""
plt.figure(figsize=(self.width, self.height))
plt.plot(self.monte_sims)
plt.xlabel('Days into the Future')
plt.ylabel('Adjusted Closing Price')
title = "Monte Carlo Simulations for Adjusted Closing Prices"
plt.title(title)
multi = plt.gcf()
plot_multi_str = mpld3.fig_to_html(multi) # saves figure to string of html
#plot_multi_dict = mpld3.fig_to_dict(multi)
return plot_multi_str
def get_json(self, plot_history_str, plot_pdf_str, plot_single_str, plot_multi_str):
"""
Function that returns the json data for the html plots.
        :returns: plot_json, the JSON that contains the HTML strings for each plot.
"""
plot_dict = {
"plot_history" : plot_history_str,
"plot_pdf" : plot_pdf_str,
"plot_single" : plot_single_str,
"plot_multi" : plot_multi_str
}
plot_json = json.dumps(plot_dict, indent=4) # converts dictionary to json
return plot_json
def clear_figures(self):
plt.close('all')
'''
    These commented-out functions are kept in case we want to plot all of the subplots on one figure.
'''
'''
def plot_all(self):
"""
Original function which generates one figure with 4 subplots made from user inputs. The
functions below this one separate all of the plots into their own figures.
:returns: html_str which is a string that contains the graphical output for the matplotlib plots
:rtype: str
"""
# Creating the fig object for the matplotlib canvas
fig, axs = plt.subplots(2, 2, figsize = (16,10))
# Stock Data Subplot ################################################################
stock_plot = axs[0, 0]
stock_plot.plot(self.data)
stock_plot.set_xlabel('Date')
stock_plot.set_ylabel('Adjusted Closing Price')
stock_plot.set_title("Adjusted Closing Prices Over Time")
# Single Future Price Subplot #######################################################
plt.subplot(2,2,3)
single = []
for item in self.monte_sims:
single.append(item[0])
plt.plot(single)
plt.xlabel('Days into the Future')
plt.ylabel('Adjusted Closing Price')
title = "Single Set of Simulations for Adjusted Closing Prices"
plt.title(title)
# Multiple Future Price Subplot #####################################################
plt.subplot(2,2,4)
plt.plot(self.monte_sims)
plt.xlabel('Days into the Future')
plt.ylabel('Adjusted Closing Price')
title = "Monte Carlo Simulations for Adjusted Closing Prices"
plt.title(title)
# PDF Fit Subplot ###################################################################
plt.subplot(2,2,2)
# Histogram for the price frequencies, number of bins can be adjusted
plt.hist(self.monte_sims[1], bins=10, density=True)
# Probability Density Function
sim_mu, sim_sig = norm.fit(self.monte_sims[1]) # Simulation mean and standard deviation values
xmin, xmax = plt.xlim() # set the xmin and xmax along the x-axis for the pdf
x = np.linspace(xmin, xmax)
p = norm.pdf(x, sim_mu, sim_sig)
        # Plots frequencies of the Monte Carlo simulations fit to normal distribution
plt.plot(x, p, 'k') # normal distribution fit
plt.xlabel('Adjusted Closing Price')
plt.ylabel('Probability Density')
title = "Simulated Prices %d Days into the Future\n(PDF fit results: μ = %.4f, σ = %.4f)" % (self.time_steps - 1, sim_mu, sim_sig)
plt.title(title)
# Save the figure to HTML string ##################################################
plt.tight_layout()
self.figure = plt.gcf()
html_str = mpld3.fig_to_html(self.figure) # saves figure to string of html
return html_str
'''
'''
def get_json(self):
"""
Function that converts the figure to Python dictionary which is directly json-serializable.
:returns: plot_dict
"""
#plot_dict = mpld3.fig_to_dict(self.figure) # saves figure dictionary
#plot_json = json.dumps(plot_dict, indent=4) # converts dictionary to json
plot_history_dict = mpld3.fig_to_dict(self.plot_history)
plot_history_json = json.dumps(plot_history_dict, indent=4)
plot_pdf_dict = mpld3.fig_to_dict(self.plot_pdf)
plot_pdf_json = json.dumps(plot_pdf_dict, indent=4)
return plot_json
'''
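# A minimal driver sketch (illustrative only: assumes the 'yahoo' source is
# reachable from pandas-datareader; the ticker, dates, and figure dimensions
# below are made-up example values):
if __name__ == '__main__':
    monte = Monte(ticker='AAPL', sim_amount=100, time_steps=30,
                  width=800, height=600, dpi=100,
                  start=dt.datetime(2019, 1, 1), end=dt.datetime(2020, 1, 1))
    monte.create_DataFrame()
    monte.simulate()
    plots = monte.get_json(monte.plot_history(), monte.plot_pdf(),
                           monte.plot_single(), monte.plot_multi())
    monte.clear_figures()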
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 3 23:57:21 2021
@author: Connor
"""
import numpy as np
def parse_input(filename):
with open(filename, "r") as fh:
lines = fh.readlines()
# Get just the list of drawings
drawings = np.array([int(item) for item in lines[0].split(",")])
# Separate out the boards to parse
board_list = lines[1:]
boards = []
marks = []
ii = 0
while ii < len(board_list):
if board_list[ii] == "\n":
# Skip blank lines
ii += 1
else:
# else update a zeros matrix with the proper board values
new_board = np.zeros((5,5))
for jj in range(5):
new_board[jj,:] = np.array([int(item) for item in board_list[ii+jj].split()])
boards.append(new_board)
# Also store a marks mask
marks.append(np.zeros((5,5), dtype=bool))
ii += 5
return drawings, boards, marks
def check_winner(board, mark):
# No diagonals means this easy check works
if np.any(np.sum(mark, axis=0)==5):
return True
elif np.any(np.sum(mark, axis=1)==5):
return True
else:
return False
def get_winner(drawings, boards, marks):
# For each drawing
for drawing in drawings:
# update the boards corresponding marks
for board, mark in zip(boards, marks):
mark |= board==drawing
# See if any board has won yet
if check_winner(board, mark):
# if so, return that board, marks mask and wininng number
return board, mark, drawing
return None, None, None
def get_last_winner(drawings, boards, marks):
dd = 0
# Loop until all boards have won
while len(boards) > 0:
drawing = drawings[dd]
winners = []
# Update all boards per move
for ii, (board, mark) in enumerate(zip(boards, marks)):
mark |= board==drawing
# See if any boards won
if check_winner(board, mark):
# Append those indices to a list as a surprise tool for later
winners.append(ii)
# its later; loop over this list BACKWARDS (to avoid index issues)
for winner in winners[::-1]:
# pop marks and boards for each winner
last_marks = marks.pop(winner)
last_winner = boards.pop(winner)
dd += 1
# If the loop ends, return the last winner, its mask,
# and the last winning number
return last_winner, last_marks, drawing
if __name__ == "__main__":
drawings, boards, marks = parse_input("input4.txt")
winning_board, winning_marks, winning_num = get_winner(drawings, boards, marks)
# Get the solutions to the problem
print(np.sum(winning_board[winning_marks==False]) * winning_num)
winning_board, winning_marks, winning_num = get_last_winner(drawings, boards, marks)
    print(np.sum(winning_board[winning_marks==False]) * winning_num)
|
import os
import sys
import subprocess
import shutil
ENV_NAME = "pyxl_test_env_" + os.path.basename(sys.executable)
if not os.path.exists(ENV_NAME) or os.stat(sys.executable).st_mtime > os.stat(ENV_NAME + "/bin/python").st_mtime:
print "Creating virtualenv to install testing dependencies..."
VIRTUALENV_SCRIPT = os.path.dirname(__file__) + "/virtualenv/virtualenv.py"
try:
args = [sys.executable, VIRTUALENV_SCRIPT, "-p", sys.executable, ENV_NAME]
print "Running", args
subprocess.check_call(args)
except:
print "Error occurred; trying to remove partially-created directory"
ei = sys.exc_info()
try:
subprocess.check_call(["rm", "-rf", ENV_NAME])
except Exception as e:
print e
raise ei[0], ei[1], ei[2]
PYXL_DIR = os.path.dirname(__file__) + "/pyxl"
python_exe = os.path.abspath(ENV_NAME + "/bin/python")
subprocess.check_call([python_exe, "setup.py", "build"], cwd=PYXL_DIR)
subprocess.check_call([python_exe, "setup.py", "install"], cwd=PYXL_DIR)
subprocess.check_call([python_exe, "finish_install.py"], cwd=PYXL_DIR)
out = subprocess.check_output([python_exe, "pyxl/examples/hello_world.py"], cwd=PYXL_DIR)
print
print "Output: '%s'" % out
assert out == "<html><body>Hello World!</body></html>\n"
print "PASSED"
|
import os
import sys
import json
import operator
import requests
import tempfile
from pathlib import Path
import boto3
BUCKET = "covid-19-aggregates"
WEBHOOK_URL = os.environ.get("SLACK_WEBHOOK_METRICS_URL", None)
def to_isodate(d):
mm, dd, yyyy = d.split("-")
return f"{yyyy}-{mm}-{dd}"
def get_data(s3, bucket, key):
data = None
with tempfile.NamedTemporaryFile() as fout:
s3.Object(bucket, key).download_file(fout.name)
with open(fout.name) as fp:
data = json.load(fp)
    k0 = next(iter(data))  # next(iter(k)) gives the first key of k
    if k0 != Path(key).stem:
        print(f"Bucket key {key} does not match key in file {k0}, aborting.")
        sys.exit(1)
return data
def diff(x, y):
if y == x:
return "= "
return ("▲ " if y > x else "▼ ") + str(abs(y - x))
def compare(d1, d2):
last_day = next(iter(d1))
d1 = d1[last_day]
today = next(iter(d2))
d2 = d2[today]
lines = []
total1 = sum(x["casecount"] for x in d1)
total2 = sum(x["casecount"] for x in d2)
if total2 == total1:
lines.append(f"No overall case count change from {last_day}")
elif total2 > total1:
lines.append(f"*New cases added*: {total2 - total1} since {last_day}\n")
else:
lines.append(f"*Cases dropped* ⚠️: {total2 - total1} since {last_day}\n")
countries1 = {str(x["_id"]): x["casecount"] for x in d1 if x["casecount"]}
countries2 = {str(x["_id"]): x["casecount"] for x in d2 if x["casecount"]}
if c21 := sorted(set(countries2) - set(countries1)):
lines.append("*Countries added*: " + ", ".join(c21))
for k in c21:
lines.append(f"- {k}: {countries2[k]} ")
if c12 := sorted(set(countries1) - set(countries2)):
lines.append("*Countries dropped*: " + ", ".join(c12))
for k in c12:
lines.append(f"- {k}: {countries1[k]} ")
common_countries = []
for c in sorted(set(countries1) & set(countries2)):
if countries1[c] == countries2[c]:
continue
common_countries.append(
f"- {c}: {countries2[c]} ({diff(countries1[c], countries2[c])})"
)
if common_countries:
lines.append("*Country data additions/deletions*:")
lines.extend(common_countries)
return "\n".join(lines)
if __name__ == "__main__":
s3 = boto3.resource("s3")
bucket = s3.Bucket(BUCKET)
today, last_day = sorted(
[
(x.key, to_isodate(Path(x.key).stem))
for x in bucket.objects.filter(Prefix="country")
if "latest" not in x.key
],
key=operator.itemgetter(1),
reverse=True,
)[:2]
d1 = get_data(s3, BUCKET, last_day[0])
d2 = get_data(s3, BUCKET, today[0])
ret = compare(d1, d2)
print(ret)
if WEBHOOK_URL:
response = requests.post(WEBHOOK_URL, json={"text": ret})
if response.status_code != 200:
print(f"Slack notification failed with {response.status_code}: {response.text}")
sys.exit(1)
if "⚠️" in ret:
sys.exit(1) # Trigger CI failure as an additional notification
|
#!/usr/bin/python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See the NOTICE file distributed with this work for additional
# information regarding copyright ownership.
#
import os
import sys
import subprocess
import StringIO
def stderrprint(error):
sys.stderr.write(error)
def print_success(job_name):
print "SUCCESS: " + job_name
def print_failure(job_name):
print "FAILURE: " + job_name
def check_and_print_std(stdout, stderr):
for line in StringIO.StringIO(stdout).readlines():
print "[STDOUT] " + line.rstrip()
if stderr:
for line in StringIO.StringIO(stderr).readlines():
stderrprint(line)
print "[STDERR] " + line.rstrip()
return 1
return 0
def run_routine(master_host_ip, master_ssh_port, master_ssh_user, master_ssh_pass, job_name, master_workspace):
    # downloading resources to run the test cases
master_job_folder = os.path.join(master_workspace, job_name)
master_resource_file = os.path.join(master_job_folder, "resources.tar")
command = ["sshpass", "-p", master_ssh_pass, "scp", "-o", "LogLevel=error", "-o", "UserKnownHostsFile=/dev/null", "-o", "ConnectTimeout=60", "-o",
"StrictHostKeyChecking=no", "-P", master_ssh_port,
master_ssh_user + "@" + master_host_ip + ":\"" + master_resource_file + "\"",
"./resources.tar"]
print "Executing: " + " ".join(command)
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if check_and_print_std(stdout, stderr):
print_failure(job_name)
return
    # extracting all resources
command = ["tar", "-xf", "resources.tar"]
print "Executing: " + " ".join(command)
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if check_and_print_std(stdout, stderr):
print_failure(job_name)
return
#execute test cases
command = ["java", "-cp", ".:testJar.jar", "org.thingml.testjar.TestJar", "compilers.jar",
"official-network-plugins.jar", "config.properties"]
print "Executing: " + " ".join(command)
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if check_and_print_std(stdout, stderr):
print_failure(job_name)
return
command = ["tar", "-cf", "tmp.tar", "tmp"]
print "Executing: " + " ".join(command)
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if check_and_print_std(stdout, stderr):
print_failure(job_name)
return
#send results back to master
master_tmp_arch_file = os.path.join(master_job_folder, "tmp.tar")
command = ["sshpass", "-p", master_ssh_pass, "scp", "-o", "LogLevel=error", "-o", "UserKnownHostsFile=/dev/null", "-o", "ConnectTimeout=30", "-o",
"StrictHostKeyChecking=no", "-P", master_ssh_port, "./tmp.tar",
master_ssh_user + "@" + master_host_ip + ":\"" + master_tmp_arch_file + "\""]
print "Executing: " + " ".join(command)
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if check_and_print_std(stdout, stderr):
print_failure(job_name)
return
print_success(job_name)
return
if __name__ == "__main__":
if len(sys.argv) < 7:
print "./dojob.py <master_workspace> <job_name> <master_host_ip> <master_ssh_port> <master_ssh_user> <master_ssh_pass>"
sys.exit(1)
master_workspace = sys.argv[1]
slave_job_name = sys.argv[2]
master_host_ip = sys.argv[3]
master_ssh_port = sys.argv[4]
master_ssh_user = sys.argv[5]
master_ssh_pass = sys.argv[6]
    run_routine(master_host_ip, master_ssh_port, master_ssh_user, master_ssh_pass, slave_job_name, master_workspace)
|
import pygame
LETGO_PIDGEON = pygame.USEREVENT + 1
|
from util import timeit
@timeit
def trailing_zeros(n: int) -> int:
"""
[Easy] https://cses.fi/problemset/task/1618
[Help] https://www.geeksforgeeks.org/count-trailing-zeroes-factorial-number/
[Solution] https://cses.fi/paste/e30a1f7b61c0770e239417/
Your task is to calculate the number of trailing zeros in the factorial n!.
For example, 20!=2432902008176640000 and it has 4 trailing zeros.
The only input line has an integer n.
Print the number of trailing zeros in n!.
Constraints: 1 ≤ n ≤ 10^9
Example
Input: 20
Output: 4
"""
c, d = 0, 5
while n >= d:
c += n // d
d *= 5
return c
if __name__ == '__main__':
trailing_zeros(20)
trailing_zeros(11)
trailing_zeros(5)
trailing_zeros(395)
trailing_zeros(374960399)
trailing_zeros(100000000)
''' terminal
run trailing_zeros(20)
got 4 in 0.0000219000 secs.
run trailing_zeros(11)
got 2 in 0.0000067000 secs.
run trailing_zeros(5)
got 1 in 0.0000068000 secs.
run trailing_zeros(395)
got 97 in 0.0000071000 secs.
run trailing_zeros(374960399)
got 93740092 in 0.0000095000 secs.
run trailing_zeros(100000000)
got 24999999 in 0.0000100000 secs.
'''
|
import types
import pytest
from indy_client.test.helper import genTestClient
from plenum.common.constants import VERKEY
from plenum.common.signer_simple import SimpleSigner
from plenum.common.messages.node_messages import PrePrepare, Commit
from plenum.common.signer_did import DidSigner
from plenum.common.util import randomString
from plenum.test.helper import waitForSufficientRepliesForRequests
from plenum.test.node_catchup.helper import waitNodeDataEquality
from plenum.test.test_node import getPrimaryReplica
from indy_client.client.wallet.wallet import Wallet
from indy_common.identity import Identity
from stp_core.common.log import getlogger
from stp_core.loop.eventually import eventually
logger = getlogger()
@pytest.fixture(scope="module")
def tconf(tconf, request):
# Delaying performance checks since delaying 3PC messages in the test
old_freq = tconf.PerfCheckFreq
tconf.PerfCheckFreq = 50
def reset():
tconf.PerfCheckFreq = old_freq
request.addfinalizer(reset)
return tconf
def test_successive_batch_do_no_change_state(looper,
tdirWithDomainTxnsUpdated,
tdirWithClientPoolTxns,
tconf, nodeSet, trustee,
trusteeWallet, monkeypatch):
"""
Send 2 NYM txns in different batches such that the second batch does not
change state so that state root remains same, but keep the identifier
and reqId different. Make sure the first request is not ordered by the
primary before PRE-PREPARE for the second is sent.
Also check reject and commit
:return:
"""
all_reqs = []
# Delay only first PRE-PREPARE
pp_seq_no_to_delay = 1
delay_pp_duration = 5
delay_cm_duration = 10
def delay_commits(wrappedMsg):
msg, sender = wrappedMsg
if isinstance(msg, Commit) and msg.instId == 0:
return delay_cm_duration
def new_identity():
wallet = Wallet(randomString(5))
signer = DidSigner()
new_idr, _ = wallet.addIdentifier(signer=signer)
verkey = wallet.getVerkey(new_idr)
idy = Identity(identifier=new_idr,
verkey=verkey,
role=None)
return idy, wallet
def submit_id_req(idy, wallet=None, client=None):
nonlocal all_reqs
wallet = wallet if wallet is not None else trusteeWallet
client = client if client is not None else trustee
wallet.updateTrustAnchoredIdentity(idy)
reqs = wallet.preparePending()
all_reqs.extend(reqs)
client.submitReqs(*reqs)
return reqs
def submit_id_req_and_wait(idy, wallet=None, client=None):
reqs = submit_id_req(idy, wallet=wallet, client=client)
looper.runFor(.2)
return reqs
def check_verkey(i, vk):
for node in nodeSet:
data = node.idrCache.getNym(i, isCommitted=True)
assert data[VERKEY] == vk
def check_uncommitted(count):
for node in nodeSet:
assert len(node.idrCache.un_committed) == count
for node in nodeSet:
for rpl in node.replicas:
monkeypatch.setattr(rpl, '_request_missing_three_phase_messages',
lambda *x, **y: None)
idy, new_wallet = new_identity()
new_idr = idy.identifier
verkey = idy.verkey
submit_id_req(idy)
waitForSufficientRepliesForRequests(looper, trustee, requests=all_reqs[-1:],
add_delay_to_timeout=delay_cm_duration)
for node in nodeSet:
node.nodeIbStasher.delay(delay_commits)
new_client, _ = genTestClient(nodeSet, tmpdir=tdirWithClientPoolTxns,
usePoolLedger=True)
looper.add(new_client)
looper.run(new_client.ensureConnectedToNodes(count=len(nodeSet)))
new_client.registerObserver(new_wallet.handleIncomingReply, name='temp')
idy.seqNo = None
# Setting the same verkey thrice but in different batches with different
# request ids
for _ in range(3):
req, = submit_id_req_and_wait(idy, wallet=new_wallet, client=new_client)
logger.debug('{} sent request {} to change verkey'.
format(new_client, req))
waitForSufficientRepliesForRequests(looper, new_client,
requests=all_reqs[-3:],
add_delay_to_timeout=delay_cm_duration)
# Number of uncommitted entries is 0
looper.run(eventually(check_uncommitted, 0))
check_verkey(new_idr, verkey)
new_client.deregisterObserver(name='temp')
# Setting the verkey to `x`, then `y` and then back to `x` but in different
# batches with different request ids. The idea is to change
# state root to `t` then `t'` and then back to `t` and observe that no
# errors are encountered
idy, new_wallet = new_identity()
submit_id_req(idy)
waitForSufficientRepliesForRequests(looper, trustee, requests=all_reqs[-1:],
add_delay_to_timeout=delay_cm_duration)
new_client.registerObserver(new_wallet.handleIncomingReply)
idy.seqNo = None
x_signer = SimpleSigner(identifier=idy.identifier)
idy.verkey = x_signer.verkey
req, = submit_id_req_and_wait(idy, wallet=new_wallet, client=new_client)
new_wallet.updateSigner(idy.identifier, x_signer)
logger.debug('{} sent request {} to change verkey'.
format(new_client, req))
y_signer = SimpleSigner(identifier=idy.identifier)
idy.verkey = y_signer.verkey
req, = submit_id_req_and_wait(idy, wallet=new_wallet, client=new_client)
new_wallet.updateSigner(idy.identifier, y_signer)
logger.debug('{} sent request {} to change verkey'.
format(new_client, req))
idy.verkey = x_signer.verkey
req, = submit_id_req_and_wait(idy, wallet=new_wallet, client=new_client)
new_wallet.updateSigner(idy.identifier, x_signer)
logger.debug('{} sent request {} to change verkey'.
format(new_client, req))
waitForSufficientRepliesForRequests(looper, new_client,
requests=all_reqs[-3:],
add_delay_to_timeout=delay_cm_duration)
# Number of uncommitted entries is 0
looper.run(eventually(check_uncommitted, 0))
check_verkey(new_idr, verkey)
monkeypatch.undo()
# Delay COMMITs so that IdrCache can be checked for correct
# number of entries
uncommitteds = {}
methods = {}
for node in nodeSet:
cache = node.idrCache
uncommitteds[cache._name] = []
cre = cache.currentBatchCreated
com = cache.onBatchCommitted
methods[cache._name] = (cre, com)
# Patch methods to record and check roots after commit
def patched_cre(self, stateRoot):
uncommitteds[self._name].append(stateRoot)
return methods[self._name][0](stateRoot)
def patched_com(self, stateRoot):
assert uncommitteds[self._name][0] == stateRoot
rv = methods[self._name][1](stateRoot)
uncommitteds[self._name] = uncommitteds[self._name][1:]
return rv
cache.currentBatchCreated = types.MethodType(patched_cre, cache)
cache.onBatchCommitted = types.MethodType(patched_com, cache)
# Set verkey of multiple identities
more = 5
keys = {}
for _ in range(more):
idy, _ = new_identity()
keys[idy.identifier] = idy.verkey
submit_id_req(idy)
looper.runFor(.01)
# Correct number of uncommitted entries
looper.run(eventually(check_uncommitted, more, retryWait=1))
waitForSufficientRepliesForRequests(looper, trustee,
requests=all_reqs[-more:],
add_delay_to_timeout=delay_cm_duration)
# Number of uncommitted entries is 0
looper.run(eventually(check_uncommitted, 0))
# The verkeys are correct
for i, v in keys.items():
check_verkey(i, v)
waitNodeDataEquality(looper, nodeSet[0], *nodeSet[1:])
keys = {}
for _ in range(3):
idy, _ = new_identity()
keys[idy.identifier] = idy.verkey
submit_id_req(idy)
looper.runFor(.01)
# Correct number of uncommitted entries
looper.run(eventually(check_uncommitted, 3, retryWait=1))
# Check batch reject
for node in nodeSet:
cache = node.idrCache
initial = cache.un_committed
cache.batchRejected()
# After reject, last entry is removed
assert cache.un_committed == initial[:-1]
root = cache.un_committed[0][0]
cache.onBatchCommitted(root)
# Calling commit with same root results in Assertion error
with pytest.raises(AssertionError):
cache.onBatchCommitted(root)
|
import unittest
from moduls import get_random_spectators_and_players
import logging
logger = logging.getLogger(__name__)
PLAYERS = ["player01", "player02", "player03", "player04", "player05", "player06", "player07", "player08", "player09"]
class MyTestCase(unittest.TestCase):
def test_something(self):
self.assertEqual(len(PLAYERS), 9)
def test_get_random_spectators_and_players_more_8(self):
players, spectators = get_random_spectators_and_players(PLAYERS) # 9
self.assertEqual(len(players), 8)
self.assertEqual(len(spectators), 1)
def test_get_random_spectators_and_players_eq_8(self):
players, spectators = get_random_spectators_and_players(PLAYERS[:8]) # 8
self.assertEqual(len(players), 8)
self.assertEqual(len(spectators), 0)
def test_get_random_spectators_and_players_le_8(self):
players, spectators = get_random_spectators_and_players(PLAYERS[:5]) # 5
self.assertEqual(len(players), 4)
self.assertEqual(len(spectators), 1)
if __name__ == "__main__":
unittest.main()
|
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_pagedown import PageDown
from config import config
bootstrap = Bootstrap()
# mail = Mail()
# moment = Moment()
# The ORM instance is created here without any configuration. Instantiating it
# up front just defines the object so other code can import and reference it.
db = SQLAlchemy()
pagedown = PageDown()
def create_app(config_name):
app = Flask(__name__)
    # This line applies the configuration: from_object makes the app read
    # settings from the object passed in. If you defined your own config
    # class, pass it in here, i.e. your config['default'].
    # app is a Flask instance; its default_config attribute holds the defaults.
    app.config.from_object(config[config_name])
    # A KeyError was raised here; a KeyError usually occurs when indexing a
    # dict directly. For example, with the dict a below, reading the missing
    # key 'b' fails:
    # a = {'a': 1}
    # print(a['b'])
    # So the error here means the config dict has no key equal to config_name.
    # How to debug? First print config to see what it holds, then print
    # config_name to see whether it is in config.
    # print('config is: ', config)
    # print('config_name is: ', config_name)
    # config is a dict; if it contains the key config_name, then
    # config_name in config is True.
    # print('config_name in config ', config_name in config)
    # This line does not apply configuration; it runs the init_app function
    # defined inside the config for extra setup. The custom init_app in your
    # config is just 'pass', so this line could be commented out.
    config[config_name].init_app(app)
    # print(app.config)
bootstrap.init_app(app)
# mail.init_app(app)
# moment.init_app(app)
    # Call the init_app method here, passing in the app instance.
    # db is the object defined above, so everything init_app does modifies
    # that same db object. After this line db is still the same object in
    # memory, but some of its attributes have changed, because init_app
    # processed them.
db.init_app(app)
# pagedown.init_app(app)
#
# if app.config['SSL_REDIRECT']:
# from flask_sslify import SSLify
# sslify = SSLify(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
# from .auth import auth as auth_blueprint
# app.register_blueprint(auth_blueprint, url_prefix='/auth')
#
# from .api import api as api_blueprint
# app.register_blueprint(api_blueprint, url_prefix='/api/v1')
    return app
|
# -*- coding: utf-8 -*-
import os
import face_recognition
import cv2
def func_encodings(image_path, known_encodings, known_names):
    name = image_path.split(os.path.sep)[-2]  # the parent folder name in the path is the person's name
    image = cv2.imread(image_path)  # OpenCV reads images in BGR channel order
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # the dlib-based pipeline expects RGB input
    # detect the faces, returning an array of face bounding boxes
    boxes = face_recognition.face_locations(rgb)
    # encode each detected face into a 128-dimensional feature vector
    encodings = face_recognition.face_encodings(rgb, boxes)
    # loop over the encodings, appending each encoding and the matching name
    # to the known-encodings and known-names lists
for encoding in encodings:
known_encodings.append(encoding)
known_names.append(name)
|
# coding: utf-8
'''
Memory Game where players look for matches in a grid of 'cards'
Tapping the card turns it over, tap another and if they match
you get to add a point to your total.
'''
from scene import *
import ui  # provides ui.delay, used below to close the view when the game ends
# import sound
import random
from math import sqrt # sin, cos, pi
# A = Action
# - - - - - - -
# grid dimensions (at least one is even)
grid_rows = 4 # max 4
grid_cols = 6 # max 7
# define 'textures' to use
backside_tex = Texture('plf:Ground_DirtCenter_rounded')
# the first (index=0) is gold star used when a pair is found
front_textures = [Texture('plf:Item_Star'),
Texture('plf:Enemy_FishBlue'),
Texture('plf:Tile_Cactus'),
Texture('plf:Enemy_FishPink'),
Texture('plf:Enemy_Ladybug'),
Texture('plf:Enemy_Frog_move'),
Texture('plf:Enemy_Mouse'),
Texture('plf:Enemy_Fly'),
Texture('plf:Enemy_SlimeBlue'),
Texture('plf:Enemy_Snail'),
Texture('plf:Enemy_SlimeBlock'),
Texture('plf:HudPlayer_yellow'),
Texture('plf:Enemy_WormPink'),
Texture('plf:Enemy_Bee'),
Texture('plf:Enemy_Barnacle')]
# - - - - - - -
def shuffle(some_list):
shuffled_list = []
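    # insert each item at a uniformly random slot of the growing list; with
    # len+1 equally likely slots per step this yields a uniform random
    # permutation (an insertion-based variant of the Fisher-Yates shuffle)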
for item in some_list:
iwhere = random.choice(range(0, 1 + len(shuffled_list)))
shuffled_list.insert(iwhere, item)
return shuffled_list
# - - - - - - -
def my_dist_to(self, loc):
'''
calc distance from self.position to the xy-tuple loc
'''
dist = (self.position[0] - loc[0])**2
dist += (self.position[1] - loc[1])**2
return sqrt(dist)
class Card (SpriteNode):
def __init__(self, loc_x, loc_y, tex_num, *args, **kwargs):
self.tex_num = tex_num
self.showing = False
SpriteNode.__init__(self, backside_tex, *args, **kwargs)
self.position = (loc_x, loc_y)
self.anchor_point = (0.5, 0.5)
def show_card(self, show_it):
# set the card to show it, or not
if show_it == True:
self.texture = front_textures[self.tex_num]
self.showing = True
else:
self.texture = backside_tex
self.showing = False
class MemGame (Scene):
def setup(self):
'''
do setup once at beginning to set things up
'''
self.background_color = '#606060'
# screen sizes
max_x = self.size.w
max_y = self.size.h
# keep track of number of cards that are not matched
self.num_left = grid_rows * grid_cols
self.num_misses = 0
# create a list of card faces to use
# start with all available tex's
texs_to_use = list(range(1, len(front_textures)))
# from these pick the number needed and put them in as pairs
tex_pairs = []
for ipair in range(int(self.num_left / 2)):
tex_pairs += 2 * [texs_to_use.pop()]
# and shuffle these, a lot
tex_pairs = shuffle(shuffle(tex_pairs))
# print("Shuffled pairs: ", tex_pairs)
# create the cards, in a list
self.the_cards = []
# add them by row and column
for irow in range(0, grid_rows):
row_y = max_y * (irow + 0.5) / (grid_rows)
for icol in range(0, grid_cols):
col_x = max_x * (icol + 0.5) / (grid_cols)
this_tex = tex_pairs.pop()
self.the_cards.append(
Card(col_x, row_y, this_tex, parent=self))
# keep track of the cards that are currently turned over
self.cards_over = []
def update(self):
'''
This is repeatedly called: update, render, update, render
Nothing to update generally - this game is touch driven
'''
# detect the end of the game
if self.num_left == 0:
# wait a little and exit
self.background_color = '#44b844'
ui.delay(self.view.close, 1.0)
def touch_ended(self, touch):
'''
this is called whenever a touch ends
'''
# are two cards turned over?
# if so, then either set them to stars or hide them again
if len(self.cards_over) == 2:
if self.cards_over[0].tex_num != self.cards_over[1].tex_num:
self.cards_over[0].show_card(False)
self.cards_over[1].show_card(False)
self.cards_over = []
self.num_misses += 1
else:
# change to stars
# they are still 'showing' so further touches are ignored
self.cards_over[0].tex_num = 0
self.cards_over[1].tex_num = 0
self.cards_over[0].show_card(True)
self.cards_over[1].show_card(True)
self.cards_over = []
# decrease the number left to match
self.num_left -= 2
print("Num misses: " + str(self.num_misses))
else:
# go through the cards and turn one over if touched
for this_card in self.the_cards:
if my_dist_to(this_card, touch.location) < 25:
# ignore the touch if the card is showing already
if not this_card.showing:
# turn it face up
this_card.show_card(True)
# add this to cards_over list
self.cards_over.append(this_card)
# for debug print this
# print(str(touch.location) + ' - ' + str(len(self.cards_over)))
# print(self.num_touches)
if __name__ == '__main__':
print("Game starting...")
run(MemGame(), LANDSCAPE)
|
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, BatchNormalization, Activation, Add
import copy
import os
from typing import Any, Tuple
import constants as c
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class NNet:
def __init__(self, epochs: int = c.DEFAULT_EPOCHS, learning_rate: float = c.DEFAULT_LEARNING_RATE,
batch_size: int = c.DEFAULT_BATCH_SIZE, model_name: str = c.DEFAULT_MODEL_NAME,
load_data: bool = True):
self.model = self._get_model(learning_rate, load_data, model_name)
self.model_name = model_name
self.epochs = epochs
self.batch_size = batch_size
@classmethod
def _get_model(cls, learning_rate: float, load_data: bool, model_name: str) -> keras.Model:
inputs = Input(shape=(c.COLUMNS, c.ROWS, 1))
x = Conv2D(filters=64, kernel_size=(4, 4), padding='same')(inputs)
x = BatchNormalization(axis=3)(x)
x = Activation('relu')(x)
x = cls._res_net(inputs=x, filters=64, kernel_size=(4, 4))
x = cls._res_net(inputs=x, filters=64, kernel_size=(4, 4))
x = cls._res_net(inputs=x, filters=64, kernel_size=(4, 4))
policy = Conv2D(filters=64, kernel_size=(4, 4), padding='valid')(x)
policy = BatchNormalization(axis=3)(policy)
policy = Activation('relu')(policy)
policy = Flatten()(policy)
policy = Dense(256, activation='relu')(policy)
policy = Dense(c.COLUMNS, activation='softmax', name='policy')(policy)
value = Conv2D(filters=64, kernel_size=(4, 4), padding='valid')(x)
value = BatchNormalization(axis=3)(value)
value = Activation('relu')(value)
value = Flatten()(value)
value = Dense(1, activation='sigmoid', name='value')(value)
model = keras.Model(inputs=inputs, outputs=[policy, value])
model.compile(
optimizer=tf.optimizers.Adam(learning_rate=learning_rate),
loss={'value': 'mean_squared_error',
'policy': 'categorical_crossentropy'}
)
if load_data:
try:
model.load_weights(f'{parent_dir}\\weights\\{model_name}\\').expect_partial()
except ValueError:
print('No saved weights found')
return model
@staticmethod
def _res_net(inputs: Any, filters: int, kernel_size: Tuple[int, int]) -> Any:
x_shortcut = inputs
x = Conv2D(filters=filters, kernel_size=kernel_size, padding='same')(inputs)
x = BatchNormalization(axis=3)(x)
x = Activation('relu')(x)
x = Conv2D(filters=filters, kernel_size=kernel_size, padding='same')(x)
x = BatchNormalization(axis=3)(x)
x = Add()([x, x_shortcut])
x = Activation('relu')(x)
return x
def train(self, examples: list, save_data: bool = False) -> None:
x_train = np.array([example[0] for example in examples])
y_train = (np.array([example[1][0] for example in examples]), np.array([example[1][1] for example in examples]))
self.model.fit(x=x_train, y={'policy': y_train[0], 'value': y_train[1]},
epochs=self.epochs, batch_size=self.batch_size, shuffle=True)
if save_data:
self.model.save_weights(f'{parent_dir}\\weights\\{self.model_name}\\')
    def prediction(self, state: np.ndarray, player: int = 1) -> Tuple[np.ndarray, float]:
state_copy = copy.deepcopy(state) * player
prediction = self.model.predict(np.array([np.array(state_copy)]))
policy = prediction[0][0]
value = prediction[1][0][0]
for move in range(len(policy)):
if not self._is_legal(state=state_copy, move=move):
policy[move] = 0
else:
policy[move] = policy[move] + 0.00001
return policy / np.sum(policy), value
@staticmethod
def _is_legal(state: np.ndarray, move: int) -> bool:
return state[move][c.ROWS - 1] == 0
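# A minimal usage sketch (illustrative only; assumes 'state' is a board array
# shaped to the model's (COLUMNS, ROWS, 1) input as used elsewhere in the
# project):
# net = NNet(load_data=False)
# policy, value = net.prediction(state, player=1)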
|
"""AuthZ Adapter implementations of hierarchy sessions."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from ..osid import sessions as osid_sessions
from ..osid.osid_errors import NotFound
from ..osid.osid_errors import PermissionDenied, NullArgument, Unimplemented
from ..primitives import Id
from ..utilities import raise_null_argument
from dlkit.abstract_osid.hierarchy import sessions as abc_hierarchy_sessions
class HierarchyTraversalSession(abc_hierarchy_sessions.HierarchyTraversalSession, osid_sessions.OsidSession):
"""Adapts underlying HierarchyTraversalSession methodswith authorization checks."""
def get_hierarchy_id(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_bin_id_template
return self._provider_session.get_hierarchy_id()
hierarchy_id = property(fget=get_hierarchy_id)
def get_hierarchy(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_bin_template
if not self._can('lookup'):
raise PermissionDenied()
else:
return self._provider_session.get_hierarchy()
hierarchy = property(fget=get_hierarchy)
def can_access_hierarchy(self):
raise Unimplemented()
def get_roots(self):
raise Unimplemented()
roots = property(fget=get_roots)
@raise_null_argument
def has_parents(self, id_):
raise Unimplemented()
@raise_null_argument
def is_parent(self, id_, parent_id):
raise Unimplemented()
@raise_null_argument
def get_parents(self, id_):
raise Unimplemented()
@raise_null_argument
def is_ancestor(self, id_, ancestor_id):
raise Unimplemented()
@raise_null_argument
def has_children(self, id_):
raise Unimplemented()
@raise_null_argument
def is_child(self, id_, child_id):
raise Unimplemented()
@raise_null_argument
def get_children(self, id_):
raise Unimplemented()
@raise_null_argument
def is_descendant(self, id_, descendant_id):
raise Unimplemented()
@raise_null_argument
def get_nodes(self, id_, ancestor_levels, descendant_levels, include_siblings):
raise Unimplemented()
class HierarchyDesignSession(abc_hierarchy_sessions.HierarchyDesignSession, osid_sessions.OsidSession):
"""Adapts underlying HierarchyDesignSession methodswith authorization checks."""
def get_hierarchy_id(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_bin_id_template
return self._provider_session.get_hierarchy_id()
hierarchy_id = property(fget=get_hierarchy_id)
def get_hierarchy(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_bin_template
if not self._can('lookup'):
raise PermissionDenied()
else:
return self._provider_session.get_hierarchy()
hierarchy = property(fget=get_hierarchy)
def can_modify_hierarchy(self):
# From azosid_templates/ontology.py::SubjectHierarchyDesignSession::can_modify_subject_hierarchy_template
return self._can('modify')
@raise_null_argument
def add_root(self, id_):
raise Unimplemented()
@raise_null_argument
def add_child(self, id_, child_id):
raise Unimplemented()
@raise_null_argument
def remove_root(self, id_):
raise Unimplemented()
@raise_null_argument
def remove_child(self, id_, child_id):
raise Unimplemented()
@raise_null_argument
def remove_children(self, id_):
raise Unimplemented()
class HierarchySequencingSession(abc_hierarchy_sessions.HierarchySequencingSession, osid_sessions.OsidSession):
"""Adapts underlying HierarchySequencingSession methodswith authorization checks."""
def get_hierarchy_id(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_bin_id_template
return self._provider_session.get_hierarchy_id()
hierarchy_id = property(fget=get_hierarchy_id)
def get_hierarchy(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_bin_template
if not self._can('lookup'):
raise PermissionDenied()
else:
return self._provider_session.get_hierarchy()
hierarchy = property(fget=get_hierarchy)
def can_sequence_hierarchy(self):
raise Unimplemented()
@raise_null_argument
def move_node_ahead(self, parent_id, reference_id, id_):
raise Unimplemented()
@raise_null_argument
def move_node_behind(self, parent_id, reference_id, id_):
raise Unimplemented()
@raise_null_argument
def sequence_nodes(self, parent_id, ids):
raise Unimplemented()
class HierarchyStructureNotificationSession(abc_hierarchy_sessions.HierarchyStructureNotificationSession, osid_sessions.OsidSession):
"""Adapts underlying HierarchyStructureNotificationSession methodswith authorization checks."""
def get_hierarchy_id(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_bin_id_template
return self._provider_session.get_hierarchy_id()
hierarchy_id = property(fget=get_hierarchy_id)
def get_hierarchy(self):
# Implemented from azosid template for -
# osid.resource.ResourceLookupSession.get_bin_template
if not self._can('lookup'):
raise PermissionDenied()
else:
return self._provider_session.get_hierarchy()
hierarchy = property(fget=get_hierarchy)
def can_register_for_hierarchy_structure_notifications(self):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.can_register_for_resource_notifications
return self._can('register')
def reliable_hierarchy_structure_notifications(self):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_new_resources
self._provider_session.reliable_hierarchy_structure_notifications()
def unreliable_hierarchy_structure_notifications(self):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_new_resources
self._provider_session.unreliable_hierarchy_structure_notifications()
@raise_null_argument
def acknowledge_hierarchy_structure_notification(self, notification_id):
raise Unimplemented()
def register_for_new_hierarchy_nodes(self):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_new_resources
if not self._can('register'):
raise PermissionDenied()
self._provider_session.register_for_new_hierarchy_nodes()
def register_for_deleted_hierarchy_nodes(self):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_deleted_resources
if not self._can('register'):
raise PermissionDenied()
self._provider_session.register_for_deleted_hierarchy_nodes()
@raise_null_argument
def register_for_deleted_hierarchy_node(self, node_id):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_deleted_resource
if not self._can('register'):
raise PermissionDenied()
self._provider_session.register_for_deleted_hierarchy_node(node_id)
def register_for_changed_hierarchy(self):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_changed_resources
if not self._can('register'):
raise PermissionDenied()
self._provider_session.register_for_changed_hierarchy()
@raise_null_argument
    def register_for_changed_hierarchy_for_ancestors(self, node_id):
        # Implemented from azosid template for -
        # osid.resource.ResourceNotificationSession.register_for_changed_resource
        if not self._can('register'):
            raise PermissionDenied()
        self._provider_session.register_for_changed_hierarchy_for_ancestors(node_id)
@raise_null_argument
def register_for_changed_hierarchy_for_descendants(self, node_id):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_changed_resource
if not self._can('register'):
raise PermissionDenied()
self._provider_session.register_for_changed_hierarchy_for_descendants(node_id)
class HierarchyLookupSession(abc_hierarchy_sessions.HierarchyLookupSession, osid_sessions.OsidSession):
"""Adapts underlying HierarchyLookupSession methodswith authorization checks."""
def __init__(self, *args, **kwargs):
osid_sessions.OsidSession.__init__(self, *args, **kwargs)
# This needs to be done right
# Build from authority in config
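        # (The qualifier below is the URL-encoded osid Id
        # 'hierarchy.Hierarchy:ROOT@ODL.MIT.EDU'; '%3A' decodes to ':' and '%40' to '@'.)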
self._qualifier_id = Id('hierarchy.Hierarchy%3AROOT%40ODL.MIT.EDU')
self._id_namespace = 'hierarchy.Hierarchy'
def can_lookup_hierarchies(self):
# Implemented from azosid template for -
# osid.resource.BinLookupSession.can_lookup_bins_template
return self._can('lookup')
def use_comparative_hierarchy_view(self):
# Implemented from azosid template for -
# osid.resource.BinLookupSession.use_comparative_bin_view_template
self._provider_session.use_comparative_hierarchy_view()
def use_plenary_hierarchy_view(self):
# Implemented from azosid template for -
# osid.resource.BinLookupSession.use_plenary_bin_view_template
self._provider_session.use_plenary_hierarchy_view()
@raise_null_argument
def get_hierarchy(self, hierarchy_id):
# Implemented from azosid template for -
# osid.resource.BinLookupSession.get_bin_template
if not self._can('lookup'):
raise PermissionDenied()
return self._provider_session.get_hierarchy(hierarchy_id)
@raise_null_argument
def get_hierarchies_by_ids(self, hierarchy_ids):
# Implemented from azosid template for -
# osid.resource.BinLookupSession.get_bins_by_ids_template
if not self._can('lookup'):
raise PermissionDenied()
return self._provider_session.get_hierarchies_by_ids(hierarchy_ids)
@raise_null_argument
def get_hierarchies_by_genus_type(self, hierarchy_genus_type):
# Implemented from azosid template for -
# osid.resource.BinLookupSession.get_bins_by_genus_type_template
if not self._can('lookup'):
raise PermissionDenied()
return self._provider_session.get_hierarchies_by_genus_type(hierarchy_genus_type)
@raise_null_argument
def get_hierarchies_by_parent_genus_type(self, hierarchy_genus_type):
raise Unimplemented()
@raise_null_argument
def get_hierarchies_by_record_type(self, hierarchy_record_type):
raise Unimplemented()
@raise_null_argument
def get_hierarchies_by_provider(self, resource_id):
raise Unimplemented()
def get_hierarchies(self):
# Implemented from azosid template for -
# osid.resource.BinLookupSession.get_bins_template
if not self._can('lookup'):
raise PermissionDenied()
return self._provider_session.get_hierarchies()
hierarchies = property(fget=get_hierarchies)
class HierarchyQuerySession(abc_hierarchy_sessions.HierarchyQuerySession, osid_sessions.OsidSession):
"""Adapts underlying HierarchyQuerySession methodswith authorization checks."""
def __init__(self, *args, **kwargs):
osid_sessions.OsidSession.__init__(self, *args, **kwargs)
# This needs to be done right
# Build from authority in config
self._qualifier_id = Id('hierarchy.Hierarchy%3AROOT%40ODL.MIT.EDU')
self._id_namespace = 'hierarchy.Hierarchy'
def can_search_hierarchies(self):
# Implemented from azosid template for -
# osid.resource.BinQuerySession.can_search_bins_template
return self._can('search')
def get_hierarchy_query(self):
# Implemented from azosid template for -
# osid.resource.BinQuerySession.get_bin_query_template
if not self._can('search'):
raise PermissionDenied()
return self._provider_session.get_hierarchy_query()
hierarchy_query = property(fget=get_hierarchy_query)
@raise_null_argument
def get_hierarchies_by_query(self, hierarchy_query):
# Implemented from azosid template for -
# osid.resource.BinQuerySession.get_bins_by_query_template
if not self._can('search'):
raise PermissionDenied()
return self._provider_session.get_hierarchies_by_query(hierarchy_query)
class HierarchySearchSession(abc_hierarchy_sessions.HierarchySearchSession, HierarchyQuerySession):
"""Adapts underlying HierarchySearchSession methodswith authorization checks."""
def get_hierarchy_search(self):
raise Unimplemented()
hierarchy_search = property(fget=get_hierarchy_search)
def get_hierarchy_search_order(self):
raise Unimplemented()
hierarchy_search_order = property(fget=get_hierarchy_search_order)
@raise_null_argument
def get_hierarchies_by_search(self, hierarchy_query, hierarchy_search):
raise Unimplemented()
@raise_null_argument
def get_hierarchy_query_from_inspector(self, hierarchy_query_inspector):
raise Unimplemented()
class HierarchyAdminSession(abc_hierarchy_sessions.HierarchyAdminSession, osid_sessions.OsidSession):
"""Adapts underlying HierarchyAdminSession methodswith authorization checks."""
def __init__(self, *args, **kwargs):
osid_sessions.OsidSession.__init__(self, *args, **kwargs)
# This needs to be done right
# Build from authority in config
self._qualifier_id = Id('hierarchy.Hierarchy%3AROOT%40ODL.MIT.EDU')
self._id_namespace = 'hierarchy.Hierarchy'
def can_create_hierarchies(self):
# Implemented from azosid template for -
        # osid.resource.BinAdminSession.can_create_bins_template
return self._can('create')
@raise_null_argument
def can_create_hierarchy_with_record_types(self, hierarchy_record_types):
# Implemented from azosid template for -
# osid.resource.BinAdminSession.can_create_bin_with_record_types_template
# This would like to be a real implementation someday:
if hierarchy_record_types is None:
raise NullArgument() # Just 'cause the spec says to :)
return self._can('create')
@raise_null_argument
def get_hierarchy_form_for_create(self, hierarchy_record_types):
# Implemented from azosid template for -
# osid.resource.BinAdminSession.get_bin_form_for_create_template
if not self._can('create'):
raise PermissionDenied()
return self._provider_session.get_hierarchy_form_for_create(hierarchy_record_types)
@raise_null_argument
def create_hierarchy(self, hierarchy_form):
# Implemented from azosid template for -
# osid.resource.BinAdminSession.create_bin_template
if not self._can('create'):
raise PermissionDenied()
return self._provider_session.create_hierarchy(hierarchy_form)
def can_update_hierarchies(self):
# Implemented from azosid template for -
# osid.resource.BinAdminSession.can_update_bins
return self._can('update')
@raise_null_argument
def get_hierarchy_form_for_update(self, hierarchy_id):
# Implemented from azosid template for -
# osid.resource.BinAdminSession.get_bin_form_for_update_template
if not self._can('update'):
raise PermissionDenied()
return self._provider_session.get_hierarchy_form_for_update(hierarchy_id)
@raise_null_argument
def update_hierarchy(self, hierarchy_form):
# Implemented from azosid template for -
# osid.resource.BinAdminSession.update_bin_template
if not self._can('update'):
raise PermissionDenied()
return self._provider_session.update_hierarchy(hierarchy_form)
def can_delete_hierarchies(self):
# Implemented from azosid template for -
# osid.resource.BinAdminSession.can_delete_bins
return self._can('delete')
@raise_null_argument
def delete_hierarchy(self, hierarchy_id):
raise Unimplemented()
def can_manage_hierarchy_aliases(self):
# Implemented from azosid template for -
# osid.resource.ResourceAdminSession.can_manage_resource_aliases
return (self._can('manage') or
bool(self._get_overriding_catalog_ids('manage')))
@raise_null_argument
def alias_hierarchy(self, hierarchy_id, alias_id):
# Implemented from azosid template for -
# osid.resource.BinAdminSession.alias_bin_template
if not self._can('alias'):
raise PermissionDenied()
return self._provider_session.alias_hierarchy(hierarchy_id, alias_id)
class HierarchyNotificationSession(abc_hierarchy_sessions.HierarchyNotificationSession, osid_sessions.OsidSession):
"""Adapts underlying HierarchyNotificationSession methodswith authorization checks."""
def can_register_for_hierarchy_notifications(self):
raise Unimplemented()
def reliable_hierarchy_notifications(self):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_new_resources
self._provider_session.reliable_hierarchy_notifications()
def unreliable_hierarchy_notifications(self):
# Implemented from azosid template for -
# osid.resource.ResourceNotificationSession.register_for_new_resources
self._provider_session.unreliable_hierarchy_notifications()
@raise_null_argument
def acknowledge_hierarchy_notification(self, notification_id):
raise Unimplemented()
def register_for_new_hierarchies(self):
raise Unimplemented()
def register_for_changed_hierarchies(self):
raise Unimplemented()
@raise_null_argument
def register_for_changed_hierarchy(self, hierarchy_id):
raise Unimplemented()
def register_for_deleted_hierarchies(self):
raise Unimplemented()
@raise_null_argument
def register_for_deleted_hierarchy(self, hierarchy_id):
raise Unimplemented()
|
""" Configuration file for scripts.
Author: Tang Yew Siang
Date: July 2019
"""
import os
import sys
import argparse
import numpy as np
# =============================== Algorithm Details ===============================
# A: Combines the PC with the 3D box by using planes of the 3D box
# B: Combines the PC with the 3D box by MLPs into a higher dims feature space
BOX_PC_MASK_REPRESENTATION = '' # A/B
USE_NORMALIZED_BOX2D_AS_FEATS = False
NORMALIZE_PC_BEFORE_SEG = False
NORMALIZATION_METHOD = '' # SD/Spread
# ===================== Box PC Fit ====================
BOXPC_SAMPLING_METHOD = 'SAMPLE' # BATCH / SAMPLE
BOXPC_SAMPLE_EQUAL_CLASS_WITH_PROB = 1.
BOXPC_PROPORTION_OF_BOXPC_FIT = 0.5
BOXPC_NOFIT_BOUNDS = [0.01, 0.25]
BOXPC_FIT_BOUNDS = [0.7, 1.0]
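# (Presumably IoU-style overlap bounds: perturbed boxes whose overlap falls in
# BOXPC_NOFIT_BOUNDS are labeled non-fitting, those in BOXPC_FIT_BOUNDS fitting.)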
BOXPC_CENTER_PERTURBATION = 0.8
BOXPC_SIZE_PERTURBATION = 0.2
BOXPC_ANGLE_PERTURBATION = np.pi
# Use Box fit cls prediction to weigh delta prediction / loss
BOXPC_DELTA_LOSS_TYPE = 'huber' # mse / huber. Specific to B
BOXPC_WEIGH_DELTA_PRED_BY_CLS_CONF = False # Specific to B
BOXPC_WEIGH_DELTA_LOSS_BY_CLS_CONF = False # Specific to B
BOXPC_STOP_GRAD_OF_CLS_VIA_DELTA = True # Specific to B
# Use GT cls to weigh delta loss
BOXPC_WEIGH_DELTA_LOSS_BY_CLS_GT = False # Specific to B
# Weights
BOXPC_WEIGHT_CLS = 1.
BOXPC_WEIGHT_DELTA = 1. # Specific to B
BOXPC_WEIGHT_DELTA_CENTER_PERCENT = 0.34 # Specific to B
BOXPC_WEIGHT_DELTA_SIZE_PERCENT = 0.33 # Specific to B
BOXPC_WEIGHT_DELTA_ANGLE_PERCENT = 0.33 # Specific to B
BOXPC_WEIGHT_CLUSTER = 1. # Specific to B & C
# ================== Semi Supervised ==================
SEMI_MODEL = '' # A/B/C/D
# Sampling method, options:
# BATCH : Simply iterate through the dataset
# ALTERNATE_BATCH : Alternate between Weak and Strong samples + Sample
# MIXED_BATCH : Mix Weak and Strong samples + Sample
SEMI_SAMPLING_METHOD = 'ALTERNATE_BATCH'
# Probability of sampling equal class, else sample from data distribution
# If set to 1, it will randomly sample an equal num of samples / class
# If set to 0, it will randomly sample from the dataset
SEMI_SAMPLE_EQUAL_CLASS_WITH_PROB = 0. # ********* IMPORTANT **********
SEMI_USE_LABELS2D_OF_CLASSES3D = False # ********* IMPORTANT **********
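# Illustrative sketch (not part of the original pipeline; never called): one
# way such a sampler can behave. `labels` is a hypothetical 1D array of
# per-sample class ids.
def _sample_equal_class_with_prob(labels, batch_size, prob, rng=np.random):
    """With probability `prob`, draw an equal number of samples per class;
    otherwise draw uniformly from the dataset distribution."""
    labels = np.asarray(labels)
    if rng.rand() < prob:
        classes = np.unique(labels)
        per_class = max(1, batch_size // len(classes))
        picks = [rng.choice(np.flatnonzero(labels == c), per_class, replace=True)
                 for c in classes]
        return np.concatenate(picks)[:batch_size]
    return rng.choice(len(labels), size=batch_size, replace=True)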
# Adversarial loss related parameters
SEMI_ADV_INITIAL_ITERS_BEFORE_TRAIN = 0
SEMI_ADV_INITIAL_TRAINING_EPOCHS = 0
SEMI_ADV_ITERS_FOR_D = 0 # 1
SEMI_ADV_SAMPLE_EQUAL_CLASS_W_PROB = 0. # TODO
SEMI_ADV_DROPOUTS_FOR_G = 0.5
SEMI_ADV_FLIP_LABELS_FOR_D_PROB = 0.
SEMI_ADV_SOFT_NOISY_LABELS_FOR_D = False
SEMI_ADV_FEATURE_MATCHING = False # TODO
SEMI_ADV_NORMALIZE_PC_TO_NEG1_TO_1 = False # NA
SEMI_ADV_TANH_FOR_LAST_LAYER_OF_G = True
SEMI_ADV_DIFF_MINIBATCH_REAL_VS_FAKE = False # TODO
SEMI_ADV_LEAKY_RELU = True
SEMI_ADV_AVERAGE_POOLING = False # NA
SEMI_TRAIN_BOXPC_MODEL = False
SEMI_TRAIN_SEG_TRAIN_CLASS_AG_SEG = False
SEMI_TRAIN_BOX_TRAIN_CLASS_AG_TNET = False
SEMI_TRAIN_BOX_TRAIN_CLASS_AG_BOX = False
# Box-PC related
# Min. the Box PC Fit loss AFTER refining instead of before
SEMI_BOXPC_MIN_FIT_LOSS_AFT_REFINE = False
SEMI_BOXPC_FIT_ONLY_ON_2D_CLS = False
SEMI_WEIGH_BOXPC_DELTA_DURING_TEST = False
SEMI_REFINE_USING_BOXPC_DELTA_NUM = 1
# Intracls Dims Var
SEMI_INTRACLSDIMS_ONLY_ON_2D_CLS = True
# ================================= Loss Details =================================
# ========= Semi Supervised =========
SEMI_MULTIPLIER_FOR_WEAK_LOSS = 1 #0.1
SEMI_WEIGHT_REG_DELTA_LOSS = 0.
SEMI_WEIGHT_G_LOSS = 0.4
SEMI_WEIGHT_G_FEATURE_MATCH_LOSS = 0.
SEMI_WEIGHT_BOXPC_FIT_LOSS = 1.
SEMI_WEIGHT_BOXPC_INTRACLS_FIT_LOSS = 0 #0.001
SEMI_WEIGHT_BOXPC_KMEANS_LOSS = 0
# ======== Weakly Supervised ========
# Loss Weights
# 1. Segmentation
WEAK_WEIGHT_CROSS_ENTROPY = 5.
WEAK_CLS_WEIGHTS_CROSS_ENTROPY = [2., 0.333, 1., 0.333]
WEAK_WEIGHT_VARIANCE = 1.
WEAK_WEIGHT_INACTIVE = 4.
WEAK_WEIGHT_BINARY = 0.002
# 2. Box Estimation
WEAK_WEIGHT_ANCHOR_CLS = 1.
# Variance of y center predictions across the batch
WEAK_WEIGHT_CENTER_YVAR = 1.
WEAK_WEIGHT_INACTIVE_VOLUME = 0 #1.
WEAK_WEIGHT_REPROJECTION = 0.01 #0.25
WEAK_WEIGHT_SURFACE = 1.
WEAK_WEIGHT_INTRACLASSVAR = 0 #0.025
# Whether to train Seg / Box with the weak losses.
# Box parameters to train each of the following loss functions (Center, Dims, Orient)
# If False, tf.stop_gradient will be applied to prevent gradients from propagating. None means N.A.
WEAK_TRAIN_SEG_W_SURFACE = False
WEAK_TRAIN_BOX_W_REPROJECTION = [True, True, True]
WEAK_TRAIN_BOX_W_SURFACE = [True, False, True]
# Margin for Variance loss before there will be no more penalization (to prevent excessively small masks)
# A margin of 1 means that once the mask lies within a sphere of radius 1m, it will no longer be penalized.
# A margin of 0 means there is no margin, it will always want to seek a smaller mask.
WEAK_VARIANCE_LOSS_MARGIN = 1.5
# Whether to use the ground truth anchor label
WEAK_USE_GT_ANCHOR_CLS_LABEL = False
# Margin for Inactive vol loss before there will be no more penalization (to prevent excessively small vols)
WEAK_INACTIVE_VOL_LOSS_MARGINS = [10.,0.,0.]
WEAK_INACTIVE_VOL_ONLY_ON_2D_CLS = True
# Apply softmax on projection instead of maximum
# Depending on softmax_scale, the points that are closest to the max/min will be given different weightings
# E.g. suppose there are 8 points [0, 0.1, 0.2, 0.4, 0.6, 0.8, 0.9, 1.0]; after softmax, the weightings are:
# Weighting given for scale = 1: [0.071, 0.079, 0.087, 0.106, 0.13 , 0.158, 0.175, 0.194]
# Weighting given for scale = 5: [0.003, 0.005, 0.008, 0.023, 0.062, 0.168, 0.276, 0.455]
# Weighting given for scale = 10: [0.0 , 0.0 , 0.0 , 0.002, 0.012, 0.089, 0.241, 0.656]
# Weighting given for scale = 20: [0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.016, 0.117, 0.867]
# Weighting given for scale = 40: [0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.018, 0.982]
WEAK_REPROJECTION_USE_SOFTMAX_PROJ = False
WEAK_REPROJECTION_SOFTMAX_SCALE = 10.
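# Sketch (illustration only; never called by the pipeline): the weighting
# tables above are a softmax over the projected values scaled by
# WEAK_REPROJECTION_SOFTMAX_SCALE.
def _softmax_projection_weights(points, scale):
    exps = np.exp(scale * np.asarray(points, dtype=np.float64))
    return exps / exps.sum()
# _softmax_projection_weights([0, .1, .2, .4, .6, .8, .9, 1.], 10.)
# -> [0., 0., 0., 0.002, 0.012, 0.089, 0.241, 0.656] (rounded)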
# Only apply on 2D classes or 2D+3D classes
WEAK_REPROJECTION_ONLY_ON_2D_CLS = False
# Whether the reprojection loss should be applied on all points
WEAK_REPROJECTION_CLIP_LOWERB_LOSS = True
# Whether to clip the projection of the 3D bbox or not (will prevent training if it exceeds the box)
WEAK_REPROJECTION_CLIP_PRED_BOX = False
# Reprojection Loss (huber/mse)
WEAK_REPROJECTION_LOSS_TYPE = 'huber'
# Given the GT 2D bbox as the lower bound, we will dilate it by this factor to give the upper bound.
# The reprojection loss does not penalize for Pred 2D bboxes between these bounds.
WEAK_REPROJECTION_DILATE_FACTOR = 1.5
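# E.g. (presumably) a 100x50 GT box with factor 1.5 gives a 150x75 upper
# bound; predicted 2D boxes between the two bounds incur no loss.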
# No penalization if points lie within margin
WEAK_SURFACE_MARGIN = 0
# If points are within the box, surface loss will be weighted
WEAK_SURFACE_LOSS_WT_FOR_INNER_PTS = 0.8
# Value to scale dims before calculating surface loss
WEAK_SURFACE_LOSS_SCALE_DIMS = 0.9
# Intraclass Dims Variance Loss (huber/mse)
WEAK_DIMS_LOSS_TYPE = 'huber'
# Whether to use margin for loss or not
WEAK_DIMS_USE_MARGIN_LOSS = True
# Margin allowed before penalizing
WEAK_DIMS_SD_MARGIN = 0.2
# Decay applied to the exponential moving average of dims per class
WEAK_DIMS_EMA_DECAY = 0.99
# ======== Strongly Supervised ========
# Loss Weights
# 1. Segmentation
STRONG_WEIGHT_CROSS_ENTROPY = 1.
# 2. Box Estimation
STRONG_BOX_MULTIPLER = 0.1
STRONG_WEIGHT_CENTER = 1.
STRONG_WEIGHT_ORIENT_CLS = 1.
STRONG_WEIGHT_ORIENT_REG = 20.
STRONG_WEIGHT_DIMS_CLS = 1.
STRONG_WEIGHT_DIMS_REG = 20.
STRONG_WEIGHT_TNET_CENTER = 1.
STRONG_WEIGHT_CORNER = 1.
# ==================================== SUNRGBD ====================================
# Classes when doing strongly supervised learning
SUNRGBD_STRONG_TRAIN_CLS = ['bed','table','sofa','chair','toilet','desk','dresser','night_stand','bookshelf','bathtub']
# Classes when doing semi supervised learning
SUNRGBD_SEMI_TRAIN_CLS = ['bed','chair','toilet','desk','bathtub']
SUNRGBD_SEMI_TEST_CLS = [cls_type for cls_type in SUNRGBD_STRONG_TRAIN_CLS if cls_type not in SUNRGBD_SEMI_TRAIN_CLS]
# Classes when doing weakly supervised learning
SUNRGBD_WEAK_TRAIN_CLS = SUNRGBD_STRONG_TRAIN_CLS
SUNRGBD_WEAK_TEST_CLS = SUNRGBD_STRONG_TRAIN_CLS
# ===================================== KITTI =====================================
KITTI_ALL_CLS = ['Car', 'Pedestrian', 'Cyclist']
# Classes when doing semi supervised learning
KITTI_SEMI_TRAIN_CLS = [] #['Car', 'Pedestrian']
KITTI_SEMI_TEST_CLS = [cls_type for cls_type in KITTI_ALL_CLS if cls_type not in KITTI_SEMI_TRAIN_CLS]
class SpecialArgumentParser(argparse.ArgumentParser):
"""
Works like standard Argparse but provides set_attributes function to support adding in of dictionary / array
attributes in the object returned by parse_special_args.
"""
def __init__(self):
argparse.ArgumentParser.__init__(self)
self.attr_names_and_vals = None
def parse_special_args(self):
flags = self.parse_args()
for attr_name, attr_val in self.attr_names_and_vals:
setattr(flags, attr_name, attr_val)
config_str = self.get_config_str(flags)
setattr(flags, 'config_str', config_str)
return flags
def set_attributes(self, attr_names_and_vals):
self.attr_names_and_vals = attr_names_and_vals
def get_config_str(self, c):
if not hasattr(c, 'mode'): c.mode = None
config_str = '\n * REMEMBER TO CHECK THE CONFIGURATIONS * \n\n' + \
' [MODE: %s]\n' % (c.mode) + \
' [CLASSES]\n' + \
' SUNRGBD_STRONG_TRAIN_CLS : %s\n' % (c.SUNRGBD_STRONG_TRAIN_CLS) + \
' SUNRGBD_SEMI_TRAIN_CLS : %s\n' % (c.SUNRGBD_SEMI_TRAIN_CLS) + \
' SUNRGBD_SEMI_TEST_CLS : %s\n' % (c.SUNRGBD_SEMI_TEST_CLS) + \
' SUNRGBD_WEAK_TRAIN_CLS : %s\n' % (c.SUNRGBD_WEAK_TRAIN_CLS) + \
' SUNRGBD_WEAK_TEST_CLS : %s\n' % (c.SUNRGBD_WEAK_TEST_CLS)
return config_str
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
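# Note: values outside this set (e.g. 'no', 'off', or a typo) parse as False
# rather than raising an error.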
cfg = SpecialArgumentParser()
cfg.set_attributes([])
# Parameters that can be changed using the command line
cfg.add_argument('--BOX_PC_MASK_REPRESENTATION', type=str, default=BOX_PC_MASK_REPRESENTATION)
cfg.add_argument('--USE_NORMALIZED_BOX2D_AS_FEATS', type=str2bool, default=USE_NORMALIZED_BOX2D_AS_FEATS)
cfg.add_argument('--NORMALIZE_PC_BEFORE_SEG', type=str2bool, default=NORMALIZE_PC_BEFORE_SEG)
cfg.add_argument('--NORMALIZATION_METHOD', type=str, default=NORMALIZATION_METHOD)
cfg.add_argument('--BOXPC_SAMPLING_METHOD', type=str, default=BOXPC_SAMPLING_METHOD)
cfg.add_argument('--BOXPC_SAMPLE_EQUAL_CLASS_WITH_PROB', type=float, default=BOXPC_SAMPLE_EQUAL_CLASS_WITH_PROB)
cfg.add_argument('--BOXPC_PROPORTION_OF_BOXPC_FIT', type=float, default=BOXPC_PROPORTION_OF_BOXPC_FIT)
cfg.add_argument('--BOXPC_NOFIT_BOUNDS', nargs='+', type=float, default=BOXPC_NOFIT_BOUNDS)
cfg.add_argument('--BOXPC_FIT_BOUNDS', nargs='+', type=float, default=BOXPC_FIT_BOUNDS)
cfg.add_argument('--BOXPC_CENTER_PERTURBATION', type=float, default=BOXPC_CENTER_PERTURBATION)
cfg.add_argument('--BOXPC_SIZE_PERTURBATION', type=float, default=BOXPC_SIZE_PERTURBATION)
cfg.add_argument('--BOXPC_ANGLE_PERTURBATION', type=float, default=BOXPC_ANGLE_PERTURBATION)
cfg.add_argument('--BOXPC_DELTA_LOSS_TYPE', type=str, default=BOXPC_DELTA_LOSS_TYPE)
cfg.add_argument('--BOXPC_WEIGH_DELTA_PRED_BY_CLS_CONF', type=str2bool, default=BOXPC_WEIGH_DELTA_PRED_BY_CLS_CONF)
cfg.add_argument('--BOXPC_WEIGH_DELTA_LOSS_BY_CLS_CONF', type=str2bool, default=BOXPC_WEIGH_DELTA_LOSS_BY_CLS_CONF)
cfg.add_argument('--BOXPC_STOP_GRAD_OF_CLS_VIA_DELTA', type=str2bool, default=BOXPC_STOP_GRAD_OF_CLS_VIA_DELTA)
cfg.add_argument('--BOXPC_WEIGH_DELTA_LOSS_BY_CLS_GT', type=str2bool, default=BOXPC_WEIGH_DELTA_LOSS_BY_CLS_GT)
cfg.add_argument('--BOXPC_WEIGHT_CLS', type=float, default=BOXPC_WEIGHT_CLS)
cfg.add_argument('--BOXPC_WEIGHT_DELTA', type=float, default=BOXPC_WEIGHT_DELTA)
cfg.add_argument('--BOXPC_WEIGHT_DELTA_CENTER_PERCENT', type=float, default=BOXPC_WEIGHT_DELTA_CENTER_PERCENT)
cfg.add_argument('--BOXPC_WEIGHT_DELTA_SIZE_PERCENT', type=float, default=BOXPC_WEIGHT_DELTA_SIZE_PERCENT)
cfg.add_argument('--BOXPC_WEIGHT_DELTA_ANGLE_PERCENT', type=float, default=BOXPC_WEIGHT_DELTA_ANGLE_PERCENT)
cfg.add_argument('--BOXPC_WEIGHT_CLUSTER', type=float, default=BOXPC_WEIGHT_CLUSTER)
cfg.add_argument('--SEMI_MODEL', type=str, default=SEMI_MODEL)
cfg.add_argument('--SEMI_SAMPLING_METHOD', type=str, default=SEMI_SAMPLING_METHOD)
cfg.add_argument('--SEMI_SAMPLE_EQUAL_CLASS_WITH_PROB', type=float, default=SEMI_SAMPLE_EQUAL_CLASS_WITH_PROB)
cfg.add_argument('--SEMI_USE_LABELS2D_OF_CLASSES3D', type=str2bool, default=SEMI_USE_LABELS2D_OF_CLASSES3D)
cfg.add_argument('--SEMI_ADV_INITIAL_ITERS_BEFORE_TRAIN', type=int, default=SEMI_ADV_INITIAL_ITERS_BEFORE_TRAIN)
cfg.add_argument('--SEMI_ADV_INITIAL_TRAINING_EPOCHS', type=int, default=SEMI_ADV_INITIAL_TRAINING_EPOCHS)
cfg.add_argument('--SEMI_ADV_ITERS_FOR_D', type=int, default=SEMI_ADV_ITERS_FOR_D)
cfg.add_argument('--SEMI_ADV_SAMPLE_EQUAL_CLASS_W_PROB', type=float, default=SEMI_ADV_SAMPLE_EQUAL_CLASS_W_PROB)
cfg.add_argument('--SEMI_ADV_DROPOUTS_FOR_G', type=float, default=SEMI_ADV_DROPOUTS_FOR_G)
cfg.add_argument('--SEMI_ADV_FLIP_LABELS_FOR_D_PROB', type=float, default=SEMI_ADV_FLIP_LABELS_FOR_D_PROB)
cfg.add_argument('--SEMI_ADV_SOFT_NOISY_LABELS_FOR_D', type=str2bool, default=SEMI_ADV_SOFT_NOISY_LABELS_FOR_D)
cfg.add_argument('--SEMI_ADV_FEATURE_MATCHING', type=str2bool, default=SEMI_ADV_FEATURE_MATCHING)
cfg.add_argument('--SEMI_ADV_NORMALIZE_PC_TO_NEG1_TO_1', type=str2bool, default=SEMI_ADV_NORMALIZE_PC_TO_NEG1_TO_1)
cfg.add_argument('--SEMI_ADV_TANH_FOR_LAST_LAYER_OF_G', type=str2bool, default=SEMI_ADV_TANH_FOR_LAST_LAYER_OF_G)
cfg.add_argument('--SEMI_ADV_DIFF_MINIBATCH_REAL_VS_FAKE', type=str2bool, default=SEMI_ADV_DIFF_MINIBATCH_REAL_VS_FAKE)
cfg.add_argument('--SEMI_ADV_LEAKY_RELU', type=str2bool, default=SEMI_ADV_LEAKY_RELU)
cfg.add_argument('--SEMI_ADV_AVERAGE_POOLING', type=str2bool, default=SEMI_ADV_AVERAGE_POOLING)
cfg.add_argument('--SEMI_TRAIN_BOXPC_MODEL', type=str2bool, default=SEMI_TRAIN_BOXPC_MODEL)
cfg.add_argument('--SEMI_TRAIN_SEG_TRAIN_CLASS_AG_SEG', type=str2bool, default=SEMI_TRAIN_SEG_TRAIN_CLASS_AG_SEG)
cfg.add_argument('--SEMI_TRAIN_BOX_TRAIN_CLASS_AG_TNET', type=str2bool, default=SEMI_TRAIN_BOX_TRAIN_CLASS_AG_TNET)
cfg.add_argument('--SEMI_TRAIN_BOX_TRAIN_CLASS_AG_BOX', type=str2bool, default=SEMI_TRAIN_BOX_TRAIN_CLASS_AG_BOX)
cfg.add_argument('--SEMI_BOXPC_MIN_FIT_LOSS_AFT_REFINE', type=str2bool, default=SEMI_BOXPC_MIN_FIT_LOSS_AFT_REFINE)
cfg.add_argument('--SEMI_BOXPC_FIT_ONLY_ON_2D_CLS', type=str2bool, default=SEMI_BOXPC_FIT_ONLY_ON_2D_CLS)
cfg.add_argument('--SEMI_WEIGH_BOXPC_DELTA_DURING_TEST', type=str2bool, default=SEMI_WEIGH_BOXPC_DELTA_DURING_TEST)
cfg.add_argument('--SEMI_REFINE_USING_BOXPC_DELTA_NUM', type=int, default=SEMI_REFINE_USING_BOXPC_DELTA_NUM)
cfg.add_argument('--SEMI_INTRACLSDIMS_ONLY_ON_2D_CLS', type=str2bool, default=SEMI_INTRACLSDIMS_ONLY_ON_2D_CLS)
cfg.add_argument('--SEMI_MULTIPLIER_FOR_WEAK_LOSS', type=float, default=SEMI_MULTIPLIER_FOR_WEAK_LOSS)
cfg.add_argument('--SEMI_WEIGHT_REG_DELTA_LOSS', type=float, default=SEMI_WEIGHT_REG_DELTA_LOSS)
cfg.add_argument('--SEMI_WEIGHT_G_LOSS', type=float, default=SEMI_WEIGHT_G_LOSS)
cfg.add_argument('--SEMI_WEIGHT_G_FEATURE_MATCH_LOSS', type=float, default=SEMI_WEIGHT_G_FEATURE_MATCH_LOSS)
cfg.add_argument('--SEMI_WEIGHT_BOXPC_FIT_LOSS', type=float, default=SEMI_WEIGHT_BOXPC_FIT_LOSS)
cfg.add_argument('--SEMI_WEIGHT_BOXPC_INTRACLS_FIT_LOSS', type=float, default=SEMI_WEIGHT_BOXPC_INTRACLS_FIT_LOSS)
cfg.add_argument('--SEMI_WEIGHT_BOXPC_KMEANS_LOSS', type=float, default=SEMI_WEIGHT_BOXPC_KMEANS_LOSS)
cfg.add_argument('--WEAK_WEIGHT_CROSS_ENTROPY', type=float, default=WEAK_WEIGHT_CROSS_ENTROPY)
cfg.add_argument('--WEAK_CLS_WEIGHTS_CROSS_ENTROPY', nargs='+', type=float, default=WEAK_CLS_WEIGHTS_CROSS_ENTROPY)
cfg.add_argument('--WEAK_WEIGHT_VARIANCE', type=float, default=WEAK_WEIGHT_VARIANCE)
cfg.add_argument('--WEAK_WEIGHT_INACTIVE', type=float, default=WEAK_WEIGHT_INACTIVE)
cfg.add_argument('--WEAK_WEIGHT_BINARY', type=float, default=WEAK_WEIGHT_BINARY)
cfg.add_argument('--WEAK_WEIGHT_ANCHOR_CLS', type=float, default=WEAK_WEIGHT_ANCHOR_CLS)
cfg.add_argument('--WEAK_WEIGHT_CENTER_YVAR', type=float, default=WEAK_WEIGHT_CENTER_YVAR)
cfg.add_argument('--WEAK_WEIGHT_INACTIVE_VOLUME', type=float, default=WEAK_WEIGHT_INACTIVE_VOLUME)
cfg.add_argument('--WEAK_WEIGHT_REPROJECTION', type=float, default=WEAK_WEIGHT_REPROJECTION)
cfg.add_argument('--WEAK_WEIGHT_SURFACE', type=float, default=WEAK_WEIGHT_SURFACE)
cfg.add_argument('--WEAK_WEIGHT_INTRACLASSVAR', type=float, default=WEAK_WEIGHT_INTRACLASSVAR)
cfg.add_argument('--WEAK_TRAIN_SEG_W_SURFACE', type=str2bool, default=WEAK_TRAIN_SEG_W_SURFACE)
cfg.add_argument('--WEAK_TRAIN_BOX_W_REPROJECTION', nargs='+', type=str2bool, default=WEAK_TRAIN_BOX_W_REPROJECTION)
cfg.add_argument('--WEAK_TRAIN_BOX_W_SURFACE', nargs='+', type=str2bool, default=WEAK_TRAIN_BOX_W_SURFACE)
cfg.add_argument('--WEAK_VARIANCE_LOSS_MARGIN', type=float, default=WEAK_VARIANCE_LOSS_MARGIN)
cfg.add_argument('--WEAK_USE_GT_ANCHOR_CLS_LABEL', type=str2bool, default=WEAK_USE_GT_ANCHOR_CLS_LABEL)
cfg.add_argument('--WEAK_INACTIVE_VOL_LOSS_MARGINS', nargs='+', type=float, default=WEAK_INACTIVE_VOL_LOSS_MARGINS)
cfg.add_argument('--WEAK_INACTIVE_VOL_ONLY_ON_2D_CLS', type=str2bool, default=WEAK_INACTIVE_VOL_ONLY_ON_2D_CLS)
cfg.add_argument('--WEAK_REPROJECTION_USE_SOFTMAX_PROJ', type=str2bool, default=WEAK_REPROJECTION_USE_SOFTMAX_PROJ)
cfg.add_argument('--WEAK_REPROJECTION_SOFTMAX_SCALE', type=float, default=WEAK_REPROJECTION_SOFTMAX_SCALE)
cfg.add_argument('--WEAK_REPROJECTION_ONLY_ON_2D_CLS', type=str2bool, default=WEAK_REPROJECTION_ONLY_ON_2D_CLS)
cfg.add_argument('--WEAK_REPROJECTION_CLIP_LOWERB_LOSS', type=str2bool, default=WEAK_REPROJECTION_CLIP_LOWERB_LOSS)
cfg.add_argument('--WEAK_REPROJECTION_CLIP_PRED_BOX', type=str2bool, default=WEAK_REPROJECTION_CLIP_PRED_BOX)
cfg.add_argument('--WEAK_REPROJECTION_LOSS_TYPE', type=str, default=WEAK_REPROJECTION_LOSS_TYPE)
cfg.add_argument('--WEAK_REPROJECTION_DILATE_FACTOR', type=float, default=WEAK_REPROJECTION_DILATE_FACTOR)
cfg.add_argument('--WEAK_SURFACE_MARGIN', type=float, default=WEAK_SURFACE_MARGIN)
cfg.add_argument('--WEAK_SURFACE_LOSS_WT_FOR_INNER_PTS', type=float, default=WEAK_SURFACE_LOSS_WT_FOR_INNER_PTS)
cfg.add_argument('--WEAK_SURFACE_LOSS_SCALE_DIMS', type=float, default=WEAK_SURFACE_LOSS_SCALE_DIMS)
cfg.add_argument('--WEAK_DIMS_LOSS_TYPE', type=str, default=WEAK_DIMS_LOSS_TYPE)
cfg.add_argument('--WEAK_DIMS_USE_MARGIN_LOSS', type=str2bool, default=WEAK_DIMS_USE_MARGIN_LOSS)
cfg.add_argument('--WEAK_DIMS_SD_MARGIN', type=float, default=WEAK_DIMS_SD_MARGIN)
cfg.add_argument('--WEAK_DIMS_EMA_DECAY', type=float, default=WEAK_DIMS_EMA_DECAY)
cfg.add_argument('--STRONG_WEIGHT_CROSS_ENTROPY', type=float, default=STRONG_WEIGHT_CROSS_ENTROPY)
cfg.add_argument('--STRONG_BOX_MULTIPLER', type=float, default=STRONG_BOX_MULTIPLER)
cfg.add_argument('--STRONG_WEIGHT_CENTER', type=float, default=STRONG_WEIGHT_CENTER)
cfg.add_argument('--STRONG_WEIGHT_ORIENT_CLS', type=float, default=STRONG_WEIGHT_ORIENT_CLS)
cfg.add_argument('--STRONG_WEIGHT_ORIENT_REG', type=float, default=STRONG_WEIGHT_ORIENT_REG)
cfg.add_argument('--STRONG_WEIGHT_DIMS_CLS', type=float, default=STRONG_WEIGHT_DIMS_CLS)
cfg.add_argument('--STRONG_WEIGHT_DIMS_REG', type=float, default=STRONG_WEIGHT_DIMS_REG)
cfg.add_argument('--STRONG_WEIGHT_TNET_CENTER', type=float, default=STRONG_WEIGHT_TNET_CENTER)
cfg.add_argument('--STRONG_WEIGHT_CORNER', type=float, default=STRONG_WEIGHT_CORNER)
cfg.add_argument('--SUNRGBD_STRONG_TRAIN_CLS', nargs='+', type=str, default=SUNRGBD_STRONG_TRAIN_CLS)
cfg.add_argument('--SUNRGBD_SEMI_TRAIN_CLS', nargs='+', type=str, default=SUNRGBD_SEMI_TRAIN_CLS)
cfg.add_argument('--SUNRGBD_SEMI_TEST_CLS', nargs='+', type=str, default=SUNRGBD_SEMI_TEST_CLS)
cfg.add_argument('--SUNRGBD_WEAK_TRAIN_CLS', nargs='+', type=str, default=SUNRGBD_WEAK_TRAIN_CLS)
cfg.add_argument('--SUNRGBD_WEAK_TEST_CLS', nargs='+', type=str, default=SUNRGBD_WEAK_TEST_CLS)
cfg.add_argument('--KITTI_ALL_CLS', nargs='+', type=str, default=KITTI_ALL_CLS)
cfg.add_argument('--KITTI_SEMI_TRAIN_CLS', nargs='+', type=str, default=KITTI_SEMI_TRAIN_CLS)
cfg.add_argument('--KITTI_SEMI_TEST_CLS', nargs='+', type=str, default=KITTI_SEMI_TEST_CLS)
|
from googletrans import Translator as google_translator
from src.inc.lang_detection_utils import *
class Translator:
def __init__(self, text: str, target: str) -> None:
self.text = text
self.target = target # Can be code or full name
self.translator = google_translator()
self.lang = detect_lang(self.text)
self.translation = None
def get_text_lang(self) -> str:
if not self.lang:
self.lang = detect_lang(self.text)
return self.lang
def translate(self) -> str:
if not self.translation:
self.translation = self.translator.translate(
self.text, dest=self.target, src=self.lang).text
return self.translation
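if __name__ == '__main__':
    # Quick sanity check (sketch): requires network access for googletrans,
    # and the expected outputs below are illustrative.
    t = Translator('Bonjour le monde', target='en')
    print(t.get_text_lang())  # e.g. 'fr'
    print(t.translate())      # e.g. 'Hello world'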
|
import pygame
from settings import *
from hero import *
from walls import *
class Game:
def __init__(self):
self.walls = None
self.hero = None
self.done = False
pygame.init()
self.screen = pygame.display.set_mode(WINDOW_SIZE)
self.clock = pygame.time.Clock()
pygame.display.set_caption(TITLE)
def setup(self):
self.hero = Hero(WIDTH/2, HEIGHT/2, 10, 10)
self.walls = Walls()
self.walls.platform_level()
def run(self):
while not self.done:
#### EVENT CHECK SECTION
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.done = True
if self.hero is not None:
self.hero.handle_event(event)
## extra stuff will go here
### clear the screen
self.screen.fill(WHITE)
if self.walls is not None:
self.walls.draw(self.screen)
walls = self.walls.walls
else:
walls = []
## extra stuff will go here
if self.hero is not None:
self.hero.update(walls)
self.hero.draw(self.screen)
#### update the display and move forward 1 frame
pygame.display.flip()
# --- Limit to 60 frames per second
self.clock.tick(FPS)
pygame.quit()
if __name__ == '__main__':
    game = Game()
    game.setup()
    game.run()
|
# Lint as: python3
"""Tests for custom tensorflow operators in HDRnet (CUDA only)."""
import collections
import hdrnet_ops as ops
import numpy as np
from parameterized import parameterized
import tensorflow.compat.v1 as tf
def _assert_tf_shape_equals(test_case, expected_shape, tf_tensor):
tf_shape = tf_tensor.shape.as_list()
tf.logging.info('expected_shape: %s, tf_shape: %s', expected_shape, tf_shape)
test_case.assertEqual(expected_shape, tf_shape)
def _assert_np_shape_equals(test_case, expected_shape, np_array):
np_shape = list(np_array.shape)
tf.logging.info('expected_shape: %s, np_shape: %s', expected_shape, np_shape)
test_case.assertEqual(expected_shape, np_shape)
def _assert_shape_equals(test_case, expected_shape, np_array, tf_tensor):
"""Asserts equality on shapes given numpy and tf inputs.
  Asserts (using test_case.assertEqual) that both np_array and tf_tensor have
  shape expected_shape.
Args:
test_case: (tf.test.TestCase) The TestCase currently being run.
expected_shape: (list of ints) The expected shape.
np_array: (np.ndarray) The input numpy array.
tf_tensor: (tf.Tensor) The input Tensorflow Tensor.
"""
_assert_tf_shape_equals(test_case, expected_shape, tf_tensor)
_assert_np_shape_equals(test_case, expected_shape, np_array)
def _get_device_string(use_gpu):
if use_gpu:
return '/gpu:0'
else:
return '/cpu:0'
_ForwardTestExtents = collections.namedtuple(
'_ForwardTestExtents',
'batch_size, h, w, input_channels, gh, gw, gd, gc, output_channels')
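# Each grid cell stores an affine transform per output channel: input_channels
# weights plus one offset, hence gc = output_channels * (1 + input_channels)
# in the tests below.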
class BilateralSliceTest(tf.test.TestCase):
def run_bilateral_slice(self, grid_data, guide_data, use_gpu):
dev = _get_device_string(use_gpu)
graph = tf.Graph()
with graph.as_default():
with tf.device(dev):
grid_tensor = tf.convert_to_tensor(
grid_data, name='grid', dtype=tf.float32)
guide_tensor = tf.convert_to_tensor(
guide_data, name='guide', dtype=tf.float32)
output_tensor = ops.bilateral_slice(grid_tensor, guide_tensor)
with self.test_session(
graph=graph, use_gpu=use_gpu, force_gpu=use_gpu) as sess:
output_data = sess.run(output_tensor)
return output_data, output_tensor
def run_bilateral_slice_grad(self, grid_data, guide_data, use_gpu):
dev = _get_device_string(use_gpu)
graph = tf.Graph()
with graph.as_default():
with tf.device(dev):
grid_tensor = tf.convert_to_tensor(
grid_data, name='grid', dtype=tf.float32)
guide_tensor = tf.convert_to_tensor(
guide_data, name='guide', dtype=tf.float32)
output_tensor = ops.bilateral_slice(grid_tensor, guide_tensor)
grid_grad_tensor, guide_grad_tensor = tf.gradients(
output_tensor, [grid_tensor, guide_tensor])
with self.test_session(
graph=graph, use_gpu=use_gpu, force_gpu=use_gpu) as sess:
grid_grad_data, guide_grad_data = sess.run(
[grid_grad_tensor, guide_grad_tensor])
return (grid_grad_tensor, guide_grad_tensor, grid_grad_data,
guide_grad_data)
def create_forward_test(self,
batch_size=3,
h=30,
w=25,
input_channels=3,
gh=16,
gw=12,
gd=8,
output_channels=3,
randomize_values=True):
np.random.seed(1234)
gc = output_channels * (1 + input_channels)
grid_shape = (batch_size, gh, gw, gd, gc)
guide_shape = (batch_size, h, w)
if randomize_values:
grid_data = np.random.rand(*grid_shape).astype(np.float32)
guide_data = np.random.rand(*guide_shape).astype(np.float32)
else:
grid_data = np.zeros(grid_shape)
guide_data = np.zeros(guide_shape)
sz = _ForwardTestExtents(batch_size, h, w, input_channels, gh, gw, gd, gc,
output_channels)
return sz, grid_data, guide_data
@parameterized.expand([('CPU', False), ('GPU', True)])
def test_shape(self, use_gpu):
"""bilateral_slice(grid, guide) should have the right shape."""
sz, grid_data, guide_data = self.create_forward_test()
output_data, output_tensor = self.run_bilateral_slice(
grid_data, guide_data, use_gpu)
_assert_shape_equals(self, [sz.batch_size, sz.h, sz.w, sz.gc], output_data,
output_tensor)
@parameterized.expand([('CPU', False), ('GPU', True)])
def test_grad_shape(self, use_gpu):
"""gradient(bilateral_slice(grid, guide)) should have the right shape."""
sz, grid_data, guide_data = self.create_forward_test()
grid_grad_tensor, guide_grad_tensor, grid_grad_data, guide_grad_data = (
self.run_bilateral_slice_grad(grid_data, guide_data, use_gpu))
_assert_shape_equals(self, [sz.batch_size, sz.gh, sz.gw, sz.gd, sz.gc],
grid_grad_data, grid_grad_tensor)
_assert_shape_equals(self, [sz.batch_size, sz.h, sz.w], guide_grad_data,
guide_grad_tensor)
# TODO(jiawen): Read back both CPU and GPU gradients and compare them to each
# other as well as the gradient checker.
def run_grad_test(self, batch_size, h, w, input_channels, gh, gw, gd,
output_channels, grad_tensor_name, use_gpu):
dev = _get_device_string(use_gpu)
gc = (1 + input_channels) * output_channels
grid_shape = [batch_size, gh, gw, gd, gc]
guide_shape = [batch_size, h, w]
output_shape = [batch_size, h, w, gc]
grid_data = np.random.rand(*grid_shape).astype(np.float32)
guide_data = np.random.rand(*guide_shape).astype(np.float32)
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
with tf.device(dev):
grid_tensor = tf.convert_to_tensor(
grid_data, name='grid', dtype=tf.float32)
guide_tensor = tf.convert_to_tensor(
guide_data, name='guide', dtype=tf.float32)
output_tensor = ops.bilateral_slice(grid_tensor, guide_tensor)
if grad_tensor_name == 'grid':
grad_tensor = grid_tensor
grad_shape = grid_shape
elif grad_tensor_name == 'guide':
grad_tensor = guide_tensor
grad_shape = guide_shape
# It is important to use self.test_session, which will disable the
# graph optimization, otherwise it won't use GPU ops. See details here:
# https://github.com/tensorflow/tensorflow/issues/2054
with self.test_session(graph=graph, use_gpu=use_gpu, force_gpu=use_gpu):
err = tf.test.compute_gradient_error(
grad_tensor, grad_shape, output_tensor, output_shape, delta=1e-4)
# Note that the gradient cannot be accurate, as trilinear interpolation
# is not a smooth function. When the interpolated point is on the grid,
# the gradient does not exist. Therefore, the analytical gradient (by
# gradient op, implemented in bilateral_slice.cu.cc) and numerical
      # gradient (by tf.test.compute_gradient_error) will never match exactly.
self.assertLess(err, 3e-3)
@parameterized.expand([('CPU', False), ('GPU', True)])
def test_grid_gradient(self, use_gpu):
"""True grid derivative should closely match numerical derivative."""
self.run_grad_test(
batch_size=3,
h=8,
w=5,
input_channels=3,
gh=6,
gw=3,
gd=7,
output_channels=4,
use_gpu=use_gpu,
grad_tensor_name='grid')
@parameterized.expand([('CPU', False), ('GPU', True)])
def test_guide_gradient(self, use_gpu):
"""True guide derivative should closely match numerical derivative."""
self.run_grad_test(
batch_size=1,
h=6,
w=18,
input_channels=1,
gh=3,
gw=9,
gd=7,
output_channels=1,
use_gpu=use_gpu,
grad_tensor_name='guide')
class BilateralSliceApplyTest(tf.test.TestCase):
def run_bilateral_slice_apply(self,
grid_data,
guide_data,
input_data,
use_gpu=False):
dev = _get_device_string(use_gpu)
graph = tf.Graph()
with graph.as_default():
with tf.device(dev):
grid_tensor = tf.convert_to_tensor(
grid_data, name='grid', dtype=tf.float32)
guide_tensor = tf.convert_to_tensor(
guide_data, name='guide', dtype=tf.float32)
input_tensor = tf.convert_to_tensor(
input_data, name='input', dtype=tf.float32)
output_tensor = ops.bilateral_slice_apply(
grid_tensor, guide_tensor, input_tensor, has_offset=True)
with self.test_session(
graph=graph, use_gpu=use_gpu, force_gpu=use_gpu) as sess:
output_data = sess.run(output_tensor)
return output_data, output_tensor
def run_bilateral_slice_apply_grad(self,
grid_data,
guide_data,
input_data,
use_gpu=False):
dev = _get_device_string(use_gpu)
graph = tf.Graph()
with graph.as_default():
with tf.device(dev):
grid_tensor = tf.convert_to_tensor(
grid_data, name='grid', dtype=tf.float32)
guide_tensor = tf.convert_to_tensor(
guide_data, name='guide', dtype=tf.float32)
input_tensor = tf.convert_to_tensor(
input_data, name='input', dtype=tf.float32)
output_tensor = ops.bilateral_slice_apply(
grid_tensor, guide_tensor, input_tensor, has_offset=True)
grid_grad_tensor, guide_grad_tensor, _ = tf.gradients(
output_tensor, [grid_tensor, guide_tensor, input_tensor])
with self.test_session(
graph=graph, use_gpu=use_gpu, force_gpu=use_gpu) as sess:
grid_grad_data, guide_grad_data = sess.run(
[grid_grad_tensor, guide_grad_tensor])
return (grid_grad_tensor, guide_grad_tensor, grid_grad_data,
guide_grad_data)
def create_forward_test(self,
batch_size=3,
h=30,
w=25,
input_channels=3,
gh=16,
gw=12,
gd=8,
output_channels=3,
randomize_values=True):
np.random.seed(1234)
gc = output_channels * (1 + input_channels)
grid_shape = (batch_size, gh, gw, gd, gc)
guide_shape = (batch_size, h, w)
input_shape = (batch_size, h, w, input_channels)
if randomize_values:
grid_data = np.random.rand(*grid_shape).astype(np.float32)
guide_data = np.random.rand(*guide_shape).astype(np.float32)
input_data = np.random.rand(*input_shape).astype(np.float32)
else:
grid_data = np.zeros(grid_shape)
guide_data = np.zeros(guide_shape)
input_data = np.zeros(input_shape)
sz = _ForwardTestExtents(batch_size, h, w, input_channels, gh, gw, gd, gc,
output_channels)
return sz, grid_data, guide_data, input_data
@parameterized.expand([('CPU', False), ('GPU', True)])
def test_shape(self, use_gpu):
"""bilateral_slice_apply(grid, guide) should have the right shape."""
sz, grid_data, guide_data, input_data = self.create_forward_test()
output_data, output_tensor = self.run_bilateral_slice_apply(
grid_data, guide_data, input_data, use_gpu)
_assert_shape_equals(self, [sz.batch_size, sz.h, sz.w, sz.output_channels],
output_data, output_tensor)
@parameterized.expand([('CPU', False), ('GPU', True)])
def test_grad_shape(self, use_gpu):
"""grad(bilateral_slice_apply(grid, guide)) should have the right shape."""
sz, grid_data, guide_data, input_data = self.create_forward_test()
grid_grad_tensor, guide_grad_tensor, grid_grad_data, guide_grad_data = (
self.run_bilateral_slice_apply_grad(grid_data, guide_data, input_data,
use_gpu))
_assert_shape_equals(self, [sz.batch_size, sz.gh, sz.gw, sz.gd, sz.gc],
grid_grad_data, grid_grad_tensor)
_assert_shape_equals(self, [sz.batch_size, sz.h, sz.w], guide_grad_data,
guide_grad_tensor)
# TODO(jiawen): Read back both CPU and GPU gradients and compare them to each
# other as well as the gradient checker.
def run_grad_test(self, batch_size, h, w, input_channels, gh, gw, gd,
output_channels, use_gpu, grad_tensor_name):
grid_shape = (batch_size, gh, gw, gd,
(1 + input_channels) * output_channels)
guide_shape = (batch_size, h, w)
input_shape = (batch_size, h, w, input_channels)
output_shape = (batch_size, h, w, output_channels)
grid_data = np.random.rand(*grid_shape).astype(np.float32)
guide_data = np.random.rand(*guide_shape).astype(np.float32)
input_data = np.random.rand(*input_shape).astype(np.float32)
dev = _get_device_string(use_gpu)
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
with tf.device(dev):
grid_tensor = tf.convert_to_tensor(
grid_data, name='grid', dtype=tf.float32)
guide_tensor = tf.convert_to_tensor(
guide_data, name='guide', dtype=tf.float32)
input_tensor = tf.convert_to_tensor(
input_data, name='input', dtype=tf.float32)
output_tensor = ops.bilateral_slice_apply(
grid_tensor, guide_tensor, input_tensor, has_offset=True)
if grad_tensor_name == 'grid':
grad_tensor = grid_tensor
grad_shape = grid_shape
elif grad_tensor_name == 'guide':
grad_tensor = guide_tensor
grad_shape = guide_shape
elif grad_tensor_name == 'input':
grad_tensor = input_tensor
grad_shape = input_shape
# It is important to use self.test_session, which will disable the
# graph optimization, otherwise it won't use GPU ops. See details here:
# https://github.com/tensorflow/tensorflow/issues/2054
with self.test_session(graph=graph, use_gpu=use_gpu, force_gpu=use_gpu):
err = tf.test.compute_gradient_error(grad_tensor, grad_shape,
output_tensor, output_shape)
self.assertLess(err, 1e-2)
@parameterized.expand([('CPU', False), ('GPU', True)])
def test_grid_gradient(self, use_gpu):
"""True grid derivative should closely match numerical derivative."""
self.run_grad_test(
batch_size=3,
h=8,
w=5,
input_channels=3,
gh=6,
gw=3,
gd=7,
output_channels=4,
use_gpu=use_gpu,
grad_tensor_name='grid')
@parameterized.expand([('CPU', False), ('GPU', True)])
def test_guide_gradient(self, use_gpu):
"""True guide derivative should closely match numerical derivative."""
self.run_grad_test(
batch_size=1,
h=6,
w=18,
input_channels=1,
gh=3,
gw=9,
gd=7,
output_channels=1,
use_gpu=use_gpu,
grad_tensor_name='guide')
@parameterized.expand([('CPU', False), ('GPU', True)])
def test_input_gradient(self, use_gpu):
"""True input derivative should closely match numerical derivative."""
self.run_grad_test(
batch_size=3,
h=8,
w=5,
input_channels=3,
gh=6,
gw=3,
gd=7,
output_channels=4,
use_gpu=use_gpu,
grad_tensor_name='input')
if __name__ == '__main__':
tf.test.main()
|
import logging
import sys
import time
from contextlib import contextmanager
import pandas as pd
from ..rpc import RPC
from ..taskqueue.objs import current_job_id
from .. import settings
from ..serialization import serializer, deserializer
logger = logging.getLogger(__name__)
def cancel_all():
keys = settings.redis_conn.keys("rq:job*")
if keys:
settings.redis_conn.delete(*keys)
def retrieve_profile(jids):
    connection = settings.redis_conn
    all_messages = []
    for jid in jids:
        key = "rq:profile:%s" % jid
        msgs = connection.lrange(key, 0, -1)
        if msgs:
            # Trim away the messages we just consumed.
            connection.ltrim(key, len(msgs), -1)
        big_message = {}
        for msg in msgs:
            msg = deserializer('cloudpickle')(msg)
            big_message.update(msg)
        if big_message:
            all_messages.append(big_message)
    # Guard before touching the DataFrame: popping 'start'/'end' from an
    # empty frame would raise a KeyError.
    if not all_messages:
        return None
    data = pd.DataFrame(all_messages)
    start_spread = data.pop('start')
    end_spread = data.pop('end')
    runtimes = end_spread - start_spread
    result = data.sum()
    result['start_spread'] = start_spread.max() - start_spread.min()
    result['end_spread'] = end_spread.max() - end_spread.min()
    result['total_runtimes'] = runtimes.sum()
    result['last_finish'] = end_spread.max()
    return result
#from dabaez
def save_profile(key, value, jid):
connection = settings.redis_conn
msg = {key : value}
msg = serializer('cloudpickle')(msg)
key = "rq:profile:%s" % jid
connection.lpush(key, msg)
connection.expire(key, settings.profile_ttl)
def timethis(what, jid=None):
@contextmanager
def benchmark():
start = time.time()
yield
end = time.time()
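        # The job id can be bound late via the `benchmark.jid` attribute set
        # below; if it is unset, fall back to the currently executing job's id.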
if benchmark.jid is None:
jid = current_job_id()
else:
jid = benchmark.jid
if settings.is_server and settings.profile and jid:
save_profile(what, end-start, jid)
else:
print("%s : %0.3f seconds" % (what, end-start))
benchmark.jid = jid
return benchmark()
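# Example usage (sketch):
#   with timethis('load_rows'):
#       do_work()
# On a profiling-enabled server worker this pushes {'load_rows': seconds}
# onto the job's Redis profile list; otherwise it prints the elapsed time.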
def make_rpc():
rpc = RPC()
rpc.register_function(cancel_all)
rpc.register_function(retrieve_profile)
return rpc
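# Sketch: a server would expose these functions with something like
#   rpc = make_rpc()
# and then serve `cancel_all` / `retrieve_profile` to clients; the exact
# serving call depends on the RPC class, which is not shown here.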
|