texto = input("Ingrese su texto: ")
def SpaceToDash(texto):
return texto.replace(" ", "-")
print(SpaceToDash(texto))
|
import socket
import serial
import time
class CommsObject:
"""
Base Class for a variety of communications types with standard interfaces
"""
def __init__(self, name = "", type = ""):
"""
Initializes the class
Args:
name: String- name of the object
type: String- type of the object
Returns:
comms object
"""
self.name = name
self.type = type
self.forwardList = []
def sendMessage(self):
"""
Sends a message
"""
return True
def recvMessage(self, sleeptime=.2):
"""
Receives a message
Args:
sleeptime: optional sleeptime float, if waiting for a response
"""
return None, False
def setName(self, name):
"""
Sets the name of the object
"""
self.name = name
def getName(self):
"""
Returns the name of the object
"""
return self.name
def openCom(self):
"""
Opens the communications channel
"""
return 0
def closeCom(self):
"""
Closes the communications channel
"""
return 0
class SerialObject(CommsObject):
"""
Handler for Serial Communication
"""
def __init__(self, name, port = "COM1", baud = 9600):
"""
Initializes a serial connection
Args:
name: String - name of the object
port: Optional String, Defaults to COM1, port to select
baud: Optional int, Defaults to 9600, baud rate to select
Returns:
CommsObject
"""
super().__init__(name, "Serial")
self.port = port
self.baud = baud
self.ser = None
def openCom(self):
"""
Opens the communications line
"""
self.ser = serial.Serial(self.port, self.baud)
def closeCom(self):
"""
Closes the communications line
"""
self.ser.close()
def sendMessage(self, message):
"""
Sends a message
Args:
message: data to be sent along the connection
Returns:
            bool: True if at least one byte was written
"""
bwr = 0
try:
bwr = self.ser.write(message)
        except TypeError:
bwr = self.ser.write(message.encode('utf-8'))
return bwr > 0
def recvMessage(self, sleeptime = .2):
"""
Receives a message
Args:
sleeptime: Optional Float - amount of time to sleep before checking for message
Returns:
msg: data retrieved, if any
success: boolean for whether or not data was retrieved
"""
        time.sleep(sleeptime)
msg = self.ser.read(self.ser.in_waiting)
try:
msg = msg.decode('utf-8')
        except UnicodeDecodeError:
if len(msg) == 0:
return "", False
return msg, True
def setPort(self, port):
"""
Sets the serial port
Args:
port: String - port to be set
"""
self.port = port
def setBaud(self, baud):
"""
Sets the baud rate
Args:
baud: Int - Baud rate to be set
"""
self.baud = baud
def getPort(self):
"""
Returns the set port
Returns:
String: port
"""
return self.port
def getBaud(self):
"""
Returns the baud rate
Returns:
Int: baud
"""
return self.baud
class UDPObject(CommsObject):
def __init__(self, name, IP = "192.168.1.1", Port = 8000):
        """
        Creates a UDP Communications instance
        Args:
            name: String - Name to give instance
            IP: String - IP address to bind to
            Port: Int - Port to bind to
        Returns:
            CommsObject instance
        """
        super().__init__(name, "UDP")
self.IP = IP
self.Port = Port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.bufferLen = 1024
self.lastAddr = None
def sendMessage(self, message):
"""
Sends a message
Args:
message: data to be sent along the connection
"""
print("Send Message: " + message)
self.sock.sendto(message.encode('utf-8'), (self.IP, self.Port))
def recvMessage(self):
"""
Receives a message
Returns:
msg: data retrieved, if any
success: boolean for whether or not data was retrieved
"""
success = True
data, addr = self.sock.recvfrom(self.bufferLen)
        if data is None:
success = False
else:
self.lastAddr = addr
return data, success
def setIP(self, IP):
"""
Sets the IP address of the comms handle
Args:
IP: String - IP to bind to
"""
self.IP = IP
def setPort(self, Port):
"""
Sets the Port of the comms handle
Args:
Port: Int - Port to bind to
"""
self.Port = Port
def openCom(self):
"""
Opens a communications object and binds
"""
self.sock.bind((self.IP, self.Port))
def closeCom(self):
"""
Closes a communications object
"""
self.sock.close()
def setBufferLen(self, bufferLen):
"""
Sets the buffer length
"""
self.bufferLen = bufferLen
def getIP(self):
"""
Returns the bound IP Address
Returns:
String: IP
"""
return self.IP
def getPort(self):
"""
Returns the bound port number
Returns:
Int: Port
"""
return self.Port
def getBufferLen(self):
"""
Returns the buffer length
Returns:
Int: Buffer length
"""
return self.bufferLen
class Communications:
"""
Communications wrapper class for multiple communications objects
"""
def __init__(self):
"""
Initializes an empty communications object
"""
self.commsObjects = []
self.updateObjects = []
def newComPort(self, name, type, args = []):
"""
Creates a new named comm port and adds it to the register
Args:
name: String - name to identify new port
type: String - type of port "UDP" or "Serial"
args: Optional - List of arguments specific to comm port
"""
newObj = None
if type == "UDP":
if len(args) == 2:
newObj = UDPObject(name, args[0], args[1])
else:
newObj = UDPObject(name)
elif type == "Serial":
if (len(args) == 2):
newObj = SerialObject(name, args[0], args[1])
else:
newObj = SerialObject(name)
self.commsObjects.append(newObj)
def sendMessage(self, name, message):
"""
Send a message from a comm port with a specific name
Args:
name: String - unique name of comm port
message: Data to send
"""
for i in range(len(self.commsObjects)):
if self.commsObjects[i].name == name:
self.commsObjects[i].sendMessage(message)
return
def recvMessage(self, name):
"""
Receives a message from a comm port with a specific name
Args:
name: String - unique name of comm port
Returns:
data: Retrieved data
success: Whether it was able to retrieve anything at all
"""
for i in range(len(self.commsObjects)):
if self.commsObjects[i].name == name:
return self.commsObjects[i].recvMessage()
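if __name__ == "__main__":
    # Minimal usage sketch (hypothetical names and values, not part of the original
    # module): register a UDP port and fire a datagram at a placeholder address.
    comms = Communications()
    comms.newComPort("telemetry", "UDP", ["127.0.0.1", 9000])
    comms.sendMessage("telemetry", "hello")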
|
##########################################################################
# Author: Samuca
#
# brief: Game! Guess what the number is
#
# this is an exercise from a list available on YouTube:
# https://www.youtube.com/playlist?list=PLHz_AreHm4dm6wYOIW20Nyg12TAjmMGT-
#########################################################################
from random import random
from math import floor
from time import sleep
# random() returns a float between 0 and 1
raffle = floor(random()*5)+1
# alternatively, randint(a, b) from the random module returns a random int directly
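#   e.g.: from random import randint; raffle = randint(1, 5)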
number = int(input("Guess a number between 1 and 5: "))
print("Proceeding...")
#wait for 3 seconds
sleep(3)
if number == raffle:
print("Congratulations!!")
else:
print(f"my number was {raffle}, dumb")
|
import sqlite3
class Database:
def create_card_table(self):
cur = self.conn.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS card(id INTEGER,
number TEXT, pin TEXT , balance INTEGER DEFAULT 0)''')
self.conn.commit()
def __init__(self):
self.conn = sqlite3.connect('card.s3db')
self.create_card_table()
class BankDatabaseApi:
    def add_card(self, card_number, card_pin):
        cur = self.conn.cursor()
        # parameterized queries avoid SQL injection and quoting issues
        cur.execute('INSERT INTO card(number, pin) VALUES(?, ?)', (card_number, card_pin))
        self.conn.commit()
    def get_card(self, card_number):
        cur = self.conn.cursor()
        cur.execute('SELECT * FROM card WHERE number = ?', (card_number,))
        row = cur.fetchone()
        return row
    def get_card_pin(self, card_number, pin_number):
        cur = self.conn.cursor()
        cur.execute('SELECT * FROM card WHERE number = ? AND pin = ?', (card_number, pin_number))
        row = cur.fetchone()
        return row
    def remove_card(self, card_number):
        cur = self.conn.cursor()
        cur.execute('DELETE FROM card WHERE number = ?', (card_number,))
        self.conn.commit()
    def mod_balance(self, card_number, income):
        cur = self.conn.cursor()
        cur.execute('UPDATE card SET balance = balance + ? WHERE number = ?', (income, card_number))
        self.conn.commit()
    def mod_pin(self, card_number, new_pin):
        pass
    def get_balance(self, card_number):
        cur = self.conn.cursor()
        cur.execute('SELECT balance FROM card WHERE number = ?', (card_number,))
        row = cur.fetchone()
        return row[0]
def __init__(self):
self.conn = sqlite3.connect('file:database/card.s3db?mode=rw', uri=True)
if __name__ == '__main__':
Database()
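    # Minimal usage sketch (hypothetical values; assumes database/card.s3db already
    # exists so BankDatabaseApi can open it in 'rw' mode):
    #   api = BankDatabaseApi()
    #   api.add_card('4000001234567890', '1234')
    #   api.mod_balance('4000001234567890', 100)
    #   print(api.get_balance('4000001234567890'))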
|
__author__ = 'bptripp'
import argparse
import numpy as np
try:
    import cPickle as pickle  # Python 2
except ImportError:
    import pickle  # Python 3 fallback
import matplotlib.pyplot as plt
from alexnet import preprocess, load_net
from orientation import find_stimuli, get_images
parser = argparse.ArgumentParser()
parser.add_argument('action', help='either save (evaluate and save tuning curves) or plot (plot examples)')
parser.add_argument('valid_image_path', help='path to stimulus images for validation')
valid_image_path = parser.parse_args().valid_image_path
action = parser.parse_args().action
curves_file = 'orientation_curves.pkl'
n = 200
if action == 'save':
model = load_net()
model.load_weights('orientation_weights.h5')
extension = '.png'
valid_stimuli = find_stimuli(valid_image_path, extension)
curves = []
for stimulus in valid_stimuli:
print(stimulus)
images = get_images(stimulus, extension)
curves.append(model.predict(images)[:,:n])
curves = np.array(curves, dtype=np.float16)
print(curves.shape)
f = open(curves_file, 'wb')
pickle.dump((valid_stimuli, curves), f)
f.close()
if action == 'plot':
f = open(curves_file, 'rb')
valid_stimuli, curves = pickle.load(f)
f.close()
plt.figure(figsize=(12,12))
for i in range(8):
for j in range(8):
ind = 8*i+j
plt.subplot(8,8,ind+1)
plt.plot(curves[0,:,ind])
plt.show()
|
from enum import Enum
class WebDriverFile(Enum):
# The Chrome driver file.
CHROME = "chromedriver"
# The Firefox driver file.
FIREFOX = "geckodriver"
# The Internet Explorer driver file.
IE = "IEDriverServer"
# The Edge driver file.
EDGE = "Microsoft Web Driver"
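# Example: WebDriverFile.CHROME.value == "chromedriver", so the enum maps a browser
# to the expected driver executable name.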
|
from ....pyaz_utils import _call_az
def list(cluster_name, name, resource_group):
'''
List version of a given application type.
Required Parameters:
- cluster_name -- Specify the name of the cluster, if not given it will be same as resource group name
- name -- Specify the application type name.
- resource_group -- Specify the resource group name. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az sf application-type version list", locals())
def delete(cluster_name, name, resource_group, version):
'''
Delete an application type version.
Required Parameters:
- cluster_name -- Specify the name of the cluster, if not given it will be same as resource group name
- name -- Specify the application type name.
- resource_group -- Specify the resource group name. You can configure the default group using `az configure --defaults group=<name>`
- version -- Specify the application type version.
'''
return _call_az("az sf application-type version delete", locals())
def show(cluster_name, name, resource_group, version):
'''
Show the properties of an application type version on an Azure Service Fabric cluster.
Required Parameters:
- cluster_name -- Specify the name of the cluster, if not given it will be same as resource group name
- name -- Specify the application type name.
- resource_group -- Specify the resource group name. You can configure the default group using `az configure --defaults group=<name>`
- version -- Specify the application type version.
'''
return _call_az("az sf application-type version show", locals())
def create(cluster_name, name, package_url, resource_group, version):
'''
Create a new application type on an Azure Service Fabric cluster.
Required Parameters:
- cluster_name -- Specify the name of the cluster, if not given it will be same as resource group name
- name -- Specify the application type name.
- package_url -- Specify the url of the application package sfpkg file.
- resource_group -- Specify the resource group name. You can configure the default group using `az configure --defaults group=<name>`
- version -- Specify the application type version.
'''
return _call_az("az sf application-type version create", locals())
|
# For ad when viewing a recipe
ad_equipment = [
'pan',
'frying pan',
'saute pan',
'sauce pan',
'saucepan',
'casserole pot',
'steamer basket',
'steamer',
'skimmer',
'spatula',
'ladle',
'spoon',
'solid spoon',
'pasta spoon',
]
|
"""
Supplement for Embodied AI lecture 20170112
Some Reinforcement Learning examples
Implementing only Temporal Difference methods so far:
- TD(0) prediction
- Q-Learning
- SARSA
TODO
- x use function approximation for v,q,q_Q,q_SARSA
- policy search for continuous space
- use state matrix as visual input / compare pg-pong, although that uses policy gradient
- use pushback for implementing lambda?
- saving/loading of previously learnt models
- clean up class structure
2017 Oswald Berthold
"""
import argparse, sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
# uh oh
from dimstack import dimensional_stacking
# # from scikit neural networks
# from sknn.mlp import Regressor, Layer
# try using keras
try:
from keras.layers import Input, Dense, Lambda, Merge
from keras.models import Model
from keras.optimizers import RMSprop
# from keras import initializations
from keras import initializers
# from keras.engine.topology import Merge
HAVE_KERAS = True
except ImportError, e:
print "Couldn't import Keras because %s" % e
HAVE_KERAS = False
sensorimotor_loops = [
"td_0_prediction", # TD(0) prediction of v
"td_0_off_policy_control", # aka Q-Learning
"td_0_on_policy_control", # aka SARSA"
]
def my_init(shape, name=None):
# return initializations.normal(shape, scale=0.01, name=name)
return initializers.normal(shape, stddev=0.01)
class Environment(object):
def __init__(self, agents = []):
self.agents = agents
self.t = 0
    def step(self, a=None):
        # base-class stub: echo the (optional) action back as the new state
        print "%s.step a = %s" % (self.__class__.__name__, a)
        s = a
        self.t += 1
        return s
def reset(self):
print "Implement me"
class GridEnvironment(Environment):
def __init__(self, agents = [], num_x = 3, num_y = 3):
Environment.__init__(self, agents = agents)
self.num_x, self.num_y = num_x, num_y
self.lim_l = np.array([[0], [0]])
self.lim_u = np.array([[self.num_x-1], [self.num_y-1]])
# # constant goal
# self.goal = np.array([[4], [1]])
# self.goal = np.array([[2], [3]])
# self.goal = np.array([[0], [0]])
# self.goal = np.array([[0], [2]])
# self.goal = np.array([[1], [2]])
# self.goal = np.array([[4], [4]])
# random fixed goal
self.goal = np.random.uniform([0, 0], [self.num_x, self.num_y], size=(1, 2)).T.astype(int) #
self.reset()
def reset(self):
# print "%s.reset" % self.__class__.__name__
# init state
self.s = np.zeros((len(self.agents), self.num_x, self.num_y))
# initialize agents
for agent_idx, agent in enumerate(self.agents):
x = np.random.randint(0, self.num_x)
y = np.random.randint(0, self.num_y)
self.s[agent_idx,x,y] = 1
agent.terminal = False
agent.terminal_ = 1
# print self.s # [agent_idx,x,y]
def step(self):
"""Actual gridworld mechanics"""
# loop over agents
for agent_idx, agent in enumerate(self.agents):
# if agent.terminal:
# return self.s
# print "ev.s", self.s[agent_idx]
# get agent location as coordinates
a_pos = self.decode_state_to_loc(self.s[agent_idx])
# get agent reward
a_reward = self.decode_loc_to_reward(a_pos)
# debug
# print "a_pos, a_reward", a_pos, a_reward
# compute agent sensors from location and reward
sensors = np.array([a_pos.flatten().tolist() + [a_reward]]).T
# step the agent
a = agent.step(sensors)
# check terminal for state a_pos, separate from reward computation
isterminal = self.decode_loc_to_terminal(a_pos)
agent.terminal = isterminal
self.s[agent_idx] = self.do_action(agent_idx, a)
# print "%s.step #%04d a_%d = %s, s_%d = %s" % (self.__class__.__name__, self.t, agent_idx, a, agent_idx, self.s[agent_idx])
self.t += 1
return self.s
def decode_state_to_loc(self, s):
return np.array([
[np.sum(np.argmax(s, axis=0))],
[np.sum(np.argmax(s, axis=1))]
])
def decode_loc_to_reward(self, l):
return (np.sum(l == self.goal) > 1.0) * 1.0
def decode_loc_to_terminal(self, l):
return np.all(l == self.goal)
def do_action(self, agent_idx, a):
s = self.s[agent_idx]
# print "s", s
# implement s = self.move(s, a)
# get agent world state: location x,y
ag_pos = self.decode_state_to_loc(s)
# decode action
ag_vel = self.decode_action(a)
# print "ag_vel = %s" % (ag_vel)
# # include map with walls / real maze
# # print "ag_pos", ag_pos
# if ag_pos[0,0] in [2,3,4] and ag_pos[1,0] in [3]:
# # ag_vel = np.clip(ag_vel, )
# ag_vel[1,0] = np.clip(ag_vel[1,0], -np.inf, 0)
ag_pos_ = np.clip(ag_pos + ag_vel, self.lim_l, self.lim_u)
ag_pos = ag_pos.flatten()
ag_pos_ = ag_pos_.flatten()
assert s[ag_pos[0], ag_pos[1]] == 1.0
# print "s", s[ag_pos[0], ag_pos[1]], s[ag_pos_[0], ag_pos_[1]]
# move
s[ag_pos[0], ag_pos[1] ] = 0.0
s[ag_pos_[0], ag_pos_[1]] = 1.0
# print "s = %s\na = %s/%s\ns' = %s" % (ag_pos, a, ag_vel, ag_pos_)
return s
def decode_action(self, a):
assert a.shape == (1, 1)
#
if a[0,0] == 0: # stay
vel = [0, 0]
elif a[0,0] == 1: # west
vel = [1, 0]
elif a[0,0] == 3: # north
vel = [0, 1]
elif a[0,0] == 5: # east
vel = [-1, 0]
elif a[0,0] == 7: # south
vel = [0, -1]
elif a[0,0] == 2: # northwest
vel = [1, 1]
elif a[0,0] == 4: # northeast
vel = [-1, 1]
elif a[0,0] == 6: # southeast
vel = [-1, -1]
elif a[0,0] == 8: # southwest
vel = [1, -1]
return np.array([vel]).T
class Agent(object):
def __init__(self, ndim_s = 2, ndim_a = 1):
self.ndim_a = ndim_a
self.ndim_s = ndim_s
self.a = np.zeros((self.ndim_a, 1))
self.s = np.zeros((self.ndim_s, 1))
self.t = 0
self.terminal = False
self.terminal_ = 1
def step(self, s):
print "s = %s" % s
self.t += 1
a = s
return a
class TD0PredictionAgent(Agent):
# def __init__(self, ndim_s = 3, ndim_a = 1, ndim_x = 3, ndim_y = 3, alpha = 1e-3, gamma = 0.0):
def __init__(self, args=argparse.Namespace(ndim_s = 3, ndim_a = 1)):
Agent.__init__(self, args.ndim_s, args.ndim_a)
# world dims
self.ndim_x = args.ndim_x
self.ndim_y = args.ndim_y
# learning rate
self.alpha = args.alpha # 5e-3
# policy epsilon
self.epsilon = args.epsilon # 5e-3
# discount factor
self.gamma = args.gamma # 0.7
# type of learner / experiment
self.sensorimotor_loop = args.sensorimotor_loop
# type of value functions representation: table, parameterized approximation
self.repr = args.repr
# fallback
self.avg_loss = 0.0
# hardcoded gridworld actions
self.actions = ["nop", "w", "nw", "n", "ne", "e", "se", "s", "sw"]
self.actions_num = np.arange(len(self.actions), dtype=int).reshape((len(self.actions), 1))
# action
self.a = np.zeros((self.actions_num.shape[1], 1))
self.a_tm1 = self.a.copy()
# state
self.s = np.zeros((self.ndim_s, 1)) # x, y, r
self.s_tm1 = self.s.copy()
# estimated state value function v
self.v_tbl = np.ones((self.ndim_x, self.ndim_y)) * 0.1
# estimated state-action value function q
q_shape = (self.ndim_x, self.ndim_y, len(self.actions))
self.q_tbl = np.ones(q_shape) * 0.0 # 2.0
# self.q_tbl = np.random.uniform(0, 10, q_shape)
# self.q_tbl = np.arange(np.prod(q_shape)).reshape(q_shape)
self.q_Q_tbl = np.ones(q_shape) * 0.0 # 2.0
# self.q_Q_tbl = np.random.uniform(0, 0.1, q_shape)
# self.q_Q_tbl[self.goal[0,0], self.goal[1,0]] = 0.0
self.q_SARSA_tbl = np.ones(q_shape) * 0.0 # 2.0
if self.repr == "table":
self.v = self.v_tbl_predict
self.q = self.q_tbl_predict
self.q_Q = self.q_Q_tbl_predict
self.q_SARSA = self.q_SARSA_tbl_predict
self.v_update = self.v_tbl_update
self.q_update = self.q_tbl_update
self.q_Q_update = self.q_Q_tbl_update
self.q_SARSA_update = self.q_SARSA_tbl_update
elif self.repr == "approximation" and HAVE_KERAS:
self.init_fa()
self.v = self.v_fa_predict
self.q = self.q_fa_predict
self.q_Q = self.q_Q_fa_predict
self.q_SARSA = self.q_SARSA_fa_predict
self.v_update = self.v_fa_update
self.q_update = self.q_fa_update
self.q_Q_update = self.q_Q_fa_update
self.q_SARSA_update = self.q_SARSA_fa_update
else:
print "Something went wrong, check the output"
sys.exit(1)
        # set policy according to the learner
print "self.sensorimotor_loop", self.sensorimotor_loop
if self.sensorimotor_loop == "td_0_prediction":
self.policy_func = self.policy_random
elif self.sensorimotor_loop == "td_0_off_policy_control" or \
self.sensorimotor_loop == "td_0_on_policy_control":
print "epsilon greedy"
self.policy_func = self.policy_epsilon_greedy
else:
# self.policy_func = self.policy_random
print "Unknown learner %s, exiting" % (self.sensorimotor_loop)
sys.exit(1)
def init_fa(self):
# init_str = "normal"
init_str = my_init
layer_1_num_units = 200
layer_2_num_units = 20
output_gain = 1.0
input_gain = 10.0
# this returns a tensor
inputs = Input(shape=(2,))
inputs_gain = Lambda(lambda x: x * input_gain)(inputs)
# inputs_squared = Lambda(lambda x: (x ** 2) * 0.1)(inputs)
# inputs_combined = Merge(mode="concat", concat_axis=1)([inputs_gain, inputs_squared])
# a layer instance is callable on a tensor, and returns a tensor
# x = Dense(layer_1_num_units, activation='tanh', init=init_str)(inputs_gain)
# x = Dense(layer_2_num_units, activation='tanh', init=init_str)(x)
x = Dense(layer_1_num_units, activation='tanh', kernel_initializer='random_normal')(inputs_gain)
x = Dense(layer_2_num_units, activation='tanh', kernel_initializer='random_normal')(x)
predictions = Dense(1, activation='linear')(x)
outputs_gain = Lambda(lambda x: x * output_gain)(predictions)
# this creates a model that includes
# the Input layer and three Dense layers
opt_v_fa = RMSprop(lr = self.alpha)
self.v_fa = Model(input=inputs, output=outputs_gain)
self.v_fa.compile(optimizer=opt_v_fa, loss='mse')
self.v_fa_training_cnt = 0
self.v_fa_training_loss = 0
# Q approximation
# this returns a tensor
inputs_q_fa = Input(shape=(2 + len(self.actions),))
# inputs_q_fa = Input(shape=(3,))
inputs_gain = Lambda(lambda x: x * input_gain)(inputs_q_fa)
# inputs_squared = Lambda(lambda x: (x ** 2) * 0.1)(inputs_q_fa)
# inputs_combined = Merge(mode="concat", concat_axis=1)([inputs_gain, inputs_squared])
# a layer instance is callable on a tensor, and returns a tensor
# x = Dense(layer_1_num_units, activation='tanh', init=init_str)(inputs_gain)
# x = Dense(layer_2_num_units, activation='tanh', init=init_str)(x)
x = Dense(layer_1_num_units, activation='tanh', kernel_initializer='random_normal')(inputs_gain)
x = Dense(layer_2_num_units, activation='tanh', kernel_initializer='random_normal')(x)
predictions = Dense(1, activation='linear')(x)
outputs_gain = Lambda(lambda x: x * output_gain)(predictions)
# this creates a model that includes
# the Input layer and three Dense layers
opt_q_fa = RMSprop(lr = self.alpha)
self.q_fa = Model(input=inputs_q_fa, output=outputs_gain)
self.q_fa.compile(optimizer=opt_q_fa, loss='mse')
self.q_fa_training_cnt = 0
self.q_fa_training_loss = 0
# this returns a tensor
# inputs_q_Q_fa = Input(shape=(3,))
inputs_q_Q_fa = Input(shape=(2 + len(self.actions),))
inputs_gain = Lambda(lambda x: x * input_gain)(inputs_q_Q_fa)
# inputs_squared = Lambda(lambda x: (x ** 2) * 0.1)(inputs_q_Q_fa)
# inputs_combined = Merge(mode="concat", concat_axis=1)([inputs_gain, inputs_squared])
# a layer instance is callable on a tensor, and returns a tensor
x = Dense(layer_1_num_units, activation='tanh')(inputs_gain)
x = Dense(layer_2_num_units, activation='tanh')(x)
predictions = Dense(1, activation='linear')(x)
outputs_gain = Lambda(lambda x: x * output_gain)(predictions)
# this creates a model that includes
# the Input layer and three Dense layers
opt_q_Q_fa = RMSprop(lr = self.alpha)
self.q_Q_fa = Model(input=inputs_q_Q_fa, output=outputs_gain)
self.q_Q_fa.compile(optimizer=opt_q_Q_fa, loss='mse')
self.q_Q_fa_training_cnt = 0
self.q_Q_fa_training_loss = 0
# this returns a tensor
inputs_q_SARSA_fa = Input(shape=(2 + len(self.actions),))
inputs_gain = Lambda(lambda x: x * input_gain)(inputs_q_SARSA_fa)
# inputs_squared = Lambda(lambda x: (x ** 2) * 0.1)(inputs_q_SARSA_fa)
# inputs_combined = Merge(mode="concat", concat_axis=1)([inputs_gain, inputs_squared])
# a layer instance is callable on a tensor, and returns a tensor
x = Dense(layer_1_num_units, activation='tanh')(inputs_gain)
x = Dense(layer_2_num_units, activation='tanh')(x)
predictions = Dense(1, activation='linear')(x)
outputs_gain = Lambda(lambda x: x * output_gain)(predictions)
# this creates a model that includes
# the Input layer and three Dense layers
opt_q_SARSA_fa = RMSprop(lr = self.alpha)
self.q_SARSA_fa = Model(input=inputs_q_SARSA_fa, output=outputs_gain)
self.q_SARSA_fa.compile(optimizer=opt_q_SARSA_fa, loss='mse')
self.q_SARSA_fa_training_cnt = 0
self.q_SARSA_fa_training_loss = 0
def v_fa_predict(self, s):
return self.v_fa.predict(s[:2,0].reshape((1,2)) * 1.0) * 1.0
def v_fa_update(self, s):
# print "s", s
v_fa_tm1 = self.v(self.s_tm1)
v_fa = self.v(s)
x = self.s_tm1[:2,0].reshape((1,2))
y = s[2,0] + self.gamma * v_fa
if True or self.v_fa_training_cnt > 100 or s[2,0] > 0.0:
# target_weight = (1.0 + s[2] * 10.0).reshape()
target_weight = np.ones((1,)) + s[2] * 10.0
self.v_fa_training_loss = self.v_fa.train_on_batch(x * 1.0, y * 1.0, sample_weight = target_weight) # starts training
self.v_fa_training_cnt += 1
def q_fa_predict(self, s, a):
a_ = np.zeros((len(self.actions),1))
a_[int(a[0,0]),0] = 1.0
# x = np.vstack((s[:2,0].reshape((2,1)), a))
x = np.vstack((s[:2,0].reshape((2,1)), a_))
return self.q_fa.predict(x.T * 1.0) * 1.0
def q_fa_update(self, s, a):
# print "s", s
a_tm1_ = np.zeros((len(self.actions),1))
a_tm1_[int(self.a_tm1[0,0]),0] = 1.0
# print "a_tm1_", a_tm1_
# q_fa_tm1 = self.q(self.s_tm1, self.a_tm1)
q_fa = self.q(s, a)
# x = np.vstack((self.s_tm1[:2,0].reshape((2,1)), self.a_tm1)).T
x = np.vstack((self.s_tm1[:2,0].reshape((2,1)), a_tm1_)).T
# print "x", x
y = s[2,0] + self.gamma * q_fa
if True or self.q_fa_training_cnt > 100 or s[2,0] > 0.0:
target_weight = np.ones((1,)) + s[2] * 10.0
self.q_fa_training_loss = self.q_fa.train_on_batch(x * 1.0, y * 1.0, sample_weight = target_weight) # starts training
self.q_fa_training_cnt += 1
def q_Q_fa_predict(self, s, a):
a_ = np.zeros((len(self.actions),1))
a_[a[0,0],0] = 1.0
x = np.vstack((s[:2,0].reshape((2,1)), a_))
# x = np.vstack((s[:2,0].reshape((2,1)), a))
return self.q_Q_fa.predict(x.T)
def q_Q_fa_update(self, s, a):
# print "s", s
a_tm1_ = np.zeros((len(self.actions),1))
a_tm1_[int(self.a_tm1[0,0]),0] = 1.0
# q_Q_fa_tm1 = self.q_Q(self.s_tm1, self.a_tm1)
q_Q_fa_ = []
for a_ in range(len(self.actions)):
q_Q_fa_.append(self.q_Q(self.s, np.array([[a_]])))
q_Q_fa_ = np.array([q_Q_fa_])
q_Q_fa_max = np.max(q_Q_fa_)
q_Q_fa_max = np.array([[q_Q_fa_max]]) # ?
# print "argmax", q_Q_fa_max
x = np.vstack((self.s_tm1[:2,0].reshape((2,1)), a_tm1_)).T
y = s[2,0] + self.gamma * q_Q_fa_max
# print "x", x, "y", y
if True or self.q_Q_fa_training_cnt > 100 or s[2,0] > 0.0:
target_weight = np.ones((1,)) + s[2] * 10.0
self.q_Q_fa_training_loss = self.q_Q_fa.train_on_batch(x, y, sample_weight = target_weight) # starts training
self.q_Q_fa_training_cnt += 1
def q_SARSA_fa_predict(self, s, a):
a_ = np.zeros((len(self.actions),1))
a_[a[0,0],0] = 1.0
x = np.vstack((s[:2,0].reshape((2,1)), a_))
# x = np.vstack((s[:2,0].reshape((2,1)), a))
return self.q_SARSA_fa.predict(x.T)
def q_SARSA_fa_update(self, s, a):
# print "s", s
a_tm1_ = np.zeros((len(self.actions),1))
a_tm1_[int(self.a_tm1[0,0]),0] = 1.0
q_SARSA_fa = self.q_SARSA(s, a)
x = np.vstack((self.s_tm1[:2,0].reshape((2,1)), a_tm1_)).T
y = s[2,0] + self.gamma * q_SARSA_fa
if True or self.q_SARSA_fa_training_cnt > 100 or s[2,0] > 0.0:
target_weight = np.ones((1,)) + s[2] * 10.0
self.q_SARSA_fa_training_loss = self.q_SARSA_fa.train_on_batch(x, y, sample_weight = target_weight) # starts training
self.q_SARSA_fa_training_cnt += 1
################################################################################
def update_get_indices(self, s, s_tm1, a_tm1):
l_x = int(s[0,0])
l_y = int(s[1,0])
l_x_tm1 = int(s_tm1[0,0])
l_y_tm1 = int(s_tm1[1,0])
l_a_tm1 = int(a_tm1[0,0])
return (l_x, l_y, l_x_tm1, l_y_tm1, l_a_tm1)
def v_tbl_predict(self, s):
l_x = int(s[0,0])
l_y = int(s[1,0])
return self.v_tbl[l_x, l_y]
def q_tbl_predict(self, s, a):
l_x = int(s[0,0])
l_y = int(s[1,0])
l_a = int(a[0,0])
return self.q_tbl[l_x, l_y, l_a]
def q_Q_tbl_predict(self, s, a):
l_x = int(s[0,0])
l_y = int(s[1,0])
l_a = int(a[0,0])
return self.q_Q_tbl[l_x, l_y, l_a]
def q_SARSA_tbl_predict(self, s, a):
l_x = int(s[0,0])
l_y = int(s[1,0])
l_a = int(a[0,0])
return self.q_SARSA_tbl[l_x, l_y, l_a]
def v_tbl_update(self, s):
l_x, l_y, l_x_tm1, l_y_tm1, l_a_tm1 = self.update_get_indices(s, self.s_tm1, self.a_tm1)
# back up old state value once
# self.v_tbl_s_tm1 = self.v_tbl[l_x_tm1, l_y_tm1].copy()
self.v_tbl_s_tm1 = self.v(self.s_tm1).copy()
# perform update, SB2nded pg. ?, eq. ?
# self.v_tbl[l_x_tm1, l_y_tm1] = self.v_tbl_s_tm1 + self.alpha * 0.1 * (s[2,0] + self.gamma * self.v_tbl[l_x, l_y] - self.v_tbl_s_tm1)
self.v_tbl[l_x_tm1, l_y_tm1] = self.v_tbl_s_tm1 + self.alpha * 0.1 * (s[2,0] + self.gamma * self.v(s) - self.v_tbl_s_tm1)
def q_tbl_update(self, s, a):
l_x, l_y, l_x_tm1, l_y_tm1, l_a_tm1 = self.update_get_indices(s, self.s_tm1, self.a_tm1)
# back up old state-action value once
# self.q_tbl_sa_tm1 = self.q_tbl[l_x_tm1, l_y_tm1, l_a_tm1].copy()
self.q_tbl_sa_tm1 = self.q(self.s_tm1, self.a_tm1).copy()
# perform update, SB2nded pg. ?, eq. ?
# self.q_tbl[l_x_tm1, l_y_tm1, l_a_tm1] = self.q_tbl_sa_tm1 + self.alpha * (self.s[2,0] + self.gamma * self.q_tbl[l_x, l_y, l_a_tm1] - self.q_tbl_sa_tm1)
self.q_tbl[l_x_tm1, l_y_tm1, l_a_tm1] = self.q_tbl_sa_tm1 + self.alpha * (self.s[2,0] + self.gamma * self.q(s, self.a_tm1) - self.q_tbl_sa_tm1)
def q_Q_tbl_update(self, s, a):
l_x, l_y, l_x_tm1, l_y_tm1, l_a_tm1 = self.update_get_indices(s, self.s_tm1, self.a_tm1)
# back up old state-action value once Q-Learning
# self.q_Q_tbl_tm1 = self.q_Q_tbl[l_x_tm1, l_y_tm1, l_a_tm1].copy()
self.q_Q_tbl_tm1 = self.q_Q(self.s_tm1, self.a_tm1).copy()
# perform update, SB2nded pg. ?, eq. ?
# print "q_Q update max(Q_q(S, a))", np.max(self.q_Q_tbl[l_x, l_y, l_a_tm1])
# print "self.q_Q_tbl[l_x, l_y, l_a_tm1]", self.q_Q_tbl[l_x, l_y, :]
self.q_Q_tbl[l_x_tm1, l_y_tm1, l_a_tm1] = self.q_Q_tbl_tm1 + self.alpha * (self.s[2,0] + self.gamma * np.max(self.q_Q_tbl[l_x, l_y, :]) - self.q_Q_tbl_tm1)
# self.q_Q_tbl[l_x_tm1, l_y_tm1, l_a_tm1] = self.q_Q_tbl_tm1 + self.alpha * (self.s[2,0] + self.gamma * np.max(self.q_Q_tbl[l_x, l_y, l_a_tm1]) - self.q_Q_tbl_tm1)
def q_SARSA_tbl_update(self, s, a):
l_x, l_y, l_x_tm1, l_y_tm1, l_a_tm1 = self.update_get_indices(s, self.s_tm1, self.a_tm1)
# back up old state-action value once Q-Learning
# self.q_SARSA_tbl_tm1 = self.q_SARSA_tbl[l_x_tm1, l_y_tm1, l_a_tm1].copy()
self.q_SARSA_tbl_tm1 = self.q_SARSA(self.s_tm1, self.a_tm1).copy()
# perform update, SB2nded pg. ?, eq. ?
# self.q_SARSA_tbl[l_x_tm1, l_y_tm1, l_a_tm1] = self.q_SARSA_tbl_tm1 + self.alpha * (self.s[2,0] + (self.gamma * self.q_SARSA_tbl[l_x, l_y, self.a]) - self.q_SARSA_tbl_tm1)
self.q_SARSA_tbl[l_x_tm1, l_y_tm1, l_a_tm1] = self.q_SARSA_tbl_tm1 + self.alpha * (self.s[2,0] + (self.gamma * self.q_SARSA(s, a)) - self.q_SARSA_tbl_tm1)
# policies
    def policy(self, q, s, epsilon = 0.0):
        # forward epsilon so the configured exploration rate is actually used
        return self.policy_func(q, s, epsilon = epsilon)
    def policy_random(self, q, s, epsilon = 0.0):
        return np.random.randint(len(self.actions), size=self.a.shape)
def policy_epsilon_greedy(self, q, s, epsilon = 0.05):
if np.random.uniform() < epsilon:
return self.policy_random(q, s)
else:
# get best action according to current q estimate
q_s = q[int(s[0,0]), int(s[1,0])]
# print "%s.policy_epsilon_greedy q_s = %s" % (self.__class__.__name__, q_s)
a_s = np.argmax(q_s).reshape(self.a.shape)
# print "%s.policy_epsilon_greedy a_s = %s" % (self.__class__.__name__, a_s)
return a_s
def step(self, s):
# stop episode
if self.terminal:
self.terminal_ -= 1
if self.repr == "approximation":
if not hasattr(self, "avg_loss"):
self.avg_loss = 0.0
self.avg_loss = 0.9 * self.avg_loss + 0.1 * np.sum([self.v_fa_training_loss, self.q_fa_training_loss, self.q_Q_fa_training_loss, self.q_SARSA_fa_training_loss])
print "tc", self.v_fa_training_cnt, self.v_fa_training_loss, self.q_fa_training_cnt, self.q_fa_training_loss, self.q_Q_fa_training_cnt, self.q_Q_fa_training_loss, self.q_SARSA_fa_training_cnt, self.q_SARSA_fa_training_loss
print "avg loss", self.avg_loss
# sensory measurement: [x, y, reward].T
self.s = s.copy()
# print "%s.step s = %s" % (self.__class__.__name__, self.s)
# current state
l_x = int(self.s[0,0])
l_y = int(self.s[1,0])
# last state
l_x_tm1 = int(self.s_tm1[0,0])
l_y_tm1 = int(self.s_tm1[1,0])
l_a_tm1 = int(self.a_tm1[0,0])
# print "l", l_x, l_y, "l_tm1", l_x_tm1, l_y_tm1
# update v
# print "v", l_x, l_y, self.v_tbl[l_x, l_y]
# update value functions
# v
self.v_update(self.s)
# q with td0 update
self.q_update(self.s, self.a)
# q with Q update
self.q_Q_update(self.s, self.a)
# policy: some functional thing that produces an action
if self.sensorimotor_loop == "td_0_prediction":
self.a = self.policy(self.q_tbl, self.s)
elif self.sensorimotor_loop == "td_0_off_policy_control":
# back up old q_Q for off policy foo
self.a = self.policy(self.q_Q_tbl, self.s, epsilon = self.epsilon)
elif self.sensorimotor_loop == "td_0_on_policy_control":
self.a = self.policy(self.q_SARSA_tbl, self.s, epsilon = self.epsilon)
# print self.a
# q with sarsa update
self.q_SARSA_update(self.s, self.a)
# back up state
self.s_tm1 = self.s.copy()
# back up action
self.a_tm1 = self.a.copy()
self.t += 1
return self.a
################################################################################
# operations
def plot_init(ev):
plt.ion()
fig = plt.figure()
# sensorimotor_loop
smls = []
for a in ev.agents:
smls.append(a.sensorimotor_loop)
fig.suptitle("TD(0) learning of v and q, %d agents using %s" % (len(ev.agents), ", ".join(smls)))
gs_numcol = 1 + 1 # 1 + 1 + 4 # 3
gs = gridspec.GridSpec(len(ev.agents) * 4, gs_numcol)
axs = []
# plt.subplots_adjust(left=0.2)
# plt.subplots_adjust(bottom=-0.2)
for i, a in enumerate(ev.agents):
# # subplothost foo double labels
# ax_s = SubplotHost(fig, gs[i*2+3,0])
# ax_v = SubplotHost(fig, gs[i*2+3,1])
# ax_q = SubplotHost(fig, gs[i*2,:])
# ax_q_Q = SubplotHost(fig, gs[i*2+1,:])
# ax_q_SARSA = SubplotHost(fig, gs[i*2+2,:])
axs.append([
# fig.add_subplot(gs[gs_numcol*i]),
# fig.add_subplot(gs[gs_numcol*i+1]),
# fig.add_subplot(gs[gs_numcol*i+2:])
# # subplothost foo double labels
# fig.add_subplot(ax_s),
# fig.add_subplot(ax_v),
# fig.add_subplot(ax_q),
# fig.add_subplot(ax_q_Q),
# fig.add_subplot(ax_q_SARSA),
fig.add_subplot(gs[i*2+3,0]),
fig.add_subplot(gs[i*2+3,1]),
fig.add_subplot(gs[i*2,:]),
fig.add_subplot(gs[i*2+1,:]),
fig.add_subplot(gs[i*2+2,:]),
])
axs[-1][0].set_title("Agent %d state (position on grid)" % i, fontsize=8)
axs[-1][0].set_xlabel("x")
axs[-1][0].set_ylabel("y")
axs[-1][0].set_aspect(1)
axs[-1][1].set_title("Agent %d state value v(s)" % i, fontsize = 8)
axs[-1][1].set_xlabel("x")
axs[-1][1].set_ylabel("y")
axs[-1][1].set_aspect(1)
ax_q = axs[-1][2]
ax_q.set_title("Agent %d state-action value q(s,a)" % i, fontsize = 8)
ax_q.set_xlabel("f(a, x)")
ax_q.set_ylabel("y")
ax_q.set_aspect(1)
# ax_q.set_aspect((len(a.actions)*ev.num_x)/float(ev.num_y))
# ax_q.set_aspect((len(a.actions)*ev.num_x)/float(ev.num_y))
axs[-1][3].set_aspect(1)
axs[-1][4].set_aspect(1)
return fig, gs, axs
def plot_pcolor_coordinates():
pass
def plot_draw_ev(fig, gs, axs, ev):
for i, a in enumerate(ev.agents):
# print "plot_draw_ev s_%d = %s" % (i, ev.s[i])
# plot state
ax_s = axs[i][0]
# clean up
ax_s.clear()
# plot state
# print "ev.s[i].shape", ev.s[i].shape, a.v_tbl.shape, a.q_tbl.shape
ax_s.pcolormesh(ev.s[i].T, cmap=plt.get_cmap("gray"))
# ax_s.pcolormesh(ev.s[i][::-1], cmap=plt.get_cmap("gray"))
ax_s.plot([ev.goal[0,0] + 0.5], [ev.goal[1,0] + 0.5], "ro", markersize = 20, alpha= 0.5)
ax_s.set_title("Agent %d state (position on grid)" % i, fontsize=8)
ax_s.set_xlabel("x")
ax_s.set_ylabel("y")
ax_s.set_aspect(1)
# meshgrid
# v
v_img = np.zeros((ev.num_x, ev.num_y))
for k in range(ev.num_x):
for l in range(ev.num_y):
v_img[k,l] = a.v(np.array([[k, l, 0]]).T)
ev.agents[i].v_tbl = v_img.T
# q
q_img = np.zeros((ev.num_x, ev.num_y, 9))
for k in range(ev.num_x):
for l in range(ev.num_y):
for m in range(9):
q_img[k,l,m] = a.q(np.array([[k, l]]).T, np.array([[m]]).T)
# q_img_full = ev.agents[i].q_tbl
ev.agents[i].q_tbl = q_img.copy().transpose([0, 1, 2])
# q_Q
q_Q_img = np.zeros((ev.num_x, ev.num_y, 9))
for k in range(ev.num_x):
for l in range(ev.num_y):
for m in range(9):
q_Q_img[k,l,m] = a.q_Q(np.array([[k, l]]).T, np.array([[m]]).T)
ev.agents[i].q_Q_tbl = q_Q_img.copy().transpose([0, 1, 2])
# q_SARSA
q_SARSA_img = np.zeros((ev.num_x, ev.num_y, 9))
for k in range(ev.num_x):
for l in range(ev.num_y):
for m in range(9):
q_SARSA_img[k,l,m] = a.q_SARSA(np.array([[k, l]]).T, np.array([[m]]).T)
ev.agents[i].q_SARSA_tbl = q_SARSA_img.copy().transpose([0, 1, 2])
# plot state value
ax_v = axs[i][1]
ax_v.clear()
# v_img = np.log(ev.agents[i].v_tbl + 1.0)
v_img = ev.agents[i].v_tbl
ax_v.pcolormesh(v_img, cmap=plt.get_cmap("gray"))#, vmin = 0.0) # , vmax = 1.0)
ax_v.set_title("Agent %d state value v(s)" % i, fontsize = 8)
ax_v.set_xlabel("x")
ax_v.set_ylabel("y")
ax_v.set_aspect(1)
# plot state-action value
ax_q = axs[i][2]
ax_q.clear()
ax_q.set_title("Q_{TD(0)", fontsize=8)
q_img = ev.agents[i].q_tbl
print "q_img", np.min(q_img), np.max(q_img)
q_img = dimensional_stacking(np.transpose(q_img, [1, 0, 2]), [2, 1], [0])
# print "q_img.shape", q_img.shape
ax_q.pcolormesh(q_img, cmap=plt.get_cmap("gray"))#, vmin = 0.0)#, vmax = 2.0)
ax_q.set_title("Agent %d state-action value q(s,a)" % i, fontsize = 8)
# ax_q.set_xlabel("f(a, x)")
ax_q.set_ylabel("y")
# ax_q.set_aspect((len(a.actions)*ev.num_x)/float(ev.num_y))
# ax_q.set_aspect((len(a.actions)*ev.num_x)/float(ev.num_y))
ax_q.set_xticks([])
# ax_q_x = ax_q.twiny()
# # ax_q_x.set_xlim((0, 3))
# offset = 0.0, -25
# new_axisline = ax_q_x.get_grid_helper().new_fixed_axis
# ax_q_x.axis["bottom"] = new_axisline(loc="bottom", axes=ax_q_x, offset=offset)
# ax_q_x.axis["top"].set_visible(False)
# ax_q.set_xticks(np.arange(5+1))# + 0.5)
# # ax_q.set_xticklabels(np.tile(measures.values(), 3))
# ax_q_x.set_xticks(np.arange(9+1))# + 0.5)
# # ax_q_x.set_xticklabels(robots.values())
# ax_q_x.set_aspect(1)
# plot state-action value
ax_q_Q = axs[i][3]
ax_q_Q.clear()
ax_q_Q.set_title("Q_{Q}, min = %f, max = %f" % (np.min(ev.agents[i].q_Q_tbl), np.max(ev.agents[i].q_Q_tbl)), fontsize=8)
q_Q_img = ev.agents[i].q_Q_tbl
print "q_Q_img", np.min(q_Q_img), np.max(q_Q_img)
q_img_Q = dimensional_stacking(np.transpose(q_Q_img, [1, 0, 2]), [2, 1], [0])
# q_img = dimensional_stacking(ev.agents[i].q_SARSA_tbl, [2, 1], [0])
# print "q_img.shape", q_img.shape
ax_q_Q.pcolormesh(q_img_Q, cmap=plt.get_cmap("gray"))#, vmin = 0.0) #, vmax = 2.0)
ax_q_Q.set_aspect(1)
ax_q_Q.set_xticks([])
# plot state-action value
ax_q_SARSA = axs[i][4]
ax_q_SARSA.clear()
ax_q_SARSA.set_title("Q_{SARSA} min = %f, max = %f" % (np.min(ev.agents[i].q_SARSA_tbl), np.max(ev.agents[i].q_SARSA_tbl)), fontsize=8)
q_SARSA_img = ev.agents[i].q_SARSA_tbl
print "q_SARSA_img", np.min(q_SARSA_img), np.max(q_SARSA_img)
q_img_SARSA = dimensional_stacking(np.transpose(q_SARSA_img, [1, 0, 2]), [2, 1], [0])
# print "q_img.shape", q_img.shape
mpabl = ax_q_SARSA.pcolormesh(q_img_SARSA, cmap=plt.get_cmap("gray"))#, vmin = 0.0, vmax = 5.0)
ax_q_SARSA.set_aspect(1)
# plt.colorbar(mpabl, ax=ax_q_SARSA, orientation="horizontal")
ax_q_SARSA.set_xticks(np.arange(0, 5*9, 2.5))
ticklabels = ["x=x,a=nop", "x=x,a=w", "x=x,a=nw", "x=x,a=n", "x=x,a=ne", "x=x,a=e", "x=x,a=se", "x=x,a=s", "x=x,a=sw"]
# ticklabels.insert(0, "")
ticklabels2 = []
for i_q_tl, q_tl in enumerate(ticklabels):
ticklabels2.append("")
ticklabels2.append(q_tl)
ticklabels2.append("")
ax_q_SARSA.set_xticklabels(ticklabels2, fontsize=8)
plt.draw()
plt.pause(1e-3)
def get_agent(args):
# if args.sensorimotor_loop == "td_0_prediction":
# return TD0PredictionAgent(ndim_s = 3, ndim_a = 1, ndim_x = args.world_x, ndim_y = args.world_y, alpha = args.alpha, gamma = args.gamma)
return TD0PredictionAgent(args)
# elif args.sensorimotor_loop == "td_0_off_policy_control":
# return TD0OffPolicyControlAgent(ndim_s = 3, ndim_a = 1, ndim_x = args.world_x, ndim_y = args.world_y, alpha = args.alpha, gamma = args.gamma)
# elif args.sensorimotor_loop == "td_0_on_policy_control":
# else:
# print "Unknown sm loop %s, exiting" % (args.sensorimotor_loop)
# sys.exit(1)
def rl_experiment(args):
# numepisodes = args.numepisodes
# maxsteps = args.maxsteps
# plotfreq = args.plotfreq
setattr(args, "ndim_s", 3)
setattr(args, "ndim_a", 1)
setattr(args, "ndim_x", args.world_x)
setattr(args, "ndim_y", args.world_y)
if args.sensorimotor_loop == "td0":
args.sensorimotor_loop = "td_0_prediction"
elif args.sensorimotor_loop in ["q", "Q"]:
args.sensorimotor_loop = "td_0_off_policy_control"
elif args.sensorimotor_loop in ["sarsa", "SARSA"]:
args.sensorimotor_loop = "td_0_on_policy_control"
ag = get_agent(args)
# ag2 = TD0PredictionAgent(ndim_s = 3, ndim_a = 1)
ev = GridEnvironment(agents = [ag], num_x = args.world_x, num_y = args.world_y)
# ag.q_Q_tbl[ev.goal[0,0], ev.goal[1,0],:] = 0.1
# ag.q_SARSA_tbl[ev.goal[0,0], ev.goal[1,0],:] = 0.0
# s = ag.s
# a = ag.a
fig, gs, axs = plot_init(ev)
print "environment", ev
print " agent", ag
for i in range(args.numepisodes):
# reset agent
ev.reset()
t = 0
terminal = False
while not terminal and t < args.maxsteps:
# for t in range(maxsteps):
# print "epi %d, step %d" % (i, t)
# step the world
ev.step()
# print "td_0_prediction a[t = %d] = %s, s[t = %d] = %s" % (t, a, t, s)
if (i * args.maxsteps + t) % args.plotfreq == 0:
print "plotting at step %d" % (i * args.maxsteps + t)
plot_draw_ev(fig, gs, axs, ev)
terminal = np.all(np.array([agent.terminal_ < 1 for agent in ev.agents]))
t += 1
print "epi %d, final step %d, avg loss = %f" % (i, t, ev.agents[0].avg_loss)
print "ev.steps = %d" % (ev.t)
print "ag.steps = %d" % (ag.t)
# save result
for i, agent in enumerate(ev.agents):
np.save("td0_ag%d_v.npy" % i, agent.v_tbl)
np.save("td0_ag%d_q.npy" % i, agent.q_tbl)
plt.ioff()
plt.show()
def main(args):
rl_experiment(args)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--alpha", default=1e-2, type=float, help="Learning rate \alpha [0.01]")
parser.add_argument("-e", "--epsilon", default=0.1, type=float, help="\epsilon-greedy \epsilon [0.1]")
parser.add_argument("-g", "--gamma", default=0.8, type=float, help="Discount factor \gamma [0.8]")
parser.add_argument("-ne", "--numepisodes", default=1000, type=int, help="Number of episodes [500]")
parser.add_argument("-ms", "--maxsteps", default=100, type=int, help="Maximum number of steps per episodes [100]")
parser.add_argument("-sm", "--sensorimotor_loop", default="td_0_prediction", type=str, help="Which sm loop (Learner), one of " + ", ".join(sensorimotor_loops) + " [td_0_prediction]")
parser.add_argument("-p", "--plotfreq", default=1000, type=int, help="Plotting interval in steps [1000]")
parser.add_argument("-r", "--repr", default="table", type=str, help="Value function representation [table]")
parser.add_argument("-wx", "--world_x", default=5, type=int, help="Size of world along x [5]")
parser.add_argument("-wy", "--world_y", default=5, type=int, help="Size of world along y [5]")
args = parser.parse_args()
main(args)
# main_extended(args)
|
#!/usr/bin/env python3
# Copyright 2019 Genialis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import shlex
# Get package metadata from '__about__.py' file
about = {}
with open('../__about__.py') as f:
exec(f.read(), about)
# -- General configuration ------------------------------------------------
# The extension modules to enable.
extensions = ['sphinx.ext.autodoc']
# The suffix(es) of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = about['__title__']
version = about['__version__']
release = version
author = about['__author__']
copyright = about['__copyright__']
# Set variables that can be used by reStructuredText documents
rst_epilog = """
.. |project_name| replace:: {project}
.. |project_git_repo_link| replace:: `{project}'s git repository`_
.. _{project}'s git repository: {git_repo_url}
""".format(
project=project, git_repo_url=about['__git_repo_url__']
)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Sort members as they appear in the source code
autodoc_member_order = 'bysource'
# Warn about all references where the target cannot be found
nitpicky = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.
html_theme = 'sphinx_rtd_theme'
# Output file base name for HTML help builder.
htmlhelp_basename = 'resolwe-runtime-utilsdoc'
|
import telebot
import tokken
import requests
import os
import random
appid = "e03925cafba20b665dc891e119dcd297"
# city_id for Abakan
city_id = 1512236
bot = telebot.TeleBot(tokken.token)
@bot.message_handler(commands=['start'])
def handle_start(message):
user_markup = telebot.types.ReplyKeyboardMarkup()
user_markup.row('\U0001F4DDновости', '\U0001F302погода',)
user_markup.row('\U00002665чаты', '\U0001F695такси', '\U000026A0sos')
user_markup.row('/start', '\U0001F307Абакан')
bot.send_message(message.chat.id, 'Привет! Меня зовут Коля\U0001F60C\n\nМогу тебе показать:\n\U0001F4DDновости\n\U0001F302погоду\n\U00002764чаты\n\U0001F695номера такси\n\U0001F6A8номера экстренных служб\n\U0001F307рассказать про Абакан', reply_markup=user_markup)
@bot.message_handler(content_types=["text"])
def handle_text(message):
if message.text == "\U00002665чаты":
user_markup = telebot.types.ReplyKeyboardMarkup()
user_markup.row('\U00002764знакомства','\U0001F697автолюбители')
user_markup.row('/start')
bot.send_message(message.chat.id, 'Выбор чатов', reply_markup=user_markup)
if message.text == "\U0001F4DDновости":
bot.send_message(message.chat.id, "\U00002709Последние события\n\U00002712Интересные статьи\n\U0001F3ADОбзоры\n\U0001F53Dпереходи по ссылке: \n@abakanlife")
if message.text == "\U0001F697автолюбители":
bot.send_message(message.chat.id, "\U0001F539события\n\U0001F539ситуации на дорогах\n\U0001F539покупка-продажа\n\U0001F53Dпереходи по ссылке: \n@auto_19")
if message.text == "\U00002764знакомства":
bot.send_message(message.chat.id, "\U0001F538добавляемся\n\U0001F538пишем возраст\n\U0001F538интересы\n\U0001F538общаемся\U0001F60C\n\U0001F53Dпереходи по ссылке: @abk_chat")
if message.text == "\U0001F695такси":
bot.send_message(message.chat.id, "Maxim\U0001F695 83902300102\nТакси Саяны\U0001F695 83902222222\nЛидер\U0001F695 83902222666")
if message.text[:7] == "\U0001F302погода" or message.text[:7] == "погода" :
city = message.text[7:]
r = requests.get("http://api.openweathermap.org/data/2.5/weather",
params={'id': city_id, 'units': 'metric', 'lang': 'ru', 'APPID': appid})
data = r.json()
temp = data["main"]["temp"]
conditions=data['weather'][0]['description']
bot.send_message(message.chat.id, "\U000026C4Погода в Абакане:{} \nТемпература: {} C".format(city, temp))
bot.send_message(message.chat.id, "Обстановочка{}: {} ".format(city, conditions))
if message.text == "\U0001F307Абакан":
bot.send_message(message.chat.id, "http://telegra.ph/Abakan-11-02")
if message.text == "\U000026A0sos":
bot.send_message(message.chat.id, "\U000026A0Единый телефон экстренных служб:\n112\n\U000026A0МЧС России:\n101\n\U000026A0Скорая помощь Абакан:ул. Т. Шевченко, 83 «А»\n83902226372\n83902223497")
bot.polling()
|
x = [(i, i**2) for i in range(10)]
|
import utils.dtw as dtw
import time
import numpy as np
import math
import csv
import os
import sys
from utils.constants import nb_classes, class_modifier_add, class_modifier_multi, max_seq_len
from utils.proto_select import selector_selector, random_selection, center_selection, k_centers_selection, border_selection, spanning_selection
def get_dtwfeatures(proto_data, proto_number, local_sample):
local_sample_length = np.shape(local_sample)[0]
features = np.zeros((local_sample_length, proto_number))
for prototype in range(proto_number):
local_proto = proto_data[prototype]
output, cost, DTW, path = dtw.dtw(local_proto, local_sample, extended=True)
for f in range(local_sample_length):
features[f, prototype] = cost[path[0][f]][path[1][f]]
return features
def read_dtw_matrix(version):
if not os.path.exists(os.path.join("data", "all-"+version+"-dtw-matrix.txt")):
exit("Please run cross_dtw.py first")
return np.genfromtxt(os.path.join("data", "all-"+version+"-dtw-matrix.txt"), delimiter=' ')
if __name__ == "__main__":
if len(sys.argv) < 5:
print("Error, Syntax: {0} [version] [prototype selection] [classwise/independent] [prototype number]".format(sys.argv[0]))
exit()
version = sys.argv[1]
selection = sys.argv[2]
classwise = sys.argv[3]
proto_number = int(sys.argv[4])
print("Starting: {} {} {}".format(version, selection, classwise))
# load settings
full_train_file = os.path.join("data", version + "_TRAIN")
full_test_file = os.path.join("data", version + "_TEST")
# load data
full_train = np.genfromtxt(full_train_file, delimiter=',')
full_test = np.genfromtxt(full_test_file, delimiter=',')
no_classes = nb_classes(version)
# print(proto_number)
train_max = np.max(full_train[:,1:])
train_min = np.min(full_train[:,1:])
train_data = 2. * (full_train[:,1:] - train_min) / (train_max - train_min) - 1.
train_labels = (full_train[:,0] + class_modifier_add(version))*class_modifier_multi(version)
train_number = np.shape(train_labels)[0]
#print(np.shape(train_data))
#print(np.shape(train_labels))
test_data = 2. * (full_test[:,1:] - train_min) / (train_max - train_min) - 1.
test_labels = (full_test[:,0] + class_modifier_add(version))*class_modifier_multi(version)
#print(np.shape(test_data))
#print(np.shape(test_labels))
test_number = np.shape(test_labels)[0]
seq_length = max_seq_len(version)
train_data = train_data.reshape((-1,seq_length, 1))
test_data = test_data.reshape((-1, seq_length, 1))
distances = train_number if selection == "random" else read_dtw_matrix(version)
if classwise == "classwise":
proto_loc = np.zeros(0, dtype=np.int32)
proto_factor = int(proto_number / no_classes)
for c in range(no_classes):
cw = np.where(train_labels == c)[0]
if selection == "random":
cw_distances = []
else:
cw_distances = distances[cw]
cw_distances = cw_distances[:,cw]
cw_proto = selector_selector(selection, proto_factor, cw_distances)
proto_loc = np.append(proto_loc, cw[cw_proto])
else:
proto_loc = selector_selector(selection, proto_number, distances)
proto_data = train_data[proto_loc]
print(proto_loc)
# start generation
test_label_fileloc = os.path.join("data", "all-test-label-{}-{}-{}-{}.txt".format(version, selection, classwise, proto_number))
test_raw_fileloc = os.path.join("data", "all-raw-test-data-{}-{}-{}-{}.txt".format(version, selection, classwise, proto_number))
test_dtw_fileloc = os.path.join("data", "all-dtw_features-test-data-{}-{}-{}-{}.txt".format(version, selection, classwise, proto_number))
test_combined_fileloc = os.path.join("data", "all-dtw_features-plus-raw-test-data-{}-{}-{}-{}.txt".format(version, selection, classwise, proto_number))
train_label_fileloc = os.path.join("data", "all-train-label-{}-{}-{}-{}.txt".format(version, selection, classwise, proto_number))
train_raw_fileloc = os.path.join("data", "all-raw-train-data-{}-{}-{}-{}.txt".format(version, selection, classwise, proto_number))
train_dtw_fileloc = os.path.join("data", "all-dtw_features-train-data-{}-{}-{}-{}.txt".format(version, selection, classwise, proto_number))
train_combined_fileloc = os.path.join("data", "all-dtw_features-plus-raw-train-data-{}-{}-{}-{}.txt".format(version, selection, classwise, proto_number))
# test set
with open(test_label_fileloc, 'w') as test_label_file, open(test_raw_fileloc, 'w') as test_raw_file, open(
test_dtw_fileloc, 'w') as test_dtw_file, open(test_combined_fileloc, 'w') as test_combined_file:
writer_test_label = csv.writer(test_label_file, quoting=csv.QUOTE_NONE, delimiter=" ")
writer_test_raw = csv.writer(test_raw_file, quoting=csv.QUOTE_NONE, delimiter=" ")
writer_test_dtw = csv.writer(test_dtw_file, quoting=csv.QUOTE_NONE, delimiter=" ")
writer_test_combined = csv.writer(test_combined_file, quoting=csv.QUOTE_NONE, delimiter=" ")
for sample in range(test_number):
local_sample = test_data[sample]
features = get_dtwfeatures(proto_data, proto_number, local_sample)
class_value = test_labels[sample]
# write files
feature_flat = features.reshape(seq_length * proto_number)
local_sample_flat = local_sample.reshape(seq_length)
writer_test_raw.writerow(local_sample_flat)
writer_test_dtw.writerow(feature_flat)
writer_test_combined.writerow(np.append(local_sample_flat, feature_flat))
writer_test_label.writerow(["{}-{}_test.png".format(class_value, sample), class_value])
if sample % (test_number // 16) == 0:
print("{} {}%: Test < {} Done".format(version, str(round(100. * sample / test_number, 1)),str(sample)))
print("{}: Test Done".format(version))
# train set
with open(train_label_fileloc, 'w') as train_label_file, open(train_raw_fileloc, 'w') as train_raw_file, open(
train_dtw_fileloc, 'w') as train_dtw_file, open(train_combined_fileloc, 'w') as train_combined_file:
writer_train_label = csv.writer(train_label_file, quoting=csv.QUOTE_NONE, delimiter=" ")
writer_train_raw = csv.writer(train_raw_file, quoting=csv.QUOTE_NONE, delimiter=" ")
writer_train_dtw = csv.writer(train_dtw_file, quoting=csv.QUOTE_NONE, delimiter=" ")
writer_train_combined = csv.writer(train_combined_file, quoting=csv.QUOTE_NONE, delimiter=" ")
for sample in range(train_number):
local_sample = train_data[sample]
features = get_dtwfeatures(proto_data, proto_number, local_sample)
class_value = train_labels[sample]
# write files
feature_flat = features.reshape(seq_length * proto_number)
local_sample_flat = local_sample.reshape(seq_length)
writer_train_raw.writerow(local_sample_flat)
writer_train_dtw.writerow(feature_flat)
writer_train_combined.writerow(np.append(local_sample_flat, feature_flat))
writer_train_label.writerow(["{}-{}_train.png".format(class_value, sample), class_value])
if sample % (train_number // 16) == 0:
print("{} {}%: Training < {} Done".format(version, str(round(100. * sample / train_number,1)),str(sample)))
print("{}: Training Done".format(version))
print("Done")
|
import torch
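# Pairwise attention-head redundancy check: for every head pair (i, j) out of 12,
# load the saved attention maps, compute the cosine similarity of the two heads'
# attention columns for each of the 49 tokens, and count how often it exceeds 0.7.
# The hard-coded denominators (50000, 49, 4) appear to turn the raw count into an
# average rate over samples, tokens, and batch entries.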
for i in range(12):
for j in range(i+1,12):
count = 0
for idx in range(361):
attn_map = torch.load(f'attn_map/{idx}.pth', map_location = torch.device('cuda'))
for batch in range(attn_map[0].shape[0]):
for token in range(49):
if(torch.dot(attn_map[0][batch,i,:,token], attn_map[0][batch,j,:,token])/torch.norm(attn_map[0][batch,i,:,token])/torch.norm(attn_map[0][batch,j,:,token]) > 0.7):
count = count + 1
similarity = count / 50000 / 49 / 4
print(i, j, count, similarity)
|
import sys
import subprocess
import docker
BASE_DOCKERFILE = '''FROM python:3.8-slim
WORKDIR /code
COPY requirements.txt .
RUN pip install -r requirements.txt
COPY src/ .
EXPOSE 5000/tcp
CMD [ "python", "./server.py"]
'''
DOCKER_TEMPLATE = '''FROM underpants_base:latest
ENV SERVER_MODE {}
ENV SLEEP_TIME {}
'''
SERVER_MODES = {
'collect-test',
'sauce-test',
'profit-test',
'unreliablesauce-test',
'increment-test',
'decrement-test',
'orz-test',
}
SLEEP_TIME = 0
if __name__ == '__main__':
if '--clean' in sys.argv:
client = docker.from_env()
image_set = set([tag for image in client.images.list() for tag in image.tags])
tag_set = set([f'{repo}:latest' for repo in SERVER_MODES | {'underpants_base'}])
for tag in tag_set & image_set:
client.images.remove(tag)
mk_docker_cmd = lambda tag: ['docker', 'build', '-f', '-', '-t', tag, '.'] # noqa
subprocess.run(
mk_docker_cmd('underpants_base:latest'), input=BASE_DOCKERFILE, text=True)
for mode in SERVER_MODES:
subprocess.run(
mk_docker_cmd(f'{mode}:latest'), input=DOCKER_TEMPLATE.format(mode, SLEEP_TIME),
text=True)
|
class GameTime(object):
def __init__(self):
self.seconds = 0
self.minutes = 0
self.hours = 0
self.days = 0
    def pass_turns(self, turns=1):
        minute_updated = False
        hours_updated = False
        days_updated = False
        seconds = turns * 6
        self.seconds += seconds
        # use while-loops so a large number of turns can roll over more than
        # one minute/hour/day in a single call
        while self.seconds >= 60:
            self.seconds -= 60
            self.minutes += 1
            minute_updated = True
        while self.minutes >= 60:
            self.minutes -= 60
            self.hours += 1
            hours_updated = True
        while self.hours >= 24:
            self.hours -= 24
            self.days += 1
            days_updated = True
        return TimeUpdateResult(minute_updated, hours_updated, days_updated)
class TimeUpdateResult(object):
__slots__ = ["minute_updated", "hours_updated", "days_updated"]
def __init__(self, minute_updated=False, hours_updated=False, days_updated=False):
self.minute_updated = minute_updated
self.hours_updated = hours_updated
self.days_updated = days_updated
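# A minimal usage sketch (illustrative, not from the original source): ten
# 6-second turns roll exactly one minute over.
if __name__ == "__main__":
    clock = GameTime()
    result = clock.pass_turns(10)        # 10 turns * 6 s = 60 s
    print(clock.minutes, clock.seconds)  # -> 1 0
    print(result.minute_updated)         # -> True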
|
from app.notify_client import NotifyAdminAPIClient
class PlatformStatsAPIClient(NotifyAdminAPIClient):
# Fudge assert in the super __init__ so
# we can set those variables later.
def __init__(self):
super().__init__("a" * 73, "b")
def get_aggregate_platform_stats(self, params_dict=None):
return self.get("/platform-stats", params=params_dict)
|
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
"""The ``spyne.server.twisted`` module contains a server transport compatible
with the Twisted event loop. It uses the TwistedWebResource object as transport.
Also see the twisted examples in the examples directory of the source
distribution.
This module is EXPERIMENTAL. Your mileage may vary. Patches are welcome.
"""
import logging
logger = logging.getLogger(__name__)
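# A minimal wiring sketch (illustrative only, not part of the original module;
# spyne_application is an assumed, already-built spyne Application instance):
#
#     from twisted.internet import reactor
#     from twisted.web.server import Site
#
#     resource = TwistedWebResource(spyne_application)
#     reactor.listenTCP(8000, Site(resource))
#     reactor.run()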
from twisted.python.log import err
from twisted.internet.interfaces import IPullProducer
from twisted.internet.defer import Deferred
from twisted.web.iweb import IBodyProducer
from twisted.web.iweb import UNKNOWN_LENGTH
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from zope.interface import implements
from spyne.auxproc import process_contexts
from spyne.server.http import HttpMethodContext
from spyne.server.http import HttpBase
from spyne.const.ansi_color import LIGHT_GREEN
from spyne.const.ansi_color import END_COLOR
from spyne.const.http import HTTP_405
def _reconstruct_url(request):
server_name = request.getRequestHostname()
server_port = request.getHost().port
if (bool(request.isSecure()), server_port) not in [(True, 443), (False, 80)]:
server_name = '%s:%d' % (server_name, server_port)
if request.isSecure():
url_scheme = 'https'
else:
url_scheme = 'http'
return ''.join([url_scheme, "://", server_name, request.uri])
class _Producer(object):
implements(IPullProducer)
deferred = None
def __init__(self, body, consumer):
""":param body: an iterable of strings"""
# check to see if we can determine the length
try:
len(body) # iterator?
self.length = sum([len(fragment) for fragment in body])
self.body = iter(body)
except TypeError:
self.length = UNKNOWN_LENGTH
self.deferred = Deferred()
self.consumer = consumer
def resumeProducing(self):
try:
chunk = self.body.next()
except StopIteration, e:
self.consumer.unregisterProducer()
if self.deferred is not None:
self.deferred.callback(self.consumer)
self.deferred = None
return
self.consumer.write(chunk)
def pauseProducing(self):
pass
def stopProducing(self):
if self.deferred is not None:
self.deferred.errback(
Exception("Consumer asked us to stop producing"))
self.deferred = None
class TwistedHttpTransport(HttpBase):
@staticmethod
    def decompose_incoming_envelope(prot, ctx, message):
        """This function is only called by the HttpRpc protocol so that the
        twisted web Request object is parsed into ``ctx.in_body_doc`` and
        ``ctx.in_header_doc``.
        """
request = ctx.in_document
ctx.method_request_string = '{%s}%s' % (prot.app.interface.get_tns(),
request.path.split('/')[-1])
logger.debug("%sMethod name: %r%s" % (LIGHT_GREEN,
ctx.method_request_string, END_COLOR))
ctx.in_header_doc = request.headers
ctx.in_body_doc = request.args
class TwistedWebResource(Resource):
"""A server transport that exposes the application as a twisted web
Resource.
"""
isLeaf = True
def __init__(self, app, chunked=False, max_content_length=2 * 1024 * 1024,
block_length=8 * 1024):
Resource.__init__(self)
self.http_transport = TwistedHttpTransport(app, chunked,
max_content_length, block_length)
self._wsdl = None
def render_GET(self, request):
_ahv = self.http_transport._allowed_http_verbs
if request.uri.endswith('.wsdl') or request.uri.endswith('?wsdl'):
return self.__handle_wsdl_request(request)
elif not (_ahv is None or "GET" in _ahv):
request.setResponseCode(405)
return HTTP_405
else:
return self.handle_rpc(request)
def render_POST(self, request):
return self.handle_rpc(request)
def handle_error(self, p_ctx, others, error, request):
resp_code = self.http_transport.app.out_protocol \
.fault_to_http_response_code(error)
request.setResponseCode(int(resp_code[:3]))
p_ctx.out_object = error
self.http_transport.get_out_string(p_ctx)
process_contexts(self.http_transport, others, p_ctx, error=error)
return ''.join(p_ctx.out_string)
def handle_rpc(self, request):
initial_ctx = HttpMethodContext(self.http_transport, request,
self.http_transport.app.out_protocol.mime_type)
initial_ctx.in_string = [request.content.getvalue()]
contexts = self.http_transport.generate_contexts(initial_ctx)
p_ctx, others = contexts[0], contexts[1:]
if p_ctx.in_error:
return self.handle_error(p_ctx, others, p_ctx.in_error, request)
else:
self.http_transport.get_in_object(p_ctx)
if p_ctx.in_error:
return self.handle_error(p_ctx, others, p_ctx.in_error, request)
else:
self.http_transport.get_out_object(p_ctx)
if p_ctx.out_error:
return self.handle_error(p_ctx, others, p_ctx.out_error, request)
self.http_transport.get_out_string(p_ctx)
process_contexts(self.http_transport, others, p_ctx)
def _cb_request_finished(request):
request.finish()
producer = _Producer(p_ctx.out_string, request)
producer.deferred.addErrback(err).addCallback(_cb_request_finished)
request.registerProducer(producer, False)
return NOT_DONE_YET
def __handle_wsdl_request(self, request):
ctx = HttpMethodContext(self.http_transport, request,
"text/xml; charset=utf-8")
url = _reconstruct_url(request)
try:
ctx.transport.wsdl = self._wsdl
if ctx.transport.wsdl is None:
from spyne.interface.wsdl.wsdl11 import Wsdl11
wsdl = Wsdl11(self.http_transport.app.interface)
wsdl.build_interface_document(url)
self._wsdl = ctx.transport.wsdl = wsdl.get_interface_document()
            assert ctx.transport.wsdl is not None
self.http_transport.event_manager.fire_event('wsdl', ctx)
for k,v in ctx.transport.resp_headers.items():
request.setHeader(k,v)
return ctx.transport.wsdl
except Exception, e:
ctx.transport.wsdl_error = e
self.http_transport.event_manager.fire_event('wsdl_exception', ctx)
raise
|
"""Update LLC Register to add Charge History table
Revision ID: 37d3726feb0a
Revises: 2282351403f3
Create Date: 2017-04-12 11:23:50.439820
"""
# revision identifiers, used by Alembic.
revision = '37d3726feb0a'
down_revision = '2282351403f3'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from flask import current_app
def upgrade():
op.create_table('local_land_charge_history',
sa.Column('id', sa.BigInteger(), primary_key=True),
sa.Column('llc_item', postgresql.JSONB(), nullable=False),
sa.Column('entry_number', sa.BigInteger(), primary_key=True),
sa.Column('cancelled', sa.Boolean(), nullable=False))
op.execute("GRANT SELECT ON local_land_charge_history TO " +
current_app.config.get("APP_SQL_USERNAME"))
op.execute("GRANT SELECT ON local_land_charge_history_id_seq TO " +
current_app.config.get("APP_SQL_USERNAME"))
def downgrade():
op.drop_table('local_land_charge_history')
|
import os
import random
import numpy as np
def set_random_seed(seed_num:int=42):
os.environ["PYTHONHASHSEED"] = str(seed_num)
random.seed(seed_num)
np.random.seed(seed_num)
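# Usage sketch (illustrative): call once at program start so hashing, random
# and numpy all draw from the same fixed seed.
if __name__ == "__main__":
    set_random_seed(123)
    print(np.random.rand())  # reproducible across runs with the same seed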
|
from typing import List, Optional

# TreeNode is assumed to be provided by the runtime environment (a binary tree
# node exposing .val, .left and .right).
class Solution:
def postorderTraversal(self, root: Optional[TreeNode]) -> List[int]:
ans = []
def postorder(root: Optional[TreeNode]) -> None:
if not root:
return
postorder(root.left)
postorder(root.right)
ans.append(root.val)
postorder(root)
return ans
|
"""
CertList - command ``getcert list``
====================================
"""
from insights.core import CommandParser
from insights.parsers import ParseException, keyword_search
from insights.core.plugins import parser
from insights.specs import Specs
@parser(Specs.getcert_list)
class CertList(CommandParser):
"""
Parse the output of ``getcert list``.
Stores data as a pseudo-dictionary, keyed on request ID. But it's much
easier to find requests based on their properties, using the ``search``
method. This finds requests based on their keys, e.g.
``search(stuck='no')``. Spaces and dashes are converted to underscores
in the keys being sought, so one can search for ``key_pair_storage`` or
``pre_save_command``. Multiple keys can be searched in the same call,
    e.g. ``search(CA="IPA", stuck='yes')``. If no keys are given, no requests
are returned.
Sample output::
Number of certificates and requests being tracked: 2.
Request ID '20130725003533':
status: MONITORING
stuck: no
key pair storage: type=NSSDB,location='/etc/dirsrv/slapd-LDAP-EXAMPLE-COM',nickname='Server-Cert',token='NSS Certificate DB',pinfile='/etc/dirsrv/slapd-LDAP-EXAMPLE-COM/pwdfile.txt'
certificate: type=NSSDB,location='/etc/dirsrv/slapd-LDAP-EXAMPLE-COM',nickname='Server-Cert',token='NSS Certificate DB'
CA: IPA
issuer: CN=Certificate Authority,O=LDAP.EXAMPLE.COM
subject: CN=master.LDAP.EXAMPLE.COM,O=LDAP.EXAMPLE.COM
expires: 2017-06-28 12:52:12 UTC
eku: id-kp-serverAuth,id-kp-clientAuth
pre-save command:
post-save command: /usr/lib64/ipa/certmonger/restart_dirsrv LDAP-EXAMPLE-COM
track: yes
auto-renew: yes
Request ID '20130725003602':
status: MONITORING
stuck: no
key pair storage: type=NSSDB,location='/etc/dirsrv/slapd-PKI-IPA',nickname='Server-Cert',token='NSS Certificate DB',pinfile='/etc/dirsrv/slapd-PKI-IPA/pwdfile.txt'
certificate: type=NSSDB,location='/etc/dirsrv/slapd-PKI-IPA',nickname='Server-Cert',token='NSS Certificate DB'
CA: IPA
issuer: CN=Certificate Authority,O=EXAMPLE.COM
subject: CN=ldap.EXAMPLE.COM,O=EXAMPLE.COM
expires: 2017-06-28 12:52:13 UTC
eku: id-kp-serverAuth,id-kp-clientAuth
pre-save command:
post-save command: /usr/lib64/ipa/certmonger/restart_dirsrv PKI-IPA
track: yes
auto-renew: yes
Attributes:
num_tracked (int): The number of 'tracked' certificates and requests,
as given in the first line of the output.
requests (list): The list of request IDs as they appear in the output,
as strings.
Examples:
        >>> certs = shared[CertList]
>>> certs.num_tracked # number of certificates tracked from first line
2
>>> len(certs) # number of requests stored - may be smaller than num_tracked
2
>>> certs.requests
['20130725003533', '20130725003602']
>>> '20130725003533' in certs
True
>>> certs['20130725003533']['issuer']
'CN=Certificate Authority,O=LDAP.EXAMPLE.COM'
>>> for request in certs.search(CA='IPA'):
        ...     print(request['certificate'])
...
type=NSSDB,location='/etc/dirsrv/slapd-LDAP-EXAMPLE-COM',nickname='Server-Cert',token='NSS Certificate DB'
type=NSSDB,location='/etc/dirsrv/slapd-PKI-IPA',nickname='Server-Cert',token='NSS Certificate DB'
"""
def parse_content(self, content):
"""
We're only interested in lines that contain a ':'. Special lines
start with 'Request ID' and 'Number of certificates...'; we handle
those separately. All other lines are stripped of surrounding white
space and stored as a key-value pair against the last request ID.
"""
self._data = {}
self.num_tracked = 0
self.requests = []
self._rq_list = []
current_request = None
_TRACK_HEADER = 'Number of certificates and requests being tracked: '
_RQ_HEADER = 'Request ID '
for line in content:
line = line.strip()
if line.startswith(_TRACK_HEADER):
num_tracked = line[len(_TRACK_HEADER):-1]
if not num_tracked.isdigit():
raise ParseException("Incorrectly formatted number of certificates and requests")
self.num_tracked = int(num_tracked)
elif line.startswith(_RQ_HEADER):
current_request = line[len(_RQ_HEADER) + 1:-2]
if current_request in self._data:
raise ParseException("Found duplicate request ID '{rq}'".format(rq=current_request))
self._data[current_request] = {}
self.requests.append(current_request)
self._rq_list.append(self._data[current_request])
elif line.endswith(':'):
# Key with no value - fake it
key = line[:-1]
self._data[current_request][key] = ''
elif ': ' in line:
key, val = line.split(': ', 1)
self._data[current_request][key] = val
def __contains__(self, rq):
"""
Does the certificate collection contain the given request ID?
"""
return rq in self._data
def __len__(self):
"""
Return the number of requests found (not the number tracked)
"""
return len(self._data)
def __getitem__(self, rq):
"""
Return the request with the given ID.
"""
return self._data[rq]
def search(self, **kwargs):
"""
Search for one or more key-value pairs in the given data. See the
        documentation of :func:`insights.parsers.keyword_search` for more
details on how to use it.
"""
return keyword_search(self._rq_list, **kwargs)
|
from abc import ABCMeta, abstractstaticmethod
class LPerson(metaclass = ABCMeta):
@abstractstaticmethod
def personMethod():
"""iface method"""
class Student(LPerson):
def __init__(self):
self.name = "zulkepretes"
def personMethod(self):
print("Student name {}".format(self.name))
class Teacher(LPerson):
def __init__(self):
self.name = "sayutes"
def personMethod(self):
print("Teacher name {}".format(self.name))
class PersonFactory:
@staticmethod
def build_person(person_type):
if person_type == "Student":
return Student()
if person_type == "Teacher":
return Teacher()
print("invalid type")
return -1
if __name__ == '__main__':
choice = input("what category (Student / Teacher): ")
person = PersonFactory.build_person(choice)
person.personMethod()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Python framework for developing neural network emulators of
RRTMGP gas optics scheme
This program takes existing input-output data generated with RRTMGP and
user-specified hyperparameters such as the number of neurons,
scales the data if requested, and trains a neural network.
Alternatively, an automatic tuning method can be used for
finding a good set of hyperparameters (expensive).
Right now just a placeholder, pasted some of the code I used in my paper
Contributions welcome!
@author: Peter Ukkonen
"""
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras import losses, optimizers
import tensorflow as tf
import tensorflow.keras.backend as K
# from keras.models import Sequential
# from keras.layers import Dense, Dropout, Activation, Flatten,Input
import numpy as np
import h5py
# import optuna
from tensorflow.python.framework import ops
from tensorflow.python.ops import state_ops, control_flow_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.training.optimizer import Optimizer
import shlex
from subprocess import Popen, PIPE
# err_metrics_rrtmgp_lw = np.array([0.0792, 0.0630, 0.0499, -0.1624, -0.3475, -0.4379, 0.0025])
# Heating rate (all exps), Heating rate (present), SFC forcing (pre-industrial to present),
# SFC forcing (present to future), TOA forcing (present to future),
# TOA forcing CO2 (pre-industrial to 8x), SFC forcing N2O (pre-industrial to present)
def hybrid_loss_wrapper(alpha):
def loss_expdiff(y_true, y_pred):
err_tot = K.mean(K.square(y_pred - y_true))
err_diff = expdiff(y_true, y_pred)
err = (alpha) * err_diff + (1 - alpha)*err_tot
return err
return loss_expdiff
def expdiff(y_true, y_pred):
diff_pred = y_pred[1::2,:] - y_pred[0::2,:]
diff_true = y_true[1::2,:] - y_true[0::2,:]
# err_diff = K.mean(K.square(diff_pred - diff_true))
err_diff = K.mean(K.abs(diff_pred - diff_true))
return err_diff
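# Usage sketch (illustrative, `model` is assumed): the wrapper returns a
# Keras-compatible loss in which `alpha` weights the paired-experiment
# difference term against the plain mean-squared error.
#
#     model.compile(loss=hybrid_loss_wrapper(alpha=0.5), optimizer='adam')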
def get_stdout(cmd):
"""
Execute the external command and get its exitcode, stdout and stderr.
"""
args = shlex.split(cmd)
proc = Popen(args, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
# exitcode = proc.returncode
#
out = out.decode("utf-8")
return out, err
from tensorflow.keras.callbacks import Callback
class RunRadiationScheme(Callback):
def __init__(self, cmd, modelpath, modelsaver, patience=5, interval=1):
super(Callback, self).__init__()
self.interval = interval
self.cmd = cmd
self.modelpath = modelpath
self.modelsaver = modelsaver
self.patience = patience
# best_weights to store the weights at which the minimum loss occurs.
self.best_weights = None
self.err_metrics_rrtmgp = None
def on_train_begin(self, logs=None):
# The number of epoch it has waited when loss is no longer minimum.
self.wait = 0
# The epoch the training stops at.
self.stopped_epoch = 0
# Initialize the best as infinity.
self.best = np.Inf
self.best_epoch = 0
print("Using RunRadiationScheme earlystopper, fluxes are validated " \
"against Line-By-Line benchmark (RFMIP),\nand training stopped when a "\
"weighted mean of the metrics printed by the radiation program have\n"\
"not improved for {} epochs".format(self.patience ))
print("The temporary model is saved to {}".format(self.modelpath))
# First run the RRTMGP code without NNs to get the reference errors
cmd_ref = self.cmd[0:75]
out,err = get_stdout(cmd_ref)
outstr = out.split('--------')
err_metrics_str = outstr[2].strip('\n')
err_metrics_str = err_metrics_str.split(',')
self.err_metrics_rrtmgp = np.float32(err_metrics_str)
# print("Reference errors were: {}".format(err_metrics_str))
# err_metrics_norm = err_metrics / err_metrics_rrtmgp_lw
def on_epoch_end(self, epoch, logs={}):
if epoch % self.interval == 0:
# Shortwave or Longwave?
# Weight heating rates more for SW
# if 'sw' in self.cmd:
# sw = True
# weight_hr = 0.75
# else:
# sw = False
# weight_hr = 0.5
# y_pred = self.model.predict_proba(self.X_val, verbose=0)
# score = roc_auc_score(self.y_val, y_pred)
# SAVE MODEL
# print("saving to {}".format(self.modelpath))
self.modelsaver(self.modelpath, self.model)
# RUN RADIATION CODE WITH MODEL
# print("running: {}".format(self.cmd))
# cmd = './rrtmgp_lw_eval_nn_rfmip 8 ../../rrtmgp/data/rrtmgp-data-lw-g128-210809.nc 1 1 ' + modelinput
out,err = get_stdout(self.cmd)
outstr = out.split('--------')
metric_names = outstr[1].strip('\n')
metric_names = metric_names.split(',')
            metric_names = [name.strip() for name in metric_names]
err_metrics_str = outstr[2].strip('\n')
err_metrics_str = err_metrics_str.split(',')
err_metrics = np.float32(err_metrics_str)
# err_metrics_norm = err_metrics / err_metrics_rrtmgp_lw
err_metrics = err_metrics / self.err_metrics_rrtmgp
# find position where forcing errors start
indices = [i for i, elem in enumerate(metric_names) if 'HR' in elem]
ind_forc = indices[-1] + 1
# score = err_metrics.mean()
logs["mean_relative_heating_rate_error"] = err_metrics[0]
# Construct "overall" accuracy score for radiation
# forcing_err = np.abs(err_metrics[2:]).mean()
forcing_err = np.sqrt(np.mean(np.square(err_metrics[ind_forc:])))
logs["mean_relative_forcing_error"] = forcing_err
# hr_err = np.abs(err_metrics[0:ind_forc]).mean()
hr_err = np.sqrt(np.mean(np.square(err_metrics[0:ind_forc])))
# weight_forcing = 1 - weight_hr
# score = weight_hr * hr_err + weight_forcing * forcing_err
score = np.sqrt(np.mean(np.square(err_metrics)))
logs["radiation_score"] = score
# print("SCORE {:9} {:9} {:16} {:20} {:20} {:20} {:20}". format(*metric_names))
# print("{:.2f} {:.2f} {:.2f} {:.2f} {:.2f} "\
# " {:.2f} {:.2f} "\
# " {:.2f}". format(score,*err_metrics))
            print("The RFMIP accuracy relative to RRTMGP was: {:.2f} (HR {:.2f}, FLUXES/FORCINGS {:.2f})".format(score, hr_err, forcing_err))
for i in range(len(err_metrics)):
if (i==len(err_metrics)-1):
print("{}: {:.2f} \n".format(metric_names[i],err_metrics[i]), end =" ")
else:
print("{}: {:.2f}, ".format(metric_names[i],err_metrics[i]), end =" ")
# hr_ref = 0.0711
# forcing_ref = 0.2
# print("LBL errors - heating rate {:.3f} (RRTMGP {:.3f}), "\
# "TOA/sfc forcings {:.2f} ({:.2f}): weighted metric: {:.6f}".format(hr_err, hr_ref, forcing_err, forcing_ref, score))
current = logs.get("radiation_score")
# if epoch > 30:
# A local/temporary minimum can be found quickly, don't want to
# get stuck there: only start considering early stopping after a while
if np.less(current, self.best):
self.best = current
self.wait = 0
# Record the best weights if current results is better (less).
self.best_weights = self.model.get_weights()
self.best_epoch = epoch
else:
self.wait += 1
if self.wait >= self.patience:
                    print("Early stopping, the best radiation score (comprised of LBL heating rate"\
                          " and forcing errors normalized by RRTMGP values) was {:.2f}".format(self.best))
self.stopped_epoch = epoch
self.model.stop_training = True
print("Restoring model weights from the end of the best epoch ({})".format(self.best_epoch+1))
self.model.set_weights(self.best_weights)
def on_train_end(self, logs=None):
if self.stopped_epoch > 0:
print("Epoch %05d: early stopping" % (self.stopped_epoch + 1))
class COCOB(Optimizer):
def __init__(self, alpha=100, use_locking=False, name='COCOB'):
'''
constructs a new COCOB optimizer
'''
super(COCOB, self).__init__(use_locking, name)
self._alpha = alpha
def _create_slots(self, var_list):
for v in var_list:
with ops.colocate_with(v):
gradients_sum = constant_op.constant(0,
shape=v.get_shape(),
dtype=v.dtype.base_dtype)
grad_norm_sum = constant_op.constant(0,
shape=v.get_shape(),
dtype=v.dtype.base_dtype)
L = constant_op.constant(1e-8, shape=v.get_shape(), dtype=v.dtype.base_dtype)
tilde_w = constant_op.constant(0.0, shape=v.get_shape(), dtype=v.dtype.base_dtype)
reward = constant_op.constant(0.0, shape=v.get_shape(), dtype=v.dtype.base_dtype)
self._get_or_make_slot(v, L, "L", self._name)
self._get_or_make_slot(v, grad_norm_sum, "grad_norm_sum", self._name)
self._get_or_make_slot(v, gradients_sum, "gradients_sum", self._name)
self._get_or_make_slot(v, tilde_w, "tilde_w", self._name)
self._get_or_make_slot(v, reward, "reward", self._name)
def _apply_dense(self, grad, var):
gradients_sum = self.get_slot(var, "gradients_sum")
grad_norm_sum = self.get_slot(var, "grad_norm_sum")
tilde_w = self.get_slot(var, "tilde_w")
L = self.get_slot(var, "L")
reward = self.get_slot(var, "reward")
L_update = tf.maximum(L,tf.abs(grad))
gradients_sum_update = gradients_sum + grad
grad_norm_sum_update = grad_norm_sum + tf.abs(grad)
reward_update = tf.maximum(reward-grad*tilde_w,0)
new_w = -gradients_sum_update/(L_update*(tf.maximum(grad_norm_sum_update+L_update,self._alpha*L_update)))*(reward_update+L_update)
var_update = var-tilde_w+new_w
tilde_w_update=new_w
gradients_sum_update_op = state_ops.assign(gradients_sum, gradients_sum_update)
grad_norm_sum_update_op = state_ops.assign(grad_norm_sum, grad_norm_sum_update)
var_update_op = state_ops.assign(var, var_update)
tilde_w_update_op = state_ops.assign(tilde_w, tilde_w_update)
L_update_op = state_ops.assign(L, L_update)
reward_update_op = state_ops.assign(reward, reward_update)
return control_flow_ops.group(*[gradients_sum_update_op,
var_update_op,
grad_norm_sum_update_op,
tilde_w_update_op,
reward_update_op,
L_update_op])
def _apply_sparse(self, grad, var):
return self._apply_dense(grad, var)
def _resource_apply_dense(self, grad, handle):
return self._apply_dense(grad, handle)
# 1. Define an objective function to be maximized.
def create_model_hyperopt(trial, nx, ny):
    model = Sequential()
    # We define our MLP.
    # number of hidden layers
    n_layers = trial.suggest_int("n_layers", 1, 3)
# Input layer
activ0 = trial.suggest_categorical('activation', ['relu', 'softsign'])
num_hidden0 = trial.suggest_int("n_neurons_l0_l", 64, 256)
model.add(Dense(num_hidden0, input_dim=nx, activation=activ0))
for i in range(1, n_layers):
num_hidden = trial.suggest_int("n_neurons_l{}".format(i), 64, 256)
        activ = trial.suggest_categorical('activation', ['relu', 'softsign'])
model.add(Dense(num_hidden, activation=activ))
# output layer
model.add(Dense(ny, activation='linear'))
# We compile our model with a sampled learning rate.
lr = trial.suggest_loguniform('lr', 1e-5, 1e-1)
lossfunc = losses.mean_squared_error
model.compile(
loss=lossfunc,
optimizer=optimizers.Adam(learning_rate=lr),
metrics = ['mean_absolute_error'],
)
return model
def create_model_mlp(nx,ny,neurons=[40,40], activ=['softsign','softsign','linear'],
kernel_init='he_uniform'):
model = Sequential()
# input layer (first hidden layer)
model.add(Dense(neurons[0], input_dim=nx, kernel_initializer=kernel_init, activation=activ[0]))
# further hidden layers
for i in range(1,np.size(neurons)):
model.add(Dense(neurons[i], activation=activ[i],kernel_initializer=kernel_init))
# output layer
model.add(Dense(ny, activation=activ[-1],kernel_initializer=kernel_init))
return model
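# Example (illustrative shapes only): a two-hidden-layer MLP with 40 neurons per
# layer, matching the defaults above.
#
#     model = create_model_mlp(nx=10, ny=4)
#     model.compile(loss='mse', optimizer=optimizers.Adam(learning_rate=1e-3))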
def savemodel(kerasfile, model):
model.summary()
newfile = kerasfile[:-3]+".txt"
# model.save(kerasfile)
try:
model.save(kerasfile)
except Exception:
pass
print("saving to {}".format(newfile))
h5_to_txt(kerasfile,newfile)
def get_available_layers(model_layers, available_model_layers=[b"dense"]):
parsed_model_layers = []
for l in model_layers:
for g in available_model_layers:
if g in l:
parsed_model_layers.append(l)
return parsed_model_layers
# KERAS HDF5 NEURAL NETWORK MODEL FILE TO NEURAL-FORTRAN ASCII MODEL FILE
def h5_to_txt(weights_file_name, output_file_name=''):
#check and open file
with h5py.File(weights_file_name,'r') as weights_file:
weights_group_key=list(weights_file.keys())[0]
# activation function information in model_config
model_config = weights_file.attrs['model_config']#.decode('utf-8') # Decode using the utf-8 encoding
model_config = model_config.replace('true','True')
model_config = model_config.replace('false','False')
model_config = model_config.replace('null','None')
model_config = eval(model_config)
model_layers = list(weights_file['model_weights'].attrs['layer_names'])
# model_layers = get_available_layers(model_layers)
print("names of layers in h5 file: %s \n" % model_layers)
# attributes needed for .txt file
# number of model_layers + 1(Fortran includes input layer),
# dimensions, biases, weights, and activations
num_model_layers = len(model_layers)+1
dimensions = []
bias = {}
weights = {}
activations = []
print('Processing the following {} layers: \n{}\n'.format(len(model_layers),model_layers))
if 'Input' in model_config['config']['layers'][0]['class_name']:
model_config = model_config['config']['layers'][1:]
else:
model_config = model_config['config']['layers']
for num,l in enumerate(model_layers):
layer_info_keys=list(weights_file[weights_group_key][l][l].keys())
#layer_info_keys should have 'bias:0' and 'kernel:0'
for key in layer_info_keys:
if "bias" in key:
bias.update({num:np.array(weights_file[weights_group_key][l][l][key])})
elif "kernel" in key:
weights.update({num:np.array(weights_file[weights_group_key][l][l][key])})
if num == 0:
dimensions.append(str(np.array(weights_file[weights_group_key][l][l][key]).shape[0]))
dimensions.append(str(np.array(weights_file[weights_group_key][l][l][key]).shape[1]))
else:
dimensions.append(str(np.array(weights_file[weights_group_key][l][l][key]).shape[1]))
if 'Dense' in model_config[num]['class_name']:
activations.append(model_config[num]['config']['activation'])
else:
print('Skipping bad layer: \'{}\'\n'.format(model_config[num]['class_name']))
if not output_file_name:
# if not specified will use path of weights_file with txt extension
output_file_name = weights_file_name.replace('.h5', '.txt')
with open(output_file_name,"w") as output_file:
output_file.write(str(num_model_layers) + '\n')
output_file.write("\t".join(dimensions) + '\n')
if bias:
for x in range(len(model_layers)):
bias_str="\t".join(list(map(str,bias[x].tolist())))
output_file.write(bias_str + '\n')
if weights:
for x in range(len(model_layers)):
weights_str="\t".join(list(map(str,weights[x].T.flatten())))
output_file.write(weights_str + '\n')
if activations:
for a in activations:
if a == 'softmax':
print('WARNING: Softmax activation not allowed... Replacing with Linear activation')
a = 'linear'
output_file.write(a + "\n")
|
import dateutil.parser
def parse_date(r):
return dateutil.parser.parse(r).date()
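# Usage sketch: dateutil accepts most ISO-8601 style strings, e.g.
#   parse_date("2021-03-04T12:30:00Z") -> datetime.date(2021, 3, 4)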
|
# This Python file uses the following encoding: utf-8
"""Gather data from a GitHub user and render an HTML template with that data."""
|
keyboard.send_keys("<shift>+9")
|
"""iais URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path, re_path
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
path("", views.index, name="index"),
path("register", views.register_view, name="register"),
path("login", views.login_view, name="login"),
path("logout", views.logout_view, name="logout"),
path("images", views.get_user_images, name="images"),
path("advanced_analysis", views.advanced_analysis, name="advanced_analysis"),
path("upload_image", views.upload_image, name="upload_image"),
path("get_image/<int:img_id>", views.get_image, name="get_image"),
path("request_img_analysis", views.request_img_analysis, name="request_img_analysis"),
path("get_img_analysis", views.get_img_analysis, name="get_img_analysis"),
re_path(r'(?P<id>[0-9a-f]{32})', views.display_img_search, name="displayimgsearch"),
re_path(r'(?P<id>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})', views.display_img_analysis, name="displayimgsearch")
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
|
# Pick M of the candidate positions (value 2 in the grid) so that the total
# distance from every house (value 1) to its nearest chosen position,
# measured as Manhattan distance, is minimized.
from itertools import combinations
import sys
input = sys.stdin.readline
N, M = map(int, input().split())
jido = [[int(i) for i in input().split()] for _ in range(N)]
chickens = []
jip = []
for x in range(N):
for y in range(N):
if jido[x][y] == 2:
chickens.append((x,y))
if jido[x][y] == 1:
jip.append((x,y))
def dist(x, y, a, b):
return abs(x-a) + abs(y-b)
answer = 987654321
for c in combinations(chickens, M):
sub_sum = sum([min([dist(i[0],i[1],j[0],j[1]) for i in c]) for j in jip])
answer = min(sub_sum, answer)
print(answer)
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 7 19:57:09 2016
@author: castaned
"""
import main_modules as mmod
import pylab as plt
import seaborn as sns
sns.set(style="white",rc={"figure.figsize": (8, 8),'axes.labelsize': 16,
'ytick.labelsize': 12,'xtick.labelsize': 12,
'legend.fontsize': 16,'axes.titlesize':16,'font.size':14})
r_ord = 4
kind = "p"
ells = [0,1,2,3,4,5,6]
mds = mmod.list_gen(ells,r_ord,r_ord,kind)
mss = "1p875"
incl = [0,10,20,30,40,50,60,70,80,90]
for i in mds:
mde = i
ll = int(mde.split()[0])
clr = sns.color_palette("Set2", 12)[ll]
    plt.plot(incl, [mmod.emode(mss, 0, mde).calc_area(incl=inc) for inc in incl],
             label=r"$\ell$=" + str(ll), color=clr)
#plt.ylim(0,1.2)
plt.xlabel("Inclination")
plt.ylabel("(1.0 - Area_Factor)")
plt.legend(loc="best")
plt.title(r"M="+mss+"M$\odot$ - V = 0 km s$^{-1}$")
plt.grid()
|
import ray
import torch
import random
import argparse
import numpy as np
from tqdm import tqdm
from copy import deepcopy
from simrl.utils import setup_seed, soft_critic_update
from simrl.utils.modules import OnehotActor, BoundedContinuousActor, Critic
from simrl.utils.envs import make_env
from simrl.utils.data import CollectorServer, ReplayBuffer
from simrl.utils.logger import Logger
from simrl.utils.actor import DistributionActor
class SAC:
@staticmethod
def get_config():
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='CartPole-v1')
parser.add_argument('--seed', type=int, default=None)
parser.add_argument('--lr', type=float, default=3e-4)
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--buffer_size', type=int, default=int(1e6))
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--tau', type=float, default=0.005)
parser.add_argument('--epoch', type=int, default=100)
parser.add_argument('--data_collection_per_epoch', type=int, default=10000)
parser.add_argument('--num_collectors', type=int, default=4)
parser.add_argument('--training_step_per_epoch', type=int, default=1000)
parser.add_argument('--test-num', type=int, default=20)
parser.add_argument('--base_alpha', type=float, default=0.2)
parser.add_argument('--auto_alpha', type=lambda x: [False, True][int(x)], default=True)
parser.add_argument('--test_frequency', type=int, default=5)
parser.add_argument('--log_video', action='store_true')
parser.add_argument('--log', type=str, default=None)
parser.add_argument('--device', type=str, default='cuda' if torch.cuda.is_available() else 'cpu')
parser.add_argument('-ahf', '--actor_hidden_features', type=int, default=128)
parser.add_argument('-ahl', '--actor_hidden_layers', type=int, default=2)
parser.add_argument('-aa', '--actor_activation', type=str, default='leakyrelu')
parser.add_argument('-an', '--actor_norm', type=str, default=None)
parser.add_argument('-chf', '--critic_hidden_features', type=int, default=128)
parser.add_argument('-chl', '--critic_hidden_layers', type=int, default=2)
parser.add_argument('-ca', '--critic_activation', type=str, default='leakyrelu')
parser.add_argument('-cn', '--critic_norm', type=str, default=None)
args = parser.parse_args()
return args.__dict__
def __init__(self, config):
self.config = config
setup_seed(self.config['seed'])
self.env = make_env(config)
self.state_dim = self.env.observation_space.shape[0]
self.action_dim = self.env.action_space.shape[0]
self.device = self.config['device']
if self.config['env_type'] == 'discrete':
self.actor = OnehotActor(self.state_dim, self.action_dim,
hidden_features=self.config.get('actor_hidden_features', 128),
hidden_layers=self.config.get('actor_hidden_layers', 1),
hidden_activation=self.config.get('actor_activation', 'leakyrelu'),
norm=self.config.get('actor_norm', None))
elif self.config['env_type'] == 'continuous':
self.actor = BoundedContinuousActor(self.state_dim, self.action_dim,
hidden_features=self.config.get('actor_hidden_features', 128),
hidden_layers=self.config.get('actor_hidden_layers', 1),
hidden_activation=self.config.get('actor_activation', 'leakyrelu'),
norm=self.config.get('actor_norm', None),
min_action=self.config.get('min_action', -1),
max_action=self.config.get('max_action', 1))
else:
raise ValueError('{} is not supported!'.format(self.config['env_type']))
self.q1 = Critic(self.state_dim,
action_dim=self.action_dim,
hidden_features=self.config.get('critic_hidden_features', 128),
hidden_layers=self.config.get('critic_hidden_layers', 1),
hidden_activation=self.config.get('critic_activation', 'leakyrelu'),
norm=self.config.get('critic_norm', None))
self.q2 = Critic(self.state_dim,
action_dim=self.action_dim,
hidden_features=self.config.get('critic_hidden_features', 128),
hidden_layers=self.config.get('critic_hidden_layers', 1),
hidden_activation=self.config.get('critic_activation', 'leakyrelu'),
norm=self.config.get('critic_norm', None))
self.buffer = ray.remote(ReplayBuffer).remote(self.config['buffer_size'])
self.collector = CollectorServer.remote(self.config, deepcopy(DistributionActor(self.actor)), self.buffer, self.config['num_collectors'])
self.logger = Logger.remote(config, deepcopy(DistributionActor(self.actor)), 'sac')
self.actor = self.actor.to(self.device)
self.q1 = self.q1.to(self.device)
self.q2 = self.q2.to(self.device)
self.q1_target = deepcopy(self.q1)
self.q2_target = deepcopy(self.q2)
self.log_alpha = torch.nn.Parameter(torch.zeros(1, device=self.device) + np.log(self.config['base_alpha'])).float()
if self.config['auto_alpha']:
if self.config['env_type'] == 'discrete':
self.target_entropy = 0.9 * np.log(self.action_dim)
elif self.config['env_type'] == 'continuous':
self.target_entropy = - self.action_dim
self.alpha_optimizor = torch.optim.Adam([self.log_alpha], lr=1e-3)
self.actor_optimizor = torch.optim.Adam(self.actor.parameters(), lr=config['lr'])
self.critic_optimizor = torch.optim.Adam([*self.q1.parameters(), *self.q2.parameters()], lr=config['lr'])
def run(self):
for i in tqdm(range(self.config['epoch'])):
batchs_id = self.collector.collect_steps.remote(self.config['data_collection_per_epoch'], self.actor.get_weights())
batchs = ray.get(batchs_id)
for _ in tqdm(range(self.config['training_step_per_epoch'])):
batchs = ray.get(self.buffer.sample.remote(self.config['batch_size']))
batchs.to_torch(dtype=torch.float32, device=self.device)
''' update critic '''
with torch.no_grad():
next_action_dist = self.actor(batchs['next_obs'])
next_action = next_action_dist.mode
next_action_log_prob = next_action_dist.log_prob(next_action)
next_q1 = self.q1_target(batchs['next_obs'], next_action)
next_q2 = self.q2_target(batchs['next_obs'], next_action)
next_q = torch.min(next_q1, next_q2)
target = batchs['reward'] + self.config['gamma'] * (1 - batchs['done']) * (next_q - next_action_log_prob.unsqueeze(dim=-1))
q1 = self.q1(batchs['obs'], batchs['action'])
q2 = self.q2(batchs['obs'], batchs['action'])
critic_loss = torch.mean((q1 - target) ** 2) + torch.mean((q2 - target) ** 2)
self.critic_optimizor.zero_grad()
critic_loss.backward()
self.critic_optimizor.step()
soft_critic_update(self.q1, self.q1_target, self.config['tau'])
soft_critic_update(self.q2, self.q2_target, self.config['tau'])
''' update actor '''
action_dist = self.actor(batchs['obs'])
new_action = action_dist.rsample()
log_prob = action_dist.log_prob(new_action)
if self.config['auto_alpha']:
# update alpha
alpha_loss = - torch.mean(self.log_alpha * (log_prob + self.target_entropy).detach())
self.alpha_optimizor.zero_grad()
alpha_loss.backward()
self.alpha_optimizor.step()
# update actor
q = torch.min(self.q1(batchs['obs'], new_action), self.q2(batchs['obs'], new_action))
actor_loss = - q.mean() + torch.exp(self.log_alpha) * log_prob.mean()
self.actor_optimizor.zero_grad()
actor_loss.backward()
self.actor_optimizor.step()
info = {
"critic_loss" : critic_loss.item(),
"actor_loss" : actor_loss.item(),
}
self.logger.test_and_log.remote(None, info)
self.logger.test_and_log.remote(self.actor.get_weights() if i % self.config['test_frequency'] == 0 else None, info)
if __name__ == '__main__':
ray.init()
config = SAC.get_config()
config['seed'] = config['seed'] or random.randint(0, 1000000)
experiment = SAC(config)
experiment.run()
|
from typing import List, Optional
from fastapi import FastAPI
from pydantic.main import BaseModel
from fastapi_hypermodel import HyperModel, UrlFor, LinkSet
from fastapi_hypermodel.hypermodel import HALFor
class ItemSummary(HyperModel):
name: str
id: str
href = UrlFor("read_item", {"item_id": "<id>"})
class ItemDetail(ItemSummary):
description: Optional[str] = None
price: float
class ItemUpdate(BaseModel):
name: Optional[str]
description: Optional[str]
price: Optional[float]
class ItemCreate(ItemUpdate):
id: str
class Person(HyperModel):
name: str
id: str
items: List[ItemSummary]
href = UrlFor("read_person", {"person_id": "<id>"})
links = LinkSet(
{
"self": UrlFor("read_person", {"person_id": "<id>"}),
"items": UrlFor("read_person_items", {"person_id": "<id>"}),
}
)
hal_href = HALFor("read_person", {"person_id": "<id>"})
hal_links = LinkSet(
{
"self": HALFor("read_person", {"person_id": "<id>"}),
"items": HALFor("read_person_items", {"person_id": "<id>"}),
"addItem": HALFor(
"put_person_items",
{"person_id": "<id>"},
description="Add an item to this person and the items list",
),
}
)
class Config:
# Alias hal_links to _links as per the HAL standard
fields = {"hal_links": "_links"}
items = {
"item01": {"id": "item01", "name": "Foo", "price": 50.2},
"item02": {
"id": "item02",
"name": "Bar",
"description": "The Bar fighters",
"price": 62,
},
"item03": {
"id": "item03",
"name": "Baz",
"description": "There goes my baz",
"price": 50.2,
},
}
people = {
"person01": {
"id": "person01",
"name": "Alice",
"items": [items["item01"], items["item02"]],
},
"person02": {"id": "person02", "name": "Bob", "items": [items["item03"]]},
}
def create_app():
app = FastAPI()
HyperModel.init_app(app)
@app.get(
"/items",
response_model=List[ItemSummary],
)
def read_items():
return list(items.values())
@app.get("/items/{item_id}", response_model=ItemDetail)
def read_item(item_id: str):
return items[item_id]
@app.put("/items/{item_id}", response_model=ItemDetail)
def update_item(item_id: str, item: ItemUpdate):
items[item_id].update(item.dict(exclude_none=True))
return items[item_id]
@app.get(
"/people",
response_model=List[Person],
)
def read_people():
return list(people.values())
@app.get("/people/{person_id}", response_model=Person)
def read_person(person_id: str):
return people[person_id]
@app.get("/people/{person_id}/items", response_model=List[ItemDetail])
def read_person_items(person_id: str):
return people[person_id]["items"]
@app.put("/people/{person_id}/items", response_model=List[ItemDetail])
def put_person_items(person_id: str, item: ItemCreate):
items[item.id] = item.dict()
people[person_id]["items"].append(item.dict())
return people[person_id]["items"]
return app
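# To try the example locally (assuming this file is saved as app.py and uvicorn
# is installed), the factory above can be served with:
#
#     uvicorn app:create_app --factory --reload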
|
from typing import Any, Dict, List
from pydantic.main import BaseModel
from ..feature import FeatureSchema
class ClassificationAnswer(FeatureSchema):
"""
- Represents a classification option.
- Because it inherits from FeatureSchema
the option can be represented with either the name or feature_schema_id
"""
extra: Dict[str, Any] = {}
class Radio(BaseModel):
""" A classification with only one selected option allowed """
answer: ClassificationAnswer
class Checklist(BaseModel):
""" A classification with many selected options allowed """
answer: List[ClassificationAnswer]
class Text(BaseModel):
""" Free form text """
answer: str
class Dropdown(BaseModel):
"""
    - A classification with many selected options allowed.
- This is not currently compatible with MAL.
"""
answer: List[ClassificationAnswer]
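# Usage sketch (illustrative; per the FeatureSchema note above, an option can be
# identified by its name or by feature_schema_id):
#
#     radio = Radio(answer=ClassificationAnswer(name="dog"))
#     checklist = Checklist(answer=[ClassificationAnswer(name="dog"),
#                                   ClassificationAnswer(name="cat")])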
|
# coding:utf-8
from setuptools import setup
# or
# from distutils.core import setup
setup(
name='pypcie',
version='0.1',
description='Pcie utils',
author='Alex',
author_email='alex.zhang1012@gmail.com',
url='https://github.com/shmily1012/pypcie',
packages=['pcie'],
)
|
a = input("your name : ")
print("Hello " + a)
|
# Time domain response functions
#
# m.mieskolainen@imperial.ac.uk, 2020
import numpy as np
import numba
import scipy.special
import copy
from scipy.integrate import trapz
from scipy.integrate import simps
import tools
def gamma_pdf(x, k, theta):
""" Gamma pdf density.
Args:
x : input argument
k : shape > 0
theta : scale > 0
Returns:
pdf values
"""
xx = copy.deepcopy(x)
xx[x < 0] = 0
y = 1.0/(scipy.special.gamma(k)*theta**k) * (xx**(k-1)) * np.exp(-xx/theta)
return y
@numba.njit
def normpdf(x,mu,std):
""" Normal pdf
Args:
x : array of argument values
mu : mean value
std : standard deviation
Returns:
density values for each x
"""
return 1/np.sqrt(2*np.pi*std**2) * np.exp(-(x-mu)**2/(2*std**2))
#@numba.njit
def h_exp(t, a, normalize=False):
""" Exponential density
Args:
t: input argument (array)
a: parameter > 0
normalize: trapz integral normalization over t
Returns:
function values
"""
y = np.zeros(len(t))
y[t>0] = np.exp(-t[t>0] / a) / a
y[np.isinf(y) | np.isnan(y)] = 0 # Protect underflows
if normalize:
y /= np.abs(trapz(x=t, y=y)) # abs for numerical protection
return y
#@numba.njit
def h_wei(t, a, k, normalize=False):
""" Weibull density
Args:
t: input argument (array)
a: scale parameter > 0
k: shape parameter > 0
normalize: trapz integral normalization over t
Returns:
function values
"""
y = np.zeros(len(t))
y[t>0] = (k/a) * (t[t>0]/a)**(k-1) * np.exp(-(t[t>0]/a)**k)
y[np.isinf(y) | np.isnan(y)] = 0 # Protect underflows
if normalize:
y /= np.abs(trapz(x=t, y=y)) # abs for numerical protection
return y
#@numba.njit
def h_lgn(t, mu, sigma, normalize=False):
""" Log-normal density
Args:
t: input argument (array)
mu: mean parameter (-infty,infty)
sigma: std parameter > 0
normalize: trapz integral normalization over t
Returns:
function values
"""
y = np.zeros(len(t))
y[t>0] = 1/(t[t>0]*sigma*np.sqrt(2*np.pi)) * np.exp(-(np.log(t[t>0]) - mu)**2 / (2*sigma**2))
y[np.isinf(y) | np.isnan(y)] = 0 # Protect underflows
if normalize:
y /= np.abs(trapz(x=t, y=y)) # abs for numerical protection
return y
def I_log(t, i0, beta, L):
""" Fixed beta logistic equation solution
Args:
t : time
i0 : initial condition
beta : fixed growth rate
L : solution maximum
"""
return L / (1 - (1-L/i0)*np.exp(-beta*t))
def dIdt_log(t, i0, beta, L):
""" Fixed beta logistic equation solution time derivative
"""
return (-i0*np.exp(-beta*t)*(i0-L)*L*beta) / (i0 + np.exp(-beta*t)*(L-i0))**2
def I_log_running(t, i0, L, beta, beta_param):
""" Running beta logistic equation solution
"""
beta_ = beta(t, **beta_param)
betaint = np.zeros(len(t))
for i in range(1,len(t)):
tval = t[0:i]
betaint[i] = simps(x=tval, y=beta(tval, **beta_param))
return (-i0*L) / ((i0-L)*np.exp(-betaint) - i0)
def dIdt_log_running(t, i0, L, beta, beta_param):
""" Running beta logistic equation solution time derivative
"""
beta_ = beta(t, **beta_param)
betaint = np.zeros(len(t))
for i in range(1,len(t)):
tval = t[0:i]
betaint[i] = simps(x=tval, y=beta(tval, **beta_param))
return (-i0*np.exp(-betaint)*(i0-L)*L*beta_) / (i0 + np.exp(-betaint)*(L-i0))**2
def betafunc(t, beta_0, beta_D, beta_lambda):
""" Running effective beta-function
"""
y = np.ones(len(t))
for i in range(len(t)):
if t[i] < beta_D:
y[i] = beta_0
else:
y[i] = beta_0 * np.exp(-(t[i] - beta_D)/beta_lambda)
y[y < 0] = 0
return y
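# Usage sketch (parameter values are illustrative only): evaluate the running
# beta-function and a normalized exponential response on a common time grid.
if __name__ == "__main__":
    t = np.linspace(0.0, 120.0, 600)
    beta_t = betafunc(t, beta_0=0.3, beta_D=20.0, beta_lambda=10.0)
    kernel = h_exp(t, a=7.0, normalize=True)
    print(beta_t[:3], kernel.sum())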
|
"""
random: Implements a simple AI, that only performs random actions.
"""
from time import sleep
from numpy.random import uniform
from controller.game_ai import GameAI
from view.log import log
class Random(GameAI):
def execute_action(self, state):
super.__doc__
actions = self.game.actions(state)
index = int(uniform(0, len(actions)))
chosen = actions[index]
#self.game.store_random_statistics({a: uniform() for a in actions})
log("Random action: {}".format(chosen))
return self.game.result(state, chosen)
def __str__(self):
return "Random Moves Algorithm"
|
from fastapi import APIRouter
from monailabel.interfaces import MONAILabelApp
from monailabel.utils.others.app_utils import app_instance
router = APIRouter(
prefix="/info",
tags=["AppService"],
responses={404: {"description": "Not found"}},
)
@router.get("/", summary="Get App Info")
async def app_info():
instance: MONAILabelApp = app_instance()
return instance.info()
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import _, api, exceptions, fields, models
class SMSRecipient(models.TransientModel):
_name = 'sms.resend.recipient'
_description = 'Resend Notification'
_rec_name = 'sms_resend_id'
sms_resend_id = fields.Many2one('sms.resend', required=True)
notification_id = fields.Many2one('mail.notification', required=True, ondelete='cascade')
resend = fields.Boolean(string="Resend", default=True)
failure_type = fields.Selection([
('sms_number_missing', 'Missing Number'),
('sms_number_format', 'Wrong Number Format'),
('sms_credit', 'Insufficient Credit'),
('sms_server', 'Server Error')], related='notification_id.failure_type', related_sudo=True, readonly=True)
partner_id = fields.Many2one('res.partner', 'Partner', related='notification_id.res_partner_id', readonly=True)
    partner_name = fields.Char('Recipient', readonly=True)
sms_number = fields.Char('Number')
class SMSResend(models.TransientModel):
_name = 'sms.resend'
_description = 'SMS Resend'
_rec_name = 'mail_message_id'
@api.model
def default_get(self, fields):
result = super(SMSResend, self).default_get(fields)
if result.get('mail_message_id'):
mail_message_id = self.env['mail.message'].browse(result['mail_message_id'])
result['recipient_ids'] = [(0, 0, {
'notification_id': notif.id,
'resend': True,
'failure_type': notif.failure_type,
'partner_name': notif.res_partner_id.display_name or mail_message_id.record_name,
'sms_number': notif.sms_number,
}) for notif in mail_message_id.notification_ids if notif.notification_type == 'sms' and notif.notification_status in ('exception', 'bounce')]
return result
mail_message_id = fields.Many2one('mail.message', 'Message', readonly=True, required=True)
recipient_ids = fields.One2many('sms.resend.recipient', 'sms_resend_id', string='Recipients')
has_cancel = fields.Boolean(compute='_compute_has_cancel')
has_insufficient_credit = fields.Boolean(compute='_compute_has_insufficient_credit')
@api.depends("recipient_ids.failure_type")
def _compute_has_insufficient_credit(self):
self.has_insufficient_credit = self.recipient_ids.filtered(lambda p: p.failure_type == 'sms_credit')
@api.depends("recipient_ids.resend")
def _compute_has_cancel(self):
self.has_cancel = self.recipient_ids.filtered(lambda p: not p.resend)
def _check_access(self):
if not self.mail_message_id or not self.mail_message_id.model or not self.mail_message_id.res_id:
raise exceptions.UserError(_('You do not have access to the message and/or related document.'))
record = self.env[self.mail_message_id.model].browse(self.mail_message_id.res_id)
record.check_access_rights('read')
record.check_access_rule('read')
def action_resend(self):
self._check_access()
all_notifications = self.env['mail.notification'].sudo().search([
('mail_message_id', '=', self.mail_message_id.id),
('notification_type', '=', 'sms'),
('notification_status', 'in', ('exception', 'bounce'))
])
sudo_self = self.sudo()
to_cancel_ids = [r.notification_id.id for r in sudo_self.recipient_ids if not r.resend]
to_resend_ids = [r.notification_id.id for r in sudo_self.recipient_ids if r.resend]
if to_cancel_ids:
all_notifications.filtered(lambda n: n.id in to_cancel_ids).write({'notification_status': 'canceled'})
if to_resend_ids:
record = self.env[self.mail_message_id.model].browse(self.mail_message_id.res_id)
sms_pid_to_number = dict((r.partner_id.id, r.sms_number) for r in self.recipient_ids if r.resend and r.partner_id)
pids = list(sms_pid_to_number.keys())
numbers = [r.sms_number for r in self.recipient_ids if r.resend and not r.partner_id]
rdata = []
for pid, cid, active, pshare, ctype, notif, groups in self.env['mail.followers']._get_recipient_data(record, 'sms', False, pids=pids):
if pid and notif == 'sms':
rdata.append({'id': pid, 'share': pshare, 'active': active, 'notif': notif, 'groups': groups or [], 'type': 'customer' if pshare else 'user'})
if rdata or numbers:
record._notify_record_by_sms(
self.mail_message_id, {'partners': rdata}, check_existing=True,
sms_numbers=numbers, sms_pid_to_number=sms_pid_to_number,
put_in_queue=False
)
self.mail_message_id._notify_sms_update()
return {'type': 'ir.actions.act_window_close'}
def action_cancel(self):
self._check_access()
sudo_self = self.sudo()
sudo_self.mapped('recipient_ids.notification_id').write({'notification_status': 'canceled'})
self.mail_message_id._notify_sms_update()
return {'type': 'ir.actions.act_window_close'}
def action_buy_credits(self):
url = self.env['iap.account'].get_credits_url(service_name='sms')
return {
'type': 'ir.actions.act_url',
'url': url,
}
|
import gym
from gym import wrappers
env = gym.make('Reacher-v1')
env.reset()
env.render()
outdir = './log/'
f_act = open(outdir + 'log_act.txt', 'w')
f_obs = open(outdir + 'log_obs.txt', 'w')
f_rwd = open(outdir + 'log_rwd.txt', 'w')
f_info = open(outdir + 'log_info.txt', 'w')
env = wrappers.Monitor(env, directory=outdir, force=True)
for i_episode in range(101):
observation = env.reset()
for t in range(100):
env.render()
# action selection
action = env.action_space.sample()
# take the action and observe the reward and next state
observation, reward, done, info = env.step(action)
# print observation
f_act.write(str(action) + "\n")
f_obs.write(str(observation) + "\n")
f_rwd.write(str(reward) + "\n")
f_info.write(str(info) + "\n")
if done:
print("Episode finished after {} timesteps".format(t+1))
break
env.monitor.close()
|
from django.contrib import admin
from .models import raspberry
admin.site.register(raspberry)
|
import typing
import os.path as path
from .... import config
from ...util.fs import mkdir_without_exception
from ...codegen.base import ConfigBase
class Generator(object):
    """
    Integrates with a third-party RPC framework.
    Generates the server and client code for a Service and finally saves the
    files to the configured locations, following the predefined directory layout.
    """
    def __init__(self, configs: typing.List[ConfigBase]):
        """
        Generate the service information defined by ``configs`` to the locations
        defined by the output settings.
        :param configs: list of service configurations to generate code for
        """
self.configs = configs
self.target_path = config.server_output_path
self.client_path = config.client_output_path
    def generate(self):
        """
        Use the configuration files produced by generate_config and invoke the
        third-party engine to generate the encoders and decoders.
        :return:
        """
        raise NotImplementedError()
class ClientDirConfig(object):
    """
    Directory layout of the RPC client.
    All generated directories live under the module's own name.
    """
    def __init__(self, base_path, client_path: str):
        """
        The client root is the rpc-client location that lives outside base_dir.
        :param base_path:
        """
self.base_dir = path.abspath(base_path)
self.client_path = path.abspath(client_path)
        # directory for the generated interfaces
self.root = self.client_path
        # client code lives under the src directory by default
root = "./src"
        # directory for code generated by third-party tools, e.g. protobuf configuration
self.mid_file = path.join(self.client_path, root, "./mid_file")
        # directory for encoding/decoding information
self.encode = path.join(self.client_path, root, "./encode")
self.impl = path.join(self.client_path, root, "./impl")
        # directory for the shared rpc runtime
self.runtime = path.join(self.client_path, "./runtime")
    def ensure_dir(self):
        """
        Initialize the rpc directories.
        :return:
        """
ensure_dir(self.root, self.mid_file, self.encode, self.impl)
# ensure_dir(self.runtime, is_package=False)
    def join(self, *sub_paths: str):
        """
        Return the result of joining DirConfig.root with the given sub paths.
        :param sub_paths:
        :return:
        """
return path.join(self.root, *sub_paths)
def ensure_dir(*dirs: str, is_package: bool = True):
"""
确认目录结构的正确性
:param dirs:
:param is_package:
:return:
"""
for d in dirs:
mkdir_without_exception(d)
        if is_package and not path.exists(path.join(d, "./__init__.py")):
            open(path.join(d, "./__init__.py"), "w").close()
class ServerDirConfig(ClientDirConfig):
"""
rpc server 目录结构,
跟 client 唯一的区别是多了一层 rpc 目录
"""
def __init__(self, base_path: str):
ClientDirConfig.__init__(self, base_path, "")
self.root = path.join(self.base_dir, "./rpc")
        # Directory for code generated by third-party tools, e.g. protobuf definitions
        self.mid_file = path.join(self.root, "./mid_file")
        # Directory for encoding/decoding information
        self.encode = path.join(self.root, "./encode")
        # Directory for the concrete grpc service implementations
        self.impl = path.join(self.root, "./impl")
        # Directory for the shared rpc runtime
self.runtime = path.join(self.root, "./runtime")
def ensure_dir(self):
"""
初始化 rpc 目录
:return:
"""
        # Directory for the concrete grpc service implementations
        # ClientDirConfig.ensure_dir(self)
        # Build the server's own directories
ensure_dir(self.mid_file, self.encode, self.impl)
# ensure_dir(self.runtime, is_package=False)
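# A minimal usage sketch (the "./myservice" path below is hypothetical, not part of
# this module): build the server-side rpc directory layout for a project and resolve
# a path inside it.
#
# dirs = ServerDirConfig("./myservice")
# dirs.ensure_dir()
# print(dirs.join("impl", "user_service.py"))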
|
# -*- coding: utf-8 -*-
import os
import errno
def silentremove(filename):
"""If ``filename`` exists, delete it. Otherwise, return nothing.
See http://stackoverflow.com/q/10840533/2823213."""
try:
os.remove(filename)
except OSError as e: # this would be "except OSError, e:" before Python 2.6
if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
            raise  # re-raise the exception if a different error occurred
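# Example usage (the path below is hypothetical): delete a scratch file if it is
# present; no exception is raised when it is already gone.
if __name__ == "__main__":
    silentremove("/tmp/example-scratch.txt")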
|
import logging
import json
from functools import wraps
from time import gmtime, strftime
logger = logging.getLogger(__name__)
def _default_get_user_identifier(request):
if request.user.is_authenticated:
return request.user.email
return None
def log_event(event_type, request, extra_data=None, level=logging.INFO):
"""
Logs an event with default information and, optionally, additional data included
:param event_type: Event identifier to show in the logs.
:param request: Django request associated with the event.
:param extra_data: Extra data to include in the logged event.
:param level: Log level to use.
"""
event_dict = {
"event_type": event_type,
"timestamp": strftime("%Y-%m-%d %H:%M:%S", gmtime()),
"ip_address": request.META["REMOTE_ADDR"],
}
user_identifier = _default_get_user_identifier(request)
if user_identifier:
event_dict["user"] = user_identifier
if extra_data:
event_dict.update(extra_data)
logger.log(level, f"ZYGOAT: {json.dumps(event_dict)}")
def log_view_outcome(event_type=None):
"""
Creates a decorator that logs basic info about the result of the view
:param event_type: Event identifier to show in the logs.
:return: A decorator that logs the outcome of a view.
"""
def decorator(view):
@wraps(view)
def inner(request, *args, **kwargs):
response = view(request, *args, **kwargs)
extra_data = {
"status_code": response.status_code,
}
log_event(event_type or view.__name__, request, extra_data=extra_data)
return response
return inner
return decorator
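# A minimal usage sketch; the view and event names below are hypothetical, not part
# of this module. Decorating a Django view logs its status code under the given
# event type, and log_event can also be called directly with extra data.
#
# @log_view_outcome("profile_viewed")
# def profile(request):
#     ...
#
# log_event("password_reset_requested", request, extra_data={"method": "email"})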
|
def lowercase_count(strng: str) -> int:
"""Returns count of lowercase characters."""
return sum(c.islower() for c in strng)
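# Example: "Hello World" contains 8 lowercase characters.
if __name__ == "__main__":
    print(lowercase_count("Hello World"))  # -> 8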
|
def seperate(sen):
left, right = 0, 0
u, v, answer = '', '', ''
makeR = []
if sen == '':
return ''
for a in sen:
if a == ')':
right += 1
if len(makeR) != 0:
makeR.pop()
elif a == '(':
makeR.append(a)
left += 1
if right == left and right != 0:
u = sen[:(right+left)]
v = sen[(right+left):]
break
print(u, v)
if len(makeR) == 0:
u += seperate(v)
return u
else:
temp = '('
temp += seperate(v)
temp += ')'
u = u[1:len(u)-1]
for i in range(0, len(u)):
if u[i] == '(':
temp += ')'
elif u[i] == ')':
temp += '('
return temp
def solution(p):
answer = seperate(p)
return answer
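# Two hand-checked examples of the transformation above: an already balanced and
# correct string is returned unchanged, while ")(" is rewritten into "()".
# (seperate() also prints its intermediate u/v split while running.)
if __name__ == "__main__":
    print(solution("(()())()"))  # -> "(()())()"
    print(solution(")("))        # -> "()"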
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is an auto-generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.drawing
class CaptionType(object):
"""
Const Class
    These constants specify the geometry of the line of a CaptionShape.
See Also:
`API CaptionType <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1drawing_1_1CaptionType.html>`_
"""
__ooo_ns__: str = 'com.sun.star.drawing'
__ooo_full_ns__: str = 'com.sun.star.drawing.CaptionType'
__ooo_type_name__: str = 'const'
straight = 0
"""
the caption line is a straight line from a caption area edge to the caption point.
"""
angled = 1
"""
the caption line is the shortest line from the caption area edge to the caption point.
"""
connector = 2
"""
    the caption line is built up from a straight line from the caption area edge, followed by the shortest line to the caption area point.
"""
__all__ = ['CaptionType']
|
import json
from llamalogs.helpers import ms_time
class AggregateLog:
def __init__(self, log):
self.sender = log.sender
self.receiver = log.receiver
self.account = log.account
self.message = ''
self.errorMessage = ''
self.initialMessageCount = 0
self.graph = log.graph
self.total = 0
self.errors = 0
self.elapsed = 0
self.elapsedCount = 0
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
def toAPIFormat(self):
api_log = {}
api_log["sender"] = self.sender
api_log["receiver"] = self.receiver
api_log["count"] = self.total
api_log["errorCount"] = self.errors
api_log["message"] = self.message
api_log["errorMessage"] = self.errorMessage
api_log["clientTimestamp"] = ms_time()
api_log["graph"] = self.graph or 'noGraph'
api_log["account"] = self.account
api_log["initialMessageCount"] = self.initialMessageCount
return api_log
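# A minimal usage sketch. The constructor only reads sender/receiver/account/graph
# off the incoming log object, so a SimpleNamespace stands in here as a hypothetical
# raw log record; the real llamalogs Log type is not defined in this file.
if __name__ == "__main__":
    from types import SimpleNamespace
    raw_log = SimpleNamespace(sender="web", receiver="db", account="acct-1", graph="prod")
    agg = AggregateLog(raw_log)
    agg.total = 3
    agg.errors = 1
    print(agg.toJSON())
    print(agg.toAPIFormat())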
|
"""
Small wrapper for the Python Google Drive API client library.
https://developers.google.com/api-client-library/python/apis/drive/v2
"""
from __future__ import print_function
import os
from apiclient.http import MediaFileUpload
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
from . import settings
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/drive-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/drive'
def get_credentials():
"""
Get valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid, the
OAuth2 flow is completed to obtain the new credentials.
:return: Credentials, the obtained credentials.
    :rtype: An oauth2client Credentials instance.
"""
try:
import argparse
flags = argparse.ArgumentParser(
parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, '{}.json'.format(
settings.APPLICATION_NAME))
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(
settings.GOOGLE_API_CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = settings.APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def upload_media(service, path, mime_type, parents=None, resumable=True):
"""
Upload file to Google Drive.
:param service: Google Drive API instance.
:param str path: Path of file to upload.
:param str mime_type: MIME type of file to upload.
:param list parents: Ids of folders to upload file to; file uploaded to
root folder by default.
    :param bool resumable: Whether the file upload can be resumed.
:return: Google Drive API response.
:rtype: dict
"""
if not os.path.isfile(path):
raise FileNotFoundError('Media file does not exist.')
file_metadata = {'name': os.path.basename(path)}
if parents:
file_metadata['parents'] = parents
media = MediaFileUpload(
path, mimetype=mime_type, resumable=resumable)
return service.files().create(
body=file_metadata, media_body=media, fields='id,name').execute()
def create_folder(service, name):
"""
Create folder in Google Drive.
:param service: Google Drive API instance.
:param str name: Name of folder to create.
:return: Google Drive API response.
:rtype: dict
"""
if not isinstance(name, str):
raise TypeError('Folder name should be a string.')
file_metadata = {
'name': name,
'mimeType': 'application/vnd.google-apps.folder'
}
return service.files().create(
body=file_metadata, fields='id,name').execute()
def delete_file(service, file_id):
"""
Delete file in Google Drive.
:param service: Google Drive API instance.
:param str file_id: Id of file to delete.
:return: Google Drive API response.
:rtype: dict
"""
if not isinstance(file_id, str):
raise TypeError('File id should be a string.')
return service.files().delete(fileId=file_id).execute()
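# A minimal usage sketch, assuming the Drive v3 discovery client that ships with the
# same google-api-python-client/oauth2client packages imported above; the file name
# and folder name below are hypothetical.
#
# if __name__ == "__main__":
#     from httplib2 import Http
#     from apiclient import discovery
#     credentials = get_credentials()
#     service = discovery.build('drive', 'v3', http=credentials.authorize(Http()))
#     folder = create_folder(service, 'example-uploads')
#     upload_media(service, 'report.pdf', 'application/pdf', parents=[folder['id']])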
|
from flask import current_app as app
# individual product class
class OneProduct:
def __init__(self, pid, name, price, available, img):
self.pid = pid
self.name = name
self.price = price
self.available = available
self.img = img
# get all basic info related to this product and return it
@staticmethod
def get_all_product_info(pid):
rows = app.db.execute('''
SELECT pid, name, price, available, img
FROM Products
WHERE pid = :pid
''',
pid=pid)
        return [OneProduct(*(rows[0])) if rows else None]
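# A minimal usage sketch (the blueprint `bp`, route, and template below are
# hypothetical, not part of this module): a Flask view could fetch a single
# product like this.
#
# @bp.route('/product/<int:pid>')
# def product_page(pid):
#     product = OneProduct.get_all_product_info(pid)[0]
#     return render_template('product.html', product=product)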
|
from instapy import InstaPy
session = InstaPy(username="d.sen17", password="clameclame")  # headless_browser=True
session.login()
comments = ['Truly Loved Your Post', 'Its awesome', 'Your Post Is inspiring']
session.set_do_comment(enabled=False, percentage=100)
session.set_comments(comments)
session.like_by_users(['spandan.spams'], amount=12)
session.end()
|
from __future__ import print_function, unicode_literals
CONSUMER_KEY = "c5e79584a52144467a1ad3d0e766811f052c60e9b"
CONSUMER_SECRET = "62350daf22709506832908a48f219a95"
DATA_DIR = "~/.moe-upnp"
MOEFOU_API_ROOT = "http://api.moefou.org"
MOEFM_API_ROOT = "http://moe.fm"
|
import xml.etree.ElementTree as Et
tree = Et.parse('movies.xml')
root = tree.getroot()
for child in root:
    print(child.tag, child.attrib)
input("listing child elements")
for movie in root.iter('movie'):
    print(movie.attrib)
input("movie attributes")
for description in root.iter('description'):
    print(description.text)
input("movie descriptions")
# search the tree for movies that came out in 1992:
for movie in root.findall("./genre/decade/movie/[year='1992']"):
    print(movie.attrib)
input("list of movies from 1992")
# find movies that are available in multiple formats
for movie in root.findall("./genre/decade/movie/format/[@multiple='Yes']"):
    print(movie.attrib)
input("listing multi-format movies")
# find movies that are available in multiple formats and print the parent
# element (the trailing '..' selects the parent of the matched format element)
for movie in root.findall("./genre/decade/movie/format/[@multiple='Yes']/.."):
    print(movie.attrib)
input("multi-format movies, parent elements")
# Find an element and fix its title
b2tf = root.find("./genre/decade/movie/[@title='Back 2 the Future']")
print(b2tf)
b2tf.attrib['title'] = "Back to the Future"
print(b2tf.attrib)
input("fixing the Back to the Future title")
"""
# Write the file back out with the corrected title in place
tree.write("movies.xml")
# Re-read the file to verify the change
tree = Et.parse('movies.xml')
root = tree.getroot()
for movie in root.iter('movie'):
    print(movie.attrib)
"""
for form in root.findall("./genre/decade/movie/format"):
    print(form.attrib, form.text)
input("printing movie formats")
# Normalize the movie format 'multiple' attribute to just Yes or No
"""
import re
for form in root.findall("./genre/decade/movie/format"):
    # Search for commas in the format text
    match = re.search(',', form.text)
    if match:
        form.set('multiple', 'Yes')
    else:
        form.set('multiple', 'No')
# Write out the tree to the file again
tree.write("movies.xml")
tree = Et.parse('movies.xml')
root = tree.getroot()
for form in root.findall("./genre/decade/movie/format"):
    print(form.attrib, form.text)
"""
# Print decades and the years of their movies
for decade in root.findall("./genre/decade"):
    print(decade.attrib)
    for year in decade.findall("./movie/year"):
        print(year.text, '\n')
input("printing decades and years")
# which movies are from 2000?
for movie in root.findall("./genre/decade/movie/[year='2000']"):
    print(movie.attrib)
input("listing movies from 2000")
# Add a new decade to the Action category
action = root.find("./genre[@category='Action']")
new_dec = Et.SubElement(action, 'decade')
new_dec.attrib["years"] = '2000s'
print(Et.tostring(action, encoding='utf8').decode('utf8'))
input("new decade added")
# finally, move X-Men from the 1990s decade to the 2000s
xmen = root.find("./genre/decade/movie[@title='X-Men']")
dec2000s = root.find("./genre[@category='Action']/decade[@years='2000s']")
dec2000s.append(xmen)
dec1990s = root.find("./genre[@category='Action']/decade[@years='1990s']")
dec1990s.remove(xmen)
print(Et.tostring(action, encoding='utf8').decode('utf8'))
input("move completed")
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
import torch
import torch.optim as optim
from nni.nas.pytorch.trainer import Trainer
from nni.nas.pytorch.utils import AverageMeterGroup
from .mutator import EnasMutator
logger = logging.getLogger(__name__)
class EnasTrainer(Trainer):
def __init__(self, model, loss, metrics, reward_function,
optimizer, num_epochs, dataset_train, dataset_valid,
mutator=None, batch_size=64, workers=4, device=None, log_frequency=None, callbacks=None,
entropy_weight=0.0001, skip_weight=0.8, baseline_decay=0.999,
mutator_lr=0.00035, mutator_steps_aggregate=20, mutator_steps=50, aux_weight=0.4):
super().__init__(model, mutator if mutator is not None else EnasMutator(model),
loss, metrics, optimizer, num_epochs, dataset_train, dataset_valid,
batch_size, workers, device, log_frequency, callbacks)
self.reward_function = reward_function
self.mutator_optim = optim.Adam(self.mutator.parameters(), lr=mutator_lr)
self.entropy_weight = entropy_weight
self.skip_weight = skip_weight
self.baseline_decay = baseline_decay
self.baseline = 0.
self.mutator_steps_aggregate = mutator_steps_aggregate
self.mutator_steps = mutator_steps
self.aux_weight = aux_weight
n_train = len(self.dataset_train)
split = n_train // 10
indices = list(range(n_train))
train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:-split])
valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[-split:])
self.train_loader = torch.utils.data.DataLoader(self.dataset_train,
batch_size=batch_size,
sampler=train_sampler,
num_workers=workers)
self.valid_loader = torch.utils.data.DataLoader(self.dataset_train,
batch_size=batch_size,
sampler=valid_sampler,
num_workers=workers)
self.test_loader = torch.utils.data.DataLoader(self.dataset_valid,
batch_size=batch_size,
num_workers=workers)
def train_one_epoch(self, epoch):
# Sample model and train
self.model.train()
self.mutator.eval()
meters = AverageMeterGroup()
for step, (x, y) in enumerate(self.train_loader):
x, y = x.to(self.device), y.to(self.device)
self.optimizer.zero_grad()
with torch.no_grad():
self.mutator.reset()
logits = self.model(x)
if isinstance(logits, tuple):
logits, aux_logits = logits
aux_loss = self.loss(aux_logits, y)
else:
aux_loss = 0.
metrics = self.metrics(logits, y)
loss = self.loss(logits, y)
loss = loss + self.aux_weight * aux_loss
loss.backward()
self.optimizer.step()
metrics["loss"] = loss.item()
meters.update(metrics)
if self.log_frequency is not None and step % self.log_frequency == 0:
logger.info("Model Epoch [%s/%s] Step [%s/%s] %s", epoch + 1,
self.num_epochs, step + 1, len(self.train_loader), meters)
# Train sampler (mutator)
self.model.eval()
self.mutator.train()
meters = AverageMeterGroup()
mutator_step, total_mutator_steps = 0, self.mutator_steps * self.mutator_steps_aggregate
while mutator_step < total_mutator_steps:
for step, (x, y) in enumerate(self.valid_loader):
x, y = x.to(self.device), y.to(self.device)
self.mutator.reset()
with torch.no_grad():
logits = self.model(x)
metrics = self.metrics(logits, y)
reward = self.reward_function(logits, y)
if self.entropy_weight is not None:
reward += self.entropy_weight * self.mutator.sample_entropy
self.baseline = self.baseline * self.baseline_decay + reward * (1 - self.baseline_decay)
self.baseline = self.baseline.detach().item()
loss = self.mutator.sample_log_prob * (reward - self.baseline)
if self.skip_weight:
loss += self.skip_weight * self.mutator.sample_skip_penalty
metrics["reward"] = reward
metrics["loss"] = loss.item()
metrics["ent"] = self.mutator.sample_entropy.item()
metrics["baseline"] = self.baseline
metrics["skip"] = self.mutator.sample_skip_penalty
loss = loss / self.mutator_steps_aggregate
loss.backward()
meters.update(metrics)
if mutator_step % self.mutator_steps_aggregate == 0:
self.mutator_optim.step()
self.mutator_optim.zero_grad()
if self.log_frequency is not None and step % self.log_frequency == 0:
logger.info("RL Epoch [%s/%s] Step [%s/%s] %s", epoch + 1, self.num_epochs,
mutator_step // self.mutator_steps_aggregate + 1, self.mutator_steps, meters)
mutator_step += 1
if mutator_step >= total_mutator_steps:
break
def validate_one_epoch(self, epoch):
pass
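# A minimal usage sketch under assumed inputs: `model`, `loss`, `accuracy`,
# `reward_fn`, `optimizer`, `dataset_train`, and `dataset_valid` are placeholders the
# caller must supply, and it is assumed that the base Trainer class exposes a train()
# entry point (as in the NNI NAS examples).
#
# trainer = EnasTrainer(model,
#                       loss=loss,
#                       metrics=accuracy,
#                       reward_function=reward_fn,
#                       optimizer=optimizer,
#                       num_epochs=310,
#                       dataset_train=dataset_train,
#                       dataset_valid=dataset_valid,
#                       batch_size=128,
#                       log_frequency=10)
# trainer.train()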
|
"""simple gui for library.
Copyright (c) 2022 Ali Farzanrad <ali_farzanrad@riseup.net>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
from os import environ
from pathlib import Path
from sys import argv
from tkinter import N, W, S, E, Listbox, StringVar, Tk
from tkinter.ttk import Button, Entry, Frame, Label
from cp2077save import SaveFile
SEARCH_PATH = "Saved Games", "CD Projekt Red", "Cyberpunk 2077"
HOME = environ.get("USERPROFILE") if len(argv) < 2 else argv[1]
HOME = Path(HOME or ".").resolve(strict=True)
if HOME.joinpath(*SEARCH_PATH).is_dir():
HOME = HOME.joinpath(*SEARCH_PATH)
elif HOME.joinpath(*SEARCH_PATH[1:]).is_dir():
HOME = HOME.joinpath(*SEARCH_PATH[1:])
elif (HOME / SEARCH_PATH[-1]).is_dir():
HOME = HOME / SEARCH_PATH[-1]
if HOME.is_file():
HOME = HOME.parent
if not HOME.is_dir():
    raise Exception("could not locate the Cyberpunk 2077 save directory")
def get_savefiles():
res = []
for item in HOME.iterdir():
try:
item = SaveFile.summary(item)
except Exception:
pass
else:
res.append((item.date, item.time, item))
return [item[-1] for item in sorted(res, reverse=True)]
if (HOME / SaveFile.NAME).is_file() and not get_savefiles():
HOME = HOME.parent
class Window:
TITLE = ""
WIDTH = 600
HEIGHT = 400
def __init__(self):
self.savefiles = tuple(get_savefiles())
self.root = root = Tk()
root.title(self.TITLE)
root.minsize(self.WIDTH // 2, self.HEIGHT // 2)
root.geometry("%dx%d" % (self.WIDTH, self.HEIGHT))
root.rowconfigure(0, weight=1)
root.columnconfigure(0, weight=1)
frm = Frame(root, padding=10)
frm.grid(row=0, column=0, sticky=(N, W, S, E))
self._vars = {}
self._savefile = None
self.init(frm)
root.mainloop()
@property
def savefile(self):
summary = self.selected_savefile()
if summary is None:
self._savefile = None
return None
res = self._savefile
if res is None or res.path != summary.path:
try:
res = SaveFile(summary.path)
except Exception:
res = None
self._savefile = res
return res
def vars(self, name, *args):
all_vars = self._vars
if name not in all_vars:
all_vars[name] = StringVar()
res = all_vars[name]
if args:
value, *default = args
if len(default) == 1:
default = default[0]
if value is None:
res.set(default or "")
else:
res.set(value)
return res
def savefile_selectbox(self, parent, row, col, **kw):
var = StringVar(value=tuple(map(str, self.savefiles)))
lbox = Listbox(parent, listvariable=var)
lbox.grid(row=row, column=col, sticky=(N, W, S, E), **kw)
self._savefile_selectbox = lbox
for i in range(0, len(self.savefiles), 2):
lbox.itemconfigure(i, background="#f0f0ff")
return lbox
def select_savefile(self, ind):
savefiles = self.savefiles
n = len(savefiles)
try:
if ind not in range(n):
ind = savefiles.index(ind)
except Exception:
ind = -1
try:
lbox = self._savefile_selectbox
if ind > 0:
lbox.selection_clear(0, ind - 1)
if ind < n - 1:
lbox.selection_clear(ind + 1, n - 1)
if ind >= 0:
lbox.selection_set(ind)
except Exception:
pass
def selected_savefile(self):
ind = None
try:
ind = self._savefile_selectbox.curselection()[-1]
except Exception:
pass
try:
return self.savefiles[ind]
except Exception:
return None
def ro_label_entry(self, parent, title, variable):
lbl = Label(parent, text=title)
lbl.grid(column=0, sticky=E)
row = lbl.grid_info()["row"]
Entry(
parent, textvariable=self.vars(variable), state=["readonly"]
).grid(row=row, column=1, sticky=(W, E))
def savefile_detailbox(self, parent, row, col, **kw):
frm = Frame(parent, padding=10)
frm.grid(row=row, column=col, sticky=(N, W, S, E), **kw)
self.ro_label_entry(frm, "Name:", "name")
self.ro_label_entry(frm, "Game Version:", "version")
self.ro_label_entry(frm, "Save Date:", "date")
self.ro_label_entry(frm, "Save Time:", "time")
return frm
def update_savefile_summary(self, summary=None):
setv = self.vars
setv("name", summary and summary.name)
setv("version", summary and f"{summary.version / 1000:g}")
setv("date", summary and summary.date)
setv("time", summary and summary.time)
class DatamineVirtuosoFixer(Window):
TITLE = "Cyberpunk Datamine Virtuoso Fixer"
def init(self, top):
top.rowconfigure(0, weight=1)
top.columnconfigure(0, weight=1)
top.columnconfigure(1, weight=1)
lbox = self.savefile_selectbox(top, 0, 0, rowspan=2)
right = Frame(top, padding=10)
right.grid(row=0, column=1, sticky=(N, W, E))
right.rowconfigure(0, weight=1)
right.columnconfigure(0, weight=1)
right.columnconfigure(1, weight=1)
dfrm = self.savefile_detailbox(right, 0, 0, columnspan=2)
self.ro_label_entry(dfrm, "failedShardDrops:", "f_shard_drops")
self.btn_load = Button(
right, text="Load File", command=self.load_file
)
self.btn_fix = Button(
right, text="Fix File", command=self.fix_file
)
self.btn_load.grid(row=1, column=0)
self.btn_fix.grid(row=1, column=1)
self.select_savefile(0)
self.selection_changed()
lbox.bind("<<ListboxSelect>>", self.selection_changed)
def selection_changed(self, *args):
enable = "!disabled"
summary = self.selected_savefile()
if summary is None:
enable = enable[1:]
self.btn_load.state([enable])
self.btn_fix.state(["disabled"])
self.update_savefile_summary(summary)
self.vars("f_shard_drops", "load to find")
def load_file(self):
self.btn_load.state(["disabled"])
self._savefile = None
if self.savefile is None:
self.btn_load.state(["!disabled"])
self.vars("f_shard_drops", "failed to load :-(")
else:
try:
nodes = self.savefile.nodes
self.node = nodes.ScriptableSystemsContainer
self.config = self.node.__enter__()
res = self.config.DataTrackingSystem.failedShardDrops
except Exception:
self.vars("f_shard_drops", "failed to locate data :-(")
raise
if res:
self.vars("f_shard_drops", res)
self.btn_fix.state(["!disabled"])
else:
self.vars("f_shard_drops", "0 ; no need to fix")
def fix_file(self):
self.btn_load.state(["disabled"])
self.btn_fix.state(["disabled"])
try:
self.config.DataTrackingSystem.failedShardDrops = 0
self.node.__exit__(None, None, None)
except Exception:
self.vars("f_shard_drops", "failed to change field :-(")
del self.config, self.node
raise
del self.config, self.node
try:
self.savefile.save()
except Exception:
self.vars("f_shard_drops", "could not save file :-(")
raise
self.vars("f_shard_drops", "0 ; file has been changed :-)")
|
"""Init command for Ginpar projects.
This module implements the initialization command for the ginpar static content
generator.
`init` will prompt for a series of values to write the site configuration file.
Examples
--------
To initialize a project the standard way, prompting for the configuration values::
ginpar init
To skip the prompts and initialize the project with the default values::
ginpar init --quick
ginpar init --q
To force the initialization in case there is a directory with the same name
of the project to initialize::
ginpar init --force
ginpar init -f
"""
import os
import click
import yaml
from ginpar.utils.echo import info, echo, success, error, alert
from ginpar.utils.files import create_file, create_folder, try_remove
from ginpar.utils.strings import space_to_kebab
def prompt_site_config(quick):
"""Echo the prompts and create the configuration dict.
Echo the instructions and configuration fields, store each input,
and create a dictionary containing those values.
Parameters
----------
quick : bool
        Return the default values immediately if True.
Returns
-------
dict
Used to generate the site configuration file.
"""
site = {
"author": "David Omar",
"sitename": "My site",
"description": "This is a Ginpar project",
"url": "/",
"theme": "davidomarf/gart",
"content_path": "sketches",
"build_path": "public",
}
if quick:
return site
info("Welcome to ginpar! We'll ask for some values to initialize your project.")
echo("")
site["sitename"] = click.prompt("Site name", default=site["sitename"])
site["description"] = click.prompt("Description", default=site["description"])
site["author"] = click.prompt("Author", default=site["author"])
site["url"] = click.prompt("url", default=site["url"])
info("\nIf you're unsure about the next prompts, accept the defaults")
echo("")
site["theme"] = click.prompt("Theme", default=site["theme"])
site["content_path"] = click.prompt("Sketches path", default=site["content_path"])
site["build_path"] = click.prompt("Build path", default=site["build_path"])
return site
def init(force, quick):
"""Main function of the module. This is what `ginpar init` calls.
Parameters
----------
force : bool
Remove conflicting files when true.
quick : bool
Skip prompts when true.
"""
if force:
alert("You're forcing the initialization.")
alert("This will replace any existent file relevant to the project.")
click.confirm("Do you want to proceed?", abort=True)
site = prompt_site_config(quick)
path = space_to_kebab(site["sitename"]).lower()
content_path = os.path.join(path, site["content_path"])
config_yaml = os.path.join(path, "config.yaml")
echo("\n---\n")
if force:
echo("\n---\n")
try_remove(path)
echo("\n---\n")
create_folder(content_path)
with open(config_yaml, "w") as file:
yaml.dump(site, file)
file.write("scripts:\n p5:\n https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.9.0/p5.min.js")
echo("\n---\n")
success(
"Done!\nRun `ginpar serve` or `ginpar build` and see your new site in action!\n"
)
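# A programmatic usage sketch (the CLI normally invokes this through click):
# initialize a project non-interactively with the default configuration values.
#
# if __name__ == "__main__":
#     init(force=False, quick=True)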
|
"""FFmpeg writer tests of moviepy."""
import multiprocessing
import os
from PIL import Image
import pytest
from moviepy.video.compositing.concatenate import concatenate_videoclips
from moviepy.video.io.ffmpeg_writer import ffmpeg_write_image, ffmpeg_write_video
from moviepy.video.io.gif_writers import write_gif
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.video.tools.drawing import color_gradient
from moviepy.video.VideoClip import BitmapClip, ColorClip
@pytest.mark.parametrize(
"with_mask",
(False, True),
ids=("with_mask=False", "with_mask=True"),
)
@pytest.mark.parametrize(
"write_logfile",
(False, True),
ids=("write_logfile=False", "write_logfile=True"),
)
@pytest.mark.parametrize(
("codec", "is_valid_codec", "ext"),
(
pytest.param(
"libcrazyfoobar", False, ".mp4", id="codec=libcrazyfoobar-ext=.mp4"
),
pytest.param(None, True, ".mp4", id="codec=default-ext=.mp4"),
pytest.param("libtheora", False, ".avi", id="codec=libtheora-ext=.mp4"),
),
)
@pytest.mark.parametrize(
"bitrate",
(None, "5000k"),
ids=("bitrate=None", "bitrate=5000k"),
)
@pytest.mark.parametrize(
"threads",
(None, multiprocessing.cpu_count()),
ids=("threads=None", "threads=multiprocessing.cpu_count()"),
)
def test_ffmpeg_write_video(
util,
codec,
is_valid_codec,
ext,
write_logfile,
with_mask,
bitrate,
threads,
):
filename = os.path.join(util.TMP_DIR, f"moviepy_ffmpeg_write_video{ext}")
if os.path.isfile(filename):
try:
os.remove(filename)
except PermissionError:
pass
logfile_name = filename + ".log"
if os.path.isfile(logfile_name):
os.remove(logfile_name)
clip = BitmapClip([["R"], ["G"], ["B"]], fps=10).with_duration(0.3)
if with_mask:
clip = clip.with_mask(
BitmapClip([["W"], ["O"], ["O"]], fps=10, is_mask=True).with_duration(0.3)
)
kwargs = dict(
logger=None,
write_logfile=write_logfile,
with_mask=with_mask,
)
if codec is not None:
kwargs["codec"] = codec
if bitrate is not None:
kwargs["bitrate"] = bitrate
if threads is not None:
kwargs["threads"] = threads
ffmpeg_write_video(clip, filename, 10, **kwargs)
if is_valid_codec:
assert os.path.isfile(filename)
final_clip = VideoFileClip(filename)
r, g, b = final_clip.get_frame(0)[0][0]
assert r == 254
assert g == 0
assert b == 0
r, g, b = final_clip.get_frame(0.1)[0][0]
assert r == (0 if not with_mask else 1)
assert g == (255 if not with_mask else 1)
assert b == 1
r, g, b = final_clip.get_frame(0.2)[0][0]
assert r == 0
assert g == 0
assert b == (255 if not with_mask else 0)
if write_logfile:
assert os.path.isfile(logfile_name)
@pytest.mark.parametrize(
("size", "logfile", "pixel_format", "expected_result"),
(
pytest.param(
(5, 1),
False,
None,
[[(0, 255, 0), (51, 204, 0), (102, 153, 0), (153, 101, 0), (204, 50, 0)]],
id="size=(5, 1)",
),
pytest.param(
(2, 1), False, None, [[(0, 255, 0), (51, 204, 0)]], id="size=(2, 1)"
),
pytest.param(
(2, 1), True, None, [[(0, 255, 0), (51, 204, 0)]], id="logfile=True"
),
pytest.param(
(2, 1),
False,
"invalid",
(OSError, "MoviePy error: FFMPEG encountered the following error"),
id="pixel_format=invalid-OSError",
),
),
)
def test_ffmpeg_write_image(util, size, logfile, pixel_format, expected_result):
filename = os.path.join(util.TMP_DIR, "moviepy_ffmpeg_write_image.png")
if os.path.isfile(filename):
try:
os.remove(filename)
except PermissionError:
pass
image_array = color_gradient(
size,
(0, 0),
p2=(5, 0),
color_1=(255, 0, 0),
color_2=(0, 255, 0),
)
if hasattr(expected_result[0], "__traceback__"):
with pytest.raises(expected_result[0]) as exc:
ffmpeg_write_image(
filename,
image_array,
logfile=logfile,
pixel_format=pixel_format,
)
assert expected_result[1] in str(exc.value)
return
else:
ffmpeg_write_image(
filename,
image_array,
logfile=logfile,
pixel_format=pixel_format,
)
assert os.path.isfile(filename)
if logfile:
assert os.path.isfile(filename + ".log")
os.remove(filename + ".log")
im = Image.open(filename, mode="r")
for i in range(im.width):
for j in range(im.height):
assert im.getpixel((i, j)) == expected_result[j][i]
@pytest.mark.parametrize("loop", (None, 2), ids=("loop=None", "loop=2"))
@pytest.mark.parametrize(
"opt",
(False, "OptimizeTransparency"),
ids=("opt=False", "opt=OptimizeTransparency"),
)
@pytest.mark.parametrize("clip_class", ("BitmapClip", "ColorClip"))
@pytest.mark.parametrize(
"with_mask", (False, True), ids=("with_mask=False", "with_mask=True")
)
@pytest.mark.parametrize("pixel_format", ("invalid", None))
def test_write_gif(util, clip_class, opt, loop, with_mask, pixel_format):
filename = os.path.join(util.TMP_DIR, "moviepy_write_gif.gif")
if os.path.isfile(filename):
try:
os.remove(filename)
except PermissionError:
pass
fps = 10
if clip_class == "BitmapClip":
original_clip = BitmapClip([["R"], ["G"], ["B"]], fps=fps).with_duration(0.3)
else:
original_clip = concatenate_videoclips(
[
ColorClip(
(1, 1),
color=color,
)
.with_duration(0.1)
.with_fps(fps)
for color in [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
]
)
if with_mask:
original_clip = original_clip.with_mask(
ColorClip((1, 1), color=1, is_mask=True).with_fps(fps).with_duration(0.3)
)
kwargs = {}
if pixel_format is not None:
kwargs["pixel_format"] = pixel_format
write_gif(
original_clip,
filename,
fps=fps,
with_mask=with_mask,
program="ffmpeg",
logger=None,
opt=opt,
loop=loop,
**kwargs,
)
if pixel_format != "invalid":
final_clip = VideoFileClip(filename)
r, g, b = final_clip.get_frame(0)[0][0]
assert r == 252
assert g == 0
assert b == 0
r, g, b = final_clip.get_frame(0.1)[0][0]
assert r == 0
assert g == 252
assert b == 0
r, g, b = final_clip.get_frame(0.2)[0][0]
assert r == 0
assert g == 0
assert b == 255
assert final_clip.duration == (loop or 1) * round(original_clip.duration, 6)
|
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
"""
Grasping policies
Author: Jeff Mahler
"""
from abc import ABCMeta, abstractmethod
import pickle as pkl
import math
import os
from time import time
import copy
import numpy as np
from sklearn.mixture import GaussianMixture
import scipy.ndimage.filters as snf
import scipy.stats as ss
import matplotlib.pyplot as plt
import autolab_core.utils as utils
from autolab_core import Point, Logger
from perception import BinaryImage, ColorImage, DepthImage, RgbdImage, SegmentationImage, CameraIntrinsics
from visualization import Visualizer2D as vis
from gqcnn.grasping import Grasp2D, SuctionPoint2D, MultiSuctionPoint2D, ImageGraspSamplerFactory, GraspQualityFunctionFactory, GQCnnQualityFunction, GraspConstraintFnFactory
from gqcnn.utils import GripperMode, NoValidGraspsException
FIGSIZE = 16
SEED = 5234709
class RgbdImageState(object):
"""State to encapsulate RGB-D images."""
def __init__(self, rgbd_im, camera_intr,
segmask=None,
obj_segmask=None,
fully_observed=None):
"""
Parameters
----------
rgbd_im : :obj:`perception.RgbdImage`
an RGB-D image to plan grasps on
camera_intr : :obj:`perception.CameraIntrinsics`
intrinsics of the RGB-D camera
segmask : :obj:`perception.BinaryImage`
segmentation mask for the image
obj_segmask : :obj:`perception.SegmentationImage`
segmentation mask for the different objects in the image
        fully_observed : :obj:`object`
representation of the fully observed state
"""
self.rgbd_im = rgbd_im
self.camera_intr = camera_intr
self.segmask = segmask
self.obj_segmask = obj_segmask
self.fully_observed = fully_observed
def save(self, save_dir):
""" Save to a directory.
Parameters
----------
save_dir : str
the directory to save to
"""
if not os.path.exists(save_dir):
os.mkdir(save_dir)
color_image_filename = os.path.join(save_dir, 'color.png')
depth_image_filename = os.path.join(save_dir, 'depth.npy')
camera_intr_filename = os.path.join(save_dir, 'camera.intr')
segmask_filename = os.path.join(save_dir, 'segmask.npy')
obj_segmask_filename = os.path.join(save_dir, 'obj_segmask.npy')
state_filename = os.path.join(save_dir, 'state.pkl')
self.rgbd_im.color.save(color_image_filename)
self.rgbd_im.depth.save(depth_image_filename)
self.camera_intr.save(camera_intr_filename)
if self.segmask is not None:
self.segmask.save(segmask_filename)
if self.obj_segmask is not None:
self.obj_segmask.save(obj_segmask_filename)
if self.fully_observed is not None:
pkl.dump(self.fully_observed, open(state_filename, 'wb'))
@staticmethod
def load(save_dir):
""" Load an :obj:`RGBDImageState`.
Parameters
----------
save_dir : str
the directory to load from
"""
if not os.path.exists(save_dir):
raise ValueError('Directory %s does not exist!' %(save_dir))
color_image_filename = os.path.join(save_dir, 'color.png')
depth_image_filename = os.path.join(save_dir, 'depth.npy')
camera_intr_filename = os.path.join(save_dir, 'camera.intr')
segmask_filename = os.path.join(save_dir, 'segmask.npy')
obj_segmask_filename = os.path.join(save_dir, 'obj_segmask.npy')
state_filename = os.path.join(save_dir, 'state.pkl')
camera_intr = CameraIntrinsics.load(camera_intr_filename)
color = ColorImage.open(color_image_filename, frame=camera_intr.frame)
depth = DepthImage.open(depth_image_filename, frame=camera_intr.frame)
segmask = None
if os.path.exists(segmask_filename):
segmask = BinaryImage.open(segmask_filename, frame=camera_intr.frame)
obj_segmask = None
if os.path.exists(obj_segmask_filename):
obj_segmask = SegmentationImage.open(obj_segmask_filename, frame=camera_intr.frame)
fully_observed = None
if os.path.exists(state_filename):
fully_observed = pkl.load(open(state_filename, 'rb'))
return RgbdImageState(RgbdImage.from_color_and_depth(color, depth),
camera_intr,
segmask=segmask,
obj_segmask=obj_segmask,
fully_observed=fully_observed)
class GraspAction(object):
""" Action to encapsulate grasps.
"""
def __init__(self, grasp, q_value, image=None, policy_name=None):
"""
Parameters
----------
        grasp : :obj:`Grasp2D` or :obj:`SuctionPoint2D`
2D grasp to wrap
q_value : float
grasp quality
image : :obj:`perception.DepthImage`
depth image corresponding to grasp
policy_name : str
policy name
"""
self.grasp = grasp
self.q_value = q_value
self.image = image
self.policy_name = policy_name
def save(self, save_dir):
""" Save grasp action.
Parameters
----------
save_dir : str
directory to save the grasp action to
"""
if not os.path.exists(save_dir):
os.mkdir(save_dir)
grasp_filename = os.path.join(save_dir, 'grasp.pkl')
q_value_filename = os.path.join(save_dir, 'pred_robustness.pkl')
image_filename = os.path.join(save_dir, 'tf_image.npy')
pkl.dump(self.grasp, open(grasp_filename, 'wb'))
pkl.dump(self.q_value, open(q_value_filename, 'wb'))
if self.image is not None:
self.image.save(image_filename)
@staticmethod
def load(save_dir):
""" Load a saved grasp action.
Parameters
----------
save_dir : str
directory of the saved grasp action
Returns
-------
:obj:`GraspAction`
loaded grasp action
"""
if not os.path.exists(save_dir):
raise ValueError('Directory %s does not exist!' %(save_dir))
grasp_filename = os.path.join(save_dir, 'grasp.pkl')
q_value_filename = os.path.join(save_dir, 'pred_robustness.pkl')
image_filename = os.path.join(save_dir, 'tf_image.npy')
grasp = pkl.load(open(grasp_filename, 'rb'))
q_value = pkl.load(open(q_value_filename, 'rb'))
image = None
if os.path.exists(image_filename):
image = DepthImage.open(image_filename)
return GraspAction(grasp, q_value, image)
class Policy(object):
""" Abstract policy class. """
__metaclass__ = ABCMeta
def __call__(self, state):
""" Execute the policy on a state. """
return self.action(state)
@abstractmethod
def action(self, state):
""" Returns an action for a given state. """
pass
class GraspingPolicy(Policy):
""" Policy for robust grasping with Grasp Quality Convolutional Neural Networks (GQ-CNN). """
def __init__(self, config, init_sampler=True):
"""
Parameters
----------
config : dict
python dictionary of parameters for the policy
init_sampler : bool
whether or not to initialize the grasp sampler
Notes
-----
Required configuration parameters are specified in Other Parameters
Other Parameters
----------------
sampling : dict
dictionary of parameters for grasp sampling, see gqcnn/image_grasp_sampler.py
gqcnn_model : str
string path to a trained GQ-CNN model see gqcnn/neural_networks.py
"""
# store parameters
self._config = config
self._gripper_width = 0.05
if 'gripper_width' in config.keys():
self._gripper_width = config['gripper_width']
# set the logging dir and possibly log file
self._logging_dir = None
log_file = None
if 'logging_dir' in self.config.keys():
self._logging_dir = self.config['logging_dir']
if not os.path.exists(self._logging_dir):
os.makedirs(self._logging_dir)
log_file = os.path.join(self._logging_dir, 'policy.log')
# setup logger
self._logger = Logger.get_logger(self.__class__.__name__, log_file=log_file, global_log_file=True)
# init grasp sampler
if init_sampler:
self._sampling_config = config['sampling']
self._sampling_config['gripper_width'] = self._gripper_width
if 'crop_width' in config['metric'].keys() and 'crop_height' in config['metric'].keys():
pad = max(
math.ceil(np.sqrt(2) * (float(config['metric']['crop_width']) / 2)),
math.ceil(np.sqrt(2) * (float(config['metric']['crop_height']) / 2))
)
self._sampling_config['min_dist_from_boundary'] = pad
self._sampling_config['gripper_width'] = self._gripper_width
sampler_type = self._sampling_config['type']
self._grasp_sampler = ImageGraspSamplerFactory.sampler(sampler_type,
self._sampling_config)
# init constraint function
self._grasp_constraint_fn = None
if 'constraints' in self._config.keys():
self._constraint_config = self._config['constraints']
constraint_type = self._constraint_config['type']
self._grasp_constraint_fn = GraspConstraintFnFactory.constraint_fn(constraint_type,
self._constraint_config)
# init grasp quality function
self._metric_config = config['metric']
metric_type = self._metric_config['type']
self._grasp_quality_fn = GraspQualityFunctionFactory.quality_function(metric_type, self._metric_config)
@property
def config(self):
""" Returns the policy configuration parameters.
Returns
-------
dict
python dictionary of the policy configuration parameters
"""
return self._config
@property
def grasp_sampler(self):
""" Returns the grasp sampler.
Returns
-------
:obj:`gqcnn.grasping.image_grasp_sampler.ImageGraspSampler`
the grasp sampler
"""
return self._grasp_sampler
@property
def grasp_quality_fn(self):
""" Returns the grasp quality function.
Returns
-------
:obj:`gqcnn.grasping.grasp_quality_function.GraspQualityFunction`
the grasp quality function
"""
return self._grasp_quality_fn
@property
def grasp_constraint_fn(self):
""" Returns the grasp constraint function.
Returns
-------
:obj:`gqcnn.grasping.constraint_fn.GraspConstraintFn`
            the grasp constraint function
"""
return self._grasp_constraint_fn
@property
def gqcnn(self):
""" Returns the GQ-CNN.
Returns
-------
:obj:`gqcnn.model.tf.GQCNNTF`
the GQ-CNN model
"""
return self._gqcnn
def set_constraint_fn(self, constraint_fn):
""" Sets the grasp constraint function.
Parameters
----------
        constraint_fn : :obj:`gqcnn.grasping.constraint_fn.GraspConstraintFn`
            the grasp constraint function
"""
self._grasp_constraint_fn = constraint_fn
def action(self, state):
""" Returns an action for a given state.
Parameters
----------
state : :obj:`RgbdImageState`
the RGB-D image state to plan grasps on
Returns
-------
:obj:`GraspAction`
the planned grasp action
"""
# save state
if self._logging_dir is not None:
policy_id = utils.gen_experiment_id()
policy_dir = os.path.join(self._logging_dir, 'policy_output_%s' % (policy_id))
while os.path.exists(policy_dir):
policy_id = utils.gen_experiment_id()
policy_dir = os.path.join(self._logging_dir, 'policy_output_%s' % (policy_id))
self._policy_dir = policy_dir
os.mkdir(self._policy_dir)
state_dir = os.path.join(self._policy_dir, 'state')
state.save(state_dir)
# plan action
action = self._action(state)
# save action
if self._logging_dir is not None:
action_dir = os.path.join(self._policy_dir, 'action')
action.save(action_dir)
return action
@abstractmethod
def _action(self, state):
""" Returns an action for a given state.
"""
pass
def show(self, filename=None, dpi=100):
""" Show a figure.
Parameters
----------
filename : str
file to save figure to
dpi : int
dpi of figure
"""
if self._logging_dir is None:
vis.show()
else:
filename = os.path.join(self._policy_dir, filename)
vis.savefig(filename, dpi=dpi)
class UniformRandomGraspingPolicy(GraspingPolicy):
""" Returns a grasp uniformly at random. """
def __init__(self, config):
"""
Parameters
----------
config : dict
python dictionary of policy configuration parameters
filters : dict
python dictionary of functions to apply to filter invalid grasps
"""
GraspingPolicy.__init__(self, config)
self._num_grasp_samples = 1
self._grasp_center_std = 0.0
if 'grasp_center_std' in config.keys():
self._grasp_center_std = config['grasp_center_std']
def _action(self, state):
""" Plans the grasp with the highest probability of success on
the given RGB-D image.
Attributes
----------
state : :obj:`RgbdImageState`
image to plan grasps on
Returns
-------
:obj:`GraspAction`
grasp to execute
"""
# check valid input
if not isinstance(state, RgbdImageState):
raise ValueError('Must provide an RGB-D image state.')
# parse state
rgbd_im = state.rgbd_im
camera_intr = state.camera_intr
segmask = state.segmask
# sample grasps
grasps = self._grasp_sampler.sample(rgbd_im, camera_intr,
self._num_grasp_samples,
segmask=segmask,
visualize=self.config['vis']['grasp_sampling'],
constraint_fn=self._grasp_constraint_fn,
seed=None)
num_grasps = len(grasps)
if num_grasps == 0:
self._logger.warning('No valid grasps could be found')
raise NoValidGraspsException()
# set grasp
grasp = grasps[0]
# perturb grasp
if self._grasp_center_std > 0.0:
grasp_center_rv = ss.multivariate_normal(grasp.center.data, cov=self._grasp_center_std**2)
grasp.center.data = grasp_center_rv.rvs(size=1)[0]
# form tensors
return GraspAction(grasp, 0.0, state.rgbd_im.depth)
class RobustGraspingPolicy(GraspingPolicy):
""" Samples a set of grasp candidates in image space,
ranks the grasps by the predicted probability of success from a GQ-CNN,
and returns the grasp with the highest probability of success.
"""
def __init__(self, config, filters=None):
"""
Parameters
----------
config : dict
python dictionary of policy configuration parameters
filters : dict
python dictionary of functions to apply to filter invalid grasps
Notes
-----
Required configuration dictionary parameters are specified in Other Parameters
Other Parameters
----------------
num_grasp_samples : int
number of grasps to sample
gripper_width : float, optional
width of the gripper in meters
logging_dir : str, optional
directory in which to save the sampled grasps and input images
"""
GraspingPolicy.__init__(self, config)
self._parse_config()
self._filters = filters
def _parse_config(self):
""" Parses the parameters of the policy. """
self._num_grasp_samples = self.config['sampling']['num_grasp_samples']
self._max_grasps_filter = 1
if 'max_grasps_filter' in self.config.keys():
self._max_grasps_filter = self.config['max_grasps_filter']
self._gripper_width = np.inf
if 'gripper_width' in self.config.keys():
self._gripper_width = self.config['gripper_width']
def select(self, grasps, q_value):
""" Selects the grasp with the highest probability of success.
Can override for alternate policies (e.g. epsilon greedy).
Parameters
----------
grasps : list
python list of :obj:`gqcnn.grasping.Grasp2D` or :obj:`gqcnn.grasping.SuctionPoint2D` grasps to select from
q_values : list
python list of associated q-values
Returns
-------
:obj:`gqcnn.grasping.Grasp2D` or :obj:`gqcnn.grasping.SuctionPoint2D`
grasp with highest probability of success
"""
# sort grasps
num_grasps = len(grasps)
grasps_and_predictions = list(zip(np.arange(num_grasps), q_value))
grasps_and_predictions.sort(key = lambda x : x[1], reverse=True)
# return top grasps
if self._filters is None:
return grasps_and_predictions[0][0]
# filter grasps
self._logger.info('Filtering grasps')
i = 0
while i < self._max_grasps_filter and i < len(grasps_and_predictions):
index = grasps_and_predictions[i][0]
grasp = grasps[index]
valid = True
for filter_name, is_valid in self._filters.items():
valid = is_valid(grasp)
self._logger.debug('Grasp {} filter {} valid: {}'.format(i, filter_name, valid))
if not valid:
valid = False
break
if valid:
return index
i += 1
raise NoValidGraspsException('No grasps satisfied filters')
def _action(self, state):
""" Plans the grasp with the highest probability of success on
the given RGB-D image.
Attributes
----------
state : :obj:`RgbdImageState`
image to plan grasps on
Returns
-------
:obj:`GraspAction`
grasp to execute
"""
# check valid input
if not isinstance(state, RgbdImageState):
raise ValueError('Must provide an RGB-D image state.')
# parse state
rgbd_im = state.rgbd_im
camera_intr = state.camera_intr
segmask = state.segmask
# sample grasps
grasps = self._grasp_sampler.sample(rgbd_im, camera_intr,
self._num_grasp_samples,
segmask=segmask,
visualize=self.config['vis']['grasp_sampling'],
constraint_fn=self._grasp_constraint_fn,
seed=None)
num_grasps = len(grasps)
if num_grasps == 0:
self._logger.warning('No valid grasps could be found')
raise NoValidGraspsException()
# compute grasp quality
compute_start = time()
q_values = self._grasp_quality_fn(state, grasps, params=self._config)
self._logger.debug('Grasp evaluation took %.3f sec' %(time()-compute_start))
if self.config['vis']['grasp_candidates']:
# display each grasp on the original image, colored by predicted success
norm_q_values = (q_values - np.min(q_values)) / (np.max(q_values) - np.min(q_values))
vis.figure(size=(FIGSIZE,FIGSIZE))
vis.imshow(rgbd_im.depth,
vmin=self.config['vis']['vmin'],
vmax=self.config['vis']['vmax'])
for grasp, q in zip(grasps, norm_q_values):
vis.grasp(grasp, scale=1.0,
grasp_center_size=10,
show_center=False,
show_axis=True,
color=plt.cm.RdYlBu(q))
vis.title('Sampled grasps')
filename = None
if self._logging_dir is not None:
filename = os.path.join(self._logging_dir, 'grasp_candidates.png')
vis.show(filename)
# select grasp
index = self.select(grasps, q_values)
grasp = grasps[index]
q_value = q_values[index]
if self.config['vis']['grasp_plan']:
vis.figure()
vis.imshow(rgbd_im.depth,
vmin=self.config['vis']['vmin'],
vmax=self.config['vis']['vmax'])
vis.grasp(grasp, scale=2.0, show_axis=True)
vis.title('Best Grasp: d=%.3f, q=%.3f' %(grasp.depth, q_value))
vis.show()
return GraspAction(grasp, q_value, state.rgbd_im.depth)
class CrossEntropyRobustGraspingPolicy(GraspingPolicy):
""" Optimizes a set of grasp candidates in image space using the
cross entropy method:
(1) sample an initial set of candidates
(2) sort the candidates
(3) fit a GMM to the top P%
(4) re-sample grasps from the distribution
(5) repeat steps 2-4 for K iters
(6) return the best candidate from the final sample set
"""
def __init__(self, config, filters=None):
"""
Parameters
----------
config : dict
python dictionary of policy configuration parameters
filters : dict
python dictionary of functions to apply to filter invalid grasps
Notes
-----
Required configuration dictionary parameters are specified in Other Parameters
Other Parameters
----------------
num_seed_samples : int
number of candidate to sample in the initial set
num_gmm_samples : int
number of candidates to sample on each resampling from the GMMs
num_iters : int
number of sample-and-refit iterations of CEM
gmm_refit_p : float
top p-% of grasps used for refitting
gmm_component_frac : float
percentage of the elite set size used to determine number of GMM components
gmm_reg_covar : float
regularization parameters for GMM covariance matrix, enforces diversity of fitted distributions
deterministic : bool, optional
whether to set the random seed to enforce deterministic behavior
gripper_width : float, optional
width of the gripper in meters
"""
GraspingPolicy.__init__(self, config)
self._parse_config()
self._filters = filters
self._case_counter = 0
def _parse_config(self):
""" Parses the parameters of the policy. """
# cross entropy method parameters
self._num_seed_samples = self.config['num_seed_samples']
self._num_gmm_samples = self.config['num_gmm_samples']
self._num_iters = self.config['num_iters']
self._gmm_refit_p = self.config['gmm_refit_p']
self._gmm_component_frac = self.config['gmm_component_frac']
self._gmm_reg_covar = self.config['gmm_reg_covar']
self._depth_gaussian_sigma = 0.0
if 'depth_gaussian_sigma' in self.config.keys():
self._depth_gaussian_sigma = self.config['depth_gaussian_sigma']
self._max_grasps_filter = 1
if 'max_grasps_filter' in self.config.keys():
self._max_grasps_filter = self.config['max_grasps_filter']
self._max_resamples_per_iteration = 100
if 'max_resamples_per_iteration' in self.config.keys():
self._max_resamples_per_iteration = self.config['max_resamples_per_iteration']
self._max_approach_angle = np.inf
if 'max_approach_angle' in self.config.keys():
self._max_approach_angle = np.deg2rad(self.config['max_approach_angle'])
# gripper parameters
self._seed = None
if self.config['deterministic']:
self._seed = SEED
self._gripper_width = np.inf
if 'gripper_width' in self.config.keys():
self._gripper_width = self.config['gripper_width']
# affordance map visualization
self._vis_grasp_affordance_map = False
if 'grasp_affordance_map' in self.config['vis'].keys():
self._vis_grasp_affordance_map = self.config['vis']['grasp_affordance_map']
self._state_counter = 0 # used for logging state data
def select(self, grasps, q_values):
""" Selects the grasp with the highest probability of success. Can override for alternate policies (e.g. epsilon greedy).
Parameters
----------
grasps : list
python list of :obj:`gqcnn.grasping.Grasp2D` or :obj:`gqcnn.grasping.SuctionPoint2D` grasps to select from
q_values : list
python list of associated q-values
Returns
-------
:obj:`gqcnn.grasping.Grasp2D` or :obj:`gqcnn.grasping.SuctionPoint2D`
grasp with highest probability of success
"""
# sort
self._logger.info('Sorting grasps')
num_grasps = len(grasps)
if num_grasps == 0:
raise NoValidGraspsException('Zero grasps')
grasps_and_predictions = list(zip(np.arange(num_grasps), q_values))
grasps_and_predictions.sort(key = lambda x : x[1], reverse=True)
# return top grasps
if self._filters is None:
return grasps_and_predictions[0][0]
# filter grasps
self._logger.info('Filtering grasps')
i = 0
while i < self._max_grasps_filter and i < len(grasps_and_predictions):
index = grasps_and_predictions[i][0]
grasp = grasps[index]
valid = True
for filter_name, is_valid in self._filters.items():
valid = is_valid(grasp)
self._logger.debug('Grasp {} filter {} valid: {}'.format(i, filter_name, valid))
if not valid:
valid = False
break
if valid:
return index
i += 1
raise NoValidGraspsException('No grasps satisfied filters')
def _mask_predictions(self, pred_map, segmask):
self._logger.info('Masking predictions...')
assert pred_map.shape == segmask.shape, 'Prediction map shape {} does not match shape of segmask {}.'.format(pred_map.shape, segmask.shape)
preds_masked = np.zeros_like(pred_map)
nonzero_ind = np.where(segmask > 0)
preds_masked[nonzero_ind] = pred_map[nonzero_ind]
return preds_masked
def _gen_grasp_affordance_map(self, state, stride=1):
self._logger.info('Generating grasp affordance map...')
# generate grasps at points to evaluate(this is just the interface to GraspQualityFunction)
crop_candidate_start_time = time()
point_cloud_im = state.camera_intr.deproject_to_image(state.rgbd_im.depth)
normal_cloud_im = point_cloud_im.normal_cloud_im()
q_vals = []
        gqcnn_recep_h_half = self._grasp_quality_fn.gqcnn_recep_height // 2
        gqcnn_recep_w_half = self._grasp_quality_fn.gqcnn_recep_width // 2
im_h = state.rgbd_im.height
im_w = state.rgbd_im.width
for i in range(gqcnn_recep_h_half - 1, im_h - gqcnn_recep_h_half, stride):
grasps = []
for j in range(gqcnn_recep_w_half - 1, im_w - gqcnn_recep_w_half, stride):
if self.config['sampling']['type'] == 'suction': #TODO: @Vishal find a better way to find policy type
grasps.append(SuctionPoint2D(Point(np.array([j, i])), axis=-normal_cloud_im[i, j], depth=state.rgbd_im.depth[i, j], camera_intr=state.camera_intr))
else:
raise NotImplementedError('Parallel Jaw Grasp Affordance Maps Not Supported!')
q_vals.extend(self._grasp_quality_fn(state, grasps))
self._logger.info('Generating crop grasp candidates took {} sec.'.format(time() - crop_candidate_start_time))
# mask out predictions not in the segmask(we don't really care about them)
        pred_map = np.array(q_vals).reshape((im_h - gqcnn_recep_h_half * 2) // stride + 1, (im_w - gqcnn_recep_w_half * 2) // stride + 1)
tf_segmask = state.segmask.crop(im_h - gqcnn_recep_h_half * 2, im_w - gqcnn_recep_w_half * 2).resize(1.0 / stride, interp='nearest')._data.squeeze() #TODO: @Vishal don't access the raw data like this!
if tf_segmask.shape != pred_map.shape:
new_tf_segmask = np.zeros_like(pred_map)
smaller_i = min(pred_map.shape[0], tf_segmask.shape[0])
smaller_j = min(pred_map.shape[1], tf_segmask.shape[1])
new_tf_segmask[:smaller_i, :smaller_j] = tf_segmask[:smaller_i, :smaller_j]
tf_segmask = new_tf_segmask
pred_map_masked = self._mask_predictions(pred_map, tf_segmask)
return pred_map_masked
def _plot_grasp_affordance_map(self, state, affordance_map, stride=1, grasps=None, q_values=None, plot_max=True, title=None, scale=1.0, save_fname=None, save_path=None):
        gqcnn_recep_h_half = self._grasp_quality_fn.gqcnn_recep_height // 2
        gqcnn_recep_w_half = self._grasp_quality_fn.gqcnn_recep_width // 2
im_h = state.rgbd_im.height
im_w = state.rgbd_im.width
# plot
vis.figure()
tf_depth_im = state.rgbd_im.depth.crop(im_h - gqcnn_recep_h_half * 2, im_w - gqcnn_recep_w_half * 2).resize(1.0 / stride, interp='nearest')
vis.imshow(tf_depth_im)
plt.imshow(affordance_map, cmap=plt.cm.RdYlGn, alpha=0.3)
if grasps is not None:
grasps = copy.deepcopy(grasps)
for grasp, q in zip(grasps, q_values):
grasp.center.data[0] -= gqcnn_recep_w_half
grasp.center.data[1] -= gqcnn_recep_h_half
vis.grasp(grasp, scale=scale,
show_center=False,
show_axis=True,
color=plt.cm.RdYlGn(q))
if plot_max:
affordance_argmax = np.unravel_index(np.argmax(affordance_map), affordance_map.shape)
plt.scatter(affordance_argmax[1], affordance_argmax[0], c='black', marker='.', s=scale*25)
if title is not None:
vis.title(title)
if save_path is not None:
save_path = os.path.join(save_path, save_fname)
vis.show(save_path)
def action_set(self, state):
""" Plan a set of grasps with the highest probability of success on
the given RGB-D image.
Parameters
----------
state : :obj:`RgbdImageState`
image to plan grasps on
Returns
-------
python list of :obj:`gqcnn.grasping.Grasp2D` or :obj:`gqcnn.grasping.SuctionPoint2D`
grasps to execute
"""
# check valid input
#if not isinstance(state, RgbdImageState):
# raise ValueError('Must provide an RGB-D image state.')
state_output_dir = None
if self._logging_dir is not None:
state_output_dir = os.path.join(self._logging_dir, 'state_{}'.format(str(self._state_counter).zfill(5)))
if not os.path.exists(state_output_dir):
os.makedirs(state_output_dir)
self._state_counter += 1
# parse state
seed_set_start = time()
rgbd_im = state.rgbd_im
depth_im = rgbd_im.depth
camera_intr = state.camera_intr
segmask = state.segmask
if self._depth_gaussian_sigma > 0:
depth_im_filtered = depth_im.apply(snf.gaussian_filter,
sigma=self._depth_gaussian_sigma)
else:
depth_im_filtered = depth_im
point_cloud_im = camera_intr.deproject_to_image(depth_im_filtered)
normal_cloud_im = point_cloud_im.normal_cloud_im()
# vis grasp affordance map
if self._vis_grasp_affordance_map:
grasp_affordance_map = self._gen_grasp_affordance_map(state)
self._plot_grasp_affordance_map(state, grasp_affordance_map, title='Grasp Affordance Map', save_fname='affordance_map.png', save_path=state_output_dir)
if 'input_images' in self.config['vis'].keys() and self.config['vis']['input_images']:
vis.figure()
vis.subplot(1,2,1)
vis.imshow(depth_im)
vis.title('Depth')
vis.subplot(1,2,2)
vis.imshow(segmask)
vis.title('Segmask')
filename = None
if self._logging_dir is not None:
filename = os.path.join(self._logging_dir, 'input_images.png')
vis.show(filename)
# sample grasps
self._logger.info('Sampling seed set')
grasps = self._grasp_sampler.sample(rgbd_im, camera_intr,
self._num_seed_samples,
segmask=segmask,
visualize=self.config['vis']['grasp_sampling'],
constraint_fn=self._grasp_constraint_fn,
seed=self._seed)
num_grasps = len(grasps)
if num_grasps == 0:
self._logger.warning('No valid grasps could be found')
raise NoValidGraspsException()
grasp_type = 'parallel_jaw'
if isinstance(grasps[0], SuctionPoint2D):
grasp_type = 'suction'
elif isinstance(grasps[0], MultiSuctionPoint2D):
grasp_type = 'multi_suction'
self._logger.info('Sampled %d grasps' %(len(grasps)))
self._logger.info('Computing the seed set took %.3f sec' %(time() - seed_set_start))
# iteratively refit and sample
for j in range(self._num_iters):
self._logger.info('CEM iter %d' %(j))
# predict grasps
predict_start = time()
q_values = self._grasp_quality_fn(state, grasps, params=self._config)
self._logger.info('Prediction took %.3f sec' %(time()-predict_start))
# sort grasps
resample_start = time()
q_values_and_indices = list(zip(q_values, np.arange(num_grasps)))
q_values_and_indices.sort(key = lambda x : x[0], reverse=True)
if self.config['vis']['grasp_candidates']:
# display each grasp on the original image, colored by predicted success
norm_q_values = q_values #(q_values - np.min(q_values)) / (np.max(q_values) - np.min(q_values))
title = 'Sampled Grasps Iter %d' %(j)
if self._vis_grasp_affordance_map:
self._plot_grasp_affordance_map(state, grasp_affordance_map, grasps=grasps, q_values=norm_q_values, scale=2.0, title=title, save_fname='cem_iter_{}.png'.format(j), save_path=state_output_dir)
display_grasps_and_q_values = list(zip(grasps, q_values))
display_grasps_and_q_values.sort(key = lambda x: x[1])
vis.figure(size=(FIGSIZE,FIGSIZE))
vis.imshow(rgbd_im.depth,
vmin=self.config['vis']['vmin'],
vmax=self.config['vis']['vmax'])
for grasp, q in display_grasps_and_q_values:
vis.grasp(grasp, scale=2.0,
jaw_width=2.0,
show_center=False,
show_axis=True,
color=plt.cm.RdYlBu(q))
vis.title('Sampled grasps iter %d' %(j))
filename = None
if self._logging_dir is not None:
filename = os.path.join(self._logging_dir, 'cem_iter_%d.png' %(j))
vis.show(filename)
# fit elite set
elite_start = time()
num_refit = max(int(np.ceil(self._gmm_refit_p * num_grasps)), 1)
elite_q_values = [i[0] for i in q_values_and_indices[:num_refit]]
elite_grasp_indices = [i[1] for i in q_values_and_indices[:num_refit]]
elite_grasps = [grasps[i] for i in elite_grasp_indices]
elite_grasp_arr = np.array([g.feature_vec for g in elite_grasps])
if self.config['vis']['elite_grasps']:
# display each grasp on the original image, colored by predicted success
norm_q_values = (elite_q_values - np.min(elite_q_values)) / (np.max(elite_q_values) - np.min(elite_q_values))
vis.figure(size=(FIGSIZE,FIGSIZE))
vis.imshow(rgbd_im.depth,
vmin=self.config['vis']['vmin'],
vmax=self.config['vis']['vmax'])
for grasp, q in zip(elite_grasps, norm_q_values):
vis.grasp(grasp, scale=1.5, show_center=False, show_axis=True,
color=plt.cm.RdYlBu(q))
vis.title('Elite grasps iter %d' %(j))
filename = None
if self._logging_dir is not None:
filename = os.path.join(self._logging_dir, 'elite_set_iter_%d.png' %(j))
vis.show(filename)
# normalize elite set
elite_grasp_mean = np.mean(elite_grasp_arr, axis=0)
elite_grasp_std = np.std(elite_grasp_arr, axis=0)
elite_grasp_std[elite_grasp_std == 0] = 1e-6
elite_grasp_arr = (elite_grasp_arr - elite_grasp_mean) / elite_grasp_std
self._logger.info('Elite set computation took %.3f sec' %(time()-elite_start))
# fit a GMM to the top samples
num_components = max(int(np.ceil(self._gmm_component_frac * num_refit)), 1)
uniform_weights = (1.0 / num_components) * np.ones(num_components)
gmm = GaussianMixture(n_components=num_components,
weights_init=uniform_weights,
reg_covar=self._gmm_reg_covar)
train_start = time()
gmm.fit(elite_grasp_arr)
self._logger.info('GMM fitting with %d components took %.3f sec' %(num_components, time()-train_start))
# sample the next grasps
grasps = []
loop_start = time()
num_tries = 0
while len(grasps) < self._num_gmm_samples and num_tries < self._max_resamples_per_iteration:
# sample from GMM
sample_start = time()
grasp_vecs, _ = gmm.sample(n_samples=self._num_gmm_samples)
grasp_vecs = elite_grasp_std * grasp_vecs + elite_grasp_mean
self._logger.info('GMM sampling took %.3f sec' %(time()-sample_start))
# convert features to grasps and store if in segmask
for k, grasp_vec in enumerate(grasp_vecs):
feature_start = time()
if grasp_type == 'parallel_jaw':
# form grasp object
grasp = Grasp2D.from_feature_vec(grasp_vec,
width=self._gripper_width,
camera_intr=camera_intr)
elif grasp_type == 'suction':
# read depth and approach axis
u = int(min(max(grasp_vec[1], 0), depth_im.height-1))
v = int(min(max(grasp_vec[0], 0), depth_im.width-1))
grasp_depth = depth_im[u, v, 0]
# approach_axis
grasp_axis = -normal_cloud_im[u, v]
# form grasp object
grasp = SuctionPoint2D.from_feature_vec(grasp_vec,
camera_intr=camera_intr,
depth=grasp_depth,
axis=grasp_axis)
elif grasp_type == 'multi_suction':
# read depth and approach axis
u = int(min(max(grasp_vec[1], 0), depth_im.height-1))
v = int(min(max(grasp_vec[0], 0), depth_im.width-1))
grasp_depth = depth_im[u, v]
# approach_axis
grasp_axis = -normal_cloud_im[u, v]
# form grasp object
grasp = MultiSuctionPoint2D.from_feature_vec(grasp_vec,
camera_intr=camera_intr,
depth=grasp_depth,
axis=grasp_axis)
self._logger.debug('Feature vec took %.5f sec' %(time()-feature_start))
bounds_start = time()
# check in bounds
if state.segmask is None or \
(grasp.center.y >= 0 and grasp.center.y < state.segmask.height and \
grasp.center.x >= 0 and grasp.center.x < state.segmask.width and \
np.any(state.segmask[int(grasp.center.y), int(grasp.center.x)] != 0) and \
grasp.approach_angle < self._max_approach_angle) and \
(self._grasp_constraint_fn is None or self._grasp_constraint_fn(grasp)):
# check validity according to filters
grasps.append(grasp)
self._logger.debug('Bounds took %.5f sec' %(time()-bounds_start))
num_tries += 1
# check num grasps
num_grasps = len(grasps)
if num_grasps == 0:
self._logger.warning('No valid grasps could be found')
raise NoValidGraspsException()
self._logger.info('Resample loop took %.3f sec' %(time()-loop_start))
self._logger.info('Resampling took %.3f sec' %(time()-resample_start))
# predict final set of grasps
predict_start = time()
q_values = self._grasp_quality_fn(state, grasps, params=self._config)
self._logger.info('Final prediction took %.3f sec' %(time()-predict_start))
if self.config['vis']['grasp_candidates']:
# display each grasp on the original image, colored by predicted success
norm_q_values = q_values #(q_values - np.min(q_values)) / (np.max(q_values) - np.min(q_values))
title = 'Final Sampled Grasps'
if self._vis_grasp_affordance_map:
                self._plot_grasp_affordance_map(state, grasp_affordance_map, grasps=grasps, q_values=norm_q_values, scale=2.0, title=title, save_fname='final_sampled_grasps.png', save_path=state_output_dir)
display_grasps_and_q_values = list(zip(grasps, q_values))
display_grasps_and_q_values.sort(key = lambda x: x[1])
vis.figure(size=(FIGSIZE,FIGSIZE))
vis.imshow(rgbd_im.depth,
vmin=self.config['vis']['vmin'],
vmax=self.config['vis']['vmax'])
for grasp, q in display_grasps_and_q_values:
vis.grasp(grasp, scale=2.0,
jaw_width=2.0,
show_center=False,
show_axis=True,
color=plt.cm.RdYlBu(q))
            vis.title(title)
filename = None
if self._logging_dir is not None:
                filename = os.path.join(self._logging_dir, 'final_grasps.png')
vis.show(filename)
return grasps, q_values
def _action(self, state):
""" Plans the grasp with the highest probability of success on
the given RGB-D image.
Attributes
----------
state : :obj:`RgbdImageState`
image to plan grasps on
Returns
-------
:obj:`GraspAction`
grasp to execute
"""
# parse state
rgbd_im = state.rgbd_im
depth_im = rgbd_im.depth
camera_intr = state.camera_intr
segmask = state.segmask
# plan grasps
grasps, q_values = self.action_set(state)
# select grasp
index = self.select(grasps, q_values)
grasp = grasps[index]
q_value = q_values[index]
if self.config['vis']['grasp_plan']:
title = 'Best Grasp: d=%.3f, q=%.3f' %(grasp.depth, q_value)
            if self._vis_grasp_affordance_map:
                # the affordance map is not returned by action_set, so regenerate it here for plotting
                grasp_affordance_map = self._gen_grasp_affordance_map(state)
                self._plot_grasp_affordance_map(state, grasp_affordance_map, grasps=[grasp], q_values=[q_value], scale=2.0, title=title, save_fname='best_grasp.png', save_path=self._logging_dir)
else:
vis.figure()
vis.imshow(rgbd_im.depth,
vmin=self.config['vis']['vmin'],
vmax=self.config['vis']['vmax'])
vis.grasp(grasp, scale=5.0, show_center=False, show_axis=True, jaw_width=1.0, grasp_axis_width=0.2)
vis.title(title)
filename = None
if self._logging_dir is not None:
filename = os.path.join(self._logging_dir, 'planned_grasp.png')
vis.show(filename)
# form return image
image = state.rgbd_im.depth
if isinstance(self._grasp_quality_fn, GQCnnQualityFunction):
image_arr, _ = self._grasp_quality_fn.grasps_to_tensors([grasp], state)
image = DepthImage(image_arr[0,...],
frame=state.rgbd_im.frame)
# return action
action = GraspAction(grasp, q_value, image)
return action
class QFunctionRobustGraspingPolicy(CrossEntropyRobustGraspingPolicy):
""" Optimizes a set of antipodal grasp candidates in image space using the
cross entropy method with a GQ-CNN that estimates the Q-function
for use in Q-learning.
Notes
-----
Required configuration parameters are specified in Other Parameters
Other Parameters
----------------
reinit_pc1 : bool
whether or not to reinitialize the pc1 layer of the GQ-CNN
reinit_fc3: bool
whether or not to reinitialize the fc3 layer of the GQ-CNN
reinit_fc4: bool
whether or not to reinitialize the fc4 layer of the GQ-CNN
reinit_fc5: bool
whether or not to reinitialize the fc5 layer of the GQ-CNN
num_seed_samples : int
        number of candidates to sample in the initial set
num_gmm_samples : int
number of candidates to sample on each resampling from the GMMs
num_iters : int
number of sample-and-refit iterations of CEM
gmm_refit_p : float
top p-% of grasps used for refitting
gmm_component_frac : float
percentage of the elite set size used to determine number of GMM components
gmm_reg_covar : float
        regularization parameter for the GMM covariance matrix; enforces diversity of the fitted distributions
deterministic : bool, optional
whether to set the random seed to enforce deterministic behavior
gripper_width : float, optional
width of the gripper in meters
"""
def __init__(self, config):
CrossEntropyRobustGraspingPolicy.__init__(self, config)
QFunctionRobustGraspingPolicy._parse_config(self)
self._setup_gqcnn()
def _parse_config(self):
""" Parses the parameters of the policy. """
self._reinit_pc1 = self.config['reinit_pc1']
self._reinit_fc3 = self.config['reinit_fc3']
self._reinit_fc4 = self.config['reinit_fc4']
self._reinit_fc5 = self.config['reinit_fc5']
def _setup_gqcnn(self):
""" Sets up the GQ-CNN. """
# close existing session (from superclass initializer)
self.gqcnn.close_session()
# check valid output size
if self.gqcnn.fc5_out_size != 1 and not self._reinit_fc5:
raise ValueError('Q function must return scalar values')
# reinitialize layers
if self._reinit_fc5:
self.gqcnn.fc5_out_size = 1
# TODO: implement reinitialization of pc0
self.gqcnn.reinitialize_layers(self._reinit_fc3,
self._reinit_fc4,
self._reinit_fc5)
self.gqcnn.initialize_network(add_softmax=False)
class EpsilonGreedyQFunctionRobustGraspingPolicy(QFunctionRobustGraspingPolicy):
""" Optimizes a set of antipodal grasp candidates in image space
using the cross entropy method with a GQ-CNN that estimates the
Q-function for use in Q-learning, and chooses a random antipodal
grasp with probability epsilon.
Notes
-----
Required configuration parameters are specified in Other Parameters
Other Parameters
----------------
epsilon : float
"""
def __init__(self, config):
QFunctionRobustGraspingPolicy.__init__(self, config)
self._parse_config()
def _parse_config(self):
""" Parses the parameters of the policy. """
self._epsilon = self.config['epsilon']
@property
def epsilon(self):
return self._epsilon
@epsilon.setter
def epsilon(self, val):
self._epsilon = val
def greedy_action(self, state):
""" Plans the grasp with the highest probability of success on
the given RGB-D image.
Attributes
----------
state : :obj:`RgbdImageState`
image to plan grasps on
Returns
-------
:obj:`GraspAction`
grasp to execute
"""
return CrossEntropyRobustGraspingPolicy.action(self, state)
def _action(self, state):
""" Plans the grasp with the highest probability of success on
the given RGB-D image.
Attributes
----------
state : :obj:`RgbdImageState`
image to plan grasps on
Returns
-------
:obj:`GraspAction`
grasp to execute
"""
# take the greedy action with prob 1 - epsilon
if np.random.rand() > self.epsilon:
self._logger.debug('Taking greedy action')
return CrossEntropyRobustGraspingPolicy.action(self, state)
# otherwise take a random action
self._logger.debug('Taking random action')
# check valid input
if not isinstance(state, RgbdImageState):
raise ValueError('Must provide an RGB-D image state.')
# parse state
rgbd_im = state.rgbd_im
camera_intr = state.camera_intr
segmask = state.segmask
# sample random antipodal grasps
grasps = self._grasp_sampler.sample(rgbd_im, camera_intr,
self._num_seed_samples,
segmask=segmask,
visualize=self.config['vis']['grasp_sampling'],
constraint_fn=self._grasp_constraint_fn,
seed=self._seed)
num_grasps = len(grasps)
if num_grasps == 0:
self._logger.warning('No valid grasps could be found')
raise NoValidGraspsException()
# choose a grasp uniformly at random
grasp_ind = np.random.choice(num_grasps, size=1)[0]
grasp = grasps[grasp_ind]
depth = grasp.depth
# create transformed image
image_tensor, pose_tensor = self.grasps_to_tensors([grasp], state)
image = DepthImage(image_tensor[0,...])
# predict prob success
output_arr = self.gqcnn.predict(image_tensor, pose_tensor)
q_value = output_arr[0,-1]
# visualize planned grasp
if self.config['vis']['grasp_plan']:
scale_factor = float(self.gqcnn.im_width) / float(self._crop_width)
scaled_camera_intr = camera_intr.resize(scale_factor)
vis_grasp = Grasp2D(Point(image.center), 0.0, depth,
width=self._gripper_width,
camera_intr=scaled_camera_intr)
vis.figure()
vis.imshow(image)
vis.grasp(vis_grasp, scale=1.5, show_center=False, show_axis=True)
vis.title('Best Grasp: d=%.3f, q=%.3f' %(depth, q_value))
vis.show()
# return action
return GraspAction(grasp, q_value, image)
class CompositeGraspingPolicy(Policy):
"""Grasping policy composed of multiple sub-policies
Attributes
----------
policies : dict mapping str to `gqcnn.GraspingPolicy`
key-value dict mapping policy names to grasping policies
"""
def __init__(self, policies):
self._policies = policies
self._logger = Logger.get_logger(self.__class__.__name__, log_file=None, global_log_file=True)
@property
def policies(self):
return self._policies
def subpolicy(self, name):
return self._policies[name]
def set_constraint_fn(self, constraint_fn):
        for policy in self._policies.values():
policy.set_constraint_fn(constraint_fn)
class PriorityCompositeGraspingPolicy(CompositeGraspingPolicy):
def __init__(self, policies, priority_list):
# check validity
for name in priority_list:
if str(name) not in policies.keys():
raise ValueError('Policy named %s is not in the list of policies!' %(name))
self._priority_list = priority_list
CompositeGraspingPolicy.__init__(self, policies)
@property
def priority_list(self):
return self._priority_list
def action(self, state, policy_subset=None, min_q_value=-1.0):
""" Returns an action for a given state.
"""
action = None
i = 0
max_q = min_q_value
        while (action is None or max_q <= min_q_value) and i < len(self._priority_list):
name = self._priority_list[i]
if policy_subset is not None and name not in policy_subset:
i += 1
continue
self._logger.info('Planning action for sub-policy {}'.format(name))
try:
                action = self.policies[name].action(state)
action.policy_name = name
max_q = action.q_value
except NoValidGraspsException:
pass
i += 1
if action is None:
raise NoValidGraspsException()
return action
def action_set(self, state, policy_subset=None, min_q_value=-1.0):
""" Returns an action for a given state.
"""
actions = None
q_values = None
i = 0
max_q = min_q_value
        while (actions is None or max_q <= min_q_value) and i < len(self._priority_list):
name = self._priority_list[i]
if policy_subset is not None and name not in policy_subset:
i += 1
continue
self._logger.info('Planning action set for sub-policy {}'.format(name))
try:
actions, q_values = self.policies[name].action_set(state)
for action in actions:
action.policy_name = name
max_q = np.max(q_values)
except NoValidGraspsException:
pass
i += 1
if actions is None:
raise NoValidGraspsException()
return actions, q_values
class GreedyCompositeGraspingPolicy(CompositeGraspingPolicy):
def __init__(self, policies):
CompositeGraspingPolicy.__init__(self, policies)
def action(self, state, policy_subset=None, min_q_value=-1.0):
""" Returns an action for a given state.
"""
# compute all possible actions
actions = []
for name, policy in self.policies.items():
if policy_subset is not None and name not in policy_subset:
continue
try:
action = policy.action(state)
action.policy_name = name
                actions.append(action)
            except NoValidGraspsException:
pass
if len(actions) == 0:
raise NoValidGraspsException()
# rank based on q value
actions.sort(key = lambda x: x.q_value, reverse=True)
return actions[0]
def action_set(self, state, policy_subset=None, min_q_value=-1.0):
""" Returns an action for a given state.
"""
actions = []
q_values = []
for name, policy in self.policies.items():
if policy_subset is not None and name not in policy_subset:
continue
try:
action_set, q_vals = self.policies[name].action_set(state)
for action in action_set:
action.policy_name = name
actions.extend(action_set)
q_values.extend(q_vals)
except NoValidGraspsException:
continue
        if not actions:
raise NoValidGraspsException()
return actions, q_values
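# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the policy classes above): the core of the
# cross-entropy loop in `action_set`, reduced to plain numpy/sklearn on a toy
# 2-D "grasp feature" space with a synthetic quality function. The constants
# (elite fraction, component count, iteration count) and the quadratic quality
# function are assumptions chosen for the demo, not the configured values used
# by CrossEntropyRobustGraspingPolicy.
# ---------------------------------------------------------------------------
def _cem_refit_demo(num_samples=100, num_iters=3, elite_frac=0.25, seed=0):
    import numpy as np
    from sklearn.mixture import GaussianMixture
    rng = np.random.RandomState(seed)
    def quality(x):
        # toy stand-in for the GQ-CNN quality function: peaked at (0.3, -0.7)
        return -np.sum((x - np.array([0.3, -0.7])) ** 2, axis=1)
    samples = rng.uniform(-1.0, 1.0, size=(num_samples, 2))  # seed set
    for _ in range(num_iters):
        q = quality(samples)
        # keep the top-q elite set, analogous to gmm_refit_p above
        num_refit = max(int(np.ceil(elite_frac * len(samples))), 1)
        elite = samples[np.argsort(q)[::-1][:num_refit]]
        # standardize the elite set before fitting, mirroring action_set
        mean, std = elite.mean(axis=0), elite.std(axis=0)
        std[std == 0] = 1e-6
        gmm = GaussianMixture(n_components=max(num_refit // 4, 1), reg_covar=1e-6)
        gmm.fit((elite - mean) / std)
        # resample candidates from the fitted GMM and undo the standardization
        samples, _ = gmm.sample(n_samples=num_samples)
        samples = samples * std + mean
    q = quality(samples)
    return samples[int(np.argmax(q))]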
|
# This is the python file that should be run to run the program
import hash_functions
welcome_message = "###########################\n\nWelcome to SSAFuze's hash tool! Please see the options below to get you started"
supported_hashes = ['md5', 'sha1', 'sha224', 'sha256', 'sha384']
print(welcome_message)
while True:
choice = str(input('Please enter 1 to enter the hashing menu, 2 to enter the cracking menu, or type \'exit\' to quit the program: '))
if choice == '1':
session_end = False
while session_end == False:
hash_type = str(input('Please enter which hash algorithm you wish to use. If you are unsure about what this program supports, enter \'supported\'. If you wish to return to the main menu, enter \'return\': '))
if hash_type == 'supported':
print('We support: ')
for i in supported_hashes:
print(i)
elif hash_type == 'return':
session_end = True
elif hash_type in supported_hashes:
text = str(input('Please enter the text you wish to be hashed: '))
                function = getattr(hash_functions, hash_type + 'hash')
                print(str(function(text)))
else:
print('That is not recognised. Please try again')
elif choice == '2':
session_end = False
while session_end == False:
crack_type = str(input('Please enter which hash algorithm you wish to crack. If you are unsure about what this program supports, enter \'supported\'. To return to the main menu, enter \'return\': '))
if crack_type == 'supported':
print('We support: ')
for i in supported_hashes:
print(i)
elif crack_type == 'return':
session_end = True
elif crack_type in supported_hashes:
                hash_value = str(input('Please enter the hash you wish to crack: '))
                path = str(input('Please enter the path to the wordlist you wish to use: '))
                function = getattr(hash_functions, crack_type + 'crack')
                print(str(function(hash_value, path)))
else:
print('That is not recognised')
elif choice == 'exit':
break
else:
        print('That is not a recognised command, please try again')
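# ---------------------------------------------------------------------------
# Hedged sketch: the `hash_functions` module is not shown in this file, so the
# helpers below are only a guess at what the `<algo>hash` / `<algo>crack`
# callables used above might look like, based on how they are invoked. They use
# only the standard hashlib API and are illustrative, not the real module.
# ---------------------------------------------------------------------------
import hashlib
def md5hash(text):
    """Return the hex MD5 digest of the given text."""
    return hashlib.md5(text.encode('utf-8')).hexdigest()
def md5crack(target_hash, wordlist_path):
    """Return the first word in the wordlist whose MD5 digest matches target_hash."""
    with open(wordlist_path, 'r', errors='ignore') as wordlist:
        for word in wordlist:
            word = word.strip()
            if hashlib.md5(word.encode('utf-8')).hexdigest() == target_hash.lower():
                return word
    return 'No match found in wordlist'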
|
"""
Première tentative d'implémenter A* pour le projet ASD1-Labyrinthes.
On part d'une grille rectangulaire. Chaque case est un "noeud". Les
déplacements permis sont verticaux et horizontaux par pas de 1, représentant
des "arêtes" avec un coût de 1.
Tout est basé sur une grille rectangulaire.
L'objet de base est une cellule, représentée par un tuple (row, col, cost), où
(row, col) sont des coordonnées dans la grille et cost le coût réel pour
arriver jusqu'à cette cellule depuis le départ, s'il est déjà connu, None
sinon.
Author: Dalker
Start Date: 2021.04.06
"""
import logging as log
from viewer import AstarView
class Fringe():
"""
Ensemble de cellules en attente de traitement avec informations de coût.
Une cellule est un tuple (row, col, cost). Le Fringe associe à chacune
aussi un coût estimé, qui doit être fourni lorsque la cellule est ajoutée.
On doit pouvoir extraire efficacement une cellule de priorité minimale,
mais aussi chercher une cellule et modifier la priorité d'un node.
D'après nos recherches, un "Fibonacci Heap" est optimal pour ce cas, mais
pour l'instant nous utilisons un "Heap" beaucoup plus basique et facile à
manipuler, à savoir un (ou plusieurs) dict. L'implémentation de cette
classe peut être modifiée par la suite sans en modifier l'interface.
Attributs:
- cost: coût réel pour accéder à cette cellule
- heuristic: coût heuristique d'une cellule
"""
def __init__(self, first_cell):
"""
Initialiser le fringe.
Entrée: un tuple (ligne, colonne) indiquant l'entrée du labyrinthe.
"""
self.cost = {first_cell: 0}
self.heuristic = {first_cell: 0}
self._predecessor = {first_cell: None}
def append(self, cell, real_cost, estimated_cost, predecessor=None):
"""
Ajouter une cellule au fringe ou la mettre à jour.
Si la cellule est déjà présente, on la met à jour si le nouveau coût
est plus bas que le précédent (on a trouvé un meilleur chemin pour y
arriver).
Entrées:
- cell: cellule sous forme (row, col)
- real_cost: coût réel pour arriver jusqu'à cette cellule
- estimated_cost: coût estimé d'un chemin complet passant par cell
- predecessor: cellule précédente dans le chemin arrivant à cell
avec le coût réel indiqué
"""
if cell not in self.cost or real_cost < self.cost[cell]:
self.cost[cell] = real_cost
self.heuristic[cell] = estimated_cost
self._predecessor[cell] = predecessor
def pop(self):
"""
Extraire un noeud de bas coût ainsi que son prédecesseur.
Sortie: tuple (cellule, prédecesseur, coût)
"""
if not self.heuristic: # fringe is empty
return None, None, None
least = min(self.heuristic,
key=lambda cell: self.heuristic[cell])
del self.heuristic[least]
return least, self._predecessor[least], self.cost[least]
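# ---------------------------------------------------------------------------
# Illustrative alternative (not used by astar below): the same Fringe interface
# backed by heapq with lazy invalidation, instead of the linear min() scan in
# pop(). This is only a sketch of the drop-in replacement hinted at in the
# Fringe docstring; the dict-based version above remains the one in use.
# ---------------------------------------------------------------------------
import heapq
class HeapFringe:
    """Fringe variant using a binary heap; stale heap entries are skipped on pop."""
    def __init__(self, first_cell):
        self.cost = {first_cell: 0}
        self.heuristic = {first_cell: 0}
        self._predecessor = {first_cell: None}
        self._heap = [(0, first_cell)]
    def append(self, cell, real_cost, estimated_cost, predecessor=None):
        if cell not in self.cost or real_cost < self.cost[cell]:
            self.cost[cell] = real_cost
            self.heuristic[cell] = estimated_cost
            self._predecessor[cell] = predecessor
            heapq.heappush(self._heap, (estimated_cost, cell))
    def pop(self):
        while self._heap:
            estimated, cell = heapq.heappop(self._heap)
            if self.heuristic.get(cell) == estimated:  # skip stale entries
                del self.heuristic[cell]
                return cell, self._predecessor[cell], self.cost[cell]
        return None, None, None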
def distance0(cell1, cell2):
"""Return 0 distance for A* to behave like Dijkstra's algorithm."""
return 0
def distance1(cell1, cell2):
"""Return Manhattan distance between cells."""
return abs(cell1[0] - cell2[0]) + abs(cell1[1] - cell2[1])
def distance2(cell1, cell2):
"""Return euclidean distance between cells."""
return ((cell1[0] - cell2[0])**2 + (cell1[1] - cell2[1])**2)**0.5
def astar(grid, distance=distance1, view=None, diagonals=False):
"""
Trouver un chemin optimal dans une grille par algorithme A*.
Entrée: un objet Grid.
Sortie: une liste de cellules successives constituant un chemin
"""
directions = ((0, 1, 1), (0, -1, 1), (-1, 0, 1), (1, 0, 1))
if diagonals:
directions += ((1, 1, 1.4), (1, -1, 1.4), (-1, 1, 1.4), (-1, -1, 1.4))
    closed = dict()  # maps processed cell -> predecessor
    fringe = Fringe(grid.start)  # queue of cells awaiting processing
if view is not None:
astar_view = AstarView(grid, fringe.heuristic, closed, view)
while True:
current, predecessor, cost = fringe.pop()
if current is None:
log.debug("Le labyrinthe ne peut pas être résolu.")
return None
if current == grid.out:
log.debug("Found exit!")
path = [current]
current = predecessor
while current in closed:
path.append(current)
current = closed[current]
path = list(reversed(path))
if view is not None:
astar_view.showpath(path)
return path
for direction in directions:
neighbour = tuple(current[j] + direction[j] for j in (0, 1))
if neighbour not in grid or neighbour in closed:
continue
neighbour_cost = cost + direction[2]
heuristic = neighbour_cost + distance(neighbour, grid.out)
fringe.append(neighbour,
neighbour_cost,
heuristic,
predecessor=current)
closed[current] = predecessor
if view is not None:
astar_view.update()
if __name__ == "__main__":
    # minimal test
import cProfile
# from generateur_ascii import MAZE30 as maze
from generateur_ab import Maze
from pstats import SortKey
maze = Maze(50, 60, 0.01)
print(maze)
# print(list(astar(MAZE10, distance=distance1)))
cProfile.run("astar(maze, distance=distance0)", sort=SortKey.TIME)
cProfile.run("astar(maze, distance=distance1)", sort=SortKey.TIME)
cProfile.run("astar(maze, distance=distance2)", sort=SortKey.TIME)
|
from __future__ import absolute_import, unicode_literals
from datetime import datetime
from celery.task import task
from dateutil.relativedelta import relativedelta
from django.db import connections
from corehq.sql_db.connections import get_aaa_db_alias
from custom.aaa.models import (
AggAwc,
AggregationInformation,
AggVillage,
CcsRecord,
Child,
ChildHistory,
Woman,
WomanHistory,
)
def update_table(domain, slug, method):
window_start = AggregationInformation.objects.filter(
step=slug, aggregation_window_end__isnull=False
).order_by('-created_at').values_list('aggregation_window_end').first()
if window_start is None:
window_start = datetime(1900, 1, 1)
else:
window_start = window_start[0]
window_end = datetime.utcnow()
agg_info = AggregationInformation.objects.create(
domain=domain,
step=slug,
aggregation_window_start=window_start,
aggregation_window_end=window_end,
)
# implement lock
agg_query, agg_params = method(domain, window_start, window_end)
db_alias = get_aaa_db_alias()
with connections[db_alias].cursor() as cursor:
cursor.execute(agg_query, agg_params)
agg_info.end_time = datetime.utcnow()
agg_info.save()
@task
def update_child_table(domain):
for agg_query in Child.aggregation_queries:
update_table(domain, Child.__name__ + agg_query.__name__, agg_query)
@task
def update_child_history_table(domain):
for agg_query in ChildHistory.aggregation_queries:
update_table(domain, ChildHistory.__name__ + agg_query.__name__, agg_query)
@task
def update_woman_table(domain):
for agg_query in Woman.aggregation_queries:
update_table(domain, Woman.__name__ + agg_query.__name__, agg_query)
@task
def update_woman_history_table(domain):
for agg_query in WomanHistory.aggregation_queries:
        update_table(domain, WomanHistory.__name__ + agg_query.__name__, agg_query)
@task
def update_ccs_record_table(domain):
for agg_query in CcsRecord.aggregation_queries:
update_table(domain, CcsRecord.__name__ + agg_query.__name__, agg_query)
def update_monthly_table(domain, slug, method, month):
window_start = month.replace(day=1)
window_end = window_start + relativedelta(months=1)
agg_info = AggregationInformation.objects.create(
domain=domain,
step=slug,
aggregation_window_start=window_start,
aggregation_window_end=window_end,
)
agg_query, agg_params = method(domain, window_start, window_end)
db_alias = get_aaa_db_alias()
with connections[db_alias].cursor() as cursor:
cursor.execute(agg_query, agg_params)
agg_info.end_time = datetime.utcnow()
agg_info.save()
@task
def update_agg_awc_table(domain, month):
for agg_query in AggAwc.aggregation_queries:
update_monthly_table(domain, AggAwc.__name__ + agg_query.__name__, agg_query, month)
@task
def update_agg_village_table(domain, month):
for agg_query in AggVillage.aggregation_queries:
update_monthly_table(domain, AggVillage.__name__ + agg_query.__name__, agg_query, month)
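# ---------------------------------------------------------------------------
# Illustrative example (not called by the tasks above): how update_monthly_table
# derives its aggregation window from an arbitrary day inside a month. It only
# relies on the datetime/relativedelta imports already at the top of this module.
# ---------------------------------------------------------------------------
def _example_monthly_window(month=datetime(2021, 3, 17)):
    window_start = month.replace(day=1)                   # 2021-03-01 00:00
    window_end = window_start + relativedelta(months=1)   # 2021-04-01 00:00
    return window_start, window_end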
|
__all__ = ["module_graph", "module_ilp","module_file"]
|
'''
John Whelchel
Summer 2013
Library of decorators that simplify some views by grouping code that is
always run at the start or end of views.
'''
import logging
from django.http import HttpResponse
import storage.storage as db
from django.template import Context, loader
from django.shortcuts import redirect
from MS.user import SyndicateUser as User
from MS.gateway import AcquisitionGateway as AG
from MS.gateway import UserGateway as UG
from MS.gateway import ReplicaGateway as RG
from django_lib import forms as libforms
def precheck(g_type, redirect_view):
    '''
    Decorator factory that simplifies verifying the existence of gateways
    and checking passwords when modifying gateways in all gateway views.
    Arguments to precheck itself:
    + g_type is the type of gateway, either 'AG', 'UG' or 'RG'
    + redirect_view is the location to be redirected to on failure
    All wrapped view functions need to take the following args:
    - request
    - g_id
    '''
# Three decorator types
def ag_gateway_precheck(f):
def ag_wrapper(request, g_id):
if not request.POST:
return redirect('django_ag.views.viewgateway', g_id=g_id)
session = request.session
username = session['login_email']
try:
g = db.read_acquisition_gateway(g_id)
if not g:
raise Exception("No gateway exists.")
except Exception as e:
logging.error("Error reading gateway %s : Exception: %s" % (g_id, e))
message = "No acquisition gateway by the name of %s exists." % g_id
t = loader.get_template("gateway_templates/viewgateway_failure.html")
c = Context({'message':message, 'username':username})
return HttpResponse(t.render(c))
form = libforms.Password(request.POST)
if not form.is_valid():
session['message'] = "Password required."
return redirect(redirect_view, g_id)
# Check password hash
if not AG.authenticate(g, form.cleaned_data['password']):
session['message'] = "Incorrect password."
return redirect(redirect_view, g_id)
return f(request, g_id)
return ag_wrapper
def ug_gateway_precheck(f):
def ug_wrapper(request, g_id):
if not request.POST:
return redirect('django_ug.views.viewgateway', g_id=g_id)
session = request.session
username = session['login_email']
try:
g = db.read_user_gateway(g_id)
if not g:
raise Exception("No gateway exists.")
except Exception as e:
logging.error("Error reading gateway %s : Exception: %s" % (g_id, e))
message = "No user gateway by the name of %s exists." % g_id
t = loader.get_template("gateway_templates/viewgateway_failure.html")
c = Context({'message':message, 'username':username})
return HttpResponse(t.render(c))
form = libforms.Password(request.POST)
if not form.is_valid():
session['message'] = "Password required."
return redirect(redirect_view, g_id)
# Check password hash
if not UG.authenticate(g, form.cleaned_data['password']):
session['message'] = "Incorrect password."
return redirect(redirect_view, g_id)
return f(request, g_id)
return ug_wrapper
def rg_gateway_precheck(f):
def rg_wrapper(request, g_id):
if not request.POST:
return redirect('django_rg.views.viewgateway', g_id=g_id)
session = request.session
username = session['login_email']
try:
g = db.read_replica_gateway(g_id)
if not g:
raise Exception("No gateway exists.")
except Exception as e:
logging.error("Error reading gateway %s : Exception: %s" % (g_id, e))
message = "No replica gateway by the name of %s exists." % g_id
t = loader.get_template("gateway_templates/viewgateway_failure.html")
c = Context({'message':message, 'username':username})
return HttpResponse(t.render(c))
form = libforms.Password(request.POST)
if not form.is_valid():
session['message'] = "Password required."
return redirect(redirect_view, g_id)
# Check password hash
if not RG.authenticate(g, form.cleaned_data['password']):
session['message'] = "Incorrect password."
return redirect(redirect_view, g_id)
return f(request, g_id)
return rg_wrapper
# Pythonesque case statement to determine what type of decorator to return.
decorators = {"AG": ag_gateway_precheck,
"RG": rg_gateway_precheck,
"UG": ug_gateway_precheck
}
# Executed code
try:
return decorators[g_type]
except KeyError:
logging.error("Gatway type argument %s for decorators.precheck doesn't exist." % g_type)
return redirect('/syn/home')
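# ---------------------------------------------------------------------------
# Hedged sketch (not used above): the three prechecks differ only in the reader,
# the authenticating gateway class and the failure message, so they could be
# collapsed into one parameterized closure along these lines. This is an
# illustration of the design, not a tested drop-in refactor.
# ---------------------------------------------------------------------------
def _generic_precheck(read_gateway, gateway_cls, kind_label, redirect_view):
    def decorator(f):
        def wrapper(request, g_id):
            if not request.POST:
                return redirect(redirect_view, g_id)
            session = request.session
            username = session['login_email']
            try:
                g = read_gateway(g_id)
                if not g:
                    raise Exception("No gateway exists.")
            except Exception as e:
                logging.error("Error reading gateway %s : Exception: %s" % (g_id, e))
                message = "No %s gateway by the name of %s exists." % (kind_label, g_id)
                t = loader.get_template("gateway_templates/viewgateway_failure.html")
                c = Context({'message': message, 'username': username})
                return HttpResponse(t.render(c))
            form = libforms.Password(request.POST)
            if not form.is_valid():
                session['message'] = "Password required."
                return redirect(redirect_view, g_id)
            if not gateway_cls.authenticate(g, form.cleaned_data['password']):
                session['message'] = "Incorrect password."
                return redirect(redirect_view, g_id)
            return f(request, g_id)
        return wrapper
    return decorator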
|
def createText():
textToBeAdded = """\nQPushButton {
color: blue;
}"""
return textToBeAdded
textToBeAdded = createText()
with open("Style.qss", "a") as f:
f.write(textToBeAdded)
new = textToBeAdded.replace("blue", "orange")
with open("Style.qss", "r+") as f:
    old = f.read()  # read the whole stylesheet
    newText = old.replace(textToBeAdded, new)
    f.seek(0)  # rewind to the start of the file
    f.write(newText)  # overwrite the stylesheet with the updated rule
    f.truncate()  # drop any leftover bytes in case the replacement were shorter
|
import sys
import cProfile
from memory_profiler import profile
@profile()
def mem_to_be_profiled():
my_list1 = [i**2 for i in range(50000)]
my_list2 = (i**2 for i in range(100000, 150000))
    total = 0  # renamed from sum to avoid shadowing the built-in
    print("my_list1 = {} bytes".format(sys.getsizeof(my_list1)))
    print("my_list2 = {} bytes".format(sys.getsizeof(my_list2)))
    for i in my_list2:
        total += i
        my_list1.append(i)
    print(total)
mem_to_be_profiled()
|
import sys
sys.path.insert(0, "../../Sknet/")
import sknet
import os
import numpy as np
import time
import tensorflow as tf
from sknet import ops,layers
import argparse
parser = argparse.ArgumentParser()
#parser.add_argument('--data_augmentation', type=int)
parser.add_argument('--dataset', type=str)
parser.add_argument('--model', type=str)
#parser.add_argument('--epsilon', type=float)
parser.add_argument('-n', type=int)
parser.add_argument('--gamma', type=float)
parser.add_argument('--lr', type=float)
args = parser.parse_args()
#DATA_AUGMENTATION = args.data_augmentation
#EPSILON = args.epsilon
DATASET = args.dataset
MODEL = args.model
GAMMA = args.gamma
N = args.n
LR = args.lr
# Data Loading
#-------------
if DATASET=='cifar10':
dataset = sknet.datasets.load_cifar10()
elif DATASET=='mnist':
dataset = sknet.datasets.load_mnist()
elif DATASET=='svhn':
dataset = sknet.datasets.load_svhn()
elif DATASET=='cifar100':
dataset = sknet.datasets.load_cifar100()
if "valid_set" not in dataset.sets:
dataset.split_set("train_set","valid_set",0.15)
preprocess = sknet.datasets.Standardize().fit(dataset['images/train_set'])
dataset['images/train_set'] = preprocess.transform(dataset['images/train_set'])
dataset['images/test_set'] = preprocess.transform(dataset['images/test_set'])
dataset['images/valid_set'] = preprocess.transform(dataset['images/valid_set'])
interp_steps = np.linspace(0, 1, 100).reshape((-1, 1, 1, 1))  # renamed so the imported time module is not shadowed
dataset['images/interpolation'] = np.concatenate([np.expand_dims(dataset['images/train_set'][i], 0)*interp_steps+\
                                np.expand_dims(dataset['images/train_set'][i+1], 0)*(1-interp_steps) for i in range(20)] +\
                               [np.expand_dims(dataset['images/test_set'][i], 0)*interp_steps+\
                                np.expand_dims(dataset['images/test_set'][i+1], 0)*(1-interp_steps) for i in range(20)], 0).astype('float32')
#perm = np.random.permutation(N)
#dataset['images/train_set'] = dataset['images/train_set'][perm]
#dataset['labels/train_set'] = dataset['labels/train_set'][perm]
options = {'train_set': "random_see_all",
'valid_set': 'continuous',
'interpolation' : 'continuous',
'test_set': 'continuous'}
dataset.create_placeholders(32, options, device="/cpu:0")
const = 1.#(2*EPSILON)**(1./2)
# Create Network
#---------------
dnn = sknet.Network()
#if DATA_AUGMENTATION:
start = 1
# dnn.append(sknet.ops.RandomAxisReverse(dataset.images, axis=[-1]))
if DATASET == 'fashion':
dnn.append(sknet.ops.RandomCrop(dataset.images, (28, 28), pad=(4, 4), seed=10))
elif DATASET in ['cifar10', 'cifar100', 'svhn']:
dnn.append(sknet.ops.RandomCrop(dataset.images, (32, 32), pad=(4, 4), seed=10))
#else:
# dnn.append(dataset.images)
# start = 1
if MODEL == 'cnn':
sknet.networks.ConvLarge(dnn)
elif MODEL == 'simpleresnet':
sknet.networks.Resnet(dnn, D=4, W=1, block=sknet.layers.ResBlockV2)
elif MODEL == 'resnet':
sknet.networks.Resnet(dnn, D=10, W=1, block=sknet.layers.ResBlockV2)
elif MODEL == 'wideresnet':
sknet.networks.Resnet(dnn, D=6, W=2, block=sknet.layers.ResBlockV2)
dnn.append(sknet.ops.Dense(dnn[-1], dataset.n_classes))
# accuracy and loss
prediction = dnn[-1]
accu = sknet.losses.streaming_mean(sknet.losses.accuracy(dataset.labels,
dnn[-1]))
loss = sknet.losses.crossentropy_logits(dataset.labels, dnn[-1])
# optimizer and updates
B = dataset.N_BATCH('train_set')
lr = sknet.schedules.PiecewiseConstant(LR, {70*B: LR/3, 120*B: LR/9})
optimizer = sknet.optimizers.Adam(loss,
dnn.variables(trainable=True), lr)
minimizer = tf.group(*optimizer.updates, *dnn.updates)
reset = tf.group(optimizer.reset_variables_op, dnn.reset_variables_op)
# Workers
train = sknet.Worker(minimizer, loss=loss, accu=accu,
context='train_set', to_print=loss,
feed_dict=dnn.deter_dict(False))
test = sknet.Worker(loss=loss, accu=accu,
context='test_set', to_print=accu,
feed_dict=dnn.deter_dict(True))
# Pipeline
workplace = sknet.utils.Workplace(dataset=dataset)
path = '/mnt/drive1/rbalSpace/Hessian/naked_{}_{}_{}_{}.h5'
#path = '/mnt/project2/rb42Data/BatchNorm/pretrain_{}_{}_{}_{}_{}.h5'
#for run in range(5):
workplace.init_file(path.format(MODEL, DATASET, N, LR))
# workplace.execute_worker(inter)
workplace.execute_worker((train, test), repeat=150)
# workplace.execute_worker(inter)
# workplace.session.run(reset)
|
# Copyright (c) 2021 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
"""
Created on 2021-01-05 10:36
@author: johannes
"""
from abc import ABC
class Reader(ABC):
"""
"""
def __init__(self):
super().__init__()
def load(self, *args, **kwargs):
raise NotImplementedError
def read_element(self, *args, **kwargs):
raise NotImplementedError
@staticmethod
def eliminate_empty_rows(df):
return df.loc[df.apply(any, axis=1), :].reset_index(drop=True)
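# ---------------------------------------------------------------------------
# Hedged sketch (not part of this module): a minimal concrete Reader showing
# how the abstract interface above is meant to be filled in. The pandas-based
# loader and the column/row access pattern are assumptions for illustration.
# ---------------------------------------------------------------------------
class ExampleCsvReader(Reader):
    """Toy Reader that loads a CSV file with pandas."""
    def load(self, path, **kwargs):
        import pandas as pd
        self.df = self.eliminate_empty_rows(pd.read_csv(path, **kwargs))
        return self.df
    def read_element(self, column, row_index):
        return self.df[column].iloc[row_index]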
|
from snowddl.blueprint import TechRoleBlueprint
from snowddl.resolver.abc_role_resolver import AbstractRoleResolver
class TechRoleResolver(AbstractRoleResolver):
def get_role_suffix(self):
return self.config.TECH_ROLE_SUFFIX
def get_blueprints(self):
return self.config.get_blueprints_by_type(TechRoleBlueprint)
|
f = open("links.txt", "r")
f1 = open("cleanedLinks.txt", "w")
for line in f:
if "javascript:void(0)" in line or "login?" in line or "vote?" in line or "item?" in line or "user?" in line or "hide?" in line or "fave?" in line or "reply?" in line:
pass
else:
f1.write(line + "\n")
|
import random
import discord
from discord.ext import commands
import time
from momiji.modules import permissions
class MomijiSpeak(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_message(self, message):
async with self.bot.db.execute("SELECT channel_id, extension_name FROM bridged_extensions") as cursor:
bridged_extensions = await cursor.fetchall()
if bridged_extensions:
for bridge in bridged_extensions:
if int(bridge[0]) == int(message.channel.id):
return
if message.guild:
async with self.bot.db.execute("SELECT guild_id FROM mmj_enabled_guilds WHERE guild_id = ?",
[int(message.guild.id)]) as cursor:
is_enabled_guild = await cursor.fetchall()
if not is_enabled_guild:
return
await self.main(message)
@commands.Cog.listener()
async def on_message_delete(self, message):
async with self.bot.db.execute("SELECT channel_id, extension_name FROM bridged_extensions") as cursor:
bridged_extensions = await cursor.fetchall()
if bridged_extensions:
for bridge in bridged_extensions:
if int(bridge[0]) == int(message.channel.id):
return
if message.guild:
async with self.bot.db.execute("SELECT guild_id FROM mmj_enabled_guilds WHERE guild_id = ?",
[int(message.guild.id)]) as cursor:
is_enabled_guild = await cursor.fetchall()
if not is_enabled_guild:
return
await self.bot.db.execute("UPDATE mmj_message_logs SET deleted = ? WHERE message_id = ?",
[1, int(message.id)])
await self.bot.db.commit()
@commands.Cog.listener()
async def on_message_edit(self, before, after):
async with self.bot.db.execute("SELECT channel_id, extension_name FROM bridged_extensions") as cursor:
bridged_extensions = await cursor.fetchall()
if bridged_extensions:
for bridge in bridged_extensions:
if int(bridge[0]) == int(after.channel.id):
return
if after.guild:
async with self.bot.db.execute("SELECT guild_id FROM mmj_enabled_guilds WHERE guild_id = ?",
[int(after.guild.id)]) as cursor:
is_enabled_guild = await cursor.fetchall()
if not is_enabled_guild:
return
if not await self.check_privacy(after):
await self.bot.db.execute("UPDATE mmj_message_logs SET contents = ? WHERE message_id = ?",
[str(after.content), int(after.id)])
await self.bot.db.commit()
@commands.Cog.listener()
async def on_guild_channel_delete(self, deleted_channel):
async with self.bot.db.execute("SELECT channel_id, extension_name FROM bridged_extensions") as cursor:
bridged_extensions = await cursor.fetchall()
if bridged_extensions:
for bridge in bridged_extensions:
if int(bridge[0]) == int(deleted_channel.id):
return
async with self.bot.db.execute("SELECT guild_id FROM mmj_enabled_guilds WHERE guild_id = ?",
[int(deleted_channel.guild.id)]) as cursor:
is_enabled_guild = await cursor.fetchall()
if not is_enabled_guild:
return
await self.bot.db.execute("UPDATE mmj_message_logs SET deleted = ? WHERE channel_id = ?",
[1, int(deleted_channel.id)])
await self.bot.db.commit()
async def join_spam_train(self, message):
counter = 0
async for previous_message in message.channel.history(limit=2 + random.randint(1, 4)):
if (message.content == previous_message.content) and (message.author.id != previous_message.author.id):
if message.author.bot:
counter = -500
else:
counter += 1
if counter == 3:
if await self.check_message_contents(message.content):
await message.channel.send(message.content)
async def check_privacy(self, message):
"""
Checks if the message belongs to a private guild or a channel
:param message: discord.py's message object
:return: True if the message belongs to a private guild or a channel, False if not.
"""
if message.guild:
async with self.bot.db.execute("SELECT * FROM mmj_enabled_guilds WHERE guild_id = ? AND metadata_only = 1",
[int(message.guild.id)]) as cursor:
is_metadata_only = await cursor.fetchall()
if is_metadata_only:
return True
async with self.bot.db.execute("SELECT guild_id FROM mmj_private_guilds WHERE guild_id = ?",
[int(message.guild.id)]) as cursor:
private_guild_check = await cursor.fetchall()
if private_guild_check:
return True
async with self.bot.db.execute("SELECT channel_id FROM mmj_private_channels WHERE channel_id = ?",
[int(message.channel.id)]) as cursor:
private_channel_check = await cursor.fetchall()
if private_channel_check:
return True
return False
async def bridge_check(self, channel_id):
async with self.bot.db.execute("SELECT depended_channel_id FROM mmj_channel_bridges "
"WHERE channel_id = ?", [int(channel_id)]) as cursor:
bridged_channel = await cursor.fetchall()
if bridged_channel:
return int(bridged_channel[0][0])
else:
return int(channel_id)
async def check_message_contents(self, string):
if len(string) > 0:
async with self.bot.db.execute("SELECT word FROM mmj_word_blacklist") as cursor:
blacklist = await cursor.fetchall()
if not (any(str(c[0]) in str(string.lower()) for c in blacklist)):
if not (any(string.startswith(c) for c in (";", "'", "!", ",", ".", "=", "-", "t!", "t@", "$"))):
return True
return False
async def pick_message(self, message, depended_channel_id):
async with self.bot.db.execute("SELECT guild_id, channel_id, user_id, message_id, "
"username, bot, contents, timestamp, deleted "
"FROM mmj_message_logs "
"WHERE channel_id = ? AND bot = ? AND deleted = ?",
[int(depended_channel_id), 0, 0]) as cursor:
all_potential_messages = await cursor.fetchall()
if all_potential_messages:
counter = 0
while True:
if counter > 50:
print("I looked over 50 random messages to send but nothing passed the check.")
return False
counter += 1
message_from_db = random.choice(all_potential_messages)
if await self.check_privacy(message):
                    source_channel = self.bot.get_channel(int(depended_channel_id))
                    picked_message = await source_channel.fetch_message(message_from_db[3])
content_to_send = picked_message.content
else:
content_to_send = str(message_from_db[6])
if await self.check_message_contents(content_to_send):
return content_to_send
else:
print("The query returned nothing")
return False
async def momiji_speak(self, message):
channel = message.channel
depended_channel_id = await self.bridge_check(channel.id)
async with channel.typing():
message_contents_to_send = await self.pick_message(message, depended_channel_id)
if message_contents_to_send:
sent_message = await channel.send(message_contents_to_send)
await self.bot.db.execute("INSERT INTO cr_pair VALUES (?, ?)", [int(message.id), int(sent_message.id)])
await self.bot.db.commit()
return True
else:
return False
async def store_message(self, message):
if await self.check_privacy(message):
content = None
else:
content = str(message.content)
if message.guild:
message_guild_id = message.guild.id
else:
message_guild_id = 0
await self.bot.db.execute("INSERT INTO mmj_message_logs VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
[int(message_guild_id), int(message.channel.id), int(message.author.id),
int(message.id), str(message.author.name), int(message.author.bot), content,
int(time.mktime(message.created_at.timetuple())), 0])
await self.bot.db.commit()
async def main(self, message):
await self.store_message(message)
if await permissions.is_ignored(message):
return
if message.author.bot:
return
msg = message.content.lower()
if message.mention_everyone:
await message.channel.send("https://i.imgur.com/UCuY8qP.gif")
return
if "momiji" in msg or self.bot.user.mention in message.content:
await self.momiji_speak(message)
return
# await self.join_spam_train(message)
if message.content.isupper() and len(message.content) > 2 and random.randint(0, 20) == 1:
await self.momiji_speak(message)
async with self.bot.db.execute("SELECT trigger, response, type, one_in FROM mmj_responses") as cursor:
momiji_responses = await cursor.fetchall()
for trigger, response, condition, chances in momiji_responses:
one_in = int(chances)
if self.condition_validate(condition, msg, trigger):
if random.randint(1, one_in) == 1:
if len(response) > 0:
response_msg = await message.channel.send(response)
await self.bot.db.execute("INSERT INTO cr_pair VALUES (?, ?)",
[int(message.id), int(response_msg.id)])
await self.bot.db.commit()
else:
await self.momiji_speak(message)
return
def condition_validate(self, condition, msg, trigger):
if int(condition) == 1:
return msg.startswith(trigger)
elif int(condition) == 2:
return msg == trigger
elif int(condition) == 3:
return trigger in msg
def setup(bot):
bot.add_cog(MomijiSpeak(bot))
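# ---------------------------------------------------------------------------
# Hedged sketch (not wired into the cog above): the three listeners repeat the
# same "bridged channel / enabled guild" gate, which could be factored into one
# coroutine along these lines. Table and column names mirror the queries above;
# treat this as illustrative only.
# ---------------------------------------------------------------------------
async def should_process_event(bot, guild, channel_id):
    """Return False for bridged channels, or for guilds where Momiji is not enabled."""
    async with bot.db.execute("SELECT channel_id FROM bridged_extensions "
                              "WHERE channel_id = ?", [int(channel_id)]) as cursor:
        if await cursor.fetchall():
            return False
    if guild:
        async with bot.db.execute("SELECT guild_id FROM mmj_enabled_guilds "
                                  "WHERE guild_id = ?", [int(guild.id)]) as cursor:
            if not await cursor.fetchall():
                return False
    return True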
|
from .application import settings
from .redis import redis
from .facebook import facebook
from .google import google
from .smtp import smtp
__all__ = (
    "settings",
    "redis",
    "facebook",
    "google",
    "smtp",
)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import djangobmf.numbering.validators
import djangobmf.utils.generate_filename
from django.conf import settings
import django.utils.timezone
import djangobmf.document.storage
class Migration(migrations.Migration):
replaces = [('djangobmf', '0001_version_0_2_0'), ('djangobmf', '0008_activity_meta_options'), ('djangobmf', '0009_soft_dependency_for_projects_and_customer')]
dependencies = [
('contenttypes', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Activity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('topic', models.CharField(blank=True, max_length=100, verbose_name='Topic', null=True)),
('text', models.TextField(blank=True, verbose_name='Text', null=True)),
('action', models.PositiveSmallIntegerField(choices=[(1, 'Comment'), (2, 'Created'), (3, 'Updated'), (4, 'Workflow'), (5, 'File')], verbose_name='Action', null=True, default=1, editable=False)),
('template', models.CharField(max_length=100, verbose_name='Template', null=True, editable=False)),
('parent_id', models.PositiveIntegerField()),
('modified', models.DateTimeField(verbose_name='Modified', auto_now=True)),
('parent_ct', models.ForeignKey(related_name='bmf_history_parent', to='contenttypes.ContentType')),
('user', models.ForeignKey(blank=True, null=True, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'History',
'verbose_name': 'History',
'get_latest_by': 'modified',
'ordering': ('-modified',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Configuration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('app_label', models.CharField(max_length=100, verbose_name='Application', null=True, editable=False)),
('field_name', models.CharField(max_length=100, verbose_name='Fieldname', null=True, editable=False)),
('value', models.TextField(verbose_name='Value', null=True)),
],
options={
'verbose_name_plural': 'Configurations',
'verbose_name': 'Configuration',
'default_permissions': ('change',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Dashboard',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('name', models.CharField(max_length=100, verbose_name='Name', null=True)),
('user', models.ForeignKey(blank=True, null=True, to=settings.AUTH_USER_MODEL, related_name='+')),
],
options={
'ordering': ('name', 'id'),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='NumberCycle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('name_template', models.CharField(validators=[djangobmf.numbering.validators.template_name_validator], max_length=64, verbose_name='Template', null=True)),
('counter_start', models.PositiveIntegerField(null=True, default=1)),
('current_period', models.DateField(null=True, default=django.utils.timezone.now)),
('ct', models.OneToOneField(related_name='bmf_numbercycle', null=True, to='contenttypes.ContentType', editable=False)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Report',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('reporttype', models.CharField(max_length=20, verbose_name='Reporttype')),
('mimetype', models.CharField(max_length=20, verbose_name='Mimetype', editable=False, default='pdf')),
('options', models.TextField(blank=True, help_text='Options for the renderer. Empty this field to get all available options with default values', verbose_name='Options')),
('modified', models.DateTimeField(verbose_name='Modified', auto_now=True)),
('contenttype', models.ForeignKey(blank=True, help_text='Connect a Report to an BMF-Model', null=True, to='contenttypes.ContentType', related_name='bmf_report')),
],
options={
'verbose_name_plural': 'Reports',
'verbose_name': 'Report',
'get_latest_by': 'modified',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('watch_id', models.PositiveIntegerField(null=True)),
('triggered', models.BooleanField(verbose_name='Triggered', editable=False, db_index=True, default=True)),
('unread', models.BooleanField(verbose_name='Unread', editable=False, db_index=True, default=True)),
('last_seen_object', models.PositiveIntegerField(null=True)),
('new_entry', models.BooleanField(verbose_name='New entry', default=False, db_index=True)),
('comment', models.BooleanField(verbose_name='Comment written', default=False, db_index=True)),
('file', models.BooleanField(verbose_name='File added', default=False, db_index=True)),
('changed', models.BooleanField(verbose_name='Object changed', default=False, db_index=True)),
('workflow', models.BooleanField(verbose_name='Workflowstate changed', default=False, db_index=True)),
('modified', models.DateTimeField(verbose_name='Modified', null=True, default=django.utils.timezone.now, editable=False)),
('user', models.ForeignKey(blank=True, null=True, to=settings.AUTH_USER_MODEL)),
('watch_ct', models.ForeignKey(to='contenttypes.ContentType', null=True)),
],
options={
'verbose_name_plural': 'Watched activities',
'verbose_name': 'Watched activity',
'default_permissions': (),
'ordering': ('-modified',),
'get_latest_by': 'modified',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='notification',
unique_together=set([('user', 'watch_ct', 'watch_id')]),
),
migrations.CreateModel(
name='Workspace',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('slug', models.SlugField(max_length=30)),
('url', models.CharField(max_length=255, editable=False, db_index=True)),
('public', models.BooleanField(default=True)),
('editable', models.BooleanField(default=True)),
('module', models.CharField(blank=True, max_length=255, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('lft', models.PositiveIntegerField(editable=False, db_index=True)),
('rght', models.PositiveIntegerField(editable=False, db_index=True)),
('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
('level', models.PositiveIntegerField(editable=False, db_index=True)),
('ct', models.ForeignKey(blank=True, null=True, to='contenttypes.ContentType', related_name='+')),
('parent', models.ForeignKey(blank=True, null=True, to='djangobmf.Workspace', related_name='children')),
],
options={
'verbose_name_plural': 'Workspace',
'verbose_name': 'Workspace',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='workspace',
unique_together=set([('parent', 'slug')]),
),
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('name', models.CharField(blank=True, max_length=120, verbose_name='Name', null=True, editable=False)),
('file', models.FileField(upload_to=djangobmf.utils.generate_filename.generate_filename, storage=djangobmf.document.storage.BMFStorage(), verbose_name='File')),
('size', models.PositiveIntegerField(blank=True, null=True, editable=False)),
('is_static', models.BooleanField(default=False)),
('content_id', models.PositiveIntegerField(blank=True, null=True, editable=False)),
('modified', models.DateTimeField(auto_now=True, verbose_name='Modified', null=True)),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created', null=True)),
('content_type', models.ForeignKey(blank=True, null=True, to='contenttypes.ContentType', related_name='bmf_document', editable=False, on_delete=django.db.models.deletion.SET_NULL)),
('created_by', models.ForeignKey(blank=True, null=True, to=settings.AUTH_USER_MODEL, related_name='+', editable=False, on_delete=django.db.models.deletion.SET_NULL)),
('modified_by', models.ForeignKey(blank=True, null=True, to=settings.AUTH_USER_MODEL, related_name='+', editable=False, on_delete=django.db.models.deletion.SET_NULL)),
('customer_pk', models.PositiveIntegerField(blank=True, null=True, db_index=True, editable=False)),
('project_pk', models.PositiveIntegerField(blank=True, null=True, db_index=True, editable=False)),
],
options={
'verbose_name_plural': 'Documents',
'verbose_name': 'Document',
'get_latest_by': 'modified',
},
bases=(models.Model,),
),
migrations.AlterModelOptions(
name='activity',
options={'verbose_name_plural': 'Activity', 'verbose_name': 'Activity', 'get_latest_by': 'modified', 'ordering': ('-modified',)},
),
]
|
import os
import pathlib
import sys
import tempfile
import unittest
from io import StringIO
from modulegraph2 import (
Alias,
AliasNode,
BuiltinModule,
DependencyInfo,
ExcludedModule,
ExtensionModule,
InvalidModule,
InvalidRelativeImport,
MissingModule,
ModuleGraph,
NamespacePackage,
Package,
PyPIDistribution,
SourceModule,
distribution_named,
)
from modulegraph2._distributions import distribution_for_file
from . import util
from .test_distributions import build_and_install
INPUT_DIR = pathlib.Path(__file__).resolve().parent / "modulegraph-dir"
class TestModuleGraphScripts(unittest.TestCase, util.TestMixin):
@classmethod
def setUpClass(cls):
util.clear_sys_modules(INPUT_DIR)
@classmethod
def tearDownClass(cls):
util.clear_sys_modules(INPUT_DIR)
def test_trivial_script(self):
mg = ModuleGraph()
mg.add_script(INPUT_DIR / "trivial-script")
self.assertEqual(len(list(mg.roots())), 1)
(node,) = mg.roots()
self.assert_valid_script_node(node, INPUT_DIR / "trivial-script")
self.assertEqual(len(list(mg.iter_graph(node=node))), 1)
(graph_node,) = mg.iter_graph(node=node)
self.assertIs(graph_node, node)
def test_self_referential_script(self):
mg = ModuleGraph()
mg.add_script(INPUT_DIR / "self-referential-script.py")
self.assertEqual(len(list(mg.roots())), 1)
(node,) = mg.roots()
self.assert_valid_script_node(node, INPUT_DIR / "self-referential-script.py")
main = mg.find_node("__main__")
self.assertIsInstance(main, ExcludedModule)
def test_setuptools_script(self):
mg = ModuleGraph()
mg.add_script(INPUT_DIR / "setup.py")
node = mg.find_node("configparser")
self.assertIsInstance(node, (SourceModule, Package))
import setuptools
if int(setuptools.__version__.split(".")[0]) < 47:
node = mg.find_node("setuptools._vendor.six.moves.configparser")
self.assertIsInstance(node, AliasNode)
node = mg.find_node("setuptools.extern.six.moves.configparser")
self.assertIsInstance(node, AliasNode)
self.assert_has_edge(
mg,
"setuptools.extern.six.moves.configparser",
"configparser",
{DependencyInfo(False, True, False, None)},
)
node = mg.find_node("setuptools._vendor.packaging")
self.assertIsInstance(node, Package)
node = mg.find_node("setuptools.extern.packaging")
self.assertIsInstance(node, AliasNode)
self.assert_has_edge(
mg,
"setuptools.extern.packaging",
"setuptools._vendor.packaging",
{DependencyInfo(False, True, False, None)},
)
def test_add_script_twice(self):
mg = ModuleGraph()
mg.add_script(INPUT_DIR / "trivial-script")
self.assertRaises(ValueError, mg.add_script, INPUT_DIR / "trivial-script")
    # Disabled: the "no_" prefix keeps unittest from collecting this test.
    def no_test_stdlib_script(self):
mg = ModuleGraph()
mg.add_script(INPUT_DIR / "stdlib-script")
self.assertEqual(len(list(mg.roots())), 1)
(node,) = mg.roots()
self.assert_valid_script_node(node, INPUT_DIR / "stdlib-script")
node = mg.find_node("os")
self.assertIsInstance(node, SourceModule)
node = mg.find_node("xml.etree.ElementTree")
self.assertIsInstance(node, SourceModule)
mg.report()
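# The script tests above exercise ModuleGraph.add_script(), roots() and iter_graph().
# The helper below is a small hypothetical sketch -- not part of the original suite --
# showing the same calls outside the unittest harness; the function name is made up.
def _example_script_graph(script_path):
    """Build a graph for one script and list every node reachable from its roots."""
    mg = ModuleGraph()
    mg.add_script(script_path)
    reachable = []
    for root in mg.roots():
        for node in mg.iter_graph(node=root):
            reachable.append(node.identifier)
    return reachable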
class TestModuleGraphAbsoluteImports(unittest.TestCase, util.TestMixin):
@classmethod
def setUpClass(cls):
util.clear_sys_modules(INPUT_DIR)
@classmethod
def tearDownClass(cls):
util.clear_sys_modules(INPUT_DIR)
def setUp(self):
sys.path.insert(0, os.fspath(INPUT_DIR))
def tearDown(self):
assert sys.path[0] == os.fspath(INPUT_DIR)
del sys.path[0]
def test_add_module_twice(self):
with self.subTest("adding root again"):
mg = ModuleGraph()
n1 = mg.add_module("no_imports")
n2 = mg.add_module("no_imports")
self.assertIs(n1, n2)
self.assert_has_roots(mg, "no_imports")
with self.subTest("adding loaded module"):
mg = ModuleGraph()
mg.add_module("global_import")
n1 = mg.find_node("no_imports")
n2 = mg.add_module("no_imports")
self.assertIs(n1, n2)
self.assert_has_roots(mg, "global_import", "no_imports")
def test_add_module_package(self):
mg = ModuleGraph()
mg.add_module("package.submod")
self.assert_has_node(mg, "package.submod", SourceModule)
self.assert_has_node(mg, "package", Package)
self.assert_has_edge(
mg, "package.submod", "package", {DependencyInfo(False, True, False, None)}
)
def test_no_imports(self):
mg = ModuleGraph()
mg.add_module("no_imports")
self.assert_has_node(mg, "no_imports", SourceModule)
self.assert_edge_count(mg, 0)
self.assert_has_roots(mg, "no_imports")
self.assert_has_nodes(mg, "no_imports")
def test_global_import(self):
mg = ModuleGraph()
mg.add_module("global_import")
self.assert_has_node(mg, "global_import", SourceModule)
self.assert_has_node(mg, "no_imports", SourceModule)
self.assert_has_edge(
mg,
"global_import",
"no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 1)
self.assert_has_roots(mg, "global_import")
self.assert_has_nodes(mg, "global_import", "no_imports")
def test_circular_imports(self):
mg = ModuleGraph()
mg.add_module("circular_a")
self.assert_has_node(mg, "circular_a", SourceModule)
self.assert_has_node(mg, "circular_b", SourceModule)
self.assert_has_node(mg, "circular_c", SourceModule)
self.assert_has_edge(
mg, "circular_a", "circular_b", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg, "circular_b", "circular_c", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg, "circular_c", "circular_a", {DependencyInfo(False, True, False, None)}
)
self.assert_edge_count(mg, 3)
self.assert_has_roots(mg, "circular_a")
self.assert_has_nodes(mg, "circular_a", "circular_b", "circular_c")
def test_circular_from(self):
mg = ModuleGraph()
mg.add_module("circular_from_a")
self.assert_has_node(mg, "circular_from_a", SourceModule)
self.assert_has_node(mg, "circular_from_b", SourceModule)
self.assert_has_node(mg, "circular_from_c", SourceModule)
self.assert_has_edge(
mg,
"circular_from_a",
"circular_from_b",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"circular_from_b",
"circular_from_c",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"circular_from_c",
"circular_from_a",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 3)
self.assert_has_roots(mg, "circular_from_a")
self.assert_has_nodes(
mg, "circular_from_a", "circular_from_b", "circular_from_c"
)
def test_circular_from_star(self):
mg = ModuleGraph()
mg.add_module("circular_from_star_a")
self.assert_has_node(mg, "circular_from_star_a", SourceModule)
self.assert_has_node(mg, "circular_from_star_b", SourceModule)
self.assert_has_node(mg, "circular_from_star_c", SourceModule)
self.assert_has_edge(
mg,
"circular_from_star_a",
"circular_from_star_b",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"circular_from_star_b",
"circular_from_star_c",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"circular_from_star_c",
"circular_from_star_a",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 3)
self.assert_has_roots(mg, "circular_from_star_a")
self.assert_has_nodes(
mg, "circular_from_star_a", "circular_from_star_b", "circular_from_star_c"
)
def test_missing_toplevel(self):
mg = ModuleGraph()
mg.add_module("missing")
self.assert_has_node(mg, "missing", SourceModule)
self.assert_has_node(mg, "nosuchmodule", MissingModule)
self.assert_has_edge(
mg, "missing", "nosuchmodule", {DependencyInfo(False, True, False, None)}
)
self.assert_edge_count(mg, 1)
self.assert_has_roots(mg, "missing")
self.assert_has_nodes(mg, "missing", "nosuchmodule")
def test_wrong_import(self):
mg = ModuleGraph()
mg.add_module("missing_in_package.py") # Whoops, forgot to strip ".py"
self.assert_has_node(mg, "missing_in_package", SourceModule)
self.assert_has_node(mg, "missing_in_package.py", MissingModule)
def test_missing_in_package(self):
mg = ModuleGraph()
mg.add_module("missing_in_package")
self.assert_has_node(mg, "missing_in_package", SourceModule)
self.assert_has_node(mg, "package", Package)
self.assert_has_node(mg, "package.missingmodule", MissingModule)
self.assert_has_edge(
mg,
"missing_in_package",
"package.missingmodule",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.missingmodule",
"package",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 2)
self.assert_has_roots(mg, "missing_in_package")
self.assert_has_nodes(
mg, "missing_in_package", "package", "package.missingmodule"
)
def test_missing_package(self):
mg = ModuleGraph()
mg.add_module("missing_package")
self.assert_has_node(mg, "missing_package", SourceModule)
self.assert_has_node(mg, "missingpackage", MissingModule)
self.assert_has_node(mg, "missingpackage.module", MissingModule)
self.assert_has_edge(
mg,
"missing_package",
"missingpackage.module",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"missingpackage.module",
"missingpackage",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 2)
self.assert_has_roots(mg, "missing_package")
self.assert_has_nodes(
mg, "missing_package", "missingpackage", "missingpackage.module"
)
def test_missing_nested_package(self):
mg = ModuleGraph()
mg.add_module("missing_nested_package")
self.assert_has_node(mg, "missing_nested_package", SourceModule)
self.assert_has_node(mg, "missingpackage", MissingModule)
self.assert_has_node(mg, "missingpackage.missingsubpackage", MissingModule)
self.assert_has_node(
mg, "missingpackage.missingsubpackage.module", MissingModule
)
self.assert_has_edge(
mg,
"missing_nested_package",
"missingpackage.missingsubpackage.module",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"missingpackage.missingsubpackage.module",
"missingpackage.missingsubpackage",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"missingpackage.missingsubpackage",
"missingpackage",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 3)
self.assert_has_roots(mg, "missing_nested_package")
self.assert_has_nodes(
mg,
"missing_nested_package",
"missingpackage",
"missingpackage.missingsubpackage",
"missingpackage.missingsubpackage.module",
)
def test_package_import_one_level(self):
mg = ModuleGraph()
mg.add_module("package_import_single_level")
self.assert_has_node(mg, "package_import_single_level")
self.assert_has_node(mg, "package.submod")
self.assert_has_node(mg, "package")
self.assert_has_node(mg, "no_imports")
self.assert_has_edge(
mg,
"package_import_single_level",
"package.submod",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg, "package.submod", "package", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg,
"package.submod",
"no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 3)
self.assert_has_roots(mg, "package_import_single_level")
self.assert_has_nodes(
mg, "package_import_single_level", "package", "package.submod", "no_imports"
)
def test_package_import_two_levels(self):
mg = ModuleGraph()
mg.add_module("package_import_two_levels")
self.assert_has_node(mg, "package_import_two_levels")
self.assert_has_node(mg, "package.submod2")
self.assert_has_node(mg, "package")
self.assert_has_node(mg, "no_imports")
self.assert_has_node(mg, "global_import")
self.assert_has_edge(
mg,
"package_import_two_levels",
"package.submod2",
{DependencyInfo(False, True, False, "submod2")},
)
self.assert_has_edge(
mg, "package.submod2", "package", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg,
"package.submod2",
"global_import",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"global_import",
"no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 4)
self.assert_has_roots(mg, "package_import_two_levels")
self.assert_has_nodes(
mg,
"package_import_two_levels",
"package",
"package.submod2",
"no_imports",
"global_import",
)
def test_import_two_levels(self):
mg = ModuleGraph()
mg.add_module("import_two_levels")
self.assert_has_node(mg, "import_two_levels")
self.assert_has_node(mg, "package")
self.assert_has_node(mg, "package.subpackage")
self.assert_has_node(mg, "package.subpackage.subpackage2")
self.assert_has_node(mg, "package.subpackage.subpackage2.subsubmod")
self.assert_has_node(mg, "package.subpackage.submod")
self.assert_has_edge(
mg,
"import_two_levels",
"package.subpackage.subpackage2.subsubmod",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.subpackage.subpackage2.subsubmod",
"package.subpackage.subpackage2",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.subpackage.subpackage2",
"package.subpackage",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.subpackage",
"package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"import_two_levels",
"package.subpackage.submod",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.subpackage.submod",
"package.subpackage",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 6)
self.assert_has_roots(mg, "import_two_levels")
self.assert_has_nodes(
mg,
"import_two_levels",
"package.subpackage.subpackage2.subsubmod",
"package.subpackage.subpackage2",
"package.subpackage",
"package.subpackage.submod",
"package",
)
def test_diamond(self):
mg = ModuleGraph()
mg.add_module("diamond_a")
self.assert_has_node(mg, "diamond_a", SourceModule)
self.assert_has_node(mg, "diamond_b1", SourceModule)
self.assert_has_node(mg, "diamond_b2", SourceModule)
self.assert_has_node(mg, "diamond_c", SourceModule)
self.assert_has_node(mg, "sys", BuiltinModule)
self.assert_has_edge(
mg, "diamond_a", "diamond_b1", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg, "diamond_a", "diamond_b2", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg, "diamond_b1", "diamond_c", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg, "diamond_b2", "diamond_c", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg, "diamond_c", "sys", {DependencyInfo(False, True, False, None)}
)
self.assert_edge_count(mg, 5)
self.assert_has_roots(mg, "diamond_a")
self.assert_has_nodes(
mg, "diamond_a", "diamond_b1", "diamond_b2", "diamond_c", "sys"
)
def test_alias_import(self):
mg = ModuleGraph()
mg.add_module("alias_toplevel")
self.assert_has_node(mg, "alias_toplevel", SourceModule)
self.assert_has_node(mg, "alias_import", SourceModule)
self.assert_has_node(mg, "no_imports", SourceModule)
self.assert_has_node(mg, "package", Package)
self.assert_has_node(mg, "package.frommod", SourceModule)
self.assert_has_node(mg, "package.nosuchmodule", MissingModule)
self.assert_has_edge(
mg,
"alias_toplevel",
"alias_import",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg, "alias_toplevel", "package", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg,
"alias_import",
"no_imports",
{DependencyInfo(False, True, False, "really")},
)
self.assert_has_edge(
mg,
"alias_toplevel",
"package.frommod",
{DependencyInfo(False, True, True, None)},
)
self.assert_has_edge(
mg, "package.frommod", "package", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg,
"alias_toplevel",
"package.nosuchmodule",
{DependencyInfo(False, True, True, None)},
)
self.assert_has_edge(
mg,
"package.nosuchmodule",
"package",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 8)
self.assert_has_roots(mg, "alias_toplevel")
self.assert_has_nodes(
mg,
"alias_toplevel",
"alias_import",
"no_imports",
"package",
"package.frommod",
"package.nosuchmodule",
)
def test_from_sys_import_star(self):
mg = ModuleGraph()
mg.add_module("import_sys_star")
self.assert_has_node(mg, "import_sys_star", SourceModule)
self.assert_has_node(mg, "sys", BuiltinModule)
self.assert_has_edge(
mg, "import_sys_star", "sys", {DependencyInfo(True, True, False, None)}
)
self.assert_edge_count(mg, 1)
self.assert_has_roots(mg, "import_sys_star")
self.assert_has_nodes(mg, "import_sys_star", "sys")
def test_package_import_star(self):
mg = ModuleGraph()
mg.add_module("from_package_import_star")
self.assert_has_node(mg, "from_package_import_star", SourceModule)
self.assert_has_node(mg, "star_package", Package)
self.assertEqual(
mg.find_node("from_package_import_star").globals_written,
mg.find_node("star_package").globals_written,
)
self.assert_has_edge(
mg,
"from_package_import_star",
"star_package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"star_package",
"star_package.submod",
{DependencyInfo(False, True, False, "submod")},
)
self.assert_has_edge(
mg, "star_package", "sys", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg,
"star_package.submod",
"star_package",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 4)
self.assert_has_roots(mg, "from_package_import_star")
self.assert_has_nodes(
mg, "from_package_import_star", "star_package", "star_package.submod", "sys"
)
def test_package_import_star2(self):
mg = ModuleGraph()
mg.add_module("from_package_import_star2")
self.assert_has_node(mg, "from_package_import_star2", SourceModule)
self.assert_has_node(mg, "star_package2", Package)
self.assertEqual(
mg.find_node("from_package_import_star2").globals_written,
mg.find_node("star_package2").globals_written,
)
self.assert_has_edge(
mg,
"from_package_import_star2",
"star_package2",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 1)
self.assert_has_roots(mg, "from_package_import_star2")
self.assert_has_nodes(mg, "from_package_import_star2", "star_package2")
def test_from_implicit_import_star(self):
mg = ModuleGraph()
mg.add_module("from_implicit_package_import_star")
self.assert_has_node(mg, "from_implicit_package_import_star", SourceModule)
self.assert_has_node(mg, "implicit_package", NamespacePackage)
self.assertEqual(
mg.find_node("from_implicit_package_import_star").globals_written, set()
)
self.assert_has_edge(
mg,
"from_implicit_package_import_star",
"implicit_package",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 1)
self.assert_has_roots(mg, "from_implicit_package_import_star")
self.assert_has_nodes(
mg, "from_implicit_package_import_star", "implicit_package"
)
def test_multi_level_star(self):
mg = ModuleGraph()
mg.add_module("multi_level_star_import")
self.assert_has_node(mg, "multi_level_star_import", SourceModule)
self.assert_has_node(mg, "pkg_a", Package)
self.assert_has_node(mg, "pkg_b", Package)
self.assert_has_node(mg, "pkg_c", Package)
self.assert_has_node(mg, "pkg_d", Package)
self.assert_has_edge(
mg,
"multi_level_star_import",
"pkg_a",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg, "pkg_a", "pkg_b", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg, "pkg_b", "pkg_c", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg, "pkg_c", "pkg_d", {DependencyInfo(False, True, False, None)}
)
self.assert_edge_count(mg, 4)
self.assertEqual(mg.find_node("pkg_d").globals_written, {"e"})
self.assertEqual(mg.find_node("pkg_c").globals_written, {"e"})
self.assertEqual(mg.find_node("pkg_b").globals_written, {"e"})
self.assertEqual(mg.find_node("pkg_a").globals_written, {"e"})
        self.assertEqual(
            mg.find_node("multi_level_star_import").globals_written, {"e"}
        )
self.assert_has_roots(mg, "multi_level_star_import")
self.assert_has_nodes(
mg, "multi_level_star_import", "pkg_a", "pkg_b", "pkg_c", "pkg_d"
)
def test_multi_level_star2(self):
mg = ModuleGraph()
mg.add_module("multi_level_star_import2")
self.assert_has_node(mg, "multi_level_star_import2", SourceModule)
self.assert_has_node(mg, "pkg_a", Package)
self.assert_has_node(mg, "pkg_b", Package)
self.assert_has_node(mg, "pkg_c", Package)
self.assert_has_node(mg, "pkg_d", Package)
self.assert_has_edge(
mg,
"multi_level_star_import2",
"pkg_a",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg, "pkg_a", "pkg_b", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg, "pkg_b", "pkg_c", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg, "pkg_c", "pkg_d", {DependencyInfo(False, True, False, None)}
)
self.assert_edge_count(mg, 4)
self.assertEqual(mg.find_node("pkg_d").globals_written, {"e"})
self.assertEqual(mg.find_node("pkg_c").globals_written, {"e"})
self.assertEqual(mg.find_node("pkg_b").globals_written, {"e"})
self.assertEqual(mg.find_node("pkg_a").globals_written, {"e"})
self.assertEqual(
mg.find_node("multi_level_star_import2").globals_written, {"e"}
)
self.assert_has_roots(mg, "multi_level_star_import2")
self.assert_has_nodes(
mg, "multi_level_star_import2", "pkg_a", "pkg_b", "pkg_c", "pkg_d"
)
def test_multi_level_star_import_missing(self):
mg = ModuleGraph()
mg.add_module("multi_level_star_import_missing")
self.assert_has_node(mg, "multi_level_star_import_missing", SourceModule)
self.assert_has_node(mg, "pkg_c", Package)
self.assert_has_node(mg, "pkg_d", Package)
self.assert_has_node(mg, "pkg_c.f", MissingModule)
self.assert_has_edge(
mg,
"multi_level_star_import_missing",
"pkg_c",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"multi_level_star_import_missing",
"pkg_c.f",
{DependencyInfo(False, True, True, None)},
)
self.assert_has_edge(
mg, "pkg_c", "pkg_d", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg, "pkg_c.f", "pkg_c", {DependencyInfo(False, True, False, None)}
)
self.assert_edge_count(mg, 4)
self.assertEqual(mg.find_node("pkg_d").globals_written, {"e"})
self.assertEqual(mg.find_node("pkg_c").globals_written, {"e"})
self.assertEqual(
mg.find_node("multi_level_star_import_missing").globals_written, {"f"}
)
self.assert_has_roots(mg, "multi_level_star_import_missing")
self.assert_has_nodes(
mg, "multi_level_star_import_missing", "pkg_c", "pkg_d", "pkg_c.f"
)
def test_imported_aliased_toplevel(self):
mg = ModuleGraph()
mg.add_module("imported_aliased_toplevel")
self.assert_has_node(mg, "imported_aliased_toplevel", SourceModule)
self.assert_has_node(mg, "aliasing_package", Package)
self.assert_has_node(mg, "sys", BuiltinModule)
self.assert_has_node(mg, "no_imports", SourceModule)
self.assert_has_edge(
mg,
"imported_aliased_toplevel",
"aliasing_package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg, "aliasing_package", "sys", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg,
"aliasing_package",
"no_imports",
{DependencyInfo(False, True, False, "foo")},
)
self.assert_has_edge(
mg,
"imported_aliased_toplevel",
"sys",
{DependencyInfo(False, True, True, None)},
)
self.assert_has_edge(
mg,
"imported_aliased_toplevel",
"no_imports",
{DependencyInfo(False, True, True, None)},
)
self.assert_edge_count(mg, 5)
self.assert_has_roots(mg, "imported_aliased_toplevel")
self.assert_has_nodes(
mg, "imported_aliased_toplevel", "aliasing_package", "sys", "no_imports"
)
def test_import_from_package_with_star(self):
mg = ModuleGraph()
mg.add_module("import_from_package_with_star")
self.assert_has_node(mg, "import_from_package_with_star", SourceModule)
self.assert_has_node(mg, "package_with_star_import", Package)
self.assert_has_node(mg, "no_imports", SourceModule)
self.assert_has_edge(
mg,
"import_from_package_with_star",
"package_with_star_import",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package_with_star_import",
"no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 2)
self.assert_has_roots(mg, "import_from_package_with_star")
self.assert_has_nodes(
mg,
"import_from_package_with_star",
"package_with_star_import",
"no_imports",
)
self.assertIs(mg.find_node("import_from_package_with_star.a"), None)
def test_import_from_package_with_star_two_levels(self):
mg = ModuleGraph()
mg.add_module("import_from_package_with_star_two_levels")
self.assert_has_node(
mg, "import_from_package_with_star_two_levels", SourceModule
)
self.assert_has_node(mg, "package_with_star_import2", Package)
self.assert_has_node(mg, "package_with_star_import", Package)
self.assert_has_node(mg, "no_imports", SourceModule)
self.assert_has_edge(
mg,
"import_from_package_with_star_two_levels",
"package_with_star_import2",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package_with_star_import2",
"package_with_star_import",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package_with_star_import",
"no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 3)
self.assert_has_roots(mg, "import_from_package_with_star_two_levels")
self.assert_has_nodes(
mg,
"import_from_package_with_star_two_levels",
"package_with_star_import2",
"package_with_star_import",
"no_imports",
)
        self.assertIs(
            mg.find_node("import_from_package_with_star_two_levels.a"), None
        )
def test_alias_in_sys_modules(self):
try:
import no_imports
sys.modules["there_are_no_imports"] = no_imports
mg = ModuleGraph()
mg.add_module("alias_in_sys_modules")
self.assert_has_roots(mg, "alias_in_sys_modules")
self.assert_has_nodes(
mg, "alias_in_sys_modules", "there_are_no_imports", "no_imports"
)
self.assert_has_node(mg, "alias_in_sys_modules", SourceModule)
self.assert_has_node(mg, "there_are_no_imports", AliasNode)
self.assert_has_node(mg, "no_imports", SourceModule)
self.assert_has_edge(
mg,
"alias_in_sys_modules",
"there_are_no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"there_are_no_imports",
"no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 2)
finally:
del sys.modules["there_are_no_imports"]
del sys.modules["no_imports"]
def test_alias_in_sys_modules2(self):
try:
import no_imports
sys.modules["there_are_no_imports"] = no_imports
mg = ModuleGraph()
mg.add_module("no_imports")
mg.add_module("alias_in_sys_modules")
self.assert_has_roots(mg, "alias_in_sys_modules", "no_imports")
self.assert_has_nodes(
mg, "alias_in_sys_modules", "there_are_no_imports", "no_imports"
)
self.assert_has_node(mg, "alias_in_sys_modules", SourceModule)
self.assert_has_node(mg, "there_are_no_imports", AliasNode)
self.assert_has_node(mg, "no_imports", SourceModule)
self.assert_has_edge(
mg,
"alias_in_sys_modules",
"there_are_no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"there_are_no_imports",
"no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 2)
finally:
del sys.modules["there_are_no_imports"]
del sys.modules["no_imports"]
def test_package_without_spec(self):
# Usecase: fake packages in sys.modules might not
# have __spec__ and that confuses importlib.util.find_spec
import without_spec # noqa: F401
mg = ModuleGraph()
mg.add_module("without_spec.submod")
self.assert_has_nodes(mg, "without_spec", "without_spec.submod", "no_imports")
self.assert_has_edge(
mg,
"without_spec.submod",
"without_spec",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"without_spec.submod",
"no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 2)
def test_module_without_spec(self):
# Usecase: fake packages in sys.modules might not
# have __spec__ and that confuses importlib.util.find_spec
import no_imports
try:
del no_imports.__spec__
mg = ModuleGraph()
mg.add_module("global_import")
self.assert_has_nodes(mg, "global_import", "no_imports")
self.assert_has_edge(
mg,
"global_import",
"no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 1)
finally:
del sys.modules["no_imports"]
def test_package_init_missing_import(self):
mg = ModuleGraph()
mg.add_module("package_init_missing_import.submod")
self.assert_has_node(mg, "package_init_missing_import", Package)
self.assert_has_node(mg, "package_init_missing_import.submod", SourceModule)
self.assert_has_node(mg, "nosuchmodule", MissingModule)
self.assert_has_node(mg, "nosuchmodule2", MissingModule)
self.assert_has_node(mg, "no_imports", SourceModule)
self.assert_has_edge(
mg,
"package_init_missing_import",
"nosuchmodule",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package_init_missing_import",
"nosuchmodule2",
{DependencyInfo(False, True, False, None)},
)
def test_invalid_module(self):
mg = ModuleGraph()
mg.add_module("invalid_module")
self.assert_has_node(mg, "invalid_module", InvalidModule)
def test_sys_path(self):
mg = ModuleGraph()
mg.add_module("import_sys_path")
self.assert_has_node(mg, "sys.path", MissingModule)
def test_invalid_package_init(self):
mg = ModuleGraph()
mg.add_module("invalid_package_init")
self.assert_has_node(mg, "invalid_package_init", Package)
node = mg.find_node("invalid_package_init")
self.assertTrue(isinstance(node.init_module, InvalidModule))
def test_invalid_package_init_submod(self):
mg = ModuleGraph()
mg.add_module("invalid_package_init.submod")
self.assert_has_node(mg, "invalid_package_init", Package)
node = mg.find_node("invalid_package_init")
self.assertTrue(isinstance(node.init_module, InvalidModule))
self.assert_has_node(mg, "invalid_package_init.submod", SourceModule)
self.assert_has_edge(
mg,
"invalid_package_init.submod",
"invalid_package_init",
{DependencyInfo(False, True, False, None)},
)
def test_renamed_from(self):
mg = ModuleGraph()
mg.add_module("renamed_a")
self.assert_has_node(mg, "renamed_a", SourceModule)
self.assert_has_node(mg, "renamed_b", SourceModule)
self.assert_has_node(mg, "sys", BuiltinModule)
self.assert_has_node(mg, "package.submod", SourceModule)
self.assert_has_edge(
mg, "renamed_a", "renamed_b", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg, "renamed_b", "sys", {DependencyInfo(False, True, False, "c")}
)
self.assert_has_edge(
mg, "renamed_b", "package.submod", {DependencyInfo(False, True, True, "d")}
)
self.assert_has_edge(
mg, "renamed_a", "sys", {DependencyInfo(False, True, True, None)}
)
self.assert_has_edge(
mg, "renamed_a", "package.submod", {DependencyInfo(False, True, True, None)}
)
def test_renamed_attribute(self):
mg = ModuleGraph()
mg.add_module("renamed_attr")
self.assert_has_node(mg, "renamed_attr", SourceModule)
self.assert_has_node(mg, "renamed_package", Package)
self.assert_has_node(mg, "sys", BuiltinModule)
self.assert_has_edge(
mg,
"renamed_attr",
"renamed_package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg, "renamed_package", "sys", {DependencyInfo(False, True, False, None)}
)
self.assert_edge_count(mg, 2)
def test_import_aliased_missing(self):
mg = ModuleGraph()
mg.add_implies({"aliased": Alias("nosuchmodule")})
mg.add_module("import_aliased_missing")
self.assert_has_node(mg, "import_aliased_missing", SourceModule)
self.assert_has_node(mg, "aliased", AliasNode)
self.assert_has_node(mg, "nosuchmodule", MissingModule)
self.assert_has_edge(
mg,
"import_aliased_missing",
"aliased",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg, "aliased", "nosuchmodule", {DependencyInfo(False, True, False, None)}
)
self.assert_edge_count(mg, 2)
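# The absolute-import tests above combine add_module() with checks on the node classes
# imported at the top of this file (SourceModule, Package, MissingModule, ...).  The
# helper below is a hypothetical sketch, not part of the original suite, that maps every
# reachable node to the name of its node class for a single add_module() call.
def _example_classify_imports(module_name):
    """Return {identifier: node class name} for everything reachable from module_name."""
    mg = ModuleGraph()
    root = mg.add_module(module_name)  # add_module() returns the node it created
    return {node.identifier: type(node).__name__ for node in mg.iter_graph(node=root)}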
class TestModuleGraphRelativeImports(unittest.TestCase, util.TestMixin):
# Same as previous class, for relative imports
@classmethod
def setUpClass(cls):
util.clear_sys_modules(INPUT_DIR)
@classmethod
def tearDownClass(cls):
util.clear_sys_modules(INPUT_DIR)
def setUp(self):
sys.path.insert(0, os.fspath(INPUT_DIR))
def tearDown(self):
assert sys.path[0] == os.fspath(INPUT_DIR)
del sys.path[0]
def test_relative_import_toplevel(self):
mg = ModuleGraph()
mg.add_module("toplevel_invalid_relative_import")
self.assert_has_node(mg, "toplevel_invalid_relative_import", SourceModule)
self.assert_has_node(mg, ".relative", InvalidRelativeImport)
self.assert_has_edge(
mg,
"toplevel_invalid_relative_import",
".relative",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 1)
self.assert_has_nodes(mg, "toplevel_invalid_relative_import", ".relative")
def test_relative_import_toplevel_multiple(self):
mg = ModuleGraph()
mg.add_module("toplevel_invalid_relative_import_multiple")
self.assert_has_node(
mg, "toplevel_invalid_relative_import_multiple", SourceModule
)
self.assert_has_node(mg, ".relative", InvalidRelativeImport)
self.assert_has_edge(
mg,
"toplevel_invalid_relative_import_multiple",
".relative",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 1)
self.assert_has_nodes(
mg, "toplevel_invalid_relative_import_multiple", ".relative"
)
def test_relative_import_to_outside_package(self):
mg = ModuleGraph()
mg.add_module("package_invalid_relative_import")
self.assert_has_node(mg, "package_invalid_relative_import", SourceModule)
self.assert_has_node(mg, "invalid_relative_package", Package)
self.assert_has_node(mg, "..relative", InvalidRelativeImport)
self.assert_has_edge(
mg,
"package_invalid_relative_import",
"invalid_relative_package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"invalid_relative_package",
"..relative",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 2)
self.assert_has_nodes(
mg,
"package_invalid_relative_import",
"invalid_relative_package",
"..relative",
)
def test_global_import(self):
mg = ModuleGraph()
mg.add_module("basic_relative_import")
self.assert_has_node(mg, "basic_relative_import", SourceModule)
self.assert_has_node(mg, "package", Package)
self.assert_has_node(mg, "package.relative", SourceModule)
self.assert_has_node(mg, "package.submod", SourceModule)
self.assert_has_node(mg, "no_imports", SourceModule)
self.assert_has_edge(
mg,
"basic_relative_import",
"package.relative",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.relative",
"package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.relative",
"package.submod",
{DependencyInfo(False, True, True, None)},
)
self.assert_has_edge(
mg, "package.submod", "package", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg,
"package.submod",
"no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 5)
self.assert_has_nodes(
mg,
"basic_relative_import",
"package",
"package.relative",
"package.submod",
"no_imports",
)
def test_circular_imports(self):
mg = ModuleGraph()
mg.add_module("circular_relative")
self.assert_has_node(mg, "circular_relative", SourceModule)
self.assert_has_node(mg, "package.circular_a", SourceModule)
self.assert_has_node(mg, "package.circular_b", SourceModule)
self.assert_has_node(mg, "package.circular_c", SourceModule)
self.assert_has_edge(
mg,
"circular_relative",
"package.circular_a",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.circular_a",
"package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.circular_b",
"package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.circular_c",
"package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.circular_a",
"package.circular_b",
{DependencyInfo(False, True, True, None)},
)
self.assert_has_edge(
mg,
"package.circular_b",
"package.circular_c",
{DependencyInfo(False, True, True, None)},
)
self.assert_has_edge(
mg,
"package.circular_c",
"package.circular_a",
{DependencyInfo(False, True, True, None)},
)
self.assert_edge_count(mg, 7)
self.assert_has_roots(mg, "circular_relative")
self.assert_has_nodes(
mg,
"circular_relative",
"package",
"package.circular_a",
"package.circular_b",
"package.circular_c",
)
def test_missing_relative(self):
mg = ModuleGraph()
mg.add_module("missing_relative")
self.assert_has_node(mg, "missing_relative", SourceModule)
self.assert_has_node(mg, "package", Package)
self.assert_has_node(mg, "package.missing_relative", SourceModule)
self.assert_has_node(mg, "package.nosuchmodule", MissingModule)
self.assert_has_edge(
mg,
"missing_relative",
"package.missing_relative",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.missing_relative",
"package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.nosuchmodule",
"package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.missing_relative",
"package.nosuchmodule",
{DependencyInfo(False, True, True, None)},
)
self.assert_edge_count(mg, 4)
self.assert_has_roots(mg, "missing_relative")
self.assert_has_nodes(
mg,
"missing_relative",
"package",
"package.missing_relative",
"package.nosuchmodule",
)
def test_missing_package(self):
mg = ModuleGraph()
mg.add_module("missing_relative_package")
self.assert_has_nodes(
mg,
"missing_relative_package",
"relative_package_with_missing",
"relative_package_with_missing.package",
"relative_package_with_missing.package.subpackage",
)
# The "from" imported names aren't in the graph because MG
# doesn't know if the MissingModules are packages. The current
# behaviour results in cleaner graphs.
#
self.assert_has_node(mg, "missing_relative_package", SourceModule)
self.assert_has_node(mg, "relative_package_with_missing", Package)
self.assert_has_node(mg, "relative_package_with_missing.package", MissingModule)
self.assert_has_node(
mg, "relative_package_with_missing.package.subpackage", MissingModule
)
self.assert_has_edge(
mg,
"missing_relative_package",
"relative_package_with_missing",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"relative_package_with_missing",
"relative_package_with_missing.package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"relative_package_with_missing",
"relative_package_with_missing.package.subpackage",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"relative_package_with_missing.package",
"relative_package_with_missing",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"relative_package_with_missing.package.subpackage",
"relative_package_with_missing.package",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 5)
def test_multiple_imports(self):
mg = ModuleGraph()
mg.add_module("multiple_relative_imports")
self.assert_has_nodes(
mg,
"multiple_relative_imports",
"package",
"package.multiple_relative",
"package.submod",
"no_imports",
)
self.assert_has_node(mg, "multiple_relative_imports", SourceModule)
self.assert_has_node(mg, "package", Package)
self.assert_has_node(mg, "package.multiple_relative", SourceModule)
self.assert_has_node(mg, "package.submod", SourceModule)
self.assert_has_node(mg, "no_imports", SourceModule)
self.assert_has_edge(
mg,
"multiple_relative_imports",
"package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"multiple_relative_imports",
"package.multiple_relative",
{DependencyInfo(False, True, True, None)},
)
self.assert_has_edge(
mg,
"package.multiple_relative",
"package.submod",
{
DependencyInfo(False, True, True, None),
DependencyInfo(True, True, True, "whole"),
},
)
self.assert_has_edge(
mg,
"package.multiple_relative",
"package",
{
DependencyInfo(False, True, False, None),
DependencyInfo(True, True, False, None),
},
)
self.assert_has_edge(
mg, "package.submod", "package", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg,
"package.submod",
"no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 6)
def test_diamond(self):
mg = ModuleGraph()
mg.add_module("package_diamond")
self.assert_has_node(mg, "package_diamond", SourceModule)
self.assert_has_node(mg, "package", Package)
self.assert_has_node(mg, "package.diamond_a", SourceModule)
self.assert_has_node(mg, "package.diamond_b1", SourceModule)
self.assert_has_node(mg, "package.diamond_b2", SourceModule)
self.assert_has_node(mg, "package.diamond_c", SourceModule)
self.assert_has_node(mg, "sys", BuiltinModule)
self.assert_has_edge(
mg,
"package_diamond",
"package.diamond_a",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.diamond_a",
"package.diamond_b1",
{DependencyInfo(False, True, True, None)},
)
self.assert_has_edge(
mg,
"package.diamond_a",
"package.diamond_b2",
{DependencyInfo(False, True, True, None)},
)
self.assert_has_edge(
mg,
"package.diamond_b1",
"package.diamond_c",
{DependencyInfo(False, True, True, None)},
)
self.assert_has_edge(
mg,
"package.diamond_b2",
"package.diamond_c",
{DependencyInfo(False, True, True, None)},
)
self.assert_has_edge(
mg, "package.diamond_c", "sys", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg,
"package.diamond_a",
"package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.diamond_b1",
"package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.diamond_b2",
"package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.diamond_c",
"package",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 10)
self.assert_has_roots(mg, "package_diamond")
self.assert_has_nodes(
mg,
"package_diamond",
"package",
"package.diamond_a",
"package.diamond_b1",
"package.diamond_b2",
"package.diamond_c",
"sys",
)
def test_alias_import(self):
mg = ModuleGraph()
mg.add_module("aliasing_relative")
self.assert_has_node(mg, "aliasing_relative", SourceModule)
self.assert_has_node(mg, "package", Package)
self.assert_has_node(mg, "package.aliasing_relative", SourceModule)
self.assert_has_node(mg, "package.submod", SourceModule)
self.assert_has_node(mg, "no_imports", SourceModule)
self.assert_has_edge(
mg,
"aliasing_relative",
"package.aliasing_relative",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.aliasing_relative",
"package.submod",
{DependencyInfo(False, True, True, "other")},
)
self.assert_has_edge(
mg,
"package.aliasing_relative",
"package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg, "package.submod", "package", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg,
"package.submod",
"no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 5)
self.assert_has_roots(mg, "aliasing_relative")
self.assert_has_nodes(
mg,
"aliasing_relative",
"package",
"package.aliasing_relative",
"package.submod",
"no_imports",
)
def test_renamed_from(self):
mg = ModuleGraph()
mg.add_module("package.renamed_a")
self.assert_has_node(mg, "package.renamed_a", SourceModule)
self.assert_has_node(mg, "package.renamed_b", SourceModule)
self.assert_has_node(mg, "sys", BuiltinModule)
self.assert_has_node(mg, "package.submod", SourceModule)
self.assert_has_edge(
mg,
"package.renamed_a",
"package.renamed_b",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg, "package.renamed_b", "sys", {DependencyInfo(False, True, False, "c")}
)
self.assert_has_edge(
mg,
"package.renamed_b",
"package.submod",
{DependencyInfo(False, True, True, "d")},
)
self.assert_has_edge(
mg, "package.renamed_a", "sys", {DependencyInfo(False, True, True, None)}
)
self.assert_has_edge(
mg,
"package.renamed_a",
"package.submod",
{DependencyInfo(False, True, True, None)},
)
def test_renamed_attribute(self):
mg = ModuleGraph()
mg.add_module("package.renamed_attr")
self.assert_has_node(mg, "package.renamed_attr", SourceModule)
self.assert_has_node(mg, "package.renamed_package", Package)
self.assert_has_node(mg, "sys", BuiltinModule)
self.assert_has_edge(
mg,
"package.renamed_attr",
"package.renamed_package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.renamed_package",
"sys",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.renamed_package",
"package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"package.renamed_attr",
"package",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 4)
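# The relative-import tests above show that a relative import outside a package ends up
# as an InvalidRelativeImport node named after its dotted form (".relative", "..relative").
# The helper below is a hypothetical sketch, not part of the original suite, collecting
# those nodes for one module.
def _example_invalid_relative_imports(module_name):
    """Return identifiers of all InvalidRelativeImport nodes reachable from module_name."""
    mg = ModuleGraph()
    root = mg.add_module(module_name)
    return sorted(
        node.identifier
        for node in mg.iter_graph(node=root)
        if isinstance(node, InvalidRelativeImport)
    )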
class TestModuleGraphDistributions(unittest.TestCase):
@classmethod
def setUpClass(cls):
util.clear_sys_modules(INPUT_DIR)
@classmethod
def tearDownClass(cls):
util.clear_sys_modules(INPUT_DIR)
def test_missing(self):
mg = ModuleGraph()
self.assertRaises(ValueError, mg.add_distribution, "nosuchdistribution")
def test_simple_named(self):
with tempfile.TemporaryDirectory() as tmpdir:
try:
sys.path.insert(0, tmpdir)
build_and_install(
os.path.join(os.path.dirname(__file__), "simple-package"), tmpdir
)
mg = ModuleGraph()
d = mg.add_distribution("simple-package")
self.assertTrue(isinstance(mg.find_node("toplevel"), SourceModule))
self.assertTrue(isinstance(mg.find_node("extension"), ExtensionModule))
self.assertTrue(isinstance(mg.find_node("package"), Package))
self.assertTrue(
isinstance(mg.find_node("package.module"), SourceModule)
)
self.assertIs(mg.find_node("package.__init__"), None)
self.assertEqual(d.name, "simple-package")
d2 = mg.add_distribution("simple-package")
self.assertIs(d, d2)
finally:
del sys.path[0]
util.clear_sys_modules(tmpdir)
def test_simple_dist(self):
with tempfile.TemporaryDirectory() as tmpdir:
try:
sys.path.insert(0, tmpdir)
build_and_install(
os.path.join(os.path.dirname(__file__), "simple-package"), tmpdir
)
dist = distribution_named("simple-package")
self.assertIsNot(dist, None)
mg = ModuleGraph()
d = mg.add_distribution(dist)
self.assertTrue(isinstance(mg.find_node("toplevel"), SourceModule))
self.assertTrue(isinstance(mg.find_node("extension"), ExtensionModule))
self.assertTrue(isinstance(mg.find_node("package"), Package))
self.assertTrue(
isinstance(mg.find_node("package.module"), SourceModule)
)
self.assertIs(mg.find_node("package.__init__"), None)
self.assertIs(d, dist)
finally:
util.clear_sys_modules(tmpdir)
del sys.path[0]
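# The distribution tests above call add_distribution() both with a project name and with
# a PyPIDistribution obtained from distribution_named(); an unknown name raises ValueError.
# The helper below is a hypothetical sketch, not part of the original suite, using the
# object-based form.
def _example_add_distribution(distribution_name):
    """Add an installed distribution to a fresh graph and return the distribution object."""
    dist = distribution_named(distribution_name)  # None if the distribution is not installed
    if dist is None:
        raise ValueError(f"no such distribution: {distribution_name}")
    mg = ModuleGraph()
    return mg.add_distribution(dist)  # a plain name string works as well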
class TestModuleGraphHooks(unittest.TestCase, util.TestMixin):
@classmethod
def setUpClass(cls):
util.clear_sys_modules(INPUT_DIR)
@classmethod
def tearDownClass(cls):
util.clear_sys_modules(INPUT_DIR)
def setUp(self):
sys.path.insert(0, os.fspath(INPUT_DIR))
def tearDown(self):
assert sys.path[0] == os.fspath(INPUT_DIR)
del sys.path[0]
def test_post_processing(self):
# This adds a number of other test modules to verify
# that the post processing hook is called when needed.
nodes_processed = set()
def hook(graph, node):
nodes_processed.add(node.identifier)
mg = ModuleGraph()
mg.add_post_processing_hook(hook)
mg.add_module("global_import")
mg.add_module("nosuchmodule")
mg.add_module("missing_in_package")
mg.add_module("missing_relative")
mg.add_module("toplevel_invalid_relative_import")
mg.add_script(INPUT_DIR / "trivial-script")
self.assertEqual(
nodes_processed,
{
"global_import",
"no_imports",
"nosuchmodule",
os.fspath(INPUT_DIR / "trivial-script"),
"missing_in_package",
"package",
"package.missingmodule",
"package.nosuchmodule",
"package.missing_relative",
"missing_relative",
"toplevel_invalid_relative_import",
".relative",
},
)
def test_excluded_module(self):
mg = ModuleGraph()
mg.add_excludes(["global_import"])
mg.add_module("excluded_import")
self.assert_has_nodes(mg, "excluded_import", "global_import")
self.assert_has_node(mg, "global_import", ExcludedModule)
self.assert_has_edge(
mg,
"excluded_import",
"global_import",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 1)
self.assertRaises(TypeError, mg.add_excludes, "some_name")
def test_excluded_package(self):
mg = ModuleGraph()
mg.add_excludes(["package"])
mg.add_module("package_import_single_level")
self.assert_has_nodes(
mg, "package_import_single_level", "package", "package.submod"
)
self.assert_has_node(mg, "package", ExcludedModule)
self.assert_has_node(mg, "package.submod", ExcludedModule)
self.assert_has_edge(
mg,
"package_import_single_level",
"package.submod",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg, "package.submod", "package", {DependencyInfo(False, True, False, None)}
)
self.assert_edge_count(mg, 2)
def test_implied_stdlib(self):
with self.subTest("using implies"):
mg = ModuleGraph()
mg.add_module("_curses")
self.assert_has_node(mg, "_curses")
self.assert_has_node(mg, "curses")
self.assert_has_edge(
mg, "_curses", "curses", {DependencyInfo(False, True, False, None)}
)
with self.subTest("without implies"):
mg = ModuleGraph(use_stdlib_implies=False)
mg.add_module("_curses")
self.assert_has_node(mg, "_curses")
self.assertIs(mg.find_node("curses"), None)
def test_implied_imports(self):
mg = ModuleGraph()
mg.add_implies({"no_imports": ("sys", "gc"), "global_import": ("marshal",)})
mg.add_module("import_with_implies")
self.assert_has_nodes(
mg,
"import_with_implies",
"no_imports",
"global_import",
"sys",
"gc",
"marshal",
)
self.assert_has_roots(mg, "import_with_implies")
self.assert_has_edge(
mg,
"import_with_implies",
"no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"import_with_implies",
"global_import",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg, "no_imports", "sys", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg, "no_imports", "gc", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg,
"global_import",
"no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg, "global_import", "marshal", {DependencyInfo(False, True, False, None)}
)
self.assert_edge_count(mg, 6)
def test_implies_to_package(self):
mg = ModuleGraph()
mg.add_implies({"no_imports": ("package.submod",)})
mg.add_module("global_import")
self.assert_has_edge(
mg,
"no_imports",
"package.submod",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg, "package.submod", "package", {DependencyInfo(False, True, False, None)}
)
def test_implies_vs_add_module(self):
mg = ModuleGraph()
mg.add_implies({"no_imports": ("marshal",)})
mg.add_module("no_imports")
self.assert_has_edge(
mg, "no_imports", "marshal", {DependencyInfo(False, True, False, None)}
)
def test_implies_vs_import_module(self):
mg = ModuleGraph()
mg.add_implies({"no_imports": ("marshal",)})
node = MissingModule("missing")
mg.add_node(node)
mg.add_root(node)
mg.import_module(node, "no_imports")
mg._run_stack()
self.assert_has_edge(
mg, "no_imports", "marshal", {DependencyInfo(False, True, False, None)}
)
def test_implies_vs_excludes(self):
mg = ModuleGraph()
mg.add_excludes(["no_imports"])
mg.add_implies({"no_imports": ("sys",)})
mg.add_module("global_import")
self.assert_has_node(mg, "no_imports", ExcludedModule)
self.assert_has_nodes(mg, "global_import", "no_imports")
def test_implies_vs_excludes2(self):
mg = ModuleGraph()
mg.add_implies({"no_imports": ("sys",)})
mg.add_excludes(["no_imports"])
mg.add_module("global_import")
self.assert_has_node(mg, "no_imports", ExcludedModule)
self.assert_has_nodes(mg, "global_import", "no_imports")
def test_implies_order(self):
mg = ModuleGraph()
mg.add_implies({"no_imports": ("sys",)})
mg.add_module("sys")
mg.add_module("global_import")
self.assert_has_nodes(mg, "global_import", "no_imports", "sys")
self.assert_has_node(mg, "no_imports", SourceModule)
self.assert_has_edge(
mg, "no_imports", "sys", {DependencyInfo(False, True, False, None)}
)
def test_alias(self):
mg = ModuleGraph()
mg.add_implies({"no_imports": Alias("marshal")})
mg.add_module("global_import")
self.assert_has_nodes(mg, "global_import", "no_imports", "marshal")
self.assert_has_node(mg, "no_imports", AliasNode)
self.assert_has_edge(
mg,
"global_import",
"no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg, "no_imports", "marshal", {DependencyInfo(False, True, False, None)}
)
def test_alias_to_package(self):
mg = ModuleGraph()
mg.add_implies({"no_imports": Alias("package.submod")})
mg.add_module("global_import")
self.assert_has_nodes(
mg, "global_import", "no_imports", "package", "package.submod"
)
self.assert_has_node(mg, "no_imports", AliasNode)
self.assert_has_edge(
mg,
"global_import",
"no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg,
"no_imports",
"package.submod",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg, "package.submod", "package", {DependencyInfo(False, True, False, None)}
)
def test_alias_to_package_import_from(self):
mg = ModuleGraph()
mg.add_implies({"the_package": Alias("package")})
mg.add_module("alias_to_package_import_from")
self.assert_has_node(mg, "the_package", AliasNode)
self.assert_has_node(mg, "package", Package)
self.assert_has_edge(
mg, "the_package", "package", {DependencyInfo(False, True, False, None)}
)
self.assert_has_nodes(
mg,
"alias_to_package_import_from",
"the_package",
"package",
"package.submod",
"no_imports",
)
self.assert_has_node(mg, "alias_to_package_import_from", SourceModule)
self.assert_has_node(mg, "the_package", AliasNode)
self.assert_has_node(mg, "package", Package)
self.assert_has_node(mg, "package.submod", SourceModule)
self.assert_has_node(mg, "no_imports", SourceModule)
self.assert_has_edge(
mg,
"alias_to_package_import_from",
"the_package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg, "the_package", "package", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg,
"alias_to_package_import_from",
"package.submod",
{DependencyInfo(False, True, True, None)},
)
self.assert_has_edge(
mg, "package.submod", "package", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg,
"package.submod",
"no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_edge_count(mg, 5)
def test_alias_to_module_import_from(self):
mg = ModuleGraph()
mg.add_implies({"the_package": Alias("no_imports")})
mg.add_module("alias_to_module_import_from")
self.assert_has_nodes(
mg, "alias_to_module_import_from", "the_package", "no_imports"
)
self.assert_has_node(mg, "alias_to_module_import_from", SourceModule)
self.assert_has_node(mg, "the_package", AliasNode)
self.assert_has_node(mg, "no_imports", SourceModule)
self.assert_has_edge(
mg,
"alias_to_module_import_from",
"the_package",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg, "the_package", "no_imports", {DependencyInfo(False, True, False, None)}
)
self.assert_edge_count(mg, 2)
def test_alias_order(self):
mg = ModuleGraph()
mg.add_implies({"no_imports": Alias("marshal")})
mg.add_module("marshal")
mg.add_module("global_import")
self.assert_has_nodes(mg, "global_import", "no_imports", "marshal")
self.assert_has_node(mg, "no_imports", AliasNode)
self.assert_has_edge(
mg,
"global_import",
"no_imports",
{DependencyInfo(False, True, False, None)},
)
self.assert_has_edge(
mg, "no_imports", "marshal", {DependencyInfo(False, True, False, None)}
)
def test_import_module(self):
mg = ModuleGraph()
node = MissingModule("missing")
mg.add_node(node)
mg.add_root(node)
mg.import_module(node, "no_imports")
self.assert_has_edge(
mg, "missing", "no_imports", {DependencyInfo(False, True, False, None)}
)
def test_import_module_existing(self):
mg = ModuleGraph()
node = MissingModule("missing")
mg.add_node(node)
mg.add_root(node)
mg.add_module("no_imports")
mg.import_module(node, "no_imports")
self.assert_has_edge(
mg, "missing", "no_imports", {DependencyInfo(False, True, False, None)}
)
def test_import_module_twice(self):
mg = ModuleGraph()
node = MissingModule("missing")
mg.add_node(node)
mg.add_root(node)
mg.import_module(node, "no_imports")
mg.import_module(node, "no_imports")
self.assert_has_edge(
mg, "missing", "no_imports", {DependencyInfo(False, True, False, None)}
)
def test_import_module_package(self):
mg = ModuleGraph()
node = MissingModule("missing")
mg.add_node(node)
mg.add_root(node)
mg.import_module(node, "package.submod")
self.assert_has_edge(
mg, "missing", "package.submod", {DependencyInfo(False, True, False, None)}
)
self.assert_has_edge(
mg, "package.submod", "package", {DependencyInfo(False, True, False, None)}
)
def test_using_missing_hook(self):
missing = set()
def missing_hook(graph, importing_module, module_name):
missing.add(module_name)
node = InvalidRelativeImport(module_name)
graph.add_node(node)
return node
mg = ModuleGraph(use_builtin_hooks=False)
mg.add_missing_hook(missing_hook)
mg.add_module("missing")
self.assertEqual(missing, {"nosuchmodule"})
self.assert_has_node(mg, "missing", SourceModule)
self.assert_has_node(mg, "nosuchmodule", InvalidRelativeImport)
self.assert_has_edge(
mg, "missing", "nosuchmodule", {DependencyInfo(False, True, False, None)}
)
# need to test for the correct "importing_module"
# as well (for various cases)
REPORT_HEADER = """
Class Name File
----- ---- ----
"""
class TestModuleGraphQuerying(unittest.TestCase):
#
# Tests for APIs that query the graph (other than those inherited from ObjectGraph)
#
def test_distributions_empty(self):
mg = ModuleGraph()
self.assertEqual(set(mg.distributions()), set())
n1 = MissingModule("n1")
mg.add_node(n1)
mg.add_root(n1)
self.assertEqual(set(mg.distributions()), set())
def test_distributions_real(self):
import pip
mg = ModuleGraph()
node = MissingModule("nosuchmodule")
node.distribution = distribution_for_file(pip.__file__, sys.path)
self.assertIsNot(node.distribution, None)
mg.add_node(node)
result = list(mg.distributions())
self.assertEqual(len(result), 0)
result = list(mg.distributions(False))
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], PyPIDistribution)
self.assertEqual(result[0].name, "pip")
mg.add_root(node)
result = list(mg.distributions())
self.assertIsInstance(result[0], PyPIDistribution)
self.assertEqual(result[0].name, "pip")
def test_some_distributions(self):
def make_distribution(name):
return PyPIDistribution(name, name, "", set(), set())
mg = ModuleGraph()
n1 = MissingModule("n1")
n2 = MissingModule("n2")
n3 = MissingModule("n3")
n1.distribution = n2.distribution = n3.distribution = make_distribution("dist1")
mg.add_node(n1)
mg.add_node(n2)
mg.add_node(n3)
mg.add_root(n1)
mg.add_root(n2)
mg.add_root(n3)
result = list(mg.distributions())
self.assertEqual(len(result), 1)
self.assertEqual(result[0].name, "dist1")
n4 = MissingModule("n4")
n4.distribution = make_distribution("dist2")
mg.add_node(n4)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].name, "dist1")
mg.add_root(n4)
result = list(mg.distributions())
self.assertEqual(len(result), 2)
self.assertEqual({d.name for d in result}, {"dist1", "dist2"})
def test_distributions_in_graph(self):
def make_distribution(name):
return PyPIDistribution(name, name, "", set(), set())
mg = ModuleGraph()
n1 = MissingModule("n1")
n2 = MissingModule("n2")
n3 = MissingModule("n3")
n1.distribution = n2.distribution = n3.distribution = make_distribution("dist1")
mg.add_node(n1)
mg.add_node(n2)
mg.add_node(n3)
mg.add_root(n1)
mg.add_root(n2)
mg.add_root(n3)
dist2 = make_distribution("dist2")
mg.add_node(dist2)
mg.add_root(dist2)
result = list(mg.distributions())
self.assertEqual(len(result), 1)
self.assertEqual({d.name for d in result}, {"dist1"})
def test_report_empty(self):
mg = ModuleGraph()
fp = StringIO()
mg.report(fp)
self.assertEqual(fp.getvalue(), REPORT_HEADER)
def test_report_unreachable(self):
mg = ModuleGraph()
fp = StringIO()
mg.add_node(MissingModule("n1"))
mg.report(fp)
self.assertEqual(fp.getvalue(), REPORT_HEADER)
def test_report_one(self):
mg = ModuleGraph()
fp = StringIO()
n1 = MissingModule("n1")
n1.filename = "FILE"
mg.add_node(n1)
mg.add_root(n1)
mg.report(fp)
self.assertEqual(
fp.getvalue(),
REPORT_HEADER + "MissingModule n1 FILE\n",
)
def test_report_with_distribution(self):
mg = ModuleGraph()
fp = StringIO()
n1 = MissingModule("n1")
n1.filename = "FILE"
mg.add_node(n1)
dist = distribution_named("pip")
self.assertIsNot(dist, None)
mg.add_node(dist)
mg.add_root(dist)
mg.add_edge(dist, n1, None)
mg.report(fp)
self.assertEqual(
fp.getvalue(),
REPORT_HEADER + "MissingModule n1 FILE\n",
)
|
# import importlib
#
#
# path = 'scrapy.middlerware.C1'
#
# md,cls_name = path.rsplit('.', maxsplit=1)
# print(cls_name)
#
# importlib.import_module(md)
class Foo(object):
def __getitem__(self, item):
return "123"
def __setitem__(self, key, value):
pass
def __delitem__(self):
pass
obj = Foo()
# b = obj['k1']
# print(b)
# obj['k1'] = 666
#
# del obj['k1']
class CacheSession(object):
def __getitem__(self, item):
return '123'
def __setitem__(self, key, value):
pass
def __delitem__(self, key):
pass
class RedisSession(object):
def __getitem__(self, item):
return '123'
class SessionFactory(object):
@staticmethod
def get_session():
import settings
md, cls_name = settings.SESSION_ENGINE.rsplit('.', maxsplit=1)
import importlib
md = importlib.import_module(md)
cls = getattr(md, cls_name)
return cls
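# A minimal sketch of the dotted-path loading that SessionFactory.get_session performs,
# using a hypothetical engine string so it does not depend on a local settings module
# (illustrative only):
if __name__ == '__main__':
    import importlib
    engine_path = 'collections.OrderedDict'  # hypothetical stand-in for settings.SESSION_ENGINE
    module_name, class_name = engine_path.rsplit('.', maxsplit=1)
    module = importlib.import_module(module_name)
    session_class = getattr(module, class_name)
    print(session_class)  # -> <class 'collections.OrderedDict'>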
|
import operator
import collections
import pandas as pd
import numpy as np
from ramm_tox import util, stats
def round_concentration(values):
"""Round concentration values to 4 decimal places in log space."""
return 10 ** np.round(np.log10(values), 4)
def strings_to_wordsets(strings, stop_words=None):
"""Build a dict of wordsets from a list of strings, with optional filter.
For each distinct word found in the list of strings, the wordset dict will
map that word to a set of the strings that contain it. A list of words to
ignore may be passed in stop_words.
"""
string_words = [set(w.split(' ')) for w in (s.lower() for s in strings)]
words = reduce(operator.or_, string_words)
if stop_words:
words -= set(stop_words)
wordsets = collections.OrderedDict(
(w, set(strings[i] for i, s in enumerate(string_words) if w in s))
for w in sorted(words))
return wordsets
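# Illustrative example (not part of the original analysis): calling
#     strings_to_wordsets(['foo bar', 'bar baz'], stop_words=['baz'])
# returns an OrderedDict that maps 'bar' to {'foo bar', 'bar baz'} and 'foo' to
# {'foo bar'}, with the keys in sorted order.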
util.init_paths()
viability_path = util.data_path.child('CellViability_Combined_mean&variance2.xlsx')
imaging_path = util.data_path.child('HCI_Rep1-4_zscores.csv')
viability_data = pd.read_excel(viability_path, 0)
vd_filter_no_media = ~viability_data.name.str.lower().str.startswith('medium')
viability_data = viability_data[vd_filter_no_media]
vd_filter_72h = viability_data['time [h]'] == 72
viability_data = viability_data[vd_filter_72h].drop('time [h]', axis=1)
viability_data.dose = round_concentration(viability_data.dose)
imaging_data = pd.read_csv(imaging_path, encoding='utf-8')
imaging_filter_no_media = ~imaging_data.pert_iname.str.lower().str.startswith('medium')
imaging_data = imaging_data[imaging_filter_no_media]
imaging_data.pert_dose = round_concentration(imaging_data.pert_dose)
viability_single = viability_data[viability_data.name == 'Omeprazole'] \
[['dose', 'average']].pivot_table(index='dose').average
imaging_single = imaging_data[imaging_data.pert_iname == 'Omeprazole'] \
.drop('pert_iname', axis=1).rename(columns=({'pert_dose': 'dose'})) \
.pivot_table(index='dose', columns='Timepoint [h]')
corr, p = stats.pearsonr(imaging_single.values.T, viability_single.values)
fake_genes = [x[0] + ' t=' + unicode(x[1]) + 'h' for x in imaging_single.columns]
fake_genes = [s.replace('(2)-', '(2) -') for s in fake_genes]
wordsets = strings_to_wordsets(fake_genes, stop_words=['', '-'])
with open('genesets.gmt', 'w') as f:
gmt_rows = ('\t'.join([w, ''] + list(ss)) for w, ss in wordsets.items())
f.write('\n'.join(gmt_rows).encode('utf-8'))
rnk_data = pd.Series(corr, index=fake_genes)
rnk_data.to_csv('data.rnk', sep='\t', encoding='utf8')
|
import json
import random
import os
import numpy as np
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import copy
import math
import h5py
import models.Constants as Constants
from bisect import bisect_left
import torch.nn.functional as F
import pickle
from pandas.io.json import json_normalize
def resampling(source_length, target_length):
return [round(i * (source_length-1) / (target_length-1)) for i in range(target_length)]
def get_frames_idx(length, n_frames, random_type, equally_sampling=False):
bound = [int(i) for i in np.linspace(0, length, n_frames+1)]
idx = []
all_idx = [i for i in range(length)]
if random_type == 'all_random' and not equally_sampling:
idx = random.sample(all_idx, n_frames)
else:
for i in range(n_frames):
if not equally_sampling:
tmp = np.random.randint(bound[i], bound[i+1])
else:
tmp = (bound[i] + bound[i+1]) // 2
idx.append(tmp)
return sorted(idx)
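# Worked example (illustrative): with length=10, n_frames=3 and equally_sampling=True,
# the bounds are [0, 3, 6, 10] and the segment midpoints give indices [1, 4, 8];
# with random_type='segment_random' and equally_sampling=False, one random index is
# drawn from each of the segments [0, 3), [3, 6) and [6, 10).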
class VideoDataset(Dataset):
def __init__(self, opt, mode, print_info=False, shuffle_feats=0, specific=-1):
super(VideoDataset, self).__init__()
self.mode = mode
self.random_type = opt.get('random_type', 'segment_random')
assert self.mode in ['train', 'validate', 'test', 'all', 'trainval']
assert self.random_type in ['segment_random', 'all_random']
# load the json file which contains information about the dataset
data = pickle.load(open(opt['info_corpus'], 'rb'))
info = data['info']
self.itow = info['itow']
self.wtoi = {v: k for k, v in self.itow.items()}
self.itoc = info.get('itoc', None)
self.itop = info.get('itop', None)
self.itoa = info.get('itoa', None)
self.length_info = info['length_info']
self.splits = info['split']
if self.mode == 'trainval':
self.splits['trainval'] = self.splits['train'] + self.splits['validate']
self.split_category = info.get('split_category', None)
self.id_to_vid = info.get('id_to_vid', None)
self.captions = data['captions']
self.pos_tags = data['pos_tags']
self.references = pickle.load(open(opt['reference'], 'rb'))
self.specific = specific
self.num_category = opt.get('num_category', 20)
self.max_len = opt["max_len"]
self.n_frames = opt['n_frames']
self.equally_sampling = opt.get('equally_sampling', False)
self.total_frames_length = opt.get('total_frames_length', 60)
self.data_i = [self.load_database(opt["feats_i"]), opt["dim_i"], opt.get("dummy_feats_i", False)]
self.data_m = [self.load_database(opt["feats_m"]), opt["dim_m"], opt.get("dummy_feats_m", False)]
#self.data_i = [[], opt["dim_i"], opt.get("dummy_feats_i", False)]
#self.data_m = [[], opt["dim_m"], opt.get("dummy_feats_m", False)]
self.data_a = [self.load_database(opt["feats_a"]), opt["dim_a"], opt.get("dummy_feats_a", False)]
self.data_s = [self.load_database(opt.get("feats_s", [])), opt.get("dim_s", 10), False]
self.data_t = [self.load_database(opt.get("feats_t", [])), opt.get('dim_t', 10), False]
self.mask_prob = opt.get('teacher_prob', 1)
self.decoder_type = opt['decoder_type']
self.random = np.random.RandomState(opt.get('seed', 0))
self.obj = self.load_database(opt.get('object_path', ''))
self.all_caps_a_round = opt['all_caps_a_round']
self.load_feats_type = opt['load_feats_type']
self.method = opt.get('method', 'mp')
self.demand = opt['demand']
self.opt = opt
if print_info: self.print_info(opt)
self.beta_low, self.beta_high = opt.get('beta', [0, 1])
if (opt.get('triplet', False) or opt.get('knowledge_distillation_with_bert', False)) and self.mode == 'train':
self.bert_embeddings = self.load_database(opt['bert_embeddings'])
else:
self.bert_embeddings = None
if opt.get('load_generated_captions', False):
self.generated_captions = pickle.load(open(opt['generated_captions'], 'rb'))
assert self.mode in ['test']
else:
self.generated_captions = None
self.infoset = self.make_infoset()
def get_references(self):
return self.references
def get_preprocessed_references(self):
return self.captions
def make_infoset(self):
infoset = []
# decide the size of infoset
if self.specific != -1:
# we only evaluate partial examples with a specific category (MSRVTT, [0, 19])
ix_set = [int(item) for item in self.split_category[self.mode][self.specific]]
else:
# we evaluate all examples
ix_set = [int(item) for item in self.splits[self.mode]]
vatex = self.opt['dataset'] == 'VATEX' and self.mode == 'test'
for ix in ix_set:
vid = 'video%d' % ix
if vatex:
category = 0
captions = [[0]]
pos_tags = [[0]]
length_target = [0]
else:
category = self.itoc[ix] if self.itoc is not None else 0
captions = self.captions[vid]
pos_tags = self.pos_tags[vid] if self.pos_tags is not None else ([None] * len(captions))
                # prepare length info for each video example, only if decoder_type == 'NARFormer'
# e.g., 'video1': [0, 0, 3, 5, 0]
if self.length_info is None:
length_target = np.zeros(self.max_len)
else:
length_target = self.length_info[vid]
#length_target = length_target[1:self.max_len+1]
length_target = length_target[:self.max_len]
if len(length_target) < self.max_len:
length_target += [0] * (self.max_len - len(length_target))
#right_sum = sum(length_target[self.max_len+1:])
#length_target[-1] += right_sum
length_target = np.array(length_target) / sum(length_target)
if self.mode == 'train' and self.all_caps_a_round:
# infoset will contain all captions
for i, (cap, pt) in enumerate(zip(captions, pos_tags)):
item = {
'vid': vid,
'labels': cap,
'pos_tags': pt,
'category': category,
'length_target': length_target,
'cap_id': i,
}
infoset.append(item)
else:
if self.generated_captions is not None:
# edit the generated captions
cap = self.generated_captions[vid][-1]['caption']
#print(cap)
labels = [Constants.BOS]
for w in cap.split(' '):
labels.append(self.wtoi[w])
labels.append(Constants.EOS)
#print(labels)
item = {
'vid': vid,
'labels': labels,
'pos_tags': pos_tags[0],
'category': category,
'length_target': length_target
}
else:
# infoset will contain partial captions, one caption per video clip
cap_ix = random.randint(0, len(self.captions[vid]) - 1) if self.mode == 'train' else 0
#print(captions[0])
item = {
'vid': vid,
'labels': captions[cap_ix],
'pos_tags': pos_tags[cap_ix],
'category': category,
'length_target': length_target,
'cap_id': cap_ix,
}
infoset.append(item)
return infoset
def shuffle(self):
random.shuffle(self.infoset)
def __getitem__(self, ix):
vid = self.infoset[ix]['vid']
labels = self.infoset[ix]['labels']
taggings = self.infoset[ix]['pos_tags']
category = self.infoset[ix]['category']
length_target = self.infoset[ix]['length_target']
cap_id = self.infoset[ix].get('cap_id', None)
if cap_id is not None and self.bert_embeddings is not None:
bert_embs = np.asarray(self.bert_embeddings[0][vid])#[cap_id]
else:
bert_embs = None
attribute = self.itoa[vid]
frames_idx = get_frames_idx(
self.total_frames_length,
self.n_frames,
self.random_type,
equally_sampling = True if self.mode != 'train' else self.equally_sampling
) if self.load_feats_type == 0 else None
load_feats_func = self.load_feats if self.load_feats_type == 0 else self.load_feats_padding
feats_i = load_feats_func(self.data_i, vid, frames_idx)
feats_m = load_feats_func(self.data_m, vid, frames_idx, padding=False)#, scale=0.1)
feats_a = load_feats_func(self.data_a, vid, frames_idx)#, padding=False)
feats_s = load_feats_func(self.data_s, vid, frames_idx)
feats_t = load_feats_func(self.data_t, vid, frames_idx)#, padding=False)
results = self.make_source_target(labels, taggings)
tokens, labels, pure_target, taggings = map(
lambda x: results[x],
["dec_source", "dec_target", "pure_target", "tagging"]
)
tokens_1 = results.get('dec_source_1', None)
labels_1 = results.get('dec_target_1', None)
data = {}
data['feats_i'] = torch.FloatTensor(feats_i)
data['feats_m'] = torch.FloatTensor(feats_m)#.mean(0).unsqueeze(0).repeat(self.n_frames, 1)
data['feats_a'] = torch.FloatTensor(feats_a)
data['feats_s'] = F.softmax(torch.FloatTensor(feats_s), dim=1)
#print(feats_t.shape)
data['feats_t'] = torch.FloatTensor(feats_t)
data['tokens'] = torch.LongTensor(tokens)
data['labels'] = torch.LongTensor(labels)
data['pure_target'] = torch.LongTensor(pure_target)
data['length_target'] = torch.FloatTensor(length_target)
data['attribute'] = torch.FloatTensor(attribute)
if tokens_1 is not None:
data['tokens_1'] = torch.LongTensor(tokens_1)
data['labels_1'] = torch.LongTensor(labels_1)
if taggings is not None:
data['taggings'] = torch.LongTensor(taggings)
if bert_embs is not None:
data['bert_embs'] = torch.FloatTensor(bert_embs)
if self.decoder_type == 'LSTM' or self.decoder_type == 'ENSEMBLE':
tmp = np.zeros(self.num_category)
tmp[category] = 1
data['category'] = torch.FloatTensor(tmp)
else:
data['category'] = torch.LongTensor([category])
if frames_idx is not None:
data['frames_idx'] = frames_idx
data['video_ids'] = vid
if len(self.obj):
data['obj'] = torch.FloatTensor(np.asarray(self.obj[0][vid]))
return data
def __len__(self):
return len(self.infoset)
def get_mode(self):
return self.id_to_vid, self.mode
def set_splits_by_json_path(self, json_path):
self.splits = json.load(open(json_path))['videos']
def get_vocab_size(self):
return len(self.get_vocab())
def get_vocab(self):
return self.itow
def print_info(self, opt):
print('vocab size is ', len(self.itow))
print('number of train videos: ', len(self.splits['train']))
print('number of val videos: ', len(self.splits['validate']))
print('number of test videos: ', len(self.splits['test']))
print('load image feats (%d) from %s' % (opt["dim_i"], opt["feats_i"]))
print('load motion feats (%d) from %s' % (opt["dim_m"], opt["feats_m"]))
print('load audio feats (%d )from %s' % (opt["dim_a"], opt["feats_a"]))
print('max sequence length in data is', self.max_len)
print('load feats type: %d' % self.load_feats_type)
def load_database(self, path):
if not path:
return []
database = []
if isinstance(path, list):
for p in path:
if '.hdf5' in p:
database.append(h5py.File(p, 'r'))
else:
if '.hdf5' in path:
database.append(h5py.File(path, 'r'))
return database
def load_feats(self, data, vid, frames_idx, padding=True):
databases, dim, dummy = data
if not len(databases) or dummy:
return np.zeros((self.n_frames, dim))
feats = []
for database in databases:
if vid not in database.keys():
return np.zeros((self.n_frames, dim))
else:
data = np.asarray(database[vid])
if len(data.shape) == 1 and padding:
data = data[np.newaxis, :].repeat(self.total_frames_length, axis=0)
feats.append(data)
if len(feats[0].shape) == 1:
feats = np.concatenate(feats, axis=0)
return feats
feats = np.concatenate(feats, axis=1)
return feats[frames_idx]
def load_feats_padding(self, data, vid, dummy=None, padding=True, scale=1):
databases, dim, _ = data
if not len(databases):
return np.zeros((self.n_frames, dim))
feats = []
for database in databases:
if vid not in database.keys():
if padding:
return np.zeros((self.n_frames, dim))
else:
return np.zeros(dim)
else:
data = np.asarray(database[vid])
if len(data.shape) == 1 and padding:
data = data[np.newaxis, :].repeat(self.total_frames_length, axis=0)
feats.append(data * scale)
if len(feats[0].shape) == 1:
feats = np.concatenate(feats, axis=0)
return feats
feats = np.concatenate(feats, axis=1)
source_length = feats.shape[0]
if source_length > self.n_frames:
frames_idx = get_frames_idx(
source_length,
self.n_frames,
self.random_type,
equally_sampling = True if self.mode != 'train' else self.equally_sampling)
else:
frames_idx = resampling(source_length, self.n_frames)
#frames_idx = [i for i in range(feats.size(0))]
#frames_idx += [-1] * (self.n_frames - feats.size(0))
#print(vid, feats.sum(), feats.shape, frames_idx)
return feats[frames_idx]
def padding(self, seq, add_eos=True):
if seq is None:
return None
res = seq.copy()
if len(res) > self.max_len:
res = res[:self.max_len]
if add_eos:
res[-1] = Constants.EOS
else:
res += [Constants.PAD] * (self.max_len - len(res))
return res
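    # Example of the padding contract (illustrative; assumes Constants.PAD == 0 and
    # Constants.EOS == 3, which may differ in models/Constants.py): with max_len=5,
    # padding([7, 8, 9], add_eos=False) -> [7, 8, 9, 0, 0], while
    # padding([7, 8, 9, 10, 11, 12], add_eos=True) -> [7, 8, 9, 10, 3].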
def make_source_target(self, target, tagging):
if self.decoder_type == 'NARFormer':
results = self.source_target_mlm(target[1:-1]) # exclude <bos> <eos>
else:
# ARFormer
results = {
'dec_source': self.padding(target, add_eos=True),
'dec_target': self.padding(target, add_eos=True)
}
assert len(results['dec_source']) == len(results['dec_target'])
if self.method in ['ag', 'nv']:
results.update(self.source_target_visual_word(target=target, pos_tag=tagging))
if 'pure_target' not in results.keys():
results['pure_target'] = self.padding(target.copy(), add_eos=True)
if 'tagging' not in results.keys():
results['tagging'] = self.padding(tagging, add_eos=True)
return results
def source_target_mlm(self, target):
assert target[0] != Constants.BOS
assert target[-1] != Constants.EOS
min_num_masks = 1
dec_source = torch.LongTensor(target)
dec_target_cp = torch.LongTensor(target)
dec_target = torch.LongTensor([Constants.PAD] * len(dec_source))
if self.mode == 'train':
if min_num_masks >= len(dec_source):
ind = np.array([],dtype=np.uint8)
else:
low = max(int(len(dec_source) * self.beta_low), min_num_masks)
high = max(int(len(dec_source) * self.beta_high), min_num_masks+1)
sample_size = self.random.randint(low, high)
ind = self.random.choice(len(dec_source) , size=sample_size, replace=False)
dec_source[ind] = Constants.MASK
dec_target[ind] = dec_target_cp[ind]
else:
dec_source[dec_source!=Constants.PAD] = Constants.MASK
dec_target = dec_target_cp
dec_source = self.padding(dec_source.tolist(), add_eos=False)
dec_target = self.padding(dec_target.tolist(), add_eos=False)
pure_target = self.padding(target, add_eos=False)
return {'dec_source': dec_source, 'dec_target': dec_target, 'pure_target': pure_target}
def source_target_visual_word(self, **kwargs):
target = kwargs['target']
pos_tag = kwargs['pos_tag']
sent_length = len(target[1:-1]) # exclude <bos> <eos>
if self.decoder_type == 'NARFormer':
visual_tag = Constants.BOS
target_tag = Constants.MASK
else:
visual_tag = Constants.MASK
target_tag = Constants.BOS
if self.mode != 'train':
dec_target_1 = [0]
dec_source_1 = [0]
else:
assert len(target) == len(pos_tag)
assert self.itop is not None
dec_target_cp = torch.LongTensor(target[1:-1])
            dec_source_1 = self.padding(
                [visual_tag] * (sent_length if self.decoder_type == 'NARFormer' else len(target)),
                add_eos=False if self.decoder_type == 'NARFormer' else True)
# get the position of tokens that have the pos_tag we demand
pos_satisfied_ind = []
for i, item in enumerate(pos_tag[1:-1]):
w = self.itow[target[i+1]]
# we ignore verb ``be''
if self.itop[item] in self.demand and w not in ['is', 'are', 'was', 'were', 'be']:
pos_satisfied_ind.append(i)
pos_satisfied_ind = np.array(pos_satisfied_ind)
# decoder1 need to predict tokens with satisfied pos_tag from scratch
# meanwhile, decoder1 should learn to keep the remaining tokens (i.e., <mask>) unchanged
dec_target_1 = torch.LongTensor([target_tag] * sent_length)
dec_target_1[pos_satisfied_ind] = dec_target_cp[pos_satisfied_ind]
if self.decoder_type == 'NARFormer':
dec_target_1 = self.padding(dec_target_1.tolist(), add_eos=False)
else:
# when training with autoregressive transformer, the first token will be ignored, i.e., label = dec_target_1[1:]
dec_target_1 = self.padding([target[0]] + dec_target_1.tolist() + [Constants.BOS], add_eos=True)
#print(dec_source_1, dec_target_1)
return {'dec_source_1': dec_source_1, 'dec_target_1': dec_target_1}
class BD_Dataset(Dataset):
def __init__(self, opt, mode, print_info=False, shuffle_feats=0, specific=-1, target_ratio=-1):
super(BD_Dataset, self).__init__()
self.mode = mode
self.random_type = opt.get('random_type', 'segment_random')
self.total_frames_length = 60
assert self.mode in ['train', 'validate', 'trainval']
data = pickle.load(open(opt['info_corpus'], 'rb'))
info = data['info']
self.itoc = info.get('itoc', None)
self.splits = info['split']
self.data = pickle.load(open(opt['bd_training_data'], 'rb'))
if self.mode == 'trainval':
self.splits['trainval'] = self.splits['train'] + self.splits['validate']
self.max_len = opt["max_len"]
self.n_frames = opt['n_frames']
self.equally_sampling = opt.get('equally_sampling', False)
self.data_i = [self.load_database(opt["feats_i"]), opt["dim_i"], opt.get("dummy_feats_i", False)]
self.data_m = [self.load_database(opt["feats_m"]), opt["dim_m"], opt.get("dummy_feats_m", False)]
self.data_a = [self.load_database(opt["feats_a"]), opt["dim_a"], opt.get("dummy_feats_a", False)]
self.bd_load_feats = opt.get('bd_load_feats', False)
self.infoset = self.make_infoset()
def load_database(self, path):
if not path:
return []
database = []
if isinstance(path, list):
for p in path:
if '.hdf5' in p:
database.append(h5py.File(p, 'r'))
else:
if '.hdf5' in path:
database.append(h5py.File(path, 'r'))
return database
def load_feats_padding(self, data, vid, dummy=None, padding=True, scale=1):
databases, dim, _ = data
if not len(databases):
return np.zeros((self.n_frames, dim))
feats = []
for database in databases:
if vid not in database.keys():
if padding:
return np.zeros((self.n_frames, dim))
else:
return np.zeros(dim)
else:
data = np.asarray(database[vid])
if len(data.shape) == 1 and padding:
data = data[np.newaxis, :].repeat(self.total_frames_length, axis=0)
feats.append(data * scale)
if len(feats[0].shape) == 1:
feats = np.concatenate(feats, axis=0)
return feats
feats = np.concatenate(feats, axis=1)
source_length = feats.shape[0]
if source_length > self.n_frames:
frames_idx = get_frames_idx(
source_length,
self.n_frames,
self.random_type,
equally_sampling = True if self.mode != 'train' else self.equally_sampling)
else:
frames_idx = resampling(source_length, self.n_frames)
return feats[frames_idx]
def make_infoset(self):
infoset = []
ix_set = [int(item) for item in self.splits[self.mode]]
for ix in ix_set:
vid = 'video%d' % ix
category = self.itoc[ix] if self.itoc is not None else 0
captions = self.data['caption'][vid]
labels = self.data['label'][vid]
for i, (cap, lab) in enumerate(zip(captions, labels)):
item = {
'vid': vid,
'caption': cap,
'label': lab,
'category': category,
}
infoset.append(item)
return infoset
def __getitem__(self, ix):
vid = self.infoset[ix]['vid']
caption = self.padding(self.infoset[ix]['caption'], add_eos=False)
label = self.infoset[ix]['label']
category = self.infoset[ix]['category']
load_feats_func = self.load_feats_padding
if self.bd_load_feats:
feats_i = load_feats_func(self.data_i, vid)
feats_m = load_feats_func(self.data_m, vid)#, scale=0.1)
feats_a = load_feats_func(self.data_a, vid)#, padding=False)
return torch.LongTensor(caption), torch.LongTensor([label]), torch.LongTensor([category]), \
torch.FloatTensor(feats_i), torch.FloatTensor(feats_m), torch.FloatTensor(feats_a)
return torch.LongTensor(caption), torch.LongTensor([label]), torch.LongTensor([category])
def __len__(self):
return len(self.infoset)
def padding(self, seq, add_eos=True):
if seq is None:
return None
res = seq.copy()
if len(res) > self.max_len:
res = res[:self.max_len]
if add_eos:
res[-1] = Constants.EOS
else:
res += [Constants.PAD] * (self.max_len - len(res))
return res
|
"""Top-level package for ElevatorProblem."""
__author__ = """Ravi Agrawal"""
__email__ = 'raviagrawal.iitkgp@gmail.com'
__version__ = '0.1.0'
|
import numpy as np
import librosa
class Element(object):
def __init__(self,
num_mic=4,
sampling_frequency=16000,
fft_length=512,
fft_shift=256,
sound_speed=343,
theta_step=1,
frame_num=1000000):
self.num_mic = num_mic
self.mic_angle_vector = np.array([45, 315, 225, 135])
# self.mic_angle_vector = np.array([315, 45, 225, 135])
self.mic_diameter = 0.064
self.sampling_frequency = sampling_frequency
self.fft_length = fft_length
self.fft_shift = fft_shift
self.sound_speed = sound_speed
self.theta_step = theta_step
self.frame_num = frame_num
def get_sterring_vector(self, look_direction):
'''
        return: steering vector of shape (num_mic, fft_length//2 + 1)
'''
        frequency_vector = librosa.fft_frequencies(sr=self.sampling_frequency, n_fft=self.fft_length)
steering_vector = np.exp(1j * 2 * np.pi / self.sound_speed * self.mic_diameter / 2 *
np.einsum("i,j->ij",frequency_vector, np.cos(np.deg2rad(look_direction) - np.deg2rad(
self.mic_angle_vector))))
return steering_vector.T/self.num_mic
def get_correlation_matrix(self, multi_signal):
length = multi_signal.shape[1]
        frequency_grid = librosa.fft_frequencies(sr=self.sampling_frequency, n_fft=self.fft_length)
R_mean = np.zeros((len(frequency_grid), self.num_mic, self.num_mic), dtype=np.complex64)
num_frames = (length-self.fft_shift)//self.fft_shift
if num_frames >= self.frame_num:
num_frames = self.frame_num
start = 0
end = self.fft_length
for _ in range(0, num_frames):
multi_signal_cut = multi_signal[:, start:start + self.fft_length]
complex_signal = np.fft.rfft(multi_signal_cut, axis=1)
for f in range(0, len(frequency_grid)):
R_mean[f, ...] += np.einsum("i,j->ij",complex_signal[:, f], np.conj(complex_signal[:, f]).T)
start = start + self.fft_shift
end = end + self.fft_shift
return R_mean/num_frames
def get_correlation_matrix_fb(self, multi_signal):
length = multi_signal.shape[1]
        frequency_grid = librosa.fft_frequencies(sr=self.sampling_frequency, n_fft=self.fft_length)
R_mean = np.zeros((len(frequency_grid), self.num_mic, self.num_mic), dtype=np.complex64)
num_frames = self.frame_num
start = 0
end = self.fft_length
for _ in range(0, num_frames):
multi_signal_cut = multi_signal[:, start:start + self.fft_length]
complex_signal = np.fft.fft(multi_signal_cut, axis=1)
for f in range(len(frequency_grid)):
R_mean[f, ...] += np.outer(complex_signal[:, f], np.conj(complex_signal[:, f]).T)
start = start + self.fft_shift
end = end + self.fft_shift
start1 = length
for _ in range(0, num_frames):
multi_signal_cut1 = multi_signal[:, start1 - self.fft_length:start1]
complex_signal = np.fft.fft(multi_signal_cut1, axis=1)
for f in range(len(frequency_grid)):
R_mean[f, ...] += np.outer(complex_signal[:, f], np.conj(complex_signal[:, f]).T)
start1 = start1 - self.fft_shift
return R_mean/num_frames/2
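# Minimal usage sketch (illustrative; assumes librosa and numpy are installed):
if __name__ == '__main__':
    element = Element()
    sv = element.get_sterring_vector(look_direction=90)
    # One row per microphone, one column per rFFT bin (fft_length=512 -> 257 bins).
    print(sv.shape)  # (4, 257)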
|
from .borg import Borg
from .encrypted_file import EncryptedFile
from .sentinel import Sentinel
from .singleton import Singleton
from .classproperty import classproperty
from .deprecated import deprecated
from .retryable import retryable
from .sampled import sampled
from .timed import timed
from .timeoutable import timeoutable
__all__ = (
"Borg",
"EncryptedFile",
"Sentinel",
"Singleton",
"classproperty",
"deprecated",
"retryable",
"sampled",
"timed",
"timeoutable",
)
__version__ = "1.3.1"
|
# 015 - Reverse a linked list
# Given a linked list, reverse it and return the head of the new list.
# Approach (head insertion):
# Suppose we reverse 1->2->3->4->5. After one pass the intermediate state looks like:
#   head->4->3->2->1->5
#                    p  tp   (p stays on the original first node, tp is the node being moved)
# 1. Point p.next to tp.next
# 2. Point tp.next to head.next
# 3. Point head.next to tp
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def ReverseList(pHead):
if not pHead:return None
    head = ListNode(0)  # don't forget the dummy head node!
head.next = pHead
p = pHead
while(p.next):
tp = p.next
        p.next = p.next.next  # step 1 of the approach
        tp.next = head.next   # step 2
        head.next = tp        # step 3
return head.next
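# Quick demonstration (not part of the original snippet): build 1->2->3->4->5,
# reverse it and print the values, which come out as 5 4 3 2 1.
if __name__ == '__main__':
    head = ListNode(1)
    node = head
    for value in range(2, 6):
        node.next = ListNode(value)
        node = node.next
    node = ReverseList(head)
    while node:
        print(node.val)
        node = node.next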
|
# Test that group functionality is working
from unittest import TestCase
import datetime
from oasis.lib import DB, Groups, Periods, Courses
class TestGroups(TestCase):
@classmethod
def setUpClass(cls):
DB.MC.flush_all()
def test_create_group(self):
""" Fetch a group back and check it
"""
period1 = Periods.Period(name="Period 01",
title="Test 01",
start=datetime.datetime.now(),
finish=datetime.datetime.now(),
code="CODE1"
)
period1.save()
period2 = Periods.Period(name="Period 01")
self.assertTrue(period2)
self.assertEqual(period2.title, "Test 01")
period3 = Periods.Period(code="CODE1")
        self.assertEqual(period3.title, "Test 01")
self.assertEqual(period2.id, period3.id)
period4 = Periods.Period(period2.id)
self.assertEqual(period2.start, period4.start)
name = "TESTGROUP1"
title = "Test Group 01"
gtype = 1
source = None
feed = None
feed_args = ""
self.assertFalse(Groups.get_ids_by_name(name))
group = Groups.Group(g_id=0)
group.name = name
group.title = title
group.gtype = gtype
group.source = source
group.period = period2.id
group.feed = feed
group.feedargs = feed_args
group.active = True
group.save()
self.assertTrue(Groups.get_ids_by_name(name))
def test_course_config(self):
""" Test course configuration templates
"""
course1_id = Courses.create("TEMPL01", "Test course templates", 1, 1)
period = Periods.Period(name="TEMPL01",
title="Template 01",
start=datetime.datetime.now(),
finish=datetime.datetime.now(),
code="TMPL1"
)
period.save()
period2 = Periods.Period(code="TMPL1")
Courses.create_config(course1_id, "large", period2.id)
groups = Courses.get_groups(course1_id)
self.assertEqual(len(groups), 1)
course2_id = Courses.create("TEMPL02", "Test course standard", 1, 1)
Courses.create_config(course2_id, "standard", period2.id)
groups = Courses.get_groups(course2_id)
self.assertEqual(len(groups), 2)
course3_id = Courses.create("TEMPL03", "Test course demo", 1, 1)
Courses.create_config(course3_id, "demo", period2.id)
groups = Courses.get_groups(course3_id)
self.assertEqual(len(groups), 3)
self.assertListEqual(groups.keys(), [4, 5, 6])
self.assertEqual(groups[4].members(), [])
self.assertEqual(groups[5].members(), [])
self.assertEqual(groups[6].members(), [])
groups[4].add_member(1)
self.assertEqual(groups[4].members(), [1])
self.assertEqual(groups[5].members(), [])
self.assertEqual(groups[6].members(), [])
groups[4].add_member(1)
groups[5].add_member(1)
self.assertEqual(groups[4].members(), [1])
self.assertEqual(groups[5].members(), [1])
groups[4].remove_member(1)
self.assertEqual(groups[4].members(), [])
self.assertEqual(groups[5].members(), [1])
self.assertListEqual(groups[4].member_unames(), [])
self.assertListEqual(groups[5].member_unames(), ["admin"])
self.assertEqual(groups[4].size(), 0)
self.assertEqual(groups[5].size(), 1)
groups[5].flush_members()
self.assertEqual(groups[5].members(), [])
|
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# sqlserver connection string
from djimix.settings.local import MSSQL_EARL
from djimix.settings.local import INFORMIX_ODBC, INFORMIX_ODBC_TRAIN
from djimix.settings.local import (
INFORMIXSERVER,
DBSERVERNAME,
INFORMIXDIR,
ODBCINI,
ONCONFIG,
INFORMIXSQLHOSTS,
LD_LIBRARY_PATH,
LD_RUN_PATH
)
# Debug
DEBUG = False
INFORMIX_DEBUG = 'debug'
ADMINS = (
('', ''),
)
MANAGERS = ADMINS
SECRET_KEY = ''
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Chicago'
USE_I18N = False
USE_L10N = False
USE_TZ = False
DEFAULT_CHARSET = 'utf-8'
FILE_CHARSET = 'utf-8'
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ROOT_DIR = os.path.dirname(__file__)
DATABASES = {
'default': {
'HOST': '127.0.0.1',
'PORT': '3306',
'NAME': 'django_djtreeater',
'ENGINE': 'django.db.backends.mysql',
'USER': '',
'PASSWORD': ''
},
}
INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'djtreeater.core',
'djtools',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
'/data2/django_templates/djcher/',
'/data2/django_templates/django-djskins/',
],
'APP_DIRS': True,
'OPTIONS': {
'debug':DEBUG,
'context_processors': [
'djtools.context_processors.sitevars',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
],
#'loaders': [
# # insert your TEMPLATE_LOADERS here
#]
},
},
]
# caching
CACHES = {
'default': {
# 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        # 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        # 'LOCATION': '127.0.0.1:11211',
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/tmp/django_djtreeater_cache',
'TIMEOUT': 60*20,
'KEY_PREFIX': 'DJTREEATOR_',
'OPTIONS': {
'MAX_ENTRIES': 80000,
}
}
}
# SMTP settings
EMAIL_HOST = ''
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_FAIL_SILENTLY = False
DEFAULT_FROM_EMAIL = ''
SERVER_EMAIL = ''
SERVER_MAIL = ''
# Adirondack Application
ADIRONDACK_TXT_OUTPUT = ''
ADIRONDACK_JPG_OUTPUT = ''
ADIRONDACK_ZIP_OUTPUT = ''
ADIRONDACK_ARCHIVED = ''
ADIRONDACK_ROOM_ARCHIVED = ''
ADIRONDACK_TO_EMAIL = ''
ADIRONDACK_ASCII_EMAIL = ''
ADIRONDACK_FROM_EMAIL = ''
ADIRONDACK_LIS_SUPPORT = ''
ADIRONDACK_HOST = ''
ADIRONDACK_PORT = 0
ADIRONDACK_USER = ''
ADIRONDACK_PASS = ''
ADIRONDACK_TEST_API_SECRET = ''
ADIRONDACK_API_SECRET = ''
ADIRONDACK_ROOM_FEES = ''
ADIRONDACK_ROOM_ASSIGNMENTS = ''
ADIRONDACK_APPLICATONS = ''
# ADIRONDACK_ACCESS_KEY = ''
# ADIRONDACK_SECRET = ''
# ADIRONDACK_BUCKET = ''
# logging
LOG_FILEPATH = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'logs/')
DEBUG_LOG_FILENAME = LOG_FILEPATH + 'debug.log'
INFO_LOG_FILENAME = LOG_FILEPATH + 'info.log'
ERROR_LOG_FILENAME = LOG_FILEPATH + 'error.log'
CUSTOM_LOG_FILENAME = LOG_FILEPATH + 'custom.log'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format' : '[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s',
'datefmt' : '%Y/%b/%d %H:%M:%S'
},
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
'datefmt' : '%Y/%b/%d %H:%M:%S'
},
'custom': {
'format': '%(asctime)s: %(levelname)s: %(message)s',
'datefmt' : '%m/%d/%Y %I:%M:%S %p'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'custom_logfile': {
'level':'ERROR',
'filters': ['require_debug_true'], # do not run error logger in production
'class': 'logging.FileHandler',
'filename': CUSTOM_LOG_FILENAME,
'formatter': 'custom',
},
'info_logfile': {
'level':'INFO',
'class':'logging.handlers.RotatingFileHandler',
'backupCount': 10,
'maxBytes': 50000,
'filters': ['require_debug_false'], # run logger in production
'filename': INFO_LOG_FILENAME,
'formatter': 'simple',
},
'debug_logfile': {
'level': 'DEBUG',
'filters': ['require_debug_true'], # do not run debug logger in production
'class': 'logging.FileHandler',
'filename': DEBUG_LOG_FILENAME,
'formatter': 'verbose'
},
'error_logfile': {
'level': 'ERROR',
'filters': ['require_debug_true'], # do not run error logger in production
'class': 'logging.FileHandler',
'filename': ERROR_LOG_FILENAME,
'formatter': 'verbose'
},
'console':{
'level':'INFO',
'class':'logging.StreamHandler',
'formatter': 'standard'
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'include_html': True,
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'djtreeater': {
'handlers':['error_logfile'],
'propagate': True,
'level': 'ERROR'
},
'error_logger': {
'handlers': ['error_logfile'],
'level': 'ERROR'
},
'info_logger': {
'handlers': ['info_logfile'],
'level': 'INFO'
},
'debug_logger': {
'handlers':['debug_logfile'],
'propagate': True,
'level':'DEBUG',
},
'django': {
'handlers':['console'],
'propagate': True,
'level':'WARN',
},
'django.db.backends': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import json
from knack.log import get_logger
from knack.util import CLIError
logger = get_logger(__name__)
def duplicate_resource_exception_handler(ex):
# wraps DocumentDB 409 error in CLIError
from pydocumentdb.errors import HTTPFailure
if isinstance(ex, HTTPFailure) and ex.status_code == 409:
raise CLIError(
'Operation Failed: Resource Already Exists')
raise ex
def resource_not_found_exception_handler(ex):
# wraps DocumentDB 404 error in CLIError
from pydocumentdb.errors import HTTPFailure
if isinstance(ex, HTTPFailure) and ex.status_code == 404:
raise CLIError('Operation Failed: Resource Not Found')
raise ex
def invalid_arg_found_exception_handler(ex):
# wraps DocumentDB 400 error in CLIError
from pydocumentdb.errors import HTTPFailure
if isinstance(ex, HTTPFailure) and ex.status_code == 400:
cli_error = None
try:
if ex._http_error_message: # pylint:disable=protected-access
msg = json.loads(ex._http_error_message) # pylint:disable=protected-access
if msg['message']:
msg = msg['message'].split('\n')[0]
msg = msg[len('Message: '):] if msg.find('Message: ') == 0 else msg
cli_error = CLIError('Operation Failed: Invalid Arg {}'.format(msg))
except Exception: # pylint:disable=broad-except
pass
if cli_error:
raise cli_error # pylint:disable=raising-bad-type
raise CLIError('Operation Failed: Invalid Arg {}'.format(str(ex)))
raise ex
def unknown_server_failure_exception_handler(ex):
# wraps unknown documentdb error in CLIError
from pydocumentdb.errors import HTTPFailure
if isinstance(ex, HTTPFailure):
raise CLIError('Operation Failed: {}'.format(str(ex)))
raise ex
def exception_handler_chain_builder(handlers):
# creates a handler which chains the handler
# as soon as one of the chained handlers raises CLIError, it raises CLIError
# if no handler raises CLIError it raises the original exception
def chained_handler(ex):
if isinstance(ex, CLIError):
raise ex
for h in handlers:
try:
h(ex)
except CLIError as cli_error:
raise cli_error
except Exception: # pylint:disable=broad-except
pass
raise ex
return chained_handler
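# Illustrative behaviour (not from the original module): a chain such as
#     chained = exception_handler_chain_builder([network_exception_handler,
#                                                unknown_server_failure_exception_handler])
# turns a requests ConnectionError into a CLIError via the first handler, while an
# unrelated ValueError falls through every handler and is re-raised unchanged by
# the final ``raise ex``.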
def network_exception_handler(ex):
# wraps a connection exception in CLIError
import requests.exceptions
if isinstance(ex, (requests.exceptions.ConnectionError, requests.exceptions.HTTPError)):
raise CLIError('Please ensure you have network connection. Error detail: ' + str(ex))
raise ex
def generic_exception_handler(ex):
logger.debug(ex)
chained_handler = exception_handler_chain_builder([duplicate_resource_exception_handler,
resource_not_found_exception_handler,
invalid_arg_found_exception_handler,
unknown_server_failure_exception_handler,
network_exception_handler])
chained_handler(ex)
|
from .dataclasses import PassiveSkill, ActiveSkill, SkillTargetType, Card
from .string_mgr import DictionaryAccess
from typing import Callable, Union
from collections import UserDict
from .skill_cs_enums import (
ST,
IMPLICIT_TARGET_SKILL_TYPES,
PERCENT_VALUE_SKILL_TYPES,
MIXED_VALUE_SKILL_TYPES,
)
AnySkill = Union[ActiveSkill, PassiveSkill]
VALUE_PERCENT = 1
VALUE_MIXED = 2
class SkillEffectDSLHelper(UserDict):
def __call__(self, skill_type_id):
def _(describer):
self.data[skill_type_id] = describer
return describer
return _
class SkillEffectDescriberContext(object):
def __init__(self):
self.finish = self.default_finish
self.birdseye = self.default_birdseye
self.trigger = self.default_trigger
self.target = self.default_target
self.combiner = self.default_combiner
self.skill_effect = SkillEffectDSLHelper()
@staticmethod
def mod_value(vs):
effect_type = vs[2]
calc_type = vs[5]
value = vs[3]
eff_d_type = 1
if effect_type in MIXED_VALUE_SKILL_TYPES:
eff_d_type = calc_type
elif effect_type in PERCENT_VALUE_SKILL_TYPES:
eff_d_type = 2
if eff_d_type == 2:
vf = value / 100
vi = value // 100
if vf == vi:
return f"{vi}%"
return f"{vf}%"
return f"{value}"
    def default_birdseye(self, effect1, effect2, mod=None):
return ""
def default_finish(self, skill: AnySkill):
return ""
def default_trigger(self, skill: AnySkill):
return ""
def default_target(self, tt: SkillTargetType, strings: DictionaryAccess, context: Card):
return ""
def default_combiner(self, trigger: str, effect: str, finish: str):
return " ".join([trigger, effect, finish])
def finish_clause(self, f: Callable[[AnySkill, dict], str]):
self.finish = f
return f
def birdseye_clause(self, f: Callable[[tuple, tuple], str]):
self.birdseye = f
return f
def trigger_clause(self, f: Callable[[AnySkill, dict], str]):
self.trigger = f
return f
def target_clause(self, f: Callable[[SkillTargetType, Card], str]):
self.target = f
return f
def final_combiner(self, f: Callable[[str, str, str], str]):
self.combiner = f
return f
def format_single_value(self, level_struct):
return self.mod_value(level_struct)
def format_target(self, tt: AnySkill, strings: DictionaryAccess, context: Card = None):
if tt.levels[0][2] in IMPLICIT_TARGET_SKILL_TYPES:
return ""
return self.target(tt.target, strings, context)
def format_effect(self, skill: AnySkill, level: int = None, format_args: dict = None):
if format_args is None:
format_args = {"var": "", "let": "", "end": ""}
effect_type = skill.levels[0][2]
desc = self.skill_effect.get(effect_type)
if not desc:
return None
if len(skill.levels) == 1:
level = 0
if level is not None:
value = self.mod_value(skill.levels[level])
else:
value = self.birdseye(skill.levels[0], skill.levels[-1])
trigger = self.trigger(skill, format_args)
if callable(desc):
formatter = desc
else:
formatter = desc.format
effect = formatter(value=value, **format_args)
finish = self.finish(skill, format_args)
return self.combiner(trigger, effect, finish)
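# Registration sketch (illustrative; the skill type id 3 and the wording are
# hypothetical, not values taken from the game data):
#
#     describer = SkillEffectDescriberContext()
#
#     @describer.skill_effect(3)
#     def appeal_up(value, var="", let="", end=""):
#         return f"raises appeal by {value}"
#
# format_effect() then looks the registered callable up by the skill's effect type
# and calls it with the formatted value plus the standard format_args keywords.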
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""This module corresponds to the photoobj directory in photoop.
"""
def sdss_calibv():
"""Return calibration for velocities from pix/frame to deg/day.
Returns
-------
:class:`float`
The conversion from pixels per frame to degrees per day
Notes
-----
Assumes frame time difference of 71.72 seconds and pixel scale of
0.396 arcsec, both fixed. Also note that observations of the same part of
sky from adjacent bands are separated by *two* frame numbers,
so we multiply by a factor two.
"""
pixscale = 0.396 # arcsec
ftime = 71.72 # seconds
pixframe2degday = 2.0*pixscale/(3600.0) * (3600.0)*24.0/ftime
return pixframe2degday
def unwrap_objid(objid):
"""Unwrap CAS-style objID into run, camcol, field, id, rerun.
See :func:`~pydl.pydlutils.sdss.sdss_objid` for details on how the bits
within an objID are assigned.
Parameters
----------
objid : :class:`numpy.ndarray`
An array containing 64-bit integers or strings. If strings are passed,
they will be converted to integers internally.
Returns
-------
:class:`numpy.recarray`
A record array with the same length as `objid`, with the columns
'skyversion', 'rerun', 'run', 'camcol', 'firstfield', 'frame', 'id'.
Notes
-----
For historical reasons, the inverse of this function,
:func:`~pydl.pydlutils.sdss.sdss_objid` is not in the same namespace as
this function.
'frame' is used instead of 'field' because record arrays have a method
of the same name.
Examples
--------
>>> from numpy import array
>>> from pydl.photoop.photoobj import unwrap_objid
>>> unwrap_objid(array([1237661382772195474]))
rec.array([(2, 301, 3704, 3, 0, 91, 146)],
dtype=[('skyversion', '<i4'), ('rerun', '<i4'), ('run', '<i4'), ('camcol', '<i4'), ('firstfield', '<i4'), ('frame', '<i4'), ('id', '<i4')])
"""
import numpy as np
if objid.dtype.type is np.string_ or objid.dtype.type is np.unicode_:
tempobjid = objid.astype(np.int64)
elif objid.dtype.type is np.int64:
tempobjid = objid.copy()
else:
raise ValueError('Unrecognized type for objid!')
unwrap = np.recarray(objid.shape,
dtype=[('skyversion', 'i4'), ('rerun', 'i4'),
('run', 'i4'), ('camcol', 'i4'),
('firstfield', 'i4'),
('frame', 'i4'), ('id', 'i4')])
unwrap.skyversion = np.bitwise_and(tempobjid >> 59, 2**4 - 1)
unwrap.rerun = np.bitwise_and(tempobjid >> 48, 2**11 - 1)
unwrap.run = np.bitwise_and(tempobjid >> 32, 2**16 - 1)
unwrap.camcol = np.bitwise_and(tempobjid >> 29, 2**3 - 1)
unwrap.firstfield = np.bitwise_and(tempobjid >> 28, 2**1 - 1)
unwrap.frame = np.bitwise_and(tempobjid >> 16, 2**12 - 1)
unwrap.id = np.bitwise_and(tempobjid, 2**16 - 1)
return unwrap
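# Bit layout implied by the shifts and masks above (summary, for reference):
#   bits 59-62 skyversion, 48-58 rerun, 32-47 run, 29-31 camcol,
#   bit 28 firstfield, bits 16-27 frame (field), bits 0-15 id.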
|
__all__ = ('HashPreimage',)
import os
import ctypes
from ctypes import cdll
from ethsnarks.verifier import Proof, VerifyingKey
class HashPreimage(object):
def __init__(self, native_library_path, vk, pk_file=None):
        self._pk_file = pk_file
        if pk_file and not os.path.exists(pk_file):
            raise RuntimeError("Proving key file doesn't exist: " + pk_file)
if not isinstance(vk, VerifyingKey):
if isinstance(vk, dict):
vk = VerifyingKey.from_dict(vk)
elif os.path.exists(vk):
vk = VerifyingKey.from_file(vk)
else:
vk = VerifyingKey.from_json(vk)
if not isinstance(vk, VerifyingKey):
raise TypeError("Invalid vk type")
self._vk = vk
lib = cdll.LoadLibrary(native_library_path)
lib_prove = lib.hashpreimage_prove
lib_prove.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
lib_prove.restype = ctypes.c_char_p
self._prove = lib_prove
lib_verify = lib.hashpreimage_verify
lib_verify.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
lib_verify.restype = ctypes.c_bool
self._verify = lib_verify
def prove(self, preimage, pk_file=None):
if pk_file is None:
pk_file = self._pk_file
if pk_file is None:
raise RuntimeError("No proving key file")
if len(preimage) != 64:
raise RuntimeError("Invalid preimage size, must be 64 bytes")
pk_file_cstr = ctypes.c_char_p(pk_file.encode('ascii'))
preimage_cstr = ctypes.c_char_p(preimage)
data = self._prove(pk_file_cstr, preimage_cstr)
if data is None:
raise RuntimeError("Could not prove!")
return Proof.from_json(data)
def verify(self, proof):
if not isinstance(proof, Proof):
raise TypeError("Invalid proof type")
vk_cstr = ctypes.c_char_p(self._vk.to_json().encode('ascii'))
proof_cstr = ctypes.c_char_p(proof.to_json().encode('ascii'))
return self._verify( vk_cstr, proof_cstr )
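# Minimal usage sketch (illustrative; the shared library and key file names below are
# hypothetical placeholders and must exist on disk for this to run):
if __name__ == '__main__':
    zksnark = HashPreimage('libhashpreimage.so', 'hashpreimage.vk.json',
                           pk_file='hashpreimage.pk.raw')
    proof = zksnark.prove(b'\x00' * 64)  # any 64-byte preimage
    assert zksnark.verify(proof)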
|
from .create import CreateRoomView
from .index import IndexView
from .member import MemberView
from .room import RoomHistoryView, RoomView
from .settings import SettingsView
from .status import StatusView
__all__ = (
'CreateRoomView',
'IndexView',
'MemberView',
'RoomHistoryView',
'RoomView',
'SettingsView',
'StatusView',
)
|
"""
Preprocess the dataset
"""
# import module
import pandas as pd
import os
from datetime import datetime, timedelta
from dateutil.rrule import rrule, DAILY
import requests
import random
import urllib
#################################################################################################################
# helper functions for API data, segment data, and other calculations                                          #
#################################################################################################################
"""
Helper functions for generating API data, segment data, and arrival times.
list of helper functions:
* calculate_arrival_time
* calculate_arrival_distance
* extract_time
* calculate_time_span
* calculate_time_from_stop
* filter_single_history
"""
def calculate_arrival_time(stop_dist, prev_dist, next_dist, prev_timestamp, next_timestamp):
"""
Calculate the arrival time according to the given tuple (prev_dist, next_dist), the current location, the timestamp of the prev location, and the timestamp of the next location
:param stop_dist: the distance of the target stop between the prev and next tuple
:param prev_dist: the distance of the location of the bus at the previous record
:param next_dist: the distance of the location of the bus at the next record
:param prev_timestamp: the timestamp of the bus at the previous record
:param next_timestamp: the timestamp of the bus at the next record
:return result: the timestamp of the bus arrival the target stop
"""
distance_prev_next = next_dist - prev_dist
distance_prev_stop = stop_dist - prev_dist
ratio = float(distance_prev_stop) / float(distance_prev_next)
duration_prev_next = next_timestamp - prev_timestamp
duration_prev_stop = ratio * duration_prev_next.total_seconds()
duration_prev_stop = timedelta(0, duration_prev_stop)
stop_timestamp = prev_timestamp + duration_prev_stop
return stop_timestamp
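# Worked example (illustrative numbers): if the stop sits at 150 m, the previous
# record was at 100 m at 10:00:00 and the next at 200 m at 10:01:00, the ratio is
# (150 - 100) / (200 - 100) = 0.5, so the estimated arrival time is 10:00:30.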
def calculate_arrival_distance(time_of_day, prev_dist, next_dist, prev_timestamp, next_timestamp):
"""
calculate arrival distance according to the given input: time_of_day, prev_dist, next_dist, prev_timestamp, next_timestamp
:param time_of_day: the given time for calculating the dist_along_route
:param prev_dist: the distance of the location of the bus for the previous record in historical data
:param next_dist: the distance of the location of the bus for the next record in historical data
:param prev_timestamp: the timestamp of the bus for the previous record in historical data
:param next_timestamp: the timestamp of the bus for the next record in historical data
:return result: dist_along_route for the bus at the given time_of_day
"""
duration_prev_next = next_timestamp - prev_timestamp
duration_prev_time = time_of_day - prev_timestamp
duration_prev_next = duration_prev_next.total_seconds()
duration_prev_time = duration_prev_time.total_seconds()
ratio = duration_prev_time / duration_prev_next
distance_prev_next = next_dist - prev_dist
distance_prev_time = distance_prev_next * ratio
dist_along_route = prev_dist + distance_prev_time
return dist_along_route
def extract_time(time):
"""
Convert the string into datetime format.
:param time: string of time need to be converted. Example: '2017-01-16T15:09:28Z'
:return: the time in datetime format
"""
result = datetime.strptime(time[11: 19], '%H:%M:%S')
return result
def calculate_time_span(time1, time2):
"""
    Calculate the duration between two time points
:param time1: previous time point in string format, ex: '2017-01-16T15:09:28Z'
:param time2: next time point in string format, ex: '2017-01-16T15:09:28Z'
:return: float number of seconds
"""
timespan = extract_time(time2) - extract_time(time1)
return timespan.total_seconds()
def calculate_time_from_stop(segment_df, dist_along_route, prev_record, next_record):
"""
Calculate the time from stop within the tuple (prev_record, next_record)
    Algorithm:
        if prev_record == next_record:
            the bus is parked at the stop, return 0
        Calculate the distance within the tuple
        Calculate the distance between the current location and the prev record
        Calculate the ratio of these two distances
        Use the ratio to calculate the time_from_stop
:param segment_df: dataframe for the preprocessed segment data
:param dist_along_route: distance between the intial stop and the current location of the bus
:param prev_record: single record of the route_stop_dist.csv file
:param next_record: single record of the route_stop_dist.csv file
:return: total seconds of the time_from_stop
"""
if prev_record.get('stop_id') == next_record.get('stop_id'):
return 0.0
distance_stop_stop = next_record.get('dist_along_route') - prev_record.get('dist_along_route')
distance_bus_stop = next_record.get('dist_along_route') - dist_along_route
ratio = float(distance_bus_stop) / float(distance_stop_stop)
assert ratio < 1
try:
travel_duration = segment_df[(segment_df.segment_start == prev_record.get('stop_id')) & (
segment_df.segment_end == next_record.get('stop_id'))].iloc[0]['travel_duration']
except:
travel_duration = segment_df['travel_duration'].mean()
time_from_stop = travel_duration * ratio
return time_from_stop
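# Worked example (illustrative numbers): with the previous stop at 1000 m, the next
# stop at 1500 m and the bus at dist_along_route = 1200 m, the ratio is
# (1500 - 1200) / (1500 - 1000) = 0.6, so time_from_stop is 0.6 times the segment's
# travel_duration (or 0.6 times the mean travel_duration if the segment is missing
# from segment_df).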
def filter_single_history(single_history, stop_sequence):
"""
    Filter the history file with only one day and one stop sequence to remove abnormal records
:param single_history: dataframe for history table with only one day
:param stop_sequence: list of stop id
:return: dataframe for filtered history table
"""
current_history = single_history[
(single_history.next_stop_id.isin(stop_sequence)) & (single_history.dist_along_route > 0)]
if len(current_history) < 3:
return None
tmp_history = pd.DataFrame(columns=current_history.columns)
i = 1
prev_record = current_history.iloc[0]
while i < len(current_history):
next_record = current_history.iloc[i]
prev_distance = float(prev_record.total_distance)
next_distance = float(next_record.total_distance)
while prev_distance >= next_distance:
i += 1
if i == len(current_history):
break
next_record = current_history.iloc[i]
next_distance = float(next_record.total_distance)
tmp_history.loc[len(tmp_history)] = prev_record
prev_record = next_record
i += 1
if float(prev_record.total_distance) > float(tmp_history.iloc[-1].total_distance):
tmp_history.loc[len(tmp_history)] = prev_record
return tmp_history
#################################################################################################################
# weather.csv #
#################################################################################################################
def get_precip(gooddate, api_token):
"""
Download the weather information for a specific date
:param gooddate: date for downloading
:param api_token: the token for api interface
:return: list of the data
"""
urlstart = 'http://api.wunderground.com/api/' + api_token + '/history_'
urlend = '/q/NY/New_York.json'
url = urlstart + str(gooddate) + urlend
data = requests.get(url).json()
result = None
for summary in data['history']['dailysummary']:
rain = summary['rain']
snow = summary['snow']
if snow == '1':
weather = '2'
elif rain == '1':
weather = '1'
else:
weather = '0'
result = [gooddate, rain, snow, weather]
return result
def download_weather(date_start, date_end, api_token):
"""
download the weather information for a date range
:param date_start: start date, string, ex: '20160101'
:param date_end: similar to date_start
:param api_token: the token for api interface
:return: dataframe for weather table
weather = 2: snow
weather = 1: rain
weather = 0: sunny
"""
a = datetime.strptime(date_start, '%Y%m%d')
b = datetime.strptime(date_end, '%Y%m%d')
result = pd.DataFrame(columns=['date', 'rain', 'snow', 'weather'])
for dt in rrule(DAILY, dtstart=a, until=b):
current_data = get_precip(dt.strftime("%Y%m%d"), api_token)
if current_data is None:
continue
else:
result.loc[len(result)] = current_data
return result
#################################################################################################################
# route_stop_dist.csv #
#################################################################################################################
"""
Calculate the distance of each stop for a specific route from the initial stop.
It will read three different files: trips.txt, stop_times.txt and history file.
Use the stop_times.txt and trips.txt file to obtain the stop sequence for each route and use the historical data to calculate the actual distance for each stop.
"""
def calculate_stop_distance(stop_times, history, direction_id=0):
"""
    Calculate the distance of each stop from the initial stop. Note that dist_along_route is the distance between the next_stop and the initial stop
Algorithm:
split the history and stop_times table according to the route id and shape id
for each subset of the divided history table:
get the route id and shape id for the subset
get the corresponding subset of the stop_times table
get the stop sequence from this subset
define a new dataframe based on the stop sequence for that shape id
find the distance from history data for the corresponding stop and shape id
save the result for this subset
concatenate all the results
:param stop_times: the stop_times table read from stop_times.txt file in GTFS
:param history: the history table from preprocessed history.csv file
:param direction_id: the direction id which can be 0 or 1
:return: the route_stop_dist table in dataframe
"""
route_grouped_history = history.groupby(['route_id', 'shape_id'])
route_grouped_stop_times = stop_times.groupby(['route_id', 'shape_id'])
result_list = []
for name, single_route_history in route_grouped_history:
route_id, shape_id = name
flag = 0
current_result = pd.DataFrame()
single_stop_times = route_grouped_stop_times.get_group((route_id, shape_id))
trip_id = single_stop_times.iloc[0]['trip_id']
single_stop_times = single_stop_times[single_stop_times.trip_id == trip_id]
single_stop_times.reset_index(inplace=True)
current_result['stop_id'] = single_stop_times['stop_id']
current_result['route_id'] = route_id
current_result['shape_id'] = shape_id
current_result['direction_id'] = direction_id
stop_grouped = single_route_history.groupby(['next_stop_id']).mean()
stop_grouped.reset_index(inplace=True)
stop_grouped['next_stop_id'] = pd.to_numeric(stop_grouped['next_stop_id'])
stop_set = set(stop_grouped['next_stop_id'])
for i in xrange(len(current_result)):
next_stop_id = current_result.iloc[i]['stop_id']
if next_stop_id not in stop_set:
print route_id, shape_id
flag = 1
break
else:
dist_along_route = stop_grouped[stop_grouped.next_stop_id == next_stop_id].iloc[0]['dist_along_route']
current_result.set_value(i, 'dist_along_route', dist_along_route)
if flag == 1:
continue
else:
result_list.append(current_result)
result = pd.concat(result_list, ignore_index=True)
return result
#################################################################################################################
# segment.csv #
#################################################################################################################
"""
generate the segment table
"""
def generate_original_segment(full_history_var, weather, stop_times_var):
"""
Generate the original segment data
Algorithm:
Split the full historical data according to the service date, trip_id with groupby function
        For name, item in the split historical dataset:
            service date, trip_id = name[0], name[1]
            Find the vehicle id which is the majority element in this column (for removing abnormal values in the historical data)
            calculate the travel duration within the segments of this split historical data and save the result into a list
concatenate the list
:param full_history_var: the historical data after filtering
:param weather: the dataframe for the weather information
:param stop_times_var: the dataframe from stop_times.txt
:return: dataframe for the original segment
format:
segment_start, segment_end, timestamp, travel_duration, weather, service date, day_of_week, trip_id, vehicle_id
"""
full_history_var = full_history_var[full_history_var.total_distance > 0]
grouped = list(full_history_var.groupby(['service_date', 'trip_id']))
result_list = []
step_count = range(0, len(grouped), len(grouped) / 10)
for index in range(len(grouped)):
name, single_history = grouped[index]
if index in step_count:
print "process: ", str(step_count.index(index)) + '/' + str(10)
service_date, trip_id = name
if service_date <= 20160103:
continue
grouped_vehicle_id = list(single_history.groupby(['vehicle_id']))
majority_length = -1
majority_vehicle = -1
majority_history = single_history
for vehicle_id, item in grouped_vehicle_id:
if len(item) > majority_length:
majority_length = len(item)
majority_history = item
majority_vehicle = vehicle_id
stop_sequence = [item for item in list(stop_times_var[stop_times_var.trip_id == trip_id].stop_id)]
current_segment_df = generate_original_segment_single_history(majority_history, stop_sequence)
if current_segment_df is None:
continue
current_weather = weather[weather.date == service_date].iloc[0]['weather']
current_segment_df['weather'] = current_weather
day_of_week = datetime.strptime(str(service_date), '%Y%m%d').weekday()
current_segment_df['service_date'] = service_date
current_segment_df['day_of_week'] = day_of_week
current_segment_df['trip_id'] = trip_id
current_segment_df['vehicle_id'] = majority_vehicle
result_list.append(current_segment_df)
if result_list != []:
result = pd.concat(result_list, ignore_index=True)
else:
return None
return result
def generate_original_segment_single_history(history, stop_sequence):
"""
Calculate the travel duration for a single historical data
Algorithm:
Filter the historical data with the stop sequence here
arrival_time_list = []
i = 0
while i < len(history):
use prev and the next to mark the record:
prev = history[i - 1]
next = history[i]
check whether the prev stop is the same as the next stop:
if yes, skip this row and continue to next row
prev_distance = prev.dist_along_route - prev.dist_from_stop
next_distance = next.dist_along_route - next.dist_from_stop
if prev_distance == next_distance:
continue to next row
            elif prev.dist_from_stop == 0:
                current_arrival_time = prev.timestamp
            else:
                current_arrival_time = calculate_arrival_time(prev.dist_along_route, prev_distance, next_distance, prev_timestamp, next_timestamp)
            arrival_time_list.append((prev.next_stop_id, current_arrival_time))
        result = pd.DataFrame
for i in range(1, len(arrival_time_list)):
prev = arrival_time_list[i - 1]
next = arrival_time_list[i]
segment_start, segment_end obtained
travel_duration = next[1] - prev[1]
timestamp = prev[1]
save the record to result
:param history: single historical data
:param stop_sequence: stop sequence for the corresponding trip id
    :return: the dataframe of the original segment dataset
format:
segment_start, segment_end, timestamp, travel_duration
"""
single_history = filter_single_history(history, stop_sequence)
if single_history is None or len(single_history) < 3:
return None
arrival_time_list = []
grouped_list = list(single_history.groupby('next_stop_id'))
if len(grouped_list) < 3:
return None
history = pd.DataFrame(columns=single_history.columns)
for i in xrange(len(grouped_list)):
history.loc[len(history)] = grouped_list[i][1].iloc[-1]
history.sort_values(by='timestamp', inplace=True)
if history.iloc[0]['total_distance'] < 1:
prev_record = history.iloc[1]
i = 2
else:
prev_record = history.iloc[0]
i = 1
while i < len(history):
next_record = history.iloc[i]
while stop_sequence.index(prev_record.next_stop_id) >= stop_sequence.index(next_record.next_stop_id):
i += 1
if i == len(history):
break
next_record = history.iloc[i]
if i == len(history):
break
prev_distance = float(prev_record.total_distance)
next_distance = float(next_record.total_distance)
prev_timestamp = datetime.strptime(prev_record.timestamp, '%Y-%m-%dT%H:%M:%SZ')
next_timestamp = datetime.strptime(next_record.timestamp, '%Y-%m-%dT%H:%M:%SZ')
if prev_distance == next_distance:
# the bus didn't move yet
i += 1
continue
if prev_record.dist_from_stop == 0:
            # if prev.dist_from_stop is 0, the bus is exactly at the stop, so its timestamp is the arrival time
current_arrival_time = prev_timestamp
else:
stop_dist = prev_record.dist_along_route
current_arrival_time = calculate_arrival_time(stop_dist, prev_distance, next_distance, prev_timestamp,
next_timestamp)
arrival_time_list.append((prev_record.next_stop_id, current_arrival_time))
prev_record = next_record
i += 1
result = pd.DataFrame(columns=['segment_start', 'segment_end', 'timestamp', 'travel_duration'])
for i in range(1, len(arrival_time_list)):
prev_record = arrival_time_list[i - 1]
next_record = arrival_time_list[i]
segment_start, segment_end = prev_record[0], next_record[0]
timestamp = prev_record[1]
travel_duration = next_record[1] - prev_record[1]
travel_duration = travel_duration.total_seconds()
result.loc[len(result)] = [segment_start, segment_end, str(timestamp), travel_duration]
return result
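# Hedged sketch of the interpolation used above. calculate_arrival_time itself is defined
# earlier in this module; based on the algorithm description it is assumed to place the
# arrival time linearly between the two GPS records according to the distance ratio. The
# helper below is illustrative only and is not called by the pipeline.
def _example_arrival_time_interpolation(stop_dist, prev_distance, next_distance,
                                        prev_timestamp, next_timestamp):
    distance_ratio = (float(stop_dist) - float(prev_distance)) / (float(next_distance) - float(prev_distance))
    duration = (next_timestamp - prev_timestamp).total_seconds()
    return prev_timestamp + timedelta(0, duration * distance_ratio)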
def improve_dataset_unit(segment_df, stop_sequence):
"""
improve the dataset for a specific trip_id at a specific date: add the skipped segments back into the dataframe
Algorithm:
define result_df
For each row in segment_df:
obtain segment_start, segment_end, timestamp, travel_duration from the current row
start_index: index of segment_start in stop_sequence
end_index: ...
count = end_index - start_index
if count is 1, save the current row and continue to next row
average_travel_duration = travel_duration / count
For index in range(start_index, end_index):
current_segment_start = stop_sequence[index]
current_segment_end = stop_sequence[index + 1]
save the new row with the timestamp, average_travel_duration, current_segment_start, and current_segment_end into result_df
timestamp = timestamp + average_travel_duration
return result_df
:param segment_df: a subset of segment table with one trip id and service date
:param stop_sequence: stop sequence for the corresponding trip id
:return: dataframe for improved segment table
return format:
segment_start, segment_end, timestamp, travel_duration
"""
result = pd.DataFrame(columns=['segment_start', 'segment_end', 'timestamp', 'travel_duration'])
for i in xrange(len(segment_df)):
segment_start = segment_df.iloc[i]['segment_start']
segment_end = segment_df.iloc[i]['segment_end']
timestamp = segment_df.iloc[i]['timestamp']
travel_duration = segment_df.iloc[i]['travel_duration']
start_index = stop_sequence.index(segment_start)
end_index = stop_sequence.index(segment_end)
count = end_index - start_index
if count <= 0:
print "error"
continue
average_travel_duration = float(travel_duration) / float(count)
for j in range(start_index, end_index):
current_segment_start = stop_sequence[j]
current_segment_end = stop_sequence[j + 1]
result.loc[len(result)] = [current_segment_start, current_segment_end, timestamp, average_travel_duration]
timestamp = datetime.strptime(timestamp[:19], '%Y-%m-%d %H:%M:%S') + timedelta(0, average_travel_duration)
timestamp = str(timestamp)
return result
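# A minimal worked example of the splitting described above, with hypothetical stop ids.
# One segment that spans two hops (stop 100 to stop 300) with a 60 second travel duration
# should come back as two consecutive 30 second segments (100 -> 200 and 200 -> 300), the
# second one starting 30 seconds after the first.
def _example_improve_dataset_unit():
    example_segment = pd.DataFrame(
        [[100, 300, '2016-01-05 12:00:00', 60.0]],
        columns=['segment_start', 'segment_end', 'timestamp', 'travel_duration'])
    return improve_dataset_unit(example_segment, [100, 200, 300])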
def improve_dataset(segment_df, stop_times, weather_df):
"""
Improve the segment table by adding the skipped stops and other extra columns like weather, day_of_week, etc.
algorithm:
split the segment dataframe by groupby(service_date, trip_id)
result_list = []
for name, item in grouped_segment:
obtained the improved segment data for the item
add the columns: weather, service date, day_of_week, trip_id, vehicle_id
save the result into result_list
concatenate the dataframe in the result_list
:param segment_df: the dataframe of segment table
:param stop_times: the dataframe of the stop_times.txt file in GTFS dataset
:param weather_df: the dataframe of the weather information
:return: the dataframe of the improved segment table
"""
grouped_list = list(segment_df.groupby(['service_date', 'trip_id']))
result_list = []
for i in xrange(len(grouped_list)):
name, item = grouped_list[i]
service_date, trip_id = name
stop_sequence = list(stop_times[stop_times.trip_id == trip_id].stop_id)
current_segment = improve_dataset_unit(item, stop_sequence)
if current_segment is None:
continue
# add the other columns
current_segment['weather'] = weather_df[weather_df.date == service_date].iloc[0].weather
current_segment['service_date'] = service_date
current_segment['day_of_week'] = datetime.strptime(str(service_date), '%Y%m%d').weekday()
current_segment['trip_id'] = trip_id
current_segment['vehicle_id'] = item.iloc[0].vehicle_id
result_list.append(current_segment)
if result_list == []:
result = None
else:
result = pd.concat(result_list, ignore_index=True)
return result
#################################################################################################################
# api data section #
#################################################################################################################
"""
Generate the api data from the GTFS data and the historical data
"""
def generate_api_data(date_list, time_list, stop_num, route_stop_dist, full_history):
"""
Generate the api data for the test_route_set and given time list
Algorithm:
Generate the set of trip id for test routes
Generate the random test stop id for each test routes
Filtering the historical data with trip id
Generate the list of historical data Groupby(date, trip id)
for each item in the list of the historical data:
obtain the trip id and the date
obtain the corresponding route
obtain the corresponding stop set
for stop in stop set:
for each time point in the time list:
check whether the bus has passed the stop at the time point
if yes, continue to next stop
otherwise, save the record into result
    :param date_list: the date list for testing [20160101, 20160102, ...]
:param time_list: the time list for testing, ['12:00:00', '12:05:00', ...]
:param stop_num: the number of the target stop for test
:param route_stop_dist: the dataframe for the route_stop_dist table
:param full_history: the dataframe for the history table
:return: the dataframe for the api data
"""
trip_route_dict = {}
route_stop_dict = {}
grouped = route_stop_dist.groupby(['shape_id'])
for shape_id, single_route_stop_dist in grouped:
stop_sequence = list(single_route_stop_dist.stop_id)
if len(stop_sequence) < 5:
continue
trip_set = set(full_history[full_history.shape_id == shape_id].trip_id)
current_dict = dict.fromkeys(trip_set, shape_id)
trip_route_dict.update(current_dict)
stop_set = set()
for i in range(stop_num):
stop_set.add(stop_sequence[random.randint(2, len(stop_sequence) - 2)])
route_stop_dict[shape_id] = stop_set
history = full_history[
(full_history.trip_id.isin(trip_route_dict.keys())) & (full_history.service_date.isin(date_list))]
history_grouped = history.groupby(['service_date', 'trip_id'])
result = pd.DataFrame(
columns=['trip_id', 'vehicle_id', 'route_id', 'stop_id', 'time_of_day', 'date', 'dist_along_route', 'shape_id'])
print_dict = dict.fromkeys(date_list, True)
for name, single_history in history_grouped:
service_date, trip_id = name
if service_date not in date_list:
continue
if print_dict[service_date]:
print service_date
print_dict[service_date] = False
shape_id = trip_route_dict[trip_id]
stop_set = [str(int(item)) for item in route_stop_dict[shape_id]]
stop_sequence = list(route_stop_dist[route_stop_dist.shape_id == shape_id].stop_id)
# filtering the history data: remove the abnormal value
single_history = filter_single_history(single_history, stop_sequence)
if single_history is None or len(single_history) < 2:
continue
for target_stop in stop_set:
target_index = stop_sequence.index(float(target_stop))
for current_time in time_list:
prev_history = single_history[single_history['timestamp'].apply(lambda x: x[11:19] <= current_time)]
next_history = single_history[single_history['timestamp'].apply(lambda x: x[11:19] > current_time)]
if len(prev_history) == 0:
continue
if len(next_history) == 0:
break
tmp_stop = str(prev_history.iloc[-1].next_stop_id)
tmp_index = stop_sequence.index(float(tmp_stop))
if tmp_index > target_index:
break
                # If the bus has not yet passed the target stop, build an api record from the remaining stops
route_id = single_history.iloc[0].route_id
current_list = generate_single_api(current_time, route_id, prev_history, next_history, target_stop, shape_id)
if current_list is not None:
result.loc[len(result)] = current_list
return result
def generate_single_api(current_time, route_id, prev_history, next_history, stop_id, shape_id):
"""
Calculate the single record for the api data
    Algorithm for calculating the single record:
According to the time point, find the closest time duration (prev, next)
Calculate the dist_along_route for the bus at current timepoint:
calculate the space distance between the time duration (prev, next)
calculate the time distance of two parts: (prev, current), (prev, next)
use the ratio of the time distance to multiply with the space distance to obtain the dist_along_route for current
        According to the dist_along_route and the stop sequence, confirm the remaining stops including the target stop
        Count the number of the remaining stops
:param current_time: The current time for generating the api data
:param route_id: the id of the route for the specific record
:param prev_history: the dataframe of the history table before the timestamp on the record of api data with the same trip id
:param next_history: the dataframe of the history table after the timestamp on the record of api data with the same trip id
:param stop_id: The id of the target stop
:param shape_id: The id of the shape (stop sequence)
:return: the list for the result
"""
single_trip = prev_history.iloc[0].trip_id
prev_record = prev_history.iloc[-1]
next_record = next_history.iloc[0]
# calculate the dist_along_route for current
prev_distance = float(prev_record.total_distance)
next_distance = float(next_record.total_distance)
prev_timestamp = datetime.strptime(prev_record['timestamp'], '%Y-%m-%dT%H:%M:%SZ')
next_timestamp = datetime.strptime(next_record['timestamp'], '%Y-%m-%dT%H:%M:%SZ')
# determine the current time
if prev_record['timestamp'][11:19] <= current_time <= next_record['timestamp'][11:19]:
time_of_day = prev_record['timestamp'][:11] + current_time + 'Z'
else:
# case: this trip is crossing between two days
if current_time > next_record['timestamp'][11:19]:
            time_of_day = prev_record['timestamp'][:11] + current_time + 'Z'
        else:
            time_of_day = next_record['timestamp'][:11] + current_time + 'Z'
time_of_day = datetime.strptime(time_of_day, '%Y-%m-%dT%H:%M:%SZ')
dist_along_route = calculate_arrival_distance(time_of_day, prev_distance, next_distance, prev_timestamp, next_timestamp)
# Generate the return list
# trip_id vehicle_id route_id stop_id time_of_day date dist_along_route
result = [single_trip, prev_record['vehicle_id'], route_id, stop_id, str(time_of_day), prev_record['service_date'], dist_along_route, shape_id]
return result
#################################################################################################################
# main function section #
#################################################################################################################
"""
Functions for users
"""
# weather data
def obtain_weather(start_date, end_date, api_token, save_path=None, engine=None):
"""
Download the weather.csv file into save_path
:param start_date: start date, string, example: '20160101'
:param end_date: similar to start_date
    :param api_token: api_token for the wunderground api interface. Anyone can apply for one for free.
:param save_path: path of a csv file for storing the weather table.
:param engine: database connect engine
:return: return the weather table in dataframe
"""
weather = download_weather(start_date, end_date, api_token)
if save_path is not None:
weather.to_csv(save_path)
if engine is not None:
weather.to_sql(name='weather', con=engine, if_exists='replace', index_label='id')
return weather
# history data
def download_history_file(year, month, date_list, save_path):
"""
    Download the history data from the nyc database. The user still needs to uncompress the data into csv files
:param year: integer to represent the year, example: 2016
:param month: integer to represent the month, example: 1
:param date_list: list of integer to represent the dates of the required data
:param save_path: path for downloading the compressed data
:return: None
"""
year = str(year)
if month < 10:
month = '0' + str(month)
else:
month = str(month)
base_url = 'http://data.mytransit.nyc/bus_time/'
url = base_url + year + '/' + year + '-' + month + '/'
download_file = urllib.URLopener()
for date in date_list:
if date < 10:
date = '0' + str(date)
else:
date = str(date)
filename = 'bus_time_' + year + month + date + '.csv.xz'
file_url = url + filename
download_file.retrieve(file_url, save_path + filename)
def obtain_history(start_date, end_date, trips, history_path, save_path=None, engine=None):
"""
Generate the csv file for history data
:param start_date: integer to represent the start date, example: 20160105
:param end_date: integer to represent the end date, format is the same as start date
:param trips: the dataframe storing the table from trips.txt file in GTFS dataset
    :param history_path: path of all the historical data. The user should place all the historical data under the same directory and use this directory as the history_path. Please note that changing the filenames may cause errors.
:param save_path: path of a csv file to store the history table
:param engine: database connect engine
:return: the history table in dataframe
"""
trip_set = set(trips.trip_id)
# generate the history data
file_list = os.listdir(history_path)
history_list = []
for filename in file_list:
if not filename.endswith('.csv'):
continue
if int(start_date) <= int(filename[9:17]) <= int(end_date):
print filename
ptr_history = pd.read_csv(history_path + filename)
tmp_history = ptr_history[(ptr_history.dist_along_route != '\N') & (ptr_history.dist_along_route != 0) & (ptr_history.progress == 0) & (ptr_history.block_assigned == 1) & (ptr_history.dist_along_route > 1) & (ptr_history.trip_id.isin(trip_set))]
history_list.append(tmp_history)
result = pd.concat(history_list, ignore_index=True)
# add some other information: total_distance, route_id, shape_id
result['dist_along_route'] = pd.to_numeric(result['dist_along_route'])
result['dist_from_stop'] = pd.to_numeric(result['dist_from_stop'])
result['total_distance'] = result['dist_along_route'] - result['dist_from_stop']
trip_route_dict = trips.set_index('trip_id').to_dict(orient='index')
result['route_id'] = result['trip_id'].apply(lambda x: trip_route_dict[x]['route_id'])
result['shape_id'] = result['trip_id'].apply(lambda x: trip_route_dict[x]['shape_id'])
# export csv file
if save_path is not None:
result.to_csv(save_path)
if engine is not None:
result.to_sql(name='history', con=engine, if_exists='replace', index_label='id')
return result
# route_stop_dist data
def obtain_route_stop_dist(trips, stop_times, history_file, save_path=None, engine=None):
"""
    Generate the csv file for route_stop_dist data. In order to obtain more complete route_stop_dist data, the history file should be as large as possible.
:param trips: the dataframe storing the table from trips.txt file in GTFS dataset
:param stop_times: the dataframe storing the table from stop_times.txt file in GTFS dataset
:param history_file: path of the preprocessed history file
:param save_path: path of a csv file to store the route_stop_dist table
:param engine: database connect engine
:return: the route_stop_dist table in dataframe
"""
trip_route_dict = trips.set_index('trip_id').to_dict(orient='index')
stop_times['route_id'] = stop_times['trip_id'].apply(lambda x: trip_route_dict[x]['route_id'])
stop_times['shape_id'] = stop_times['trip_id'].apply(lambda x: trip_route_dict[x]['shape_id'])
history = pd.read_csv(history_file)
route_stop_dist = calculate_stop_distance(stop_times, history)
if save_path is not None:
route_stop_dist.to_csv(save_path)
if engine is not None:
route_stop_dist.to_sql(name='route_stop_dist', con=engine, if_exists='replace', index_label='id')
return route_stop_dist
# segment data
def obtain_segment(weather_df, trips, stop_times, route_stop_dist, full_history, training_date_list, save_path=None, engine=None):
"""
Generate the csv file for segment table
:param weather_df: the dataframe storing the weather data
:param trips: the dataframe storing the table from trips.txt file in GTFS dataset
:param stop_times: the dataframe storing the table from stop_times.txt file in GTFS dataset
    :param route_stop_dist: the dataframe storing the route_stop_dist table
    :param full_history: the dataframe storing the history table
:param training_date_list: the list of dates to generate the segments from history table
:param save_path: path of a csv file to store the segment table
:param engine: database connect engine
:return: the segment table in dataframe
"""
full_history = full_history[full_history.service_date.isin(training_date_list)]
shape_list = set(route_stop_dist.shape_id)
full_history = full_history[full_history.shape_id.isin(shape_list)]
segment_df = generate_original_segment(full_history, weather_df, stop_times)
segment_df = improve_dataset(segment_df, stop_times, weather_df)
trip_route_dict = trips.set_index('trip_id').to_dict(orient='index')
segment_df['route_id'] = segment_df['trip_id'].apply(lambda x: trip_route_dict[x]['route_id'])
segment_df['shape_id'] = segment_df['trip_id'].apply(lambda x: trip_route_dict[x]['shape_id'])
if save_path is not None:
segment_df.to_csv(save_path)
if engine is not None:
segment_df.to_sql(name='segment', con=engine, if_exists='replace', index_label='id')
return segment_df
# api_data table
def obtain_api_data(route_stop_dist, full_history, date_list, time_list, stop_num, save_path=None, engine=None):
"""
Generate the csv file for api_data table
:param route_stop_dist: the dataframe storing route_stop_dist table
:param full_history: the dataframe storing historical data
:param date_list: the list of integers to represent the dates for generating api data. Example: [20160101, 20160102, 20160103]
:param time_list: the list of strings to represent the time for generating api data. Example: ['12:00:00', '12:05:00', '12:10:00', '12:15:00', '12:20:00', '12:25:00', '12:30:00']. Please follow the same format.
:param stop_num: the number of target stop for each shape id
:param save_path: path of a csv file to store the api_data table
:param engine: database connect engine
:return: the dataframe storing api_data table
"""
full_history = full_history[full_history.service_date.isin(date_list)]
result = generate_api_data(date_list, time_list, stop_num, route_stop_dist, full_history)
if save_path is not None:
result.to_csv(save_path)
if engine is not None:
result.to_sql(name='api_data', con=engine, if_exists='replace', index_label='id')
return result
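# A hedged end-to-end sketch of the user-facing functions above. Every path, date and the
# API token below is a placeholder, and the GTFS files (trips.txt, stop_times.txt) are
# assumed to be readable with pandas; adapt the values before running.
def _example_preprocessing_pipeline():
    trips = pd.read_csv('gtfs/trips.txt')
    stop_times = pd.read_csv('gtfs/stop_times.txt')
    weather_df = obtain_weather('20160101', '20160131', 'YOUR_WUNDERGROUND_TOKEN', save_path='weather.csv')
    history = obtain_history(20160105, 20160131, trips, 'history/', save_path='history.csv')
    route_stop_dist = obtain_route_stop_dist(trips, stop_times, 'history.csv', save_path='route_stop_dist.csv')
    segment_df = obtain_segment(weather_df, trips, stop_times, route_stop_dist, history,
                                [20160106, 20160107, 20160108], save_path='segment.csv')
    api_data = obtain_api_data(route_stop_dist, history, [20160110], ['12:00:00', '12:05:00'],
                               stop_num=2, save_path='api_data.csv')
    return segment_df, api_data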
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
from read_hdf5 import *
T = 1.0
nDTout = 100
DT = old_div(T,float(nDTout))
def uex0(x,t):
"""
Exact solution
"""
return 128.0*(1.0-t)*x[...,1]*(1.0-x[...,1])*x[...,2]*(1.0-x[...,2])*x[...,0]*(1.0-x[...,0])
archive = Archiver.XdmfArchive(".","heat_3d",readOnly=True)
label="/%s%d" % ('nodesSpatial_Domain',0)
print('trying to read from %s ' % label)
coord = read_from_hdf5(archive.hdfFile,label)
import numpy as np
u = read_from_hdf5(archive.hdfFile,'/u0')
uex_vals = np.zeros(u.shape,'d')
for i in range(0,nDTout+1):
time_level_to_read=i
label="/%s%d" % ('u',time_level_to_read)
print('trying to read from %s ' % label)
u = read_from_hdf5(archive.hdfFile,label)
uex_vals = uex0(coord,i*DT)
err = u-uex_vals
err *= err
err *= old_div(1.0,9261.0) #9261 = 21^3
L2approx = np.sqrt(err.sum())
print("Trapezoidal approximation for error at dofs for nx=21 ny=21 nz=21 is %s " % L2approx)
|
# -*- coding: utf-8 -*-
import pytest
from bookops_watchdog.bpl.class_marks import (
call_no_audience,
call_no_format,
has_language_prefix,
parse_bpl_call_no,
)
@pytest.mark.parametrize(
"arg,expectation",
[
("AUDIO 001 J", "a"),
("AUDIO J FIC ADAMS", "j"),
("AUDIO J 505 A", "j"),
("AUDIO CHI J FIC ADAMS", "j"),
("BOOK & CD RUS J-E ADAMS", "e"),
("BOOK & CD 128.2 F", "a"),
("KIT J-E BANG", "e"),
("eBOOK", "a"),
("CD INSTRU ACCORDION", "a"),
("CD J ADELE", "j"),
("DVD J 302 R", "j"),
("DVD J MOVIE CATS", "j"),
("DVD J TV CASPER", "j"),
("DVD SPA J MOVIE CATS", "j"),
("DVD SPA J 505.23 C", "j"),
("DVD SPA J B MANDELA K", "j"),
("DVD B ADAMS J", "a"),
("DVD 500 J", "a"),
("DVD MOVIE MONTY", "a"),
("DVD TV MONTY", "a"),
("LIB 782.085 ORFF", "a"),
("Mu 780.8 M", "a"),
("NM 029.1 A", "a"),
("VIDEO B FRANKLIN C", "a"),
("READALONG J 323.092 K", "j"),
("READALONG J B MANDELA K", "j"),
("READALONG J FIC GAIMAN", "j"),
("READALONG SPA J FIC MORA", "j"),
("BOARD GAME", "a"),
("J-E", "e"),
("J-E ADAMS", "e"),
("POL J-E ADAMS", "e"),
("CHI J-E", "e"),
("J-E P", "e"),
("909 B", "a"),
("B ADAMS C", "a"),
("FIC ADAMS", "a"),
("POL J FIC DUKAJ", "j"),
("POL J 505 K", "j"),
("POL J B MANDELA K", "j"),
("J 505 A", "j"),
("J B ADAMS K", "j"),
("J FIC ADAMS", "j"),
("J FIC", "j"),
],
)
def test_call_no_audience(arg, expectation):
assert call_no_audience(arg) == expectation
@pytest.mark.parametrize(
"arg,expectation",
[
("AUDIO 001 B", "au"),
("AUDIO SPA FIC ADAMS", "au"),
("AUDIO CHI J FIC ADAMS", "au"),
("BOOK & CD 128.2 F", "ki"),
("BOOK & CD RUS J-E ADAMS", "ki"),
("BOOK & DVD 781 D", "ki"),
("BOOK & TAPE 428.1 S", "ki"),
("KIT 423 D", "ki"),
("KIT J-E BANG", "ki"),
("eAUDIO", "er"),
("eBOOK", "er"),
("eJOURNAL", "er"),
("eMUSIC", "er"),
("eVIDEO", "er"),
("WEB SITE 011.3", "er"),
("DVD-ROM 382 U58 CU", "er"),
("CD-ROM 004.65 L", "er"),
("CD HOLIDAY CHRISTMAS AGUILERA", "cd"),
("CD INSTRU ACCORDION", "cd"),
("DVD MOVIE MONTY", "dv"),
("DVD TV MONTY", "dv"),
("DVD SPA J 550 C", "dv"),
("LIB 782.085 ORFF", "li"),
("Mu 780.8 M", "mu"),
("NM 029.1 A", "nm"),
("VIDEO B FRANKLIN C", "vi"),
("READALONG J 323.092 K", "ra"),
("READALONG J FIC GAIMAN", "ra"),
("BOARD GAME", "bg"),
("909 B", "pr"),
("B ADAMS C", "pr"),
("FIC ADAMS", "pr"),
("POL J FIC DUKAJ", "pr"),
],
)
def test_call_no_format(arg, expectation):
assert call_no_format(arg) == expectation
@pytest.mark.parametrize(
"arg,expectation",
[
("AUDIO FIC ADAMS", False),
("AUDIO J FIC A", False),
("AUDIO SPA 500 A", True),
("AUDIO SPA FIC ADAMS", True),
("AUDIO SPA B ADAMS C", True),
("BOOK & CD RUS J-E ADAMS", True),
("BOOK & CD RUS FIC ADAMS", True),
("BOOK & CD FIC ADAMS", False),
("BOOK & CD B ADAMS C", False),
("BOOK & CD SPA B ADAMS C", True),
("BOOK & CD SPA J FIC ADAMS", True),
("BOOK & CD 505 K", False),
("BOOK & CD SPA 505 K", True),
("KIT 505 W", False),
("KIT SPA 505 W", True),
("KIT SPA J 505 W", True),
("KIT J-E FOO", False),
("KIT J FIC CATS", False),
("WEB SITE 505", False),
("WEB SITE SPA 505", True),
("DVD MOVIE FOO", False),
("DVD SPA MOVIE FOO", True),
("DVD J MOVIE FOO", False),
("DVD TV FOO", False),
("DVD J TV FOO", False),
("DVD SPA J MOVIE FOO", True),
("DVD SPA J TV FOO", True),
("DVD SPA 500 A", True),
("DVD 500 A", False),
("DVD J 500 A", False),
("DVD B ADAMS C", False),
("DVD SPA B ADAMS C", True),
("LIB 700.23 C", False),
("READALONG SPA J FIC ADAMS", True),
("READALONG J FIC ADAMS", False),
("SPA J-E ADAMS", True),
("J-E ADAMS", False),
("J-E", False),
("SPA J FIC ADAMS", True),
("J FIC ADAMS", False),
("SPA J 500 A", True),
("SPA J B ADAMS C", True),
("J 500 A", False),
("J B ADA C", False),
("FIC ADAMS", False),
("FIC W", False),
("SPA FIC ADAMS", True),
("SPA FIC A", True),
("B FOO C", False),
("SPA B ADAMS C", True),
("J B FOO C", False),
("500 B", False),
("SPA 500 B", True),
("J 500 B", False),
],
)
def test_has_language_prefix(arg, expectation):
assert has_language_prefix(arg) == expectation
def test_parse_bpl_call_no():
pass
|
import numpy as np
import matplotlib.pyplot as plt
with open('accuracy_big') as f:
x = [i*0.1 for i in range(0, 11)]
y = [float(i) for i in f]
plt.plot(x,y)
plt.xlabel("mu")
plt.ylabel("Accuracy")
# plt.title("Prediction Accuracies (concated)")
plt.title("Prediction Accuracies")
plt.show()
|
"""This module contains PlainFrame and PlainColumn tests.
"""
import collections
import datetime
import pytest
import numpy as np
import pandas as pd
from numpy.testing import assert_equal as np_assert_equal
from pywrangler.util.testing.plainframe import (
NULL,
ConverterFromPandas,
NaN,
PlainColumn,
PlainFrame
)
@pytest.fixture
def plainframe_standard():
cols = ["int", "float", "bool", "str", "datetime"]
data = [[1, 1.1, True, "string", "2019-01-01 10:00:00"],
[2, 2, False, "string2", "2019-02-01 10:00:00"]]
return PlainFrame.from_plain(data=data, dtypes=cols, columns=cols)
@pytest.fixture
def plainframe_missings():
cols = ["int", "float", "bool", "str", "datetime"]
data = [[1, 1.1, True, "string", "2019-01-01 10:00:00"],
[2, NaN, False, "string2", "2019-02-01 10:00:00"],
[NULL, NULL, NULL, NULL, NULL]]
return PlainFrame.from_plain(data=data, dtypes=cols, columns=cols)
@pytest.fixture
def df_from_pandas():
df = pd.DataFrame(
{"int": [1, 2],
"int_na": [1, np.NaN],
"bool": [True, False],
"bool_na": [True, np.NaN],
"float": [1.2, 1.3],
"float_na": [1.2, np.NaN],
"str": ["foo", "bar"],
"str_na": ["foo", np.NaN],
"datetime": [pd.Timestamp("2019-01-01"), pd.Timestamp("2019-01-02")],
"datetime_na": [pd.Timestamp("2019-01-01"), pd.NaT]})
return df
@pytest.fixture
def df_from_spark(spark):
from pyspark.sql import types
values = collections.OrderedDict(
{"int": [1, 2, None],
"smallint": [1, 2, None],
"bigint": [1, 2, None],
"bool": [True, False, None],
"single": [1.0, NaN, None],
"double": [1.0, NaN, None],
"str": ["foo", "bar", None],
"datetime": [datetime.datetime(2019, 1, 1),
datetime.datetime(2019, 1, 2),
None],
"date": [datetime.date(2019, 1, 1),
datetime.date(2019, 1, 2),
None],
"map": [{"foo": "bar"}, {"bar": "foo"}, None],
"array": [[1, 2, 3], [3, 4, 5], None]}
)
data = list(zip(*values.values()))
c = types.StructField
columns = [c("int", types.IntegerType()),
c("smallint", types.ShortType()),
c("bigint", types.LongType()),
c("bool", types.BooleanType()),
c("single", types.FloatType()),
c("double", types.DoubleType()),
c("str", types.StringType()),
c("datetime", types.TimestampType()),
c("date", types.DateType()),
c("map", types.MapType(types.StringType(), types.StringType())),
c("array", types.ArrayType(types.IntegerType()))]
schema = types.StructType(columns)
return spark.createDataFrame(data, schema=schema)
def create_plain_frame(cols, rows, reverse_cols=False, reverse_rows=False):
"""Helper function to automatically create instances of PlainFrame.
`cols` contains typed column annotations like "col1:int".
"""
if reverse_cols:
cols = cols[::-1]
columns, dtypes = zip(*[col.split(":") for col in cols])
values = list(range(1, rows + 1))
mapping = {"str": list(map(str, values)),
"int": values,
"float": list(map(float, values)),
"bool": list([x % 2 == 0 for x in values]),
"datetime": ["2019-01-{:02} 10:00:00".format(x) for x in
values]}
data = [mapping[dtype] for dtype in dtypes]
data = list(zip(*data))
if reverse_rows:
data = data[::-1]
return PlainFrame.from_plain(data=data,
dtypes=dtypes,
columns=columns)
def create_plainframe_single(values, dtype):
"""Create some special scenarios more easily. Always assumes a single
column with identical name. Only values and dtype varies.
"""
data = [[x] for x in values]
dtypes = [dtype]
columns = ["name"]
return PlainFrame.from_plain(data=data, dtypes=dtypes, columns=columns)
def test_plainframe():
    # incorrect instantiation with non-tuples and without the factory method
plain_column = PlainColumn.from_plain(name="int",
dtype="int",
values=[1, 2, 3])
# correct instantiation
PlainFrame(plaincolumns=(plain_column,))
with pytest.raises(ValueError):
PlainFrame(plaincolumns=[plain_column])
with pytest.raises(ValueError):
PlainFrame(plaincolumns=[1])
def test_plainframe_from_plain_pandas_empty():
# tests GH#29
df = PlainFrame.from_plain(data=[], columns=["col1:int", "col2:str"])
col_values = lambda x: df.get_column(x).values
assert df.n_rows == 0
assert df.columns == ["col1", "col2"]
assert df.dtypes == ["int", "str"]
assert col_values("col1") == tuple()
assert col_values("col2") == tuple()
dfp = pd.DataFrame(columns=["col1", "col2"], dtype=int)
df = PlainFrame.from_pandas(dfp)
col_values = lambda x: df.get_column(x).values
assert df.n_rows == 0
assert df.columns == ["col1", "col2"]
assert df.dtypes == ["int", "int"]
assert col_values("col1") == tuple()
assert col_values("col2") == tuple()
def test_plainframe_attributes(plainframe_missings):
df = plainframe_missings
col_values = lambda x: df.get_column(x).values
assert df.columns == ["int", "float", "bool", "str", "datetime"]
assert df.dtypes == ["int", "float", "bool", "str", "datetime"]
assert col_values("int") == (1, 2, NULL)
assert col_values("str") == ("string", "string2", NULL)
assert col_values("datetime")[0] == datetime.datetime(2019, 1, 1, 10)
def test_plainframe_modify():
# change single value
df_origin = create_plainframe_single([1, 2], "int")
df_target = create_plainframe_single([1, 1], "int")
assert df_origin.modify({"name": {1: 1}}) == df_target
# change multiple values
df_origin = create_plainframe_single([1, 2], "int")
df_target = create_plainframe_single([3, 3], "int")
assert df_origin.modify({"name": {0: 3, 1: 3}}) == df_target
# change multiple columns
df_origin = PlainFrame.from_plain(data=[[1, 2], ["a", "b"]],
dtypes=["int", "str"],
columns=["int", "str"],
row_wise=False)
df_target = PlainFrame.from_plain(data=[[1, 1], ["a", "a"]],
dtypes=["int", "str"],
columns=["int", "str"],
row_wise=False)
assert df_origin.modify({"int": {1: 1}, "str": {1: "a"}}) == df_target
def test_plainframe_modify_assertions():
# check incorrect type conversion
df = create_plainframe_single([1, 2], "int")
with pytest.raises(TypeError):
df.modify({"name": {0: "asd"}})
def test_plainframe_getitem_subset():
df = create_plain_frame(["col1:str", "col2:int", "col3:int"], 2)
df_sub = create_plain_frame(["col1:str", "col2:int"], 2)
cmp_kwargs = dict(assert_column_order=True,
assert_row_order=True)
# test list of strings, slice and string
df["col1", "col2"].assert_equal(df_sub, **cmp_kwargs)
df["col1":"col2"].assert_equal(df_sub, **cmp_kwargs)
df["col1"].assert_equal(df_sub["col1"], **cmp_kwargs)
# test incorrect type
with pytest.raises(ValueError):
df[{"col1"}]
# test invalid column name
with pytest.raises(ValueError):
df["non_existant"]
def test_plainframe_get_column():
df = create_plain_frame(["col1:str", "col2:int"], 2)
assert df.get_column("col1") is df.plaincolumns[0]
# check value error for non existent column
with pytest.raises(ValueError):
df.get_column("does_not_exist")
def test_plainframe_parse_typed_columns():
parse = PlainFrame._parse_typed_columns
# invalid splits
cols = ["col1:int", "col2"]
with pytest.raises(ValueError):
parse(cols)
# invalid types
cols = ["col1:asd"]
with pytest.raises(ValueError):
parse(cols)
# invalid abbreviations
cols = ["col1:a"]
with pytest.raises(ValueError):
parse(cols)
# correct types and columns
cols = ["col1:str", "col2:s",
"col3:int", "col4:i",
"col5:float", "col6:f",
"col7:bool", "col8:b",
"col9:datetime", "col10:d"]
names = ["col{}".format(x) for x in range(1, 11)]
dtypes = ["str", "str",
"int", "int",
"float", "float",
"bool", "bool",
"datetime", "datetime"]
result = (names, dtypes)
np_assert_equal(parse(cols), result)
def test_plainframe_from_plain():
# unequal elements per row
with pytest.raises(ValueError):
PlainFrame.from_plain(data=[[1, 2],
[1]],
columns=["a", "b"],
dtypes=["int", "int"])
# mismatch between number of columns and entries per row
with pytest.raises(ValueError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a"],
dtypes=["int", "int"])
# mismatch between number of dtypes and entries per row
with pytest.raises(ValueError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int"])
# incorrect dtypes
with pytest.raises(ValueError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int", "bad_type"])
# type errors conversion
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int", "str"])
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int", "bool"])
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[["1", 2],
["1", 2]],
columns=["a", "b"],
dtypes=["float", "int"])
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[["1", 2],
["1", 2]],
columns=["a", "b"],
dtypes=["str", "str"])
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[[True, 2],
[False, 2]],
columns=["a", "b"],
dtypes=["datetime", "int"])
# correct implementation should not raise
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int", "int"])
def test_plainframe_to_plain():
columns = dtypes = ["int", "float", "bool", "str"]
data = [[1, 1.1, True, "string"],
[2, 2, False, "string2"]]
pf = PlainFrame.from_plain(data=data, columns=columns, dtypes=dtypes)
expected = (data, columns, dtypes)
assert pf.to_plain() == expected
def test_plainframe_from_dict():
data = collections.OrderedDict(
[("col1:int", [1, 2, 3]),
("col2:s", ["a", "b", "c"])]
)
df = PlainFrame.from_dict(data)
# check correct column order and dtypes
np_assert_equal(df.columns, ("col1", "col2"))
np_assert_equal(df.dtypes, ["int", "str"])
# check correct values
np_assert_equal(df.get_column("col1").values, (1, 2, 3))
np_assert_equal(df.get_column("col2").values, ("a", "b", "c"))
def test_plainframe_to_dict():
df = create_plain_frame(["col2:str", "col1:int"], 2)
to_dict = df.to_dict()
keys = list(to_dict.keys())
values = list(to_dict.values())
# check column order and dtypes
np_assert_equal(keys, ["col2:str", "col1:int"])
# check values
np_assert_equal(values[0], ["1", "2"])
np_assert_equal(values[1], [1, 2])
def test_plainframe_from_pandas(df_from_pandas):
df = df_from_pandas
df_conv = PlainFrame.from_pandas(df)
# check int to int
assert df_conv.get_column("int").dtype == "int"
assert df_conv.get_column("int").values == (1, 2)
# check bool to bool
assert df_conv.get_column("bool").dtype == "bool"
assert df_conv.get_column("bool").values == (True, False)
# check bool (object) to bool with nan
assert df_conv.get_column("bool_na").dtype == "bool"
assert df_conv.get_column("bool_na").values == (True, NULL)
# check float to float
assert df_conv.get_column("float").dtype == "float"
assert df_conv.get_column("float").values == (1.2, 1.3)
# check float to float with nan
assert df_conv.get_column("float_na").dtype == "float"
np_assert_equal(df_conv.get_column("float_na").values, (1.2, NaN))
# check str to str
assert df_conv.get_column("str").dtype == "str"
assert df_conv.get_column("str").values == ("foo", "bar")
# check str to str with nan
assert df_conv.get_column("str_na").dtype == "str"
assert df_conv.get_column("str_na").values == ("foo", NULL)
# check datetime to datetime
assert df_conv.get_column("datetime").dtype == "datetime"
assert df_conv.get_column("datetime").values == \
(datetime.datetime(2019, 1, 1), datetime.datetime(2019, 1, 2))
# check datetime to datetime with nan
assert df_conv.get_column("datetime_na").dtype == "datetime"
assert df_conv.get_column("datetime_na").values == (
datetime.datetime(2019, 1, 1), NULL)
def test_plainframe_from_pandas_assertions_missings_cast():
# check mixed dtype raise
df = pd.DataFrame({"mixed": [1, "foo bar"]})
with pytest.raises(TypeError):
PlainFrame.from_pandas(df)
# check assertion for incorrect forces
# too many types provided
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes=["int", "str"])
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes={"mixed": "str",
"dummy": "int"})
# invalid dtypes provided
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes=["not existant type"])
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes={"mixed": "not existant type"})
# invalid column names provided
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes={"dummy": "str"})
# check int to forced int with nan
df = pd.DataFrame({"int": [1, np.NaN]})
df_conv = PlainFrame.from_pandas(df, dtypes=["int"])
assert df_conv.get_column("int").dtype == "int"
assert df_conv.get_column("int").values == (1, NULL)
# check force int to float
df = pd.DataFrame({"int": [1, 2]})
df_conv = PlainFrame.from_pandas(df, dtypes=["float"])
assert df_conv.get_column("int").dtype == "float"
assert df_conv.get_column("int").values == (1.0, 2.0)
# check force float to int
df = pd.DataFrame({"float": [1.0, 2.0]})
df_conv = PlainFrame.from_pandas(df, dtypes=["int"])
assert df_conv.get_column("float").dtype == "int"
assert df_conv.get_column("float").values == (1, 2)
# check force str to datetime
df = pd.DataFrame({"datetime": ["2019-01-01", "2019-01-02"]})
df_conv = PlainFrame.from_pandas(df, dtypes=["datetime"])
assert df_conv.get_column("datetime").dtype == "datetime"
assert df_conv.get_column("datetime").values == \
(datetime.datetime(2019, 1, 1), datetime.datetime(2019, 1, 2))
# dtype object with strings and nan should pass correctly
df = pd.DataFrame({"str": ["foo", "bar", NaN]}, dtype=object)
df_conv = PlainFrame.from_pandas(df)
assert df_conv.get_column("str").dtype == "str"
assert df_conv.get_column("str").values == ("foo", "bar", NULL)
def test_plainframe_from_pandas_inspect_dtype():
inspect = ConverterFromPandas.inspect_dtype
# raise if incorrect type
ser = pd.Series("asd", dtype=object)
with pytest.raises(TypeError):
inspect(ser)
def test_plainframe_from_pandas_inspect_dtype_object():
inspect = ConverterFromPandas.inspect_dtype_object
# ensure string with missings
df = pd.DataFrame({"dummy": ["asd", NaN]})
conv = ConverterFromPandas(df)
assert conv.inspect_dtype_object("dummy") == "str"
# check incorrect dtype
df = pd.DataFrame({"dummy": ["asd", tuple([1, 2])]})
conv = ConverterFromPandas(df)
with pytest.raises(TypeError):
conv.inspect_dtype_object("dummy")
def test_plainframe_to_pandas(plainframe_standard):
from pandas.api import types
df = plainframe_standard.to_pandas()
assert types.is_integer_dtype(df["int"])
assert df["int"][0] == 1
assert df["int"].isnull().sum() == 0
assert types.is_float_dtype(df["float"])
assert df["float"].isnull().sum() == 0
assert df["float"][1] == 2.0
assert types.is_bool_dtype(df["bool"])
np_assert_equal(df["bool"][0], True)
assert df["bool"].isnull().sum() == 0
assert types.is_object_dtype(df["str"])
assert df["str"].isnull().sum() == 0
assert df["str"][0] == "string"
assert types.is_datetime64_dtype(df["datetime"])
assert df["datetime"].isnull().sum() == 0
assert df["datetime"][0] == pd.Timestamp("2019-01-01 10:00:00")
def test_plainframe_to_pandas_missings(plainframe_missings):
from pandas.api import types
df = plainframe_missings.to_pandas()
assert types.is_float_dtype(df["int"])
assert df["int"][0] == 1.0
assert pd.isnull(df["int"][2])
assert df["int"].isnull().sum() == 1
assert df["float"].isnull().sum() == 2
assert df["float"][0] == 1.1
assert pd.isnull(df["float"][2])
assert types.is_float_dtype(df["bool"])
assert df["bool"][0] == 1.0
assert df["bool"].isnull().sum() == 1
assert types.is_object_dtype(df["str"])
assert df["str"].isnull().sum() == 1
assert df["str"][0] == "string"
assert types.is_datetime64_dtype(df["datetime"])
assert df["datetime"].isnull().sum() == 1
assert df["datetime"][0] == pd.Timestamp("2019-01-01 10:00:00")
assert df["datetime"][2] is pd.NaT
def test_plainframe_from_pyspark(df_from_spark):
def select(x):
from_pyspark = PlainFrame.from_pyspark
return from_pyspark(df_from_spark.select(x))
# int columns
int_columns = ["int", "smallint", "bigint"]
df = select(int_columns)
for int_column in int_columns:
assert df.get_column(int_column).dtype == "int"
assert df.get_column(int_column).values == (1, 2, NULL)
# bool column
df = select("bool")
assert df.get_column("bool").dtype == "bool"
assert df.get_column("bool").values == (True, False, NULL)
# float columns
float_columns = ["single", "double"]
df = select(float_columns)
for float_column in float_columns:
assert df.get_column(float_column).dtype == "float"
np_assert_equal(df.get_column(float_column).values, (1.0, NaN, NULL))
# string column
df = select("str")
assert df.get_column("str").dtype == "str"
assert df.get_column("str").values == ("foo", "bar", NULL)
# datetime columns
datetime_columns = ["datetime", "date"]
df = select(datetime_columns)
for datetime_column in datetime_columns:
col = df.get_column(datetime_column)
assert col.dtype == "datetime"
assert col.values == (datetime.datetime(2019, 1, 1),
datetime.datetime(2019, 1, 2),
NULL)
# unsupported columns
unsupported_columns = ["map", "array"]
for unsupported_column in unsupported_columns:
with pytest.raises(ValueError):
df = select(unsupported_column)
def test_plainframe_to_pyspark(plainframe_missings):
df = plainframe_missings.to_pyspark()
dtypes = dict(df.dtypes)
assert dtypes["int"] == "int"
assert dtypes["float"] == "double"
assert dtypes["bool"] == "boolean"
assert dtypes["str"] == "string"
assert dtypes["datetime"] == "timestamp"
res = df.collect()
assert res[0].int == 1
assert res[2].int is None
assert res[0].float == 1.1
assert pd.isnull(res[1].float)
assert res[2].float is None
assert res[0].bool is True
assert res[2].bool is None
assert res[0].str == "string"
assert res[2].str is None
assert res[0].datetime == datetime.datetime(2019, 1, 1, 10)
assert res[2].datetime is None
def test_plainframe_from_any(plainframe_standard):
conv = PlainFrame.from_any
# test plainframe
assert conv(plainframe_standard) == plainframe_standard
# test dict
assert conv(plainframe_standard.to_dict()) == plainframe_standard
# test tuple
assert conv(plainframe_standard.to_plain()) == plainframe_standard
# test pandas
assert conv(plainframe_standard.to_pandas()) == plainframe_standard
# test pyspark
assert conv(plainframe_standard.to_pyspark()) == plainframe_standard
# test wrong type
with pytest.raises(ValueError):
conv("string")
def test_plainframe_assert_equal():
# equal values should be equal
left = create_plain_frame(["a:int", "b:int"], 10)
right = create_plain_frame(["a:int", "b:int"], 10)
left.assert_equal(right)
# different values should not be equal
left = create_plainframe_single([1, 2], "int")
right = create_plainframe_single([2, 3], "int")
with pytest.raises(AssertionError):
left.assert_equal(right)
# incorrect number of rows
with pytest.raises(AssertionError):
left = create_plain_frame(["a:int", "b:int"], 10)
right = create_plain_frame(["a:int", "b:int"], 5)
left.assert_equal(right)
# incorrect number of columns
with pytest.raises(AssertionError):
left = create_plain_frame(["a:int"], 10)
right = create_plain_frame(["a:int", "b:int"], 10)
left.assert_equal(right)
# incorrect column_names
with pytest.raises(AssertionError):
left = create_plain_frame(["a:int", "c:int"], 10)
right = create_plain_frame(["a:int", "b:int"], 10)
left.assert_equal(right)
# incorrect dtypes
with pytest.raises(AssertionError):
left = create_plain_frame(["a:int", "b:str"], 10)
right = create_plain_frame(["a:int", "b:int"], 10)
left.assert_equal(right)
# check column order
left = create_plain_frame(["a:int", "b:int"], 10, reverse_cols=True)
right = create_plain_frame(["a:int", "b:int"], 10)
left.assert_equal(right, assert_column_order=False)
with pytest.raises(AssertionError):
left.assert_equal(right, assert_column_order=True)
# check row order
left = create_plain_frame(["a:int", "b:int"], 10, reverse_rows=True)
right = create_plain_frame(["a:int", "b:int"], 10)
left.assert_equal(right, assert_row_order=False)
with pytest.raises(AssertionError):
left.assert_equal(right, assert_row_order=True)
def test_plainframe_assert_equal_missings():
# nan should be equal
left = create_plainframe_single([NaN, 1], "float")
right = create_plainframe_single([NaN, 1], "float")
left.assert_equal(right)
# Null should be equal
left = create_plainframe_single([NULL, 1], "float")
right = create_plainframe_single([NULL, 1], "float")
left.assert_equal(right)
# null should be different from other values
with pytest.raises(AssertionError):
left = create_plainframe_single(["2019-01-01"], "datetime")
right = create_plainframe_single([NULL], "datetime")
left.assert_equal(right)
# nan should be different from other values
with pytest.raises(AssertionError):
left = create_plainframe_single([1.1], "float")
right = create_plainframe_single([NaN], "float")
left.assert_equal(right)
|
def velthview_with_rm(line, romanflag):
    """Convert one line from IAST to Velthuis notation, leaving roman-script spans alone.

    Text between a 'Ł' marker and a '$' marker is treated as roman script: it is kept as-is
    apart from the Dharma (XeLaTeX) transliteration substitutions. Everything else is
    converted character by character using the Velthuis substitution table below. The
    updated romanflag is returned together with the line, so the roman state carries over
    even when 'Ł' and '$' are not on the same line.
    """
return_line = ""
new_c = ""
dharma_trans_dict = {"ṃ": "ṁ",
"ṛ": "r̥",
"ṝ": "r̥̄",
"ḷ": "l̥",}
subdict ={'ā': 'aa',
"'": '.a',
'ī': 'ii',
'ū': 'uu',
'ṛ': '.r',
'r̥': '.r',
'ṝ': '.R',
'r̥̄': '.R',
'ḷ': '.l',
'l̥': '.l',
'ḹ': '.L',
'ṅ': '\"n',
'ñ': '~n',
'ṭ': '.t',
'ḍ': '.d',
'ṇ': '.n',
'ś': '\"s',
'ṣ': '.s',
'ṃ': '.m',
'ṁ': '.m',
'ḥ': '.h',
'Ó': '.o',
'°': '@'}
for c in line:
new_c = c
if c == "Ł":
romanflag = True
new_c = "Ł"
elif c == "$":
romanflag = False
new_c = "$"
elif romanflag == False:
if c in subdict:
new_c = subdict[c]
elif romanflag == True:
# Dharma transliteration tricks (XeLaTeX)
if c in dharma_trans_dict:
new_c = dharma_trans_dict[c]
return_line = return_line + new_c
return return_line, romanflag
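# Minimal usage sketch with a made-up input line: the span between 'Ł' and '$' is treated
# as roman script and left alone, while the rest is converted to Velthuis notation.
def _example_velthview_with_rm():
    converted, flag = velthview_with_rm("śāstra Łdharma$ ṭīkā", False)
    # expected: '"saastra Łdharma$ .tiikaa', with flag back to False
    return converted, flag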
|
from .market import Market
from . import position
from ..db.models import TradingOrder
import logging
logger = logging.getLogger(__name__)
class MarketSimulator(Market):
"""Wrapper for market that allows simulating simple buys and sells"""
def __init__(self, exchange, base_currency, quote_currency, quote_currency_balance, strategy):
super().__init__(exchange, base_currency, quote_currency, strategy)
self.starting_balance = quote_currency_balance
self.quote_balance = quote_currency_balance
self.base_balance = 0
self.simulating = False
def __del__(self):
self.session.close()
def add_session(self, session):
self.session = session()
def limit_buy(self, quantity, price):
if self.quote_balance >= quantity * price:
self.quote_balance = self.quote_balance - quantity * price
self.base_balance = self.base_balance + quantity
order = TradingOrder(
exchange=self.exchange.id,
strategy_id= self.strategy.strategy_id,
run_key=self.strategy.run_key,
pair=self.analysis_pair,
position='buy',
amount=quantity,
price=price,
simulated="simulated"
)
self.session.add(order)
self.session.commit()
logger.info("Executed buy simulation of " + str(quantity) + " " + self.base_currency + " for " + str(price) + " " + self.quote_currency)
logger.info(self.quote_currency + " balance: " + str(self.quote_balance))
logger.info(self.base_currency + " balance: " + str(self.base_balance))
else:
logger.info("Insufficient balance for simulation buy")
def limit_sell(self, quantity, price):
if self.base_balance >= quantity:
self.base_balance = self.base_balance - quantity
self.quote_balance = self.quote_balance + quantity * price
order = TradingOrder(
exchange=self.exchange.id,
strategy_id= self.strategy.strategy_id,
run_key=self.strategy.run_key,
pair=self.analysis_pair,
position='sell',
amount=quantity,
price=price,
simulated="simulated"
)
self.session.add(order)
self.session.commit()
logger.info("Executed sell simulation of " + str(quantity) + " " + self.base_currency + " for " + str(price) + " " + self.quote_currency)
logger.info(self.quote_currency + " balance: " + str(self.quote_balance))
logger.info(self.base_currency + " balance: " + str(self.base_balance))
else:
logger.info("Insufficient balance for simulation sell")
def market_buy(self, quantity):
if self.quote_balance >= quantity * self.get_ask_price():
self.quote_balance = self.quote_balance - quantity * self.get_ask_price()
self.base_balance = self.base_balance + quantity
logger.info("Executed buy simulation of " + str(quantity) + " " + self.base_currency + " for " + str(self.get_ask_price()) + " " + self.quote_currency)
logger.info(self.quote_currency + " balance: " + str(self.quote_balance))
logger.info(self.base_currency + " balance: " + str(self.base_balance))
else:
logger.info("Insufficient balance for simulation buy")
def market_sell(self, quantity):
if self.base_balance >= quantity:
self.base_balance = self.base_balance - quantity
self.quote_balance = self.quote_balance + quantity * self.get_bid_price()
logger.info("Executed sell simulation of " + str(quantity) + " " + self.base_currency + " for " + str(self.get_bid_price()) + " " + self.quote_currency)
logger.info(self.quote_currency + " balance: " + str(self.quote_balance))
logger.info(self.base_currency + " balance: " + str(self.base_balance))
else:
logger.info("Insufficient balance for simulation sell")
def get_ask_price(self):
"""Get ask price for simulation"""
if not self.simulating:
"""if operating on live data, use actual ask"""
return self.exchange.fetchTicker(self.analysis_pair)['ask']
else:
"""if operating on historical data, use close"""
return self.latest_candle['5m'][4]
def get_bid_price(self):
if not self.simulating:
"""if operating on live data, use actual ask"""
return self.exchange.fetchTicker(self.analysis_pair)['bid']
else:
"""if operating on historical data, use close"""
return self.latest_candle['5m'][4]
def get_wallet_balance(self):
return self.quote_balance
def open_long_position_simulation(market, amount, price, fixed_stoploss, trailing_stoploss_percent, profit_target_percent):
"""Create simulated long position"""
# logger.info("Opening simulated long position")
position = LongPositionSimulator(market, amount, price, fixed_stoploss, trailing_stoploss_percent, profit_target_percent)
position.open()
return position
def open_short_position_simulation(market, amount, price):
"""Create simulated short position"""
logger.info("Opening simulated short position")
position = ShortPositionSimulator(market, amount, price)
position.open()
return position
# TODO: the 5m interval is also hardcoded here; search the project for '5m'
class LongPositionSimulator(position.LongPosition):
"""Simulated long position. Overrides the functionality of creating an actual order to use the MarketSimulators balance and calculations"""
def __init__(self, market, amount, price, fixed_stoploss, trailing_stoploss_percent, profit_target_percent):
super().__init__(market, amount, price, fixed_stoploss, trailing_stoploss_percent, profit_target_percent)
# TODO: 5m interval is hard coded here
def liquidate_position(self):
"""Will use this method to actually create the order that liquidates the position"""
logger.info("Closing simulated long position")
open_short_position_simulation(self.market, self.amount, self.market.latest_candle['5m'][3])
self.is_open = False
def open(self):
self.market.limit_buy(self.amount, self.price)
self.is_open = True
def update(self, sell=False):
"""Use this method to trigger position to check if profit target has been met, and re-set trailiing stop loss"""
# logger.info("UPDATING LONG POSITION")
if self.market.latest_candle['5m'][3] < self.trailing_stoploss or \
self.market.latest_candle['5m'][3] < self.fixed_stoploss or \
self.market.latest_candle['5m'][3] >= self.profit_target or \
sell is True: # check price against last calculated trailing stoploss
self.liquidate_position()
# re-calculate trailing stoploss
self.trailing_stoploss = self.calculate_trailing_stoploss()
class ShortPositionSimulator(position.ShortPosition):
"""Simulated short position. Overrides the functionality of creating an actual order to use the MarketSimulators balance and calculations"""
def __init__(self, market, amount, price):
super().__init__(market, amount, price)
def open(self):
self.market.limit_sell(self.amount, self.price)
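# --- Usage sketch (illustrative only, not part of the strategy code) ---
# The simulated positions above only move numbers between quote_balance and
# base_balance; no fees or slippage are modelled. A minimal round trip under
# those simplifications looks like this:
def _simulated_round_trip(quote_balance, base_balance, quantity, buy_price, sell_price):
    """Apply a simulated limit buy followed by a limit sell and return the balances."""
    quote_balance -= quantity * buy_price   # buy: spend quote currency, receive base
    base_balance += quantity
    base_balance -= quantity                # sell: spend base currency, receive quote
    quote_balance += quantity * sell_price
    return quote_balance, base_balance
# Example: starting with 1000 quote and 0 base, buying 1 unit at 100 and
# selling it at 105 returns (1005.0, 0.0).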
|
"""
raven.contrib.django.handlers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
from raven.handlers.logging import SentryHandler as BaseSentryHandler
class SentryHandler(BaseSentryHandler):
def __init__(self, level=logging.NOTSET):
logging.Handler.__init__(self, level=level)
def _get_client(self):
from raven.contrib.django.models import client
return client
client = property(_get_client)
def _emit(self, record):
request = getattr(record, 'request', None)
return super(SentryHandler, self)._emit(record, request=request)
|
# Hertz Mindlin Contact Model for DEM
# 14.12.2021 Deeksha Singh
#import matplotlib.pyplot as plt
import numpy as np
# Particle 1
class Particle1:
def __init__(self):
self.G = 793e8 # Shear Modulus in SI Units
self.E = 210e9 # Youngs's Modulus in SI Units
self.nu = 0.3 # Poisson's ratio
self.r = 0.01 # radius of particle
self.rho = 7800 # density of material
        self.v = np.array([10, 10])  # velocity vector
        self.x = np.array([0, 0])  # position vector
self.m = self.rho * (4 * 3.14 * (self.r) ** 3) / 3
# Particle 2
class Particle2:
def __init__(self):
self.G = 793e8
self.E = 210e9
self.nu = 0.3
self.r = 0.01
self.rho = 7800
self.v = np.array([0,-15]) # velocity vector
self.x = np.array([0.0142, 0.0142]) # Coordinate
self.m = self.rho* (4*3.14*(self.r)**3)/3
part1 = Particle1()
part2 = Particle2()
class CM_DEM:
def __init__(self, part1, part2):
self.r1 = part1.r
self.r2 = part2.r
self.m1 = part1.m
self.m2 = part2.m
self.G1 = part1.G
self.G2 = part2.G
self.E1 = part1.E
self.E2 = part2.E
self.rho1 = part1.rho
self.rho2 = part2.rho
self.nu1 = part1.nu
self.nu2 = part2.nu
self.v1 = part1.v
self.v2 = part2.v
self.x1 = part1.x
self.x2 = part2.x
def F(self):
timestep = 100
reff = 1/((1/self.r1)+(1/self.r2))
meff = 1/((1/self.m1)+(1/self.m2))
Geff = 1/(((1 - self.nu1 ** 2) / self.G1) + ((1 - self.nu2 ** 2) / self.G2))
Eff = 1/(((1-self.nu1**2)/self.E1) + ((1-self.nu2**2)/self.E2))
Trayleigh = (3.14*reff*(self.rho1/Geff)**(1/2))/(0.1631* self.nu1 + 0.8766)
dt = Trayleigh*timestep
print('dtr', dt)
beta = 1/(((3.14**2)+1)**(1/2))
z1 = self.x1 + dt * self.v1
z2 = self.x2 + dt * self.v2
print("z1", z1)
print("z2", z2)
        n = np.subtract(z1, z2) / np.linalg.norm(np.subtract(z1, z2))  # unit normal vector
print('n', n)
y = np.subtract(z1, z2)
#print('y', y)
overlap = (self.r1 + self.r2) - np.dot(y, n)
print ('overlap', overlap)
Sn = 2*Eff*(reff*overlap)**(1/2)
vrel = -np.dot(np.subtract(self.v1, self.v2), n)
print('relative vel', vrel)
Fc = (4/3) * Eff * (reff ** (1 / 2)) * (overlap ** (3 / 2))
print('Fc', Fc)
Fn = -2*(5/6)**(1/2)*beta*(Sn*meff)**(1/2)*vrel
print('Fn', Fn)
Ft = Fc+Fn
print('Ft', Ft)
dv = (Ft*dt)/meff
print('del rel vel ', dv)
cd = CM_DEM(part1,part2)
cd.F()
#temp = vars(cd)
#for item in temp:
# print(item, ':', temp[item])
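# --- Worked check (illustrative) ---
# F() combines the two particles through "effective" quantities such as
# reff = 1/(1/r1 + 1/r2) and meff = 1/(1/m1 + 1/m2) (Geff and Eeff additionally
# weight each term by 1 - nu**2). For two identical particles these reduce to
# half the single-particle values, which is a quick sanity check on the inputs
# defined above.
def _effective(a, b):
    """Effective quantity 1/(1/a + 1/b) as used for reff and meff in F()."""
    return 1.0 / (1.0 / a + 1.0 / b)
# _effective(part1.r, part2.r) == part1.r / 2  (both radii are 0.01 m)
# _effective(part1.m, part2.m) == part1.m / 2  (both masses are equal)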
|
import json
from datetime import datetime, timedelta
from opennem.api.stats.controllers import stats_factory
from opennem.api.stats.schema import DataQueryResult, OpennemData, OpennemDataSet
from opennem.api.time import human_to_interval, human_to_period
from opennem.core.networks import network_from_network_code
from opennem.core.units import get_unit
from opennem.utils.dates import is_valid_isodate
def get_power_example() -> OpennemDataSet:
network = network_from_network_code("NEM")
interval = human_to_interval("5m")
units = get_unit("power")
period = human_to_period("7d")
network_region_code = "NSW1"
test_rows = []
dt = datetime.fromisoformat("2021-01-15 10:00:00")
for ft in ["coal_black", "coal_brown"]:
for v in range(0, 3):
test_rows.append([dt, ft, v])
dt = dt + timedelta(minutes=5)
stats = [
DataQueryResult(interval=i[0], result=i[2], group_by=i[1] if len(i) > 1 else None)
for i in test_rows
]
assert len(stats) == 6, "Should have 6 stats"
result = stats_factory(
stats,
code=network_region_code or network.code,
network=network,
interval=interval,
period=period,
units=units,
region=network_region_code,
fueltech_group=True,
)
if not result:
raise Exception("Bad unit test data")
return result
def test_power_is_valid() -> None:
result = get_power_example()
assert isinstance(result, OpennemDataSet), "Returns a dataset"
def test_power_has_version() -> None:
result = get_power_example()
assert hasattr(result, "version"), "Result has version"
def test_power_has_created_date() -> None:
result = get_power_example()
assert hasattr(result, "created_at"), "Resut has created at attribute"
def test_power_has_network_id() -> None:
result = get_power_example()
assert hasattr(result, "network"), "Resut has network attribute"
assert result.network == "nem", "Correct network set"
def test_power_valid_created_date() -> None:
"""Test valid ISO creted date"""
result = get_power_example()
result_json = result.json(indent=4)
r = json.loads(result_json)
assert is_valid_isodate(r["created_at"]), "Created at is valid ISO date"
def test_power_has_data() -> None:
result = get_power_example()
assert hasattr(result, "data"), "Resultset has data"
# satisfy mypy
if not result.data:
raise Exception("Invalid test data")
data = result.data
assert len(data) == 2, "Has two data series"
def test_power_data_series() -> None:
result = get_power_example()
for data_set in result.data:
assert isinstance(data_set, OpennemData), "Data set is a valid data set schema"
assert hasattr(data_set, "id"), "Has an id"
assert hasattr(data_set, "type"), "Has a type attribute"
assert hasattr(data_set, "code"), "Has a code"
assert hasattr(data_set, "units"), "Has units attribute"
assert hasattr(data_set, "history"), "Has history"
# check history
assert hasattr(data_set.history, "start"), "Has a start date"
assert hasattr(data_set.history, "last"), "Has a last date"
assert hasattr(data_set.history, "interval"), "Has an interval"
interval = data_set.history.interval
assert interval == "5m", "Has the correct interval"
data_values = data_set.history.data
assert len(data_values) == 3, "Should have 3 values"
def test_power_returns_json() -> None:
result = get_power_example()
result_json = result.json(indent=4)
r = json.loads(result_json)
assert isinstance(r, dict), "JSON is a dict"
assert "version" in r, "Has a version string"
|
#!/usr/bin/env python
# Copyright (c) 2022 SMHI, Swedish Meteorological and Hydrological Institute.
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
"""
Created on 2022-02-03 13:40
@author: johannes
"""
from pathlib import Path
from odv_transformer import Session
if __name__ == "__main__":
s = Session()
arch_path = Path(
r'C:\Arbetsmapp\datasets\Profile\2021\SHARK_Profile_2021_COD_SMHI')
s.read(
str(arch_path),
reader='profile',
delivery_name=arch_path.name,
)
s.write(
writer='ices_profile',
delivery_name=arch_path.name,
file_name=arch_path.name.lower(),
)
|
import logging
import numpy as np
import torch
from torch import nn
from torchvision.ops.boxes import box_area
from cvpods.layers import ShapeSpec, cat, generalized_batched_nms
from cvpods.modeling.basenet import basenet
from cvpods.modeling.box_regression import Box2BoxTransformTrace
from cvpods.modeling.losses import sigmoid_focal_loss_jit
from cvpods.modeling.postprocessing import detector_postprocess
from cvpods.structures import Boxes, TraceList, Instances
from cvpods.utils import log_first_n
def permute_to_N_WA_K(tensor, K):
"""
Transpose/reshape a tensor from (N, (A x K), W) to (N, (WxA), K)
"""
assert tensor.dim() == 3, tensor.shape
N, _, W = tensor.shape
tensor = tensor.view(N, -1, K, W)
tensor = tensor.permute(0, 3, 1, 2)
tensor = tensor.reshape(N, -1, K) # Size=(N,WA,K)
return tensor
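# Shape sketch (illustrative): with N=2 traces, A=3 anchors, K=4 classes and
# W=16 positions, an input of shape (N, A*K, W) = (2, 12, 16) comes out as
# (N, W*A, K) = (2, 48, 4):
#   t = torch.zeros(2, 3 * 4, 16)
#   permute_to_N_WA_K(t, K=4).shape  # torch.Size([2, 48, 4])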
@basenet
class WFDetection(nn.Module):
"""
Implementation of WFDetection.
"""
def __init__(self, cfg):
super().__init__()
self.device = torch.device(cfg.MODEL.DEVICE)
# fmt: off
self.num_classes = cfg.MODEL.WFD.DECODER.NUM_CLASSES
self.in_features = cfg.MODEL.WFD.ENCODER.IN_FEATURES
self.pos_ignore_thresh = cfg.MODEL.WFD.POS_IGNORE_THRESHOLD
self.neg_ignore_thresh = cfg.MODEL.WFD.NEG_IGNORE_THRESHOLD
# Loss parameters:
self.focal_loss_alpha = cfg.MODEL.WFD.FOCAL_LOSS_ALPHA
self.focal_loss_gamma = cfg.MODEL.WFD.FOCAL_LOSS_GAMMA
# Inference parameters:
self.score_threshold = cfg.MODEL.WFD.SCORE_THRESH_TEST
self.topk_candidates = cfg.MODEL.WFD.TOPK_CANDIDATES_TEST
self.nms_threshold = cfg.MODEL.WFD.NMS_THRESH_TEST
self.nms_type = cfg.MODEL.NMS_TYPE
self.max_detections_per_trace = cfg.TEST.DETECTIONS_PER_IMAGE
# fmt: on
self.backbone = cfg.build_backbone(
cfg, input_shape=ShapeSpec(channels=1))
backbone_shape = self.backbone.output_shape()
feature_shapes = [backbone_shape[f] for f in self.in_features]
self.encoder = cfg.build_encoder(
cfg, backbone_shape
)
self.decoder = cfg.build_decoder(cfg)
self.anchor_generator = cfg.build_anchor_generator(cfg, feature_shapes)
self.box2box_transform = Box2BoxTransformTrace(
cfg.MODEL.WFD.BBOX_REG_WEIGHTS,
scale_clamp=cfg.MODEL.WFD.SCALE_CLAMP,
add_ctr_clamp=cfg.MODEL.WFD.ADD_CTR_CLAMP,
ctr_clamp=cfg.MODEL.WFD.CTR_CLAMP
)
self.matcher = validness_match(cfg.MODEL.WFD.MATCHER_TOPK)
self.to(self.device)
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one trace.
For now, each item in the list is a dict that contains:
* trace: Tensor, trace in (C, H, W) format.
* instances: Instances
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
dict[str: Tensor]:
mapping from a named loss to a tensor storing the loss. Used during training only.
"""
traces = self.preprocess_trace(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [
x["instances"].to(self.device) for x in batched_inputs
]
elif "targets" in batched_inputs[0]:
log_first_n(
logging.WARN,
"'targets' in the model inputs is now renamed to 'instances'!",
n=10)
gt_instances = [
x["targets"].to(self.device) for x in batched_inputs
]
else:
gt_instances = None
features = self.backbone(traces.tensor)
features = [features[f] for f in self.in_features]
box_cls, box_delta = self.decoder(self.encoder(features[0]))
box_delta_min = torch.min(box_delta)
box_delta_max = torch.max(box_delta)
# print(box_delta_min, box_delta_max)
anchors = self.anchor_generator(features)
# Transpose the Hi*Wi*A dimension to the middle:
pred_logits = [permute_to_N_WA_K(box_cls, self.num_classes)]
pred_anchor_deltas = [permute_to_N_WA_K(box_delta, 2)]
# pred_anchor_deltas = [permute_to_N_WA_K(box_delta, 4)]
if self.training:
indices = self.get_ground_truth(
anchors, pred_anchor_deltas, gt_instances)
losses = self.losses(
indices, gt_instances, anchors,
pred_logits, pred_anchor_deltas)
return losses
else:
indices = self.get_ground_truth(
anchors, pred_anchor_deltas, gt_instances)
losses = self.losses(
indices, gt_instances, anchors,
pred_logits, pred_anchor_deltas)
# print(losses)
results = self.inference(
[box_cls], [box_delta], anchors, traces.trace_sizes)
processed_results = []
for results_per_trace, input_per_trace, trace_size in zip(
results, batched_inputs, traces.trace_sizes):
height = input_per_trace.get("height", trace_size[0])
# height = 0
width = input_per_trace.get("width", trace_size[1])
r = detector_postprocess(results_per_trace, height, width)
processed_results.append({"instances": r})
return processed_results
def losses(self,
indices,
gt_instances,
anchors,
pred_class_logits,
pred_anchor_deltas):
pred_class_logits = cat(
pred_class_logits, dim=1).view(-1, self.num_classes)
pred_anchor_deltas = cat(pred_anchor_deltas, dim=1).view(-1, 2)
# pred_anchor_deltas = cat(pred_anchor_deltas, dim=1).view(-1, 4)
anchors = [Boxes.cat(anchors_i) for anchors_i in anchors]
N = len(anchors)
# list[Tensor(R, 4)], one for each trace
all_anchors = Boxes.cat(anchors).tensor
# Boxes(Tensor(N*R, 4))
predicted_boxes = self.box2box_transform.apply_deltas(
pred_anchor_deltas, all_anchors)
predicted_boxes = predicted_boxes.reshape(N, -1, 4)
ious = []
pos_ious = []
for i in range(N):
src_idx, tgt_idx = indices[i]
iou, _ = box_iou(predicted_boxes[i, ...],
gt_instances[i].gt_boxes.tensor)
if iou.numel() == 0:
max_iou = iou.new_full((iou.size(0),), 0)
else:
max_iou = iou.max(dim=1)[0]
a_iou, _ = box_iou(anchors[i].tensor,
gt_instances[i].gt_boxes.tensor)
if a_iou.numel() == 0:
pos_iou = a_iou.new_full((0,), 0)
else:
pos_iou = a_iou[src_idx, tgt_idx]
ious.append(max_iou)
pos_ious.append(pos_iou)
ious = torch.cat(ious)
ignore_idx = ious > self.neg_ignore_thresh
pos_ious = torch.cat(pos_ious)
pos_ignore_idx = pos_ious < self.pos_ignore_thresh
src_idx = torch.cat(
[src + idx * anchors[0].tensor.shape[0] for idx, (src, _) in
enumerate(indices)])
gt_classes = torch.full(pred_class_logits.shape[:1],
self.num_classes,
dtype=torch.int64,
device=pred_class_logits.device)
gt_classes[ignore_idx] = -1
target_classes_o = torch.cat(
[t.gt_classes[J] for t, (_, J) in zip(gt_instances, indices)])
target_classes_o[pos_ignore_idx] = -1
# num_pos_ignore_idx = pos_ignore_idx.sum()
gt_classes[src_idx] = target_classes_o
valid_idxs = gt_classes >= 0
foreground_idxs = (gt_classes >= 0) & (gt_classes != self.num_classes)
num_foreground = foreground_idxs.sum()
gt_classes_target = torch.zeros_like(pred_class_logits)
gt_classes_target[foreground_idxs, gt_classes[foreground_idxs]] = 1
num_foreground = num_foreground * 1.0
# cls loss
loss_cls = sigmoid_focal_loss_jit(
pred_class_logits[valid_idxs],
gt_classes_target[valid_idxs],
alpha=self.focal_loss_alpha,
gamma=self.focal_loss_gamma,
reduction="sum",
)
# reg loss
target_boxes = torch.cat(
[t.gt_boxes.tensor[i] for t, (_, i) in zip(gt_instances, indices)],
dim=0)
target_boxes = target_boxes[~pos_ignore_idx]
matched_predicted_boxes = predicted_boxes.reshape(-1, 4)[
src_idx[~pos_ignore_idx]]
loss_box_reg = (1 - torch.diag(generalized_box_iou(
matched_predicted_boxes, target_boxes))).sum()
return {
"loss_cls": loss_cls / max(1, num_foreground),
"loss_box_reg": loss_box_reg / max(1, num_foreground),
}
@torch.no_grad()
def get_ground_truth(self, anchors, bbox_preds, targets):
anchors = [Boxes.cat(anchors_i) for anchors_i in anchors]
N = len(anchors)
# list[Tensor(R, 4)], one for each trace
all_anchors = Boxes.cat(anchors).tensor.reshape(N, -1, 4)
# Boxes(Tensor(N*R, 4))
box_delta = cat(bbox_preds, dim=1)
# box_pred: xyxy; targets: xyxy
box_pred = self.box2box_transform.apply_deltas(box_delta, all_anchors)
indices = self.matcher(box_pred, all_anchors, targets)
return indices
def inference(self, box_cls, box_delta, anchors, trace_sizes):
"""
Arguments:
box_cls, box_delta: Same as the output of :meth:`WFDHead.forward`
anchors (list[list[Boxes]]): a list of #traces elements. Each is a
list of #feature level Boxes. The Boxes contain anchors of this
trace on the specific feature level.
trace_sizes (List[torch.Size]): the input trace sizes
Returns:
results (List[Instances]): a list of #traces elements.
"""
assert len(anchors) == len(trace_sizes)
results = []
box_cls = [permute_to_N_WA_K(x, self.num_classes) for x in box_cls]
# box_delta = [permute_to_N_WA_K(x, 4) for x in box_delta]
box_delta = [permute_to_N_WA_K(x, 2) for x in box_delta]
# list[Tensor], one per level, each has shape (N, Hi x Wi x A, K or 4)
for trc_idx, anchors_per_trace in enumerate(anchors):
trace_size = trace_sizes[trc_idx]
box_cls_per_trace = [
box_cls_per_level[trc_idx] for box_cls_per_level in box_cls
]
box_reg_per_trace = [
box_reg_per_level[trc_idx] for box_reg_per_level in box_delta
]
results_per_trace = self.inference_single_trace(
box_cls_per_trace, box_reg_per_trace, anchors_per_trace,
tuple(trace_size))
results.append(results_per_trace)
return results
def inference_single_trace(self, box_cls, box_delta, anchors, trace_size):
"""
Single-trace inference. Return bounding-box detection results by thresholding
on scores and applying non-maximum suppression (NMS).
Arguments:
box_cls (list[Tensor]): list of #feature levels. Each entry contains
tensor of size (H x W x A, K)
box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4.
anchors (list[Boxes]): list of #feature levels. Each entry contains
a Boxes object, which contains all the anchors for that
trace in that feature level.
trace_size (tuple(H, W)): a tuple of the trace height and width.
Returns:
Same as `inference`, but for only one trace.
"""
boxes_all = []
scores_all = []
class_idxs_all = []
# Iterate over every feature level
for box_cls_i, box_reg_i, anchors_i in zip(box_cls, box_delta,
anchors):
# (HxWxAxK,)
box_cls_i = box_cls_i.flatten().sigmoid_()
# Keep top k top scoring indices only.
num_topk = min(self.topk_candidates, box_reg_i.size(0))
# torch.sort is actually faster than .topk (at least on GPUs)
predicted_prob, topk_idxs = box_cls_i.sort(descending=True)
predicted_prob = predicted_prob[:num_topk]
topk_idxs = topk_idxs[:num_topk]
# filter out the proposals with low confidence score
keep_idxs = predicted_prob > self.score_threshold
predicted_prob = predicted_prob[keep_idxs]
topk_idxs = topk_idxs[keep_idxs]
anchor_idxs = topk_idxs // self.num_classes
classes_idxs = topk_idxs % self.num_classes
box_reg_i = box_reg_i[anchor_idxs]
anchors_i = anchors_i[anchor_idxs]
# predict boxes
predicted_boxes = self.box2box_transform.apply_deltas(
box_reg_i, anchors_i.tensor)
boxes_all.append(predicted_boxes)
scores_all.append(predicted_prob)
class_idxs_all.append(classes_idxs)
boxes_all, scores_all, class_idxs_all = [
cat(x) for x in [boxes_all, scores_all, class_idxs_all]
]
keep = generalized_batched_nms(boxes_all, scores_all, class_idxs_all,
self.nms_threshold, nms_type=self.nms_type)
keep = keep[:self.max_detections_per_trace]
result = Instances(trace_size)
result.pred_boxes = Boxes(boxes_all[keep])
result.scores = scores_all[keep]
result.pred_classes = class_idxs_all[keep]
return result
def preprocess_trace(self, batched_inputs):
"""
Normalize, pad and batch the input traces.
"""
traces = [x["trace"].to(self.device) for x in batched_inputs]
# traces = [(x - self.pixel_mean) / self.pixel_std for x in traces]
traces = TraceList.from_tensors(traces,
self.backbone.size_divisibility)
return traces
def _inference_for_ms_test(self, batched_inputs):
"""
        Function used for multi-scale testing; will be refactored in the future.
        Takes the same input as the `forward` function.
"""
assert not self.training, "inference mode with training=True"
assert len(batched_inputs) == 1, "inference trace number > 1"
traces = self.preprocess_trace(batched_inputs)
features = self.backbone(traces.tensor)
features = [features[f] for f in self.in_features]
        # NOTE: this model defines no `self.head`; mirror forward() and run the
        # encoder/decoder pair on the first feature level instead.
        box_cls, box_delta = self.decoder(self.encoder(features[0]))
        anchors = self.anchor_generator(features)
        results = self.inference([box_cls], [box_delta], anchors, traces.trace_sizes)
for results_per_trace, input_per_trace, trace_size in zip(
results, batched_inputs, traces.trace_sizes
):
height = input_per_trace.get("height", trace_size[0])
width = input_per_trace.get("width", trace_size[1])
processed_results = detector_postprocess(results_per_trace, height, width)
return processed_results
def box_xyxy_to_cxcywh(x):
x0, y0, x1, y1 = x.unbind(-1)
b = [(x0 + x1) / 2, (y0 + y1) / 2,
(x1 - x0), (y1 - y0)]
return torch.stack(b, dim=-1)
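# Example (illustrative): an axis-aligned box from (0, 0) to (4, 2) has
# centre (2, 1), width 4 and height 2:
#   box_xyxy_to_cxcywh(torch.tensor([0., 0., 4., 2.]))  # tensor([2., 1., 4., 2.])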
class validness_match(nn.Module):
def __init__(self, match_times: int = 4):
super().__init__()
self.match_times = match_times
@torch.no_grad()
def forward(self, pred_boxes, anchors, targets):
bs, num_queries = pred_boxes.shape[:2]
# We flatten to compute the cost matrices in a batch
# [batch_size * num_anchors, 4]
out_bbox = pred_boxes.flatten(0, 1)
anchors = anchors.flatten(0, 1)
# Also concat the target boxes
tgt_bbox = torch.cat([v.gt_boxes.tensor for v in targets])
# Compute the L1 cost between boxes
# Note that we use anchors and predict boxes both
cost_bbox = torch.cdist(
box_xyxy_to_cxcywh(out_bbox), box_xyxy_to_cxcywh(tgt_bbox), p=1)
cost_bbox_anchors = torch.cdist(
box_xyxy_to_cxcywh(anchors), box_xyxy_to_cxcywh(tgt_bbox), p=1)
# Final cost matrix
C = cost_bbox
C = C.view(bs, num_queries, -1).cpu()
C1 = cost_bbox_anchors
C1 = C1.view(bs, num_queries, -1).cpu()
sizes = [len(v.gt_boxes.tensor) for v in targets]
all_indices_list = [[] for _ in range(bs)]
# positive indices when matching predict boxes and gt boxes
indices = [
tuple(
torch.topk(
c[i],
k=self.match_times,
dim=0,
largest=False)[1].numpy().tolist()
)
for i, c in enumerate(C.split(sizes, -1))
]
# positive indices when matching anchor boxes and gt boxes
indices1 = [
tuple(
torch.topk(
c[i],
k=self.match_times,
dim=0,
largest=False)[1].numpy().tolist())
for i, c in enumerate(C1.split(sizes, -1))]
# concat the indices according to image ids
for trc_id, (idx, idx1) in enumerate(zip(indices, indices1)):
trc_idx_i = [
np.array(idx_ + idx1_)
for (idx_, idx1_) in zip(idx, idx1)
]
trc_idx_j = [
np.array(list(range(len(idx_))) + list(range(len(idx1_))))
for (idx_, idx1_) in zip(idx, idx1)
]
all_indices_list[trc_id] = [*zip(trc_idx_i, trc_idx_j)]
# re-organize the positive indices
all_indices = []
for trc_id in range(bs):
all_idx_i = []
all_idx_j = []
for idx_list in all_indices_list[trc_id]:
idx_i, idx_j = idx_list
all_idx_i.append(idx_i)
all_idx_j.append(idx_j)
all_idx_i = np.hstack(all_idx_i)
all_idx_j = np.hstack(all_idx_j)
all_indices.append((all_idx_i, all_idx_j))
return [
(torch.as_tensor(i, dtype=torch.int64),
torch.as_tensor(j, dtype=torch.int64))
for i, j in all_indices
]
def box_iou(boxes1, boxes2):
area1 = box_area(boxes1)
area2 = box_area(boxes2)
lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
wh = (rb - lt).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
union = area1[:, None] + area2 - inter
iou = inter / union
return iou, union
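# Example (illustrative): two 2x2 boxes that overlap over half their area
# have intersection 2 and union 6, hence IoU 1/3:
#   box_iou(torch.tensor([[0., 0., 2., 2.]]), torch.tensor([[1., 0., 3., 2.]]))
#   # -> iou = tensor([[0.3333]]), union = tensor([[6.]])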
def generalized_box_iou(boxes1, boxes2):
"""
Generalized IoU from https://giou.stanford.edu/
The boxes should be in [x0, y0, x1, y1] format
Returns a [N, M] pairwise matrix, where N = len(boxes1)
and M = len(boxes2)
"""
# degenerate boxes gives inf / nan results
# so do an early check
assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
iou, union = box_iou(boxes1, boxes2)
lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
wh = (rb - lt).clamp(min=0) # [N,M,2]
area = wh[:, :, 0] * wh[:, :, 1]
return iou - (area - union) / area
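# Example (illustrative): for two disjoint unit boxes [0, 0, 1, 1] and
# [2, 0, 3, 1], the IoU is 0, the union area is 2 and the enclosing box area
# is 3, so the GIoU is 0 - (3 - 2) / 3 = -1/3. GIoU therefore still provides a
# useful signal when boxes do not overlap, which is why the regression loss
# above is computed as 1 - diag(generalized_box_iou(...)).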
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Long range SSH Hamiltonian
Created on Mon Jul 31 10:20:50 2017
@author: Alexandre Dauphin
"""
import numpy as np
def ham(j1, j2, j3, j4, w1, w2, w3, w4, n):
    """Build the 2*n x 2*n long-range SSH Hamiltonian.

    j1, j2 -- alternating nearest-neighbour hoppings
    j3, j4 -- alternating third-neighbour (long-range) hoppings
    w1..w4 -- disorder strengths on the corresponding hoppings
    """
    # nearest-neighbour hoppings, alternating j1/j2
    vec1 = j1*np.ones(2*n-1)
    vec1[1:2*n-1:2] = j2
    # third-neighbour hoppings, alternating j3/j4
    vec2 = np.ones(2*n-3)*j3
    vec2[1:2*n-1:2] = j4
    # nearest-neighbour hopping disorder, drawn uniformly and scaled by w1/w2
    vecdis = (2*np.random.random(2*n-1)-1)/2.
    vecdis[0:2*n:2] = vecdis[0:2*n:2]*w1
    vecdis[1:2*n-1:2] = vecdis[1:2*n-1:2]*w2
    # long-range hopping disorder, scaled by w3/w4
    vecdislr = (2*np.random.random(2*n-3)-1)/2.
    vecdislr[0:2*n-3:2] = vecdislr[0:2*n-3:2]*w3
    vecdislr[1:2*n-3:2] = vecdislr[1:2*n-3:2]*w4
mat = np.diag(vec1, k=1) + np.diag(vec1, k=-1) + np.diag(vecdis, k=1) + \
np.diag(vecdis, k=-1) + np.diag(vec2, k=3) + np.diag(vec2, k=-3) + \
np.diag(vecdislr, k=3) + np.diag(vecdislr, k=-3)
return mat
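# Usage sketch (not part of the original module): build a clean chain with all
# disorder strengths set to zero and diagonalize it. Since only odd-neighbour
# couplings appear, the model is chiral and the spectrum should come in +E/-E pairs.
#   h = ham(j1=1.0, j2=0.5, j3=0.2, j4=0.1, w1=0, w2=0, w3=0, w4=0, n=20)
#   energies = np.linalg.eigvalsh(h)  # sorted, symmetric about zero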
|