| repo_name (string, 6–97 chars) | path (string, 3–341 chars) | text (string, 8–1.02M chars) |
|---|---|---|
bermeom/quadruped-robot
|
learning/sources/source_gym_cartpole.py
|
from sources.source_gym import source_gym
import cv2
import numpy as np
##### SOURCE GYM CARTPOLE
class source_gym_cartpole( source_gym ):
### __INIT__
def __init__( self ):
source_gym.__init__( self , 'CartPole-v0' )
### INFORMATION
def num_actions( self ): return self.env.action_space.n
### MAP KEYS
def map_keys( self , actn ):
if actn[0] : return 0
if actn[1] : return 1
### MOVE ONE STEP
def move( self , actn ):
obsv , rewd , done, info = self.env.step( self.map_keys( actn ) )
if done: rewd -= 50
if self.render: self.env.render()
return self.process( obsv ) , rewd , done
### PROCESS OBSERVATION
def process( self , obsv ):
return obsv
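# Hedged usage sketch (illustrative only): assumes the source_gym base class,
# not shown in this file, sets self.env and self.render as used by move() above.
if __name__ == '__main__':
    src = source_gym_cartpole()
    src.render = False                      # assumed attribute read by move()
    obsv = src.process(src.env.reset())
    done, total = False, 0.0
    while not done:
        actn = np.zeros(src.num_actions())  # one-hot action, as expected by map_keys()
        actn[np.random.randint(src.num_actions())] = 1
        obsv, rewd, done = src.move(actn)
        total += rewd
    print('episode reward:', total)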
|
bermeom/quadruped-robot
|
learning/players_reinforcement/player_A2C_1A.py
|
from players_reinforcement.player_A2C_1 import *
import tensorflow as tf
# PLAYER A2C
class player_A2C_1A( player_A2C_1 ):
BATCH_SIZE = 50
NUM_FRAMES = 3
C_LEARNING_RATE = 1e-4
A_LEARNING_RATE = 1e-5
REWARD_DISCOUNT = 0.99
EXPERIENCES_LEN = 1e6
STEPS_BEFORE_TRAIN = 500
### __INIT__
def __init__( self ):
player_A2C_1.__init__( self )
# PROCESS OBSERVATION
def process(self, obsv):
return np.stack( tuple( self.obsv_list[i] for i in range( self.NUM_FRAMES ) ), axis = 1 )
# PREPARE NETWORK
def network( self ):
# Input Placeholder
self.brain.addInput( shape = [ None, self.obsv_shape[0], self.NUM_FRAMES ],
name = 'Observation')
# Critic -------------------------------------------------------------
# Fully Connected Layers
self.brain.setLayerDefaults( type = tb.layers.fully,
activation = tb.activs.elu,
weight_stddev = 0.01,
bias_stddev = 0.01 )
self.brain.addLayer( input = 'Observation', out_channels = 512 )
self.brain.addLayer( out_channels = 512 )
self.brain.addLayer( out_channels = 1, activation = None ,name = 'Value' )
# Actor --------------------------------------------------------------
# Fully Connected Layers
self.brain.setLayerDefaults( type = tb.layers.fully,
activation = tb.activs.elu,
weight_stddev = 0.01,
bias_stddev = 0.01 )
self.brain.addLayer( input = 'Observation', out_channels = 512 )
self.brain.addLayer( out_channels = 512 )
self.brain.addLayer( out_channels = self.num_actions, activation = tb.activs.softmax, name = 'Output' )
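# Hedged shape illustration of process() above (runs only if this module is
# executed directly): stacking NUM_FRAMES observations of shape (obsv_dim,)
# along axis 1 yields an (obsv_dim, NUM_FRAMES) array, matching the
# 'Observation' placeholder shape [None, obsv_dim, NUM_FRAMES]. Values are made up.
if __name__ == '__main__':
    frames = [np.ones(4) * i for i in range(3)]   # three fake observations of dim 4
    print(np.stack(tuple(frames), axis=1).shape)  # -> (4, 3)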
|
bermeom/quadruped-robot
|
tensorblock/recipe/recipe_block.py
|
class recipe_block:
###### Repeat Block
def repBlock( self , **args ):
block_pars = { **self.defs_block , **args }
src , dst , type = block_pars['src'] , block_pars['dst'] , block_pars['type']
list_src = src.split( '/' )
src = self.iterate( list_src )
src = src[ list_src[-1] ]
list_dst = dst.split( '/' )
dst = self.iterate( list_dst )
dst.addBlock( list_dst[-1] )
dst = dst[ list_dst[-1] ]
for label , name in src.order:
if label == 'block':
self.repeat( src = '/' + src.folder + name ,
dst = '/' + dst.folder + name , type = type )
if label == 'input':
pars = src.pars( name ).copy()
if type == 'share' and block_pars['mod_inputs']:
pars[type] = '/' + src.folder + pars['name']
dst.addInput( **pars )
if label == 'variable':
pars = src.pars( name ).copy()
if type is not None and block_pars['mod_variables']:
pars[type] = '/' + src.folder + pars['name']
dst.addVariable( **pars )
if label == 'layer':
pars = src.pars( name ).copy()
if type is not None and block_pars['mod_layers']:
pars[type] = '/' + src.folder + pars['name']
dst.addLayer( **pars )
if label == 'operation' and not block_pars['no_ops']:
pars = src.pars( name ).copy()
dst.addOperation( **pars )
return dst
###### Copy Block
def copyBlock( self , **args ):
pars = { **self.defs_block , **args } ; pars['type'] = 'copy'
return self.repBlock( **pars )
###### Share Block
def shareBlock( self , **args ):
pars = { **self.defs_block , **args } ; pars['type'] = 'share'
return self.repBlock( **pars )
|
bermeom/quadruped-robot
|
learning/players_reinforcement/player_A2C_1.py
|
from players_reinforcement.player import player
from auxiliar.aux_plot import *
import tensorflow as tf
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
# PLAYER A2C
class player_A2C_1(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.experiences = deque()
self.num_stored_obsv = self.NUM_FRAMES
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate( state )
# CALCULATE NETWORK
def calculate(self, state):
output = np.squeeze( self.brain.run( 'Output', [ [ 'Observation', [state] ] ] ) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action( action )
# PREPARE NETWORK
def operations(self):
# Action Placeholders
self.brain.addInput( shape = [ None, self.num_actions ], name = 'Actions' )
self.brain.addInput( shape = [ None, 1 ], name = 'Advantage' )
# Operations
# Critic
self.brain.addOperation( function = tb.ops.mean_squared_error,
input = ['Value','Advantage'],
name = 'CriticCost')
self.brain.addOperation( function = tb.optims.adam,
input = 'CriticCost',
learning_rate = self.C_LEARNING_RATE,
name = 'CriticOptimizer' )
# Actor
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Advantage' ],
name = 'ActorCost' )
self.brain.addOperation( function = tb.optims.adam,
input = 'ActorCost',
learning_rate = self.A_LEARNING_RATE,
summary = 'Summary',
writer = 'Writer',
name = 'ActorOptimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'ActorCost' )
self.brain.addWriter( name = 'Writer' , dir = '../' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def train(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience
self.experiences.append( ( prev_state , curr_state , actn , rewd , done ) )
if len( self.experiences ) > self.EXPERIENCES_LEN: self.experiences.popleft()
# Check for Train
if len( self.experiences ) > self.STEPS_BEFORE_TRAIN and self.BATCH_SIZE > 0:
# Select Random Batch
batch = random.sample( self.experiences , self.BATCH_SIZE )
# Separate Batch Data
prev_states = [d[0] for d in batch]
curr_states = [d[1] for d in batch]
actions = [d[2] for d in batch]
rewards = [d[3] for d in batch]
dones = [d[4] for d in batch]
# States Value
prev_values = np.squeeze(self.brain.run( 'Value' , [ [ 'Observation' , prev_states ] ] ) )
next_values = np.squeeze(self.brain.run( 'Value' , [ [ 'Observation' , curr_states ] ] ) )
# Calculate TD Targets and TD Errors
td_targets = []
td_errors = []
for i in range( len(rewards) ):
if dones[i]:
td_targets.append ( rewards[i] )
td_errors.append ( td_targets[i] - prev_values[i] )
else:
td_targets.append ( rewards[i] + self.REWARD_DISCOUNT * next_values[i] )
td_errors.append ( td_targets[i] - prev_values[i] )
td_targets = np.expand_dims( td_targets, 1 )
td_errors = np.expand_dims( td_errors, 1 )
# Optimize Neural Network
_, = self.brain.run( ['CriticOptimizer'], [ [ 'Observation', prev_states ],
[ 'Advantage', td_targets ] ] )
_, c, summary = self.brain.run( [ 'ActorOptimizer','ActorCost','Summary' ], [ [ 'Observation', prev_states ],
[ 'Actions', actions ],
[ 'Advantage', td_errors ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
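# Hedged, numpy-only illustration of the targets built in train() above:
# td_target = r + REWARD_DISCOUNT * V(s') for non-terminal steps, td_target = r
# at terminals, and td_error = td_target - V(s). The helper name is illustrative
# only and is not part of the player interface.
def _td_targets_and_errors(rewards, prev_values, next_values, dones, discount=0.99):
    targets, errors = [], []
    for r, v_prev, v_next, d in zip(rewards, prev_values, next_values, dones):
        target = r if d else r + discount * v_next
        targets.append(target)
        errors.append(target - v_prev)
    return np.expand_dims(targets, 1), np.expand_dims(errors, 1)
# Example: _td_targets_and_errors([1.0, 1.0], [0.5, 0.6], [0.7, 0.0], [False, True])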
|
bermeom/quadruped-robot
|
learning/players_reinforcement/player_PPO_2A.py
|
from players_reinforcement.player_PPO_2 import *
# PLAYER PPO
class player_PPO_2A( player_PPO_2 ):
NUM_FRAMES = 3
LEARNING_RATE = 3e-4
UPDATE_SIZE = 5
BATCH_SIZE = 256
EPSILON = 0.2
GAMMA = 0.99
LAM = 0.95
rgb = 1 # number of color channels: 1 if black and white (grayscale)
### __INIT__
def __init__( self ):
player_PPO_2.__init__( self )
# PROCESS OBSERVATION
def process(self, obsv):
obsv = np.stack( tuple( self.obsv_list[i] for i in range( self.NUM_FRAMES ) ), axis = -1 )
if self.rgb > 1: obsv = obsv.reshape(-1,self.obsv_shape[0],self.obsv_shape[1],self.NUM_FRAMES * self.rgb)[0]
return obsv
### PREPARE NETWORK
def network( self ):
# Critic
Critic = self.brain.addBlock( 'Critic' )
Critic.addInput( shape = [ None, self.obsv_shape[0], self.obsv_shape[1], self.NUM_FRAMES * self.rgb ],
name = 'Observation' )
Critic.setLayerDefaults( type = tb.layers.conv2d,
activation = tb.activs.relu,
pooling = 2,
weight_stddev = 0.01,
bias_stddev = 0.01 )
Critic.addLayer( out_channels = 32, ksize = 8, strides = 4, input = 'Observation' )
Critic.addLayer( out_channels = 64, ksize = 4, strides = 2 )
Critic.addLayer( out_channels = 64, ksize = 3, strides = 1 )
Critic.setLayerDefaults( type = tb.layers.fully,
activation = tb.activs.relu )
Critic.addLayer( out_channels = 512 )
Critic.addLayer( out_channels = 1, name = 'Value', activation = None )
# Actor
Actor = self.brain.addBlock( 'Actor' )
Actor.addInput( shape = [ None, self.obsv_shape[0], self.obsv_shape[1], self.NUM_FRAMES * self.rgb ],
name = 'Observation' )
Actor.setLayerDefaults( type = tb.layers.conv2d,
activation = tb.activs.relu,
pooling = 2,
weight_stddev = 0.01,
bias_stddev = 0.01 )
Actor.addLayer( out_channels = 32, ksize = 8, strides = 4, input = 'Observation' )
Actor.addLayer( out_channels = 64, ksize = 4, strides = 2 )
Actor.addLayer( out_channels = 64, ksize = 3, strides = 1 )
Actor.setLayerDefaults( type = tb.layers.fully,
activation = tb.activs.relu )
Actor.addLayer( out_channels = 512, name = 'Hidden' )
Actor.addLayer( out_channels = self.num_actions , input = 'Hidden', activation = None, name = 'Mu')
Actor.addLayer( out_channels = self.num_actions , input = 'Hidden', activation = tb.activs.softplus, name = 'Sigma', activation_pars = 0.5 )
Actor.addLayer( out_channels = self.num_actions , input = 'Hidden', activation = tb.activs.softmax, name = 'Discrete' )
mu = Actor.tensor( 'Mu' )
sigma = Actor.tensor( 'Sigma' )
dist = tb.extras.dist_normal( mu, sigma )
action = dist.sample( 1 )
Actor.addInput( tensor = action, name = 'Output')
# OldActor
Old = self.brain.addBlock( 'Old' )
Old.addInput( shape = [ None, self.obsv_shape[0], self.obsv_shape[1], self.NUM_FRAMES * self.rgb ],
name = 'Observation' )
Old.setLayerDefaults( type = tb.layers.conv2d,
activation = tb.activs.relu,
pooling = 2,
weight_stddev = 0.01,
bias_stddev = 0.01 )
Old.addLayer( out_channels = 32, ksize = 8, strides = 4, input = 'Observation' )
Old.addLayer( out_channels = 64, ksize = 4, strides = 2 )
Old.addLayer( out_channels = 64, ksize = 3, strides = 1 )
Old.setLayerDefaults( type = tb.layers.fully,
activation = tb.activs.relu )
Old.addLayer( out_channels = 512, name = 'Hidden' )
Old.addLayer( out_channels = self.num_actions , input = 'Hidden', activation = None, name = 'Mu')
Old.addLayer( out_channels = self.num_actions , input = 'Hidden', activation = tb.activs.softplus, name = 'Sigma', activation_pars = 0.5 )
Old.addLayer( out_channels = self.num_actions , input = 'Hidden', activation = tb.activs.softmax, name = 'Discrete' )
|
bermeom/quadruped-robot
|
tensorblock/recipe/recipe_base.py
|
import tensorflow as tf
import tensorblock as tb
class recipe_base:
####### Get Collection
def collection( self , name = None ):
if name is None: name = self.folder
return tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES , name )
####### Get Tag
def tag( self , name ):
list = name.split( '/' )
if len( list ) == 1 :
return self.labels[ name ][0]
else:
ptr = self.iterate( list )
return ptr.tag( list[-1] )
####### Get Extra
def info( self , name ):
return self.data( name , get_info = True )
####### Get Data
def data( self , name , get_info = False ):
if not isinstance( name , str ):
return [ name , None ]
list = name.split( '/' )
if len( list ) == 1 :
tag , idx = self.labels[ name ]
if tag == 'input': return self.inputs[ idx ]
if tag == 'weight': return self.weights[ idx ]
if tag == 'bias': return self.biases[ idx ]
if tag == 'variable': return self.variables[ idx ]
if tag == 'operation': return self.operations[ idx ]
if tag == 'block': return self.blocks[ idx ]
if tag == 'summary': return self.summaries[ idx ]
if tag == 'writer': return self.writers[ idx ]
if tag == 'saver': return self.savers[ idx ]
if tag == 'plotter': return self.plotters[ idx ]
if tag == 'layer':
if get_info: return self.extras[ idx ]
else: return self.layers[ idx ]
if tag == 'dropout':
return self.root.dropouts[ idx ]
return None
else:
ptr = self.iterate( list )
return ptr.data( list[-1] )
####### Get Parameters
def pars( self , name ):
return self.data( name )[1]
####### Get Shape
def shape( self , name ):
node = self.node( name )
if isinstance( node , list ): node = node[0]
return tb.aux.tf_shape( node )
####### Get Tensor
def node( self , name ):
return self.data( name )[0]
####### Get Tensor
def tensor( self , names ):
tensors = self.tensor_list( names )
if not isinstance( names , list ) and names[-1] != '/': return tensors[0]
else: return tensors
####### Get Tensor List
def tensor_list( self , names ):
if not isinstance( names , list ):
tensors = [ self.tensor_expanded( names ) ]
else:
tensors = []
for name in names:
tensors.append( self.tensor_expanded( name ) )
return tb.aux.flatten_list( tensors )
####### Get Block Tensors
def tensors_block( self ):
tensors = []
for label in self.labels:
if self.tag( label ) == 'weight' and self.pars( label )['weight_type'] is not None:
tensors.append( self.node( label ) )
if self.tag( label ) == 'bias' and self.pars( label )['bias_type' ] is not None:
tensors.append( self.node( label ) )
if self.tag( label ) == 'block':
tensors.append( self.block( label ).tensors_block() )
return tb.aux.flatten_list( tensors )
####### Get Tensor Expanded
def tensor_expanded( self , name ):
if not isinstance( name , str ):
return name
expand = name[-1] == '/'
if not expand: return self.node( name )
else: name = name[:-1]
if self.tag( name ) == 'layer':
pars = self.pars( name ).copy()
tensors = []
if pars['weight_type'] is not None:
tensors.append( self.node( pars['folder'] + pars['weight_name'] ) )
if pars['bias_type'] is not None:
tensors.append( self.node( pars['folder'] + pars['bias_name' ] ) )
return tensors
elif self.tag( name ) == 'block':
return self.block( name ).tensors_block()
else:
return [ self.node( name ) ]
####### Get Block
def block( self , name ):
list = name.split( '/' )
if len( list ) == 1 : return self.tensor( name )
else: return self.iterate( list )[ list[-1] ]
####### Get Block
def __getitem__( self , name ):
return self.block( name )
####### Iterate
def iterate( self , list ):
if list[0] == '':
ptr = self.root
else:
if list[0][0] == '.':
ptr = self.root
back = self.folder.split( '/' )
for i in range( len( back ) - len( list[0] ) ):
if back[i] != '' : ptr = ptr[ back[i] ]
list = list[1:]
else:
ptr = self
for i in range( len( list ) - 1 ):
if list[i] != '' : ptr = ptr[ list[i] ]
return ptr
####### Add Label
def add_label( self , list , string , name , add_order ):
idx = len( list )
if name is None: name = string + '_' + str( idx )
self.labels[ name ] = ( string.lower() , idx )
if add_order:
self.order.append( [ string.lower() , name ] )
return name
|
bermeom/quadruped-robot
|
learning/players_reinforcement/player_dql_bayesian_2A.py
|
from players_reinforcement.player_dql_bayesian_2 import *
# PLAYER DQL BAYESIAN
class player_dql_bayesian_2A( player_dql_bayesian_2 ):
NUM_FRAMES = 2
BATCH_SIZE = 512
LEARNING_RATE = 3e-4
REWARD_DISCOUNT = 0.99
START_RANDOM_PROB = 1.00
FINAL_RANDOM_PROB = 0.05
NUM_EXPLORATION_EPISODES = 200
EXPERIENCES_LEN = 100000
STEPS_BEFORE_TRAIN = 1000
### __INIT__
def __init__( self ):
player_dql_bayesian_2.__init__( self )
# PROCESS OBSERVATION
def process(self, obsv):
return np.stack( tuple( self.obsv_list[i] for i in range(self.NUM_FRAMES) ), axis=2 )
# PREPARE NETWORK
def network(self):
# Input Placeholder
self.brain.addInput(shape=[None, self.obsv_shape[0], self.obsv_shape[1], self.NUM_FRAMES],
name='Observation')
# Convolutional Layers
self.brain.setLayerDefaults( type = tb.layers.conv2d,
activation = tb.activs.relu,
pooling = 2,
weight_stddev = 0.01,
bias_stddev = 0.01)
self.brain.addLayer( out_channels=32, ksize=8, strides=4, input='Observation' )
self.brain.addLayer( out_channels=64, ksize=4, strides=2 )
self.brain.addLayer( out_channels=64, ksize=3, strides=1 )
# Fully Connected Layers
self.brain.setLayerDefaults( type = tb.layers.fully,
activation = tb.activs.relu )
self.brain.addLayer( out_channels = 256,
dropout = True,
dropout_name = 'Drop')
self.brain.addLayer( out_channels = self.num_actions,
activation = None,
name = 'Output')
|
bermeom/quadruped-robot
|
learning/players_reinforcement/player_reinforce_rnn_1A.py
|
from players_reinforcement.player_reinforce_rnn_1 import *
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_1A( player_reinforce_rnn_1 ):
NUM_FRAMES = 3
LEARNING_RATE = 1e-4
REWARD_DISCOUNT = 0.99
### __INIT__
def __init__( self ):
player_reinforce_rnn_1.__init__( self )
# PREPARE NETWORK
def network( self ):
# Input Placeholder
self.brain.addInput( shape = [ None, self.NUM_FRAMES, self.obsv_shape[0] ],
name = 'Observation' )
# RNN Layers
self.brain.addLayer( type = tb.layers.rnn,
cell_type = 'LSTM',
num_cells = 1,
out_channels = 64,
name = 'RNN')
# Fully Connected Layers
self.brain.setLayerDefaults( type = tb.layers.fully,
weight_stddev = 0.01,
bias_stddev = 0.01 )
self.brain.addLayer( type = tb.layers.fully,
out_channels = 32 ,
activation = tb.activs.relu )
self.brain.addLayer( out_channels = self.num_actions,
activation = tb.activs.softmax,
name = 'Output' )
|
bermeom/quadruped-robot
|
tensorblock/functions/func_activations.py
|
import tensorflow as tf
### ELU Activation
def elu( x ):
return tf.nn.elu( x )
### ReLU Activation
def relu( x ):
return tf.nn.relu( x )
### Tanh Activation
def tanh( x ):
return tf.nn.tanh( x )
### SoftPlus Activation
def softplus( x, scale = 1):
return tf.nn.softplus( x ) * scale
### SoftMax Activation
def softmax( x ):
return tf.nn.softmax( x )
### Sigmoid Activation
def sigmoid( x ):
return tf.nn.sigmoid( x )
### Squared Exponential Activation
def sqrexp( x , pars ):
values = tf.Variable( pars , trainable = False , dtype = tf.float32 )
return values[0] * tf.exp( - tf.square( x ) / ( 2.0 * values[1] ) )
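# Hedged numpy illustration of sqrexp above: with pars = [a, l] it computes
# a * exp(-x**2 / (2 * l)). Runs only when this module is executed directly;
# the values below are made up.
if __name__ == '__main__':
    import numpy as np
    x = np.linspace(-3.0, 3.0, 7)
    a, l = 1.0, 0.5
    print(a * np.exp(-np.square(x) / (2.0 * l)))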
|
bermeom/quadruped-robot
|
tensorblock/aux/aux_batch.py
|
import copy
import numpy as np
### Get Batch Data (Numpy)
def get_batch_numpy( data , b , i ):
l = len( data )
st = ( i + 0 ) * b % l
fn = ( i + 1 ) * b % l
if st > fn :
if fn == 0: return data [ st : l ]
else: return np.vstack( ( data[ st : l ] , data[ 0 : fn ] ) )
else:
return data[ st : fn ]
### Get Batch Data (List)
def get_batch_list( data , b , i ):
l = len( data )
st = ( i + 0 ) * b % l
fn = ( i + 1 ) * b % l
if st > fn :
if fn == 0: return data [ st : l ]
else: return data[ st : l ] + data[ 0 : fn ]
else:
return data[ st : fn ]
### Get Batch Data
def get_batch( data , b , i ):
if isinstance( data , list ):
return get_batch_list( data , b , i )
if isinstance( data , np.ndarray ):
return get_batch_numpy( data , b , i )
print( 'DATA TYPE NOT SUPPORTED' )
return None
### Get Data Seqlen
def get_data_seqlen( data ):
data_seqlen = []
max_seqlen = 0
for i in range( len( data ) ):
seqlen = len( data[i] )
data_seqlen.append( seqlen )
if seqlen > max_seqlen:
max_seqlen = seqlen
return data_seqlen
### Pad Data
def pad_data( data , max_seqlen = None ):
data_seqlen = get_data_seqlen( data )
if max_seqlen is None: max_seqlen = max( data_seqlen )
max_featlen = len( data[0][0] )
pad = [ 0.0 for _ in range( max_featlen ) ]
for i in range( len( data ) ):
data[i] += [ pad for _ in range( max_seqlen - data_seqlen[i] ) ]
return data_seqlen
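# Hedged usage sketch of the helpers above; runs only when this module is
# executed directly, and the data below is made up.
if __name__ == '__main__':
    data = np.arange(10).reshape(5, 2)   # 5 samples of dimension 2
    print(get_batch(data, 2, 0))         # rows 0..1
    print(get_batch(data, 2, 2))         # wraps around: row 4 stacked with row 0
    seqs = [[[1.0, 2.0]], [[3.0, 4.0], [5.0, 6.0]]]
    lens = pad_data(seqs)                # pads the first sequence with a zero row
    print(lens, seqs)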
|
bermeom/quadruped-robot
|
tensorblock/layers/layer_hlfully.py
|
import numpy as np
import tensorflow as tf
import tensorblock as tb
class layer_hlfully:
####### Data
def name(): return 'Fully'
def shapeMult(): return 1
def dims(): return 1
def allowPooling(): return False
####### Function
def function( x , W , b , recipe , pars ):
if tb.aux.tf_length( x ) > 2:
x = tb.aux.tf_flatten( x )
layer = tf.contrib.layers.fully_connected (x,
pars['out_channels'],
activation_fn = None,
weights_regularizer = tf.contrib.layers.l2_regularizer(0.1))
return [ layer ] , pars , None
####### Shapes
def shapes( input_shape , pars ):
in_channels = tb.aux.flat_dim( input_shape )
out_channels = pars['out_channels'] * np.prod( pars['out_sides'] )
weight_shape = [ in_channels , out_channels ]
bias_shape = [ out_channels ]
return weight_shape , bias_shape
|
bermeom/quadruped-robot
|
learning/players_imitation/player.py
|
import random
from collections import deque
import sys
sys.path.append( '..' )
sys.path.append( '../..' )
import tensorblock as tb
import numpy as np
##### PLAYER
class player:
### __INIT__
def __init__( self ):
self.num_stored_obsv = 0
### DUMMY FUNCTIONS
def prepare( self ): return None
def network( self ): return None
def operations( self ): return None
def train( self , prev_state , curr_obsv , actn ,rewd , done ): return None
def process( self , obsv ): return obsv
def act( self , state ): return None
def info( self ): return None
def on_start( self ): return None
### START
def start( self , source , obsv ):
self.obsv_list = deque()
self.obsv_shape = obsv.shape
self.num_actions = source.num_actions()
self.range_actions = source.range_actions()
self.continuous = False
if source.range_actions() != -1:
self.continuous = True
self.initialize()
return self.restart( obsv )
### RESTART
def restart( self , obsv ):
self.obsv_list.clear()
self.store_obsv( obsv )
return self.process( obsv )
### PARSE ARGUMENTS
def parse_args( self , args ):
self.arg_save = args.save[0]
self.arg_save_step = int( args.save[1] ) if len( args.save ) > 1 else 10
self.arg_load = args.load
self.arg_run = args.run
if self.arg_save == 'same' : self.arg_save = self.arg_load
if self.arg_load == 'same' : self.arg_load = self.arg_save
if self.arg_save is not None : self.arg_save = '../trained_models/' + self.arg_save
if self.arg_load is not None : self.arg_load = '../trained_models/' + self.arg_load
### CREATE ACTION
def create_action( self , idx ):
if self.continuous: return idx
action = np.zeros( self.num_actions )
action[ idx ] = 1
return action
### CREATE RANDOM ACTION
def create_random_action( self ):
return self.create_action( random.randint( 0 , self.num_actions - 1 ) )
#return self.env.action_space.sample()
### STORE OBSERVATIONS
def store_obsv( self , obsv ):
if self.num_stored_obsv > 0:
while len( self.obsv_list ) < self.num_stored_obsv + 1:
self.obsv_list.append( obsv )
self.obsv_list.popleft()
### INITIALIZE NETWORK
def initialize( self ):
self.brain = tb.recipe()
self.network()
self.operations()
if self.arg_save is not None:
self.brain.addSaver( name = 'Save' , dir = self.arg_save )
if self.arg_load is not None:
self.brain.addSaver( name = 'Load' , dir = self.arg_load )
self.brain.initialize()
if self.arg_load is not None:
print( '*** RESTORING' , self.arg_load )
self.brain.restore( name = 'Load' )
if not self.arg_run:
self.on_start()
### LEARN FROM CURRENT OBSERVATION
def learn( self , prev_state , curr_obsv , actn , rewd , done, episode ):
self.store_obsv( curr_obsv )
curr_state = self.process( curr_obsv )
if not self.arg_run:
self.train( prev_state, curr_state , actn , rewd , done, episode )
return curr_state
### TIME TO SAVE
def time_to_save( self , episode ):
return not self.arg_run and self.arg_save is not None and \
( episode + 1 ) % self.arg_save_step == 0
### VERBOSE OUTPUT
def verbose( self , episode , rewd , done ):
if done:
self.info()
if self.time_to_save( episode ):
print( ' SAVING |' , end = '' )
self.brain.save( name = 'Save' )
print()
|
bermeom/quadruped-robot
|
learning/sources/source_pygame.py
|
import signal
import sys
import importlib
from sources.source import source
##### SOURCE PYGAME
class source_pygame( source ):
### __INIT__
def __init__( self , game ):
source.__init__( self )
module = importlib.import_module( 'sources.pygames.' + game )
self.env = getattr( module , game )()
def signal_handler(signal, frame):
print('\nProgram closed!')
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
### START SIMULATION
def start( self ):
obsv = self.env.reset()
return self.process( obsv )
### MOVE ONE STEP
def move( self , actn ):
obsv , rewd , done = self.env.step( self.map_keys( actn ) )
return self.process( obsv ) , rewd , done
|
bermeom/quadruped-robot
|
learning/sources/vrep/vrepper/version.py
|
VERSION = '3.5.0'
ARCH = 'x64'
|
bermeom/quadruped-robot
|
learning/players_reinforcement/player_dql_rnn_egreedy.py
|
import random
from collections import deque
from players_reinforcement.player import player
import sys
sys.path.append( '..' )
import tensorblock as tb
import numpy as np
import time
##### PLAYER DQL RNN EGREEDY
class player_dql_rnn_egreedy( player ):
### __INIT__
def __init__( self ):
player.__init__( self )
self.random_prob = self.START_RANDOM_PROB
self.experiences = deque()
self.sequence = []
### INFO ON SCREEN
def info( self ):
if self.random_prob > 0.0 and not self.arg_run:
print( ' Random : %5.5f |' % ( self.random_prob ) , end = '' )
if self.random_prob > self.FINAL_RANDOM_PROB:
self.random_prob -= ( self.START_RANDOM_PROB - self.FINAL_RANDOM_PROB ) / self.NUM_EXPLORATION_EPISODES
### CHOOSE NEXT ACTION
def act( self , state ):
if self.random_prob == 0.0 or self.arg_run:
return self.calculate( state )
if random.random() > self.random_prob:
return self.calculate( state )
else:
return self.create_random_action()
### CALCULATE NETWORK
def calculate( self , state ):
size = len( self.sequence )
if size < self.NUM_FRAMES:
return self.create_random_action()
states = np.zeros( ( self.NUM_FRAMES , self.obsv_shape[0] ) )
for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
states[i] = self.sequence[j][1]
output = self.brain.run( 'Output' , [ [ 'Observation' , [ states ] ] ] )
return self.create_action( np.argmax( output ) )
### PREPARE OPERATIONS
def operations( self ):
# Action Placeholders
self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
self.brain.addInput( shape = [ None ] , name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.sum_mul ,
input = [ 'Output' , 'Actions' ] , name = 'Readout' )
self.brain.addOperation( function = tb.ops.mean_squared_error ,
input = [ 'Target' , 'Readout' ] , name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam , input = 'Cost' ,
learning_rate = self.LEARNING_RATE , name = 'Optimizer' )
### TRAIN NETWORK
def train( self, prev_state, curr_state, actn, rewd, done, episode ):
# Store New Experience
self.sequence.append( ( prev_state , curr_state , actn , rewd , done ) )
if done: self.experiences.append( self.sequence ) ; self.sequence = []
# Check for Train
if len( self.experiences ) >= self.STEPS_BEFORE_TRAIN and self.BATCH_SIZE > 0:
# Create Batch Structures
prev_states = np.zeros( ( self.BATCH_SIZE , self.NUM_FRAMES , self.obsv_shape[0] ) )
curr_states = np.zeros( ( self.BATCH_SIZE , self.NUM_FRAMES , self.obsv_shape[0] ) )
actions = np.zeros( ( self.BATCH_SIZE , self.num_actions ) )
rewards = np.zeros( ( self.BATCH_SIZE ) )
dones = np.zeros( ( self.BATCH_SIZE ) )
# Select Random Batches
for i in range( 0 , self.BATCH_SIZE ):
rnd_i = random.randint( 0 , len( self.experiences ) - 1 )
rnd_j = random.randint( 0 , len( self.experiences[rnd_i] ) - self.NUM_FRAMES )
for j in range( 0 , self.NUM_FRAMES ):
prev_states[i,j,:] = self.experiences[ rnd_i ][ rnd_j + j ][0]
curr_states[i,j,:] = self.experiences[ rnd_i ][ rnd_j + j ][1]
actions[i] = self.experiences[ rnd_i ][ rnd_j + self.NUM_FRAMES - 1 ][2]
rewards[i] = self.experiences[ rnd_i ][ rnd_j + self.NUM_FRAMES - 1 ][3]
dones[i] = self.experiences[ rnd_i ][ rnd_j + self.NUM_FRAMES - 1 ][4]
# Calculate Rewards for each Action
rewards_per_action = self.brain.run( 'Output' , [ [ 'Observation' , curr_states ] ] )
# Calculate Expected Reward
expected_reward = []
for i in range( self.BATCH_SIZE ):
if dones[i]: expected_reward.append( rewards[i] )
else: expected_reward.append( rewards[i] +
self.REWARD_DISCOUNT * np.max( rewards_per_action[i] ) )
# Optimize Neural Network
self.brain.run( 'Optimizer' , [ [ 'Observation' , prev_states ] ,
[ 'Actions' , actions ] ,
[ 'Target' , expected_reward ] ] )
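# Hedged numpy sketch of the expected-reward target assembled in train() above:
# r + REWARD_DISCOUNT * max_a Q(s', a) for non-terminal transitions, r otherwise.
# The helper name and example values are illustrative only.
def _q_learning_targets(rewards, q_next, dones, discount=0.99):
    return [r if d else r + discount * np.max(q)
            for r, q, d in zip(rewards, q_next, dones)]
# Example: _q_learning_targets([1.0, 0.0], np.array([[0.2, 0.5], [0.1, 0.3]]), [False, True])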
|
bermeom/quadruped-robot
|
tensorblock/aux/__init__.py
|
from tensorblock.aux.aux_batch import *
from tensorblock.aux.aux_load import *
from tensorblock.aux.aux_reshape import *
from tensorblock.aux.aux_parse import *
from tensorblock.aux.aux_tf_reshape import *
|
bermeom/quadruped-robot
|
trained_models/quadruped_robot_err_vel_DDPG_1A_7/auxiliar/aux_plot.py
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
### PLOT STATE
def plot_state( state ):
n = state.shape[2]
fig = plt.figure()
for i in range( n ):
plt.subplot( 2 , n , i + 1 )
plt.imshow( state[:,:,i] , cmap = 'gray' )
plt.show()
### PLOT STATES
def plot_states( prev_state , curr_state ):
n = prev_state.shape[2]
fig = plt.figure()
for i in range( n ):
plt.subplot( 2 , n , i + 1 )
plt.imshow( curr_state[:,:,i] , cmap = 'gray' )
plt.subplot( 2 , n , i + n + 1 )
plt.imshow( prev_state[:,:,i] , cmap = 'gray' )
plt.show()
### SAVE STATISTICS
def plot_episode_stats(episode_lengths, episode_rewards, accumulated_lengths, time_rewards):
np.savetxt('./auxiliar/EpisodeLengths.txt', episode_lengths, fmt='%.5f', newline='\n')
np.savetxt('./auxiliar/EpisodeRewards.txt', episode_rewards, fmt='%.5f', newline='\n')
np.savetxt('./auxiliar/AccLengths.txt', accumulated_lengths, fmt='%.5f', newline='\n')
np.savetxt('./auxiliar/TimeRewards.txt', time_rewards, fmt='%.5f', newline='\n')
# Plot the episode length over time
fig1 = plt.figure(figsize=(20, 10))
plt.subplot(221)
plt.plot(episode_rewards, color = 'c')
plt.xlabel("Steps (thousands)")
plt.ylabel("Episode Reward")
plt.title("Episode Reward over Steps")
plt.subplot(222)
plt.plot(accumulated_lengths, episode_rewards, color = 'g')
plt.xlabel("Time (seconds)")
plt.ylabel("Episode Reward")
plt.title("Episode Reward over Time")
plt.subplot(223)
plt.plot(episode_lengths, color = 'r')
plt.xlabel("Steps (thousands)")
plt.ylabel("Episode Length")
plt.title("Episode Length over Steps")
plt.subplot(224)
plt.plot(accumulated_lengths, color = 'm')
plt.xlabel("Steps (thousands)")
plt.ylabel("Accumulated Length")
plt.title("Accumulated Length over Steps")
plt.savefig('./auxiliar/Plot.png')
plt.close()
|
bermeom/quadruped-robot
|
tensorblock/recipe/recipe_print.py
|
import tensorflow as tf
import tensorblock as tb
class recipe_print:
####### Print Nodes
def printNodes( self ):
print( '############################################################## TENSORS' ,
'/' + self.folder[0:-1] )
print( '#################################################### INPUTS' )
for input in self.inputs:
pars = input[1]
print( '*************************** ' + pars['name'] , end = '' )
print( ' | S: ' + str( self.shape( pars['name'] ) ) , end = '' )
print( ' |' )
print( '#################################################### LAYERS' )
for layer in self.layers:
type , input , name = layer[1]['type'].name() , layer[1]['input'] , layer[1]['name']
weight_shape , bias_shape = self.shape( layer[1]['weight_name'] ) , self.shape( layer[1]['bias_name'] )
dropout , pooling = layer[1]['dropout'] , layer[1]['pooling']
in_dropout , out_dropout = layer[1]['in_dropout'] , layer[1]['out_dropout']
W = self.node( layer[1]['weight_name'] )
strW = ' W:' if not isinstance( W , list ) else ' W(' + str( len( W ) ) + '):'
b = self.node( layer[1]['bias_name'] )
strB = ' b:' if not isinstance( b , list ) else ' b(' + str( len( b ) ) + '):'
print( '*************************** (' + type + ') - ' + input + ' --> ' + name , end = '' )
print( ' | I:' , self.shape( layer[1]['input'] ) , end = '' )
print( ' | O:' , tb.aux.tf_shape( layer[0] ) , end = '' )
if weight_shape is not None : print( ' |' + strW , weight_shape , end = '' )
if bias_shape is not None : print( ' |' + strB , bias_shape , end = '' )
if pooling > 1 : print( ' | pool:' , pooling , end = '' )
if dropout > 0.0 : print( ' | drop:' , dropout , end = '' )
if in_dropout > 0.0 : print( ' | in_drop:' , in_dropout , end = '' )
if out_dropout > 0.0 : print( ' | out_drop:' , out_dropout , end = '' )
print( ' |' )
print( '#################################################### VARIABLES' )
for input in self.variables:
pars = input[1]
print( '*************************** ' + pars['name'] , end = '' )
print( ' | S: ' + str( self.shape( pars['name'] ) ) , end = '' )
print( ' |' )
print( '#################################################### OPERATIONS' )
for operation in self.operations:
name = operation[1]['name']
inputs , src , dst = operation[1]['input'] , operation[1]['src'] , operation[1]['dst']
print( '*************************** ' , end = '' )
if inputs is not None:
if not isinstance( inputs , list ):
print( inputs , end = '' )
else:
for i in range( len(inputs) - 1 ):
print( inputs[i] + ' & ' , end = '' )
print( inputs[-1] , end = '' )
if src is not None:
if not isinstance( src , list ):
print( src + ' <-> ' + dst , end = '' )
else:
for i in range( len(src) - 1 ):
print( src[i] + ' & ' , end = '' )
print( src[-1] , end = '' )
print( ' <-> ' , end = '' )
for i in range( len(dst) - 1 ):
print( dst[i] + ' & ' , end = '' )
print( dst[-1] , end = '' )
print( ' --> ' + name )
print( '############################################################## END TENSORS' ,
'/' + self.folder[0:-1] )
####### Print All Nodes
def printAllNodes( self ):
self.printNodes()
for i in range( len( self.blocks ) ):
self.blocks[i][0].printAllNodes()
####### Print Collection
def printCollection( self ):
print( '############################################################## COLLECTION' )
vars = [ v for v in tf.global_variables() if v.name.startswith( self.folder ) ]
for var in vars: print( var.name )
print( '############################################################## END COLLECTION' )
####### Print All Collection
def printAllCollection( self ):
print( '############################################################## COLLECTION' )
vars = [ v for v in tf.global_variables() ]
for var in vars: print( var.name )
print( '############################################################## END COLLECTION' )
|
bermeom/quadruped-robot
|
learning/players_reinforcement/player_reinforce_2.py
|
from players_reinforcement.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
# PLAYER REINFORCE
class player_reinforce_2(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.num_stored_obsv = self.NUM_FRAMES
self.experiences = deque()
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
output = np.squeeze( self.brain.run( 'Output', [ [ 'Observation', [state] ] ] ) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action( action )
# PREPARE NETWORK
def operations(self):
# Action Placeholders
self.brain.addInput( shape = [ None, self.num_actions ], name = 'Actions' )
self.brain.addInput( shape = [ None ], name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Target' ],
name = 'Cost' )
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
summary = 'Summary',
writer = 'Writer',
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter( name = 'Writer' , dir = '../' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def train( self, prev_state, curr_state, actn, rewd, done, episode ):
# Store New Experience Until Done
self.experiences.append( ( prev_state, curr_state, actn, rewd, done ) )
# Check for Train
if done:
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = [d[0] for d in batch]
curr_states = [d[1] for d in batch]
actions = [d[2] for d in batch]
rewards = [d[3] for d in batch]
dones = [d[4] for d in batch]
# Calculate Discounted Reward
running_add = 0
discounted_r = np.zeros_like(rewards)
for t in reversed(range(0, len(rewards))):
if rewards[t] !=0: # pygame_catch specific
running_add = 0
running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
discounted_r[t] = running_add
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ [ 'Observation', prev_states ],
[ 'Actions', actions ],
[ 'Target', discounted_r ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
# Reset
self.experiences = deque()
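# Hedged numpy sketch of the discounted return computed in train() above.
# running_add is reset whenever a non-zero reward is seen (the pygame_catch
# specific behaviour noted in the loop). The helper name is illustrative only.
def _discounted_returns(rewards, discount=0.99):
    out = np.zeros(len(rewards))
    running_add = 0.0
    for t in reversed(range(len(rewards))):
        if rewards[t] != 0:
            running_add = 0.0
        running_add = running_add * discount + rewards[t]
        out[t] = running_add
    return out
# Example: _discounted_returns([0.0, 0.0, 1.0]) -> [0.9801, 0.99, 1.0]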
|
bermeom/quadruped-robot
|
learning/players_reinforcement/player_dql_bayesian_1.py
|
from players_reinforcement.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
# PLAYER DQL BAYESIAN
class player_dql_bayesian_1(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.num_stored_obsv = self.NUM_FRAMES
self.random_prob = self.START_RANDOM_PROB
self.experiences = deque()
self.keep_prob = self.FINAL_RANDOM_PROB
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
output = self.brain.run( 'Output', [ [ 'Observation', [state] ] ], use_dropout=True )
action = np.argmax(output)
return self.create_action ( action )
# PREPARE OPERATIONS
def operations ( self ):
# Action Placeholders
self.brain.addInput( shape = [ None, self.num_actions ], name = 'Actions' )
self.brain.addInput( shape = [ None ], name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.sum_mul,
input = ['Output', 'Actions'],
name = 'Readout' )
self.brain.addOperation( function = tb.ops.mean_squared_error,
input = ['Target', 'Readout'],
name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter(name = 'Writer' , dir = '../' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def train( self, prev_state, curr_state, actn, rewd, done, episode ):
# Store New Experience
self.experiences.append( ( prev_state, curr_state, actn, rewd, done ) )
if len(self.experiences) > self.EXPERIENCES_LEN:
self.experiences.popleft()
# Check for Train
if len(self.experiences) > self.STEPS_BEFORE_TRAIN and self.BATCH_SIZE > 0:
# Select Random Batch
batch = random.sample(self.experiences, self.BATCH_SIZE)
# Separate Batch Data
prev_states = [d[0] for d in batch]
curr_states = [d[1] for d in batch]
actions = [d[2] for d in batch]
rewards = [d[3] for d in batch]
dones = [d[4] for d in batch]
# Calculate Rewards
rewards_per_action = self.brain.run( 'Output', [['Observation', curr_states]],
use_dropout = True )
# Calculate Expected Reward
expected_reward = []
for i in range(self.BATCH_SIZE):
if dones[i]:
expected_reward.append( rewards[i] )
else:
expected_reward.append( rewards[i] + \
self.REWARD_DISCOUNT * np.max( rewards_per_action[i] ) )
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ [ 'Observation', prev_states ],
[ 'Actions', actions ],
[ 'Target', expected_reward ] ], use_dropout=True )
# Update Random Probability
if done and self.random_prob > self.FINAL_RANDOM_PROB:
self.random_prob -= (self.START_RANDOM_PROB - self.FINAL_RANDOM_PROB) / \
self.NUM_EXPLORATION_EPISODES
self.keep_prob += (self.START_RANDOM_PROB - self.FINAL_RANDOM_PROB) / \
self.NUM_EXPLORATION_EPISODES
self.brain.setDropout( name = 'Drop', value = self.keep_prob )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
|
bermeom/quadruped-robot
|
tensorblock/layers/__init__.py
|
from tensorblock.layers.layer_flatten import layer_flatten as flatten
from tensorblock.layers.layer_fully import layer_fully as fully
from tensorblock.layers.layer_conv1d import layer_conv1d as conv1d
from tensorblock.layers.layer_conv2d import layer_conv2d as conv2d
from tensorblock.layers.layer_conv3d import layer_conv3d as conv3d
from tensorblock.layers.layer_deconv2d import layer_deconv2d as deconv2d
from tensorblock.layers.layer_variational import layer_variational as variational
from tensorblock.layers.layer_rnn import layer_rnn as rnn
from tensorblock.layers.layer_hlfully import layer_hlfully as hlfully
|
bermeom/quadruped-robot
|
learning/players_imitation/player_DAgger_1A.py
|
from players_imitation.player_DAgger_1 import *
# PLAYER DAgger
class player_DAgger_1A( player_DAgger_1 ):
LEARNING_RATE = 1e-4
BETA = 0.5
NUM_FRAMES = 1
BATCH_SIZE = 64
EPOCHS = 10
TIME_TO_UPDATE = 5000
DS_SIZE = 100000
DATASET = 'cartpole'
### __INIT__
def __init__( self ):
player_DAgger_1.__init__( self )
# PROCESS OBSERVATION
def process(self, obsv):
return np.stack( tuple( self.obsv_list[i] for i in range( self.NUM_FRAMES ) ), axis = 1 )
### PREPARE NETWORK
def network( self ):
# Expert Policy
Expert = self.brain.addBlock( 'Expert' )
Expert.addInput( shape = [ None, self.obsv_shape[0], self.NUM_FRAMES ], name = 'Observation' )
Expert.setLayerDefaults( type = tb.layers.fully,
activation = tb.activs.relu )
Expert.addLayer( out_channels = 256, input = 'Observation', name = 'Hidden' )
#Expert.addLayer( out_channels = 200, name = 'Hidden' )
Expert.addLayer( out_channels = self.num_actions, input = 'Hidden', name = 'Output', activation = None )
Expert.addLayer( out_channels = self.num_actions, input = 'Hidden', name = 'Discrete', activation = tb.activs.softmax )
# Actor Policy
Actor = self.brain.addBlock( 'Actor' )
Actor.addInput( shape = [ None, self.obsv_shape[0], self.NUM_FRAMES ], name = 'Observation' )
Actor.setLayerDefaults( type = tb.layers.fully,
activation = tb.activs.relu )
Actor.addLayer( out_channels = 256, input = 'Observation', name = 'Hidden' )
#Actor.addLayer( out_channels = 200, name = 'Hidden' )
Actor.addLayer( out_channels = self.num_actions, input = 'Hidden', name = 'Output', activation = None )
Actor.addLayer( out_channels = self.num_actions, input = 'Hidden', name = 'Discrete', activation = tb.activs.softmax )
|
bermeom/quadruped-robot
|
tensorblock/recipe/recipe_layer.py
|
import numpy as np
import tensorflow as tf
import tensorblock as tb
class recipe_layer:
####### Add Layer
def addLayer( self , **args ):
pars = { **self.defs_layer , **args }
if pars['input'] is None: pars['input'] = self.curr_input
pars = self.parsePars( pars )
pars = self.createLayerNames( pars )
layer = pars['type']
input_shape = self.shape( pars['input'] )
pars['folder'] = self.folder
weight_shape , bias_shape = layer.shapes( input_shape , pars )
self.newLayerWeight( weight_shape , pars )
self.newLayerBias( bias_shape , pars )
self.newLayer( pars )
self.curr_input = pars['name']
return self.layers[-1][0]
####### Parse Pars
def parsePars( self , pars ):
if pars['copy'] is not None:
folder = tb.aux.get_folder( pars['copy'] )
copy_pars = self.pars( pars['copy'] )
pars['out_channels'] = copy_pars['out_channels']
pars['type' ] = copy_pars['type' ]
pars['weight_copy' ] = folder + copy_pars['weight_name' ]
pars['bias_copy' ] = folder + copy_pars['bias_name' ]
pars['dropout_copy'] = folder + copy_pars['dropout_name']
pars['dropout' ] = copy_pars['dropout' ]
if pars['share'] is not None:
folder = tb.aux.get_folder( pars['share'] )
share_pars = self.pars( pars['share'] )
pars['out_channels' ] = share_pars['out_channels']
pars['type' ] = share_pars['type' ]
pars['weight_share' ] = folder + share_pars['weight_name' ]
pars['bias_share' ] = folder + share_pars['bias_name' ]
pars['dropout_share'] = folder + share_pars['dropout_name']
pars['dropout' ] = share_pars['dropout' ]
dims = pars['type'].dims()
input_pars = self.pars( pars['input'] )
if pars['in_sides' ] is None: pars['in_sides' ] = input_pars['out_sides' ]
if pars['in_channels'] is None: pars['in_channels'] = input_pars['out_channels']
if pars['out_sides' ] is None: pars['out_sides' ] = 1
if isinstance( pars['in_sides'] , str ):
pars['in_sides'] = self.pars( pars['in_sides'] )['out_sides']
if isinstance( pars['in_channels'] , str ):
pars['in_channels'] = self.pars( pars['in_channels'] )['out_channels']
if isinstance( pars['out_channels'] , str ):
channels_pars = self.pars( pars['out_channels'] )
if dims == 1 and len( channels_pars['out_sides'] ) == 1 :
pars['out_channels'] = channels_pars['out_sides'][0]
else: pars['out_channels'] = channels_pars['out_channels']
if isinstance( pars['out_sides'] , str ):
pars['out_sides'] = self.pars( pars['out_sides'] )['in_sides']
if isinstance( pars['strides'] , str ):
pars['strides'] = self.pars( pars['strides'] )['strides']
if isinstance( pars['seqlen'] , str ):
pars['seqlen'] = self.tensor( pars['seqlen'] )
pars['ksize' ] = tb.aux.spread( pars['ksize' ] , dims )
pars['strides' ] = tb.aux.spread( pars['strides' ] , dims )
pars['in_sides' ] = tb.aux.spread( pars['in_sides' ] , dims )
pars['out_sides'] = tb.aux.spread( pars['out_sides'] , dims )
return pars
####### New Layer
def newLayer( self , pars ):
scope = self.folder + pars['name']
with tf.variable_scope( scope , reuse = False ):
layer , pars , vars = \
pars['type'].function( self.node( pars['input' ] ) , \
self.node( pars['weight_name'] ) , \
self.node( pars['bias_name' ] ) , self , pars )
layer = self.checkActivation( layer , pars )
layer = self.checkPooling( layer , pars )
layer = self.checkDropout( layer , pars )
shape = tb.aux.tf_shape( layer[0] )
if len( shape ) == 4: pars['out_sides'] = shape[1:3]
if len( shape ) == 5: pars['out_sides'] = shape[1:4]
if vars is not None:
self.weights[-1][0] = vars[0]
self.biases[ -1][0] = vars[1]
self.layers.append( [ layer[0] , pars ] )
self.extras.append( layer[1:] )
self.cnt += 1
####### Check for Activation Function
def checkActivation( self , layer , pars ):
if pars['activation'] is not None:
if pars['activation_pars'] is None: layer[0] = pars['activation']( layer[0] )
else: layer[0] = pars['activation']( layer[0] , pars['activation_pars'] )
return layer
####### Check for Pooling
def checkPooling( self , layer , pars ):
if pars['type'].allowPooling():
if np.prod( pars['pooling'] ) > 1:
dims = pars['type'].dims()
if dims == 2: layer[0] = tb.extras.maxpool2d( layer[0] , pars )
if dims == 3: layer[0] = tb.extras.maxpool3d( layer[0] , pars )
return layer
####### Check for Dropout
def checkDropout( self , layer , pars ):
if pars['dropout'] == 0.0:
return layer
scope = pars['dropout_name']
with tf.variable_scope( scope , reuse = False ):
if pars['dropout_share'] is not None: # Sharing
idx_share = self.pars( pars['dropout_share'] )[0]
self.labels[ pars['dropout_name'] ] = ( 'dropout' , idx_share )
layer[0] = tb.extras.dropout( layer[0] , self.root.dropouts[idx_share][0] )
else:
if pars['dropout_copy'] is not None: # Copying
dropout = self.pars( pars['dropout_copy'] )[1]
else: dropout = pars['dropout']
idx = len( self.root.dropouts )
self.labels[ pars['dropout_name'] ] = ( 'dropout' , idx )
self.root.dropouts.append( [ tb.vars.placeholder() , [ idx , dropout ] ] )
layer[0] = tb.extras.dropout( layer[0] , self.root.dropouts[-1][0] )
return layer
####### Set Dropout
def setDropout( self , name = None , value = 1.0 ):
self.pars( name )[1] = value
####### Add Dropout
def addDropout( self , name = None , value = 1.0 ):
self.pars( name )[1] += value
####### New Variable
def newLayerVariable( self , shape , pars , type ):
if type == 'weight': s , list = 'weight_' , self.weights
if type == 'bias': s , list = 'bias_' , self.biases
if shape is None: list.append( [ None , pars ] ) ; return
if pars[s + 'type'] is None: list.append( [ None , pars ] ) ; return
if isinstance( pars[s + 'type'] , str ):
list.append( [ self.tensor( pars[s + 'type'] ) , pars ] ) ; return
scope_name = '/' + pars[s + 'name']
dict = { 'label' : s , 'mean' : pars[s + 'mean'] , 'stddev' : pars[s + 'stddev'] ,
'value' : pars[s + 'value'] , 'min' : pars[s + 'min'] , 'max' : pars[s + 'max'] ,
'trainable' : pars[s + 'trainable'] , 'seed' : pars[s + 'seed'] }
if pars[s + 'share'] is not None: # Sharing
share_pars = self.pars( pars[s + 'share'] )
name = '/' + share_pars['folder'] + share_pars[s + 'name']
list.append( [ self.node( name ) , pars ] ) ; return
if pars[s + 'copy'] is not None: # Copying
pars[s + 'type'] = tb.vars.copy
copy_pars = self.pars( pars[s + 'copy'] )
name = '/' + copy_pars['folder'] + copy_pars[s + 'name']
shape = self.node( name )
else: # Nothing
shape[-1] = shape[-1] * pars['type'].shapeMult()
scope = self.folder + pars['name'] + scope_name
if scope[0] == '/' : scope = scope[1:]
if callable( pars[s + 'type'] ):
with tf.variable_scope( scope , reuse = False ):
list.append( [ pars[s + 'type']( shape , dict , name = type ) , pars ] )
else:
if isinstance( pars[s + 'type'] , np.ndarray ):
list.append( [ tb.vars.numpy( pars[s + 'type'] , dict , name = type ) , pars ] )
# print( 'NUMPY' , pars[s + 'type'].shape )
else:
list.append( [ pars[s + 'type'] , pars ] )
# print( 'TENSOR' )
####### New Weight
def newLayerWeight( self , shape , pars ):
self.newLayerVariable( shape , pars , 'weight' )
####### New Bias
def newLayerBias( self , shape , pars ):
self.newLayerVariable( shape , pars , 'bias' )
####### Create Layer Names
def createLayerNames( self , pars ):
if not pars['name' ]: pars['name' ] = pars['type'].name() + '_' + str( self.cnt )
if not pars['weight_name' ]: pars['weight_name' ] = 'W_' + pars['name']
if not pars['bias_name' ]: pars['bias_name' ] = 'b_' + pars['name']
if not pars['dropout_name']: pars['dropout_name'] = 'drop_' + pars['name']
self.labels[ pars['name' ] ] = ( 'layer' , self.cnt )
self.labels[ pars['weight_name'] ] = ( 'weight' , self.cnt )
self.labels[ pars['bias_name' ] ] = ( 'bias' , self.cnt )
self.order.append( [ 'layer' , pars['name'] ] )
return pars
|
bermeom/quadruped-robot
|
learning/sources/source_quadruped_robot.py
|
from sources.source_gym import source_gym
import numpy as np
import gym
gym.envs.register(
id='quadruped-robot-v0',
entry_point='envs.quadruped:QuadrupedEnv',
max_episode_steps=10000,
reward_threshold=4800.0,
)
##### SOURCE GYM QUADRUPED ROBOT
class source_quadruped_robot( source_gym ):
### __INIT__
def __init__( self ):
source_gym.__init__( self , 'quadruped-robot-v0' )
### INFORMATION
def num_actions( self ): return self.env.action_space.shape[0]
def range_actions( self ): return abs(self.env.action_space.high[0])
### MAP KEYS
def map_keys( self , actn ):
actn = np.clip( actn, self.env.action_space.low[0], self.env.action_space.high[0])
return np.expand_dims(actn,0)
### PROCESS OBSERVATION
def process( self , obsv ):
return obsv
|
bermeom/quadruped-robot
|
learning/players_imitation/player_GAIL_1.py
|
from players_imitation.player import player
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import random
# PLAYER GAIL
class player_GAIL_1(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.num_stored_obsv = self.NUM_FRAMES
self.experiences = []
self.s_dataset = []
self.a_dataset = []
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
if self.continuous:
action = self.brain.run( 'Actor/Output', [ [ 'Actor/Observation', [state] ] ] )
action = np.reshape( action, self.num_actions )
if not self.continuous:
output = np.squeeze( self.brain.run( 'Actor/Discrete', [ [ 'Actor/Observation', [state] ] ] ) )
action = np.random.choice( np.arange( len( output ) ), p = output )
return self.create_action( action )
# PREPARE NETWORK
def operations(self):
# Action Placeholders
self.brain.addInput( shape = [ None, self.num_actions ], name = 'Actions' )
self.brain.addInput( shape = [ None, self.num_actions ], name = 'O_Mu' )
self.brain.addInput( shape = [ None, self.num_actions ], name = 'O_Sigma' )
self.brain.addInput( shape = [ None, self.num_actions ], name = 'O_Discrete' )
self.brain.addInput( shape = [ None, 1 ], name = 'Advantage' )
self.brain.addInput( shape = [ None, 1 ], name = 'Exp_Logits' )
# Operations
# Critic
self.brain.addOperation( function = tb.ops.hlmean_squared_error,
input = [ 'Critic/Value','Advantage' ],
name = 'CriticCost' )
self.brain.addOperation( function = tb.optims.adam,
input = 'CriticCost',
learning_rate = self.A_LEARNING_RATE,
name = 'CriticOptimizer' )
# Actor
if self.continuous:
self.brain.addOperation( function = tb.ops.ppocost_distrib,
input = [ 'Actor/Mu',
'Actor/Sigma',
'O_Mu',
'O_Sigma',
'Actions',
'Advantage',
self.EPSILON ],
name = 'ActorCost' )
if not self.continuous:
self.brain.addOperation( function = tb.ops.ppocost,
input = [ 'Actor/Discrete',
'O_Discrete',
'Actions',
'Advantage',
self.EPSILON ],
name = 'ActorCost' )
self.brain.addOperation( function = tb.optims.adam,
input = 'ActorCost',
learning_rate = self.A_LEARNING_RATE,
name = 'ActorOptimizer' )
# Discriminator
self.brain.addOperation( function = tb.ops.disccost,
input = [ 'Disc/Output', 'Exp_Logits' ],
name = 'DiscCost' )
self.brain.addOperation( function = tb.optims.adam,
input = 'DiscCost',
learning_rate = self.D_LEARNING_RATE,
name = 'DiscOptimizer' )
# Assign
self.brain.addOperation( function = tb.ops.assign,
input = [ 'Old', 'Actor' ],
name = 'Assign' )
# Behaviour Cloning
if self.continuous:
self.brain.addOperation( function = tb.ops.hlmean_squared_error,
input = [ 'Actor/Output', 'Actions' ],
name = 'BcCost' )
if not self.continuous:
self.brain.addOperation( function = tb.ops.hlmean_squared_error,
input = [ 'Actor/Discrete', 'Actions' ],
name = 'BcCost' )
self.brain.addOperation( function = tb.optims.adam,
input = 'BcCost',
learning_rate = self.BC_LEARNING_RATE,
name = 'BcOptimizer' )
# RUN ONCE ON START
def on_start( self ):
self.s_dataset = np.load( '../datasets/' + self.DATASET + '_states.npy' )[ 0 : self.DS_SIZE ]
self.a_dataset = np.load( '../datasets/' + self.DATASET + '_actions.npy' )[ 0 : self.DS_SIZE ]
self.s_dataset = np.expand_dims(self.s_dataset,2)
# Start with Behaviour Cloning
if self.B_CLONING:
for _ in range ( self.INIT_NO_EPOCHS * len(self.s_dataset) // self.BC_BATCH_SIZE ):
idx = np.random.randint(len(self.s_dataset), size = self.BC_BATCH_SIZE)
states = self.s_dataset[idx,:]
actions = self.a_dataset[idx,:]
self.brain.run( [ 'BcOptimizer' ], [ [ 'Actor/Observation', states ],
[ 'Actions', actions ] ] )
self.brain.run( ['Assign'], [] )
# TRAIN NETWORK
def train( self, prev_state, curr_state, actn, rewd, done, episode ):
# Store New Experience Until Done
self.experiences.append( (prev_state, curr_state, actn, rewd, done) )
# Check for Train
if (len(self.experiences)) >= self.BATCH_SIZE:
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = [d[0] for d in batch]
curr_states = [d[1] for d in batch]
actions = [d[2] for d in batch]
#rewards = [d[3] for d in batch]
dones = [d[4] for d in batch]
# Trajectories Evaluation
idx = np.random.randint( self.DS_SIZE - 2 * self.BATCH_SIZE, size = 1 )
#idxs = np.random.choice(self.DS_SIZE, size=self.BATCH_SIZE)
idxs = np.arange( idx, idx + self.BATCH_SIZE )
#idxs2 = np.random.choice(200, size=self.BATCH_SIZE)
#prev_states = np.array(prev_states)[idxs2,:,:]
#curr_states = np.array(curr_states)[idxs2,:,:]
#actions = np.array(actions)[idxs2,:]
#dones = np.array(dones)[idxs2]
s_dataset = self.s_dataset[idxs,:]
a_dataset = self.a_dataset[idxs,:]
#s_dataset = np.expand_dims(s_dataset,2)
#a_dataset = np.expand_dims(a_dataset,2)
exp_logits = self.brain.run( 'Disc/Output' , [ [ 'Disc/Observation', s_dataset ],
[ 'Disc/Action', a_dataset ] ] )
# Update Discriminator
self.brain.run( [ 'DiscOptimizer' ], [ [ 'Disc/Observation', prev_states ],
[ 'Disc/Action', actions ],
[ 'Exp_Logits', exp_logits ] ] )
# States Values
prev_values = np.squeeze( self.brain.run( 'Critic/Value' , [ [ 'Critic/Observation', prev_states ] ] ) )
curr_values = np.squeeze( self.brain.run( 'Critic/Value' , [ [ 'Critic/Observation', curr_states ] ] ) )
# Discriminator Rewards
rewards = np.squeeze( self.brain.run( 'Disc/DiscRew' , [ [ 'Disc/Observation', prev_states ],
[ 'Disc/Action', actions ] ] ) )
# Calculate Generalized Advantage Estimation
running_add_y = 0
running_add_a = 0
y = np.zeros_like(rewards)
advantage = rewards + (self.GAMMA * curr_values) - prev_values
for t in reversed ( range( 0, len( advantage ) ) ):
if dones[t]:
curr_values[t] = 0
running_add_a = 0
running_add_y = curr_values[t] * self.GAMMA + rewards [t]
running_add_a = running_add_a * self.GAMMA * self.LAM + advantage [t]
y [t] = running_add_y
advantage [t] = running_add_a
y = np.expand_dims( y, 1 )
advantage = np.expand_dims( advantage, 1 )
# Update Old Pi
self.brain.run( ['Assign'], [] )
# Get Old Probabilities
if self.continuous:
o_Mu, o_Sigma = self.brain.run( [ 'Old/Mu', 'Old/Sigma' ], [ [ 'Old/Observation', prev_states ] ] )
if not self.continuous:
o_Discrete = self.brain.run( 'Old/Discrete' , [ [ 'Old/Observation', prev_states ] ] )
# Update Actor and Critic
for _ in range (self.UPDATE_SIZE):
if self.continuous:
self.brain.run( [ 'ActorOptimizer' ], [ [ 'Actor/Observation', prev_states ],
[ 'O_Mu', o_Mu ],
[ 'O_Sigma', o_Sigma ],
[ 'Actions', actions ],
[ 'Advantage', advantage ] ] )
if not self.continuous:
self.brain.run( [ 'ActorOptimizer' ], [ [ 'Actor/Observation', prev_states ],
[ 'O_Discrete', o_Discrete ],
[ 'Actions', actions ],
[ 'Advantage', advantage ] ] )
self.brain.run( [ 'CriticOptimizer' ], [ [ 'Critic/Observation', prev_states ],
[ 'Advantage', y ] ] )
# Reset
self.experiences = []
|
bermeom/quadruped-robot
|
learning/players_reinforcement/player_reinforce_rnn_2A.py
|
from players_reinforcement.player_reinforce_rnn_2 import *
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2A( player_reinforce_rnn_2 ):
NUM_FRAMES = 3
LEARNING_RATE = 1e-4
REWARD_DISCOUNT = 0.99
### __INIT__
def __init__( self ):
player_reinforce_rnn_2.__init__( self )
# PREPARE NETWORK
def network( self ):
# Input Placeholder
self.brain.addInput( shape = [ None, self.NUM_FRAMES, self.obsv_shape[0], self.obsv_shape[1] ],
name = 'Observation' )
# Reshape Input to CNN (B,T,D1,D2)->(B*T,D1,D2,1)
self.obs = self.brain.tensor('Observation')
self.obs = tf.expand_dims ( self.obs, axis=tf.rank(self.obs) )
self.obs = tf.reshape( self.obs, [ tf.shape(self.obs)[0]*self.NUM_FRAMES, self.obsv_shape[0], self.obsv_shape[1], 1 ] )
self.brain.addInput( tensor = self.obs, name = 'InputCNN' )
# Convolutional Layers
self.brain.setLayerDefaults( type=tb.layers.conv2d,
activation=tb.activs.relu, pooling=2, weight_stddev=0.01, bias_stddev=0.01 )
self.brain.addLayer( out_channels=32, ksize=8, strides=4, input = 'InputCNN' )
self.brain.addLayer( out_channels=64, ksize=4, strides=2 )
self.brain.addLayer( out_channels=64, ksize=3, strides=1 )
# Fully
self.brain.addLayer( type = tb.layers.flatten,
name = 'Flatten' )
self.brain.addLayer( type = tb.layers.fully,
out_channels = 256 ,
activation = tb.activs.elu,
name = 'OutputFully' )
# Reshape OutputFully to RNN (B*T,C)->(B,T,C)
self.outfully = tf.reshape( self.brain.tensor('OutputFully') , [-1, self.NUM_FRAMES, 256] )
self.brain.addInput( tensor = self.outfully, name = 'InputRNN' )
# RNN Layers
self.brain.addLayer( input = 'InputRNN',
type = tb.layers.rnn,
cell_type = 'LSTM',
num_cells = 2,
out_channels = 256,
name = 'RNN')
# Fully Connected Layers
self.brain.setLayerDefaults( type = tb.layers.fully,
weight_stddev = 0.01 ,
bias_stddev = 0.01 )
self.brain.addLayer( out_channels = self.num_actions,
activation = tb.activs.softmax,
name = 'Output' )
|
bermeom/quadruped-robot
|
learning/sources/source_pygame_catch.py
|
from sources.source_pygame import source_pygame
import cv2
##### SOURCE PYGAME CATCH
class source_pygame_catch( source_pygame ):
### __INIT__
def __init__( self ):
source_pygame.__init__( self , 'pygame_catch' )
### INFORMATION
def num_actions( self ): return 3
### MAP KEYS
def map_keys( self , actn ):
if actn[0] : return 0
if actn[1] : return 1
if actn[2] : return 2
### PROCESS OBSERVATION
def process( self , obsv ):
obsv = cv2.resize( obsv , ( 80 , 80 ) )
obsv = cv2.cvtColor( obsv , cv2.COLOR_BGR2GRAY )
_ , obsv = cv2.threshold( obsv , 127 , 255 , cv2.THRESH_BINARY )
return obsv
|
bermeom/quadruped-robot
|
tensorblock/recipe/__init__.py
|
from tensorblock.recipe.recipe_base import recipe_base as base
from tensorblock.recipe.recipe_block import recipe_block as block
from tensorblock.recipe.recipe_init import recipe_init as init
from tensorblock.recipe.recipe_input import recipe_input as input
from tensorblock.recipe.recipe_layer import recipe_layer as layer
from tensorblock.recipe.recipe_operation import recipe_operation as operation
from tensorblock.recipe.recipe_plot import recipe_plot as plot
from tensorblock.recipe.recipe_print import recipe_print as print
from tensorblock.recipe.recipe_save import recipe_save as save
from tensorblock.recipe.recipe_summary import recipe_summary as summary
from tensorblock.recipe.recipe_train import recipe_train as train
|
bermeom/quadruped-robot
|
__init__.py
|
<filename>__init__.py
from gym.envs.registration import registry, register, make, spec
# import sys
# sys.path.insert(0, '/ddpg')
# sys.path.insert(0, '/ddpg/nets')
register(
id='quadreped-robot-v0',
entry_point='envs.quadreped:QuadrepedEnv',
max_episode_steps=10000,
reward_threshold=4800.0,
)
|
bermeom/quadruped-robot
|
learning/sources/source_vrep_poppy.py
|
<gh_stars>1-10
from sources.source_vrep import source_vrep
import numpy as np
import time
##### SOURCE VREP POPPY
class source_vrep_poppy( source_vrep ):
RENDER = False
DESIRED_POSITION = [ 30, 45, -20, 90, 45, -45 ]
### __INIT__
def __init__( self ):
source_vrep.__init__( self , 'poppy_ergo_jr' )
# Get objects
self.objects = []
for i in range(6):
obj = self.env.get_object_by_name('m{}'.format(i + 1), is_joint=True)
self.objects.append(obj)
# Start episode
self.counter = 0
### INFORMATION
def num_actions( self ): return 6
def range_actions( self ): return 1
# STEP ACTIONS
def step(self, positions, speeds=None):
for i, m in enumerate(self.objects):
target = positions[i]
if i == 0:
target *= -1
m.set_position_target(target)
if speeds is not None:
m.set_velocity(speeds[i])
# GET OBSERVATIONS
def _get_obsv(self, desired_position):
self.counter += 1
# Observations
out_pos = np.zeros(6, dtype=np.float32)
out_vel = np.zeros(6, dtype=np.float32)
out_dis = np.zeros(6, dtype=np.float32)
for i, m in enumerate(self.objects):
angle = m.get_joint_angle()
out_pos[i] = angle
out_vel[i] = m.get_joint_velocity()[0]
out_dis[i] = desired_position[i] - angle
#obsv = np.append(out_pos,out_dis)
obsv = out_pos
# Rewards
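# Each joint contributes (180 - |angle - desired|) / 10000, so the reward grows as joints approach their targets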
reward = 0
for i, m in enumerate(self.objects):
dist_abs = 180 - np.abs( m.get_joint_angle() - desired_position[i] ) #* (i + 1)
reward += dist_abs / 10000
# Dones
done = True
for i, m in enumerate(self.objects):
done *= ( np.square( m.get_joint_angle() - desired_position[i] ) <= 0.5 )
if self.counter == 200:
self.counter = 0
done = True
self.env.stop_simulation()
time.sleep(.2)
if (self.counter % 200 == 0): print(obsv)
return obsv, reward, done
### SCALE AND LIMIT ACTIONS
def map_keys( self , actn ):
return np.multiply(actn, 90)
|
bermeom/quadruped-robot
|
learning/sources/source_unity.py
|
import signal
import sys
import numpy as np
from sources.source import source
from sources.unity.unityagents import UnityEnvironment
##### SOURCE UNITY
class source_unity( source ):
### __INIT__
def __init__( self, game ):
source.__init__( self )
self.env = UnityEnvironment( file_name = "./sources/unity/" + game, worker_id = 0 )
self.brain_name = self.env.brain_names[0]
self.brain_initial_info = self.env.reset(True, None)[self.brain_name]
self.image_obsv = False
def signal_handler(signal, frame):
self.env.close()
print('\nSocket closed!')
print('Program closed!')
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
### INFORMATION
def num_actions( self ):
return self.env.brains[self.brain_name].vector_action_space_size
def num_agents( self ):
return len(self.brain_initial_info.agents)
### START SIMULATION
def start( self ):
obsv = self.env.reset(True, None)[self.brain_name].vector_observations[0]
if (self.image_obsv): obsv = self.env.reset(True, None)[self.brain_name].visual_observations[0][0]
return self.process( obsv )
### MOVE ONE STEP
def move( self , actn ):
# Map Actions
if self.env.brains[self.brain_name].vector_action_space_type == "continuous":
actn = np.reshape( self.num_agents() * [ self.map_keys( actn ) ], [ self.num_agents(), self.num_actions() ] )
else:
actn = np.reshape( self.num_agents() * [ self.map_keys( actn ) ], [ self.num_agents(), 1 ] )
# Step on Environment
brain_info = self.env.step( actn )[ self.brain_name ]
# Get Info
obsv = brain_info.vector_observations[0]
rewd = brain_info.rewards[0]
done = brain_info.local_done[0]
if (self.image_obsv): obsv = brain_info.visual_observations[0][0]
return self.process( obsv ) , rewd , done
|
bermeom/quadruped-robot
|
tensorblock/functions/func_operations.py
|
import tensorflow as tf
import tensorblock as tb
### Copy
def copy( tensors , extras , pars ):
list = []
for i in range( len( tensors[0] ) ):
list.append( tensors[1][i].assign( tensors[0][i] ) )
return list
### Assign
def assign( tensors , extras , pars ):
vars = tf.trainable_variables()
variables0 = [ var for var in vars if str(tensors[0]) in var.name ]
variables1 = [ var for var in vars if str(tensors[1]) in var.name ]
return [ v0.assign(v1) for v0, v1 in zip(variables0, variables1) ]
### Assign Soft
def assign_soft( tensors , extras , pars ):
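# Polyak / soft update: every trainable variable whose name matches tensors[0]
# is moved toward its counterpart matching tensors[1] with rate tau = tensors[2]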
vars = tf.trainable_variables()
variables0 = [ var for var in vars if str(tensors[0]) in var.name ]
variables1 = [ var for var in vars if str(tensors[1]) in var.name ]
return [ v0.assign( v1 * tensors[2] + v0 * (1. - tensors[2] )) for v0, v1 in zip(variables0, variables1) ]
### Mean SoftMax Cross Entropy Logit
def mean_soft_cross_logit( tensors , extras , pars ):
return tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(
logits = tensors[0] , labels = tensors[1] ) )
### Weighted Mean SoftMax Cross Entropy Logit
def weighted_mean_soft_cross_logit( tensors , extras , pars ):
return tf.reduce_mean( tf.multiply(
tf.nn.softmax_cross_entropy_with_logits( logits = tensors[0] , labels = tensors[1] ) , tensors[2] ) )
### Mean Squared Error
def mean_squared_error( tensors , extras , pars ):
return tf.reduce_mean( tf.square( tensors[0] - tensors[1] ) )
### Mean Squared Error (tf.losses version)
def hlmean_squared_error( tensors , extras , pars ):
return tf.losses.mean_squared_error ( tensors[0] , tensors[1] )
### Masked Mean Squared Error
def masked_mean_squared_error( tensors , extras , pars ):
shape = tensors[0].get_shape().as_list()
label , max_seqlen = tensors[0] , shape[1]
if len( tensors ) == 3:
output , seqlen = tensors[1] , tensors[2]
else:
output , seqlen = extras[0] , tensors[1]
mask = tf.sequence_mask( seqlen , max_seqlen , dtype = tf.float32 )
cost = tf.square( label - output )
if len( shape ) == 3:
cost = tf.reduce_sum( cost , reduction_indices = 2 )
cost = tf.reduce_sum( cost * mask , reduction_indices = 1 )
cost /= tf.reduce_sum( mask , reduction_indices = 1 )
return tf.reduce_mean( cost )
### Mean Equal Argmax
def mean_equal_argmax( tensors , extras , pars ):
correct = tf.equal( tf.argmax( tensors[0] , 1 , name = 'ArgMax_1' ) ,
tf.argmax( tensors[1] , 1 , name = 'ArgMax_2' ) )
return tf.reduce_mean( tf.cast( correct , tf.float32 ) )
### Mean Cast
def mean_cast( tensors , extras , pars ):
return tf.reduce_mean( tf.cast( tensors[0] , tf.float32 ) )
### Sum Mul
def sum_mul( tensors , extras , pars ):
axis = len( tb.aux.tf_shape( tensors[0] ) ) - 1
return tf.reduce_sum( tf.multiply(
tensors[0] , tensors[1] ) , axis = axis )
### Mean Variational
def mean_variational( tensors , extras , pars ):
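# VAE objective: squared reconstruction error plus the KL divergence of the
# latent Gaussian (z_mu, z_sig) from a standard normal prior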
z_mu , z_sig = extras
z_mu2 , z_sig2 = tf.square( z_mu ) , tf.square( z_sig )
rec_loss = tf.reduce_sum( tf.square( tensors[0] - tensors[1] ) )
kl_div = - 0.5 * tf.reduce_sum( 1.0 + tf.log( z_sig2 + 1e-10 ) - z_mu2 - z_sig2 , 1 )
return tf.reduce_mean( rec_loss + kl_div )
### Policy Gradients Cost
def pgcost(tensors, extras, pars):
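# REINFORCE loss: negative log-probability of the taken action, weighted by the supplied return/advantage (tensors[2])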
loglik = tf.reduce_sum( tensors[1] * tensors[0], axis = 1 )
return tf.reduce_mean( tf.multiply( -tf.log(loglik + 1e-8), tensors[2] ) )
### Get Gradients
def get_grads(tensors, extras, pars):
return tf.gradients(tensors[0], tensors[1])
### Combine Gradients (DDPG)
def combine_grads(tensors, extras, pars):
vars = tf.trainable_variables()
normal_actor_vars = [var for var in vars if 'NormalActor' in var.name]
return tf.gradients(tensors[0], normal_actor_vars, -tensors[1])
### Policy Gradients Cost (PPO)
def ppocost(tensors, extras, pars):
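# PPO clipped surrogate: ratio = pi(a|s) / pi_old(a|s), clipped to [1 - eps, 1 + eps] before weighting the advantage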
a_pi, o_pi = tensors[0], tensors[1]
actions, advantage, epsilon = tensors[2], tensors[3], tensors[4]
a_prob = tf.reduce_sum( a_pi * actions, axis = 1 )
o_prob = tf.reduce_sum( o_pi * actions, axis = 1 )
ratio = a_prob / ( o_prob + 1e-8 )
cost = -tf.reduce_mean( tf.minimum( ratio * advantage, tf.clip_by_value( ratio, 1.- epsilon, 1. + epsilon) * advantage ) )
return cost
### Policy Gradients Cost (PPO with tf distribution)
def ppocost_distrib(tensors, extras, pars):
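# Same clipped surrogate, but for Gaussian policies; a small entropy bonus (0.001) encourages exploration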
a_mu, a_sigma = tensors[0], tensors[1]
o_mu, o_sigma = tensors[2], tensors[3]
actions, advantage, epsilon = tensors[4], tensors[5], tensors[6]
pi = tf.distributions.Normal( a_mu, a_sigma )
oldpi = tf.distributions.Normal( o_mu, o_sigma )
ratio = pi.prob( actions ) / ( oldpi.prob( actions ) + 1e-8 )
cost = -tf.reduce_mean( tf.minimum( ratio * advantage, tf.clip_by_value( ratio, 1.- epsilon, 1. + epsilon) * advantage ) )
entropy = tf.reduce_mean( tf.reduce_mean ( pi.entropy() ) )
return cost - 0.001 * entropy
# Discriminator Cost
def disccost(tensors, extras, pars):
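# GAIL-style discriminator loss: policy samples (tensors[0]) are pushed toward label 0,
# expert samples (tensors[1]) toward label 1, minus a small entropy bonus to avoid saturation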
g_cost = tf.nn.sigmoid_cross_entropy_with_logits(logits = tensors[0], labels = tf.zeros(tf.shape(tensors[0])))
e_cost = tf.nn.sigmoid_cross_entropy_with_logits(logits = tensors[1], labels = tf.ones(tf.shape(tensors[1])))
logits = tf.concat([tensors[0], tensors[1]], 0)
entropy = tf.reduce_mean(( 1 - tf.nn.sigmoid(logits) ) * logits + tf.nn.softplus( -logits ))
return tf.reduce_mean( g_cost ) + tf.reduce_mean ( e_cost ) - 0.001 * entropy
|
bermeom/quadruped-robot
|
learning/players_reinforcement/player_PPO_1A.py
|
<reponame>bermeom/quadruped-robot<gh_stars>1-10
from players_reinforcement.player_PPO_1 import *
# PLAYER PPO
class player_PPO_1A( player_PPO_1 ):
LEARNING_RATE = 1e-4
UPDATE_SIZE = 5
BATCH_SIZE = 64
EPSILON = 0.2
GAMMA = 0.99
LAM = 0.95
### __INIT__
def __init__( self ):
player_PPO_1.__init__( self )
### PREPARE NETWORK
def network( self ):
# Critic
Critic = self.brain.addBlock( 'Critic' )
Critic.addInput( shape = [ None, self.obsv_shape[0] ], name='Observation' )
Critic.setLayerDefaults( type = tb.layers.fully,
activation = tb.activs.tanh )
Critic.addLayer( out_channels = 64, input = 'Observation' )
Critic.addLayer( out_channels = 64 )
Critic.addLayer( out_channels = 1, name = 'Value', activation = None )
# Actor
Actor = self.brain.addBlock( 'Actor' )
Actor.addInput( shape = [ None, self.obsv_shape[0] ], name = 'Observation' )
Actor.setLayerDefaults( type = tb.layers.fully,
activation = tb.activs.tanh )
Actor.addLayer( out_channels = 64 , input = 'Observation' )
Actor.addLayer( out_channels = 64, name = 'Hidden' )
Actor.addLayer( out_channels = self.num_actions , input = 'Hidden', activation = None, name = 'Mu')
Actor.addLayer( out_channels = self.num_actions , input = 'Hidden', activation = tb.activs.softplus, name = 'Sigma', activation_pars = 0.1 )
Actor.addLayer( out_channels = self.num_actions , input = 'Hidden', activation = tb.activs.softmax, name = 'Discrete' )
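# Mu/Sigma parameterize a Gaussian policy head for continuous control; 'Discrete' is a softmax head for discrete action spaces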
mu = Actor.tensor( 'Mu' )
sigma = Actor.tensor( 'Sigma' )
dist = tb.extras.dist_normal( mu, sigma )
action = dist.sample( 1 )
Actor.addInput( tensor = action, name = 'Output')
# OldActor
Old = self.brain.addBlock( 'Old' )
Old.addInput( shape = [ None, self.obsv_shape[0] ], name = 'Observation' )
Old.setLayerDefaults( type = tb.layers.fully,
activation = tb.activs.tanh )
Old.addLayer( out_channels = 64 , input = 'Observation' )
Old.addLayer( out_channels = 64, name = 'Hidden' )
Old.addLayer( out_channels = self.num_actions , input = 'Hidden', activation = None, name = 'Mu')
Old.addLayer( out_channels = self.num_actions , input = 'Hidden', activation = tb.activs.softplus, name = 'Sigma', activation_pars = 0.1 )
Old.addLayer( out_channels = self.num_actions , input = 'Hidden', activation = tb.activs.softmax, name = 'Discrete' )
|
bermeom/quadruped-robot
|
learning/players_imitation/player_DAgger_1.py
|
<reponame>bermeom/quadruped-robot
from players_imitation.player import player
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import random
# PLAYER DAgger
class player_DAgger_1(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.num_stored_obsv = self.NUM_FRAMES
self.experiences = []
self.s_dataset = []
self.a_dataset = []
self.iteration = 0
self.first_run = True
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
if self.first_run:
BETA = 1
else:
BETA = self.BETA
# Actor Actions
if random.random() > BETA:
if self.continuous:
action = self.brain.run( 'Actor/Output', [ [ 'Actor/Observation', [state] ] ] )
action = np.reshape( action, self.num_actions )
if not self.continuous:
action = np.squeeze( self.brain.run( 'Actor/Discrete', [ [ 'Actor/Observation', [state] ] ] ) )
action = np.random.choice( np.arange( len( action ) ), p = action )
# Expert Actions
else:
if self.continuous:
action = self.brain.run( 'Expert/Output', [ [ 'Expert/Observation', [state] ] ] )
action = np.reshape( action, self.num_actions )
if not self.continuous:
action = np.squeeze( self.brain.run( 'Expert/Discrete', [ [ 'Expert/Observation', [state] ] ] ) )
action = np.random.choice( np.arange( len( action ) ), p = action )
return self.create_action( action )
# PREPARE NETWORK
def operations(self):
# Placeholders
self.brain.addInput( shape = [ None, self.num_actions ], name = 'Labels' )
# Operations
# Actor
if self.continuous:
self.brain.addOperation( function = tb.ops.hlmean_squared_error,
input = [ 'Actor/Output', 'Labels' ],
name = 'ActorCost' )
if not self.continuous:
self.brain.addOperation( function = tb.ops.hlmean_squared_error,
input = [ 'Actor/Discrete', 'Labels' ],
name = 'ActorCost' )
self.brain.addOperation( function = tb.optims.adam,
input = 'ActorCost',
learning_rate = self.LEARNING_RATE,
name = 'ActorOptimizer' )
# Expert
if self.continuous:
self.brain.addOperation( function = tb.ops.hlmean_squared_error,
input = [ 'Expert/Output', 'Labels' ],
name = 'ExpertCost' )
if not self.continuous:
self.brain.addOperation( function = tb.ops.hlmean_squared_error,
input = [ 'Expert/Discrete', 'Labels' ],
name = 'ExpertCost' )
self.brain.addOperation( function = tb.optims.adam,
input = 'ExpertCost',
learning_rate = self.LEARNING_RATE,
name = 'ExpertOptimizer' )
# RUN ONCE ON START
def on_start( self ):
self.s_dataset = np.load('../datasets/' + self.DATASET + '_states.npy' )[ 0 : self.DS_SIZE ]
self.a_dataset = np.load('../datasets/' + self.DATASET + '_actions.npy' )[ 0 : self.DS_SIZE ]
#self.s_dataset = np.expand_dims(self.s_dataset,2)
# Train Expert Once
for _ in range ( self.EPOCHS * len(self.s_dataset) // self.BATCH_SIZE ):
idx = np.random.randint(len(self.s_dataset), size = self.BATCH_SIZE)
states = self.s_dataset[idx,:]
actions = self.a_dataset[idx,:]
self.brain.run( [ 'ExpertOptimizer' ], [ [ 'Expert/Observation', states ],
[ 'Labels', actions ] ] )
# TRAIN NETWORK
def train( self, prev_state, curr_state, actn, rewd, done, episode ):
# Store New Experience Until Done
self.experiences.append( (prev_state, curr_state, actn, rewd, done) )
# Check for Aggregate and Train
if ( len(self.experiences) >= self.TIME_TO_UPDATE ):
# Change Beta
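# BETA is the probability of querying the expert in act(); raising it to the growing
# iteration count drives it toward zero (assuming 0 < BETA < 1), shifting control to the learned actor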
self.iteration += 1
self.BETA **= self.iteration
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = [d[0] for d in batch]
curr_states = [d[1] for d in batch]
#actions = [d[2] for d in batch]
rewards = [d[3] for d in batch]
dones = [d[4] for d in batch]
# Calculate Expert Actions
exp_actions = self.brain.run( 'Expert/Output', [ [ 'Expert/Observation', prev_states ] ] )
# Aggregate Datasets
self.s_dataset = np.concatenate ( (self.s_dataset, prev_states ), axis = 0 )
self.a_dataset = np.concatenate ( (self.a_dataset, exp_actions ), axis = 0 )
# Train Actor
for _ in range ( self.EPOCHS * len(self.s_dataset) // self.BATCH_SIZE ):
idx = np.random.randint(len(self.s_dataset), size = self.BATCH_SIZE)
states = self.s_dataset[idx,:]
actions = self.a_dataset[idx,:]
self.brain.run( [ 'ActorOptimizer' ], [ [ 'Actor/Observation', states ],
[ 'Labels', actions ] ] )
# Reset
self.experiences = []
self.first_run = False
|
bermeom/quadruped-robot
|
tensorblock/layers/layer_flatten.py
|
import tensorblock as tb
class layer_flatten:
####### Data
def name(): return 'Flatten'
def shapeMult(): return None
def dims(): return 1
def allowPooling(): return False
####### Function
def function( x , W , b , recipe , pars ):
layer = tb.aux.tf_flatten( x )
return [ layer ] , pars , None
####### Shapes
def shapes( input_shape , pars ):
return None , None
|
bermeom/quadruped-robot
|
learning/sources/source_unity_exporter.py
|
import tensorflow as tf
import tensorblock as tb
from tensorflow.python.tools import freeze_graph
def export_ugraph( brain, model_path, env_name, target_nodes):
"""
Unity ML Agents
Exports latest saved model to .bytes format for Unity embedding.
:param brain: tensorblock brain
:param model_path: path of model checkpoints.
:param env_name: Name of associated Learning Environment.
:param target_nodes: Comma separated string of needed output nodes for embedded graph.
Example: To export: from sources.source_unity_exporter import *
export_ugraph (self.brain, "./trained_models/unity_contcatch_player_DDPG/", "continuouscatcher", "NormalActor/Output/Tanh")
raise SystemExit(0)
On Unity: scope = NormalActor/
action = /Output/Tanh
observation = Observation/Placeholder
"""
tf.train.write_graph(tf.Session().graph_def, model_path, 'raw_graph_def.pb', as_text=False)
ckpt = tf.train.get_checkpoint_state(model_path)
freeze_graph.freeze_graph(input_graph=model_path + '/raw_graph_def.pb',
input_binary=True,
input_checkpoint=ckpt.model_checkpoint_path,
output_node_names=target_nodes,
output_graph=model_path + '/' + env_name + '.bytes',
clear_devices=True, initializer_nodes="", input_saver="",
restore_op_name="save/restore_all", filename_tensor_name="save/Const:0")
|
bermeom/quadruped-robot
|
learning/sources/source.py
|
<filename>learning/sources/source.py
import time
from collections import deque
from auxiliar.aux_plot import *
##### SOURCE
class source:
### __INIT__
def __init__( self ):
self.elapsed_time = 0
self.avg_rewd = deque()
self.sum_rewd = 0
self.acc_rewd = 0
self.acc_time = 0
self.episode_rewards = deque()
self.episode_lengths = deque()
self.time_rewards = deque()
self.accumulated_lenghts = deque()
self.timer = time.time()
return None
### DUMMY FUNCTIONS
def num_actions( self ): return 0
def range_actions( self ): return -1
def map_keys( self , action ): return 0
def process( self , obsv ): return obsv
### VERBOSE OUTPUT
def verbose( self , episode , rewd , done , avg_length = 10 ):
self.sum_rewd += rewd
if done:
self.acc_rewd += self.sum_rewd
self.avg_rewd.append( self.sum_rewd )
if len( self.avg_rewd ) > avg_length : self.avg_rewd.popleft()
now = time.time()
self.elapsed_time = ( now - self.timer )
self.timer = now
self.acc_time += self.elapsed_time
print( '*** Episode : %5d | Time : %6.2f s | Rewards : %9.3f | Average : %9.3f |' % \
( episode + 1 , self.elapsed_time , self.sum_rewd ,
sum( self.avg_rewd ) / len( self.avg_rewd ) ) , end = '' )
#self.env.info()
self.episode_rewards.append(self.sum_rewd)
self.episode_lengths.append(self.elapsed_time)
self.time_rewards.append([self.sum_rewd, self.acc_time])
self.accumulated_lenghts.append(self.acc_time)
plot_episode_stats(self.episode_lengths, self.episode_rewards, self.accumulated_lenghts, self.time_rewards)
self.sum_rewd = 0
|
bermeom/quadruped-robot
|
learning/sources/source_gym_pong.py
|
<reponame>bermeom/quadruped-robot
from sources.source_gym import source_gym
import cv2
import numpy as np
##### SOURCE GYM PONG
class source_gym_pong( source_gym ):
### __INIT__
def __init__( self ):
source_gym.__init__( self , 'Pong-v4' )
### INFORMATION
def num_actions( self ): return 3
### MAP KEYS
def map_keys( self , actn ):
if actn[0] : return 1
if actn[1] : return 2
if actn[2] : return 3
### PROCESS OBSERVATION
def process( self , obsv ):
obsv = cv2.resize( obsv , ( 80 , 80 ) )
obsv = cv2.cvtColor( obsv , cv2.COLOR_BGR2GRAY )
_ , obsv = cv2.threshold( obsv , 97 , 255 , cv2.THRESH_BINARY )
return obsv
|
bermeom/quadruped-robot
|
tensorblock/aux/aux_parse.py
|
import numpy as np
### Parse Pairs
def parse_pairs( data ):
if not isinstance( data , list ):
return [ [ data , None ] ]
for i in range( len( data ) ):
if not isinstance( data[i] , list ):
data[i] = [ data[i] , None ]
return data
### Get Folder
def get_folder( name ):
folder , list = '' , name.split( '/' )
for i in range( len( list ) - 1 ):
folder += list[i] + '/'
return folder
### Clean Duplicates
def clean_dups( str ):
clean = str[:1]
for i in range( 1 , len( str ) ):
if str[i] != str[i-1]:
clean += str[i]
return clean
|
bermeom/quadruped-robot
|
tensorblock/functions/func_optimizers.py
|
import tensorflow as tf
### Adam Optimizer
def adam( tensors , extras , pars ):
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
trainer = tf.train.AdamOptimizer( pars['learning_rate'] )
update = trainer.minimize( tensors[0] )
return update
### Gradient Descent Optimizer
def gradient_descent( tensors , extras , pars ):
return tf.train.GradientDescentOptimizer( pars['learning_rate'] ).minimize( tensors[0] )
### Adam Optimizer (Apply Pre-Computed Gradients)
def adam_apply( tensors , extras , pars ):
vars = tf.trainable_variables()
normal_actor_vars = [var for var in vars if 'NormalActor' in var.name]
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
trainer = tf.train.AdamOptimizer( pars['learning_rate'] )
update = trainer.apply_gradients(zip(tensors, normal_actor_vars ) )
return update
|
bermeom/quadruped-robot
|
envs/test_robot.py
|
#!/usr/bin/env python3
"""
Loads the quadruped MuJoCo model and drives its leg actuators with a simple open-loop pattern while rendering.
"""
import numpy as np
import math
from pprint import pprint
import mujoco_py as mpy #import load_model_from_path, MjSim, MjViewer
import os
model = mpy.load_model_from_path("xml/quadrupedrobot.xml")
# model = mpy.mujoco_py.load_model_from_path("xml/humanoid.xml")
sim = mpy.MjSim(model)
viewer = mpy.MjViewer(sim)
sim_state = sim.get_state()
pprint (sim_state);
k = 1;
qfrc_target = sim.data.qfrc_applied;
force = qfrc_target;#np.array([0, +150, 0],dtype=np.float64);
# force[1]=150;
force0 = np.array([0, -150, 0],dtype=np.float64);
torque = np.array([0, 0, 0],dtype=np.float64);
point = np.array([0, 0, 0],dtype=np.float64) ;
body = 1;
qfrc_target = sim.data.qfrc_applied;
perp = mpy.cymj.PyMjvPerturb();
# print("start",qfrc_target);
sw = True;
while True:
print ("0 - orientation -> body_quat ",sim.data.qpos.flat[3:7])
# print ("0 - orientation -> body_quat ",sim.data.qpos.flat[0:7])
mat = np.zeros(9, dtype=np.float64)
mpy.functions.mju_quat2Mat(mat, sim.data.body_xquat[2]);
# print ("0 - orientation -> ",mat)
# print("0 - data -> ",sim.data.qM)
# sw = False;
sim.set_state(sim_state)
print ("0 - orientation -> body_quat ",sim.data.qpos.flat[3:7])
mat = np.zeros(9, dtype=np.float64)
mpy.functions.mju_quat2Mat(mat, sim.data.body_xquat[2]);
# print ("orientation -> ",mat)
# print("data -> ",sim.data.qM)
# print("qfrc_applied[0]-> ",sim.data.qfrc_applied);
force[1]=-150;
# mpy.functions.mj_applyFT(sim.model,sim.data,force,torque,point,body,sim.data.qfrc_applied);
sim.step();
# print("qfrc_applied[1]-> ",sim.data.qfrc_applied);
# mpy.functions.mj_applyFT(sim.model,sim.data,-sim.data.qfrc_applied,torque,point,body,sim.data.qfrc_applied);
if sw :
force[1]=0;
sw = not sw;
# sim.step();
# sim.data.qfrc_applied[0]=0;
# mpy.functions.mjv_applyPerturbForce(sim.model,sim.data,perp);
# print("qfrc_applied[2]-> ",sim.data.qfrc_applied);
# pprint(sim.data.qpos.flat)
# sim.data.ctrl[4*0+3]=-1;
# sim.data.ctrl[4*1+3]=-1;
# sim.data.ctrl[4*2+3]=-1;
# sim.data.ctrl[4*3+3]=-1;
sim.step();
observations = np.concatenate([
sim.data.qpos,
sim.data.qvel,
]);
orientation = sim.data.qpos.flat[3:7];
# print(len(sim.data.qpos)," ",len(sim.data.qvel),"\n",observations);
for i in range(500):
if i < 200 or i > 800:
sim.data.ctrl[3]=1;
sim.data.ctrl[k] = -0.5
sim.data.ctrl[k+4] = 0.5
sim.data.ctrl[k+2*4] = -0.5
sim.data.ctrl[k+3*4] = 0.5
else:
# sim.data.ctrl[3]=-1;
sim.data.ctrl[k] = 1.0/2
sim.data.ctrl[k+4] = -1.0/2
sim.data.ctrl[k+2*4] = 1.0/2
sim.data.ctrl[k+3*4] = -1.0/2
sim.step();
viewer.render();
orientation = sim.data.qpos.flat[3:7]; # w x y z
if (orientation[1]+orientation[2]>0.5):
break;
# mpy.functions.mj_applyFT(sim.model,sim.data,force0,torque,point,body,qfrc_target);
# print("qfrc_target",qfrc_target);
observations = np.concatenate([
sim.data.qpos.flat[1:],
sim.data.qvel.flat,
]);
# print("END",len(sim.data.qpos)," ",len(sim.data.qvel),"\n",observations);
# mat = np.zeros(9, dtype=np.float64)
# mpy.functions.mju_quat2Mat(mat, sim.data.body_xquat[2]);
# print ("orientation -> ",mat)
# print("data -> ",sim.data.qM)
if os.getenv('TESTING') is not None:
break
|
JanaKat-git/MovieRecommender
|
recommender.py
|
<filename>recommender.py
'''
File with function for the Movie Recommender
'''
import pandas as pd
import numpy as np
from sklearn.decomposition import NMF
def create_user_item_matrix(file_ratings, file_movies, n_rows):
'''
Merge the ratings and movies CSV files into a single pd.DataFrame (one row per rating, with movie title).
Parameters
---------
file_ratings: str
csv-file with ratings data
file_movies: str
csv-file with movie data
n_rows: int
Number of Rows reading from the csv-files.
Returns
---------
df_ratings_movie: pd.DataFrame
The merged ratings/movies DataFrame, later pivoted into the user-item matrix by model_recommender
'''
ratings = pd.read_csv(file_ratings, nrows=n_rows) # full dataset is too big (kernel crashes), so only the first n_rows ratings are read
#make a dict with movieId and title
movies = pd.read_csv(file_movies, index_col=0)
movies.drop(columns='genres', inplace=True)
df_ratings_movie = pd.merge(ratings, movies, how='inner', on='movieId')
df_ratings_movie.drop(columns=['timestamp','movieId'], inplace=True)
return df_ratings_movie
def model_recommender(df):
'''
Uses a pd.DataFrame of ratings to create and train an NMF model.
Creates the user-item matrix (R), the user feature matrix (P) and the item feature matrix (Q).
Parameters
---------
df: pd.DataFrame
dataframe of an user-item-matrix
Returns
---------
R: pd.DataFrame
The created user-item-matrix
P: pd.DataFrame
The user feature matrix
Q: pd.DataFrame
item feature matrix
nmf: NMF(l1_ratio=0.5, max_iter=5000, n_components=150)
The trained nmf model
'''
#Create user-item-matrix
R = df.pivot(index='userId',
columns='title',
values='rating'
)
# Fill empty values
R= R.fillna(2.5)
#Create and train model
nmf = NMF(n_components = 150,
max_iter=5_000,
#alpha = 0.2,
l1_ratio= 0.5) # instantiate model
nmf.fit(R) #fit R to the model
#create Q: item feature matrix
Q = pd.DataFrame(nmf.components_, columns=R.columns)
#create P: user feature matrix
P = pd.DataFrame(nmf.transform(R), index=R.index)
#create R_hat: matrix multiplication of P and Q
R_hat = pd.DataFrame(np.dot(P,Q), columns=R.columns, index=R.index)
#evaluate error: delta(R, R_hat)
nmf.reconstruction_err_
return R, P, Q, nmf
def user_recommendation(input_dict, R, Q, nmf):
'''
Uses trained model to make recommendations for new user.
Parameters
---------
input_dict: dict
userinput with movies and ratings.
R: pd.DataFrame
user-item-matrix
Q: pd.DataFrame
item feature matrix
nmf: NMF(l1_ratio=0.5, max_iter=5000, n_components=150)
The trained model
Returns
---------
recommendations_user[:5]: list with first 5 entries
The first 5 recommendations for the new_user.
'''
#create a sorted ranking list (first item --> first ranking corresponds to first movie)
ranking = []
for i in list(range(0,5)):
ranking.append(input_dict[sorted(input_dict.keys())[i]])
#create a sorted movie title list (first item --> first movie)
titel = []
for i in list(range(5,10)):
titel.append(input_dict[sorted(input_dict.keys())[i]])
#create a dict out of ranking & title list to use as input to create a pd.DataFrame(new_user)
dict_user = {titel[i]:ranking[i] for i in range(len(titel))}
new_user = pd.DataFrame(data=dict_user, index=['new_user'], columns=R.columns)
new_user = new_user.fillna(2.54)
#transform P matrix
user_P = nmf.transform(new_user)
#create user-item-matrix for new_user
user_R = pd.DataFrame(np.dot(user_P, Q), columns=R.columns, index=['new_user'])
#make recommendations, excluding the movie titles the new user already rated
recommendations = user_R.drop(columns=titel)
#create a list with the recommended movie titles
recommendations_user = list(recommendations.sort_values(axis=1, by='new_user', ascending=False))
return recommendations_user[:5]
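# Typical usage (sketch; the file names below are placeholders):
#   df = create_user_item_matrix('ratings.csv', 'movies.csv', 10_000)
#   R, P, Q, nmf = model_recommender(df)
#   recs = user_recommendation(user_input_dict, R, Q, nmf)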
|
JanaKat-git/MovieRecommender
|
create_matrix_model.py
|
'''
File to:
(1) load the user-item matrix and
(2) train an NMF model (use recommender.model_recommender)
(3) pickle the model and matrices in the file 'nmf_model'
'''
from recommender import model_recommender, create_user_item_matrix
import pandas as pd
import pickle
df = create_user_item_matrix('RATINGS', 'MOVIES', 10_000)
df.to_csv('user_item_matrix.csv')
#Load the user_item_matrix
df = pd.read_csv('user_item_matrix.csv')
#Create and train a NMF model with the user_item_matrix
R, P, Q, nmf = model_recommender(df)
#Pickle the model and matrices
with open("nmf_m.pkl", "wb") as f:
pickle.dump([R, P, Q, nmf], f)
|
JanaKat-git/MovieRecommender
|
application.py
|
<filename>application.py
'''
Controller file for the web application
The central file of the application
'''
from flask import Flask
from flask import render_template
from flask import request
from recommender import user_recommendation
import pickle
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html', title='Awesome Movie Recommender')
@app.route('/recommender')
def recommender():
#save user input as dict and print it
html_from_data = dict(request.args)
print(html_from_data)
#load pickled model
with open("nmf_m.pkl", "rb") as f:
R, P, Q, nmf = pickle.load(f)
#make recommendations for new user
recs = user_recommendation(html_from_data, R, Q, nmf)
print(recs)
return render_template("recommendations.html",movies=recs)
if __name__ == "__main__":
app.run(debug=True, port=5500)
|
thobianchi/ansible-role-sys_update
|
molecule/default/tests/test_default.py
|
<filename>molecule/default/tests/test_default.py
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_dep_pkg(host):
if host.system_info.distribution == "CentOS":
pkg = host.package('yum-utils')
assert pkg.is_installed
if host.system_info.distribution == "RedHat":
pkg = host.package('yum-utils')
assert pkg.is_installed
if host.system_info.distribution == "Fedora":
pkg = host.package('tracer')
assert pkg.is_installed
|
maikealame/xpboards-python-lib
|
xpboards/dataset.py
|
import csv
import json
from uuid import uuid4
class XPBoardsDataSet:
def __init__(self, dataset, convert_types=False):
"""
The dataset must have the following pattern:
{
"id": <integer>, (for XPBoards datasets that already exists, optional)
"name": <string>,
"columns":[
{
"type": <string>,
"name": <string>
}
],
"rows": [
"values": [
{
"value": <any> (any primitive type)
}
]
]
}
"""
self.__id = dataset.get('id', None)
self.__name = dataset.get('name', '')
self.__columns = []
for column in dataset['columns']:
self.__columns.append(
self.Column(
name=column['name'],
value_type=column.get('type', '')
)
)
self.__rows = []
for row in dataset['rows']:
rows = []
for i in range(len(self.__columns)):
value = row['values'][i]['value']
if convert_types:
rows.append(
self.Row.convert(
value=value,
to_type=self.__columns[i].value_type
)
)
else:
rows.append(value)
self.__rows.append(
self.Row(
values = rows
)
)
# Private
# Static
@staticmethod
def __parse_dict(data_dict):
columns = []
rows = []
item_count = 0
for row in data_dict:
row_values = []
for key, value in row.items():
if item_count == 0:
columns.append({
'name':key,
'type': None
})
row_values.append({'value':value})
rows.append({
'values': row_values
})
item_count += 1
return XPBoardsDataSet({
'columns':columns,
'rows': rows
})
# Instance
def __parse_to_dict(self):
dataset_json = []
for row in self.__rows:
json_row = {}
for i in range(self.columns_count):
json_row[self.__columns[i].name] = row.values[i]
dataset_json.append(json_row)
return dataset_json
# Public
# Properties
@property
def id(self):
"""
Returns the dataset id
"""
return self.__id
@property
def columns_count(self):
"""
Returns the number of columns in the dataset
"""
return len(self.__columns)
@property
def items_count(self):
"""
Returns the number of rows in the dataset
"""
return len(self.__rows)
@property
def columns(self):
"""
Returns the list of columns of the dataset
"""
return self.__columns
@property
def items(self):
"""
Returns the list of rows of the dataset (matrix shape)
"""
return self.__rows
@property
def shape(self):
"""
Returns the rows X columns of the dataset in a tuple
"""
return self.items_count, self.columns_count
@property
def name(self):
"""
Returns the dataset name
"""
return self.__name
@name.setter
def name(self, value):
"""
Sets the name of the dataset
"""
self.__name = value
# Static
# @staticmethod
# def from_csv(path, delimiter=','):
# """
# Returns a new XPBoardsDataSet instance reading from specified CSV file
# """
# columns = []
# rows = []
# with open(path) as csv_file:
# reader = csv.reader(csv_file, delimiter=delimiter)
# row_count = 0
# for row in reader:
# if row_count == 0:
# for column in row:
# column_dict = {
# 'type': None,
# 'name': column
# }
# columns.append(column_dict)
# else:
# row_dict = {
# 'values': list(map(lambda item: { 'value': item }, row))
# }
# rows.append(row_dict)
# row_count += 1
# return XPBoardsDataSet({
# "columns": columns,
# "rows" : rows
# })
@staticmethod
def read_dict(data):
"""
Returns a new XPBoardsDataSet instance from a "data" dictionary param.
The expected dictionary format is the same as pandas DataFrame.to_json(orient='records') output.
Example:
[
{
"Name":"Max",
"Age": "32"
},
{
"Name":"Scarlett",
"Age": "25"
}
]
"""
return XPBoardsDataSet.__parse_dict(data)
@staticmethod
def read_json(path):
"""
Returns a new XPBoardsDataSet instance from a JSON file from provided "path" param
The expected JSON format is the same as pandas DataFrame.to_json(orient='records') output.
Example:
[
{
"Name":"Max",
"Age": "32"
},
{
"Name":"Scarlett",
"Age": "25"
}
]
"""
data_dict = None
with open(path) as json_file:
data_dict = json.load(json_file)
return XPBoardsDataSet.__parse_dict(data_dict)
# Instance
def to_csv(self, path, delimiter=',', quotechar='"'):
"""
Outputs dataset to a CSV file given a path ('path' param)
"""
with open(path, mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=delimiter, quotechar=quotechar)
writer.writerow(self.columns)
for row in self.__rows:
writer.writerow(row.values)
def to_json(self, path):
"""
Outputs the dataset to a JSON file given a path ('path' param)
"""
dataset_json = self.__parse_to_dict()
with open(path, 'w') as f:
json.dump(dataset_json, f)
def to_dict(self):
"""
Returns the dataset as a python list of dicts
"""
return self.__parse_to_dict()
def to_api(self):
columns = list(map(
lambda column: {
'name': column.name,
'type': None if column.value_type == "" else column.value_type
}, self.__columns))
rows = list(map(
lambda row: row.values, self.__rows
))
return ({
'name': self.name,
'columns': columns,
'rows': rows
})
def append_item(self, values):
"""
Append item to the end of the dataset, with given array of values
"""
self.__rows.append(
self.Row(
values=values
)
)
def remove_item(self, item_index):
"""
Remove specified item from the dataset
"""
self.__rows.pop(item_index)
def append_column(self, name, value_type='text', default_value=''):
"""
Append a column to the end of all columns, setting a default value and type for all items in the dataset
"""
self.__columns.append(
self.Column(name=name, value_type=value_type)
)
for row in self.__rows:
row.values.append(default_value)
def edit_column(self, column_index, name=None, value_type=None):
"""
Edit specified column with value and value_type
"""
self.__columns[column_index].value_type = (
value_type if value_type else self.__columns[column_index].value_type
)
self.__columns[column_index].name = (
name if name else self.__columns[column_index].name
)
def remove_column(self, column_index):
"""
Remove specified column from the dataset
"""
self.__columns.pop(column_index)
for row in self.__rows:
row.values.pop(column_index)
# Child Classes
class ColumnTypes:
DECIMAL = 'decimal'
INTEGER = 'integer'
PERCENTAGE = 'percentage'
DATE = 'date'
DATETIME = 'datetime'
TIME = 'time'
TEXT = 'text'
BOOLEAN = 'boolean'
class Column:
def __init__(self, name, value_type):
self.__name = name
self.__value_type = value_type
self.__id = uuid4().hex
@property
def id(self):
return self.__id
def __repr__(self):
return self.__name
@property
def name(self):
return self.__name
@name.setter
def name(self, value):
self.__name = value
@property
def value_type(self):
return self.__value_type
@value_type.setter
def value_type(self, value):
self.__value_type = value
class Row:
def __init__(self, values):
self.__values = values
self.__id = uuid4().hex
@property
def id(self):
return self.__id
@property
def values(self):
return self.__values
@values.setter
def values(self, value):
self.__values = value
@staticmethod
def convert(value, to_type):
types_converter = {}
types_converter[XPBoardsDataSet.ColumnTypes.DECIMAL] = lambda x: float(x)
types_converter[XPBoardsDataSet.ColumnTypes.INTEGER] = lambda x: int(x)
types_converter[XPBoardsDataSet.ColumnTypes.PERCENTAGE] = lambda x: str(x)
types_converter[XPBoardsDataSet.ColumnTypes.DATE] = lambda x: str(x)
types_converter[XPBoardsDataSet.ColumnTypes.DATETIME] = lambda x: str(x)
types_converter[XPBoardsDataSet.ColumnTypes.TIME] = lambda x: str(x)
types_converter[XPBoardsDataSet.ColumnTypes.TEXT] = lambda x: str(x)
types_converter[XPBoardsDataSet.ColumnTypes.BOOLEAN] = lambda x: bool(x)
try:
return types_converter[to_type](value)
except (ValueError, KeyError):
print(f'Cannot convert {value} to {to_type}, falling back to {XPBoardsDataSet.ColumnTypes.TEXT}')
return types_converter[XPBoardsDataSet.ColumnTypes.TEXT](value)
|
maikealame/xpboards-python-lib
|
tests/test_dataset.py
|
import pytest
from xpboards import dataset as xpb_dataset
DATASET_DICT = [
{
'firstname': 'Maike',
'lastname': 'Alame'
},
{
'firstname': 'Fernando',
'lastname': 'Palmeiro'
}
]
# Fixtures
@pytest.fixture
def dataset_instance():
return xpb_dataset.read_dict(DATASET_DICT)
# Test dataset importing
def test_dataset_read_dict():
dataset = xpb_dataset.read_dict(DATASET_DICT)
assert isinstance(dataset, xpb_dataset)
# Test dataset properties
def test_dataset_shape(dataset_instance):
shape = dataset_instance.shape
assert shape[0] == 2 and shape[1] == 2
# Test dataset manipulation
def test_dataset_append_column(dataset_instance):
dataset_columns_count_before = dataset_instance.columns_count
dataset_instance.append_column(name='awake', value_type='boolean', default_value='true')
assert dataset_columns_count_before + 1 == dataset_instance.columns_count
def test_dataset_edit_column(dataset_instance):
new_name = 'notfirstname'
dataset_instance.edit_column(column_index=0, name=new_name)
assert new_name == dataset_instance.columns[0].name
def test_dataset_remove_column(dataset_instance):
dataset_columns_count_before = dataset_instance.columns_count
dataset_instance.remove_column(column_index=0)
assert dataset_columns_count_before - 1 == dataset_instance.columns_count
def test_dataset_append_item(dataset_instance):
dataset_items_count_before = dataset_instance.items_count
dataset_instance.append_item(['Douglas', 'Eloy'])
assert dataset_items_count_before + 1 == dataset_instance.items_count
def test_dataset_remove_item(dataset_instance):
dataset_items_count_before = dataset_instance.items_count
dataset_instance.remove_item(item_index=0)
assert dataset_items_count_before - 1 == dataset_instance.items_count
|
maikealame/xpboards-python-lib
|
xpboards/services.py
|
import requests
from .plan import XPBoardsPlan
from .dataset import XPBoardsDataSet
class XPBoardsServices:
__API_VERSION = 'v1'
__BASE_URL = f'https://hom.web.xpboards.com.br/api/{__API_VERSION}'
__DEFAULT_HEADERS = {'Accept': 'application/json', 'Content-Type': 'application/json' }
def __init__(self, email, password):
self.__token = self.__generate_token(email, password)
def __handle_request_errors(self, message):
raise Exception(message)
def __handle_response(self, response):
errors = response.get('errors', None)
if errors:
self.__handle_request_errors(message=f'Got the following errors from api service: {repr(errors)}')
data = response.get('data', None)
if data is None:
self.__handle_request_errors(message=f'No "data" found in the response body')
if not isinstance(data, list):
code = data.get('code', None)
if code == 'max_dataset_count':
self.__handle_request_errors(message=f'Got the following message from api service: {data["message"]}')
return data
def __generate_token(self, email, password):
url = f'{self.__BASE_URL}/login'
headers = self.__DEFAULT_HEADERS
body = {
'email': email,
'password': password
}
response = requests.post(
url=url,
headers=headers,
json=body
)
data = self.__handle_response(response.json())
token = data.get('access_token', None)
if token is not None:
return token['accessToken']
else:
raise Exception(f'Expecting token, instead got {repr(data)}')
def get_token(self):
return self.__token
def __get_auth_headers(self):
headers = dict(self.__DEFAULT_HEADERS)  # copy so the shared default headers are not mutated
headers['Authorization'] = f'Bearer {self.__token}'
return headers
def get_plan(self):
url = f'{self.__BASE_URL}/userdata'
headers = self.__get_auth_headers()
response = requests.get(
url=url,
headers=headers
)
return XPBoardsPlan(self.__handle_response(response.json()))
def list_datasets(self):
url = f'{self.__BASE_URL}/dataset'
headers = self.__get_auth_headers()
response = requests.get(
url=url,
headers=headers
)
return self.__handle_response(response.json())
def list_dataset_items(self, dataset_id, raw=False):
url = f'{self.__BASE_URL}/dataset/{dataset_id}'
headers = self.__get_auth_headers()
response = requests.get(
url=url,
headers=headers
)
data = self.__handle_response(response.json())
if raw:
return data
else:
return XPBoardsDataSet(data, convert_types=True)
def create_dataset(self, data, name=None):
if isinstance(data, XPBoardsDataSet):
if name:
data.name = name
url = f'{self.__BASE_URL}/dataset'
headers = self.__get_auth_headers()
response = requests.post(
url=url,
headers=headers,
json=data.to_api()
)
data = self.__handle_response(response.json())
return data
else:
raise Exception('The "data" param is not a XPBoardsDataSet instance')
def update_dataset(self, dataset_id, data):
"""
Replaces specified dataset with sent data
"""
if isinstance(data, XPBoardsDataSet):
url = f'{self.__BASE_URL}/dataset/{dataset_id}'
headers = self.__get_auth_headers()
response = requests.put(
url=url,
headers=headers,
json=data.to_api()
)
data = self.__handle_response(response.json())
return data
else:
raise Exception('The "data" param is not a XPBoardsDataSet instance')
def clear_dataset(self, dataset_id, data):
"""
Clear all dataset items
"""
if isinstance(data, XPBoardsDataSet):
url = f'{self.__BASE_URL}/dataset/{dataset_id}'
headers = self.__get_auth_headers()
cleared_data = data.to_api()
cleared_data['rows'] = []
response = requests.put(
url=url,
headers=headers,
json=cleared_data
)
data = self.__handle_response(response.json())
return data
else:
raise Exception('The "data" param is not a XPBoardsDataSet instance')
|
maikealame/xpboards-python-lib
|
xpboards/__init__.py
|
from .services import XPBoardsServices as services
from .dataset import XPBoardsDataSet as dataset
from .plan import XPBoardsPlan as plan
|
maikealame/xpboards-python-lib
|
xpboards/plan.py
|
<gh_stars>0
class XPBoardsPlan:
def __init__(self, user_dict):
datasets_limit = user_dict['customer']['datasets_limit']
datasets_count = user_dict['customer']['datasets_count']
datasets_limit = 0 if datasets_limit == None else datasets_limit
datasets_count = 0 if datasets_count == None else datasets_count
self._avaiable_datasets=(datasets_limit - datasets_count)
devices_limit = user_dict['customer']['devices_limit']
devices_count = user_dict['customer']['devices_count']
devices_limit = 0 if devices_limit == None else devices_limit
devices_count = 0 if devices_count == None else devices_count
self._avaiable_devices=(devices_limit - devices_count)
dashboards_limit = user_dict['customer']['dashboards_limit']
dashboards_count = user_dict['customer']['dashboards_count']
dashboards_limit = 0 if dashboards_limit == None else dashboards_limit
dashboards_count = 0 if dashboards_count == None else dashboards_count
self._avaiable_dashboards=(dashboards_limit - dashboards_count)
self._is_active=user_dict['customer']['is_active']
@property
def avaiable_datasets(self):
return self._avaiable_datasets
@property
def avaiable_devices(self):
return self._avaiable_devices
@property
def avaiable_dashboards(self):
return self._avaiable_dashboards
|
maikealame/xpboards-python-lib
|
setup.py
|
from setuptools import setup, find_packages
from os import path
# Reads documentation from README.md
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='xpboards',
packages=find_packages(include=['xpboards']),
version='0.1.2',
description='A python util for xpboards',
author='<NAME>',
install_requires=['requests'],
setup_requires=['pytest-runner'],
tests_require=['pytest==6.2.2'],
license='MIT',
long_description=long_description,
long_description_content_type='text/markdown'
)
|
rr326/ad_app_autoclimate
|
_autoclimate/laston.py
|
import datetime as dt
import json
from typing import Dict, List, Optional
from _autoclimate.state import State
from _autoclimate.utils import climate_name
from adplus import Hass
"""
Laston - create new sensors that track the last time the climate
was "on" as defined by autoclimate entity_rules.
sensor.autoclimate_gym_laston = <datetime>
"""
class Laston:
def __init__(
self,
hass: Hass,
config: dict,
appname: str,
climates: list,
appstate_entity: str,
test_mode: bool,
):
self.hass = hass
self.aconfig = config
self.appname = appname
self.test_mode = test_mode
self.climates = climates
self.appstate_entity = appstate_entity
self.climate_states: Dict[str, TurnonState] = {}
self.hass.run_in(self.initialize_states, 0)
def initialize_states(self, kwargs):
for climate in self.climates:
self.climate_states[climate] = TurnonState(self.hass, self.aconfig, climate)
# After initialization
self.hass.run_in(self.create_laston_sensors, 0)
self.hass.run_in(self.init_laston_listeners, 0.1)
def laston_sensor_name(self, climate):
return self.laston_sensor_name_static(self.appname, climate)
@staticmethod
def laston_sensor_name_static(appname, climate):
return f"sensor.{appname}_{climate_name(climate)}_laston"
def create_laston_sensors(self, kwargs):
self.get_history_data()
for climate in self.climates:
laston_sensor_name = self.laston_sensor_name(climate)
laston_date = self.climate_states[climate].last_turned_on
self.hass.update_state(
laston_sensor_name,
state=laston_date,
attributes={
"freindly_name": f"{climate_name(climate)} - Last date climate was turned on",
"device_class": "timestamp",
},
)
self.hass.log(
f"Created sensor: {laston_sensor_name}. Initial state: {laston_date}"
)
def init_laston_listeners(self, kwargs):
for climate in self.climates:
self.hass.listen_state(
self.update_laston_sensors, entity=climate, attribute="all"
)
def update_laston_sensors(self, climate, attribute, old, new, kwargs):
# Listener for climate entity
self.climate_states[climate].add_state(new)
laston_date = str(self.climate_states[climate].last_turned_on)
sensor_name = self.laston_sensor_name(climate)
sensor_state = self.hass.get_state(sensor_name)
if sensor_state != laston_date:
self.hass.update_state(sensor_name, state=laston_date)
self.hass.log(
f"Updated state for {sensor_name}: {laston_date}. Previous: {sensor_state}"
)
def get_history_data(self, days: int = 10) -> List:
data: List = self.hass.get_history(entity_id=self.appstate_entity, days=days) # type: ignore
if not data or len(data) == 0:
self.hass.warn(
f"get_history returned no data for entity: {self.appstate_entity}. Exiting"
)
return []
edata = data[0]
# the get_history() fn doesn't say it guarantees sort (though it appears to be)
edata = list(reversed(sorted(edata, key=lambda rec: rec["last_updated"])))
return edata
def find_laston_from_history(self, climate: str, history: List):
key = f"{climate_name(climate)}_state"
retval = None
for rec in history:
if rec["attributes"].get(key) == "on":
retval = rec["last_changed"]
break
return retval
class TurnonState:
"""
.__init__() - initialize from history
.add_state(stateobj) - add stateobj
.last_turned_on [property] -> None, datetime
returns the last time a climate went from "off" to "on"
(based on autoclimate config)
This requires the current state, the previous state, and the state before that.
"""
def __init__(self, hass: Hass, config: dict, climate_entity: str) -> None:
self.hass = hass
self.config = config[climate_entity]
self.climate_entity = climate_entity
# states: "on", "off" (Ignore "offline")
self.curr: Optional[str] = None
self.curr_m1: Optional[str] = None # curr minus t1 ie: prev
self.curr_m2: Optional[str] = None # curr minus t2 ie: prev prev
self._curr_dt: Optional[dt.datetime] = None
self._curr_dt_m1: Optional[dt.datetime] = None
self._initialize_from_history()
def add_state(self, stateobj: dict):
"""Must be added in chronologically increasing order!"""
last_updated = stateobj.get("last_updated")
if isinstance(last_updated, str):
last_updated = dt.datetime.fromisoformat(stateobj["last_updated"])
if self._curr_dt and last_updated < self._curr_dt:
raise RuntimeError(
f"Adding state earlier than lastest saved state. Can only add states in increasing datetime. stateobj: {json.dumps(stateobj)}"
)
state = self.entity_state(stateobj)
assert state in ["on", "off", "offline", "error_off"]
if state == self.curr or state == "offline":
return
else:
self.curr_m2 = self.curr_m1
self.curr_m1 = self.curr
self.curr = state
self._curr_dt_m1 = self._curr_dt
self._curr_dt = last_updated
def entity_state(self, stateobj: dict) -> str:
"""Return summarized state based on config: on, off, offline """
return State.offstate(self.climate_entity, stateobj, self.config, self.hass)[0]
@property
def last_turned_on(self) -> Optional[dt.datetime]:
if self.curr == "on" and self.curr_m1 == "off":
return self._curr_dt
elif self.curr == "off" and self.curr_m1 == "on" and self.curr_m2 == "off":
return self._curr_dt_m1
else:
return None
def _initialize_from_history(self):
history = self._get_history_data()
for stateobj in history:
self.add_state(stateobj)
def _get_history_data(self, days: int = 10) -> List:
"""
returns state history for self.climate_entity
**IN CHRONOLOGICAL ORDER**
"""
data: List = self.hass.get_history(entity_id=self.climate_entity, days=days) # type: ignore
if not data or len(data) == 0:
self.hass.warn(
f"get_history returned no data for entity: {self.climate_entity}. Exiting"
)
return []
edata = data[0]
# the get_history() fn doesn't say it guarantees sort (though it appears to be)
edata = list(sorted(edata, key=lambda rec: rec["last_updated"]))
return edata
def __str__(self):
def dtstr(val: Optional[dt.datetime]):
if type(val) is str:
print("here")
return "None " if not val else val.strftime("%y/%m/%d %H:%M:%S")
return f"TurnOnState: {self.climate_entity:35} **{dtstr(self.last_turned_on)}** - {self.curr} - {self.curr_m1} - {self.curr_m2} - {dtstr(self._curr_dt)} - {dtstr(self._curr_dt_m1)}"
|
rr326/ad_app_autoclimate
|
_autoclimate/state.py
|
<filename>_autoclimate/state.py
import json # noqa
import math
from typing import Optional, Tuple
import adplus
from adplus import Hass
adplus.importlib.reload(adplus)
from _autoclimate.occupancy import Occupancy
from _autoclimate.utils import climate_name
class State:
def __init__(
self,
hass: Hass,
config: dict,
poll_frequency: int,
appname: str,
climates: list,
create_temp_sensors: bool,
test_mode: bool,
):
self.hass = hass
self.aconfig = config
self.poll_frequency = poll_frequency
self.appname = appname
self.app_state_name = f"app.{self.appname}_state"
self.test_mode = test_mode
self.use_temp_sensors = create_temp_sensors
self.climates = climates
self.state: dict = {}
self._current_temps: dict = {} # {climate: current_temp}
self.init_states()
self.hass.run_in(self.create_hass_stateobj, 0)
if self.use_temp_sensors:
self.hass.run_in(self.create_temp_sensors, 0)
self.hass.run_in(self.init_climate_listeners, 0)
self.hass.run_every(
self.get_and_publish_state, "now", 60 * 60 * self.poll_frequency
)
self.hass.run_in(self.register_services, 0)
def create_hass_stateobj(self, kwargs):
# APP_STATE
self.hass.update_state(
self.app_state_name,
attributes={"friendly_name": f"{self.appname} State"},
)
def create_temp_sensors(self, kwargs):
# Temperature Sensors
for climate in self.climates:
sensor_name = self.sensor_name(climate)
self.hass.update_state(
sensor_name,
attributes={
"unit_of_measurement": "°F",
"freindly_name": f"Temperatue for {climate_name(climate)}",
"device_class": "temperature",
},
)
self.hass.log(f"Created sensor for {sensor_name}")
def init_states(self):
for climate in self.climates:
self.state[climate] = {
"offline": None,
"state": None,
"unoccupied": None,
"state_reason": None,
}
def init_climate_listeners(self, kwargs):
for climate in self.climates:
self.hass.listen_state(
self.get_and_publish_state, entity=climate, attribute="all"
)
def sensor_name(self, entity):
return f"sensor.{self.appname}_{climate_name(entity)}_temperature"
def publish_state(
self,
):
"""
This publishes the current state, as flat attributes,
to APP_STATE (eg: app.autoclimate_state)
"""
data = {
f"{climate_name(entity)}_{key}": value
for (entity, rec) in self.state.items()
for (key, value) in rec.items()
}
# app.autoclimate_state ==> autoclimate_state
data["summary_state"] = self.autoclimate_overall_state
self.hass.update_state(
self.app_state_name, state=self.autoclimate_overall_state, attributes=data
)
if self.use_temp_sensors:
for climate, current_temp in self._current_temps.items():
sensor_name = self.sensor_name(climate)
self.hass.update_state(sensor_name, state=current_temp)
# self.log(
# f"DEBUG LOGGING\nPublished State\n============\n{json.dumps(data, indent=2)}"
# )
def get_and_publish_state(self, *args, **kwargs):
mock_data = kwargs.get("mock_data")
self.get_all_entities_state(mock_data=mock_data) # Update state copy
self.publish_state()
def get_entity_state(
self, entity: str, mock_data: Optional[dict] = None
) -> Tuple[str, str, float]:
state_obj: dict = self.hass.get_state(entity, attribute="all") # type: ignore
return self.offstate(
entity,
state_obj,
self.aconfig[entity],
self.hass,
self.test_mode,
mock_data,
)
def get_all_entities_state(self, *args, mock_data: Optional[dict] = None):
"""
temp
* value = valid setpoint
* not found: offline
* None = system is off
"""
for entity in self.climates:
summarized_state, state_reason, current_temp = self.get_entity_state(
entity, mock_data
)
#
# Current_temp
#
self._current_temps[entity] = current_temp
#
# Offline
#
if summarized_state == "offline":
self.state[entity] = {
"offline": True,
"state": "offline",
"unoccupied": "offline",
}
continue
else:
self.state[entity]["offline"] = False
#
# State
#
self.state[entity]["state"] = summarized_state
self.state[entity]["state_reason"] = state_reason
#
# Occupancy
#
if not self.state[entity]["offline"]:
try:
last_on_date = self.hass.get_state(
Occupancy.unoccupied_sensor_name_static(self.appname, entity)
)
if last_on_date == Occupancy.UNOCCUPIED_SINCE_OCCUPIED_VALUE:
self.state[entity]["unoccupied"] = False
elif last_on_date is None:
self.state[entity]["unoccupied"] = None
else:
self.state[entity][
"unoccupied"
] = Occupancy.duration_off_static(self.hass, last_on_date)
except Exception as err:
self.hass.error(
f"Error getting occupancy for {entity}. Err: {err}."
)
@property
def autoclimate_overall_state(self):
"""
Overall state:
* on - any on
* offline - not on and any offline
* error - any "error_off" - meaning any are off but should not be
* off - all properly off, confirmed.
"""
substates = {entity["state"] for entity in self.state.values()}
if "on" in substates:
return "on"
elif "offline" in substates:
return "offline"
elif "error_off" in substates:
return "error"
elif {"off"} == substates:
return "off"
else:
self.hass.log(f"Unexpected overall state found: {substates}")
return "programming_error"
@staticmethod
def offstate(
entity: str,
stateobj: dict,
config: dict,
hass: Hass,
test_mode: bool = False,
mock_data: Optional[dict] = None,
) -> Tuple[str, str, float]:
"""
        Returns: on/off/offline/error_off, reason, current_temp
if test_mode it will merge self.mocked_attributes to the state
This tests to see if a climate entity's state is what it should be.
The logic is pretty complex due to challenges with offline/online,
priorities, differences in behavior from different thermostats, etc.
"""
offconfig = config["off_state"]
attributes = stateobj["attributes"] if stateobj else {}
# Mocks
if test_mode and mock_data:
if mock_data.get("entity_id") == entity:
mock_attributes = mock_data["mock_attributes"]
hass.info(
f"get_entity_state: using MOCKED attributes for entity {entity}: {mock_attributes}"
)
attributes = attributes.copy()
attributes.update(mock_attributes)
# Get current temperature
current_temp: float = attributes.get("current_temperature", math.nan)
#
# Offline?
#
if "temperature" not in attributes:
return "offline", "offline", current_temp
#
# Not offline. Check if mode == off_state.
#
temp = attributes.get("temperature")
# Turned off?
if temp is None:
# Thermostat is turned off
if offconfig["state"] == "off":
return "off", "Thermostat is off", current_temp
else:
return "error_off", "Thermostat is off but should not be!", current_temp
# Thermostat is on.
elif offconfig["state"] == "off":
return "on", "Thermostat is not off, but it should be", current_temp
# Is away mode?
elif offconfig["state"] == "away":
if attributes.get("preset_mode").lower() != "away":
return "on", "Not away mode, but should be", current_temp
else:
# Proper away mode setting?
if (off_temp := offconfig.get("temp")) is None:
return "off", "Away mode. No off_temp available.", current_temp
else:
if temp == off_temp:
return (
"off",
f"Away mode at proper temp: {off_temp}",
current_temp,
)
else:
return (
"on",
f"Away mode but improper temp. Should be {off_temp}. Actual: {temp}.",
current_temp,
)
# Perm_hold?
elif offconfig["state"] == "perm_hold":
if attributes.get("preset_mode") != offconfig["perm_hold_string"]:
return (
"on",
f"Not proper permanent hold. Actual: {attributes.get('preset_mode')} -- {attributes.get('temperature')}",
current_temp,
)
elif temp > offconfig["temp"]:
return (
"on",
f"Perm hold at {temp}. Should be <= {offconfig['temp']}",
current_temp,
)
else:
return "off", f"Perm hold at {temp}", current_temp
# Unexpected value
return "none", "error - should not be here", current_temp
def is_offline(self, namespace, domain, service, kwargs) -> bool:
return self.state[kwargs["climate"]]["offline"]
def is_on(self, namespace, domain, service, kwargs) -> bool:
return self.state[kwargs["climate"]]["state"] == "on"
def is_off(self, namespace, domain, service, kwargs) -> bool:
return self.state[kwargs["climate"]]["state"] == "off"
def entity_state(self, namespace, domain, service, kwargs) -> Optional[str]:
return self.state[kwargs["climate"]]["state"]
def is_hardoff(self, namespace, domain, service, kwargs) -> bool:
state = self.hass.get_state(entity_id=kwargs["climate"])
return state == "off"
def is_error_off(self, namespace, domain, service, kwargs) -> bool:
return self.state[kwargs["climate"]]["state"] == "error_off"
def is_error(self, namespace, domain, service, kwargs) -> bool:
return self.state[kwargs["climate"]]["state"] == "error"
def register_services(self, kwargs: dict):
callbacks = [
self.is_offline,
self.is_on,
self.is_off,
self.entity_state,
self.is_hardoff,
self.is_error_off,
self.is_error,
]
for callback in callbacks:
service_name = f"{self.appname}/{callback.__name__}"
self.hass.register_service(service_name, callback)
self.hass.log(f"Registered service: {service_name}")
|
rr326/ad_app_autoclimate
|
_autoclimate/occupancy.py
|
<gh_stars>0
import datetime as dt
from typing import List
from _autoclimate.utils import climate_name
from adplus import Hass
from dateutil import tz
"""
Create new sensors
Reason: That way you do auto off if unoccupied since AND last_manual_change > X hours
* unoccupied_since:
* Last unoccupied
* None if no data
* datetime.max if currently occupied
* last_manual_change
* Timestamps as above
# TODO
* Offline - handle
"""
class Occupancy:
UNOCCUPIED_SINCE_OCCUPIED_VALUE = dt.datetime(dt.MAXYEAR, 12, 29, tzinfo=tz.tzutc())
def __init__(
self,
hass: Hass,
config: dict,
appname: str,
climates: list,
test_mode: bool,
):
self.hass = hass
self.aconfig = config
self.appname = appname
self.test_mode = test_mode
self.climates = climates
self.hass.run_in(self.create_occupancy_sensors, 0)
self.hass.run_in(self.init_occupancy_listeners, 0.1)
def unoccupied_sensor_name(self, climate):
return self.unoccupied_sensor_name_static(self.appname, climate)
@staticmethod
def unoccupied_sensor_name_static(appname, climate):
return f"sensor.{appname}_{climate_name(climate)}_unoccupied_since"
def create_occupancy_sensors(self, kwargs):
# Unoccupied Since Sensors
for climate in self.climates:
unoccupied_sensor_name = self.unoccupied_sensor_name(climate)
last_on_date = self.history_last_on_date(climate=climate)
self.hass.update_state(
unoccupied_sensor_name,
state=last_on_date,
attributes={
"freindly_name": f"{climate_name(climate)} - unoccupied since",
"device_class": "timestamp",
},
)
self.hass.log(
f"Created sensor: {unoccupied_sensor_name}. Initial state: {last_on_date}"
)
def init_occupancy_listeners(self, kwargs):
"""
This will create a different occupancy sensor for each climate,
so if multiple climates have the same oc_sensor, you'll get multiple
listeners.
"""
for climate in self.climates:
oc_sensor = self.get_sensor(climate=climate)
self.hass.log(f"listen_state: {oc_sensor}")
self.hass.listen_state(
self.update_occupancy_sensor,
entity=oc_sensor,
attribute="all",
climate=climate,
)
def update_occupancy_sensor(self, entity, attribute, old, new, kwargs):
climate = kwargs["climate"]
# self.hass.log(f'update_occupancy_sensor: {entity} -- {climate} -- {new} -- {attribute}')
last_on_date = self.oc_sensor_val_to_last_on_date(
new["state"], new["last_updated"]
)
unoccupied_sensor_name = self.unoccupied_sensor_name(climate)
self.hass.update_state(
unoccupied_sensor_name,
state=last_on_date,
)
self.hass.log(
f"update_occupancy_sensor - {unoccupied_sensor_name} - state: {last_on_date}"
)
def get_sensor(self, climate=None, sensor=None):
if climate and sensor:
raise RuntimeError(
f"Programming error - history_last_on_date: give climate OR sensor"
)
elif climate is None and sensor is None:
raise RuntimeError(
f"Programming error - need a climate or sensor. Got None."
)
elif sensor:
return sensor
else:
try:
oc_sensor = self.aconfig[climate]["occupancy_sensor"]
except KeyError:
raise RuntimeError(f"Unable to get occupancy_sensor for {climate}")
return oc_sensor
def oc_sensor_val_to_last_on_date(self, state, last_on_date):
if state == "on":
return self.UNOCCUPIED_SINCE_OCCUPIED_VALUE
elif state in ["off", "unavailable"]:
return last_on_date
else:
self.hass.log(f"Unexpected last_on_date state: {state}")
# Error or offline
return None
def history_last_on_date(self, climate=None, sensor=None):
state, duration_off, last_on_date = self.get_unoccupied_time_for(
climate, sensor
)
return self.oc_sensor_val_to_last_on_date(state, last_on_date)
def get_unoccupied_time_for(self, climate=None, sensor=None):
oc_sensor = self.get_sensor(climate=climate, sensor=sensor)
state, duration_off, last_on_date = self._history_occupancy_info(oc_sensor)
return state, duration_off, last_on_date
@staticmethod
def duration_off_static(hass, dateval):
"""
0 - currently on
> 0 - number hours off
< 0 / None - Error
"""
if isinstance(dateval, str):
dateval = dt.datetime.fromisoformat(dateval)
if dateval.tzinfo is None:
            dateval = dateval.replace(tzinfo=tz.tzlocal())
now = hass.get_now()
if dateval > now:
return 0
duration_off_hours = round((now - dateval).total_seconds() / (60 * 60), 2)
return duration_off_hours
def _history_occupancy_info(self, sensor_id: str, days: int = 10):
"""
returns: state (on/off/unavailable), duration_off (hours float / None), last_on_date (datetime, None)
state = state of occupancy sensor
All based on an occupancy sensor's history data.
{
"entity_id": "binary_sensor.seattle_occupancy",
"state": "off", # on/off/unavailable
"attributes": {
"friendly_name": "<NAME>",
"device_class": "occupancy"
},
"last_changed": "2020-10-28T13:10:47.384057+00:00",
"last_updated": "2020-10-28T13:10:47.384057+00:00"
}
Note - it looks like the occupancy sensor properly handles offline by returning
an "unavailble" status. (Unlike temp sensors, which show the last value.)
"""
data: List = self.hass.get_history(entity_id=sensor_id, days=days) # type: ignore
if not data or len(data) == 0:
self.hass.warn(
f"get_history returned no data for entity: {sensor_id}. Exiting"
)
return "error", None, None
edata = data[0]
        # get_history() doesn't document a sort guarantee (though in practice it appears sorted)
edata = list(reversed(sorted(edata, key=lambda rec: rec["last_updated"])))
current_state = edata[0]["state"]
if current_state == "on":
return "on", None, None
last_on_date = None
now: dt.datetime = self.hass.get_now() # type: ignore
for rec in edata:
if rec.get("state") == "on":
last_on_date = dt.datetime.fromisoformat(rec["last_updated"])
duration_off_hours = round(
(now - last_on_date).total_seconds() / (60 * 60), 2
)
return current_state, duration_off_hours, last_on_date
# Can not find a last on time. Give the total time shown.
min_time_off = round(
            (now - dt.datetime.fromisoformat(edata[-1]["last_updated"])).total_seconds()
/ (60 * 60),
2,
)
return current_state, min_time_off, None
|
rr326/ad_app_autoclimate
|
_autoclimate/mocks.py
|
from typing import Callable, List
from adplus import Hass
class Mocks:
def __init__(
self,
hass: Hass,
mock_config: dict,
mock_callbacks: List[Callable],
run_mocks: bool = False,
init_delay: int = 1,
mock_delay: int = 1,
):
self.hass = hass
self.mconfig = mock_config
self.run_mocks = run_mocks
self.callbacks = mock_callbacks
self.init_delay = init_delay
self.mock_delay = mock_delay
if self.run_mocks:
self.hass.run_in(self.init_mocks, self.init_delay)
def init_mocks(self, kwargs):
self.hass.log("Running Mocks")
mock_delay = 0
for mock in self.mconfig:
self.hass.run_in(
self.run_mock,
mock_delay := mock_delay + self.mock_delay,
mock_config=mock,
)
def run_mock(self, kwargs):
"""
Weird - I can't send the callback in the init_mocks above. Some sort of strange pickling / lock error.
So instead I'll do the callback loop here.
"""
mock_config = kwargs["mock_config"]
self.hass.log(f"\n\n==========\nMOCK: {mock_config}")
for callback in self.callbacks:
self.hass.run_in(callback, 0, mock_data=mock_config)
|
rr326/ad_app_autoclimate
|
autoclimate.py
|
from copy import Error
import json # noqa
import re
import datetime as dt
from _autoclimate.utils import in_inactive_period
import adplus
adplus.importlib.reload(adplus)
import _autoclimate
import _autoclimate.laston
import _autoclimate.mocks
import _autoclimate.occupancy
import _autoclimate.schema
import _autoclimate.state
import _autoclimate.turn_off
adplus.importlib.reload(_autoclimate)
adplus.importlib.reload(_autoclimate.state)
adplus.importlib.reload(_autoclimate.mocks)
adplus.importlib.reload(_autoclimate.occupancy)
adplus.importlib.reload(_autoclimate.turn_off)
adplus.importlib.reload(_autoclimate.laston)
adplus.importlib.reload(_autoclimate.schema)
from _autoclimate.laston import Laston
from _autoclimate.mocks import Mocks
from _autoclimate.occupancy import Occupancy
from _autoclimate.schema import SCHEMA
from _autoclimate.state import State
from _autoclimate.turn_off import TurnOff
class AutoClimate(adplus.Hass):
"""
# AutoClimateApp
    This provides several services for thermostat management.
See README.md for documentation.
See autoclimate.yaml.sample for sample configuration.
## Events
Events have TWO names:
event = "autoclimate" for ALL events
sub_event = app.{appname}_event - this is the event you actually care about
Why? To trigger an event in Lovelace, you need to trigger a script, where you
have to hardcode the event name, but can send template data in the body. So
rather than have to write different scripts for each event, here you create
*one* script to trigger the event and put the event you care about in a
sub_event kwarg.
"""
EVENT_TRIGGER = "autoclimate"
def initialize(self):
self.log("Initialize")
self.argsn = adplus.normalized_args(self, SCHEMA, self.args, debug=False)
self.entity_rules = self.argsn["entity_rules"]
self.inactive_period = None
self.extra_validation(self.argsn)
if in_inactive_period(self, self.inactive_period):
self.log(f'Autoclimate in inactive_period - will not use shutoff rules.')
self.test_mode = self.argsn.get("test_mode")
self.appname = self.argsn["name"]
self.poll_frequency = self.argsn["poll_frequency"]
self.TRIGGER_HEAT_OFF = f"app.{self.appname}_turn_off_all"
self.climates = list(self.entity_rules.keys())
self.log(f"Climates controlled: {self.climates}")
#
# Initialize sub-classes
#
self.state_module = State(
hass=self,
config=self.entity_rules,
poll_frequency=self.argsn["poll_frequency"],
appname=self.appname,
climates=self.climates,
create_temp_sensors=self.argsn["create_temp_sensors"],
test_mode=self.test_mode,
)
self.climate_state = self.state_module.state
self.occupancy_module = Occupancy(
hass=self,
config=self.entity_rules,
appname=self.appname,
climates=self.climates,
test_mode=self.test_mode,
)
self.laston_module = Laston(
hass=self,
config=self.entity_rules,
appname=self.appname,
climates=self.climates,
appstate_entity=self.state_module.app_state_name,
test_mode=self.test_mode,
)
self.turn_off_module = TurnOff(
hass=self,
config=self.entity_rules,
inactive_period = self.inactive_period,
poll_frequency=self.argsn["poll_frequency"],
appname=self.appname,
climates=self.climates,
test_mode=self.test_mode,
climate_state=self.climate_state,
turn_on_error_off=self.argsn["turn_on_error_off"],
)
self.mock_module = Mocks(
hass=self,
mock_config=self.argsn["mocks"],
run_mocks=self.argsn["run_mocks"],
mock_callbacks=[self.turn_off_module.autooff_scheduled_cb],
init_delay=1,
mock_delay=1,
)
def extra_validation(self, args):
# Validation that Cerberus doesn't do well
# entity_rules
for climate, rule in self.entity_rules.items():
offrule = rule.get("off_state", {})
if offrule.get("state", "") == "perm_hold":
if "temp" not in offrule:
self.error(f'Invalid offrule. Perm_hold needs an "temp": {offrule}')
if "perm_hold_string" not in offrule:
self.error(
f'Invalid offrule. Perm_hold needs an "perm_hold_string": {offrule}'
)
state = self.get_state(climate, attribute="all")
            if state is None:
self.error(
f"Probable misconfiguration (bad entity): could not get state for entity: {climate}"
)
# inactive_period: mm/dd - mm/dd
if self.argsn.get("inactive_period"):
try:
match = re.match(r"(\d?\d)/(\d?\d)\s*-\s*(\d?\d)/(\d?\d)", self.argsn["inactive_period"])
start = (int(match.group(1)), int(match.group(2))) # type: ignore
end = (int(match.group(3)), int(match.group(4))) # type: ignore
if not (1<=start[0]<=12 and 1<=end[0]<=12 and 1<=start[1]<=31 and 1<=end[1]<=31):
raise Error(f'Invalid day or month value in inactive_period ({self.argsn["inactive_period"]})')
except Exception as err:
self.error(
f'Invalid inactive_period format. Should be: "mm/dd - mm/dd". Error: {err}'
)
else:
self.inactive_period = (start, end) # ((m,d), (m,d))
def trigger_sub_events(self):
pass
|
rr326/ad_app_autoclimate
|
_autoclimate/turn_off.py
|
<reponame>rr326/ad_app_autoclimate<filename>_autoclimate/turn_off.py
# pyright: reportUnusedCoroutine=false
import datetime as dt
import json # noqa
from typing import Union
import pytz
import adplus
from adplus import Hass
from typing import Optional
adplus.importlib.reload(adplus)
from _autoclimate.laston import Laston
from _autoclimate.schema import SCHEMA
from _autoclimate.utils import in_inactive_period
class TurnOff:
def __init__(
self,
hass: Hass,
config: dict,
        inactive_period: Optional[tuple],
poll_frequency: int,
appname: str,
climates: list,
test_mode: bool,
climate_state: dict,
turn_on_error_off=False,
):
self.hass = hass
self.aconfig = config
self.inactive_period = inactive_period
self.poll_frequency = poll_frequency
self.appname = appname
self.app_state_name = f"app.{self.appname}_state"
self.test_mode = test_mode
self.climates = climates
self.climate_state = climate_state
self.turn_on_error_off = turn_on_error_off
self.state: dict = {}
self._current_temps: dict = {} # {climate: current_temp}
self.init_listeners()
if not self.any_autooff():
self.hass.log("autooff: Not configured. Will not run.")
else:
self.hass.run_every(
self.autooff_scheduled_cb, "now", self.poll_frequency * 60 * 60
)
def init_listeners(self):
self.hass.listen_event(self.cb_turn_off_all, event=self.event_all_off_name())
self.hass.log(f"Listening to event: {self.event_all_off_name()}")
self.hass.listen_event(
self.cb_turn_off_climate, event=self.event_entity_off_name()
)
self.hass.log(f"Listening to event: {self.event_entity_off_name()}")
def event_all_off_name(self) -> str:
return f"app.{self.appname}_turn_off_all"
def event_entity_off_name(self) -> str:
return f"app.{self.appname}_turn_off_climate"
def turn_off_climate(
        self, climate: str, config: Optional[dict] = None, test_mode: bool = False
) -> None:
"""
Turn "off" a climate climate, where "off" is defined by an off rule such as:
climate.cabin:
off_state: "away"
off_temp: 55
config - if given, will use from self.aconfig. If passed, will use passed config
"""
if config is None:
config = self.aconfig[climate]
else:
# Config passed in.
schema = SCHEMA["entity_rules"]["valuesrules"]["schema"]
try:
config = adplus.normalized_args(self.hass, schema, config)
except adplus.ConfigException as err:
self.hass.error(
f"turn_off_climate called with passed-in config that does not validate: {config}"
)
return
stateobj: dict = self.hass.get_state(climate, attribute="all") # type: ignore
attributes = stateobj["attributes"]
if "temperature" not in attributes:
self.hass.log(f"{climate} - Offline. Can not turn off.")
return
if not config:
self.hass.error(f"No off_rule for climate: {climate}. Can not turn off.")
return
# Set to "off"
if config["off_state"]["state"] == "off":
if not test_mode:
self.hass.call_service("climate/turn_off", entity_id=climate)
self.hass.lb_log(f"{climate} - Turn off")
# Set to "away"
elif config["off_state"]["state"] == "away":
if not test_mode:
self.hass.call_service(
"climate/set_preset_mode",
entity_id=climate,
preset_mode="Away",
)
self.hass.lb_log(f"{climate} - Set away mode")
# Set to "perm_hold"
elif config["off_state"]["state"] == "perm_hold":
if not test_mode:
self.hass.call_service(
"climate/set_temperature",
entity_id=climate,
temperature=config["off_state"]["temp"],
)
self.hass.call_service(
"climate/set_preset_mode",
entity_id=climate,
preset_mode="Permanent Hold",
)
self.hass.log(
f"{climate} - Set Perm Hold to {config['off_state']['temp']}. "
)
# Invalid config
else:
self.hass.error(f"Programming error. Unexpected off_rule: {config}")
def cb_turn_off_climate(self, event_name, data, kwargs):
"""
kwargs:
entity: climate_string
config: OFF_SCHEMA (see above)
test_mode: bool (optional)
"""
climate = data["climate"]
config = data.get("config")
test_mode = data.get("test_mode")
return self.turn_off_climate(climate, config=config, test_mode=test_mode)
def cb_turn_off_all(self, event_name, data, kwargs):
test_mode = data.get("test_mode")
for climate in self.climates:
config = data["config"].get(climate, {}) if "config" in data else None
self.turn_off_climate(climate, config=config, test_mode=test_mode)
def any_autooff(self):
for climate in self.climates:
if self.aconfig.get(climate, {}).get("auto_off_hours") != None:
return True
return False
def autooff_scheduled_cb(self, kwargs):
"""
Turn off any thermostats that have been on too long.
"""
if in_inactive_period(self.hass, self.inactive_period):
return
for climate, state in self.climate_state.items():
self.hass.debug(f'autooff: {climate} - {state["state"]}')
config = self.aconfig.get(climate)
if not config:
continue
if not "auto_off_hours" in config:
continue
if state["state"] == "off":
continue
if state["offline"]:
continue # Can't do anything
if state["state"] == "error_off" and self.turn_on_error_off:
# Off but should not be
self.hass.log(
f"{climate} is off but should not be! Attempting to turn on."
)
if not self.test_mode:
self.hass.call_service("climate/turn_on", entity_id=climate)
self.hass.lb_log(f"{climate} - Turned thermostat on.")
hours_unoccupied = self.climate_state[climate]["unoccupied"]
if hours_unoccupied is None:
self.hass.warn(
f"Programming error - hours_unoccupied None for {climate}"
)
elif hours_unoccupied < 0:
self.hass.warn(
f"Programming error - Negative duration off for {climate}: {hours_unoccupied}"
)
elif hours_unoccupied == 0:
# Currently off
pass
elif hours_unoccupied > config["auto_off_hours"] or self.test_mode:
# Maybe turn off?
# First check to see if someone turned it on since last off.
laston_sensor = Laston.laston_sensor_name_static(self.appname, climate)
laston_date = self.hass.get_state(laston_sensor)
if self.hours_since_laston(laston_date) < hours_unoccupied:
self.hass.log(
f"Autooff - NOT turning off {climate}. hours_unoccupied: {hours_unoccupied}. But last turned on: {laston_date}"
)
continue
# Turn off
self.hass.lb_log(f"Autooff - Turning off {climate}")
if not self.test_mode:
self.turn_off_climate(climate)
def hours_since_laston(self, laston_date: Union[str, dt.datetime]) -> float:
if laston_date in [None, "None"]:
laston_date = dt.datetime(dt.MINYEAR,1,1, tzinfo=pytz.timezone(str(self.hass.get_timezone())))
elif isinstance(laston_date, str):
laston_date = dt.datetime.fromisoformat(laston_date)
now = self.hass.get_now()
return (now - laston_date).total_seconds() / (60 * 60) # type: ignore
|
rr326/ad_app_autoclimate
|
_autoclimate/schema.py
|
SCHEMA = {
"name": {"required": True, "type": "string"},
"poll_frequency": {"required": True, "type": "number"},
"test_mode": {"required": False, "type": "boolean", "default": False},
"run_mocks": {"required": False, "type": "boolean", "default": False},
"create_temp_sensors": {"required": True, "type": "boolean"},
"turn_on_error_off": {"required": False, "type": "boolean", "default": True},
"inactive_period": { # See "extra_validation" for validation rules
"required": False,
"type": "string",
},
"entity_rules": {
"required": True,
"type": "dict",
"valuesrules": {
"type": "dict",
"required": True,
"schema": {
"off_state": {
"type": "dict",
"required": True,
"schema": {
"state": {
"type": "string",
"required": True,
"allowed": ["away", "off", "perm_hold"],
},
"temp": {"type": "number", "required": False},
"perm_hold_string": {"type": "string", "required": False},
},
},
"occupancy_sensor": {"type": "string", "required": True},
"auto_off_hours": {"type": "number", "required": False},
},
},
},
"mocks": {
"required": False,
"type": "list",
"schema": {
"type": "dict",
"required": True,
"schema": {
"entity_id": {"required": True, "type": "string"},
"mock_attributes": {"required": True, "type": "dict"},
},
},
},
}
|
rr326/ad_app_autoclimate
|
_autoclimate/utils.py
|
from adplus import Hass
import datetime as dt
import pytz
def climate_name(entity):
# climate.my_thermostat ==> my_thermostat
return entity.split(".")[1]
def in_inactive_period(hass: Hass, inactive_period) -> bool:
    if inactive_period is None:
return False
try:
now = hass.get_now() # type: dt.datetime
tzinfo = pytz.timezone(str(hass.get_timezone()))
year = now.year
ip = inactive_period
start = dt.datetime(year, ip[0][0], ip[0][1], tzinfo=tzinfo)
end = dt.datetime(year, ip[1][0], ip[1][1], tzinfo=tzinfo)
return start <= now < end
except Exception as err:
hass.log(f'Error testing inactive period. err: {err}')
return False
|
Alkemic/yaACL
|
yaacl/migrations/0001_initial.py
|
<filename>yaacl/migrations/0001_initial.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('auth', '__first__'),
]
operations = [
migrations.CreateModel(
name='ACL',
fields=[
('id', models.AutoField(
verbose_name='ID',
serialize=False,
auto_created=True,
primary_key=True,
)),
('resource', models.CharField(
max_length=255,
verbose_name='Resource name',
db_index=True,
)),
('display', models.CharField(
max_length=255,
null=True,
verbose_name='displayed name',
blank=True,
)),
('created_at', models.DateTimeField(
auto_now_add=True,
verbose_name='Creation time',
)),
('is_available', models.BooleanField(
default=True,
verbose_name='Is available to assign',
)),
],
options={
},
bases=(models.Model,),
),
]
|
Alkemic/yaACL
|
yaacl/models.py
|
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from .managers import ACLManager
from django.conf import settings
user_model = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
group_model = getattr(settings, 'ACL_GROUP_USER_MODEL', 'auth.Group')
@python_2_unicode_compatible
class ACL(models.Model):
acl_list = {}
resource = models.CharField(
_("Resource name"),
max_length=255,
db_index=True,
)
display = models.CharField(
_("displayed name"),
max_length=255,
null=True,
blank=True,
)
created_at = models.DateTimeField(
_("Creation time"),
auto_now_add=True,
)
is_available = models.BooleanField(
_("Is available to assign"),
default=True,
)
user = models.ManyToManyField(
user_model,
verbose_name=_('User'),
blank=True,
related_name='acl',
)
group = models.ManyToManyField(
group_model,
verbose_name=_('User'),
blank=True,
related_name='acl',
)
objects = ACLManager()
class Meta:
app_label = 'yaacl'
def __str__(self):
if self.display:
return "%s (%s)" % (self.display, self.resource)
else:
return self.resource
|
Alkemic/yaACL
|
setup.py
|
# -*- coding:utf-8 -*-
import os
from setuptools import setup
f = open(os.path.join(os.path.dirname(__file__), 'README.md'))
readme = f.read()
f.close()
setup(
name='yaacl',
version='0.8.2',
description='Yet another access control list (ACL) per view for Django',
long_description=readme,
author="<NAME>",
author_email='<EMAIL>',
url='https://github.com/Alkemic/yaACL',
license='MIT',
packages=['yaacl'],
include_package_data=True,
install_requires=['setuptools'],
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
keywords='django,acl,admin',
)
|
Alkemic/yaACL
|
yaacl/templatetags/acl.py
|
# -*- coding:utf-8 -*-
from django import template
from yaacl import functions
register = template.Library()
@register.filter
def has_access(user, resource):
"""
:type user: django.contrib.auth.models.User
    :type resource: str

    Checks if the user has access to the given resource.
"""
return functions.has_access(user, resource)
@register.filter
def has_all_access(user, resource):
"""
:type user: django.contrib.auth.models.User
    :type resource: str

    Checks if the user has access to all resources under the given prefix.
"""
return functions.has_all_access(user, resource)
|
Alkemic/yaACL
|
demo/test_acl/urls.py
|
<reponame>Alkemic/yaACL<gh_stars>0
# -*- coding:utf-8 -*-
from django.conf.urls import *
from .views import Index, TestClassBasedView, test, other_index, utf_test
urlpatterns = [
url(r'^$', Index.as_view(), name='index'),
url(r'^$', TestClassBasedView.as_view(), name='index'),
url(r'test/$', test, name='test'),
url(r'other_index/$', other_index, name='other_index'),
url(r'utf_test/$', utf_test, name='utf_test'),
]
|
Alkemic/yaACL
|
yaacl/management/commands/show_acl.py
|
# -*- coding:utf-8 -*-
from django.core.management.base import BaseCommand
from django.utils.translation import ugettext_lazy as _
from yaacl.models import ACL
__author__ = '<NAME> <<EMAIL>>'
class Command(BaseCommand):
help = _('Lists all saved resources')
def handle(self, *args, **options):
for acl in ACL.objects.all():
self.stdout.write('Resource: %s, displayed named: %s' % (
acl.resource,
acl.display,
))
|
Alkemic/yaACL
|
yaacl/managers.py
|
<gh_stars>0
# -*- coding:utf-8 -*-
from django.db.models.query_utils import Q
from django.db.models import Manager
class ACLManager(Manager):
def get_user_resources(self, user):
return self.get_queryset().filter(
Q(user=user) | Q(group__in=user.groups.all())
).distinct()
|
Alkemic/yaACL
|
yaacl/decorators.py
|
<filename>yaacl/decorators.py
# -*- coding:utf-8 -*-
from functools import wraps
from django.utils.decorators import method_decorator, available_attrs
from yaacl.functions import has_access
from .views import no_access
from .models import ACL
from .signals import register_resource
def acl_register_view(name=None, resource=None):
"""
:type name: unicode
:type resource: str
"""
def decorator(view_func, name, resource):
if resource is None:
resource = "%s.%s" % (
view_func.__module__,
view_func.__name__,
)
signal_returned = register_resource.send(
sender='acl_register_view',
resource=resource,
name=name,
)
if signal_returned:
signal_returned = signal_returned[-1]
resource = signal_returned[1].get('resource', None)
name = signal_returned[1].get('name', None)
if resource not in ACL.acl_list:
ACL.acl_list[resource] = name
@wraps(view_func, assigned=available_attrs(view_func))
def wrapped_view(request, *args, **kwargs):
"""
:type request: django.http.request.HttpRequest
"""
has_access_to_resource = (
request.user.is_authenticated() and
has_access(request.user, resource)
)
if has_access_to_resource:
return view_func(request, *args, **kwargs)
else:
return no_access(request)
return wrapped_view
return lambda view_func: decorator(view_func, name, resource)
def acl_register_class(name=None, resource=None):
def klass_decorator(klass, name, resource):
if resource is None:
resource = "%s.%s" % (klass.__module__, klass.__name__)
klass.dispatch = method_decorator(
acl_register_view(name, resource)
)(klass.dispatch)
return klass
return lambda klass: klass_decorator(klass, name, resource)
|
Alkemic/yaACL
|
yaacl/views.py
|
# -*- coding: utf-8 -*-
from django.template import loader
from django.template.response import TemplateResponse
def no_access(request):
"""No access view"""
template = loader.get_template('yaacl/no_access.html')
return TemplateResponse(request, template)
|
Alkemic/yaACL
|
yaacl/functions.py
|
# -*- coding:utf-8 -*-
from django.db.models.query_utils import Q
from yaacl.models import ACL
__author__ = '<NAME> <<EMAIL>>'
def get_acl_resources(user):
"""
:type user: django.contrib.auth.models.User
    Returns the resource names the user has access to, directly or via groups.
"""
return ACL.objects.filter(
Q(user=user) | Q(group__in=user.groups.all())
).distinct().values_list('resource', flat=True)
def has_access(user, resource_name, resources=None):
"""
:type user: django.contrib.auth.models.User
    :type resource_name: str
    :type resources: list

    Checks if the user has access to the given resource (prefix match).
"""
if not user or not user.is_authenticated():
return False
if user.is_superuser:
return True
if resources is None:
resources = get_acl_resources(user)
return any(map(lambda r: r.startswith(resource_name), resources))
def has_all_access(user, name, resources=None):
"""
:type user: django.contrib.auth.models.User
    :type name: str
    :type resources: list

    Checks if the user has access to every registered resource under the given prefix.
"""
if not user or not user.is_authenticated():
return False
if user.is_superuser:
return True
if resources is None:
resources = get_acl_resources(user)
user_resources = set(filter(lambda r: r.startswith(name), resources))
available_resources = set(filter(
lambda r: r.startswith(name), ACL.acl_list.keys(),
))
return user_resources == available_resources
|
Alkemic/yaACL
|
yaacl/signals.py
|
<filename>yaacl/signals.py
# -*- coding:utf-8 -*-
from django.dispatch import Signal
__author__ = '<NAME> <<EMAIL>>'
register_resource = Signal(
providing_args=['resource', 'name'],
)
|
Alkemic/yaACL
|
yaacl/management/commands/sync_acl.py
|
# -*- coding:utf-8 -*-
from django.core.management.base import BaseCommand
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from yaacl.models import ACL
__author__ = '<NAME> <<EMAIL>>'
class Command(BaseCommand):
help = _('Sync resources with database')
def handle(self, *args, **options):
for app in settings.INSTALLED_APPS:
try:
__import__("%s.views" % app)
except ImportError:
pass
existing_resources = set([
resource_name
for resource_name, display_name in ACL.acl_list.items()
])
saved_resources = set([
acl.resource
for acl in ACL.objects.all()
])
# resources to remove
for resource_name in saved_resources - existing_resources:
ACL.objects.filter(resource=resource_name).delete()
self.stdout.write('Deleted resource: %s' % resource_name)
# resources to add
for resource_name in existing_resources - saved_resources:
ACL.objects.create(
resource=resource_name,
display=ACL.acl_list[resource_name],
)
self.stdout.write('Added resource: %s' % resource_name)
for resource_name in existing_resources & saved_resources:
entry = ACL.objects.get(resource=resource_name)
if entry.display != ACL.acl_list[resource_name]:
entry.display = ACL.acl_list[resource_name]
entry.save()
self.stdout.write('Updated resource: %s' % resource_name)
|
Alkemic/yaACL
|
yaacl/admin.py
|
<reponame>Alkemic/yaACL<gh_stars>0
# -*- coding:utf-8 -*-
from django import forms
try:
from django.apps import apps
get_model = apps.get_model
except ImportError:
from django.db.models.loading import get_model
from django.conf import settings
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin, GroupAdmin
from django.contrib.auth.models import Group
from django.utils.translation import gettext_lazy as _
from .models import ACL
class ACLAdditionalForm(forms.ModelForm):
acl = forms.ModelMultipleChoiceField(
ACL.objects.all(),
widget=admin.widgets.FilteredSelectMultiple('ACL', False),
required=False,
)
def __init__(self, *args, **kwargs):
super(ACLAdditionalForm, self).__init__(*args, **kwargs)
if self.instance.pk:
self.initial['acl'] = self.instance.acl.values_list(
'pk', flat=True,
)
def save(self, *args, **kwargs):
instance = super(ACLAdditionalForm, self).save(*args, **kwargs)
if not instance.pk:
instance.save()
instance.acl = self.cleaned_data['acl']
return instance
User = get_user_model()
try:
admin.site.unregister(User)
except admin.sites.NotRegistered:
pass
class ACLUserAdmin(UserAdmin):
form = ACLAdditionalForm
fieldsets = UserAdmin.fieldsets + (
(_('ACL'), {'fields': ('acl',)}),
)
if getattr(settings, 'ACL_GROUP_USER_MODEL', 'auth.Group') == 'auth.Group':
try:
admin.site.unregister(Group)
except admin.sites.NotRegistered:
pass
app_label, class_name = getattr(
settings,
'ACL_GROUP_USER_MODEL',
'auth.Group',
).split('.')
group_model = get_model(app_label, class_name)
class ACLGroupAdmin(GroupAdmin):
form = ACLAdditionalForm
admin.site.register(Group, ACLGroupAdmin)
admin.site.register(User, ACLUserAdmin)
admin.site.register(ACL)
|
Alkemic/yaACL
|
yaacl/migrations/0002_m2m_relations.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.contrib.auth.models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
('yaacl', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='acl',
name='group',
field=models.ManyToManyField(related_name='acl', verbose_name='User', to='auth.Group', blank=True),
),
migrations.AddField(
model_name='acl',
name='user',
field=models.ManyToManyField(related_name='acl', verbose_name='User', to=settings.AUTH_USER_MODEL, blank=True),
),
]
|
Alkemic/yaACL
|
yaacl/management/commands/__init__.py
|
<filename>yaacl/management/commands/__init__.py
__author__ = '<NAME> <<EMAIL>>'
|
Alkemic/yaACL
|
demo/test_acl/views.py
|
<gh_stars>0
# -*- coding:utf-8 -*-
from django.views.generic import View, TemplateView
from yaacl.decorators import acl_register_view, acl_register_class
class Index(TemplateView):
template_name = 'test_acl/index.html'
@acl_register_view()
def test(request):
pass
@acl_register_view('This is the "other" index')
def other_index(request):
pass
@acl_register_view(u"Zażółć gęślą jaźń")
def utf_test(request):
pass
@acl_register_view(u"Another test")
def another_test(request):
pass
@acl_register_class(u"Class based view")
class TestClassBasedView(View):
pass
|
Alkemic/yaACL
|
yaacl/templatetags/__init__.py
|
# -*- coding:utf-8 -*-
__author__ = '<NAME> <<EMAIL>>'
|
seantis/spare
|
spare/tests/test_utils.py
|
<reponame>seantis/spare<gh_stars>1-10
import os
import pytest
import signal
from multiprocessing import Process
from spare import utils
from spare.errors import FileChangedDuringReadError
from time import sleep
def test_abort_if_file_changed(temporary_path, loghandler):
path = temporary_path / 'foo'
path.touch()
with pytest.raises(FileChangedDuringReadError):
with utils.abort_if_file_changes_during_read(path):
with path.open('w') as f:
f.write('foobar')
def test_writable(temporary_path):
path = temporary_path / 'foo'
path.touch()
path.chmod(0o440)
with pytest.raises(PermissionError):
open(path, 'w')
with utils.writable(path):
with open(path, 'w'):
pass
with pytest.raises(PermissionError):
open(path, 'w')
    # a nonexistent path is a no-op
with utils.writable(path / 'foobar'):
pass
def test_delay_signal():
class Normal(Process):
def run(self):
import coverage
coverage.process_startup()
for i in range(0, 100):
sleep(0.05)
class Delayed(Process):
def run(self):
import coverage
coverage.process_startup()
with utils.delay_signal(signal.SIGTERM, 'busy'):
for i in range(0, 100):
sleep(0.05)
    # the normal process exits immediately
process = Normal()
process.start()
sleep(0.5)
os.kill(process.pid, signal.SIGTERM)
sleep(0.5)
assert not process.is_alive()
# the other process exits when it's done
process = Delayed()
process.start()
sleep(0.5)
os.kill(process.pid, signal.SIGTERM)
sleep(0.5)
assert process.is_alive()
# stop the test
os.kill(process.pid, signal.SIGKILL)
|
seantis/spare
|
spare/tests/test_block.py
|
<gh_stars>1-10
import pytest
import random
from hypothesis import given, strategies
from spare.block import AVAILABLE_BLOCKS
from secrets import token_bytes
@pytest.mark.parametrize("cls", AVAILABLE_BLOCKS.values())
def test_block(cls):
block = cls(password=b'password', nonce=token_bytes(16), data=b'foobar')
block.encrypt()
assert block.data != b'foobar'
block.decrypt()
assert block.data == b'foobar'
@pytest.mark.parametrize("cls", AVAILABLE_BLOCKS.values())
def test_large_block(cls):
data = token_bytes(1_048_576)
block = cls(password=b'password', nonce=token_bytes(16), data=data)
block.encrypt()
assert block.data != data
block.decrypt()
assert block.data == data
@pytest.mark.parametrize("cls", AVAILABLE_BLOCKS.values())
@given(data=strategies.binary(), password=strategies.text())
def test_block_with_random_binary_data(cls, data, password):
block = cls(password.encode('utf-8'), token_bytes(16), data)
block.encrypt()
assert block.data != data
block.decrypt()
assert block.data == data
@pytest.mark.parametrize("cls", AVAILABLE_BLOCKS.values())
def test_empty_block(cls):
block = cls(b'password', token_bytes(16), b'')
block.encrypt()
assert block.data != b''
block.decrypt()
assert block.data == b''
@pytest.mark.parametrize("cls", AVAILABLE_BLOCKS.values())
def test_flip_bit(cls):
block = cls(b'password', token_bytes(16), b'foobar')
block.encrypt()
data = bytearray(block.data)
bit = random.randrange(0, len(data))
data[bit] = data[bit] ^ 1
block.data = bytes(data)
with pytest.raises(Exception) as e:
block.decrypt()
assert 'InvalidTag' in str(e) or 'IntegrityError' in str(e)
|
seantis/spare
|
spare/inventory.py
|
import grp
import hashlib
import io
import os
import os.path
import platform
import pwd
import re
import stat
from cached_property import cached_property
from collections import defaultdict
from contextlib import suppress
from pathlib import Path
from spare import log
from spare.utils import abort_if_file_changes_during_read, read_in_chunks
def hash_implementation():
""" We use blake2b with a 32 bytes digest size, which seems to be a good
compromise between security, performance and short digest size.
The checksum is not relevant for security, it is simply used to detect
differences between files.
"""
return hashlib.blake2b(digest_size=32)
def file_checksum(path):
""" Generates the checksum for the given path. """
with abort_if_file_changes_during_read(path):
m = hash_implementation()
with open(path, 'rb') as f:
for chunk in read_in_chunks(f, io.DEFAULT_BUFFER_SIZE):
m.update(chunk)
return m.hexdigest()
def scandir(path, recurse=True, follow_symlinks=False):
""" Runs os.scandir recursively with a coroutine interface.
The coroutine interface is used to add extra paths to be scanned during
the execution of the generator.
"""
unscanned = {path}
while unscanned:
next_path = unscanned.pop()
# it's possible for directories to vanish during operation
if next_path != path and not os.path.lexists(next_path):
continue # pragma: no cover
with os.scandir(next_path) as iterator:
for e in iterator:
extra_path = yield e
if extra_path:
unscanned.add(extra_path)
if recurse and e.is_dir(follow_symlinks=follow_symlinks):
unscanned.add(e.path)
class Inventory(object):
""" Manages metadata related to a local path.
Every time the `scan` method is called, two dictionaries are created
by walking the given path.
The structure dictionary contains all the paths and metadata to the files
found in the path. For example:
{
'pictures': {
'type': 'directory',
'user': 'denis',
'group': 'staff',
'mode': 41453,
'size': 0,
'mtime_ns': 1524208524622212344
}
'pictures/001.jpg': {
'type': 'file',
'user': 'denis',
'group': 'staff',
'mode': 41380,
'size': 1024,
'mtime_ns': 1524208524622245644
},
'bilder': {
'type': 'symlink',
'target': 'pictures',
'user': 'denis',
'group': 'staff',
'mode': 41453,
'size': 0,
'mtime_ns': 1524208524622278944
}
}
The structure is used to rebuild the directory tree when restoring a
remote backup.
The other dictionary, the data dictionary, contains the hashes of all
files that we can upload remotely together with their paths. For example:
{
'0e5751c026e...': ['pictures/001.jpg']
}
As multiple files might have the same hash we can have multiple paths
for a single hash. The data dictionary is used to figure out what we
need to upload and what hash the file had at the time of the scan.
It also acts as a basic way of deduplicate files. That is, files which
have the same hash will be combined into a single upload and copied
back when restoring.
Note that due to the nature of our environment these scans cannot be relied
on blindly. The structure changes and files are modified while they are
being read or deleted before we get to process them further down the line.
    In this sense you should think of the inventory as a potentially
inconsistent snapshot.
"""
def __init__(self, path, skip=None):
self.path = Path(path)
self.structure = {}
self.files = defaultdict(list)
if skip:
skip = (skip, ) if isinstance(skip, str) else skip
paths = '|'.join(p.lstrip('./') for p in skip)
paths = re.compile(rf'({paths})')
self.skip = paths
else:
self.skip = None
@property
def identity(self):
return f'{platform.node()}:{self.path}:{self.path.stat().st_ino}'
@cached_property
def users(self):
return {pw.pw_uid: pw.pw_name for pw in pwd.getpwall()}
@cached_property
def groups(self):
return {gr.gr_gid: gr.gr_name for gr in grp.getgrall()}
def cache_status(self, status):
return {
'user': self.users[status.st_uid],
'group': self.groups[status.st_gid],
'mode': status.st_mode,
'size': status.st_size,
'mtime_ns': status.st_mtime_ns,
'inode': status.st_ino
}
def relative_path(self, path):
return Path(path).relative_to(self.path)
def absolute_path(self, path):
return self.path / path
def process_dir(self, path, status):
self.structure[str(self.relative_path(path))] = {
'type': 'directory',
**self.cache_status(status)
}
def process_symlink(self, path, status, target):
self.structure[str(self.relative_path(path))] = {
'type': 'symlink',
'target': str(self.relative_path(target)),
**self.cache_status(status)
}
def process_file(self, path, status):
relative_path = str(self.relative_path(path))
is_empty = status.st_size == 0
self.structure[relative_path] = {
'type': 'file',
'empty': is_empty,
**self.cache_status(status)
}
if not is_empty:
self.files[file_checksum(path)].append(relative_path)
def clear(self):
self.structure.clear()
self.files.clear()
def scan(self):
self.clear()
if self.path.is_dir():
self.scan_directory(self.path)
else:
self.scan_entry(self.path)
def scan_directory(self, path, recurse=True):
scanner = scandir(path, recurse)
extra = None
with suppress(StopIteration):
while True:
entry = scanner.send(extra)
extra = self.scan_entry(entry)
def scan_entry(self, entry):
if self.skip and self.skip.match(str(self.relative_path(entry))):
return None
if isinstance(entry, os.DirEntry):
status = entry.stat(follow_symlinks=False)
else:
status = entry.stat()
path = entry.__fspath__()
scan_also = None
if stat.S_ISCHR(status.st_mode): # pragma: no cover
log.warn(f"Skipping character special device {path}")
# we can't cover this on Travis currently as we don't have access to
# a block device and can't create one without sudo
elif stat.S_ISBLK(status.st_mode): # pragma: no cover
log.warn(f"Skipping block special device {path}")
elif stat.S_ISFIFO(status.st_mode):
log.warn(f"Skipping named pipe {path}")
elif stat.S_ISSOCK(status.st_mode):
log.warn(f"Skipping socket {path}")
elif entry.is_symlink():
target = Path(os.readlink(path))
if not target.exists():
log.warn(f"Skipping broken symlink {path}")
elif self.path not in target.parents:
if target.is_dir():
log.warn(f"Processing symlink {path} as a directory")
self.process_dir(path, status)
scan_also = path
else:
log.warn(f"Processing symlink {path} as a file")
self.process_file(path, status)
else:
self.process_symlink(path, status, target)
elif entry.is_dir():
self.process_dir(path, status)
elif entry.is_file():
self.process_file(path, status)
else:
raise NotImplementedError # pragma: no cover
return scan_also
|
seantis/spare
|
spare/tests/test_inventory.py
|
import os
import pytest
import socket
import tempfile
from contextlib import suppress
from io import UnsupportedOperation
from pathlib import Path
from spare.inventory import Inventory, scandir
def test_empty_inventory(temporary_path):
inventory = Inventory(temporary_path)
inventory.scan()
assert not inventory.structure
assert not inventory.files
def test_single_file_inventory(temporary_path):
with (temporary_path / 'foo').open('w') as f:
f.write('foo')
inventory = Inventory((temporary_path / 'foo'))
inventory.scan()
assert len(inventory.structure) == 1
assert len(inventory.files) == 1
assert '.' in inventory.structure
assert list(inventory.files.values())[0][0] == '.'
def test_empty_file_inventory(temporary_path):
(temporary_path / 'foo').touch()
inventory = Inventory(temporary_path)
inventory.scan()
assert len(inventory.structure) == 1
assert len(inventory.files) == 0
assert inventory.structure['foo']['type'] == 'file'
assert inventory.structure['foo']['empty'] is True
def test_single_symlink_inventory(temporary_path):
(temporary_path / 'foo').touch()
(temporary_path / 'bar').symlink_to((temporary_path / 'foo'))
inventory = Inventory((temporary_path / 'bar'))
inventory.scan()
assert inventory.structure['.']['type'] == 'file'
def test_nested_paths(temporary_path):
(temporary_path / 'foo').mkdir()
with (temporary_path / 'foo' / 'bar').open('w') as f:
f.write('foo')
with (temporary_path / 'bar').open('w') as f:
f.write('foo')
inventory = Inventory(temporary_path)
inventory.scan()
assert 'foo' in inventory.structure
assert 'foo/bar' in inventory.structure
assert 'bar' in inventory.structure
# two files with the same data
assert len(inventory.files) == 1
assert len(tuple(inventory.files.values())[0]) == 2
def test_symlinks(temporary_path):
with (temporary_path / 'foo').open('w') as f:
f.write('foo')
(temporary_path / 'bar').symlink_to((temporary_path / 'foo'))
inventory = Inventory(temporary_path)
inventory.scan()
assert len(inventory.files) == 1
assert len(inventory.structure) == 2
assert inventory.structure['bar']['target'] == 'foo'
def test_ignore_devices():
# if we can scan /dev without errors we handle all the special devices
inventory = Inventory('/dev')
# try a bunch of folders to really get all kinds of devices
# no need to do this recursively (which might not be a great idea)
ignored = (
FileNotFoundError,
UnsupportedOperation,
PermissionError,
OSError
)
for path in ('/dev', '/dev/block', '/dev/disk/by-uuid'):
with suppress(*ignored):
inventory.scan_directory(Path(path), recurse=False)
def test_read_outside_symlink(temporary_path):
with tempfile.TemporaryDirectory() as d:
(Path(d) / 'outside').touch()
(temporary_path / 'inside').symlink_to(Path(d) / 'outside')
inventory = Inventory(temporary_path)
inventory.scan()
assert inventory.structure['inside']['type'] == 'file'
def test_read_outside_symlink_to_directory():
with tempfile.TemporaryDirectory() as planet:
with (Path(planet) / 'earth').open('w') as f:
f.write('earth')
with tempfile.TemporaryDirectory() as continent:
with (Path(continent) / 'europe').open('w') as f:
f.write('europe')
(Path(continent) / 'planet').symlink_to(planet)
with tempfile.TemporaryDirectory() as country:
with (Path(country) / 'switzerland').open('w') as f:
f.write('switzerland')
(Path(country) / 'continent').symlink_to(continent)
inventory = Inventory(country)
inventory.scan()
assert inventory.structure['switzerland']['type'] == 'file'
assert inventory.structure['continent']['type'] == 'directory'
assert inventory.structure['continent/europe']['type'] == 'file'
assert inventory.structure['continent/planet']['type'] == 'directory'
assert inventory.structure['continent/planet/earth']['type'] == 'file'
values = {p for paths in inventory.files.values() for p in paths}
assert values == {
'switzerland',
'continent/europe',
'continent/planet/earth',
}
def test_detect_infinite_symlink_loop():
with tempfile.TemporaryDirectory() as outside:
with tempfile.TemporaryDirectory() as inside:
(Path(outside) / 'inside').symlink_to(inside)
(Path(inside) / 'outside').symlink_to(outside)
with pytest.raises(OSError):
inventory = Inventory(inside)
inventory.scan()
def test_ignore_broken_symlink(temporary_path):
(temporary_path / 'foo').touch()
(temporary_path / 'bar').symlink_to((temporary_path / 'foo'))
(temporary_path / 'foo').unlink()
inventory = Inventory(temporary_path)
inventory.scan()
assert not inventory.structure
def test_ignore_fifo(temporary_path):
fifo = (temporary_path / 'fifo')
os.mkfifo(fifo)
inventory = Inventory(temporary_path)
inventory.scan()
assert not inventory.structure
assert not inventory.files
def test_ignore_socket_file(temporary_path):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.bind(str(temporary_path / 'socket'))
inventory = Inventory(temporary_path)
inventory.scan()
assert not inventory.structure
assert not inventory.files
def test_skip_files(temporary_path):
(temporary_path / 'foo').mkdir()
(temporary_path / 'foo' / 'bar').touch()
(temporary_path / 'bar').mkdir()
(temporary_path / 'bar' / 'bar').touch()
inventory = Inventory(temporary_path, skip=('./foo', ))
inventory.scan()
assert set(inventory.structure.keys()) == {
'bar',
'bar/bar',
}
inventory = Inventory(temporary_path, skip=('./bar', ))
inventory.scan()
assert set(inventory.structure.keys()) == {
'foo',
'foo/bar',
}
inventory = Inventory(temporary_path, skip=('./foo/bar', ))
inventory.scan()
assert set(inventory.structure.keys()) == {
'bar',
'bar/bar',
'foo',
}
def test_missing_path_in_scandir(temporary_path):
# the initial path *has* to exist
with pytest.raises(FileNotFoundError):
list(scandir(temporary_path / 'foo'))
# other paths may be missing
(temporary_path / 'bar').mkdir()
(temporary_path / 'bar' / 'baz').mkdir()
scanner = scandir(temporary_path)
_ = scanner.send(None)
_ = scanner.send(temporary_path / 'bar')
_ = scanner.send(temporary_path / 'foo')
|
seantis/spare
|
spare/recovery.py
|
import os
import grp
import pwd
from cached_property import cached_property
from itertools import groupby
from pathlib import Path
from spare import log, FOLLOW_SYMLINKS
from spare.errors import TargetPathNotEmpty
from spare.download import Download, DownloadProcess
from multiprocessing import cpu_count, Queue
class Recovery(object):
""" Responsible for the recovery of snapshots to any given path.
Whereas our backing up procedures try to be conservative with cpu and
memory usage, this module tries to maximise recovery speed. The assumption
is that recovery happens on a system that is otherwise unused.
"""
def __init__(self, envoy, snapshot):
self.envoy = envoy
self.snapshot = snapshot
@cached_property
def uids(self):
return {pw.pw_name: pw.pw_uid for pw in pwd.getpwall()}
@cached_property
def gids(self):
return {gr.gr_name: gr.gr_gid for gr in grp.getgrall()}
def restore(self, target):
log.info(f"Restoring {target}")
target = Path(target)
target.mkdir(exist_ok=True)
for path in target.iterdir():
raise TargetPathNotEmpty(path)
log.info(f"Restoring folder structure of {target}")
self.restore_structure(target)
log.info(f"Downloading data for {target}")
self.download_data(target)
log.info(f"Restored {target}")
def restore_structure(self, target):
structure = self.snapshot.meta['structure']
for path in sorted(structure):
status = structure[path]
path = target / path
if status['type'] == 'directory':
path.mkdir()
elif status['type'] == 'symlink':
path.symlink_to(target / status['target'])
elif status['type'] == 'file':
path.touch()
else:
raise NotImplementedError # pragma: no cover
uid = self.uids.get(status['user'], -1)
gid = self.gids.get(status['group'], -1)
uid == -1 and log.warn(f"Unknown owner {status['user']}: {path}")
gid == -1 and log.warn(f"Unknown group {status['group']}: {path}")
os.chmod(path, status['mode'], **FOLLOW_SYMLINKS)
os.chown(path, uid, gid, **FOLLOW_SYMLINKS)
def download_data(self, target):
structure = self.snapshot.meta['structure']
def inode(path):
return structure[path]['inode']
def by_inode(paths):
return groupby(sorted(paths, key=inode), key=inode)
queue = Queue()
processes = tuple(
DownloadProcess(
endpoint=self.envoy.s3.endpoint,
access_key=self.envoy.s3.access_key,
secret_key=self.envoy.s3.secret_key,
bucket=self.envoy.bucket_name,
                password=self.envoy.password,
queue=queue
) for _ in range(cpu_count())
)
for process in processes:
process.start()
for digest, paths in self.snapshot.meta['files'].items():
download = Download(prefix=digest, digest=digest)
log.info(f"Downloading {paths[0]}")
for _, paths in by_inode(paths):
path, *rest = (target / p for p in paths)
download.to(path, hardlinks=rest)
queue.put(download)
for process in processes:
queue.put(None)
for process in processes:
process.join()
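
# A tiny illustration (not used by the library) of the inode grouping done in
# Recovery.download_data: paths that shared an inode in the source were
# hardlinks, so they are restored as one copy plus hardlinks. The structure
# dict below is made up for illustration only.
if __name__ == '__main__':  # pragma: no cover
    structure = {'foo/a': {'inode': 1}, 'foo/b': {'inode': 1}, 'bar/a': {'inode': 2}}

    def inode(path):
        return structure[path]['inode']

    grouped = {
        i: tuple(paths)
        for i, paths in groupby(sorted(structure, key=inode), key=inode)
    }
    assert grouped == {1: ('foo/a', 'foo/b'), 2: ('bar/a', )}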
|
seantis/spare
|
spare/cli.py
|
import click
import pdb
import sys
import traceback
from logbook import StreamHandler
from spare.backup import create, restore, validate, lock, unlock
from spare.utils import s3_client
VALID_PATH = click.Path(exists=True, file_okay=False)
def enable_post_mortem_debugging(): # pragma: no cover
def hook(type, value, tb):
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
sys.__excepthook__(type, value, tb)
else:
traceback.print_exception(type, value, tb)
pdb.post_mortem(tb)
sys.excepthook = hook
@click.group()
@click.option('--pdb', help="Enable post-mortem debugging", is_flag=True)
@click.option('--verbose', help="Print log messages to stdout", is_flag=True)
@click.pass_context
def cli(ctx, pdb, verbose): # pragma: no cover
if pdb:
enable_post_mortem_debugging()
level = verbose and 'INFO' or 'WARNING'
StreamHandler(sys.stdout, level=level).push_application()
@cli.command(name='create')
@click.option('--endpoint', envvar='SPARE_ENDPOINT', required=True)
@click.option('--access-key', envvar='SPARE_ACCESS_KEY', required=True)
@click.option('--secret-key', envvar='SPARE_SECRET_KEY', required=True)
@click.option('--password', envvar='SPARE_PASSWORD', required=True)
@click.option('--bucket', envvar='SPARE_BUCKET', required=True)
@click.option('--path', envvar='SPARE_PATH', type=VALID_PATH, required=True)
@click.option('--force', default=False, required=False, is_flag=True)
@click.option('--skip', multiple=True, required=False)
def create_cli(endpoint, access_key, secret_key, path,
password, bucket, skip, force):
s3 = s3_client(endpoint, access_key, secret_key)
create(path, s3, bucket, password, skip=skip or None, force=force)
@cli.command(name='restore')
@click.option('--endpoint', envvar='SPARE_ENDPOINT', required=True)
@click.option('--access-key', envvar='SPARE_ACCESS_KEY', required=True)
@click.option('--secret-key', envvar='SPARE_SECRET_KEY', required=True)
@click.option('--password', envvar='SPARE_PASSWORD', required=True)
@click.option('--bucket', envvar='SPARE_BUCKET', required=True)
@click.option('--path', envvar='SPARE_PATH', type=VALID_PATH, required=True)
def restore_cli(endpoint, access_key, secret_key, path, password, bucket):
s3 = s3_client(endpoint, access_key, secret_key)
restore(path, s3, bucket, password)
@cli.command(name='validate')
@click.option('--endpoint', envvar='SPARE_ENDPOINT', required=True)
@click.option('--access-key', envvar='SPARE_ACCESS_KEY', required=True)
@click.option('--secret-key', envvar='SPARE_SECRET_KEY', required=True)
@click.option('--password', envvar='SPARE_PASSWORD', required=True)
@click.option('--bucket', envvar='SPARE_BUCKET', required=True)
def validate_cli(endpoint, access_key, secret_key, password, bucket):
s3 = s3_client(endpoint, access_key, secret_key)
rc = 0 if validate(s3, bucket, password) else 1
sys.exit(rc)
@cli.command(name='lock')
@click.option('--endpoint', envvar='SPARE_ENDPOINT', required=True)
@click.option('--access-key', envvar='SPARE_ACCESS_KEY', required=True)
@click.option('--secret-key', envvar='SPARE_SECRET_KEY', required=True)
@click.option('--password', envvar='SPARE_PASSWORD', required=True)
@click.option('--bucket', envvar='SPARE_BUCKET', required=True)
def lock_cli(endpoint, access_key, secret_key, password, bucket):
s3 = s3_client(endpoint, access_key, secret_key)
rc = 0 if lock(s3, bucket, password) else 1
sys.exit(rc)
@cli.command(name='unlock')
@click.option('--endpoint', envvar='SPARE_ENDPOINT', required=True)
@click.option('--access-key', envvar='SPARE_ACCESS_KEY', required=True)
@click.option('--secret-key', envvar='SPARE_SECRET_KEY', required=True)
@click.option('--password', envvar='SPARE_PASSWORD', required=True)
@click.option('--bucket', envvar='SPARE_BUCKET', required=True)
def unlock_cli(endpoint, access_key, secret_key, password, bucket):
s3 = s3_client(endpoint, access_key, secret_key)
rc = 0 if unlock(s3, bucket, password) else 1
sys.exit(rc)
|
seantis/spare
|
spare/block.py
|
import lzma
import hashlib
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from miscreant.aead import AEAD
AVAILABLE_BLOCKS = {}
DEFAULT_BLOCK = 'aes-siv'
class Block(object):
""" Interface to implement a block. Each block is meant to encrypt and
decrypt data in memory.
    Blocks may share a single password, but they are virtually guaranteed to
    have independent nonces (randomly chosen from a pool of 2^128 values).
Blocks should also compress/decompress data, the used encryption should
not use any padding and it should authenticate during/after decryption.
"""
__slots__ = ('password', 'nonce', 'data')
def __init_subclass__(cls, id, **kwargs):
assert id not in AVAILABLE_BLOCKS
AVAILABLE_BLOCKS[id] = cls
super().__init_subclass__(**kwargs)
def __init__(self, password, nonce, data):
""" Called with the password, nonce and data to encrypt/decrypt.
All parameters ought to be in bytes.
"""
raise NotImplementedError # pragma: nocover
def encrypt(self):
""" Encrypts self.data in-place, doesn't return anything. """
raise NotImplementedError # pragma: nocover
def decrypt(self):
""" Decrypts self.data in-place, doesn't return anything. """
raise NotImplementedError # pragma: nocover
class LZMA_AES_SIV_Block(Block, id='aes-siv'):
""" The default block implementation, using AES-SIV by miscreant.
Blocks are compressed using LZMA before being encrypted.
"""
__slots__ = ('password', 'nonce', 'data')
def __init__(self, password, nonce, data):
self.password = hashlib.sha512(password).digest()
self.nonce = nonce
self.data = data
@property
def aead(self):
return AEAD('AES-SIV', self.password)
def encrypt(self):
self.data = self.aead.seal(lzma.compress(self.data), self.nonce)
def decrypt(self):
self.data = lzma.decompress(self.aead.open(self.data, self.nonce))
class LZMA_AES_GCM_Block(Block, id='aes-gcm'):
""" Another block implementation, using AES-GCM via the cryptography
module. Mainly used to prove that independent block implementations
work as intended.
Blocks are compressed using LZMA before being encrypted.
"""
__slots__ = ('password', 'nonce', 'data')
def __init__(self, password, nonce, data):
        self.password = hashlib.sha256(password).digest()
# use the NIST recommended 96 bits for the nonce
self.nonce = hashlib.blake2b(nonce, digest_size=12).digest()
self.data = data
@property
def aesgcm(self):
return AESGCM(self.password)
def encrypt(self):
self.data = lzma.compress(self.data)
self.data = self.aesgcm.encrypt(self.nonce, self.data, None)
def decrypt(self):
self.data = self.aesgcm.decrypt(self.nonce, self.data, None)
self.data = lzma.decompress(self.data)
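
# A minimal usage sketch (not part of the library): round-trips some bytes
# through the default block implementation. The password, nonce and payload
# below are made up for illustration; in spare they are supplied by the Envoy.
if __name__ == '__main__':  # pragma: no cover
    import secrets

    nonce = secrets.token_hex(16).encode('utf-8')
    block = AVAILABLE_BLOCKS[DEFAULT_BLOCK](b'my-password', nonce, b'hello world')

    block.encrypt()
    assert block.data != b'hello world'  # compressed and encrypted in-place

    block.decrypt()
    assert block.data == b'hello world'  # authenticated and decompressed again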
|
seantis/spare
|
spare/envoy.py
|
import re
import secrets
import weakref
from boto3.s3.transfer import TransferConfig
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO
from spare.block import DEFAULT_BLOCK, AVAILABLE_BLOCKS
from spare.errors import BucketAlreadyLockedError
from spare.errors import BucketNotLockedError
from spare.errors import BucketOtherwiseUsedError
from spare.errors import ExistingPrefixError
from spare.errors import InvalidPrefixError
from spare.utils import read_in_chunks, on_kill, delay_signals
def padded(n):
return f'{n:0>9}'
FIRST_BLOCK_MARKER = f'/{padded(1)}-'
class Envoy(object):
""" Provides the bridge between an unencrypted local file and a remote file
encrypted in chunks.
The keys used for each chunks are of the following format:
prefix/000000001-nonce
For example:
my-file/000000001-c3f543e56704af2ca4779a7d530836cc
my-file/000000002-4489c3d7ff0e090ad1a1260efa2f5084
As the number indicates, we are limited to 999'999'999 blocks of 1 MiB
each, so the largest file we can accept after compression and encryption
is 953.674 Tebibytes.
"""
# valid prefixes are limited to a-z A-Z 0-9 and underscore. It may not
# begin with a dot (reserved for envoy-internal files) and it must
# be at least two characters long
valid_prefix = re.compile(r'^[a-zA-Z0-9_]{1}[a-zA-Z0-9_\.]+$')
def __init__(self, s3, bucket, password, block=DEFAULT_BLOCK):
self.s3 = s3
self.bucket = s3.Bucket(bucket)
self.bucket_name = bucket
self.password = password
        self.block_class = AVAILABLE_BLOCKS[block]
# you should not change these values outside of unit tests!
self.blocksize = 1_048_576 # found to be good value through testing
self.noncesize = 16
# as long as an envoy is locked, we have exclusive access and can
# therefore keep track of the known prefixes ourselves
self._known_prefixes = None
# disable threads as we manage our own
self.transfer_config = TransferConfig(use_threads=False)
# clean exit on kill
on_kill(weakref.WeakMethod(self.unlock))
def __enter__(self):
self.lock()
return self
def __exit__(self, *args):
self.unlock()
def lock(self):
self.ensure_bucket_exists()
self.ensure_bucket_is_ours()
if self.locked:
raise BucketAlreadyLockedError(self.bucket_name)
self._known_prefixes = set(self.prefixes())
self._known_prefixes.add('.spare')
with BytesIO() as f:
self.bucket.upload_fileobj(f, '.lock')
self.on_store_prefix('.lock')
@property
def locked(self):
return self.is_known_prefix('.lock')
def unlock(self):
if self.is_known_prefix('.lock'):
self.bucket.objects.filter(Prefix='.lock').delete()
self.on_delete_prefix('.lock')
self._known_prefixes = None
def ensure_locked(self):
if not self.locked:
raise BucketNotLockedError(self.bucket_name)
def ensure_bucket_exists(self):
if not self.bucket.creation_date:
self.bucket.create()
with BytesIO(b'https://github.com/seantis/spare') as f:
self.bucket.upload_fileobj(f, '.spare')
def ensure_bucket_is_ours(self):
if not self.is_known_prefix('.spare'):
raise BucketOtherwiseUsedError(self.bucket_name)
def ensure_prefix_unknown(self, prefix):
if self.is_known_prefix(prefix):
raise ExistingPrefixError(prefix)
def ensure_valid_prefix(self, prefix):
if not prefix or not self.valid_prefix.match(prefix):
raise InvalidPrefixError(prefix)
def spawn_block(self, nonce, data):
return self.block_class(
password=self.password.encode('utf-8'),
nonce=nonce.encode('utf-8'),
data=data
)
def generate_nonce(self):
return secrets.token_hex(self.noncesize)
def extract_nonce(self, key):
return key.split('-')[-1]
def extract_prefix(self, key):
return key.split('/')[0]
def is_first_block(self, key):
return FIRST_BLOCK_MARKER in key
def is_known_prefix(self, prefix):
if self._known_prefixes is not None:
return prefix in self._known_prefixes
for obj in self.bucket.objects.filter(Prefix=prefix, MaxKeys=1):
return True
return False
def on_store_prefix(self, prefix):
if self._known_prefixes is not None:
self._known_prefixes.add(prefix)
def on_delete_prefix(self, prefix):
if self._known_prefixes is not None:
self._known_prefixes.discard(prefix)
def keys(self, prefix=None):
for obj in self.bucket.objects.filter(Prefix=prefix or ''):
if not obj.key.startswith('.'):
yield obj.key
def prefixes(self, prefix=None):
for key in self.keys(prefix=prefix):
if self.is_first_block(key):
yield self.extract_prefix(key)
def delete(self, prefix):
self.ensure_locked()
self.ensure_valid_prefix(prefix)
self.bucket.objects.filter(Prefix=prefix).delete()
self.on_delete_prefix(prefix)
def send(self, prefix, fileobj, before_encrypt=None):
self.ensure_locked()
self.ensure_valid_prefix(prefix)
self.ensure_prefix_unknown(prefix)
chunks = read_in_chunks(fileobj, self.blocksize)
        # uploading chunks in threads makes little difference in memory and cpu
        # usage, but it does speed up large uploads by roughly 20%
        #
        # further up the stack threads are not much more effective, but they
        # use much more cpu/memory
        #
        # maybe having some kind of pipeline for all chunks (not just per file)
        # would improve things further, but for now this seems reasonable
        # enough
def upload_block(name, block):
with BytesIO(block.data) as buffer:
self.bucket.upload_fileobj(
buffer, name, Config=self.transfer_config)
        with delay_signals(f"Uploading {prefix}"):
with ThreadPoolExecutor() as executor:
for n, chunk in enumerate(chunks, start=1):
nonce = self.generate_nonce()
block = self.spawn_block(nonce, chunk)
if before_encrypt is not None:
before_encrypt(chunk)
block.encrypt()
block_name = f'{prefix}/{padded(n)}-{nonce}'
executor.submit(upload_block, block_name, block)
self.on_store_prefix(prefix)
def recv(self, prefix, fileobj, after_decrypt=None):
self.ensure_valid_prefix(prefix)
for obj in self.bucket.objects.filter(Prefix=prefix):
nonce = self.extract_nonce(obj.key)
with BytesIO() as buffer:
self.bucket.download_fileobj(
obj.key, buffer, Config=self.transfer_config)
buffer.seek(0)
block = self.spawn_block(nonce, buffer.read())
block.decrypt()
if after_decrypt is not None:
after_decrypt(block.data)
fileobj.write(block.data)
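
# A small illustration (not used by the library) of the key layout documented
# on the Envoy class: every uploaded block of a prefix gets a zero-padded
# index and a random nonce. The prefix 'my_file' below is made up.
if __name__ == '__main__':  # pragma: no cover
    keys = [f'my_file/{padded(n)}-{secrets.token_hex(16)}' for n in (1, 2, 3)]
    for key in keys:
        print(key)  # e.g. my_file/000000001-<32 hex characters>
    # the first block of a prefix carries the marker used by Envoy.prefixes()
    assert FIRST_BLOCK_MARKER in keys[0]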
|
seantis/spare
|
spare/utils.py
|
import os
import signal
import stat
import sys
import weakref
from boto3 import resource
from botocore.client import Config
from contextlib import contextmanager, ExitStack
from logbook import SyslogHandler
from pathlib import Path
from spare import log
from spare.errors import FileChangedDuringReadError
CLEANUP_FUNCTIONS = None
KILL_SIGNALS = (signal.SIGTERM, signal.SIGINT)
def read_in_chunks(fileobj, chunksize):
fileobj.seek(0)
while True:
chunk = fileobj.read(chunksize)
if chunk:
yield chunk
else:
break
def clean_exit(*args):
""" Exits cleanly, running cleanup functions first. """
return_code = 0
for ref in CLEANUP_FUNCTIONS:
try:
fn = ref()
if fn:
fn()
except Exception:
return_code = 1
log.exception("Failure during signal handling")
sys.exit(return_code)
def on_kill(fn, signals=KILL_SIGNALS):
""" Executes cleanup functions when the program is killed. """
assert isinstance(fn, weakref.ref)
global CLEANUP_FUNCTIONS
if CLEANUP_FUNCTIONS is None:
CLEANUP_FUNCTIONS = set()
for signum in signals:
signal.signal(signum, clean_exit)
CLEANUP_FUNCTIONS.add(fn)
@contextmanager
def abort_if_file_changes_during_read(path):
before = Path(path).stat()
yield
after = Path(path).stat()
for attr in ('st_mtime_ns', 'st_size', 'st_ino', 'st_dev'):
if getattr(before, attr) != getattr(after, attr):
raise FileChangedDuringReadError(path)
def s3_client(endpoint, access_key, secret_key,
connect_timeout=5, read_timeout=10, max_attempts=2):
endpoint = '://' in endpoint and endpoint or f'https://{endpoint}'
s3 = resource(
service_name='s3',
endpoint_url=endpoint,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
config=Config(
connect_timeout=connect_timeout,
read_timeout=read_timeout,
retries={'max_attempts': max_attempts}
))
s3.endpoint = endpoint
s3.access_key = access_key
s3.secret_key = secret_key
return s3
@contextmanager
def writable(path):
""" Ensures the given path is writable, resetting the mode afterwards.
Usage:
with writable('/my/file'):
with open('/my/file', 'w') as f:
f.write('foobar')
"""
if not os.path.exists(path):
yield
else:
mode = stat.S_IMODE(os.stat(path).st_mode)
if mode != mode | stat.S_IWUSR:
os.chmod(path, mode | stat.S_IWUSR)
yield
os.chmod(path, mode)
# coverage is skipped here because it is tested in a subprocess
class delay_signal(object): # pragma: no cover
""" Blocks the handling of the given signal inside the with statement.
Once the with statement is exited, the last received signal is replayed.
This basically stops the user from restarting a server which is currently
running a backup or restore operation.
A message is sent to the syslog if this happens.
Usage:
with delay_signal(SIGTERM, 'doing something important'):
pass
"""
def __init__(self, signal, message):
self.signal = signal
self.message = message
def __enter__(self):
self.received = None
self.previous = signal.signal(self.signal, self.handler)
def handler(self, signal, frame):
self.received = (signal, frame)
try:
with SyslogHandler('spare', level='WARNING').applicationbound():
log.warn(f"Delaying handling of {self.signal.name}")
except IOError:
pass
def __exit__(self, type, value, traceback):
signal.signal(self.signal, self.previous)
if self.received:
self.previous(*self.received)
@contextmanager
def delay_signals(message, signals=KILL_SIGNALS):
""" Delay multiple signals at once. """
with ExitStack() as stack:
for signum in signals:
stack.enter_context(delay_signal(signum, message))
yield
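
# A short sketch (not used elsewhere) of read_in_chunks: the file object is
# rewound and then consumed in fixed-size pieces, which is how Envoy.send
# splits a file into blocks before encrypting them.
if __name__ == '__main__':  # pragma: no cover
    from io import BytesIO

    chunks = list(read_in_chunks(BytesIO(b'abcdefgh'), chunksize=3))
    assert chunks == [b'abc', b'def', b'gh']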
|
seantis/spare
|
spare/backup.py
|
from spare import log
from spare.envoy import Envoy
from spare.inventory import Inventory
from spare.recovery import Recovery
from spare.snapshot import SnapshotCollection
def create(path, s3, bucket, password, keep=1, skip=None, force=False):
inventory = Inventory(path, skip)
inventory.scan()
with Envoy(s3, bucket, password) as envoy:
collection = SnapshotCollection(envoy)
collection.load()
snapshot = collection.create()
snapshot.backup(inventory, force=force)
collection.prune(keep=keep)
def restore(path, s3, bucket, password, snapshot='latest'):
with Envoy(s3, bucket, password) as envoy:
collection = SnapshotCollection(envoy)
collection.load()
snapshot = collection.get(snapshot)
recovery = Recovery(envoy, snapshot)
recovery.restore(target=path)
def validate(s3, bucket, password):
with Envoy(s3, bucket, password) as envoy:
collection = SnapshotCollection(envoy)
collection.load()
valid = True
for snapshot in collection.snapshots:
if snapshot.validate():
log.info(f"{snapshot.prefix} is valid")
else:
log.error(f"{snapshot.prefix} has errors")
valid = False
return valid
def lock(s3, bucket, password):
envoy = Envoy(s3, bucket, password)
envoy.ensure_bucket_exists()
envoy.ensure_bucket_is_ours()
if not envoy.locked:
envoy.lock()
return True
return False
def unlock(s3, bucket, password):
envoy = Envoy(s3, bucket, password)
envoy.ensure_bucket_exists()
envoy.ensure_bucket_is_ours()
if envoy.locked:
envoy.unlock()
return True
return False
|
seantis/spare
|
spare/tests/test_cli.py
|
from click.testing import CliRunner
from spare.cli import create_cli
from spare.cli import lock_cli
from spare.cli import restore_cli
from spare.cli import unlock_cli
from spare.cli import validate_cli
def test_create_cli(endpoint, access_key, secret_key, temporary_path):
runner = CliRunner()
result = runner.invoke(create_cli, [
'--endpoint', endpoint.geturl(),
'--access-key', access_key,
'--secret-key', secret_key,
'--password', '<PASSWORD>',
'--bucket', 'foobar',
'--path', str(temporary_path)
])
assert result.exit_code == 0
result = runner.invoke(restore_cli, [
'--endpoint', endpoint.geturl(),
'--access-key', access_key,
'--secret-key', secret_key,
'--password', '<PASSWORD>',
'--bucket', 'foobar',
'--path', str(temporary_path)
])
assert result.exit_code == 0
result = runner.invoke(validate_cli, [
'--endpoint', endpoint.geturl(),
'--access-key', access_key,
'--secret-key', secret_key,
'--password', '<PASSWORD>',
'--bucket', 'foobar',
])
assert result.exit_code == 0
# lock once
result = runner.invoke(lock_cli, [
'--endpoint', endpoint.geturl(),
'--access-key', access_key,
'--secret-key', secret_key,
'--password', '<PASSWORD>',
'--bucket', 'foobar',
])
assert result.exit_code == 0
# lock again
result = runner.invoke(lock_cli, [
'--endpoint', endpoint.geturl(),
'--access-key', access_key,
'--secret-key', secret_key,
'--password', '<PASSWORD>',
'--bucket', 'foobar',
])
assert result.exit_code == 1
# unlock
result = runner.invoke(unlock_cli, [
'--endpoint', endpoint.geturl(),
'--access-key', access_key,
'--secret-key', secret_key,
'--password', '<PASSWORD>',
'--bucket', 'foobar',
])
assert result.exit_code == 0
# unlock again
result = runner.invoke(unlock_cli, [
'--endpoint', endpoint.geturl(),
'--access-key', access_key,
'--secret-key', secret_key,
'--password', '<PASSWORD>',
'--bucket', 'foobar',
])
assert result.exit_code == 1
|
seantis/spare
|
spare/tests/test_transfer.py
|
def test_access(s3):
assert not tuple(s3.buckets.iterator())
s3.create_bucket(Bucket='foobar')
assert tuple(s3.buckets.iterator())
def test_buckets_cleared_between_tests(s3):
assert not tuple(s3.buckets.iterator())
|
seantis/spare
|
spare/download.py
|
import os
import random
import shutil
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Process
from spare import log, FOLLOW_SYMLINKS
from spare.envoy import Envoy
from spare.inventory import hash_implementation
from spare.utils import s3_client, writable
from time import sleep
class Download(object):
""" Represents a single download.
Locally duplicated files are only stored once on the remote and we want
to only download them once as well.
Therefore we use a single prefix and n targets. Each target is a list of
    1-n paths. The first path receives a copy of the data behind the digest;
    all the other paths of the same target are hardlinks to that first path.
For example, say we have this folder structure where all the files
have the same content and the files in each folder have the same inode:
/ foo / a (inode 100)
/ foo / b (inode 100)
/ foo / c (inode 100)
/ bar / a (inode 200)
/ bar / b (inode 200)
/ bar / c (inode 200)
This should result in a single download of this form:
Download(prefix='xxx', targets=[
('/foo/a', '/foo/b', '/foo/c'),
('/bar/a', '/bar/b', '/bar/c')
])
    This in turn will lead to a download to /foo/a, with hardlinks to /foo/a
    from /foo/b and /foo/c, as well as a copy of /foo/a to /bar/a and hardlinks
    from /bar/b and /bar/c to /bar/a.
"""
__slots__ = ('prefix', 'digest', 'targets')
def __init__(self, prefix, digest):
self.prefix = prefix
self.digest = digest
self.targets = []
def to(self, path, hardlinks=()):
self.targets.append((path, *hardlinks))
class DownloadProcess(Process):
    """ Runs a DownloadManager in a separate process, pulling downloads from
    a shared queue and executing them in multiple threads.
"""
def __init__(self, endpoint, access_key, secret_key,
bucket, password, queue):
Process.__init__(self)
self.queue = queue
self.s3 = s3_client(endpoint, access_key, secret_key)
self.envoy = Envoy(self.s3, bucket, password)
def run(self):
with DownloadManager(self.envoy) as manager:
while True:
download = self.queue.get()
if not download:
break
manager.queue(download)
# sleep randomly to ensure that multiple processes
# read equally from the shared queue
sleep(random.uniform(0, 0.1))
class DownloadManager(object):
""" Takes download objects and downloads them using a threadpool executor.
In addition the download targets are realised after the download (incl.
copies and hardlinks).
    Since the download manager works with threads, it should be used as a
    context manager:
with DownloadManager(envoy) as manager:
download = Download('prefix', 'digest')
download.to(path, hardlinks)
download.to(path, hardlinks)
manager.queue(download)
"""
def __init__(self, envoy):
self.envoy = envoy
self.executor = ThreadPoolExecutor(max_workers=4)
self.results = []
def __enter__(self):
return self
def __exit__(self, *args):
for future in self.results:
future.result()
self.executor.shutdown(wait=True)
def queue(self, download):
self.results.append(self.executor.submit(self.fetch, download))
def fetch(self, download):
m = hash_implementation()
genesis = download.targets[0][0]
with writable(genesis):
with open(genesis, 'wb') as f:
self.envoy.recv(download.prefix, f, after_decrypt=m.update)
# the following branch is actually covered, but coverage.py does not
# consider it as such - I've tried all the tricks in the book and it
# will still not capture it, so 'no cover' it is
if download.digest != m.hexdigest(): # pragma: no cover
paths = (p for paths in download.targets for p in paths)
for path in paths:
log.error((
f"Unexpected checksum for {path}, "
f"expected {download.digest}, "
f"got {m.hexdigest()}"
))
for paths in download.targets:
clone, *links = paths
if clone != genesis:
shutil.copyfile(genesis, clone, **FOLLOW_SYMLINKS)
shutil.copystat(genesis, clone, **FOLLOW_SYMLINKS)
for link in links:
# files are touched during structure creation, which is a
# problem for hard links
os.unlink(link)
os.link(clone, link, **FOLLOW_SYMLINKS)
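
# A minimal sketch (not part of the module) of how a download is described,
# mirroring the /foo and /bar example in the Download docstring. The digest
# value is made up; real digests come from the snapshot metadata.
if __name__ == '__main__':  # pragma: no cover
    from pathlib import Path

    download = Download(prefix='xxx', digest='xxx')
    download.to(Path('/foo/a'), hardlinks=(Path('/foo/b'), Path('/foo/c')))
    download.to(Path('/bar/a'), hardlinks=(Path('/bar/b'), Path('/bar/c')))

    # one remote prefix, two targets, each a copy followed by its hardlinks
    assert len(download.targets) == 2
    assert download.targets[0] == (Path('/foo/a'), Path('/foo/b'), Path('/foo/c'))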
|
seantis/spare
|
setup.py
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
name = 'spare'
description = 'One S3 backup, encrypted on the fly.'
version = '0.4.0'
def get_long_description():
readme = open('README.rst').read()
history = open('HISTORY.rst').read()
# cut the part before the description to avoid repetition on pypi
readme = readme[readme.index(description) + len(description):]
return '\n'.join((readme, history))
setup(
name=name,
version=version,
description=description,
long_description=get_long_description(),
url='http://github.com/seantis/spare',
author='Seantis GmbH',
author_email='<EMAIL>',
license='MIT',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=name.split('.')[:-1],
include_package_data=True,
zip_safe=False,
platforms='any',
# we require 3.6.2 (not just 3.6.*), due to this bug:
# https://bugs.python.org/issue29581
python_requires='>=3.6.2',
install_requires=[
'boto3',
'cached_property',
'click',
'cryptography',
'logbook',
'miscreant',
'ulid-py',
],
extras_require=dict(
test=[
'hypothesis',
'mirakuru',
'flake8',
'port-for',
'pytest',
'pytest-cov',
'pytest-flake8',
'pytest-logbook'
],
),
entry_points={
'console_scripts': 'spare=spare.cli:cli'
},
classifiers=[
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Programming Language :: Python :: 3',
'Topic :: System :: Archiving :: Backup',
]
)
|
seantis/spare
|
spare/tests/conftest.py
|
import os
import port_for
import pytest
import secrets
import sys
import tempfile
from mirakuru import HTTPExecutor
from pathlib import Path
from spare.envoy import Envoy
from spare.utils import s3_client
from urllib.request import urlretrieve
MINIO_RELEASES = (
('darwin', 'https://dl.minio.io/server/minio/release/darwin-amd64/minio'),
('linux', 'https://dl.minio.io/server/minio/release/linux-amd64/minio')
)
@pytest.fixture(scope='session')
def access_key():
return secrets.token_hex(20)
@pytest.fixture(scope='session')
def secret_key():
return secrets.token_hex(40)
@pytest.fixture(scope='session')
def endpoint(minio):
return minio.url
@pytest.fixture(scope='function')
def s3(access_key, secret_key, endpoint, minio_path):
# minio has some concurrency problems with locks which compound if we
# use the default connection setup which includes lots of retries and
# big timeouts
s3 = s3_client(
endpoint.geturl(),
access_key,
secret_key,
connect_timeout=1,
read_timeout=1,
max_attempts=3
)
yield s3
for bucket in s3.buckets.iterator():
bucket.objects.all().delete()
bucket.delete()
@pytest.fixture(scope='session')
def minio_release():
for key, url in MINIO_RELEASES:
if key in sys.platform:
return url
raise RuntimeError(f"{sys.platform} is currently not supported")
@pytest.fixture(scope='session')
def minio_binary(request, minio_release):
path = Path(request.config.cache.makedir('minio'))
binary = path / 'minio'
if not binary.exists():
urlretrieve(minio_release, binary)
binary.chmod(0o755)
return binary
@pytest.fixture(scope='session')
def minio_path():
with tempfile.TemporaryDirectory() as d:
yield Path(d)
@pytest.fixture(scope='session')
def minio_port():
return port_for.select_random()
@pytest.fixture(scope='session')
def minio(minio_binary, minio_path, minio_port, access_key, secret_key):
os.environ['MINIO_ACCESS_KEY'] = access_key
os.environ['MINIO_SECRET_KEY'] = secret_key
os.environ['MINIO_UPDATE'] = 'off'
address = f'127.0.0.1:{minio_port}'
command = f'{minio_binary} server --address {address} {minio_path}'
endpoint = f'http://{address}'
# for some reason minio gets stuck at times if it prints to stdout
# or if it is run as a non-shell command
command += ' > /dev/null'
executor = HTTPExecutor(command, url=endpoint, status=405, shell=True)
executor.start()
yield executor
executor.stop()
@pytest.fixture(scope='function')
def temporary_path():
with tempfile.TemporaryDirectory() as d:
yield Path(d)
@pytest.fixture(scope='function')
def envoy(s3):
with Envoy(s3, 'bucket', 'password') as envoy:
yield envoy
|
seantis/spare
|
spare/errors.py
|
class SpareError(Exception):
pass
class FileChangedDuringReadError(SpareError):
""" Raised when a file has changed while it was read. """
def __init__(self, path):
self.path = path
class FileChangedBeforeUploadError(SpareError):
""" Raised when a file changed after it was inventoried, but before
it was uploaded.
"""
def __init__(self, path):
self.path = path
class PruneToZeroError(SpareError):
""" Raised when trying to prune a bucket in a way that would not leave
any snapshots behind.
"""
def __init__(self, snapshots_left):
self.snapshots_left = snapshots_left
class InvalidPrefixError(SpareError):
""" Raised when a given prefix is invalid. """
def __init__(self, prefix):
self.prefix = prefix
class ExistingPrefixError(SpareError):
""" Raised when a given prefix exists already. """
def __init__(self, prefix):
self.prefix = prefix
class BucketAlreadyLockedError(SpareError):
""" Raised when bucket is already locked. """
def __init__(self, bucket):
self.bucket = bucket
class TargetPathNotEmpty(SpareError):
""" Raised when a restore target path is not empty. """
def __init__(self, path):
self.path = path
class BucketNotLockedError(SpareError):
""" Raised when a bucket was expected to be locked, but was not. """
def __init__(self, bucket):
self.bucket = bucket
class SnapshotMismatchError(SpareError):
""" Raised when snapshot is attempted on a bucket which has snapshots
from a different host/path on it.
"""
def __init__(self, expected, found):
self.expected = expected
self.found = found
class BucketOtherwiseUsedError(SpareError):
""" Raised when the bucket is not managed by spare. """
def __init__(self, bucket):
self.bucket = bucket
|
seantis/spare
|
spare/tests/test_envoy.py
|
import os
import secrets
import signal
import weakref
import pytest
from io import BytesIO
from spare.block import AVAILABLE_BLOCKS
from spare.envoy import Envoy
from spare.errors import BucketAlreadyLockedError
from spare.errors import BucketNotLockedError
from spare.errors import BucketOtherwiseUsedError
from spare.errors import ExistingPrefixError
from spare.errors import InvalidPrefixError
from spare.utils import on_kill, delay_signals
from unittest.mock import patch
@pytest.mark.parametrize('block', AVAILABLE_BLOCKS)
def test_single_block(s3, block):
with Envoy(s3, 'my-bucket', 'my-password', block) as envoy:
envoy.send('document', BytesIO(b'secret'))
blocks = tuple(envoy.bucket.objects.filter(Prefix='document'))
assert len(blocks) == 1
assert blocks[0].get()['Body'].read() != b'secret'
output = BytesIO()
envoy.recv('document', output)
output.seek(0)
assert output.read() == b'secret'
@pytest.mark.parametrize('block', AVAILABLE_BLOCKS)
def test_multiple_blocks(s3, block):
with Envoy(s3, 'my-bucket', 'my-password', block) as envoy:
envoy.blocksize = 1024
data = secrets.token_bytes(1024 * 2)
envoy.send('document', BytesIO(data))
blocks = tuple(envoy.bucket.objects.filter(Prefix='document'))
assert len(blocks) == 2
output = BytesIO()
envoy.recv('document', output)
output.seek(0)
assert output.read() == data
def test_replace(s3):
with Envoy(s3, 'my-bucket', 'my-password') as envoy:
envoy.blocksize = 1024
initial = secrets.token_bytes(1024 * 2)
replacement = secrets.token_bytes(1024 * 1)
envoy.send('document', BytesIO(initial))
assert sum(1 for k in envoy.keys()) == 2
with pytest.raises(ExistingPrefixError):
envoy.send('document', BytesIO(replacement))
assert sum(1 for k in envoy.keys()) == 1
envoy.delete('document')
envoy.send('document', BytesIO(replacement))
output = BytesIO()
envoy.recv('document', output)
output.seek(0)
assert output.read() == replacement
def test_delete(s3):
with Envoy(s3, 'my-bucket', 'my-password') as envoy:
envoy.send('foo', BytesIO(b'foo'))
envoy.send('bar', BytesIO(b'bar'))
assert sum(1 for k in envoy.keys()) == 2
envoy.delete('bar')
assert sum(1 for k in envoy.keys()) == 1
with BytesIO() as output:
envoy.recv('foo', output)
assert output.getvalue() == b'foo'
def test_invalid_prefix(s3):
with Envoy(s3, 'bucket', 'password') as envoy:
# no dashes
with pytest.raises(InvalidPrefixError):
envoy.send('no-dashes', BytesIO())
# at least 2 characters long
with pytest.raises(InvalidPrefixError):
envoy.send('x', BytesIO())
# no dots
with pytest.raises(InvalidPrefixError):
envoy.send('.hidden', BytesIO())
def test_prefixes(s3):
with Envoy(s3, 'bucket', 'password') as envoy:
envoy.blocksize = 1024
envoy.ensure_bucket_exists()
assert not next(envoy.prefixes(), None)
data = secrets.token_bytes(1024 * 2)
envoy.send('foo', BytesIO(data))
assert sum(1 for k in envoy.keys()) == 2
assert tuple(envoy.prefixes()) == ('foo', )
def test_first_block(s3):
with Envoy(s3, 'bucket', 'password') as envoy:
envoy.blocksize = 16
envoy.ensure_bucket_exists()
envoy.send('foo', BytesIO(secrets.token_bytes(16*20)))
assert sum(1 for k in envoy.keys()) == 20
assert tuple(envoy.prefixes()) == ('foo', )
def test_lock(s3):
with Envoy(s3, 'bucket', 'password'):
pass
with Envoy(s3, 'bucket', 'password'):
pass
with pytest.raises(BucketAlreadyLockedError):
with Envoy(s3, 'bucket', 'password'):
with Envoy(s3, 'bucket', 'password'):
pass
def test_lock_enforced(s3):
with Envoy(s3, 'bucket', 'password'):
pass
with pytest.raises(BucketNotLockedError):
Envoy(s3, 'bucket', 'password').send('foo', BytesIO())
def test_envoy_fail_on_foreign_buckets(s3):
bucket = Envoy(s3, 'bucket', 'password').bucket
bucket.create()
bucket.upload_fileobj(BytesIO(b'foo'), 'bar')
with pytest.raises(BucketOtherwiseUsedError):
with Envoy(s3, 'bucket', 'password'):
pass
def test_clean_unlock(s3):
envoy = Envoy(s3, 'bucket', 'password')
envoy.lock()
assert envoy.locked
# the lock should have been removed
with patch('sys.exit'):
os.kill(os.getpid(), signal.SIGTERM)
assert not envoy.locked
# if the model is deleted, there should not be an error
del envoy
with patch('sys.exit'):
os.kill(os.getpid(), signal.SIGTERM)
# even if the method throws an error
on_kill(weakref.ref(AssertionError))
with patch('sys.exit'):
os.kill(os.getpid(), signal.SIGTERM)
# this should work with delay signal
envoy = Envoy(s3, 'bucket', 'password')
with patch('sys.exit'):
with delay_signals("Delaying signals"):
envoy.lock()
assert envoy.locked
os.kill(os.getpid(), signal.SIGTERM)
assert envoy.locked
assert not envoy.locked
|
seantis/spare
|
spare/tests/test_download.py
|
from io import BytesIO
from spare.download import Download, DownloadManager
def test_digestless_download(envoy, temporary_path):
envoy.send('foo', BytesIO(b'foo'))
with DownloadManager(envoy) as manager:
download = Download('foo', 'wrong-digest')
download.to(temporary_path / 'foo')
manager.queue(download)
assert (temporary_path / 'foo').open('rb').read() == b'foo'
def test_wrong_digest_download(envoy, temporary_path, loghandler):
envoy.send('foo', BytesIO(b'foo'))
with DownloadManager(envoy) as manager:
download = Download('foo', 'wrong-digest')
download.to(temporary_path / 'foo')
manager.queue(download)
# the download will still work
assert (temporary_path / 'foo').open('rb').read() == b'foo'
    # but we get an error in the logs
assert "Unexpected checksum for" in loghandler.records[0].message
|
seantis/spare
|
spare/tests/test_snapshot.py
|
import pytest
from io import BytesIO
from spare.errors import FileChangedBeforeUploadError
from spare.errors import SnapshotMismatchError
from spare.errors import PruneToZeroError
from spare.inventory import Inventory
from spare.snapshot import SnapshotCollection
def test_empty_bucket(envoy):
collection = SnapshotCollection(envoy)
collection.load()
assert not collection.snapshots
def test_save(envoy):
collection = SnapshotCollection(envoy)
s1 = collection.create()
s1.save()
collection.load()
assert len(collection.snapshots) == 1
s2 = collection.create()
s2.save()
collection.load()
assert len(collection.snapshots) == 2
s1.meta['foo'] = 'bar'
s1.save()
collection.load()
assert collection.snapshots[0].meta['foo'] == 'bar'
assert not collection.snapshots[1].meta
def test_prune(envoy):
collection = SnapshotCollection(envoy)
for i in range(0, 3):
snapshot = collection.create()
snapshot.meta['id'] = i
snapshot.save()
collection.load()
assert len(collection.snapshots) == 3
assert collection.snapshots[0].meta['id'] == 0
assert collection.snapshots[1].meta['id'] == 1
assert collection.snapshots[2].meta['id'] == 2
collection.prune(keep=4)
collection.load()
assert len(collection.snapshots) == 3
collection.prune(keep=3)
collection.load()
assert len(collection.snapshots) == 3
collection.prune(keep=2)
collection.load()
assert len(collection.snapshots) == 2
assert collection.snapshots[0].meta['id'] == 1
assert collection.snapshots[1].meta['id'] == 2
collection.prune(keep=1)
collection.load()
assert len(collection.snapshots) == 1
assert collection.snapshots[0].meta['id'] == 2
collection.prune(keep=1)
collection.load()
assert len(collection.snapshots) == 1
assert collection.snapshots[0].meta['id'] == 2
with pytest.raises(PruneToZeroError):
collection.prune(keep=0)
# make sure the internal files are not touched
assert envoy.locked
def test_change_before_upload(envoy, temporary_path):
path = temporary_path / 'foo'
with path.open('w') as f:
f.write('foo')
inventory = Inventory(temporary_path)
inventory.scan()
with path.open('w') as f:
f.write('bar')
with pytest.raises(FileChangedBeforeUploadError):
snapshot = SnapshotCollection(envoy).create()
snapshot.backup(inventory)
def test_validate(envoy, temporary_path, loghandler):
with (temporary_path / 'foo').open('w') as f:
f.write('foo')
with (temporary_path / 'bar').open('w') as f:
f.write('bar')
inventory = Inventory(temporary_path)
inventory.scan()
snapshot = SnapshotCollection(envoy).create()
snapshot.backup(inventory)
assert snapshot.validate()
a, b = snapshot.meta['files']
# remove one file
envoy.delete(a)
# replace another
envoy.delete(b)
envoy.send(b, BytesIO(b'baz'))
# remove something from the structure
snapshot.meta['structure'].popitem()
snapshot.save()
assert not snapshot.validate()
records = [r for r in loghandler.records if r.level_name == 'ERROR']
assert len(records) == 3
log = '\n'.join(l.message for l in records)
assert 'but got' in log
assert 'is missing' in log
assert 'is unknown' in log
def test_owner_mismatch(envoy, temporary_path):
(temporary_path / 'foo').mkdir()
(temporary_path / 'bar').mkdir()
foo = Inventory(temporary_path / 'foo')
foo.scan()
bar = Inventory(temporary_path / 'bar')
bar.scan()
snapshot = SnapshotCollection(envoy).create()
snapshot.backup(foo)
with pytest.raises(SnapshotMismatchError):
snapshot.backup(bar)
with pytest.raises(SnapshotMismatchError):
snapshot = SnapshotCollection(envoy).create()
snapshot.backup(bar)
|
seantis/spare
|
spare/snapshot.py
|
import json
import ulid
from io import BytesIO
from spare import log
from spare.errors import FileChangedBeforeUploadError
from spare.errors import PruneToZeroError
from spare.errors import SnapshotMismatchError
from spare.inventory import hash_implementation
from spare.utils import abort_if_file_changes_during_read
class SnapshotCollection(object):
""" Manages the snapshots stored on a bucket.
Snapshots are created using 'ulid' to automatically keep them in order
of creation (from oldest to newest).
Snapshots store the structure of the inventory they backed up as well
as all the file-digests they are linked with. This ends up working
a bit like reference counting - as long as a snapshot links to an
uploaded file, that file is kept around.
When snapshots are deleted/pruned, all the files which are no longer
referenced by any snapshot get deleted.
"""
def __init__(self, envoy):
self.envoy = envoy
self.clear()
def clear(self):
self.snapshots = []
def load(self):
""" Load all snapshots from the bucket. """
self.clear()
for prefix in sorted(self.envoy.prefixes('snapshot')):
self.snapshots.append(Snapshot.load(self.envoy, prefix))
def create(self):
return Snapshot(self.envoy)
def get(self, prefix='latest'):
""" Gets the given snapshot by prefix or the latest one (default). """
if prefix == 'latest':
return self.snapshots[-1]
return next((s for s in self.snapshots if s.prefix == prefix), None)
def prune(self, keep=1):
""" Prunes the snapshots and all the files associated with them. """
# make sure we are up to date
self.load()
if keep < 1:
raise PruneToZeroError(len(self.snapshots))
if len(self.snapshots) <= keep:
return
        # delete the metadata of old snapshots
old, self.snapshots = self.snapshots[:-keep], self.snapshots[-keep:]
for snapshot in old:
snapshot.delete()
# delete the prefixes we don't recognise
known = set()
for snapshot in self.snapshots:
known.add(snapshot.prefix)
known.update(d for d in snapshot.meta.get('files', ()))
for prefix in self.envoy.prefixes():
if prefix not in known:
self.envoy.delete(prefix)
class Snapshot(object):
""" A single snapshot, responsible for backing up inventories. """
def __init__(self, envoy, prefix=None, meta=None):
self.envoy = envoy
self.prefix = prefix or f'snapshot_{ulid.new()}'
self.meta = meta or {}
@classmethod
def load(cls, envoy, prefix):
with BytesIO() as buffer:
envoy.recv(prefix, buffer)
meta = json.loads(buffer.getvalue().decode('utf-8'))
return cls(envoy, prefix, meta)
@property
def neighbours(self):
collection = SnapshotCollection(self.envoy)
collection.load()
for snapshot in collection.snapshots:
if snapshot.prefix != self.prefix:
yield snapshot
def ensure_identity_match(self, inventory):
""" Each inventory we backup has an identity associated with it
(hostname + path). When creating a new snapshot we ensure that
this identity matches, because we want each hostname/path combination
to be stored in a separate bucket.
"""
if 'identity' in self.meta:
if self.meta['identity'] != inventory.identity:
raise SnapshotMismatchError(
expected=inventory.identity,
found=self.meta['identity']
)
for snapshot in self.neighbours:
if snapshot.meta['identity'] != inventory.identity:
raise SnapshotMismatchError(
expected=inventory.identity,
found=snapshot.meta['identity']
)
def backup(self, inventory, force=False):
""" Backup the given inventory. """
log.info(f"Backing up {inventory.path}")
force or self.ensure_identity_match(inventory)
uploaded = set(self.envoy.prefixes())
for digest, paths in inventory.files.items():
if digest in uploaded:
continue
for path in paths:
log.info(f"Uploading {path}")
path = inventory.absolute_path(paths[0])
m = hash_implementation()
with abort_if_file_changes_during_read(path):
with open(path, 'rb') as f:
self.envoy.send(digest, f, before_encrypt=m.update)
if digest != m.hexdigest():
raise FileChangedBeforeUploadError(path)
self.meta['files'] = inventory.files
self.meta['structure'] = inventory.structure
self.meta['identity'] = inventory.identity
self.save()
log.info(f"Completed {self.prefix}")
def save(self):
""" Save the snapshot metadata to the bucket. """
if self.envoy.is_known_prefix(self.prefix):
self.envoy.delete(self.prefix)
with BytesIO() as buffer:
buffer.write(json.dumps(self.meta).encode('utf-8'))
self.envoy.send(self.prefix, buffer)
    def delete(self):
        """ Deletes the snapshot metadata, but *not* the file data it
        references. The latter is cleaned up by the `prune` call in the
        `SnapshotCollection` class.
"""
self.envoy.delete(self.prefix)
def validate(self):
""" Validates the snapshot data by downloading and validating the
checksum of all the files in the envoy.
Returns true if the snapshot is valid. Errors are added to the log.
"""
class NullBuffer(object):
def write(self, bytes):
pass
prefixes = set(self.envoy.prefixes())
null = NullBuffer()
success = True
def fail(message):
nonlocal success
success = False
log.error(message)
for digest in self.meta.get('files', ()):
if digest not in prefixes:
fail(f"{digest} is unknown")
else:
m = hash_implementation()
self.envoy.recv(digest, null, after_decrypt=m.update)
if digest != m.hexdigest():
fail(f"Expected {digest} but got {m.hexdigest()}")
for path in self.meta['files'][digest]:
if path not in self.meta['structure']:
fail(f"the metadata for {path} is missing")
return success
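
# A small illustration (not used by the library) of why 'ulid' keeps snapshots
# ordered: sorting the prefixes lexicographically, as SnapshotCollection.load
# does, equals sorting them by creation time. The sleep merely makes sure the
# timestamp part of each ULID differs for this demonstration.
if __name__ == '__main__':  # pragma: no cover
    from time import sleep

    prefixes = []
    for _ in range(3):
        prefixes.append(f'snapshot_{ulid.new()}')
        sleep(0.01)

    assert sorted(prefixes) == prefixes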
|
seantis/spare
|
spare/tests/test_backup.py
|
import os
import secrets
import pytest
from pathlib import Path
from spare.backup import create, restore, validate
from spare.envoy import Envoy
from spare.errors import TargetPathNotEmpty, SnapshotMismatchError
from spare.snapshot import SnapshotCollection
from tempfile import TemporaryDirectory
def test_create(s3, temporary_path):
envoy = Envoy(s3, 'my-bucket', 'password')
with (temporary_path / 'foo').open('w') as f:
f.write('foo')
create(temporary_path, s3, 'my-bucket', 'password')
prefixes = set(envoy.prefixes())
assert len(prefixes) == 2
create(temporary_path, s3, 'my-bucket', 'password')
prefixes = set(envoy.prefixes())
assert len(prefixes) == 2
with (temporary_path / 'bar').open('w') as f:
f.write('bar')
create(temporary_path, s3, 'my-bucket', 'password')
prefixes = set(envoy.prefixes())
assert len(prefixes) == 3
(temporary_path / 'bar').unlink()
create(temporary_path, s3, 'my-bucket', 'password')
prefixes = set(envoy.prefixes())
assert len(prefixes) == 2
def test_create_exclude(s3, temporary_path):
envoy = Envoy(s3, 'my-bucket', 'password')
with (temporary_path / 'foo').open('w') as f:
f.write('foo')
with (temporary_path / 'bar').open('w') as f:
f.write('bar')
create(temporary_path, s3, 'my-bucket', 'password', skip=('./foo'))
assert len(set(envoy.prefixes())) == 2
def test_large_file(s3):
content = secrets.token_bytes(1024*1024)
with TemporaryDirectory() as d:
d = Path(d)
with (d / 'foo').open('wb') as f:
f.write(content)
create(d, s3, 'my-bucket', 'password')
with TemporaryDirectory() as d:
d = Path(d)
restore(d, s3, 'my-bucket', 'password')
with (d / 'foo').open('rb') as f:
assert f.read() == content
def test_restore(s3):
with TemporaryDirectory() as d:
d = Path(d)
# normal files
with (d / 'foo').open('w') as f:
f.write('foo')
with (d / 'bar').open('w') as f:
f.write('bar')
# symlinks
(d / 'link-to-foo').symlink_to(d / 'foo')
# hardlinks
os.link(d / 'bar', d / 'hardlink-to-bar')
# directories
(d / 'dir').mkdir()
(d / 'dir' / 'foo').mkdir()
(d / 'dir' / 'foo' / 'bar').mkdir()
# executables
(d / 'exe').touch()
(d / 'exe').chmod(0o755)
create(d, s3, 'my-bucket', 'password')
with TemporaryDirectory() as d:
d = Path(d)
restore(d, s3, 'my-bucket', 'password')
# normal files
assert (d / 'foo').open('r').read() == 'foo'
assert (d / 'bar').open('r').read() == 'bar'
# symlinks
assert (d / 'link-to-foo').is_symlink()
assert os.readlink(d / 'link-to-foo') == str(d / 'foo')
assert (d / 'link-to-foo').open('r').read() == 'foo'
# hardlinks
assert (d / 'hardlink-to-bar').stat().st_ino\
== (d / 'bar').stat().st_ino
# directories
assert (d / 'dir').is_dir()
assert (d / 'dir' / 'foo').is_dir()
assert (d / 'dir' / 'foo' / 'bar').is_dir()
# executables
assert (d / 'exe').stat().st_mode & 0o777 == 0o755
def test_restore_complex_hardlinks_case(s3):
with TemporaryDirectory() as d:
d = Path(d)
for child in ('foo', 'bar'):
(d / child).mkdir()
with (d / child / 'a').open('wb') as f:
f.write(b'foo')
# a <- b <- c
os.link((d / child / 'a'), (d / child / 'b'))
os.link((d / child / 'b'), (d / child / 'c'))
assert (d / child / 'a').stat().st_ino\
== (d / child / 'b').stat().st_ino\
== (d / child / 'c').stat().st_ino
create(d, s3, 'my-bucket', 'password')
with TemporaryDirectory() as d:
d = Path(d)
# we should see two hierarchies with three files sharing a single
# inode for each directory (no overlap between directories)
restore(d, s3, 'my-bucket', 'password')
inodes = set()
for child in ('foo', 'bar'):
assert (d / child).is_dir()
assert (d / child / 'a').open('rb').read() == b'foo'
assert (d / child / 'b').open('rb').read() == b'foo'
assert (d / child / 'c').open('rb').read() == b'foo'
assert (d / child / 'a').stat().st_ino\
== (d / child / 'b').stat().st_ino\
== (d / child / 'c').stat().st_ino
inode = (d / child / 'a').stat().st_ino
assert inode not in inodes
inodes.add(inode)
def test_restore_previous_snapshot(s3):
with TemporaryDirectory() as d:
d = Path(d)
with (d / 'foo').open('w') as f:
f.write('foo')
create(d, s3, 'my-bucket', 'password')
collection = SnapshotCollection(Envoy(s3, 'my-bucket', 'password'))
collection.load()
assert len(collection.snapshots) == 1
with (d / 'foo').open('w') as f:
f.write('bar')
(d / 'bar').mkdir()
create(d, s3, 'my-bucket', 'password', keep=2)
collection.load()
assert len(collection.snapshots) == 2
with TemporaryDirectory() as d:
d = Path(d)
restore(d, s3, 'my-bucket', 'password', collection.snapshots[0].prefix)
assert (d / 'foo').open('r').read() == 'foo'
assert not (d / 'bar').exists()
with TemporaryDirectory() as d:
d = Path(d)
restore(d, s3, 'my-bucket', 'password')
assert (d / 'foo').open('r').read() == 'bar'
assert (d / 'bar').exists()
def test_restore_to_non_empty_directory(s3, temporary_path):
(temporary_path / 'foo').touch()
create(temporary_path, s3, 'my-bucket', 'password')
with pytest.raises(TargetPathNotEmpty):
restore(temporary_path, s3, 'my-bucket', 'password')
def test_validate(s3, temporary_path):
with TemporaryDirectory() as d:
d = Path(d)
with (d / 'foo').open('w') as f:
f.write('foo')
        with (d / 'bar').open('w') as f:
            f.write('bar')
create(d, s3, 'my-bucket', 'password')
assert validate(s3, 'my-bucket', 'password')
with Envoy(s3, 'my-bucket', 'password') as envoy:
envoy.delete(next(envoy.prefixes()))
assert not validate(s3, 'my-bucket', 'password')
def test_force_backup(s3, temporary_path):
(temporary_path / 'foo').mkdir()
(temporary_path / 'foo' / 'bar').touch()
create(temporary_path / 'foo', s3, 'my-bucket', 'password')
(temporary_path / 'foo').rename(temporary_path / 'bar')
(temporary_path / 'foo').mkdir()
(temporary_path / 'foo' / 'bar').touch()
# the inode has changed
with pytest.raises(SnapshotMismatchError):
create(temporary_path / 'foo', s3, 'my-bucket', 'password')
create(temporary_path / 'foo', s3, 'my-bucket', 'password', force=True)
create(temporary_path / 'foo', s3, 'my-bucket', 'password')
|
seantis/spare
|
spare/__init__.py
|
from logbook import Logger
from os import supports_follow_symlinks
log = Logger('spare')
FOLLOW_SYMLINKS = supports_follow_symlinks and {'follow_symlinks': True} or {}
__all__ = ('log', 'FOLLOW_SYMLINKS')
|
CXUtk/TRV2
|
build.py
|
import sys
import os
import shutil
# Copy resources from the resource folder to the output directory, skipping files that are already identical
def copyFiles(resourcePath: str, dirName: str, outputPath: str):
fileList = []
totalFiles = 0
for root, dirs, files in os.walk(resourcePath):
for file in files:
filePath = os.path.join(root, file)
targetPath = os.path.join(
outputPath, filePath.replace(resourcePath, dirName))
totalFiles = totalFiles + 1
if not os.path.exists(targetPath):
fileList.append((filePath, targetPath))
continue
            # if the file differs from the target file, copy it as well
with open(filePath, 'rb') as fsrc:
with open(targetPath, 'rb') as ftarget:
if fsrc.read() != ftarget.read():
fileList.append((filePath, targetPath))
for src, dest in fileList:
os.makedirs(os.path.dirname(dest), exist_ok=True)
shutil.copy(src, dest)
print('Copying {}'.format(dest))
print('{} out of {} files are copied to output directory'.format(
len(fileList), totalFiles))
return fileList
def main():
    # check that the command line arguments are valid
if len(sys.argv) != 3:
print('Usage: ./build.py <Resources_Dir> <Output_Dir>')
return
print('Parameter List: [' + ', '.join(sys.argv) + ']')
resourcePath, outputPath = sys.argv[1], sys.argv[2]
resDirectoryName = resourcePath[str.rfind(resourcePath, '/') + 1:]
if not os.path.exists(resourcePath):
print('Cannot find resource: {}'.format(resourcePath))
return
if not os.path.exists(outputPath):
print('Cannot find output directory: {}'.format(outputPath))
return
    # convert absolute paths to relative paths
resourcePath, outputPath = os.path.relpath(
resourcePath), os.path.relpath(outputPath)
fileList = copyFiles(
resourcePath, resDirectoryName, outputPath)
if __name__ == '__main__':
print('\n************************************* Executing Resources-copy Script *************************************\n')
main()
print('\n************************************* Finished Resources-copy Script **************************************\n')
|
CXUtk/TRV2
|
encoding.py
|
import os
import sys
import codecs
from chardet.universaldetector import UniversalDetector
# Convert all source files in the folder (*.cpp, *.h, *.hpp, *.inl, ...) to UTF-8 encoding
__exts__ = (
".cpp", ".h", ".hpp", ".inl", ".frag", ".vert"
)
# Detect the encoding of a file
def detectEncoding(file):
detector = UniversalDetector()
with open(file, 'rb') as f1:
detector.feed(f1.read())
detector.close()
return detector.result
def encodeToTarget(fileName, encoding, target):
# TODO: This is dangerous ^^||, would need a backup option :)
    # NOTE: Use 'replace' option which tolerates erroneous characters
data = codecs.open(fileName, 'rb', encoding, 'replace').read()
open(fileName, 'wb').write(data.encode(target, 'replace'))
def main():
    # check that the command line arguments are valid
if len(sys.argv) != 3:
print('Usage: ./encoding.py <Encoding_Dir> <Target Encoding>')
return
sourceDir = os.path.relpath(sys.argv[1])
targetEncoding = sys.argv[2]
for root, dirs, files in os.walk(sourceDir):
for file in files:
fileName = os.path.join(root, file)
if fileName.endswith(__exts__):
result = detectEncoding(fileName)
                if result['confidence'] == 0 and result['encoding'] is None:
print('Empty: {}'.format(fileName))
continue
encoding = result['encoding'].lower()
confidence = result['confidence']
if confidence < 0.7:
print('Skipped: {}'.format(fileName))
continue
if encoding != targetEncoding:
encodeToTarget(fileName, encoding, targetEncoding)
print('Encoding: {}'.format(fileName))
else:
print('Skipped {}: {}'.format(targetEncoding, fileName))
if __name__ == '__main__':
print('\n************************************* Executing Encoding-check Script *************************************\n')
main()
print('\n************************************* Finished Encoding-check Script **************************************\n')
|