hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bc0a0fa5f18fcd52ed6c80228bbe0f8823a9bdad | 2,688 | py | Python | rev/desmos-pro/solve-maze.py | nanzggits/sdctf-2021 | fcddb506f5f798a264fc17e5588c0f5b7d5fbb2c | [
"MIT"
] | 6 | 2021-05-17T21:04:31.000Z | 2022-01-01T23:28:18.000Z | rev/desmos-pro/solve-maze.py | nanzggits/sdctf-2021 | fcddb506f5f798a264fc17e5588c0f5b7d5fbb2c | [
"MIT"
] | null | null | null | rev/desmos-pro/solve-maze.py | nanzggits/sdctf-2021 | fcddb506f5f798a264fc17e5588c0f5b7d5fbb2c | [
"MIT"
] | 1 | 2021-06-01T17:03:06.000Z | 2021-06-01T17:03:06.000Z | from typing import List, Optional, Tuple
import mazegen
from mazegen import M, WALL
# In desmos coordinates (x,y), with y=0 being the bottom row
# CARDINAL_DELTAS = [(0, 1), (1, 0), (0, -1), (-1, 0)]
# In array coordinates (y,x), with y=0 being the top row
CARDINAL_DELTAS = [(-1, 0), (0, 1), (1, 0), (0, -1)]
S = Tuple[int, int]
class Node:
    """Linked-list node recording the move that first reached a maze square.

    direction: 0 = up, 1 = right, 2 = down, 3 = left
    prev:      node for the square this one was reached from (None at the start)
    """
    def __init__(self, direction: int, prev: Optional['Node']):
        self.direction = direction
        self.prev = prev
# Convert from desmos coordinates (x, y, with y=0 at the bottom) to array
# coordinates (row, col, with row 0 at the top of the grid).
def convert_to_array_coord(height: int, desmos_coord: S) -> S:
    col, row_from_bottom = desmos_coord
    return height - 1 - row_from_bottom, col
# MODULO: int = 10 ** 15
MODULO: int = 1_000_000_007
PRIME = 31
OFFSET = 1

# Polynomial rolling hash of the direction list: every step contributes
# (direction + OFFSET), with the running value scaled by PRIME, all mod MODULO.
def flag_hash(list_dir: List[int]):
    digest = 0
    for step in list_dir:
        digest = (digest * PRIME + step + OFFSET) % MODULO
    return digest % MODULO

# Table of PRIME powers (mod MODULO) so the hash can be recomputed externally.
def modexp_table(l: int):
    table = []
    for exponent in range(l):
        table.append(pow(PRIME, exponent, MODULO))
    return table
def solve(maze: M, start: S, end: S):
    """Solve `maze` (grid where WALL marks blocked squares) from `start`
    to `end` (both in desmos coordinates) and print the direction list,
    its length, the modexp table, and the resulting flag hash.

    Fix: use identity comparisons (`is None` / `is not None`) instead of
    `== None` / `!= None` when testing for a missing node.
    """
    width = len(maze[0])
    height = len(maze)
    node_visited = [[False] * width for _ in range(height)]
    # prev: List[List[Optional[Node]]] = [[None] * width for _ in range(height)] # type: ignore
    none_list: List[Optional[Node]] = [None]
    node: List[List[Optional[Node]]] = [none_list * width for _ in range(height)]
    def visit(square_y: int, square_x: int):
        # Depth-first flood fill: record, for every reachable square, the
        # direction of the move that first reached it.
        node_visited[square_y][square_x] = True
        for direction, (dy, dx) in enumerate(CARDINAL_DELTAS):
            ny = square_y + dy
            nx = square_x + dx
            if 0 <= ny < height and 0 <= nx < width and maze[ny][nx] != WALL and not node_visited[ny][nx]:
                node[ny][nx] = Node(direction, node[square_y][square_x])
                visit(ny, nx)
    visit(*convert_to_array_coord(height, start))
    steps = []
    ey, ex = convert_to_array_coord(height, end)
    current_node = node[ey][ex]
    if current_node is None:
        print('No path :(')
    else:
        # Walk the chain of moves back from the end square, then reverse it.
        while current_node is not None:
            steps.append(current_node.direction)
            current_node = current_node.prev
        dir_list = list(reversed(steps))
        print(dir_list)
        l = len(steps)
        print('l = {}'.format(l))
        # The modulo exponentiation table allows Desmos to compute the hash
        print('T = {}'.format(modexp_table(l)))
        print('T_mod = {}'.format(MODULO))
        print('Flag: sdctf{{{}}}'.format(flag_hash(dir_list)))
# Earlier checkpoint runs, kept for reference:
# solve(mazegen.generate_maze(21, 21, 3157464641), (0, 20), (6, 20))
# solve(mazegen.generate_maze(21, 21, 3157464641), (0, 20), (12, 20))
# Solve the challenge maze (fixed seed) from (0, 20) to (10, 10).
solve(mazegen.generate_maze(21, 21, 3157464641), (0, 20), (10, 10))
| 34.909091 | 106 | 0.603051 | 184 | 0.068452 | 0 | 0 | 0 | 0 | 0 | 0 | 594 | 0.220982 |
bc0a8cdfe027330a24bc7a38c3790b5c1221893d | 361 | py | Python | solvent/run.py | shlomimatichin/solvent | c8bdba9aed1372060d167b27366cbe7afaacc470 | [
"Apache-2.0"
] | null | null | null | solvent/run.py | shlomimatichin/solvent | c8bdba9aed1372060d167b27366cbe7afaacc470 | [
"Apache-2.0"
] | null | null | null | solvent/run.py | shlomimatichin/solvent | c8bdba9aed1372060d167b27366cbe7afaacc470 | [
"Apache-2.0"
] | null | null | null | import subprocess
import logging
def run(command, cwd=None):
    """Run `command` (a list of arguments) and return its combined
    stdout+stderr output as bytes.

    cwd - optional working directory for the child process.

    Raises subprocess.CalledProcessError on a non-zero exit status,
    after logging the failed command and its output.
    """
    try:
        # subprocess.DEVNULL replaces open("/dev/null"), whose file
        # handle was never closed (a descriptor leak on every call).
        return subprocess.check_output(
            command, cwd=cwd, stderr=subprocess.STDOUT,
            stdin=subprocess.DEVNULL, close_fds=True)
    except subprocess.CalledProcessError as e:
        # Lazy %-style arguments defer the string formatting to the logger.
        logging.error("Failed command '%s' output:\n%s", command, e.output)
        raise
| 27.769231 | 78 | 0.648199 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.121884 |
bc0cf17e742ee5f0e9c7dd32d45b474ef9bbc8ae | 15,924 | py | Python | util/approximate/embedding_interpolator/old/v1_numpy.py | tchlux/util | eff37464c7e913377398025adf76b057f9630b35 | [
"MIT"
] | 4 | 2021-04-22T20:19:40.000Z | 2022-01-30T18:57:23.000Z | util/approximate/embedding_interpolator/old/v1_numpy.py | tchlux/util | eff37464c7e913377398025adf76b057f9630b35 | [
"MIT"
] | 1 | 2022-01-24T14:10:27.000Z | 2022-01-30T16:42:53.000Z | util/approximate/embedding_interpolator/old/v1_numpy.py | tchlux/util | eff37464c7e913377398025adf76b057f9630b35 | [
"MIT"
] | 2 | 2019-05-19T07:44:28.000Z | 2021-04-22T20:20:40.000Z | from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, \
random, prod, asarray, set_printoptions, unravel_index
# Convenience constructors for the arrays used throughout this file.
def zero(*shape):
    # All-zeros array of the requested shape.
    return zeros(shape)
def randnorm(*shape):
    # Standard-normal random array.
    return random.normal(size=shape)
def randuni(*shape):
    # Uniform random array over [0, 1).
    return random.random(size=shape)
def randint(*shape, min=-3, max=9):
    # Random integer-valued floats in [min, max], never zero: values are
    # drawn from [min+1, max] and everything <= 0 is shifted down by one.
    values = asarray(random.randint(min+1, max+1, size=shape), dtype=float)
    values[values <= 0] -= 1
    return values
# Build a model given four integers:
# di - dimension of input
# ds - dimension for each internal state
# ns - number of internal states
# do - dimension of output
def build_model(di, ds, ns, do):
# Use random normal vectors.
input_params = randnorm(di+1, ds)
internal_params = randnorm(ds+1, ds, ns-1)
output_params = randnorm(ds+1, do)
# Normalize the length of all random normal vectors for input.
input_params[:,:] /= ((input_params[:,:]**2).sum(axis=0))**(1/2)
internal_params[:,:,:] /= ((internal_params[:,:,:]**2).sum(axis=0))**(1/2)
# Set the bias values.
input_params[-1,:] = 1
internal_params[-1,:,:] = 0
# Set the scratch space for storing internal values to zero.
internal_values = zero(ds, ns)
return input_params, internal_params, output_params, internal_values
# Recover the integer dimensions (di, ds, ns, do) of a model from the
# shapes of its parameter arrays (input, internal, output).
def get_shape(*model):
    input_rows, ds = model[0].shape
    di = input_rows - 1              # one row of the input weights is bias
    ns = model[1].shape[-1] + 1      # internal params hold ns-1 transitions
    do = model[2].shape[-1]
    return di, ds, ns, do
# Function for pushing values forward through a dense MLP.
def forward(inputs, input_params, internal_params, output_params, internal_values, display=False):
    """Evaluate the MLP on `inputs`, storing per-layer activations.

    ReLU (clip at 0) is applied after the input layer and after every
    internal layer; the final output layer is affine with no activation.
    `internal_values` is mutated in place as scratch space (column i holds
    layer i's activations).  Returns the output vector.
    """
    di, ds, ns, do = get_shape(input_params, internal_params, output_params)
    # Compute the input layer.
    internal_values[:,0] = clip(dot(inputs, input_params[:di,:]) +
                                input_params[di,:], 0.0, float('inf'))
    if display:
        print("^"*70)
        print("input: ",inputs)
        print()
        for n in range(ds):
            print(f"0.{n} ", input_params[:di,n], '+', input_params[di,n], '=', internal_values[n,0])
        print(" 0 out ", internal_values[:,0])
    # Compute the next set of internal values with a rectified activation.
    for i in range(ns-1):
        internal_values[:,i+1] = internal_params[ds,:,i] + \
                                 dot(internal_values[:,i],
                                     internal_params[:ds,:,i])
        if display:
            print()
            for n in range(ds):
                print(f"{i+1}.{n} ", internal_params[:ds,n,i], '+', internal_params[ds:ds+1,n,i], '=', internal_values[n,i+1])
        # ReLU: clamp negative activations to zero.
        internal_values[:,i+1] = clip(internal_values[:,i+1], 0.0, float('inf'))
        if display: print(f" {i+1} out ", internal_values[:,i+1])
    # compute the output (affine, no activation).
    output = dot(internal_values[:,ns-1], output_params[:ds]) + output_params[ds]
    if display:
        print()
        for n in range(do):
            print(f"{ns}.{n} ", output_params[:ds,n],'+', output_params[ds,n], '=', output[n])
        print(f" {ns} out ", output[:])
        print()
        print("output:", output)
        print("_"*70)
    return output
# Compute the gradient with respect to all parameters analytically
# (backpropagation); `finite_difference` below is its numerical check.
def gradient(grad, inputs, *model, display=False):
    """Backpropagate `grad` (d loss / d output) through the model.

    grad   - gradient of the loss with respect to each model output.
    inputs - the input vector used in the preceding `forward` call.
    model  - (input_params, internal_params, output_params, internal_values);
             `internal_values` must still hold the activations computed by
             `forward` for these same `inputs`.
    Returns (input_grad, internal_grad, output_grad).

    NOTE: `internal_values` (model[-1]) is overwritten in place with the
    backpropagated deltas, so `forward` must be re-run before reusing it.
    """
    # Get the model shape.
    di, ds, ns, do = get_shape(*model)
    # Initialize storage for the gradients.
    input_grad = zeros(model[0].shape)
    internal_grad = zeros(model[1].shape)
    output_grad = ones(model[2].shape)
    # Retrieve the model parameters.
    internal_params = model[1]
    output_params = model[2]
    # Retreive the internal values of the model (after executing forwards).
    internal_values = model[-1]
    # Compute the gradient of the last parameters.  Only units that were
    # active (nonzero after ReLU) propagate gradient backwards.
    nonzero = internal_values[:,-1].nonzero()
    output_grad[ds,:] = grad[:]
    for i in range(do):
        output_grad[:ds,i] = internal_values[:,-1] * grad[i]
    internal_values[nonzero,-1] = dot(output_params[:ds,:][nonzero], grad)
    if display:
        print("^"*70)
        print("Output grad:")
        print("",output_grad.T)
        print("",nonzero, internal_values[:,-1])
    # Compute the gradient of all internal parameters, walking backwards.
    for i in range(ns-2,-1,-1):
        # Compute the gradient for all weights.
        #   set the bias gradient.
        internal_grad[ds,:,i] = internal_values[:,i+1]
        #   set the gradient for each column of connections
        #   (to a single output in next layer).
        nonzero = internal_values[:,i].nonzero()
        for j in range(ds):
            # Inactive (ReLU-clamped) outputs contribute no gradient.
            if (internal_values[j,i+1] == 0): continue
            internal_grad[:ds,j,i][nonzero] = internal_values[nonzero,i] * internal_values[j,i+1]
            if display:
                print(f"layer {i} -> {i+1}, output node {j}")
                print(" ",internal_grad[:,j,i])
        # Compute the next preceding layer of internal values.
        internal_values[nonzero,i] = dot(internal_params[:ds,:,i][nonzero], internal_values[:,i+1])
        if display:
            print("Grads for next layer:")
            print("",nonzero, internal_values[:,i])
    # Compute the gradient for the input parameters.
    input_grad[di,:] = internal_values[:,0]
    for i in range(ds):
        input_grad[:di,i] = inputs[:] * internal_values[i,0]
    if display:
        print("Input grad:")
        print(input_grad.T)
        print("_"*70)
    # Return the gradients.
    return input_grad, internal_grad, output_grad
# Compute the gradient with respect to all parameters using finite differences.
def finite_difference(inputs, *model, diff=0.0001, display=False):
    """Approximate d(output)/d(parameter) by central finite differences.

    Each parameter is nudged by ±diff/2 and the resulting output change is
    divided by diff.  Nudges that change the set of active (nonzero) ReLU
    units are skipped because the kink makes the approximation unstable.
    Gradients over multiple outputs are accumulated (summed).
    Returns (input_grad, internal_grad, output_grad).
    """
    # Shift matrices (used for computing finite differences).
    input_shift = zeros(model[0].shape)
    internal_shift = zeros(model[1].shape)
    output_shift = zeros(model[2].shape)
    # Function for producting the shifted model.
    shifted_model = lambda: (model[0]+input_shift, model[1]+internal_shift, model[2]+output_shift, model[3])
    # Gradient matrices.
    input_grad = zeros(model[0].shape)
    internal_grad = zeros(model[1].shape)
    output_grad = zeros(model[2].shape)
    # Total number of outputs.
    output_shape = forward(inputs, *model).shape
    num_outputs = prod(output_shape)
    # Compute the expected set of nonzero internal activations.
    forward(inputs, *model)
    expected_nonzero = tuple(model[-1].nonzero()[0])
    # Function for testing the effect that a shift of one parameter has
    # on the output currently selected by `out_index` (closure below).
    def measure_layer(layer, grad, shift, name):
        for j in range(layer.size):
            curr_idx = unravel_index(j, layer.shape)
            shift[curr_idx] = diff/2
            out_high = forward(inputs, *shifted_model())[out_index]
            nonzero_high = tuple(model[3].nonzero()[0])
            shift[curr_idx] = -diff/2
            out_low = forward(inputs, *shifted_model())[out_index]
            nonzero_low = tuple(model[3].nonzero()[0])
            shift[curr_idx] = 0
            # If a zero became nonzero (or vice versa), then the
            # finite different approximation is unstable.
            if ((len(nonzero_high) <= len(expected_nonzero)) and
                (len(nonzero_low) <= len(expected_nonzero))):
                # Compute the gradient (central difference).
                grad[curr_idx] += sum(out_high - out_low) / diff
            if display:
                print(f"{name:14s}{str(curr_idx):10s} {grad[curr_idx]: .3f}")
                print(f" {float(out_high)}")
                print(f" {float(out_low)}")
                print(f" {float(diff)}")
    # Display information.
    if display:
        print("^"*70)
        print("shifted_model: ",[v.shape for v in shifted_model()])
        print("output shape, size: ", output_shape, num_outputs)
    # Cycle over each output.
    for i in range(num_outputs):
        out_index = unravel_index(i, output_shape)
        if display: print("out_index: ",out_index)
        # Cycle over all model parameters, testing effect on output.
        #   input layer
        measure_layer(model[0], input_grad, input_shift, "input idx:")
        #   internal layers
        measure_layer(model[1], internal_grad, internal_shift, "internal idx:")
        #   output layer
        measure_layer(model[2], output_grad, output_shift, "output idx:")
    if display: print("_"*70)
    # Done computing finite difference gradient!
    return input_grad, internal_grad, output_grad
def test():
    """Exhaustively compare `gradient` against `finite_difference` over
    all small model shapes (1-3 in each dimension) and several random
    seeds.  Prints full diagnostics and exits on the first mismatch
    above 1% relative error."""
    print("Testing..")
    di_vals = (1,2,3)
    ds_vals = (1,2,3)
    ns_vals = (1,2,3)
    do_vals = (1,2,3)
    seeds = list(range(5))
    # Cycle all combination of tests.
    from itertools import product
    for (di, ds, ns, do, seed) in product(di_vals, ds_vals, ns_vals, do_vals, seeds):
        # --------------------------------------------------------------------
        #   di - dimension of input
        #   ds - dimension for each internal state
        #   ns - number of internal states
        #   do - dimension of output
        # --------------------------------------------------------------------
        random.seed(seed)
        # Create the model.
        #
        model = build_model(di, ds, ns, do)
        # Call the "forward" function.
        # inputs = randuni(di)
        inputs = randuni(di)
        # inputs = randint(di)
        # Run the model forward to compute internal values.
        output = forward(inputs, *model, display=False)
        # Compute the gradients with a finite difference.
        approx_model_grad = finite_difference(inputs, *model, display=False)
        # Run the model again (fresh) to get the "internal values"
        # (gradient() consumes/overwrites them).
        output = forward(inputs, *model, display=False)
        # Print the model gradients that were directly computed.
        model_grad = gradient(ones(do), inputs, *model)
        # Check the correctness of the gradient function.
        for i,(app, true) in enumerate(zip(approx_model_grad, model_grad)):
            # Relative error with a +1 in the denominator to avoid
            # division by zero for tiny true gradients.
            diff = (abs(app - true) / (abs(true) + 1)).T
            # Skip "internal params" if that is empty.
            if (len(diff) == 0): continue
            # Check for the difference.
            if (max(diff) > .01):
                set_printoptions(precision=3, sign=" ")
                print()
                print("ERROR ON TEST")
                print(" seed =",seed)
                print()
                print("di, ds, ns, do: ",di, ds, ns, do)
                print("input_params: ",model[0].shape)
                print("internal_params: ",model[1].shape)
                print("output_params: ",model[2].shape)
                print("internal_values: ",model[3].shape)
                print()
                # forward(inputs, *model, display=True)
                finite_difference(inputs, *model, display=True)
                print()
                print("model[0]:")
                print(model[0].T)
                print()
                print("model[1]:")
                print(model[1].T)
                print()
                print("model[2]:")
                print(model[2].T)
                print()
                print("internals:")
                print(model[-1].T)
                print()
                print()
                print("approx_model_grad[0]:")
                print(approx_model_grad[0].T)
                print()
                print("approx_model_grad[1]:")
                print(approx_model_grad[1].T)
                print()
                print("approx_model_grad[2]:")
                print(approx_model_grad[2].T)
                print()
                print()
                print("model_grad[0]:")
                print(model_grad[0].T)
                print()
                print("model_grad[1]:")
                print(model_grad[1].T)
                print()
                print("model_grad[2]:")
                print(model_grad[2].T)
                print()
                print()
                print("Phase",i,"(0 = input, 1 = internal, 2 = output)")
                print("",max(diff))
                print("",unravel_index(argmax(diff), diff.shape))
                print()
                print("Finite differene gradient:")
                print(app.T)
                print()
                print("Directly computed gradient:")
                print(true.T)
                print()
                print("Difference")
                print(diff)
                print()
                print("ERROR ON TEST")
                exit()
    print(" all passed!")

# Run the gradient self-test when executed as a script.
if __name__ == "__main__":
    test()
class NN:
    """Minimal dense neural network wrapper around the module-level
    `build_model`, `forward`, and `gradient` routines, trained with
    full-batch gradient descent on squared error."""
    def __init__(self, di, do, ds=16, ns=4):
        # di/do: input/output dimension; ds: internal state width;
        # ns: number of internal states (layers).
        self.di = di
        self.ds = ds
        self.ns = ns
        self.do = do
        self.model = list(build_model(di, ds, ns, do))
    def fit(self, x, y, steps=1000, step_factor=0.01, display=False,
            show=False, **kwargs):
        """Train on data `x` (rows are inputs) and targets `y`.

        steps       - number of gradient-descent iterations.
        step_factor - learning rate.
        display     - print loss / parameters / gradients every step.
        show        - plot data, model, and loss (1-D in, 1-D out only).

        NOTE(review): `show_interval` and `loss_values` are only defined
        inside the `show and do==1 and di==1` branch, but are later used
        whenever `show` is truthy -- a NameError for other shapes; confirm
        intended usage before relying on `show` with multi-dim data.
        """
        # Make sure that the given data is the right shape.
        assert (self.di == x.shape[-1])
        assert (self.do == y.shape[-1])
        if (show and (self.do == 1) and (self.di == 1)):
            show_interval = max([1, steps // 100])
            from util.plot import Plot
            p = Plot()
            p.add("Data", *(x.T), y.flatten(), group='d', frame=-1)
            p.add_func("Model", self, [x.min(), x.max()], group='m', frame=-1)
            loss_values = []
        # For the number of training steps..
        for s in range(steps):
            if (not s%10): print(s, end="\r")
            if (show): loss_values.append( ((y - self(x))**2).sum()**(1/2) )
            grads = [zeros(l.shape) for l in self.model]
            # Average gradient from all data points.
            for i, (d_in, d_out) in enumerate(zip(x,y)):
                m_out = forward(d_in, *self.model, display=False)
                loss_grad = m_out - d_out
                grad_step = gradient(loss_grad, d_in, *self.model, display=False)
                # Dynamically update the average (of the gradients).
                for j in range(len(grad_step)):
                    grads[j] += (grad_step[j] - grads[j]) / (i+1)
            if display:
                yhat = self(x).reshape(y.shape)
                loss = ((y - yhat)**2).sum(axis=-1).mean()
            # Take a step in the gradient direction.
            for j in range(len(grads)):
                self.model[j] -= grads[j] * step_factor
            # Display progress.
            if display:
                print()
                print("Step:", s)
                print("loss:", loss)
                print("model:")
                for l in self.model[:-1]:
                    print("",l.T)
                print("grads: ")
                for l in grads[:-1]:
                    print("",-l.T)
                print()
            # Update the model plot, if appropriate.
            if (show and (s%show_interval == 0)):
                p.add("Data", *(x.T), y.flatten(), group='d', frame=s)
                p.add_func("Model", self, [x.min(), x.max()], group='m', frame=s)
        # Add the last frame, if it wasn't already added.
        if (show):
            print(" showing plot..")
            # Show the plot of the model.
            p.show(show=False)
            p = Plot("","Step","Loss value")
            p.add("Loss", list(range(len(loss_values))), loss_values,
                  mode="markers+lines", color=1)
            p.show(append=True, show_legend=False)
    # Return predictions for new data.  A 2-D `x` is treated as a batch
    # of rows; only the first output component is kept per row.
    def predict(self, x):
        if (len(x.shape) == 2):
            outputs = []
            for x_in in x:
                outputs.append( forward(x_in, *self.model)[0] )
            return asarray(outputs)
        else: return forward(x, *self.model)
    # Wrapper for the "__call__".
    def __call__(self, *args):
        return self.predict(*args)
| 41.46875 | 129 | 0.543582 | 3,254 | 0.204346 | 0 | 0 | 0 | 0 | 0 | 0 | 4,202 | 0.263878 |
bc101f9477ced02fc73e4aa485f705e9bede840d | 1,429 | py | Python | sample-apps/data-loader/app.py | jkylling/fdb-kubernetes-operator | b6c2d18d7841e789bd33bc1e05ed449e7468fabd | [
"Apache-2.0"
] | null | null | null | sample-apps/data-loader/app.py | jkylling/fdb-kubernetes-operator | b6c2d18d7841e789bd33bc1e05ed449e7468fabd | [
"Apache-2.0"
] | null | null | null | sample-apps/data-loader/app.py | jkylling/fdb-kubernetes-operator | b6c2d18d7841e789bd33bc1e05ed449e7468fabd | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python
'''
This file provides a sample app for loading data into FDB.
To use it to load data into one of the sample clusters in this repo,
you can build the image by running `docker build -t fdb-data-loader sample-apps/data-loader`,
and then run the data loader by running `kubectl apply -f sample-apps/data-loader/job.yaml`
'''
import argparse
import random
import uuid
import fdb
fdb.api_version(600)
@fdb.transactional
def write_batch(tr, batch_size, value_size):
    """Write `batch_size` keys with random byte values under one fresh
    UUID prefix, inside a single transaction.

    tr         - FoundationDB transaction (injected by @fdb.transactional
                 when called with a Database object)
    batch_size - number of keys written by this transaction
    value_size - number of random bytes per value
    """
    prefix = uuid.uuid4()
    for index in range(1, batch_size + 1):
        key = fdb.tuple.pack((prefix, index))
        # Build the random payload in one pass (replaces the manual
        # append loop and the dead trailing `pass`).
        tr[key] = bytes(random.randint(0, 255) for _ in range(value_size))
def load_data(keys, batch_size, value_size):
    """Insert roughly `keys` keys into FDB, `batch_size` keys per
    transaction; a remainder that does not fill a whole batch is skipped
    (matching the original truncating division).
    """
    # Floor division replaces int(keys / batch_size); identical for the
    # positive counts this script uses, without the float round-trip.
    batch_count = keys // batch_size
    db = fdb.open()
    for batch in range(1, batch_count + 1):
        print('Writing batch %d' % batch)
        write_batch(db, batch_size, value_size)
# Script entry point: parse command-line options and load random data.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Load random data into FDB")
    parser.add_argument('--keys', type=int, help='Number of keys to generate', default=100000)
    parser.add_argument('--batch-size', type=int, help='Number of keys to write in each transaction', default=10)
    parser.add_argument('--value-size', type=int, help='Number of bytes to include in each value', default=1000)
    args = parser.parse_args()
    load_data(args.keys, args.batch_size, args.value_size)
| 30.404255 | 110 | 0.73478 | 0 | 0 | 0 | 0 | 281 | 0.196641 | 0 | 0 | 546 | 0.382085 |
bc10b3a754b1e07f5391ee346cb0afda2918b4d2 | 1,580 | py | Python | data_files/PROGRAMS/MEDIUM/0018_4Sum.py | sudhirrd007/LeetCode-scraper | de87ff17fff2c73e67392321df1107cce7cbf883 | [
"MIT"
] | null | null | null | data_files/PROGRAMS/MEDIUM/0018_4Sum.py | sudhirrd007/LeetCode-scraper | de87ff17fff2c73e67392321df1107cce7cbf883 | [
"MIT"
] | null | null | null | data_files/PROGRAMS/MEDIUM/0018_4Sum.py | sudhirrd007/LeetCode-scraper | de87ff17fff2c73e67392321df1107cce7cbf883 | [
"MIT"
] | null | null | null | # ID : 18
# Title : 4Sum
# Difficulty : MEDIUM
# Acceptance_rate : 35.2%
# Runtime : 72 ms
# Memory : 12.7 MB
# Tags : Array , Hash Table , Two Pointers
# Language : python3
# Problem_link : https://leetcode.com/problems/4sum
# Premium : 0
# Notes : -
###
def fourSum(self, nums: List[int], target: int) -> List[List[int]]:
    """Return all unique quadruples in `nums` that sum to `target`.

    Sorts `nums` in place, then fixes the two smallest elements and runs a
    two-pointer scan for the remaining pair.  Fix: the duplicate check
    previously compared against `[nums[i], nums[j], nums[end], t]` (with
    `t` being the running sum) while appending a different list, so
    duplicate quadruples were never filtered out.
    """
    nums.sort()
    L = len(nums)
    ans = []
    if L < 4:
        # Fewer than four numbers can never form a quadruple.
        return []
    last = nums[-1]
    for i in range(L - 3):
        # Skip duplicate first elements; prune when even the largest
        # possible completion cannot reach the target.
        if (i > 0 and nums[i] == nums[i - 1]) or nums[i] + 3 * last < target:
            continue
        # Smallest possible sum already exceeds the target: done.
        if 4 * nums[i] > target:
            break
        for j in range(i + 1, L - 2):
            if (j > i + 1 and nums[j] == nums[j - 1]) or nums[i] + nums[j] + 2 * last < target:
                continue
            if nums[i] + 3 * nums[j] > target:
                break
            pair = nums[i] + nums[j]
            start, end = j + 1, L - 1
            while start < end:
                total = pair + nums[start] + nums[end]
                if total < target:
                    start += 1
                elif total > target:
                    end -= 1
                else:
                    quad = [nums[i], nums[j], nums[start], nums[end]]
                    if quad not in ans:
                        ans.append(quad)
                    # Step past duplicate `start` values, then move both
                    # pointers inward.
                    while start < end and nums[start] == nums[start + 1]:
                        start += 1
                    start += 1
                    end -= 1
    return ans
| 31.6 | 86 | 0.401899 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 244 | 0.15443 |
bc10d6ea7391b5e3c732ffbac5c5266d74feef0e | 1,916 | py | Python | dentalvision/asm/fit.py | DreamSki/dentalvision | dd7ca6f7ccefb294dab5b37c37f195df5aadfaab | [
"MIT"
] | 7 | 2018-01-22T01:03:25.000Z | 2022-01-05T08:47:29.000Z | dentalvision/asm/fit.py | tdekeyser/dentalvision | dd7ca6f7ccefb294dab5b37c37f195df5aadfaab | [
"MIT"
] | null | null | null | dentalvision/asm/fit.py | tdekeyser/dentalvision | dd7ca6f7ccefb294dab5b37c37f195df5aadfaab | [
"MIT"
] | 8 | 2017-03-03T14:26:34.000Z | 2020-07-16T12:15:41.000Z | '''
Algorithm for matching the model to image points.
Based on (Cootes et al. 2000, p.9) and (Blanz et al., p.4).
'''
import numpy as np
from utils.structure import Shape
from utils.align import Aligner
class Fitter(object):
    """Fits a point distribution model (PDM) to identified image points,
    producing updated pose parameters and a shape parameter vector."""
    def __init__(self, pdmodel):
        # pdmodel: PointDistributionModel exposing `scaled_eigenvectors`
        # and `eigenvalues` (used by `fit`).
        self.pdmodel = pdmodel
        self.aligner = Aligner()
        # Initial pose (Tx, Ty, s, theta); must be set before calling fit().
        self.start_pose = ()
    def fit(self, prev_shape, new_shape, pyramid_level=0, n=None):
        '''
        Algorithm that finds the best shape parameters that match identified
        image points.
        In: PointDistributionModel instance pdm,
            array of new image points (x1, x2, ..., xN, y1, y2,..., yN)
        Out: the pose params (Tx, Ty, s, theta) and shape parameter (c) to
            fit the model to the image
        '''
        # Coerce raw point arrays into Shape instances.
        if not isinstance(new_shape, Shape):
            new_shape = Shape(new_shape)
        if not isinstance(prev_shape, Shape):
            prev_shape = Shape(prev_shape)
        if not self.start_pose:
            raise ValueError('No inital pose parameters found.')
        # find pose parameters to align with new image points
        Tx, Ty, s, theta = self.start_pose
        dx, dy, ds, dTheta = self.aligner.get_pose_parameters(prev_shape, new_shape)
        changed_pose = (Tx + dx, Ty + dy, s*(1+ds), theta+dTheta)
        # align image with model
        y = self.aligner.invert_transform(new_shape, changed_pose)
        # SVD on scaled eigenvectors of the model
        u, w, v = np.linalg.svd(self.pdmodel.scaled_eigenvectors, full_matrices=False)
        W = np.zeros_like(w)
        # define weight vector n (defaults to the squared smallest
        # eigenvalue, acting as a regularizer below)
        if n is None:
            last_eigenvalue = self.pdmodel.eigenvalues[-1]
            n = last_eigenvalue**2 if last_eigenvalue**2 >= 0 else 0
        # calculate the shape vector via the regularized pseudo-inverse
        W = np.diag(w/((w**2) + n))
        c = (v.T).dot(W).dot(u.T).dot(y.vector)
        return changed_pose, c
| 34.214286 | 86 | 0.616388 | 1,709 | 0.891962 | 0 | 0 | 0 | 0 | 0 | 0 | 672 | 0.350731 |
bc1384f21ce9e92bbb5a1b5230fae5b32226449c | 578 | py | Python | bank_ddd_es_cqrs/accounts/app.py | Hyaxia/Bank-DDD-CQRS-ES | 116e3eb3e93d549c1da53e6d506ab47667d77445 | [
"MIT"
] | 8 | 2020-10-27T09:46:20.000Z | 2022-01-27T12:16:48.000Z | bank_ddd_es_cqrs/accounts/app.py | Hyaxia/Bank-DDD-CQRS-ES | 116e3eb3e93d549c1da53e6d506ab47667d77445 | [
"MIT"
] | null | null | null | bank_ddd_es_cqrs/accounts/app.py | Hyaxia/Bank-DDD-CQRS-ES | 116e3eb3e93d549c1da53e6d506ab47667d77445 | [
"MIT"
] | 2 | 2021-05-29T08:11:48.000Z | 2021-07-26T04:44:53.000Z | from flask import Flask
from flask_cors import CORS # type: ignore
from .api import account_blueprint
from .event_handlers import register_event_handlers
from .infrastructure import event_store_db
from .composition_root import event_manager
def account_app_factory(db_string: str):
app = Flask(__name__)
CORS(app)
app.config['SQLALCHEMY_DATABASE_URI'] = db_string
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
event_store_db.init_app(app)
app.register_blueprint(account_blueprint)
register_event_handlers(event_manager)
return app
| 25.130435 | 56 | 0.792388 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.122837 |
bc13edd4a944944871cf4bb67afafb4e4e45becb | 41,794 | py | Python | Fase II/team04/storageManager.py | estrada-usac/EDD20DIC-PROYECTO | 843347de97af5e432d97c99ef490024d6f3ff49e | [
"MIT"
] | null | null | null | Fase II/team04/storageManager.py | estrada-usac/EDD20DIC-PROYECTO | 843347de97af5e432d97c99ef490024d6f3ff49e | [
"MIT"
] | 3 | 2020-12-22T15:17:16.000Z | 2021-01-08T17:37:22.000Z | Fase II/team04/storageManager.py | estrada-usac/EDD20DIC-PROYECTO | 843347de97af5e432d97c99ef490024d6f3ff49e | [
"MIT"
] | null | null | null | # Package: Storage Manager
# License: Released under MIT License
# Notice: Copyright (c) 2020 TytusDB Team
# Developers: Alexis Peralta
from storage.avl import avlMode
from storage.b import BMode
from storage.bplus import BPlusMode
from storage.hash import HashMode
from storage.isam import ISAMMode
from storage.json_mode import jsonMode
from storage.dict import DictMode
from storage.b import Serializable
from DBList import DBList
import re
import codificar
from random import randint
MODES = ['avl', 'b', 'bplus', 'dict', 'isam', 'json', 'hash']
HEX_SYMBOLS = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F"]
VALID_ENCODING = ["utf8", "iso-8859-1", "ascii"]
DB_NAME_PATTERN = "^[a-zA-Z][a-zA-Z0-9#@$_]*"
# Load the master list of databases from the stored binary file.
# If the binary does not exist yet, create a fresh database list and
# persist it to disk.
try:
    databases = Serializable.rollback("lista_bases_de_datos")
except FileNotFoundError:
    databases = DBList()
    Serializable.commit(databases, "lista_bases_de_datos")
# Description:
#   Creates a new database
# Parameters:
#   database:str - name of the new database
#   mode:str     - storage mode used by the database
#   encoding:str - character encoding used by the database
# Return values:
#   0 - successful operation
#   1 - operation error
#   2 - database already exists
#   3 - invalid mode
#   4 - invalid encoding
def createDatabase(database: str, mode: str, encoding: str) -> int:
    if encoding not in VALID_ENCODING:
        return 4
    if databases.search(database) is not None:
        return 2
    # Dispatch table replaces the if/elif chain over storage modes.
    creators = {
        "avl": avlMode.createDatabase,
        "b": BMode.createDatabase,
        "bplus": BPlusMode.createDatabase,
        "dict": DictMode.createDatabase,
        "isam": ISAMMode.createDatabase,
        "json": jsonMode.createDatabase,
        "hash": HashMode.createDatabase,
    }
    if mode not in creators:
        return 3
    code = creators[mode](database)
    if code == 0:
        databases.create(database, mode, encoding)
        # Persist the global database list, retrying up to 5 times.
        # (The original loop never broke on success -- committing five
        # times -- and its outer `except` was unreachable, so a total
        # commit failure silently returned success.)
        persisted = False
        for _ in range(5):
            try:
                Serializable.commit(databases, "lista_bases_de_datos")
                persisted = True
                break
            except Exception:
                continue
        if not persisted:
            # Could not persist: roll back the physical database and the
            # in-memory entry so both views stay consistent.
            for _ in range(5):
                if dropDatabase(database) == 0:
                    databases.delete(database)
                    break
            return 1
    return code
# Description:
#   Internal variant of createDatabase: creates an alternative storage
#   for an already-registered database name in a different mode
#   (returns 0 immediately if that alternative already exists).
# Parameters:
#   database:str - name of the database
#   mode:str     - storage mode used by the database
#   encoding:str - character encoding used by the database
# Return values:
#   0 - successful operation
#   1 - operation error
#   3 - invalid mode
#   4 - invalid encoding
def __create_database_sp(database: str, mode: str, encoding: str) -> int:
    if encoding not in VALID_ENCODING:
        return 4
    dbs = databases.find_all(database)
    if dbs is not None:
        for db in dbs:
            if db.name == database and db.mode == mode:
                # This alternative database already exists.
                return 0
    # Dispatch table replaces the if/elif chain over storage modes.
    creators = {
        "avl": avlMode.createDatabase,
        "b": BMode.createDatabase,
        "bplus": BPlusMode.createDatabase,
        "dict": DictMode.createDatabase,
        "isam": ISAMMode.createDatabase,
        "json": jsonMode.createDatabase,
        "hash": HashMode.createDatabase,
    }
    if mode not in creators:
        return 3
    code = creators[mode](database)
    if code == 0:
        databases.create(database, mode, encoding)
        # Persist the global database list, retrying up to 5 times.
        # (The original loop never broke on success and its outer
        # `except` was unreachable, so a total commit failure silently
        # returned success.)
        persisted = False
        for _ in range(5):
            try:
                Serializable.commit(databases, "lista_bases_de_datos")
                persisted = True
                break
            except Exception:
                continue
        if not persisted:
            # Could not persist: roll back the physical database and the
            # in-memory entry so both views stay consistent.
            for _ in range(5):
                if dropDatabase(database) == 0:
                    databases.delete(database)
                    break
            return 1
    return code
# Description:
#   Produces a random name that does not collide with any existing database.
# Return value:
#   str - an unused hexadecimal-style name (first symbol is a letter A-F
#         so the result is a valid identifier)
def temp_name():
    # Start with a letter, then append four more random hex symbols.
    name = HEX_SYMBOLS[randint(10, len(HEX_SYMBOLS)-1)]
    for _ in range(4):
        name += HEX_SYMBOLS[randint(0, len(HEX_SYMBOLS)-1)]
    # Keep extending the name until it is not taken by any database.
    while databases.search(name) != None:
        name += HEX_SYMBOLS[randint(0, len(HEX_SYMBOLS)-1)]
    return name
# Description:
#   Changes the storage mode of a database by migrating every table into
#   a temporary database created in the new mode, then renaming it back.
# Parameters:
#   database:str - name of the database to modify
#   mode:str     - target mode: 'avl', 'b', 'bplus', 'dict', 'isam', 'json', 'hash'
# Return values:
#   0 - successful operation
#   1 - operation error
#   2 - database does not exist
#   4 - invalid mode
def alterDatabaseMode(database: str, mode: str) -> int:
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    if mode not in MODES:
        return 4
    tables = showTables(database)
    # Build a scratch database under an unused random name.
    temp_db_name = temp_name()
    createDatabase(temp_db_name, mode, dbs[0].encoding)
    for table in tables:
        # Recreate each table (schema, primary key, then rows) in the
        # temporary database; any failure discards the scratch copy.
        aux_table = databases.find_table(database, table)
        if createTable(temp_db_name, table, aux_table.columns) != 0:
            dropDatabase(temp_db_name)
            return 1
        if aux_table.pk != []:
            if alterAddPK(temp_db_name, table, aux_table.pk) != 0:
                dropDatabase(temp_db_name)
                return 1
        registers = extractTable(database, table)
        for register in registers:
            if insert(temp_db_name, table, register) != 0:
                dropDatabase(temp_db_name)
                return 1
    # Drop the original and rename the scratch copy to take its place.
    if dropDatabase(database) != 0:
        dropDatabase(temp_db_name)
        return 1
    if alterDatabase(temp_db_name, database) != 0:
        return 1
    return 0
# Descripción:
# Cambia el modo de almacenamiento de una tabla de una base de datos especificada
# Parámetros:
# database:str - El nombre de la base de datos que se desea modificar
# table:str - El nombre de la tabla que se desea modificar
# mode:str - Es un string indicando el modo 'avl', 'b', 'bplus', 'dict', 'isam', 'json', 'hash'
# Valores de retorno:
# 0 - Operación exitosa
# 1 - Error en la operación
# 2 - database no existente
# 3 - table no existente
# 4 - modo incorrecto
def alterTableMode(database: str, table: str, mode: str) -> int:
    """Move *table* of *database* into a copy of the database stored with
    *mode*, creating that sibling copy when it does not exist yet.

    Returns:
        0 - success (or the table already lives in a copy with *mode*)
        1 - operation failed
        2 - database does not exist
        3 - table does not exist
        4 - invalid mode
    """
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    if databases.find_table(database, table) == None:
        return 3
    if mode not in MODES:
        return 4
    for db in dbs:
        tb = db.tables.search(table)
        if tb != None:
            # Nothing to do if the table is already stored with the target mode.
            if db.mode == mode:
                return 0
            # Check whether a sibling copy of the database with the target
            # mode (and the same encoding) already exists.
            alt_db_exists = False
            for aux in dbs:
                if aux.name == database and aux.mode == mode and aux.encoding == db.encoding:
                    alt_db_exists = True
            # Pull the rows out before touching any structure.
            registers = extractTable(database, table)
            if alt_db_exists:
                # Create the table in the existing sibling copy.
                if __create_table_sp(database, table, tb.columns, mode) != 0:
                    return 1
                # Copy the rows over.
                for register in registers:
                    if __insert_sp(database, table, register, mode) != 0:
                        return 1
            else:
                # Create the sibling database, then the table, then the rows.
                if __create_database_sp(database, mode, db.encoding) != 0:
                    return 1
                # Create the table in the new sibling copy.
                if __create_table_sp(database, table, tb.columns, mode) != 0:
                    return 1
                # Copy the rows over.
                for register in registers:
                    if __insert_sp(database, table, register, mode) != 0:
                        return 1
            # Drop the original table; if its database copy ends up empty,
            # drop that (now useless) copy as well.
            if __drop_table_sp(database, table, db.mode) != 0:
                return 1
            if db.tables.first == None:
                if __drop_database_sp(db.name, db.mode) != 0:
                    return 1
            return 0
    return 1
# Descripción:
# Devuelve una lista con los nombres de las bases de datos
# Valores de retorno:
# Lista de strings con los nombres de las bases de datos
# Si ocurrió un error devuelve una lista vacía
# Si no hay bases de datos devuelve una lista vacía
def showDatabases() -> list:
    """Return the names of the registered databases.

    Delegates to ``databases.list_databases_diff()`` -- presumably the
    "diff" variant de-duplicates databases replicated across storage modes
    (confirm in the list implementation).  Empty list when there are none.
    """
    return databases.list_databases_diff()
# Descripción:
# Renombra la base de datos databaseOld por databaseNew
# Parámetros:
# databaseOld:str - Nombre actual de la base de datos, debe cumplir con las reglas de identificadores de SQL
# databaseNew:str - Nuevo nombre de la base de datos, debe cumplir con las reglas de identificadores de SQL
# Valores de retorno:
# 0 - Operación exitosa
# 1 - Error en la operación
# 2 - databaseOld no existente
# 3 - databaseNew existente
def alterDatabase(databaseOld: str, databaseNew: str) -> int:
    """Rename *databaseOld* to *databaseNew* across every storage mode.

    A database may be replicated across several modes (see alterTableMode);
    the previous implementation renamed and committed after the first copy
    only, and returned None (instead of 1) when a physical rename failed.

    Returns:
        0 - success
        1 - invalid name, a physical rename failed, or the commit failed
        2 - databaseOld does not exist
        3 - databaseNew already exists
    """
    if not (re.search(DB_NAME_PATTERN, databaseOld) and re.search(DB_NAME_PATTERN, databaseNew)):
        return 1
    if databases.search(databaseNew) != None:
        return 3
    dbs = databases.find_all(databaseOld)
    if dbs == []:
        return 2
    renamed = []  # catalog entries already pointed at the new name
    for db in dbs:
        if db.mode == "avl":
            code = avlMode.alterDatabase(databaseOld, databaseNew)
        elif db.mode == "b":
            code = BMode.alterDatabase(databaseOld, databaseNew)
        elif db.mode == "bplus":
            code = BPlusMode.alterDatabase(databaseOld, databaseNew)
        elif db.mode == "dict":
            code = DictMode.alterDatabase(databaseOld, databaseNew)
        elif db.mode == "isam":
            code = ISAMMode.alterDatabase(databaseOld, databaseNew)
        elif db.mode == "json":
            code = jsonMode.alterDatabase(databaseOld, databaseNew)
        elif db.mode == "hash":
            code = HashMode.alterDatabase(databaseOld, databaseNew)
        else:
            code = 1  # unknown mode in the catalog -- treat as failure
        if code != 0:
            # Roll back the catalog names changed so far.  NOTE(review): the
            # physical renames of earlier copies are not undone (same gap as
            # the original implementation).
            for prev in renamed:
                prev.name = databaseOld
            return 1
        db.name = databaseNew
        renamed.append(db)
    try:
        Serializable.commit(databases, "lista_bases_de_datos")
        return 0
    except:
        for prev in renamed:
            prev.name = databaseOld
        return 1
# Descripción:
# Elimina por completo la base de datos indicada en database
# Parámetros:
# database:str - Es el nombre de la base de datos que se desea eliminar, debe cumplir con las reglas de identificadores de SQL
# Valores de retorno:
# 0 - Operación exitosa
# 1 - Error en la operación
# 2 - Base de datos no existente
def dropDatabase(database: str) -> int:
    """Drop every copy of *database* (one per storage mode) and persist the
    catalog.

    The previous implementation returned 0 after dropping the first copy,
    leaving copies in other storage modes behind, and fell off the end
    (returning None instead of 1) when a physical drop failed.

    Returns:
        0 - success
        1 - invalid name, a physical drop failed, or the commit failed
        2 - database does not exist
    """
    if not re.search(DB_NAME_PATTERN, database):
        return 1
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    for db in dbs:
        if db.mode == "avl":
            code = avlMode.dropDatabase(database)
        elif db.mode == "b":
            code = BMode.dropDatabase(database)
        elif db.mode == "bplus":
            code = BPlusMode.dropDatabase(database)
        elif db.mode == "dict":
            code = DictMode.dropDatabase(database)
        elif db.mode == "isam":
            code = ISAMMode.dropDatabase(database)
        elif db.mode == "json":
            code = jsonMode.dropDatabase(database)
        elif db.mode == "hash":
            code = HashMode.dropDatabase(database)
        else:
            code = 1  # unknown mode in the catalog -- treat as failure
        if code != 0:
            return 1
        # Remove exactly this (name, mode) entry from the catalog.
        databases.delete_sp(db.name, db.mode)
    # Persist the catalog, retrying a few times like the rest of the module.
    for x in range(5):
        try:
            Serializable.commit(databases, "lista_bases_de_datos")
            return 0
        except:
            continue
    return 1
# Descripción:
# Elimina por la base de datos con el nombre y modo indicados
# Parámetros:
# database:str - Es el nombre de la base de datos que se desea eliminar, debe cumplir con las reglas de identificadores de SQL
# mode:str - El modo de almacenamiento utilizado por la base de datos que se desea eliminar
# Valores de retorno:
# 0 - Operación exitosa
# 1 - Error en la operación
# 2 - Base de datos no existente
# 4 - Modo incorrecto
def __drop_database_sp(database: str, mode: str) -> int:
    """Drop the single copy of *database* stored with *mode* ("sp" =
    specific).  Used internally when a per-mode copy becomes redundant.

    Returns:
        0 - success
        1 - invalid name, physical drop failed, or commit failed
        2 - database does not exist
        4 - invalid mode

    NOTE(review): when no copy with *mode* exists, or the physical drop
    reports failure, the loop finishes without returning and the function
    yields None instead of an error code -- confirm callers tolerate this.
    """
    if re.search(DB_NAME_PATTERN, database):
        dbs = databases.find_all(database)
        if dbs == []:
            return 2
        if mode not in MODES:
            return 4
        for db in dbs:
            # Only the copy whose mode matches the requested one is touched.
            if db.mode == mode == "avl":
                code = avlMode.dropDatabase(database)
            elif db.mode == mode == "b":
                code = BMode.dropDatabase(database)
            elif db.mode == mode == "bplus":
                code = BPlusMode.dropDatabase(database)
            elif db.mode == mode == "dict":
                code = DictMode.dropDatabase(database)
            elif db.mode == mode == "isam":
                code = ISAMMode.dropDatabase(database)
            elif db.mode == mode == "json":
                code = jsonMode.dropDatabase(database)
            elif db.mode == mode == "hash":
                code = HashMode.dropDatabase(database)
            else:
                continue
            if code == 0:
                # Remove exactly this (name, mode) entry, then persist with
                # a bounded retry.
                databases.delete_sp(db.name, db.mode)
                for x in range(5):
                    try:
                        Serializable.commit(databases, "lista_bases_de_datos")
                        return 0
                    except:
                        continue
                return 1
    else:
        return 1
# Descripción:
# Crea una nueva tabla en la base de datos indicada
# Parámetros:
# database:str - El nombre de la base de datos a la que se desea agregar la tabla
# table:str - El nombre de la nueva tabla
# numberColumns:int - La cantidad de columnas que manejará la tabla
# Valores de Retorno:
# 0 - Operación exitosa
# 1 - Error en la operación
# 2 - Base de datos inexistente
# 3 - Tabla existente
def createTable(database: str, table: str, numberColumns: int) -> int:
    """Create *table* with *numberColumns* columns in *database*, using the
    database's storage engine, and register it in the catalog.

    Returns:
        0 - success
        1 - catalog commit failed after 5 retries
        2 - database does not exist
        3 - table already exists
    """
    db = databases.search(database)
    if db == None:
        return 2
    if table in showTables(database):
        return 3
    # Dispatch to the storage engine that owns this database.
    if db.mode == "avl":
        result = avlMode.createTable(database, table, numberColumns)
    elif db.mode == "b":
        result = BMode.createTable(database, table, numberColumns)
    elif db.mode == "bplus":
        result = BPlusMode.createTable(database, table, numberColumns)
    elif db.mode == "dict":
        result = DictMode.createTable(database, table, numberColumns)
    elif db.mode == "isam":
        result = ISAMMode.createTable(database, table, numberColumns)
    elif db.mode == "json":
        result = jsonMode.createTable(database, table, numberColumns)
    elif db.mode == "hash":
        result = HashMode.createTable(database, table, numberColumns)
    if result == 0:
        # Mirror the physical table in the catalog and persist it.
        # NOTE(review): if tables.create() fails, the function still returns
        # 0 while the catalog is out of sync with storage -- confirm intent.
        if db.tables.create(table, numberColumns) == 0:
            for x in range(5):
                try:
                    Serializable.commit(databases, "lista_bases_de_datos")
                    return result
                except:
                    continue
            return 1
    return result
# Descripción:
# Crea una nueva tabla en la base de datos indicada con el modo indicado
# Parámetros:
# database:str - El nombre de la base de datos a la que se desea agregar la tabla
# table:str - El nombre de la nueva tabla
# numberColumns:int - La cantidad de columnas que manejará la tabla
# mode:str - El modo de almacenamiento utilizado por la base de datos
# Valores de Retorno:
# 0 - Operación exitosa
# 1 - Error en la operación
# 2 - Base de datos inexistente
# 3 - Tabla existente
# 4 - Modo incorrecto
def __create_table_sp(database: str, table: str, numberColumns: int, mode: str) -> int:
    """Create *table* in the copy of *database* stored with *mode* ("sp" =
    specific).  Internal helper for alterTableMode.

    Returns:
        0 - success
        1 - catalog commit failed after 5 retries
        2 - database does not exist
        4 - invalid mode
        (3 - "table exists" is documented but the check is disabled below)
    """
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    # NOTE(review): the duplicate-table check is intentionally disabled --
    # alterTableMode creates the same table name in a different mode copy.
    # if table in showTables(database):
    #     return 3
    if mode not in MODES:
        return 4
    for db in dbs:
        # Only the copy whose mode matches the requested one is touched.
        if db.mode == mode == "avl":
            result = avlMode.createTable(database, table, numberColumns)
        elif db.mode == mode == "b":
            result = BMode.createTable(database, table, numberColumns)
        elif db.mode == mode == "bplus":
            result = BPlusMode.createTable(database, table, numberColumns)
        elif db.mode == mode == "dict":
            result = DictMode.createTable(database, table, numberColumns)
        elif db.mode == mode == "isam":
            result = ISAMMode.createTable(database, table, numberColumns)
        elif db.mode == mode == "json":
            result = jsonMode.createTable(database, table, numberColumns)
        elif db.mode == mode == "hash":
            result = HashMode.createTable(database, table, numberColumns)
        else:
            continue
        if result == 0:
            # Mirror the table in the catalog and persist with bounded retry.
            if db.tables.create(table, numberColumns) == 0:
                for x in range(5):
                    try:
                        Serializable.commit(databases, "lista_bases_de_datos")
                        return result
                    except:
                        continue
                return 1
        return result
    # NOTE(review): if no copy matched *mode*, ``result`` is unbound here and
    # this line raises NameError -- confirm MODES validation makes it dead.
    return result
# Descripción:
# Devuelve una lista con los nombres de todas las tablas de la base de datos
# Parámetros:
# database:str - El nombre de la base de datos cuyas tablas se desean obtener
# Valores de retorno:
# Si existen la base de datos y las tablas devuelve una lista de nombres de tablas
# Si existe la base de datos, pero no existen tablas devuelve una lista vacía
# Si no existe la base de datos devuelve None
def showTables(database: str) -> list:
    """Collect the table names of every registered copy of *database*
    (one copy per storage mode).

    Returns None when the database is not registered, otherwise a
    (possibly empty) list of table names.
    """
    db_copies = databases.find_all(database)
    if db_copies == []:
        return None
    names = []
    for db_copy in db_copies:
        engine = db_copy.mode
        if engine == "avl":
            found = avlMode.showTables(database)
        elif engine == "b":
            found = BMode.showTables(database)
        elif engine == "bplus":
            found = BPlusMode.showTables(database)
        elif engine == "dict":
            found = DictMode.showTables(database)
        elif engine == "isam":
            found = ISAMMode.showTables(database)
        elif engine == "json":
            found = jsonMode.showTables(database)
        elif engine == "hash":
            found = HashMode.showTables(database)
        else:
            found = []
        names += found
    return names
# Descripción:
# Elimina la llave primaria actual en la información de la tabla, manteniendo el índice
# actual de la estructura del árbol hasta que se invoque de nuevo el alterAddPK()
# Parámetros:
# database:str - El nombre de la base de datos a utilizar
# table:str - El nombre de la tabla a utilizar
# Valores de retorno:
# 0 - Operación exitosa
# 1 - Error en la operación
# 2 - database no existente
# 3 - table no existente
# 4 - pk no existente
def alterDropPK(database: str, table: str) -> int:
    """Remove the primary-key declaration of *table* (the storage engine
    keeps its current index until alterAddPK is called again).

    Returns:
        0 - success
        1 - error (including catalog commit failure)
        2 - database does not exist
        3 - table does not exist in any copy
        4 - table has no primary key
    """
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    for db in dbs:
        if db.mode == "avl":
            result = avlMode.alterDropPK(database, table)
        elif db.mode == "b":
            result = BMode.alterDropPK(database, table)
        elif db.mode == "bplus":
            result = BPlusMode.alterDropPK(database, table)
        elif db.mode == "dict":
            result = DictMode.alterDropPK(database, table)
        elif db.mode == "isam":
            result = ISAMMode.alterDropPK(database, table)
        elif db.mode == "json":
            result = jsonMode.alterDropPK(database, table)
        elif db.mode == "hash":
            result = HashMode.alterDropPK(database, table)
        # result == 3 means "table not in this copy": keep trying the others.
        if result != 3:
            if result == 0:
                # Clear the PK in the catalog and persist with bounded retry.
                db.tables.search(table).pk = []
                for x in range(5):
                    try:
                        Serializable.commit(databases, "lista_bases_de_datos")
                        return result
                    except:
                        break
                return 1
            break
    return result
# Descripción:
# Agrega una columna al final de cada registro de la tabla y base de datos especificada
# Parámetros:
# database:str - El nombre de la base de datos a utilizar
# table:str - El nombre de la tabla a utilizar
# default:any - Es el valor que se establecerá en a la nueva columna para los registros existentes
# Valores de retorno:
# 0 - Operación exitosa
# 1 - Error en la operación
# 2 - database no existente
# 3 - table no existente
def alterAddColumn(database: str, table: str, default: any) -> int:
    """Append a column to every record of *table*, filling existing rows
    with *default*, and bump the catalog's column count.

    Returns:
        0 - success
        1 - error (including catalog commit failure)
        2 - database does not exist
        3 - table does not exist in any copy
    """
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    for db in dbs:
        if db.mode == "avl":
            result = avlMode.alterAddColumn(database, table, default)
        elif db.mode == "b":
            result = BMode.alterAddColumn(database, table, default)
        elif db.mode == "bplus":
            result = BPlusMode.alterAddColumn(database, table, default)
        elif db.mode == "dict":
            result = DictMode.alterAddColumn(database, table, default)
        elif db.mode == "isam":
            result = ISAMMode.alterAddColumn(database, table, default)
        elif db.mode == "json":
            result = jsonMode.alterAddColumn(database, table, default)
        elif db.mode == "hash":
            result = HashMode.alterAddColumn(database, table, default)
        # result == 3 means "table not in this copy": keep trying the others.
        if result != 3:
            if result == 0:
                # Bug fix: the table lives under ``db.tables`` -- ``db.search``
                # raised AttributeError (compare alterDropPK / alterTable).
                db.tables.search(table).columns += 1
                for x in range(5):
                    try:
                        Serializable.commit(databases, "lista_bases_de_datos")
                        return result
                    except:
                        break
                return 1
            break
    return result
# Descripción:
# Eliminar una n-ésima columna de cada registro de la tabla excepto si son llaves primarias
# Parámetros:
# database:str - El nombre de la base de datos a utilizar
# table:str - El nombre de la tabla a utilizar
# columnNumber:int - El número de la columna a eliminar
# Valores de retorno:
# 0 - Operación exitosa
# 1 - Error en la operación
# 2 - database no existe
# 3 - table no existe
# 4 - Llave no puede eliminarse o tabla quedarse sin columnas
# 5 - Columna fuera de límites
def alterDropColumn(database: str, table: str, columnNumber: int) -> int:
    """Drop column *columnNumber* from every record of *table* (engines
    refuse to drop primary-key columns) and decrement the catalog's column
    count.

    Returns:
        0 - success
        1 - error (including catalog commit failure)
        2 - database does not exist
        3 - table does not exist in any copy
        4 - column is a key, or the table would be left without columns
        5 - column number out of bounds
    """
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    for db in dbs:
        if db.mode == "avl":
            result = avlMode.alterDropColumn(database, table, columnNumber)
        elif db.mode == "b":
            result = BMode.alterDropColumn(database, table, columnNumber)
        elif db.mode == "bplus":
            result = BPlusMode.alterDropColumn(database, table, columnNumber)
        elif db.mode == "dict":
            result = DictMode.alterDropColumn(database, table, columnNumber)
        elif db.mode == "isam":
            result = ISAMMode.alterDropColumn(database, table, columnNumber)
        elif db.mode == "json":
            result = jsonMode.alterDropColumn(database, table, columnNumber)
        elif db.mode == "hash":
            result = HashMode.alterDropColumn(database, table, columnNumber)
        # result == 3 means "table not in this copy": keep trying the others.
        if result != 3:
            if result == 0:
                # Bug fix: the table lives under ``db.tables`` -- ``db.search``
                # raised AttributeError (compare alterDropPK / alterTable).
                db.tables.search(table).columns -= 1
                for x in range(5):
                    try:
                        Serializable.commit(databases, "lista_bases_de_datos")
                        return result
                    except:
                        break
                return 1
            break
    return result
# Descripción:
# Convierte un string en bytes utilizando la codificación especificada
# Parámetros:
# text:str - El texto que se desea codificar
# encoding:str - El tipo de codificación que se desea utilizar
# Valores de retorno:
# bytes - El objeto de tipo bytes con el texto codificado
# None - Error durante la codificación
def codificar(text, encoding):
    """Encode *text* into bytes using *encoding*.

    *encoding* must be one of the module's VALID_ENCODING names and one of
    the codecs mapped below ('utf8', 'iso-8859-1', 'ascii'); any other
    value, or a text that the codec cannot represent, yields None.
    """
    if encoding not in VALID_ENCODING:
        return None
    codec_by_name = {
        "utf8": "utf-8",
        "iso-8859-1": "iso-8859-1",
        "ascii": "ascii",
    }
    codec = codec_by_name.get(encoding)
    if codec is None:
        return None
    try:
        return bytes(text, encoding=codec)
    except:
        return None
# Descripción:
# Inserta un registro en la estructura de datos asociada a la tabla y la base de datos
# Parámetros:
# database:str - El nombre de la base de datos a utilizar
# table:str - El nombre de la tabla a utilizar
# register:list - Es una lista de elementos que represent un registro
# Valores de retorno:
# 0 - Operación exitosa
# 1 - Error en la operación
# 2 - database no existente
# 3 - table no existente
# 4 - Llave primaria duplicada
# 5 - Columnas fuera de límites
def insert(database: str, table: str, register: list):
    """Insert *register* into *table*, trying each registered copy of
    *database* (one per storage mode) until one reports something other
    than "table not found" (3).

    Returns:
        0 - success
        1 - error
        2 - database does not exist
        3 - table not found in any copy
        4 - duplicate primary key
        5 - column count out of bounds
    """
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    for db in dbs:
        # NOTE(review): per-field encoding via codificar() was disabled; the
        # register is stored as-is.
        # for x in range(0, len(register)):
        #     aux = codificar(register[x], db.encoding)
        #     if aux == None:
        #         return 1
        #     register[x] = aux
        if db.mode == "avl":
            result = avlMode.insert(database, table, register)
        elif db.mode == "b":
            result = BMode.insert(database, table, register)
        elif db.mode == "bplus":
            result = BPlusMode.insert(database, table, register)
        elif db.mode == "dict":
            result = DictMode.insert(database, table, register)
        elif db.mode == "isam":
            result = ISAMMode.insert(database, table, register)
        elif db.mode == "json":
            result = jsonMode.insert(database, table, register)
        elif db.mode == "hash":
            result = HashMode.insert(database, table, register)
        # result == 3 means "table not in this copy": keep trying the others.
        if result != 3:
            break
    return result
# Descripción:
# Inserta un registro en la estructura de datos asociada a la tabla y la base de datos con el modo indicado
# Parámetros:
# database:str - El nombre de la base de datos a utilizar
# table:str - El nombre de la tabla a utilizar
# register:list - Es una lista de elementos que represent un registro
# mode:str - El modo de la base de datos en la que se desea insertar
# Valores de retorno:
# 0 - Operación exitosa
# 1 - Error en la operación
# 2 - database no existente
# 3 - table no existente
# 4 - Llave primaria duplicada
# 5 - Columnas fuera de límites
# 6 - Modo incorrecto
def __insert_sp(database: str, table: str, register: list, mode: str):
    """Insert *register* into the copy of *database* stored with *mode*
    ("sp" = specific).  Internal helper for alterTableMode.

    Returns:
        0 - success
        1 - error
        2 - database does not exist
        3 - table not found
        4 - duplicate primary key
        5 - register length does not match the table's column count
        6 - invalid mode
    """
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    tb = databases.find_table(database, table)
    if tb == None:
        return 3
    if len(register) != tb.columns:
        return 5
    if mode not in MODES:
        return 6
    for db in dbs:
        # NOTE(review): per-field encoding via codificar() was disabled; the
        # register is stored as-is.
        # for x in range(0, len(register)):
        #     aux = codificar(register[x], db.encoding)
        #     if aux == None:
        #         return 1
        #     register[x] = aux
        if db.mode == mode == "avl":
            result = avlMode.insert(database, table, register)
        elif db.mode == mode == "b":
            result = BMode.insert(database, table, register)
        elif db.mode == mode == "bplus":
            result = BPlusMode.insert(database, table, register)
        elif db.mode == mode == "dict":
            result = DictMode.insert(database, table, register)
        elif db.mode == mode == "isam":
            result = ISAMMode.insert(database, table, register)
        elif db.mode == mode == "json":
            result = jsonMode.insert(database, table, register)
        elif db.mode == mode == "hash":
            result = HashMode.insert(database, table, register)
        else:
            continue
        # result == 3 means "table not in this copy": keep trying the others.
        if result != 3:
            break
    return result
# Descripción:
# Carga un archivo CSV de una ruta especificada indicando la base de datos y tabla donde será almacenado
# Parámetros:
# file:str - Ruta del archivo CSV a utilizar
# database:str - El nombre de la base de datos a utilizar
# table:str - El nombre de la tabla a utilizar
# Valores de retorno:
# Lista con los valores enteros que devuelve el insert por cada fila del CSV
# Si ocurrió un error o el archivo CSV no tiene filas devuelve una lista vacía
def loadCSV(file: str, database: str, table: str) -> list:
    """Bulk-load the CSV file at *file* into *table* of *database*.

    Each row is passed to insert(); the list of insert() result codes is
    returned, one per row.  An unreadable or empty file yields [].

    Uses the stdlib ``csv`` parser instead of the previous
    ``line.strip().split(",")``, which broke quoted fields containing
    commas and silently stripped whitespace from the first/last field.
    """
    import csv as _csv  # local import keeps the module surface unchanged
    try:
        results = []
        # newline="" is the documented way to open files for csv.reader.
        with open(file, "r", newline="") as handle:
            for row in _csv.reader(handle):
                results.append(insert(database, table, row))
        return results
    except Exception:
        # Best-effort loader, mirroring the original contract: any failure
        # (missing file, bad permissions, engine exception) yields [].
        return []
def delete(database: str, table: str, columns: list):
    """Dispatch a delete on *table* of *database* to its storage engine.

    Returns 2 when the database is not registered; otherwise the result
    code reported by the engine's own delete().
    """
    target = databases.search(database)
    if target is None:
        return 2
    engine = target.mode
    if engine == "avl":
        return avlMode.delete(database, table, columns)
    if engine == "b":
        return BMode.delete(database, table, columns)
    if engine == "bplus":
        return BPlusMode.delete(database, table, columns)
    if engine == "dict":
        return DictMode.delete(database, table, columns)
    if engine == "isam":
        return ISAMMode.delete(database, table, columns)
    if engine == "json":
        return jsonMode.delete(database, table, columns)
    if engine == "hash":
        return HashMode.delete(database, table, columns)
def extractTable(database,table):
    """Return every register of *table*, or None when the database or the
    table does not exist.

    NOTE(review): ``result`` is reassigned (not accumulated) per matching
    copy, so if the table exists in several mode copies only the rows of
    the last one are returned -- confirm this is intended.
    """
    dbs = databases.find_all(database)
    if dbs == []:
        return None
    if databases.find_table(database, table) == None:
        return None
    result = []
    for db in dbs:
        tb = db.tables.search(table)
        if tb != None:
            # Dispatch to the storage engine that owns this copy.
            if db.mode == "avl":
                result = avlMode.extractTable(database,table)
            elif db.mode == "b":
                result = BMode.extractTable(database,table)
            elif db.mode == "bplus":
                result = BPlusMode.extractTable(database,table)
            elif db.mode == "dict":
                result = DictMode.extractTable(database,table)
            elif db.mode == "isam":
                result = ISAMMode.extractTable(database,table)
            elif db.mode == "json":
                result = jsonMode.extractTable(database,table)
            elif db.mode == "hash":
                result = HashMode.extractTable(database,table)
            else:
                continue
    return result
def extractRangeTable(database: str, table: str, columnNumber: int, lower: any, upper: any) -> list:
    """Dispatch a ranged extraction (rows selected by the engine between
    *lower* and *upper* on column *columnNumber*) to the storage engine of
    *database*.  Returns None when the database is not registered.
    """
    target = databases.search(database)
    if target is None:
        return None
    engine = target.mode
    if engine == "avl":
        return avlMode.extractRangeTable(database, table, columnNumber, lower, upper)
    if engine == "b":
        return BMode.extractRangeTable(database, table, columnNumber, lower, upper)
    if engine == "bplus":
        return BPlusMode.extractRangeTable(database, table, columnNumber, lower, upper)
    if engine == "dict":
        return DictMode.extractRangeTable(database, table, columnNumber, lower, upper)
    if engine == "isam":
        return ISAMMode.extractRangeTable(database, table, columnNumber, lower, upper)
    if engine == "json":
        return jsonMode.extractRangeTable(database, table, columnNumber, lower, upper)
    if engine == "hash":
        return HashMode.extractRangeTable(database, table, columnNumber, lower, upper)
def alterTable(database, tableOld, tableNew):
    """Rename *tableOld* to *tableNew* inside *database* and update the
    catalog.

    Returns:
        0 - success
        1 - error (including catalog commit failure)
        2 - database does not exist
        3 - tableOld not found in any copy
    """
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    for db in dbs:
        if db.mode == "avl":
            result = avlMode.alterTable(database,tableOld,tableNew)
        elif db.mode == "b":
            result = BMode.alterTable(database,tableOld,tableNew)
        elif db.mode == "bplus":
            result = BPlusMode.alterTable(database,tableOld,tableNew)
        elif db.mode == "dict":
            result = DictMode.alterTable(database,tableOld,tableNew)
        elif db.mode == "isam":
            result = ISAMMode.alterTable(database,tableOld,tableNew)
        elif db.mode == "json":
            result = jsonMode.alterTable(database,tableOld,tableNew)
        elif db.mode == "hash":
            result = HashMode.alterTable(database,tableOld,tableNew)
        # result == 3 means "table not in this copy": keep trying the others.
        if result != 3:
            if result == 0:
                # Mirror the rename in the catalog and persist with retry.
                db.tables.search(tableOld).name = tableNew
                for x in range(5):
                    try:
                        Serializable.commit(databases, "lista_bases_de_datos")
                        return result
                    except:
                        break
                return 1
            break
    return result
def dropTable(database,table):
    """Drop *table* from *database* and remove it from the catalog.

    Returns:
        0 - success
        1 - error (including catalog commit failure)
        2 - database does not exist
        3 - table not found in any copy
    """
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    for db in dbs:
        if db.mode == "avl":
            result = avlMode.dropTable(database, table)
        elif db.mode == "b":
            result = BMode.dropTable(database, table)
        elif db.mode == "bplus":
            result = BPlusMode.dropTable(database, table)
        elif db.mode == "dict":
            result = DictMode.dropTable(database, table)
        elif db.mode == "isam":
            result = ISAMMode.dropTable(database, table)
        elif db.mode == "json":
            result = jsonMode.dropTable(database, table)
        elif db.mode == "hash":
            result = HashMode.dropTable(database, table)
        # result == 3 means "table not in this copy": keep trying the others.
        if result != 3:
            if result == 0:
                # Drop the catalog entry and persist with bounded retry.
                db.tables.delete(table)
                for x in range(5):
                    try:
                        Serializable.commit(databases, "lista_bases_de_datos")
                        return result
                    except:
                        break
                return 1
            break
    return result
# Descripción:
# Elimina una tabla especificada en una base de datos con el nombre y modo especificados.
# Parámetros:
# database:str - El nombre de la base de datos que se va a utilizar
# table:str - El nombre de la tabla que se desea eliminar
# mode:str - El modo que debe tener la base de datos que se va a utilizar
# Valores de retorno:
# 0 - Operación exitosa
# 1 - Error durante la operación
# 2 - database no existe
# 3 - table no existe
def __drop_table_sp(database, table, mode):
    """Drop *table* from the copy of *database* stored with *mode* ("sp" =
    specific).  Internal helper for alterTableMode.

    Returns:
        0 - success
        1 - error (including catalog commit failure)
        2 - database does not exist
        3 - table does not exist
    """
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    if databases.find_table(database, table) == None:
        return 3
    for db in dbs:
        # Only the copy whose mode matches the requested one is touched.
        if db.mode == mode == "avl":
            result = avlMode.dropTable(database, table)
        elif db.mode == mode == "b":
            result = BMode.dropTable(database, table)
        elif db.mode == mode == "bplus":
            result = BPlusMode.dropTable(database, table)
        elif db.mode == mode == "dict":
            result = DictMode.dropTable(database, table)
        elif db.mode == mode == "isam":
            result = ISAMMode.dropTable(database, table)
        elif db.mode == mode == "json":
            result = jsonMode.dropTable(database, table)
        elif db.mode == mode == "hash":
            result = HashMode.dropTable(database, table)
        else:
            continue
        # result == 3 means "table not in this copy": keep trying the others.
        if result != 3:
            if result == 0:
                # Drop the catalog entry and persist with bounded retry.
                db.tables.delete(table)
                for x in range(5):
                    try:
                        Serializable.commit(databases, "lista_bases_de_datos")
                        return result
                    except:
                        break
                return 1
            break
    # NOTE(review): if no copy matched *mode*, ``result`` is unbound here and
    # this line raises NameError -- confirm callers always pass a live mode.
    return result
def extractRow(database, table, columns):
    """Dispatch extractRow (fetch the record whose key columns are
    *columns*) to the storage engine of *database*.  Returns 2 when the
    database is not registered, otherwise the engine's result.
    """
    target = databases.search(database)
    if target is None:
        return 2
    engine = target.mode
    if engine == "avl":
        return avlMode.extractRow(database, table, columns)
    if engine == "b":
        return BMode.extractRow(database, table, columns)
    if engine == "bplus":
        return BPlusMode.extractRow(database, table, columns)
    if engine == "dict":
        return DictMode.extractRow(database, table, columns)
    if engine == "isam":
        return ISAMMode.extractRow(database, table, columns)
    if engine == "json":
        return jsonMode.extractRow(database, table, columns)
    if engine == "hash":
        return HashMode.extractRow(database, table, columns)
def update(database, table, register, columns):
    """Dispatch an update of *register* (keyed by *columns*) on *table* to
    the storage engine of *database*.  Returns 2 when the database is not
    registered, otherwise the engine's result code.
    """
    target = databases.search(database)
    if target is None:
        return 2
    engine = target.mode
    if engine == "avl":
        return avlMode.update(database, table, register, columns)
    if engine == "b":
        return BMode.update(database, table, register, columns)
    if engine == "bplus":
        return BPlusMode.update(database, table, register, columns)
    if engine == "dict":
        return DictMode.update(database, table, register, columns)
    if engine == "isam":
        return ISAMMode.update(database, table, register, columns)
    if engine == "json":
        return jsonMode.update(database, table, register, columns)
    if engine == "hash":
        return HashMode.update(database, table, register, columns)
def truncate(database, table):
    """Dispatch a truncate of *table* to the storage engine of *database*.
    Returns 2 when the database is not registered, otherwise the engine's
    result code.
    """
    target = databases.search(database)
    if target is None:
        return 2
    engine = target.mode
    if engine == "avl":
        return avlMode.truncate(database, table)
    if engine == "b":
        return BMode.truncate(database, table)
    if engine == "bplus":
        return BPlusMode.truncate(database, table)
    if engine == "dict":
        return DictMode.truncate(database, table)
    if engine == "isam":
        return ISAMMode.truncate(database, table)
    if engine == "json":
        return jsonMode.truncate(database, table)
    if engine == "hash":
        return HashMode.truncate(database, table)
def alterAddPK(database, table, columns):
    """Declare *columns* as the primary key of *table* and record them in
    the catalog.

    Returns:
        0 - success
        1 - error (including catalog commit failure)
        2 - database does not exist
        3 - table not found in any copy
    """
    dbs = databases.find_all(database)
    if dbs == []:
        return 2
    for db in dbs:
        if db.mode == "avl":
            result = avlMode.alterAddPK(database, table, columns)
        elif db.mode == "b":
            result = BMode.alterAddPK(database, table, columns)
        elif db.mode == "bplus":
            result = BPlusMode.alterAddPK(database, table, columns)
        elif db.mode == "dict":
            result = DictMode.alterAddPK(database, table, columns)
        elif db.mode == "isam":
            result = ISAMMode.alterAddPK(database, table, columns)
        elif db.mode == "json":
            result = jsonMode.alterAddPK(database, table, columns)
        elif db.mode == "hash":
            result = HashMode.alterAddPK(database, table, columns)
        # result == 3 means "table not in this copy": keep trying the others.
        if result != 3:
            if result == 0:
                # Record the PK columns in the catalog, persist with retry.
                db.tables.search(table).pk += columns
                for x in range(5):
                    try:
                        Serializable.commit(databases, "lista_bases_de_datos")
                        return result
                    except:
                        break
                return 1
            break
    return result
bc14cc074618702fe004846062d87461a64782e7 | 1,015 | py | Python | pdf_downloader.py | mniac810/RPA_Challenge | 3f203bf33c9643ab77e22e109520bf748c866e46 | [
"Apache-2.0"
] | 1 | 2022-01-10T04:49:38.000Z | 2022-01-10T04:49:38.000Z | pdf_downloader.py | mniac810/RPA_Challenge | 3f203bf33c9643ab77e22e109520bf748c866e46 | [
"Apache-2.0"
] | null | null | null | pdf_downloader.py | mniac810/RPA_Challenge | 3f203bf33c9643ab77e22e109520bf748c866e46 | [
"Apache-2.0"
] | null | null | null | from RPA.Browser.Selenium import Selenium
from RPA.FileSystem import FileSystem
import datetime
import os
class PDFDownloader:
    """Downloads the "business case" PDF linked from each given page.

    *page_urls* and *names* must be parallel sequences: ``names[i]`` is the
    (extension-less) filename expected for the PDF downloaded from
    ``page_urls[i]``.  Everything is saved under ``./output``.
    """

    def __init__(self, page_urls, names):
        self.browser = Selenium()
        self.files = FileSystem()
        # Download target directory, computed once and reused below.
        self._dir = f'{os.getcwd()}/output'
        self._urls = page_urls
        self._names = names

    def pdf_download(self):
        """Visit every page, click its PDF link and wait for the download.

        Fix: previously the download directory was rebuilt in a local
        variable named ``dir`` (shadowing the builtin) while the ``_dir``
        attribute set in ``__init__`` went unused; both produced the same
        path, so behaviour is unchanged.
        """
        self.browser.set_download_directory(self._dir, True)
        for num, url in enumerate(self._urls):
            self.browser.open_available_browser(url)
            self.browser.wait_until_element_is_visible(
                locator="css:div#business-case-pdf>a",
                timeout=datetime.timedelta(minutes=1))
            self.browser.click_element(locator="css:div#business-case-pdf>a")
            # Block until the file actually lands on disk (up to 5 minutes).
            self.files.wait_until_created('{}/{}.pdf'.format(
                self._dir, self._names[num]),
                timeout=60.0 * 5)
            # One browser session per URL: close before the next iteration.
            self.browser.close_browser()
bc1654338e8c6eee8c72f671de285bccc11be562 | 1,352 | py | Python | tests/TestFiles/fodder.py | ComposableAnalytics/ComposaPy | 6d82f8f953c709dde55e7f055e2f8b4e6765caba | [
"MIT"
] | null | null | null | tests/TestFiles/fodder.py | ComposableAnalytics/ComposaPy | 6d82f8f953c709dde55e7f055e2f8b4e6765caba | [
"MIT"
] | null | null | null | tests/TestFiles/fodder.py | ComposableAnalytics/ComposaPy | 6d82f8f953c709dde55e7f055e2f8b4e6765caba | [
"MIT"
] | null | null | null | # Add this test back later, unfortunately casting errors and no time to deal with them.
# @pytest.mark.parametrize("dataflow_object", ["external_input_int.json"], indirect=True)
# def test_external_input_int(dataflow_object: DataFlowObject, dataflow: DataFlow):
# dataflow_rs = dataflow_object.run(external_inputs={"IntInput": 3})
#
# assert dataflow_rs.modules.first_with_name("Calculator").result.value_obj == 5.0
# @pytest.mark.parametrize("dataflow_id", ["EXTERNAL_INPUT_FILE_ID"], indirect=True)
# def test_external_input_file(dataflow: dataflow, dataflow_id: int):
#
# table_dataflow = dataflow.run(table_dataflow_id)
# table = table_dataflow.modules.first_with_name("Sql Query").result
# test_input = { "TableInput": table }
#
# dataflow_rs = dataflow.run(dataflow_id, external_inputs=test_input)
#
# assert dataflow_rs.modules.first().result.Headers == table.Headers
# assert dataflow_rs.modules.first().result.SqlQuery == table.SqlQuery
# def test_queryview_to_pandas(queryview: queryview):
# df = queryview.queryview_from_id(137072)
# print(df.head())
# print(df.dtypes)
#
#
# def test_queryview_to_pandas_streaming(queryview: queryview):
# t = time()
# df = queryview.queryview_from_id(137072)
# print(time() - t)
# print(df.head())
# print(df.dtypes)
# print(len(df))
| 38.628571 | 89 | 0.727071 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,318 | 0.974852 |
bc16f12ef99fb925404a57659bcb97a3596fb611 | 886 | py | Python | django/store/views.py | brickfaced/django-ecommerce | e06c69f4cc1f57ac2acffa0baf1c48fa99fede13 | [
"MIT"
] | 24 | 2021-03-10T17:04:46.000Z | 2022-02-28T20:09:52.000Z | nextdrf/django/store/views.py | RJ-0605/YT_NextJS_DRF_Ecommerce_2021_Part1 | 4416c2168143aa71ba863d61efb71c5712dbd215 | [
"MIT"
] | 1 | 2021-04-07T19:57:10.000Z | 2021-04-07T20:37:03.000Z | nextdrf/django/store/views.py | RJ-0605/YT_NextJS_DRF_Ecommerce_2021_Part1 | 4416c2168143aa71ba863d61efb71c5712dbd215 | [
"MIT"
] | 18 | 2021-03-21T09:03:40.000Z | 2022-01-31T10:08:24.000Z | from django.shortcuts import render
from rest_framework import generics
from . import models
from .models import Category, Product
from .serializers import CategorySerializer, ProductSerializer
class ProductListView(generics.ListAPIView):
    """Read-only endpoint listing every product."""
    queryset = Product.objects.all()
    serializer_class = ProductSerializer
class Product(generics.RetrieveAPIView):
    """Read-only detail endpoint for one product, looked up by slug.

    NOTE(review): this view class shadows the imported ``Product`` model.
    ``Product.objects.all()`` below still resolves to the model because the
    class body executes before the class name is bound, but any later
    module-level use of ``Product`` would hit the view -- consider renaming
    (renaming changes the name URL confs import, so it is not done here).
    """
    lookup_field = "slug"
    queryset = Product.objects.all()
    serializer_class = ProductSerializer
class CategoryItemView(generics.ListAPIView):
    """List the products of the category named by the ``slug`` URL kwarg,
    including products of all its sub-categories.

    ``get_descendants(include_self=True)`` suggests Category is a tree
    (e.g. django-mptt) model -- TODO confirm.
    """
    serializer_class = ProductSerializer
    def get_queryset(self):
        # include_self=True keeps products filed directly under the
        # requested category, not only under its descendants.
        return models.Product.objects.filter(
            category__in=Category.objects.get(slug=self.kwargs["slug"]).get_descendants(include_self=True)
        )
class CategoryListView(generics.ListAPIView):
    """List the categories at tree level 1 -- presumably the top-level
    shop categories; confirm against the Category model's level semantics.
    """
    queryset = Category.objects.filter(level=1)
    serializer_class = CategorySerializer
bc1a89242c0c01e6a959ddfab279551c0ad333eb | 831 | py | Python | 6. Algorithms - Graph Traversal/3 - GraphTraversal-DFS.py | PacktPublishing/Data-Structures-and-Algorithms-The-Complete-Masterclass | a4f34ecaa23682df62763e3b6b54709383f45c0b | [
"MIT"
] | 25 | 2021-01-20T19:09:05.000Z | 2022-02-02T01:29:46.000Z | 6. Algorithms - Graph Traversal/3 - GraphTraversal-DFS.py | EmmaMuhleman1/Data-Structures-and-Algorithms-The-Complete-Masterclass | a4f34ecaa23682df62763e3b6b54709383f45c0b | [
"MIT"
] | null | null | null | 6. Algorithms - Graph Traversal/3 - GraphTraversal-DFS.py | EmmaMuhleman1/Data-Structures-and-Algorithms-The-Complete-Masterclass | a4f34ecaa23682df62763e3b6b54709383f45c0b | [
"MIT"
] | 23 | 2021-01-20T19:09:12.000Z | 2022-03-23T02:50:05.000Z | class Node():
def __init__(self, value):
self.value = value
self.adjacentlist = []
self.visited = False
class Graph():
def DFS(self, node, traversal):
node.visited = True
traversal.append(node.value)
for element in node.adjacentlist:
if element.visited is False:
self.DFS(element, traversal)
return traversal
node1 = Node("A")
node2 = Node("B")
node3 = Node("C")
node4 = Node("D")
node5 = Node("E")
node6 = Node("F")
node7 = Node("G")
node8 = Node("H")
node1.adjacentlist.append(node2)
node1.adjacentlist.append(node3)
node1.adjacentlist.append(node4)
node2.adjacentlist.append(node5)
node2.adjacentlist.append(node6)
node4.adjacentlist.append(node7)
node6.adjacentlist.append(node8)
graph = Graph()
print(graph.DFS(node1, [])) | 21.868421 | 44 | 0.649819 | 407 | 0.489771 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.028881 |
bc1b2d2d655b2df2c4a3a6c3fd0b4a26677171d9 | 613 | py | Python | blender/arm/logicnode/array/LN_array_loop_node.py | niacdoial/armory | 3f9b633fbf772017c576a3f77695a6c28d9956e1 | [
"Zlib"
] | null | null | null | blender/arm/logicnode/array/LN_array_loop_node.py | niacdoial/armory | 3f9b633fbf772017c576a3f77695a6c28d9956e1 | [
"Zlib"
] | null | null | null | blender/arm/logicnode/array/LN_array_loop_node.py | niacdoial/armory | 3f9b633fbf772017c576a3f77695a6c28d9956e1 | [
"Zlib"
] | null | null | null | from arm.logicnode.arm_nodes import *
class ArrayLoopNode(ArmLogicTreeNode):
    """Loops through each item of the given array."""
    bl_idname = 'LNArrayLoopNode'
    bl_label = 'Array Loop'
    arm_version = 1

    def init(self, context):
        super(ArrayLoopNode, self).init(context)
        # Socket creation order matters: it defines the node's pin layout.
        for socket_type, socket_name in (('ArmNodeSocketAction', 'In'),
                                         ('ArmNodeSocketArray', 'Array')):
            self.add_input(socket_type, socket_name)
        for socket_type, socket_name in (('ArmNodeSocketAction', 'Loop'),
                                         ('NodeSocketShader', 'Value'),
                                         ('NodeSocketInt', 'Index'),
                                         ('ArmNodeSocketAction', 'Done')):
            self.add_output(socket_type, socket_name)
| 34.055556 | 54 | 0.676998 | 572 | 0.933116 | 0 | 0 | 0 | 0 | 0 | 0 | 231 | 0.376835 |
bc1b8a063d015af41a39f93f00886505f7a7b958 | 2,592 | py | Python | streamalert_cli/terraform/cloudwatch_events.py | Meliairon/streamalert | 3b774a59d260b2822cd156e837781bd34f3625f7 | [
"Apache-2.0"
] | 1 | 2020-07-28T09:54:07.000Z | 2020-07-28T09:54:07.000Z | streamalert_cli/terraform/cloudwatch_events.py | Meliairon/streamalert | 3b774a59d260b2822cd156e837781bd34f3625f7 | [
"Apache-2.0"
] | null | null | null | streamalert_cli/terraform/cloudwatch_events.py | Meliairon/streamalert | 3b774a59d260b2822cd156e837781bd34f3625f7 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from streamalert.shared.logger import get_logger
LOGGER = get_logger(__name__)
# From here: http://amzn.to/2zF7CS0
VALID_EVENT_PATTERN_KEYS = {
'version',
'id',
'detail-type',
'source',
'account',
'time',
'region',
'resources',
'detail',
}
def generate_cloudwatch_events(cluster_name, cluster_dict, config):
    """Add the CloudWatch Events module to the Terraform cluster dict.

    Args:
        cluster_name (str): The name of the currently generating cluster
        cluster_dict (defaultdict): The dict containing all Terraform config for a given cluster.
        config (dict): The loaded config from the 'conf/' directory

    Returns:
        bool: Result of applying the cloudwatch events module
    """
    settings = config['clusters'][cluster_name]['modules']['cloudwatch_events']
    if not settings.get('enabled', True):
        LOGGER.debug('CloudWatch events module is not enabled')
        return True  # not an error

    account = config['global']['account']

    # An explicitly configured pattern wins; a falsy value (None/{}) is a
    # valid override and skips key validation.
    if 'event_pattern' in settings:
        event_pattern = settings['event_pattern']
        if event_pattern and not set(event_pattern).issubset(VALID_EVENT_PATTERN_KEYS):
            LOGGER.error('Invalid CloudWatch event pattern: %s', json.dumps(event_pattern))
            return False
    else:
        # Default: match only events originating from this AWS account
        event_pattern = {'account': [account['aws_account_id']]}

    cluster_dict['module']['cloudwatch_events_{}'.format(cluster_name)] = {
        'source': './modules/tf_cloudwatch_events',
        'cluster': cluster_name,
        'prefix': account['prefix'],
        'kinesis_arn': '${{module.kinesis_{}.arn}}'.format(cluster_name),
        # None == null in json objects and terraform 12 supports null variables
        'event_pattern': json.dumps(event_pattern) if event_pattern is not None else event_pattern
    }
    return True
| 34.56 | 98 | 0.699846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,594 | 0.614969 |
bc1bb420dc30704ccfcc14f1b5e2b0fb34f290c1 | 2,416 | py | Python | EvolutionaryAlgorithm/evolution.py | stepan-krivanek/evolutionary-algorithm | d8bafdb0caad2d66cc8138090c046244bd7779e8 | [
"MIT"
] | null | null | null | EvolutionaryAlgorithm/evolution.py | stepan-krivanek/evolutionary-algorithm | d8bafdb0caad2d66cc8138090c046244bd7779e8 | [
"MIT"
] | null | null | null | EvolutionaryAlgorithm/evolution.py | stepan-krivanek/evolutionary-algorithm | d8bafdb0caad2d66cc8138090c046244bd7779e8 | [
"MIT"
] | null | null | null |
import numpy as np
from LocalSearch.local_search import *
from EvolutionAlgorithm import EvolutionAlgorithm as EA
def init_real_chromosome(size, variation=2):
    """Return a real-valued chromosome of *size* genes drawn from N(0, variation)."""
    return np.random.normal(loc=0, scale=variation, size=size)
def init_bin_chromosome(size):
    """Return a binary chromosome: *size* fair coin flips (0/1)."""
    return np.random.binomial(n=1, p=0.5, size=size)
def initialization(population_size, init_f):
    """Build a population array of *population_size* chromosomes from init_f()."""
    population = [init_f() for _ in range(population_size)]
    return np.array(population)
def tournament_selection(parents_num, fitness):
    """Pick *parents_num* population indices by geometric-ranked tournaments.

    Each tournament samples k = size/4 + 1 distinct candidates, ranks them by
    ascending fitness (lower is better) and picks one with probability
    proportional to p*(1-p)^rank, p = 0.5.
    """
    tour_size = int(fitness.size / 4) + 1
    p_win = 0.5
    pool = np.arange(0, fitness.size)
    # Normalised geometric weights over tournament ranks (best rank first).
    weights = np.array([p_win * (1 - p_win) ** rank for rank in range(tour_size)])
    weights = weights / np.sum(weights)
    winners = np.empty(parents_num, dtype='int32')
    for slot in range(parents_num):
        contenders = np.random.choice(pool, tour_size, False)
        ranked = contenders[np.argsort(fitness[contenders])]
        winners[slot] = np.random.choice(ranked, p=weights)
    return winners
def k_point_crossover(parents, k=2):
    """Perform k-point crossover on a population of chromosomes.

    Parents are shuffled and paired off; each pair exchanges chromosome
    tails at ``k`` randomly chosen cut points, producing two offspring
    per pair.

    Args:
        parents (np.ndarray): 2-D array, one chromosome per row.  With an
            odd number of rows the unpaired middle parent yields no
            offspring (``zip`` drops it) -- prefer even population sizes.
        k (int): number of crossover points per pair.

    Returns:
        np.ndarray: array of offspring chromosomes.
    """
    # Shuffle a copy so the caller's array is not reordered in place.
    parents = parents.copy()
    np.random.shuffle(parents)
    group_a, group_b = np.array_split(parents, 2)
    offsprings = []
    for parent_a, parent_b in zip(group_a, group_b):
        cross_points = np.random.randint(1, parent_a.size, k)
        for cross_point in cross_points:
            # Build BOTH children from the pre-swap parents.  The original
            # code overwrote parent_a first, so parent_b was rebuilt from the
            # already-swapped tail and always came out unchanged.
            parent_a, parent_b = (
                np.append(parent_a[:cross_point], parent_b[cross_point:]),
                np.append(parent_b[:cross_point], parent_a[cross_point:]),
            )
        offsprings.append(parent_a)
        offsprings.append(parent_b)
    return np.array(offsprings)
def truncation_replacement_strategy(old_candidates, old_fitness, new_candidates, new_fitness):
    """Merge old and new populations and keep the lowest-fitness individuals,
    truncated back to the old population size."""
    pool = np.concatenate((old_candidates, new_candidates))
    pool_fitness = np.concatenate((old_fitness, new_fitness))
    keep = np.argsort(pool_fitness)[:old_candidates.shape[0]]
    return pool[keep], pool_fitness[keep]
def evolutionary_algorithm(obj_f, population_size, generations):
    """Run a real-valued EA minimising *obj_f*; returns (best_result, best_fitness)."""
    ea = EA()
    # Population of 10-gene real chromosomes drawn from N(0, 2)
    ea.set_initialization(lambda x: initialization(x, lambda: init_real_chromosome(10)))
    ea.set_selection(tournament_selection)
    # normal_distribution_addition comes from LocalSearch.local_search (star import)
    ea.set_mutation(normal_distribution_addition)
    # 0.5 is presumably the crossover rate -- TODO confirm against the EA API
    ea.set_crossover(k_point_crossover, 0.5)
    ea.set_replacement_strategy(truncation_replacement_strategy)
    ea.initialize(obj_f, population_size)
    ea.run(generations)
    return ea.get_best_result(), ea.get_best_fitness()
if __name__ == "__main__":
    # Demo: minimise the sphere benchmark (from LocalSearch.local_search)
    # with 100 individuals over 1000 generations.
    print(evolutionary_algorithm(sphere, 100, 1000))
| 30.974359 | 94 | 0.722268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.007036 |
bc1bb7a3ffc94e487eae34a752e77501805ef163 | 31 | py | Python | tests/tests/dummy.py | cjhall1283/pylint_runner | 64dae35dd07886524278c90c62270ae7a7d3a7a7 | [
"MIT"
] | 16 | 2016-01-14T17:38:39.000Z | 2020-10-30T13:41:33.000Z | tests/tests/dummy.py | cjhall1283/pylint_runner | 64dae35dd07886524278c90c62270ae7a7d3a7a7 | [
"MIT"
] | 12 | 2015-03-19T13:56:18.000Z | 2020-09-11T12:09:16.000Z | tests/tests/dummy.py | cjhall1283/pylint_runner | 64dae35dd07886524278c90c62270ae7a7d3a7a7 | [
"MIT"
] | 7 | 2017-02-14T01:03:22.000Z | 2021-08-25T16:34:17.000Z | """
Dummy file for testing
"""
| 7.75 | 22 | 0.612903 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.967742 |
bc1e77fd96c476fc9f6f17e8e53fde1e2ccef6c7 | 1,545 | py | Python | src/kids/test/__init__.py | 0k/kids.test | 51554726caea29a5e690c76981c6035dc5c7cfee | [
"BSD-2-Clause"
] | null | null | null | src/kids/test/__init__.py | 0k/kids.test | 51554726caea29a5e690c76981c6035dc5c7cfee | [
"BSD-2-Clause"
] | null | null | null | src/kids/test/__init__.py | 0k/kids.test | 51554726caea29a5e690c76981c6035dc5c7cfee | [
"BSD-2-Clause"
] | null | null | null | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
import os
import tempfile
import unittest
import re
import shutil
class Test(unittest.TestCase):
    """TestCase enriched with containment and regex assertion helpers."""

    ## XXXvlab: it seems it's already there in PY3 and maybe PY2,
    ## so why keep it ?
    def assertContains(self, haystack, needle, msg=None):
        """Fail unless ``needle`` occurs within ``haystack``."""
        message = msg if msg else "%r should contain %r." % (haystack, needle)
        self.assertTrue(needle in haystack, message)

    def assertNotContains(self, haystack, needle, msg=None):
        """Fail if ``needle`` occurs within ``haystack``."""
        message = msg if msg else "%r should not contain %r." % (haystack, needle)
        self.assertTrue(needle not in haystack, message)

    def assertRegex(self, text, regex, msg=None):
        """Fail unless ``regex`` matches somewhere in ``text`` (MULTILINE)."""
        message = msg if msg else "%r should match regex %r." % (text, regex)
        self.assertTrue(re.search(regex, text, re.MULTILINE) is not None, message)
class BaseTmpDirTest(Test):
    """Test base class that runs every test inside a fresh temp directory."""

    def setUp(self):
        """Create a scratch directory and chdir into it, remembering the old cwd."""
        self.tmpdir = tempfile.mkdtemp()
        self.old_cwd = os.getcwd()
        os.chdir(self.tmpdir)

    def tearDown(self):
        """Return to the original cwd and delete the scratch directory."""
        os.chdir(self.old_cwd)
        shutil.rmtree(self.tmpdir)
def run(test):
    """Run the given TestCase class with a text runner and print its report."""
    from pprint import pprint  # NOTE(review): unused import
    # PY2/PY3 compatibility shim for StringIO
    try:
        from StringIO import StringIO
    except ImportError:  ## PY3
        from io import StringIO
    stream = StringIO()
    runner = unittest.TextTestRunner(stream=stream)
    # NOTE(review): unittest.makeSuite is deprecated and removed in 3.13;
    # unittest.TestLoader().loadTestsFromTestCase(test) is the replacement.
    runner.run(unittest.makeSuite(test))
    stream.seek(0)
    print(stream.read())
| 26.637931 | 78 | 0.641424 | 1,048 | 0.678317 | 0 | 0 | 0 | 0 | 0 | 0 | 247 | 0.159871 |
bc1e78e2d936299d31ed164ae95b1d514a32eb51 | 221 | py | Python | homeassistant/components/ridwell/const.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/ridwell/const.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | homeassistant/components/ridwell/const.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Constants for the Ridwell integration."""
import logging
DOMAIN = "ridwell"
LOGGER = logging.getLogger(__package__)
DATA_ACCOUNT = "account"
DATA_COORDINATOR = "coordinator"
SENSOR_TYPE_NEXT_PICKUP = "next_pickup"
| 18.416667 | 44 | 0.778281 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.39819 |
bc1e7e433a435fea0e74defa83e3a8743bf77c68 | 6,143 | py | Python | src/py/test/IntegrationTest/Main.py | AdamPesci/diveR | ae7fe415bbdbb008fadd2b96a0a3a5092b04fdc7 | [
"MIT"
] | null | null | null | src/py/test/IntegrationTest/Main.py | AdamPesci/diveR | ae7fe415bbdbb008fadd2b96a0a3a5092b04fdc7 | [
"MIT"
] | null | null | null | src/py/test/IntegrationTest/Main.py | AdamPesci/diveR | ae7fe415bbdbb008fadd2b96a0a3a5092b04fdc7 | [
"MIT"
] | null | null | null | """
Authors: Augustine Italiano, Ryan Forster, Adam Camer-Pesci
This class will act as the control centre for the backend components:
Calculations.py
FileLoader.py
"""
import os
import sys
import csv
from datetime import datetime
import json
import rpy2.robjects.packages as rpackages
from rpy2.robjects.vectors import StrVector
import FileLoader
import FileWriter
import Calculations
import Parser
from rpy2 import robjects
def main():
# rpy2 setup
# print(rpy2.__version__)
# needs comma as list. all function in sanity check will iterate characters otherwise.
package_names = ('scuba',)
utils = rpackages.importr('utils')
# sanity check -> see if package is already installed
if all(rpackages.isinstalled(x) for x in package_names):
have_package = True
else:
have_package = False
# install uninstalled packages from list
if not have_package:
utils.chooseCRANmirror(ind=1)
packnames_to_install = [
x for x in package_names if not rpackages.isinstalled(x)]
if len(packnames_to_install) > 0:
utils.install_packages(StrVector(packnames_to_install))
scuba = rpackages.importr('scuba')
json_string = "{\"filePaths\":[\"./resources/s_1076_leh.ans\"],\"halfTimeSet\":\"ZH-L16B\",\"gasCombinations\":[[0.2,0.2,0.6,0],[0.5,0,0.5,42.84]],\"outputFilePath\":\"./zhB\",\"gfToggle\":\"True\",\"cpToggle\":\"True\"}"
settings = json.loads(json_string)
halftime_set = settings['halfTimeSet']
file_paths = settings['filePaths']
out_path = settings['outputFilePath']
gas_list = settings['gasCombinations']
cp_toggle = settings['cpToggle']
toggle = settings['gfToggle'].upper()
if(toggle == 'FALSE'):
gf_toggle = False
else:
gf_toggle = True
if(halftime_set == 'Buzzacott'):
gf_toggle = False
# gasCode = 6.812134
# print(round(gasCode - 6,6)) #full
# print(round(round(gasCode - 6,6),2)) #oxygen
# print(round(round(gasCode - 6,6),5)) #oxygen
# initialize classes
csv_loader = FileLoader.CSVLoader()
ans_loader = FileLoader.ANSLoader()
# # #gasList = [4.81,0.0]
calc = Calculations.Calculations()
# ans_path = ['./resources/s_34_leh.ans']
# ans_dive_profile, ans_gas_list = ans_loader.load(ans_path)
parser = {'Haldane': Parser.HaldaneParser('Haldane', toggle=gf_toggle),
'DSAT': Parser.HaldaneParser('Haldane', toggle=gf_toggle),
'ZH-L16A': Parser.BuhlmannParser('ZH-L16A', toggle=gf_toggle),
'ZH-L16B': Parser.BuhlmannParser('ZH-L16A', toggle=gf_toggle),
'ZH-L16C': Parser.BuhlmannParser('ZH-L16A', toggle=gf_toggle),
'Workman65': Parser.WorkmanParser('Workman65', toggle=gf_toggle),
'Buzzacott': Parser.BuzzacotParser('Buzzacott')
}
fw = FileWriter.CSVWriter()
extra_list_ans = None
file_paths = list(file_paths)
file_paths.append('./resources/s_33_leh.ans')
if(file_paths[0].endswith('.ans') or file_paths[0].endswith('.txt')):
dives, filtered_paths, gas_codes, extra_list_ans = ans_loader.load(
file_paths)
gas_list = list()
else:
dives, filtered_paths = csv_loader.load(file_paths)
# initialize file counter
i = 0
# iterate over each file and perform calculations
for dive_profile in dives:
try:
##for each ans dive, convert codes for that dive into gaslist to pass to initialize dive
if(filtered_paths[0].endswith('.ans') or filtered_paths[0].endswith('.txt')):
gas_list.clear()
gas_list = ans_loader.generate_gas_list(gas_codes[i])
# convert gas_codes into gas_list
# dive profile containing the gas mixes
try:
dive, gasses = calc.initialise_dive(dive_profile, gas_list) #ans files will have iterable gaslist
except IndexError:
dive, gasses = calc.initialise_dive(dive_profile, gas_list) #csv files will not
tanklist = robjects.r['tanklist']
print(tanklist(dive))
cp = calc.compartment_pressures(
dive, halftime_set)
ap = calc.ambient_pressures(dive_profile)
nIPP = calc.nitrogen_inert_pressure(ap, gasses, dive)
heIPP = calc.helium_inert_pressure(ap, gasses, dive)
totalIPP = calc.totalIPP(nIPP, heIPP)
max_ascent = calc.max_ascent(dive_profile)
otu = calc.o2_tox(dive)
begin_time = datetime.now()
values = calc.max_values(ap, cp, totalIPP, nIPP, heIPP)
print("finished max_values in: " + str(datetime.now()-begin_time))
gfs = []
begin_time = datetime.now()
if(gf_toggle):
gfs = calc.gradient_factors(
dive_profile, cp, halftime_set, values)
parser[halftime_set].parse_json(
values, gfs, otu, max_ascent, os.path.basename(filtered_paths[i]), halftime_set, toggle=gf_toggle)
print("finished gf's in : " + str(datetime.now()-begin_time))
# write cp values if toggled
if(cp_toggle):
path = out_path
basename = os.path.basename(path)
replace = filtered_paths[i].replace('.csv', '')
replace += '_compartment-vals.csv'
new_basename = os.path.basename(replace)
cp_path = path.replace(basename, new_basename)
utils.write_csv(cp, cp_path)
except ValueError as err:
sys.stderr.write(repr(err))
i = i + 1
parsed_json = parser[halftime_set].get_list_json()
dump = json.dumps(parsed_json, indent=4)
# print(dump)
if(file_paths[0].endswith('.ans')):
fw.write(out_path, parsed_json, gf_toggle, extra_list=extra_list_ans)
else:
fw.write(out_path, parsed_json, gf_toggle)
if __name__ == '__main__':
begin_time = datetime.now()
main()
print("finished script in : " + str(datetime.now()-begin_time))
| 38.879747 | 225 | 0.629497 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,647 | 0.26811 |
bc1e9e83d6c06a1ae9645a6b283f62c80e387eab | 1,328 | py | Python | exussum/fs.py | exussum/exussum | 54fc2388364ab8a3a2cc8e6fb838a7b748d7c889 | [
"BSD-2-Clause"
] | null | null | null | exussum/fs.py | exussum/exussum | 54fc2388364ab8a3a2cc8e6fb838a7b748d7c889 | [
"BSD-2-Clause"
] | null | null | null | exussum/fs.py | exussum/exussum | 54fc2388364ab8a3a2cc8e6fb838a7b748d7c889 | [
"BSD-2-Clause"
] | null | null | null | import subprocess
import fnmatch
from pathlib import Path
import os
import re
def all_files(src):
    """Yield file paths (relative to ``src.root``) that match the include
    globs and do not match the exclude globs or excluded directory names."""
    include_re = re.compile("|".join(fnmatch.translate(pat) for pat in src.included_files))
    exclude_re = re.compile("|".join(fnmatch.translate(pat) for pat in src.excluded_files))
    for dirpath, subdirs, filenames in os.walk(src.root):
        # Prune excluded directories in place so os.walk never descends into them
        subdirs[:] = [d for d in subdirs if d not in src.excluded_paths]
        rel_dir = os.path.relpath(dirpath, src.root)
        for filename in filenames:
            if not include_re.match(filename):
                continue
            # An empty exclude list compiles to "" (matches anything), so the
            # truthiness guard on src.excluded_files must stay.
            if src.excluded_files and exclude_re.match(filename):
                continue
            yield os.path.join(rel_dir, filename)
def _run_rg(args):
result = subprocess.run(args, stdout=subprocess.PIPE)
out = result.stdout.decode("utf-8")
if out:
print(out, end="", flush=True)
def _chunk_by_len(arr, max_size):
chunk = []
size = 0
for e in arr:
if len(e) + size + len(chunk) > max_size:
yield chunk
chunk.clear()
size = 0
size += len(e)
chunk.append(e)
if chunk:
yield chunk
def grep(config, args):
    """Run ripgrep over the project's matching files, batching file
    arguments so each command line stays under ~4096 characters."""
    os.chdir(config.src.root)  # NOTE(review): changes process-wide cwd and never restores it
    cmd = [config.unix.rg, "--color", "never", "-j1"] + args
    # Remaining command-line budget after the fixed part of the command.
    # NOTE(review): ``max`` shadows the builtin within this function.
    max = 4096 - sum(len(e) for e in cmd)
    for chunk in _chunk_by_len(all_files(config.src), max):
        _run_rg(cmd + chunk)
| 27.102041 | 100 | 0.611446 | 0 | 0 | 825 | 0.621235 | 0 | 0 | 0 | 0 | 36 | 0.027108 |
bc1ebe2ab303cdc8d8d9f7d8408efd5e477ec4b6 | 1,917 | py | Python | click_example/utils.py | captainCapitalism/typer-oo-example | 00b94948f52a582591c93b7b50c782a71dc744af | [
"MIT"
] | 2 | 2021-09-15T18:04:39.000Z | 2022-01-06T03:45:54.000Z | click_example/utils.py | captainCapitalism/typer-oo-example | 00b94948f52a582591c93b7b50c782a71dc744af | [
"MIT"
] | null | null | null | click_example/utils.py | captainCapitalism/typer-oo-example | 00b94948f52a582591c93b7b50c782a71dc744af | [
"MIT"
] | null | null | null | import click
class command:
    """Decorator marking a bound method as a click command.

    The decorated method gains a ``__command__`` factory which, given the
    owning instance, returns ``self.cls`` (a ``click.Command`` by default)
    whose callback forwards to the method and whose params are the options
    collected by any stacked ``@option`` decorators.
    """

    def __init__(self, name=None, cls=click.Command, **attrs):
        self.name = name
        self.cls = cls
        self.attrs = attrs

    def __call__(self, method):
        def __command__(this):
            def wrapper(*args, **kwargs):
                return method(this, *args, **kwargs)

            # Fix: a method with no @option decorators previously left
            # ``options`` unbound here, raising NameError when the command
            # object was built.  Default to an empty parameter list.
            options = getattr(method, "__options__", [])
            return self.cls(self.name, callback=wrapper, params=options, **self.attrs)

        method.__command__ = __command__
        return method
class option:
    """Decorator collecting a ``click.Option`` onto the decorated method.

    Options accumulate on ``method.__options__`` for a later ``@command``
    (or the ``Cli`` assembler) to pick up.
    """

    def __init__(self, *param_decls, **attrs):
        self.param_decls = param_decls
        self.attrs = attrs

    def __call__(self, method):
        collected = getattr(method, "__options__", None)
        if collected is None:
            collected = []
            method.__options__ = collected
        collected.append(click.Option(param_decls=self.param_decls, **self.attrs))
        return method
class Cli:
    """Assemble a ``click.Group`` from @option / @command decorated methods.

    Instantiating a subclass scans its attributes and wires decorated
    methods into the group stored on ``self._cli``.
    """
    def __new__(cls, *args, **kwargs):
        self = super(Cli, cls).__new__(cls, *args, **kwargs)
        self._cli = click.Group()
        # Wrap instance options
        self.__option_callbacks__ = set()
        for attr_name in dir(cls):
            attr = getattr(cls, attr_name)
            # Methods carrying options but no @command contribute group-level options
            if hasattr(attr, "__options__") and not hasattr(attr, "__command__"):
                self._cli.params.extend(attr.__options__)
                self.__option_callbacks__.add(attr)
        # Wrap commands
        for attr_name in dir(cls):
            attr = getattr(cls, attr_name)
            if hasattr(attr, "__command__"):
                # __command__ binds the instance so callbacks receive ``self``
                command = attr.__command__(self)
                # command.params.extend(_options)
                self._cli.add_command(command)
        return self

    def run(self):
        """Run the CLI application."""
        self()

    def __call__(self):
        """Run the CLI application."""
        self._cli()
bc1fee8e7b2dd05b18c6fd6cd75205806000b3bf | 4,716 | py | Python | neuron_simulator_service/SAC_network/stimulus.py | jpm343/RetinaX | 63f84209b4f8bdcdc88f35c54a03e7b7c56f4ab3 | [
"MIT"
] | 2 | 2020-12-06T23:04:39.000Z | 2020-12-29T18:28:20.000Z | neuron_simulator_service/SAC_network/stimulus.py | jpm343/RetinaX | 63f84209b4f8bdcdc88f35c54a03e7b7c56f4ab3 | [
"MIT"
] | null | null | null | neuron_simulator_service/SAC_network/stimulus.py | jpm343/RetinaX | 63f84209b4f8bdcdc88f35c54a03e7b7c56f4ab3 | [
"MIT"
] | 1 | 2020-10-23T19:40:59.000Z | 2020-10-23T19:40:59.000Z | from __future__ import division
import numpy as np
# Set bar stimuli speed
def update_bar_speed(BPsyn, delay, width, speed, d_init,
                     synapse_type="alphaCSyn", angle=0):
    """Re-schedule every bipolar-cell synapse for a moving-bar stimulus.

    The bar of the given *width* travels at *speed* (mm/s) in a direction
    rotated *angle* degrees from the axes; each synapse onset becomes
    delay + (signed distance of the BP terminal along the motion axis)/speed.
    Synapses the bar never reaches are deactivated instead.
    """
    print "Updating bar speed to: %f mm/s" % speed
    angrad = angle * np.pi / 180.0
    angcos = np.cos(angrad)
    angsin = np.sin(angrad)
    for BPi in BPsyn:
        # Note that initial bar position, d_init, should be as far as to
        # ensure all BP are activated
        # (xprime, yprime): rotated and translated axes centered on the bar
        # xprime: axis of bar, yprime: normal to bar
        # xprime = BPi[1] * angsin - BPi[2] * angcos
        yprime = BPi[1] * angcos + BPi[2] * angsin + d_init
        synapse_onset = delay + yprime / speed
        if ((speed > 0 and yprime < (0 - width)) or
                (speed < 0 and yprime > 0)):
            # Bar won't pass over BP location (yprime)
            deactivate_BP_synapse(BPi, synapse_type, synapse_onset)
            continue
        duration = None
        if synapse_type == "BPexc":
            # Event-driven synapse stays active for the bar's transit time
            duration = abs(width / speed)
        activate_BP_synapse(BPi, synapse_type, synapse_onset, duration)
    return BPsyn
def set_stimulus(BPsyn, stimulus_type, delay, synapse_type, **kwargs):
    """Configure bipolar-cell synapse onsets for one stimulus type.

    Supported *stimulus_type* values and their required kwargs:
      "bar": bar_width, bar_speed, bar_x_init (optional bar_angle)
      "annulus": center, inner_diam, outer_diam, duration
      "grating": bar_width, bar_speed, bar_x_init, spatial_freq, N_bars
      "bar_with_circular_mask": bar_width, bar_speed, bar_x_init,
          mask_center, mask_diam
    BPsyn entries are (synapse, x, y, ...) tuples; returns BPsyn.
    """
    if stimulus_type == "bar":
        width = kwargs['bar_width']
        speed = kwargs['bar_speed']
        x_init = kwargs['bar_x_init']
        if 'bar_angle' in kwargs:
            update_bar_speed(BPsyn, delay, width, speed, x_init, synapse_type,
                             kwargs['bar_angle'])
        else:
            update_bar_speed(BPsyn, delay, width, speed, x_init, synapse_type)
    elif stimulus_type == "annulus":
        center = kwargs['center']
        ri = kwargs['inner_diam']
        ro = kwargs['outer_diam']
        dur = kwargs['duration']
        print "Setting up annulus stimulus with delay: %1.1f (ms)" % delay
        # Flash: every synapse inside the ring fires simultaneously at *delay*
        for BPi in BPsyn:
            if in_annulus((BPi[1], BPi[2]), center, ri, ro):
                activate_BP_synapse(BPi, synapse_type, delay, dur)
    elif stimulus_type == "grating":
        width = kwargs['bar_width']
        speed = kwargs['bar_speed']
        x_init = kwargs['bar_x_init']
        x_freq = kwargs['spatial_freq']
        N_bars = kwargs['N_bars']
        # Each bar lasts width/speed; bars repeat with period x_freq/speed
        dur = width / speed
        period = x_freq / speed
        for BPi in BPsyn:
            if BPi[1] < (x_init - width): # Grating wont pass over BP location
                continue
            synapse_onset = delay + (BPi[1] - x_init) / speed
            activate_BP_synapse(BPi, synapse_type, synapse_onset, dur, period,
                                N_bars)
    elif stimulus_type == "bar_with_circular_mask":
        width = kwargs['bar_width']
        speed = kwargs['bar_speed']
        x_init = kwargs['bar_x_init']
        mask_center = kwargs['mask_center']
        mask_diam = kwargs['mask_diam']
        dur = width / speed
        # Only synapses inside the circular mask (radius mask_diam/2) respond
        for BPi in BPsyn:
            if BPi[1] < (x_init - width) or not in_annulus((BPi[1], BPi[2]),
                    mask_center, 0, mask_diam / 2):
                continue
            synapse_onset = delay + (BPi[1] - x_init) / speed
            activate_BP_synapse(BPi, synapse_type, synapse_onset, dur)
    return BPsyn
def in_annulus(point, center, inner_diam, outer_diam):
    """True when *point* lies in the closed ring around *center*.

    NOTE(review): despite the names, inner_diam/outer_diam are compared
    directly against the centre distance (i.e. treated as radii); callers
    in this module pass diam/2 accordingly.
    """
    distance = np.linalg.norm(np.array(point) - np.array(center))
    return inner_diam <= distance <= outer_diam
def activate_BP_synapse(BPsynapse, synapse_type, synapse_onset, dur=None,
                        period=0, n_events=1):
    """Schedule one bipolar-cell synapse to fire at *synapse_onset*.

    Current-based synapses ("alphaCSyn"/"expCSyn") are configured on
    BPsynapse[0]; event-driven synapses ("Exp2Syn"/"BPexc") program the
    stimulus generator at BPsynapse[-2] instead.
    NOTE(review): assumes BPsynapse is the tuple built by the network
    constructor (point process first, event source at index -2) -- confirm.
    """
    if synapse_type in ("alphaCSyn", "expCSyn"):
        BPsynapse[0].onset = synapse_onset
        BPsynapse[0].dur = BPsynapse[0].default_dur
        if dur is not None:
            BPsynapse[0].dur = dur
    elif synapse_type in ("Exp2Syn", "BPexc"):
        BPsynapse[-2].number = n_events
        BPsynapse[-2].interval = period
        BPsynapse[-2].start = synapse_onset
        BPsynapse[-2].noise = 0  # Default should be 0 anyway
        if synapse_type == "BPexc":
            BPsynapse[0].dur = dur
def deactivate_BP_synapse(BPsynapse, synapse_type, synapse_onset):
    """Silence a bipolar-cell synapse (zero duration / zero events) while
    still recording the onset time the stimulus would have had."""
    if synapse_type in ("alphaCSyn", "expCSyn"):
        BPsynapse[0].onset = synapse_onset
        BPsynapse[0].dur = 0
    elif synapse_type in ("Exp2Syn", "BPexc"):
        BPsynapse[-2].number = 0
        BPsynapse[-2].interval = 0
        BPsynapse[-2].start = synapse_onset
        BPsynapse[-2].noise = 0  # Default should be 0 anyway
        if synapse_type == "BPexc":
            BPsynapse[0].dur = 0
def insert_voltage_clamp(nrnobj, nrnsec, xsec, voltage_amp, dur):
    """Attach a single-electrode clamp at location *xsec* of section
    *nrnsec*, holding *voltage_amp* for *dur* (presumably mV / ms --
    NEURON's usual units)."""
    clamp = nrnobj.SEClamp(xsec, sec=nrnsec)
    clamp.amp1 = voltage_amp
    clamp.dur1 = dur
    return clamp
| 38.975207 | 79 | 0.599237 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 852 | 0.180662 |
bc21fb987995424da0c4a8da3583644d72bd8f6a | 3,326 | py | Python | test_prime_numbers.py | zekedran/python_tdd_ci_tutorial | 6a318ce5f26953dd44ca49da73815de4a2634fe5 | [
"MIT"
] | null | null | null | test_prime_numbers.py | zekedran/python_tdd_ci_tutorial | 6a318ce5f26953dd44ca49da73815de4a2634fe5 | [
"MIT"
] | null | null | null | test_prime_numbers.py | zekedran/python_tdd_ci_tutorial | 6a318ce5f26953dd44ca49da73815de4a2634fe5 | [
"MIT"
] | null | null | null | from prime_numbers import is_prime
def test_is_prime():
    """Exhaustively check is_prime() on -1, 0 and every integer 1..99.

    Rewrites the original flat assert list as data + loop, and adds the
    previously untested n == 1 edge case (1 is not prime).
    """
    primes = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47,
              53, 59, 61, 67, 71, 73, 79, 83, 89, 97}
    for n in [-1, 0] + list(range(1, 100)):
        expected = n in primes
        # ``is`` (not ==) preserves the original's requirement that
        # is_prime returns real booleans.
        assert is_prime(n) is expected, "is_prime(%d) should be %s" % (n, expected)
| 31.084112 | 34 | 0.695129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
bc220e54ff06969c5a581e943797eaee68f0f203 | 14,914 | py | Python | main.py | white-undo/scientific-calc | 46c24f289446f247a26946b855494458b92bd806 | [
"MIT"
] | null | null | null | main.py | white-undo/scientific-calc | 46c24f289446f247a26946b855494458b92bd806 | [
"MIT"
] | null | null | null | main.py | white-undo/scientific-calc | 46c24f289446f247a26946b855494458b92bd806 | [
"MIT"
] | null | null | null | #====================== #
# Scientific Plotting Calculator #
#---------------------- #
# Preetam Sharma #
# Artificial Intelligence, ICL #
#====================== #
# Useful modules and tools
from tkinter import BOTTOM, BOTH, TOP, Label, DISABLED, NORMAL, INSERT, END, messagebox
from matplotlib.backends.backend_tkagg import NavigationToolbar2Tk
import buttonsCreator
import windowCreator
from numpy import *
import math
class App:
    def __init__(self):
        """Build the main window, apply start-up defaults, create the two
        screens and the buttons, then enter the Tk main loop (blocks until
        the window closes)."""
        windowCreator.CreateWindow(self)
        self.FirstExecutionSettings()
        self.SetScreens()
        buttonsCreator.CreateButtons(self)
        self.mainWindow.mainloop()
def FirstExecutionSettings(self):
self.firstScreenText.set("0")
self.secondScreenText.set("0")
self.DEGButtonText.set("DEG")
self.ASINButtonText.set("asin()")
self.ACOSButtonText.set("acos()")
self.ATANButtonText.set("atan()")
self.SINButtonText.set("sin()")
self.COSButtonText.set("cos()")
self.TANButtonText.set("tan()")
    def SetScreens(self):
        """Create the two display labels: the result screen (bottom row) and
        the running-operation screen (top row) inside screensFrame."""
        # First screen
        self.firstScreen = Label(self.screensFrame, textvariable = self.firstScreenText, font = self.mainWindowFont, anchor = "e")
        self.firstScreen.grid(row = 1, column = 0, padx = 0, pady = 2)
        self.firstScreen.config(background = "black", fg = "white", justify = "right",height = "5", width = "62")
        # Second screen
        self.secondScreen = Label(self.screensFrame, textvariable = self.secondScreenText, font = self.secondaryWindowFont, anchor = "e")
        self.secondScreen.grid(row = 0, column = 0, padx = 0, pady = 2)
        self.secondScreen.config(background = "#20221f", fg = "white", justify = "right",height = "5", width = "62")
    # Function to add elements to the firstScreenText
    def firstScreenTextSet(self, num):
        """Append digit/character *num* to the entry screen, handling the
        leading-zero and just-calculated states."""
        # Analyzing if there are parenthesis
        if num == "(":
            self.parenthesisOpen = True
        elif num == ")":
            self.parenthesisOpen = False
        # A finished calculation starts a fresh entry on the next keypress
        if self.operationMade == True:
            self.firstScreenText.set("")
            self.operationMade = False
        # We get whats on the second screen
        self.textSecondScreen = self.secondScreenText.get()
        # NOTE(review): comparing a character to True is always False, so this
        # branch is dead code -- was a test like `in "+-*/"` intended?
        if self.textSecondScreen[-1] == True and self.operationFilling == True:
            self.firstScreenText.set("")
            self.operationFilling = True
        # Keep a single leading zero; replace it once a non-zero key arrives.
        # NOTE(review): `num == 0` only matches the integer 0, not "0".
        if(self.firstScreenText.get() == "0" and num == 0):
            return
        elif(self.firstScreenText.get() == "0" and num != 0):
            self.firstScreenText.set("")
            self.firstScreenText.set(self.firstScreenText.get() + str(num))
            self.operationFilling= False
        else:
            self.firstScreenText.set(self.firstScreenText.get() + str(num))
            self.operationFilling = False
        return
# Function to use on C button
def C(self):
self.firstScreenText.set("0")
self.secondScreenText.set("0")
return
# Function to use de Delete button
def Delete(self):
actualNumber = self.firstScreenText.get()
actualNumber = actualNumber[:-1]
self.firstScreenText.set(actualNumber)
return
    def clickOnOperation(self, operation):
        """Append *operation* (e.g. "+", "sin(") to the pending expression.

        While a parenthesis group is open the operator stays on the entry
        screen; otherwise the entry is flushed onto the second screen.
        """
        # exp/factorial replace the current entry instead of appending to it
        if operation == "exp(":
            self.firstScreenText.set("")
        elif operation == "factorial(":
            self.firstScreenText.set("")
        textFirstScreen = self.firstScreenText.get()
        # Drop a previous failed result before appending
        if textFirstScreen == "nan":
            textFirstScreen = ""
        textFirstScreen += operation
        self.firstScreenText.set(textFirstScreen)
        # Inside an open parenthesis keep accumulating on the first screen
        if self.parenthesisOpen == True:
            return
        self.firstScreenText.set("")
        textSecondScreen = self.secondScreenText.get()
        if textSecondScreen == "0":
            self.secondScreenText.set("")
            textSecondScreen = self.secondScreenText.get()
        textSecondScreen += textFirstScreen
        self.secondScreenText.set(textSecondScreen)
        return
# Function to change the DEG button text to RAD (user clicks on DEG button)
def changeDEGtext(self):
if self.DEGButtonText.get() == "DEG":
self.DEGButtonText.set("RAD")
else:
self.DEGButtonText.set("DEG")
return
# Function to change the trigonometric functions buttons text (user clicks on RAD button)
def changeTrigonometricFunctionsText(self):
if self.ASINButtonText.get() == "asin()":
self.ASINButtonText.set("asinh()")
self.ACOSButtonText.set("acosh()")
self.ATANButtonText.set("atanh()")
self.SINButtonText.set("sinh()")
self.COSButtonText.set("cosh()")
self.TANButtonText.set("tanh()")
else:
self.ASINButtonText.set("asin()")
self.ACOSButtonText.set("acos()")
self.ATANButtonText.set("atan()")
self.SINButtonText.set("sin()")
self.COSButtonText.set("cos()")
self.TANButtonText.set("tan()")
return
    # Function to make CE (the users clicks on CE button)
    def CE(self):
        """Clear only the entry (first) screen, keeping the pending operation."""
        self.firstScreenText.set("0")
        return
    # Function to use to calculate the result of the operations and add them to the history
    def calculate(self):
        # NOTE(review): these global statements have no effect here -- the
        # values below are stored as attributes on self, not module globals.
        global operationMade
        global actualHistoryOperation
        if self.secondScreenText.get() == "0":
            self.secondScreenText.set("")
        # We get the complete operation
        completeText = self.secondScreenText.get() + self.firstScreenText.get()
        # The displays use "," as decimal separator; eval() needs ".".
        completeTextWithDot = completeText.replace(",", ".")
        self.secondScreenText.set(completeText)
        # SECURITY NOTE: eval() executes whatever expression is on screen.
        solution = eval(completeTextWithDot)
        solutionTextWithoutDot = str(solution).replace(".", ",")
        self.firstScreenText.set(solutionTextWithoutDot)
        self.secondScreenText.set("0")
        self.operationMade = True
        self.actualHistoryOperation.set(completeText+" = "+solutionTextWithoutDot + "\n\n")
        self.addHistory()
        return
# Function to add an operation to the history
def addHistory(self):
global actualHistoryOperation
self.history.config(state = NORMAL)
self.history.insert(INSERT, "\n\n" + self.actualHistoryOperation.get())
self.history.config(state = DISABLED)
return
# Function to use when the user clicks on CLEAR HISTORY button
def clearHistory(self):
global actualHistoryOperation
self.actualHistoryOperation.set("")
self.history.config(state = NORMAL)
self.history.delete(1.0, END)
self.history.config(state = DISABLED)
return
    # Function to use when the user wants to calculate trigonometric functions
    def calculateTrigonometric(self, function):
        if self.secondScreenText.get() == "0":
            self.secondScreenText.set("")
        # We get the complete operation value for the trigonometric function
        completeText = self.secondScreenText.get() + self.firstScreenText.get()
        # "," is the on-screen decimal separator; eval() needs ".".
        completeTextWithDot = completeText.replace(",", ".")
        self.secondScreenText.set(completeText)
        # We get the actual value to evaluate in the trigonometric function
        value = eval(completeTextWithDot)
        # We need to know if the value is in rads or degrees
        typevalue = self.DEGButtonText.get()
        # Now we call solveTrigonometric
        if typevalue == "RAD":
            fulloperation = function + "(" + str(value) + ")"
        else:
            # Wrap in deg2rad so the math function receives radians.
            # NOTE(review): `function` and `deg2rad` must be names visible at
            # eval() time (presumably a numpy star-import) -- confirm.
            fulloperation = function + "(deg2rad(" + str(value) + "))"
        solution = eval(fulloperation)
        solutionTextWithoutDot = str(solution).replace(".", ",")
        self.firstScreenText.set(solutionTextWithoutDot)
        self.secondScreenText.set("0")
        self.operationMade = True
        self.actualHistoryOperation.set(function + "(" + str(value) + ")" + " = "+ solutionTextWithoutDot + "\n\n")
        self.addHistory()
        return
# Function to use when the user clicks on sin() button
def _sin(self):
if self.SINButtonText.get() == "sin()":
self.calculateTrigonometric("sin")
else:
self.calculateTrigonometric("sinh")
return
# Function to use when the user clicks on cos() button
def _cos(self):
if self.COSButtonText.get() == "cos()":
self.calculateTrigonometric("cos")
else:
self.calculateTrigonometric("cosh")
return
# Function to use when the user clicks on tan() button
def _tan(self):
if self.TANButtonText.get() == "tan()":
self.calculateTrigonometric("tan")
else:
self.calculateTrigonometric("tanh")
return
# Function to use when the user clicks on asin() button
def asin(self):
if self.ASINButtonText.get() == "asin()":
self.calculateTrigonometric("arcsin")
else:
self.calculateTrigonometric("arcsinh")
return
# Function to use when the user clicks on acos() button
def acos(self):
if self.ACOSButtonText.get() == "acos()":
self.calculateTrigonometric("arccos")
else:
self.calculateTrigonometric("arccosh")
return
# Function to use when the user clicks on atan() button
def atan(self):
if self.ATANButtonText.get() == "atan()":
self.calculateTrigonometric("arctan")
else:
self.calculateTrigonometric("arctanh")
return
    # Function to use when the user wants to calculate a logarithm or an exponential
    def calculateLogarithmExponential(self, function):
        # NOTE(review): these global statements have no effect here -- the
        # values below are stored as attributes on self, not module globals.
        global operationMade
        global actualHistoryOperation
        if self.secondScreenText.get() == "0":
            self.secondScreenText.set("")
        # We get the complete operation value for the logarithm function
        completeText = self.secondScreenText.get() + self.firstScreenText.get()
        # "," is the on-screen decimal separator; eval() needs ".".
        completeTextWithDot = completeText.replace(",", ".")
        self.secondScreenText.set(completeText)
        # We get the actual value to evaluate in the trigonometric function
        value = eval(completeTextWithDot)
        # Build e.g. "log(2.5)" and evaluate it; `function` must be a name
        # visible at eval() time (presumably from a math/numpy import).
        fulloperation = function + "(" + str(value) + ")"
        solution = eval(fulloperation)
        solutionTextWithoutDot = str(solution).replace(".", ",")
        self.firstScreenText.set(solutionTextWithoutDot)
        self.secondScreenText.set("0")
        self.operationMade = True
        self.actualHistoryOperation.set(function + "(" + str(value) + ")" + " = "+ solutionTextWithoutDot + "\n\n")
        self.addHistory()
        return
    # Function to use when the user clicks on PI button
    def insertPI(self):
        # Inserts pi truncated to six decimals, using the calculator's
        # comma decimal separator.
        self.firstScreenText.set("3,141592")
        return
    # Function to use when the user clicks on DMS button
    def usingDMS(self):
        # Convert the decimal-degree value on screen into a
        # (degrees, minutes, seconds) tuple and log it in the history.
        firstScreen = self.firstScreenText.get()
        secondScreen = self.secondScreenText.get()
        # Concatenate both screens so a pending expression is included.
        if secondScreen != "0":
            new = secondScreen + firstScreen
        else:
            new = firstScreen
        self.secondScreenText.set(new)
        self.firstScreenText.set(0)
        # "," is the on-screen decimal separator; eval() needs ".".
        value = float(eval(str(self.secondScreenText.get()).replace(",", ".")))
        # 1 degree = 3600 seconds: split into minutes + leftover seconds,
        # then whole degrees + leftover minutes.
        mnt,sec = divmod(value*3600,60)
        deg,mnt = divmod(mnt,60)
        result = (deg, mnt, sec)
        self.actualHistoryOperation.set("dms(" + str(value) + ") = " + str(result))
        self.addHistory()
        return
# Function to use when the user clicks on DEG button
def usingDEG(self):
firstScreen = self.firstScreenText.get()
secondScreen = self.secondScreenText.get()
if secondScreen != "0":
new = secondScreen + firstScreen
else:
new = firstScreen
self.secondScreenText.set(new)
self.firstScreenText.set(0)
value = float(eval(str(self.secondScreenText.get()).replace(",", ".")))
result = ((value * 180) / 3.1412)
self.actualHistoryOperation.set("deg(" + str(value) + ") = " + str(result))
self.addHistory()
return
# Function to use when the user clicks on Factorial button
def usingFactorial(self):
firstScreen = self.firstScreenText.get()
secondScreen = self.secondScreenText.get()
if secondScreen != "0":
new = secondScreen + firstScreen
else:
new = firstScreen
self.secondScreenText.set(new)
self.firstScreenText.set(0)
value = float(eval(str(self.secondScreenText.get()).replace(",", ".")))
result = math.factorial(value)
self.actualHistoryOperation.set("fact(" + str(value) + ") = " + str(result))
self.addHistory()
return
# Function to use when the user clicks on 1/x button
def using1X(self):
firstScreen = self.firstScreenText.get()
self.firstScreenText.set(firstScreen + "1/")
return
# Function to use when the user clicks on +- button
def usingPlusMinus(self):
firstScreen = self.firstScreenText.get()
if firstScreen[0] == "-":
self.firstScreenText.set(firstScreen[1:])
else:
self.firstScreenText.set("-" + firstScreen)
return
    # Function to use when the user clicks on PLOT button
    def plotFunction(self):
        # Plot the user-supplied function of x over the configured limits.
        chosenColor = self.colorSelected.get()
        chosenFunction = self.function.get()
        try:
            # Sample x every 0.1 between the configured x-limits.
            x = array(arange(int(self.xminsize.get()), int(self.xmaxsize.get())+1, 0.1))
            # SECURITY NOTE: eval() executes the user-typed expression.
            y = eval(chosenFunction)
        except:
            # NOTE(review): after showing the error the code falls through
            # and still uses x/y, which may be unbound or stale -- confirm
            # whether an early return is intended here.
            messagebox.showerror('Warning', 'There was a problem with the plotting\nPlease check the limits and the function')
        self.a.clear()
        self.a.set_xlim([int(self.xminsize.get()), int(self.xmaxsize.get())])
        self.a.set_ylim([int(self.yminsize.get()), int(self.ymaxsize.get())])
        self.a.plot(x,y, color = chosenColor)
        self.canvas.draw()
        self.canvas.get_tk_widget().pack(side = BOTTOM, fill = BOTH, expand = True)
        # NOTE(review): NavigationToolbar2TkAgg was removed from matplotlib
        # (NavigationToolbar2Tk in matplotlib >= 3.0) -- verify the pinned
        # matplotlib version supports this name.
        self.toolbar = NavigationToolbar2TkAgg(self.canvas, self.plottingFrame)
        self.canvas._tkcanvas.pack(side = TOP, fill = BOTH, expand = True)
        return
# Build and run the calculator application when executed directly.
if __name__ == '__main__':
myApp = App() | 40.972527 | 138 | 0.593 | 14,378 | 0.964061 | 0 | 0 | 0 | 0 | 0 | 0 | 2,907 | 0.194918 |
bc22939116e8ef6b4d13b61eab3aac762d3ff398 | 7,386 | py | Python | client/tests/test_up_args.py | gefyrahq/gefyra | 0bc205b4b01100c640081ead671bdb195761299b | [
"Apache-2.0"
] | 41 | 2022-03-24T15:45:56.000Z | 2022-03-31T08:07:19.000Z | client/tests/test_up_args.py | Schille/gefyra | 43abd17b8ed5867a26266b5e7a5d6f9edfebab4a | [
"Apache-2.0"
] | 23 | 2021-12-02T10:29:09.000Z | 2022-03-17T18:10:57.000Z | client/tests/test_up_args.py | Schille/gefyra | 43abd17b8ed5867a26266b5e7a5d6f9edfebab4a | [
"Apache-2.0"
] | 3 | 2022-02-18T21:16:10.000Z | 2022-03-09T22:46:28.000Z | from gefyra.__main__ import up_parser, up_command
from gefyra.configuration import ClientConfiguration, __VERSION__
# Fixture values shared by the argument-parsing tests below.
REGISTRY_URL = "my-reg.io/gefyra"
QUAY_REGISTRY_URL = "quay.io/gefyra"
STOWAWAY_LATEST = "my-reg.io/gefyra/stowaway:latest"
CARGO_LATEST = "my-reg.io/gefyra/cargo:latest"
OPERATOR_LATEST = "my-reg.io/gefyra/operator:latest"
CARRIER_LATEST = "my-reg.io/gefyra/carrier:latest"
KUBE_CONFIG = "~/.kube/config"
def test_parse_registry_a():
    """--registry (long form) is passed through to the configuration."""
    parsed = up_parser.parse_args(["--registry", REGISTRY_URL])
    config = ClientConfiguration(registry_url=parsed.registry)
    assert config.REGISTRY_URL == REGISTRY_URL
def test_parse_registry_b():
    """A trailing slash in the registry URL is normalised (long and short flag)."""
    args = up_parser.parse_args(["--registry", "my-reg.io/gefyra/"])
    configuration = ClientConfiguration(registry_url=args.registry)
    # Bug fix: this was `assert configuration.REGISTRY_URL, REGISTRY_URL`,
    # which only checked truthiness (REGISTRY_URL was the assert *message*),
    # so a wrong but non-empty URL would still pass.
    assert configuration.REGISTRY_URL == REGISTRY_URL
    args = up_parser.parse_args(["-r", "my-reg.io/gefyra/"])
    configuration = ClientConfiguration(registry_url=args.registry)
    assert configuration.REGISTRY_URL == REGISTRY_URL
def test_parse_no_registry():
    """Omitting --registry falls back to the quay.io default."""
    parsed = up_parser.parse_args()
    config = ClientConfiguration(registry_url=parsed.registry)
    assert config.REGISTRY_URL == QUAY_REGISTRY_URL
def test_parse_no_stowaway_image():
    """Without --stowaway the versioned default image is used."""
    parsed = up_parser.parse_args()
    config = ClientConfiguration(stowaway_image_url=parsed.stowaway)
    assert config.STOWAWAY_IMAGE == f"quay.io/gefyra/stowaway:{__VERSION__}"
def test_parse_no_carrier_image():
    """Without --carrier the versioned default image is used."""
    parsed = up_parser.parse_args()
    config = ClientConfiguration(carrier_image_url=parsed.carrier)
    assert config.CARRIER_IMAGE == f"quay.io/gefyra/carrier:{__VERSION__}"
def test_parse_no_operator_image():
    """Without --operator the versioned default image is used."""
    parsed = up_parser.parse_args()
    config = ClientConfiguration(operator_image_url=parsed.operator)
    assert config.OPERATOR_IMAGE == f"quay.io/gefyra/operator:{__VERSION__}"
def test_parse_no_cargo_image():
    """Without --cargo the versioned default image is used."""
    parsed = up_parser.parse_args()
    config = ClientConfiguration(cargo_image_url=parsed.cargo)
    assert config.CARGO_IMAGE == f"quay.io/gefyra/cargo:{__VERSION__}"
def test_parse_stowaway_image():
    """--stowaway/-s overrides the stowaway image, with or without -r."""
    for argv in (["--stowaway", STOWAWAY_LATEST], ["-s", STOWAWAY_LATEST]):
        parsed = up_parser.parse_args(argv)
        config = ClientConfiguration(stowaway_image_url=parsed.stowaway)
        assert config.STOWAWAY_IMAGE == STOWAWAY_LATEST
    parsed = up_parser.parse_args(["-s", STOWAWAY_LATEST, "-r", QUAY_REGISTRY_URL])
    config = ClientConfiguration(
        registry_url=parsed.registry, stowaway_image_url=parsed.stowaway
    )
    assert config.STOWAWAY_IMAGE == STOWAWAY_LATEST
def test_parse_cargo_image():
    """--cargo/-a overrides the cargo image, with or without -r."""
    for argv in (["--cargo", CARGO_LATEST], ["-a", CARGO_LATEST]):
        parsed = up_parser.parse_args(argv)
        config = ClientConfiguration(cargo_image_url=parsed.cargo)
        assert config.CARGO_IMAGE == CARGO_LATEST
    parsed = up_parser.parse_args(["-a", CARGO_LATEST, "-r", QUAY_REGISTRY_URL])
    config = ClientConfiguration(
        registry_url=parsed.registry, cargo_image_url=parsed.cargo
    )
    assert config.CARGO_IMAGE == CARGO_LATEST
def test_parse_operator_image():
    """--operator/-o overrides the operator image, with or without -r."""
    for argv in (["--operator", OPERATOR_LATEST], ["-o", OPERATOR_LATEST]):
        parsed = up_parser.parse_args(argv)
        config = ClientConfiguration(operator_image_url=parsed.operator)
        assert config.OPERATOR_IMAGE == OPERATOR_LATEST
    parsed = up_parser.parse_args(["-o", OPERATOR_LATEST, "-r", QUAY_REGISTRY_URL])
    config = ClientConfiguration(
        registry_url=parsed.registry, operator_image_url=parsed.operator
    )
    assert config.OPERATOR_IMAGE == OPERATOR_LATEST
def test_parse_carrier_image():
    """--carrier/-c overrides the carrier image, with or without -r."""
    for argv in (["--carrier", CARRIER_LATEST], ["-c", CARRIER_LATEST]):
        parsed = up_parser.parse_args(argv)
        config = ClientConfiguration(carrier_image_url=parsed.carrier)
        assert config.CARRIER_IMAGE == CARRIER_LATEST
    parsed = up_parser.parse_args(["-c", CARRIER_LATEST, "-r", QUAY_REGISTRY_URL])
    config = ClientConfiguration(
        registry_url=parsed.registry, carrier_image_url=parsed.carrier
    )
    assert config.CARRIER_IMAGE == CARRIER_LATEST
def test_parse_combination_a():
    """Only -c given: registry/operator keep defaults, carrier is overridden."""
    parsed = up_parser.parse_args(["-c", CARRIER_LATEST])
    config = ClientConfiguration(
        registry_url=parsed.registry,
        stowaway_image_url=parsed.stowaway,
        operator_image_url=parsed.operator,
        cargo_image_url=parsed.cargo,
        carrier_image_url=parsed.carrier,
    )
    assert config.REGISTRY_URL == QUAY_REGISTRY_URL
    assert config.OPERATOR_IMAGE == f"quay.io/gefyra/operator:{__VERSION__}"
    assert config.CARRIER_IMAGE == CARRIER_LATEST
def test_parse_combination_b():
    """Only -r given: every default image is rebuilt from the new registry."""
    parsed = up_parser.parse_args(["-r", REGISTRY_URL])
    config = ClientConfiguration(
        registry_url=parsed.registry,
        stowaway_image_url=parsed.stowaway,
        operator_image_url=parsed.operator,
        cargo_image_url=parsed.cargo,
        carrier_image_url=parsed.carrier,
    )
    assert config.REGISTRY_URL == REGISTRY_URL
    assert config.OPERATOR_IMAGE == f"my-reg.io/gefyra/operator:{__VERSION__}"
    assert config.CARRIER_IMAGE == f"my-reg.io/gefyra/carrier:{__VERSION__}"
def test_parse_combination_c():
    """-r plus -c: registry defaults rebuilt, explicit carrier kept as-is."""
    parsed = up_parser.parse_args(
        ["-r", REGISTRY_URL, "-c", "quay.io/gefyra/carrier:latest"]
    )
    config = ClientConfiguration(
        registry_url=parsed.registry,
        stowaway_image_url=parsed.stowaway,
        operator_image_url=parsed.operator,
        cargo_image_url=parsed.cargo,
        carrier_image_url=parsed.carrier,
    )
    assert config.REGISTRY_URL == REGISTRY_URL
    assert config.OPERATOR_IMAGE == f"my-reg.io/gefyra/operator:{__VERSION__}"
    assert config.CARRIER_IMAGE == "quay.io/gefyra/carrier:latest"
def test_parse_endpoint():
    """-e sets the cargo endpoint on the configuration verbatim."""
    parsed = up_parser.parse_args(["-e", "10.30.34.25:31820"])
    config = ClientConfiguration(
        cargo_endpoint=parsed.endpoint,
        registry_url=parsed.registry,
        stowaway_image_url=parsed.stowaway,
        operator_image_url=parsed.operator,
        cargo_image_url=parsed.cargo,
        carrier_image_url=parsed.carrier,
    )
    assert config.CARGO_ENDPOINT == "10.30.34.25:31820"
def test_parse_up_fct(monkeypatch):
    """up_command() delegates to gefyra.api.up with the parsed arguments."""
    monkeypatch.setattr("gefyra.api.up", lambda config: True)
    parsed = up_parser.parse_args(["-e", "10.30.34.25:31820"])
    up_command(parsed)
def test_parse_up_kube_conf():
    """An explicit kube config path is stored verbatim."""
    config = ClientConfiguration(kube_config_file=KUBE_CONFIG)
    assert config.KUBE_CONFIG_FILE == KUBE_CONFIG
def test_parse_up_no_kube_conf():
    """Without an explicit path the kube config file defaults to None."""
    config = ClientConfiguration()
    assert config.KUBE_CONFIG_FILE is None
| 37.876923 | 85 | 0.753859 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 819 | 0.110885 |
bc22db5abfbd89d0661acf1f13af1e246c8ff360 | 6,476 | py | Python | Experiments/Fully Assembled Actutor Module/RASH_example_trajectory.py | kevinAlfsen/RASH-Bachelor-Thesis | 143a2a3a6997fd00caa420666a0a5c5570ce82ab | [
"BSD-3-Clause"
] | null | null | null | Experiments/Fully Assembled Actutor Module/RASH_example_trajectory.py | kevinAlfsen/RASH-Bachelor-Thesis | 143a2a3a6997fd00caa420666a0a5c5570ce82ab | [
"BSD-3-Clause"
] | null | null | null | Experiments/Fully Assembled Actutor Module/RASH_example_trajectory.py | kevinAlfsen/RASH-Bachelor-Thesis | 143a2a3a6997fd00caa420666a0a5c5570ce82ab | [
"BSD-3-Clause"
] | null | null | null | # coding: utf8
import argparse
import math
import os
import sys
from time import clock
import libmaster_board_sdk_pywrap as mbs
from Trajectory import trajectory
from Plotter import overlay_plot
def example_script(name_interface):
    """Run a ~20 s PD position/velocity control demo on one masterboard driver.

    Enables the controlled drivers, waits for the acknowledge message, then
    tracks a one-revolution trajectory on motor 0 while logging positions,
    velocities, errors and currents, and finally plots everything via
    overlay_plot().

    NOTE(review): `name_interface` is currently ignored -- the interface
    name "enp0s31f6" is hard-coded below.
    NOTE(review): time.clock() was removed in Python 3.8; this script needs
    time.perf_counter() on modern interpreters -- confirm target version.
    """
    N_SLAVES = 6 # Maximum number of controled drivers
    N_SLAVES_CONTROLED = 1 # Current number of controled drivers
    cpt = 0 # Iteration counter
    dt = 0.001 # Time step
    t = 0 # Current time
    kp = 1 # Proportional gain
    kd = 0.06 # Derivative gain
    iq_sat = 1.0 # Maximum amperage (A)
    init_pos = [0.0 for i in range(N_SLAVES * 2)] # List that will store the initial position of motors
    state = 0 # State of the system (ready (1) or not (0))
    t_f = 20
    # Logging buffers for the final overlay plot.
    p_arr = []
    v_arr = []
    p_ref_arr = []
    v_ref_arr = []
    p_err_arr = []
    v_err_arr = []
    curr_arr = []
    t_arr = []
    init_trajectory = True
    # contrary to c++, in python it is interesting to build arrays
    # with connected motors indexes so we can simply go through them in main loop
    motors_spi_connected_indexes = [] # indexes of the motors on each connected slaves
    print("-- Start of example script --")
    os.nice(-20) # Set the process to highest priority (from -20 highest to +20 lowest)
    robot_if = mbs.MasterBoardInterface("enp0s31f6")
    robot_if.Init() # Initialization of the interface between the computer and the master board
    for i in range(N_SLAVES_CONTROLED): # We enable each controler driver and its two associated motors
        robot_if.GetDriver(i).motor1.SetCurrentReference(0)
        robot_if.GetDriver(i).motor2.SetCurrentReference(0)
        robot_if.GetDriver(i).motor1.Enable()
        robot_if.GetDriver(i).motor2.Enable()
        robot_if.GetDriver(i).EnablePositionRolloverError()
        robot_if.GetDriver(i).SetTimeout(5)
        robot_if.GetDriver(i).Enable()
    last = clock()
    # Keep sending the init message every dt until the board acknowledges.
    while (not robot_if.IsTimeout() and not robot_if.IsAckMsgReceived()):
        if ((clock() - last) > dt):
            last = clock()
            robot_if.SendInit()
    if robot_if.IsTimeout():
        print("Timeout while waiting for ack.")
    else:
        # fill the connected motors indexes array
        for i in range(N_SLAVES_CONTROLED):
            if robot_if.GetDriver(i).IsConnected():
                # if slave i is connected then motors 2i and 2i+1 are potentially connected
                motors_spi_connected_indexes.append(2 * i)
                motors_spi_connected_indexes.append(2 * i + 1)
    while ((not robot_if.IsTimeout()) and (clock() < 20)): # Stop after 15 seconds (around 5 seconds are used at the start for calibration)
        if t > t_f:
            break
        if ((clock() - last) > dt):
            last = clock()
            cpt += 1
            t += dt
            robot_if.ParseSensorData() # Read sensor data sent by the masterboard
            if (state == 0): # If the system is not ready
                state = 1
                # for all motors on a connected slave
                for i in motors_spi_connected_indexes: # Check if all motors are enabled and ready
                    if not (robot_if.GetMotor(i).IsEnabled() and robot_if.GetMotor(i).IsReady()):
                        state = 0
                    init_pos[i] = robot_if.GetMotor(i).GetPosition()
                    t = 0
            else: # If the system is ready
                # for all motors on a connected slave
                for i in motors_spi_connected_indexes:
                    if i % 2 == 0 and robot_if.GetDriver(i // 2).GetErrorCode() == 0xf:
                        #print("Transaction with SPI{} failed".format(i // 2))
                        continue #user should decide what to do in that case, here we ignore that motor
                    if robot_if.GetMotor(i).IsEnabled() and i == 0:
                        if init_trajectory:
                            # Plan a 2 s trajectory covering one output
                            # revolution (2*pi at the motor times gear ratio).
                            t_f = t + 2
                            t_i = t
                            q_i = robot_if.GetMotor(0).GetPosition()
                            q_f = q_i + 2*math.pi * 9 # 9 = gear ratio
                            traj = trajectory(q_i, q_f, t_i, t_f)
                            init_trajectory = False
                        if t < t_f:
                            ref, v_ref = traj.step(t)
                        position = robot_if.GetMotor(0).GetPosition()
                        velocity = robot_if.GetMotor(0).GetVelocity()
                        p_err = ref - position # Position error
                        v_err = v_ref - velocity # Velocity error
                        cur = kp * p_err + kd * v_err # Output of the PD controler (amperage)
                        curr_arr.append(cur)
                        if (cur > iq_sat): # Check saturation
                            cur = iq_sat
                        if (cur < -iq_sat):
                            cur = -iq_sat
                        p_arr.append(position - init_pos[0])
                        v_arr.append(velocity)
                        p_ref_arr.append(ref - init_pos[0])
                        v_ref_arr.append(v_ref)
                        p_err_arr.append(p_err)
                        v_err_arr.append(v_err)
                        t_arr.append(t)
                        if (position < q_f):
                            #continue
                            robot_if.GetMotor(i).SetCurrentReference(cur) # Set reference currents
            robot_if.SendCommand() # Send the reference currents to the master board
    overlay_plot(p_arr, v_arr, p_ref_arr, v_ref_arr, p_err_arr, v_err_arr, t_arr, curr_arr, kp, kd)
    robot_if.Stop() # Shut down the interface between the computer and the master board
    if robot_if.IsTimeout():
        print("Masterboard timeout detected.")
        print("Either the masterboard has been shut down or there has been a connection issue with the cable/wifi.")
    print("-- End of example script --")
def main():
    """Parse the command line and launch the masterboard example script."""
    parser = argparse.ArgumentParser(description='Example masterboard use in python.')
    parser.add_argument('-i', '--interface', required=True,
                        help='Name of the interface (use ifconfig in a terminal), for instance "enp1s0"')
    arguments = parser.parse_args()
    example_script(arguments.interface)
# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 36.382022 | 140 | 0.553737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,856 | 0.286243 |
bc2355c41df28e3cbb07b0fa1bf6a887e83f0856 | 8,074 | py | Python | main_mini.py | idosharon/Leechy-Prototype-Spectrum-Analyzer | 7708ee9ae538b8f7db5e0596299ac40fb48d75d3 | [
"Unlicense"
] | null | null | null | main_mini.py | idosharon/Leechy-Prototype-Spectrum-Analyzer | 7708ee9ae538b8f7db5e0596299ac40fb48d75d3 | [
"Unlicense"
] | null | null | null | main_mini.py | idosharon/Leechy-Prototype-Spectrum-Analyzer | 7708ee9ae538b8f7db5e0596299ac40fb48d75d3 | [
"Unlicense"
] | 3 | 2022-01-02T08:46:49.000Z | 2022-01-15T16:02:50.000Z | # Leechy Prototype Spectrum Analyzer.
# Important: MAKE SURE KEYBOARD IS ON ENGLISH AND CAPSLOCK IS NOT ON!
import cv2,pickle,xlsxwriter,time,datetime,os, os.path
from imutils import rotate_bound
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
from PIL import Image, ImageTk
print('\033[1m' + '\033[91m' + '\nLeechy Labs | Spectrum Analyser\n' + '\033[0m')
# --- Run-time state and tunables --------------------------------------------
# Sample counters resume from the number of files already saved on disk so
# new recordings never overwrite existing ones.
DRUNK_DIR = 'data/graph/drunk'
NOT_DRUNK_DIR = 'data/graph/not_drunk'

def _count_files(directory):
    # Number of regular files currently stored in `directory`.
    return len([name for name in os.listdir(directory)
                if os.path.isfile(os.path.join(directory, name))])

drunkdatanum = _count_files(DRUNK_DIR)
# Bug fix: this previously listed DRUNK_DIR while joining the names onto
# NOT_DRUNK_DIR, so the count was wrong whenever the folders differed.
not_drunkdatanum = _count_files(NOT_DRUNK_DIR)
row = 1                  # next worksheet row for recorded samples
sensetivity = 615        # threshold for matching the reference spectrum
x1 = 480                 # selection rectangle corners (top-left/bottom-right)
y1 = 1000
x2 = 578
y2 = 1040
angle = 52.5             # rotation applied to every camera frame (degrees)
flip = False             # mirror the plotted spectrum horizontally
plot_graph = False       # whether the live spectrum plot is active
cmp_graph = [500]        # stored reference spectrum ([500] means "unset")
nor_color = (0, 0, 1)    # current plot colour (RGB)
mouse_rectangle = False  # True while the user is dragging a selection
clear = True
live = False             # continuous re-plotting mode
auto = False
plt.ion()                # interactive matplotlib mode so draws do not block
def rotate_clockwise(matrix, degree=90):
    """Rotate a 2-D matrix clockwise by a multiple of 90 degrees.

    Returns a list of row-lists (the input is returned unchanged when
    degree is 0).  Bug fix: the old recursive version passed a zip object
    back into itself, so any rotation beyond 90 degrees raised TypeError
    internally and silently yielded [[0]]; the rows are now materialised
    between steps, making 180/270 work.  Non-matrix input still falls back
    to [[0]].
    """
    if not degree:
        return matrix
    try:
        rows = [list(row) for row in matrix]
        for _ in range((degree // 90) % 4):
            rows = [list(col) for col in zip(*rows[::-1])]
        return rows
    except TypeError:
        return [[0]]
def mouse_event(event,x,y,flags,param):
    # OpenCV mouse callback: lets the user drag a selection rectangle whose
    # corners are kept in the module-level coordinates (x1, y1)-(x2, y2).
    global x1, x2, y1, y2, mouse_rectangle
    if event == cv2.EVENT_LBUTTONDOWN:
        # Start of a drag: remember the anchor corner.
        mouse_rectangle = True
        x1,y1 = x,y
    elif event == cv2.EVENT_MOUSEMOVE:
        if mouse_rectangle == True:
            # Live-update the opposite corner while dragging.
            x2,y2 = x, y
    elif event == cv2.EVENT_LBUTTONUP:
        x2,y2 = x, y
        # Normalise so (x1, y1) is always the top-left corner.
        if x2 < x1: x1,x2 = x2,x1
        if y2 < y1: y1,y2 = y2,y1
        mouse_rectangle = False
# Preview window plus the mouse callback for region selection.
cv2.namedWindow("Spectrum Analyser", cv2.WINDOW_NORMAL)
cv2.setMouseCallback("Spectrum Analyser", mouse_event)
vc = cv2.VideoCapture(0)
# Pick a unique workbook name ("data", "data1", "data2", ...) based on how
# many sheet files already exist in SHEET_DIR.
SHEET_DIR = "data/sheet"
sheet_name = "data"
if len([name for name in os.listdir(SHEET_DIR) if os.path.isfile(os.path.join(SHEET_DIR, name))])-1 > 0:
    sheet_name = sheet_name + str(len([name for name in os.listdir(SHEET_DIR) if os.path.isfile(os.path.join(SHEET_DIR, name))])-1)
workbook = xlsxwriter.Workbook('data/sheet/' + sheet_name + '.xlsx')
worksheet = workbook.add_worksheet()
# Wide columns / tall rows so the embedded images fit inside single cells.
worksheet.set_column("A1:J5", 52)
worksheet.set_default_row(220)
worksheet.write(0,1, "Graph")
worksheet.write(0,2, "Status")
worksheet.write(0,3, "Image")
worksheet.write(0,4, "Settings")
worksheet.write(0,5, "Date")
if row < 0 :
    print("row can't be less then 0!")
    exit()
def AddToWorksheet(row,status):
    # Record one sample in the spreadsheet: the saved graph and camera-frame
    # images, the drunk/not_drunk label, settings and a timestamp.
    # `status` selects both the subfolder and which counter names the files.
    worksheet.insert_image(row,1, 'data/graph/' + str(status) + '/' + (str(not_drunkdatanum) if status=="not_drunk" else str(drunkdatanum)) + '.png', {'x_scale': 0.47,'y_scale': 0.5,'x_offset': 2,'y_offset': 2,'positioning': 1})
    worksheet.insert_image(row,3, 'data/frames/' + str(status) + '/' + (str(not_drunkdatanum) if status=="not_drunk" else str(drunkdatanum)) + '.png', {'x_scale': 0.47,'y_scale': 0.5,'x_offset': 2,'y_offset': 2,'positioning': 1})
    worksheet.write(row, 0, row)
    worksheet.write(row, 2, status)
    # NOTE(review): `settingsJson` is not defined anywhere in this file, so
    # this line raises NameError when called -- confirm where it is meant
    # to come from.
    worksheet.write(row, 4, str(settingsJson))
    worksheet.write(row, 5, str(datetime.datetime.now()))
    print("\n----------------\nSaved!" + "\nRow:" + str(row) + "\nNot Drunk Data Num:" + str(not_drunkdatanum) + "\nDrunk Data Num:" + str(drunkdatanum))
def AutoSpectrumFinder(frame):
    """Locate the largest bright contour in the left half of the rotated
    frame and return a selection rectangle (x1, y1, x2, y2) around its
    midpoint.

    Raises IndexError when no contour is found; the caller catches any
    exception and asks the user to select the region manually.
    """
    frame = rotate_bound(frame, angle)
    height, width = frame.shape[:2]
    # Only the left half of the image is searched for the spectrum.
    frame = frame[0:height, 0:width // 2]
    grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    _, thresholded = cv2.threshold(grayscale, 127, 255, 0)
    contours, _ = cv2.findContours(thresholded, 2, 1)
    # Pick the contour with the largest area.  (The original shadowed the
    # builtin max() and kept several unused locals.)
    big_contour = []
    largest_area = 0
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > largest_area:
            largest_area = area
            big_contour = contour
    # The draw call is kept for its visual-debugging side effect on `frame`.
    cv2.drawContours(frame, big_contour, -1, (0, 255, 0), 3)
    point = big_contour[int(len(big_contour) / 2)][0]
    x, y = point[0], point[1]
    return x - 50, y - 20, x + 50, y + 10
# --- Main capture / UI loop -------------------------------------------------
# Grab the first frame; if the camera is unavailable the loop never starts.
if vc.isOpened(): rval, frame = vc.read()
else: rval = False
while rval:
    # Rotate the raw camera frame and draw the selection rectangle on a copy.
    frame = (rotate_bound(frame, angle))
    prntframe = frame.copy()
    cv2.rectangle(prntframe, (x1, y1), (x2, y2), (0,100,0), 2)
    # Grayscale crop of the selected region; dummy matrix when the crop fails.
    try: cutframe = cv2.cvtColor(frame[y1:y2, x1:x2], cv2.COLOR_BGR2GRAY)
    except cv2.error: cutframe = [[0]]
    cv2.imshow("Spectrum Analyser", prntframe)
    key = cv2.waitKey(500)
    rval, frame = vc.read()
    graph = [500]
    if plot_graph:
        if not mouse_rectangle:
            if live == False: plot_graph = False
            else: plt.clf()
            # Row-brightness sums of the rotated selection build the curve.
            for line in list(rotate_clockwise(cutframe)):
                bright = 0
                for point in line: bright += point
                graph.append(bright)
            graph.append(500)
            plt.ylim(top=max(graph))
            plt.ylim(bottom=min(graph))
            if not cmp_graph == [500]:
                # Reference spectrum in black; live curve coloured by match.
                plt.plot(cmp_graph[::-1] if flip else cmp_graph, color=(0, 0, 0))
                plt.ylim(top=max([max(cmp_graph), max(graph)]))
                plt.ylim(bottom=min([min(cmp_graph), min(graph)]))
                nor_color = (0, 1, 0)
                for i, x in enumerate(graph):
                    if abs(cmp_graph[i] - x) < sensetivity: nor_color = (1, 0, 0)
            plt.plot(graph[::-1] if flip else graph, color=nor_color)
            plt.draw()
            plt.pause(0.001)
        else: plt.draw()
    # --- Keyboard controls: rectangle editing, rotation and commands -------
    if key == ord("w"): y1 -= 2
    elif key == ord("s"): y2 += 2
    elif key == ord("l"):
        if live: live = False
        else: live = True
        print("Live = " + str(live))
    elif key == ord("/"):
        print("Searching...")
        try:
            x1, y1, x2, y2 = AutoSpectrumFinder(frame)
            print("Found", (x1, y1, x2, y2))
        except:
            print("Could'nt find, please select manually")
    elif key == ord("\\"): cv2.destroyWindow("mask")
    elif key == ord("a"): x1 -= 2
    elif key == ord("d"): x2 += 2
    elif key == ord("z"): y1 += 2
    elif key == ord("x"): y2 -= 2
    elif key == ord("q"): x1 += 2
    elif key == ord("e"): x2 -= 2
    elif key == ord("r"):
        angle -= 0.5
        if angle <= 0: angle = 360
        print("Angle = " + str(angle))
    elif key == ord("t"):
        angle += 0.5
        if angle >= 360: angle = 0
        print("Angle = " + str(angle))
    elif key == ord("y"):
        angle += 180
        # Bug fix: sleep() was never imported; use the imported time module.
        time.sleep(0.1)
        if angle <= 0: angle = 360
        if angle >= 360: angle = 0
        print("Angle = " + str(angle))
    elif key == ord("n"):
        sensetivity -= 10
        print("sensitivity = " + str(sensetivity))
    elif key == ord("m"):
        sensetivity += 10
        print("sensitivity = " + str(sensetivity))
    elif key == ord("o"):
        plot_graph = False
        plt.clf()
        plt.close()
        print("\nGraph: Closed")
    elif key == ord("p"):
        plot_graph = True
        plt.clf()
        print("\nGraph: Plotting...")
    elif key == ord("v"):
        flip = not flip
        # Bug fix: sleep() was never imported; use the imported time module.
        time.sleep(0.1)
        print("Flip = " + str(flip))
    elif key == ord("c"):
        print("System: Closed")
        save = ""
        # Bug fix: the condition used `or`, which is always true; the breaks
        # below keep the observable behaviour identical either way.
        while save != 'y' and save != 'n':
            save = input("Do you want to save the sheet? (y/n)")
            if save == "n":
                workbook.close()
                os.remove("data/sheet/" + sheet_name + ".xlsx")
                print("Deleted!")
                break
            else:
                workbook.close()
                break
        break
    elif key == ord("k"):
        cmp_graph = graph
        nor_color = (0,1,0)
    elif key == ord("j"):
        cmp_graph = [500]
        nor_color = (1, 0, 0)
    elif key == ord("h"):
        # Bug fix: close the settings file instead of leaking the handle.
        with open("settings.p", "wb") as settings_file:
            pickle.dump({"sensitivity": sensetivity, "x1": x1, "y1": y1, "x2": x2, "y2": y2, "angle": angle, "flip": flip}, settings_file)
    elif key == ord("g"):
        try:
            print("Paused graph, press CTRL+C while focused on the terminal to resume it.")
            plt.pause(10000000)
        except KeyboardInterrupt:
            print("Resumed program")
    elif key == ord("]") and not live:
        print("Recorded Drunk!")
        plt.savefig('data/graph/drunk/' + str(drunkdatanum) + '.png')
        cv2.imwrite("data/frames/drunk/" + str(drunkdatanum) + '.png', cutframe)
        AddToWorksheet(row,"drunk")
        drunkdatanum += 1
        row += 1
    elif key == ord("[") and not live:
        print("Recorded Not Drunk!")
        plt.savefig('data/graph/not_drunk/' + str(not_drunkdatanum) + '.png')
        # Bug fix: the frame was saved under the *drunk* counter, so
        # AddToWorksheet() embedded a file that did not exist.
        cv2.imwrite("data/frames/not_drunk/" + str(not_drunkdatanum) + '.png', cutframe)
        AddToWorksheet(row,"not_drunk")
        not_drunkdatanum += 1
        row += 1
# Bug fix: the preview window is named "Spectrum Analyser"; the old call
# tried to destroy a window ("Infrared spectrometer") that never existed.
cv2.destroyWindow("Spectrum Analyser")
workbook.close() | 30.126866 | 227 | 0.628685 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,323 | 0.163859 |
bc2367f7ad3d249653384c57887795fc9732fb47 | 136 | py | Python | models/__init__.py | dudtjakdl/OpenNMT-Korean-To-English | 32fcdb860906f40f84375ec17a23ae32cb90baa0 | [
"Apache-2.0"
] | 1,491 | 2017-06-30T16:15:40.000Z | 2022-03-22T02:05:16.000Z | models/__init__.py | dudtjakdl/OpenNMT-Korean-To-English | 32fcdb860906f40f84375ec17a23ae32cb90baa0 | [
"Apache-2.0"
] | 128 | 2017-07-07T21:41:03.000Z | 2021-06-30T13:18:23.000Z | models/__init__.py | dudtjakdl/OpenNMT-Korean-To-English | 32fcdb860906f40f84375ec17a23ae32cb90baa0 | [
"Apache-2.0"
] | 434 | 2017-07-08T12:35:15.000Z | 2022-03-25T06:28:13.000Z | from .EncoderRNN import EncoderRNN
from .DecoderRNN import DecoderRNN
from .TopKDecoder import TopKDecoder
from .seq2seq import Seq2seq
| 27.2 | 36 | 0.852941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
bc24a0ca761d939b80b88836c40772f1d9ffb66b | 3,055 | py | Python | panel/layout/spacer.py | sthagen/holoviz-panel | 9abae5ac78e55857ed209de06feae3439f2f533b | [
"BSD-3-Clause"
] | 601 | 2018-08-25T20:01:22.000Z | 2019-11-19T19:37:08.000Z | panel/layout/spacer.py | sthagen/holoviz-panel | 9abae5ac78e55857ed209de06feae3439f2f533b | [
"BSD-3-Clause"
] | 626 | 2018-08-27T16:30:33.000Z | 2019-11-20T17:02:00.000Z | panel/layout/spacer.py | sthagen/holoviz-panel | 9abae5ac78e55857ed209de06feae3439f2f533b | [
"BSD-3-Clause"
] | 73 | 2018-09-28T07:46:05.000Z | 2019-11-18T22:45:36.000Z | """
Spacer components to add horizontal or vertical space to a layout.
"""
import param
from bokeh.models import Div as BkDiv, Spacer as BkSpacer
from ..reactive import Reactive
class Spacer(Reactive):
    """
    The `Spacer` layout is a very versatile component which makes it easy to
    put fixed or responsive spacing between objects.
    Like all other components spacers support both absolute and responsive
    sizing modes.
    Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers
    :Example:
    >>> pn.Row(
    ...     1, pn.Spacer(width=200),
    ...     2, pn.Spacer(width=100),
    ...     3
    ... )
    """
    _bokeh_model = BkSpacer
    def _get_model(self, doc, root=None, parent=None, comm=None):
        # Translate the current parameter values into bokeh properties.
        params = self._init_params()
        model = self._bokeh_model(**self._process_param_change(params))
        # A model created without a parent layout becomes its own root.
        root = model if root is None else root
        self._models[root.ref['id']] = (model, parent)
        return model
class VSpacer(Spacer):
    """
    The `VSpacer` layout provides responsive vertical spacing.
    Using this component we can space objects equidistantly in a layout and
    allow the empty space to shrink when the browser is resized.
    Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers
    :Example:
    >>> pn.Column(
    ...     pn.layout.VSpacer(), 'Item 1',
    ...     pn.layout.VSpacer(), 'Item 2',
    ...     pn.layout.VSpacer()
    ... )
    """
    # Read-only: a VSpacer always stretches to fill the available height.
    sizing_mode = param.Parameter(default='stretch_height', readonly=True)
class HSpacer(Spacer):
    """
    The `HSpacer` layout provides responsive horizontal spacing.
    Using this component we can space objects equidistantly in a layout and
    allow the empty space to shrink when the browser is resized.
    Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers
    :Example:
    >>> pn.Row(
    ...     pn.layout.HSpacer(), 'Item 1',
    ...     pn.layout.HSpacer(), 'Item 2',
    ...     pn.layout.HSpacer()
    ... )
    """
    # Read-only: an HSpacer always stretches to fill the available width.
    sizing_mode = param.Parameter(default='stretch_width', readonly=True)
class Divider(Reactive):
    """
    A `Divider` draws a horizontal rule (a `<hr>` tag in HTML) to separate
    multiple components in a layout. It automatically spans the full width of
    the container.
    Reference: https://panel.holoviz.org/reference/layouts/Divider.html
    :Example:
    >>> pn.Column(
    ...     '# Lorem Ipsum',
    ...     pn.layout.Divider(),
    ...     'A very long text... '
    >>> )
    """
    width_policy = param.ObjectSelector(default="fit", readonly=True)
    _bokeh_model = BkDiv
    def _get_model(self, doc, root=None, parent=None, comm=None):
        # Render the rule as a full-size Div containing a margin-less <hr>.
        properties = self._process_param_change(self._init_params())
        properties['style'] = {'width': '100%', 'height': '100%'}
        model = self._bokeh_model(text='<hr style="margin: 0px">', **properties)
        root = model if root is None else root
        self._models[root.ref['id']] = (model, parent)
        return model
| 27.035398 | 80 | 0.636661 | 2,862 | 0.936825 | 0 | 0 | 0 | 0 | 0 | 0 | 1,933 | 0.632733 |
bc24deaeae5652bf63e557de1533a49cda47444a | 432 | py | Python | reddit2telegram/channels/r_gentlemanboners/app.py | mainyordle/reddit2telegram | 1163e15aed3b6ff0fba65b222d3d9798f644c386 | [
"MIT"
] | 187 | 2016-09-20T09:15:54.000Z | 2022-03-29T12:22:33.000Z | reddit2telegram/channels/r_gentlemanboners/app.py | mainyordle/reddit2telegram | 1163e15aed3b6ff0fba65b222d3d9798f644c386 | [
"MIT"
] | 84 | 2016-09-22T14:25:07.000Z | 2022-03-19T01:26:17.000Z | reddit2telegram/channels/r_gentlemanboners/app.py | mainyordle/reddit2telegram | 1163e15aed3b6ff0fba65b222d3d9798f644c386 | [
"MIT"
] | 172 | 2016-09-21T15:39:39.000Z | 2022-03-16T15:15:58.000Z | #encoding:utf-8
from utils import weighted_random_subreddit

# Pick one of these subreddits at random each run; all weights are equal.
subreddit = weighted_random_subreddit({
    'BeautifulFemales': 0.25,
    'cutegirlgifs': 0.25,
    'gentlemanboners': 0.25,
    'gentlemanbonersgifs': 0.25
})

# Telegram channel that receives the reposted submissions.
t_channel = '@r_gentlemanboners'
def send_post(submission, r2t):
    """Forward *submission* to Telegram, allowing only GIF and image posts."""
    return r2t.send_simple(
        submission,
        text=False,
        gif=True,
        img=True,
        album=False,
        other=False,
    )
| 18.782609 | 43 | 0.657407 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.243056 |
bc2766b525e97b803d5443c5f873e693460a5edd | 5,334 | py | Python | efficientEigensolvers/Page_Rank_Application.py | ICERM-Efficient-Eigensolvers-2020/Implimentation | c5856afaeaa234946fb5151226e4f3f5a60a1018 | [
"MIT"
] | null | null | null | efficientEigensolvers/Page_Rank_Application.py | ICERM-Efficient-Eigensolvers-2020/Implimentation | c5856afaeaa234946fb5151226e4f3f5a60a1018 | [
"MIT"
] | null | null | null | efficientEigensolvers/Page_Rank_Application.py | ICERM-Efficient-Eigensolvers-2020/Implimentation | c5856afaeaa234946fb5151226e4f3f5a60a1018 | [
"MIT"
] | 2 | 2020-07-15T15:09:36.000Z | 2020-10-11T11:30:09.000Z | import sys, os
import Page_Rank_Utils as pru
from Power_Iteration import PowerMethod
from QR_Algorithm import qr_Algorithm_HH, qr_Algorithm_GS, shiftedQR_Algorithm
from Inverse_Iteration import InverseMethod
from Inverse_Iteration_w_shift import InverseShift
import matplotlib.pyplot as plt
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
# The crawler lives in a sibling "webCrawler" directory; strip this package's
# relative path suffix and add it to sys.path so `web_scraper` imports below.
# NOTE(review): the [:-21] slice assumes a fixed folder-name length -- fragile
# if the package directory is ever renamed; confirm before relying on it.
web_scraper_folder = THIS_FOLDER[:-21] + "webCrawler/"
sys.path.append(web_scraper_folder)
import web_scraper
import networkx as nx
import csv
import time
import numpy as np
import ast
import re
def Stochastic_matrix_test():
    """Smoke test: build a small digraph and derive its stochastic matrix.

    Node 3 receives edges but has no outgoing ones, i.e. it is dangling.
    """
    graph = nx.DiGraph()
    graph.add_edges_from([(1, 2), (1, 3), (1, 4), (2, 3), (4, 3)])
    M = pru.stochastic_transition_matrix_from_G(graph, False, 0.15)
def web_scrawler_application(url, max_urls, func_list, recal, weight=0.15):
    """Crawl *url* (up to *max_urls* pages), build the stochastic transition
    matrix, then run each eigensolver in *func_list* and write its page ranks.

    :param url: root URL to crawl.
    :param max_urls: maximum number of URLs the crawler may visit.
    :param func_list: eigensolver callables to benchmark.
    :param recal: truthy to force re-crawling even when cached files exist.
    :param weight: damping weight passed to the stochastic-matrix builder.
    """
    # Build a filesystem-safe folder name from the URL.
    url_w = url.replace('.', '_')
    url_w = url_w.replace('/', '')
    url_w = re.sub('https:', '', url_w)
    directory = f"test_result_july30/{url_w}/{max_urls}"
    result_folder_path = os.path.join(THIS_FOLDER, directory)
    if not os.path.exists(result_folder_path):
        print("make it!")
        os.makedirs(result_folder_path)
    # NOTE(review): logf is never closed, and f1 leaks if an exception escapes.
    logf = open(result_folder_path + "/error.log", "w")
    f1 = open(result_folder_path + "/page_rank_algorithms_comparison.txt", "w")
    raw_adjacency_matrix_file = result_folder_path + "/raw_adjacency_matrix.npy"
    stochastic_matrix_file = result_folder_path + "/prepared_matrix.npy"
    internal_url_dict_file = result_folder_path + "/internal_url_dict.txt"
    recalculate = recal
    # Crawl from scratch when cached artifacts are missing or a recalc is forced.
    if not os.path.exists(stochastic_matrix_file) or not os.path.exists(internal_url_dict_file) or recalculate:
        A, diG, internal_url_dict = web_scraper.scraper(url, max_urls)
        plt.spy(A)
        plt.title(f'Adjacency Matrix for {url} with {max_urls} urls')
        plt.savefig(result_folder_path+f'/{max_urls}_adhMatrix')
        plt.close()
        np.save(raw_adjacency_matrix_file , A)
        M = pru.stochastic_transition_matrix_from_G(diG, False, weight)
        np.save(stochastic_matrix_file, M)
        f_d = open(internal_url_dict_file, "w")
        f_d.write(str(internal_url_dict))
        f_d.close()
    else:
        # Reuse the cached adjacency/stochastic matrices and URL dictionary.
        A = np.load(raw_adjacency_matrix_file)
        plt.spy(A)
        plt.savefig(result_folder_path + f'/{max_urls}_adhMatrix')
        M = np.load(stochastic_matrix_file)
        f2 = open(internal_url_dict_file, "r")
        contents = f2.read()
        internal_url_dict = ast.literal_eval(contents)
        f2.close()
    converge_range = 0.0001
    for func in func_list:
        try:
            t_start = time.time()
            # QR-based solvers return the full spectrum: keep the dominant
            # eigenvalue and normalise the leading eigenvector.
            if func in [qr_Algorithm_HH, qr_Algorithm_GS, shiftedQR_Algorithm]:
                eigenvec, eigenval = func(M, converge_range=converge_range)
                eigenval = max(np.abs(eigenval))
                eigenvec = eigenvec[[0][0]]
                eigenvec = eigenvec/np.linalg.norm(eigenvec)
            else:
                eigenvec, eigenval = func(M, converge_range=converge_range, file_path=result_folder_path)
            t_end = time.time()
            time_length = t_end - t_start
            f1.write(f"algo: {func.__name__} time:{time_length}\n")
            # Map each crawled page to its score in the dominant eigenvector.
            page_rank_dict = {}
            for i, page in enumerate(internal_url_dict):
                page_rank_dict[page] = eigenvec[i]
            try:
                page_rank_dict = sorted(page_rank_dict.items(), key=lambda x: x[1], reverse=True)
                fields = ['Link', 'Page Rank Score']
                with open(result_folder_path + f"/{func.__name__}_page_rank.csv", "w", newline='') as f:
                    writer = csv.writer(f)
                    writer.writerow(fields)
                    for item in page_rank_dict:
                        writer.writerow(item)
                    print(f"dominant eigenvector: {eigenvec}", file=f)
                    print(f"dominant eigenvalue: {eigenval}", file=f)
            except:
                # Best-effort: record formatting failures instead of aborting.
                logf.write(f'\nalgorithm {func.__name__} cannot give out right eigenvector format')
        except:
            # Best-effort: record solver failures and continue with the rest.
            logf.write(f'\nalgorithm {func.__name__} doesnt work when solving the eigenvector')
            pass
    f1.close()
if __name__ == '__main__':
    print("###Page Ranking###")
    #another good attemp: https://mathworld.wolfram.com/
    # The last CLI argument names a config file whose three lines are:
    # root URL, maximum number of URLs, and a recalculation flag (0/1).
    filename = sys.argv[-1]
    # Fixed: the file handle was previously opened and never closed.
    with open(filename, 'r') as file1:
        lines = file1.readlines()
    url = lines[0].strip()
    max_urls = int(lines[1].strip())
    recal = int(lines[2].strip())
    print(url)
    """
    import argparse
    parser = argparse.ArgumentParser(description="Link Extractor Tool with Python")
    parser.add_argument("-u", "--url", help="The URL to extract links from.")
    parser.add_argument("-m", "--max-urls", help="Number of max URLs to crawl, default is 30.", default=30, type=int)
    args = parser.parse_args()
    url = args.url
    max_urls = args.max_urls
    #comment bove out if you don't want to use shell
    url = "https://icerm.brown.edu/"
    max_urls = 30
    """
    func_list = [PowerMethod, qr_Algorithm_HH, qr_Algorithm_GS, shiftedQR_Algorithm, InverseMethod, InverseShift]
    print(max_urls)
    web_scrawler_application(url, max_urls, func_list, recal)
    Stochastic_matrix_test()
| 37.829787 | 117 | 0.650919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,256 | 0.235471 |
bc29e1bc3e6578fac96ebc8b4d22ae86f30e35fb | 188 | py | Python | 04-ClassMembers/member.py | bakhshalipour/boost-python3-mac | dd86f2a1d2460118878282b794a87c821cd38a3a | [
"BSL-1.0"
] | null | null | null | 04-ClassMembers/member.py | bakhshalipour/boost-python3-mac | dd86f2a1d2460118878282b794a87c821cd38a3a | [
"BSL-1.0"
] | null | null | null | 04-ClassMembers/member.py | bakhshalipour/boost-python3-mac | dd86f2a1d2460118878282b794a87c821cd38a3a | [
"BSL-1.0"
] | null | null | null | #!/usr/bin/env python3
import member

# Exercise the extension class (presumably exposed via Boost.Python -- see
# the surrounding project) by reading and writing its data members.
m1 = member.SomeClass("Pavel")
print ("name =",m1.name)
# `name` is writable from Python.
m1.name = "Gunther"
print ("name =",m1.name)
# `number` accepts a float.
m1.number = 7.3
print ("number =",m1.number)
| 13.428571 | 30 | 0.648936 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.340426 |
bc2ad22aede45c1a13a1677de0ef725433124657 | 26,946 | py | Python | valispace/__init__.py | singleit-tech/ValispacePythonAPI | a5681779b3ee898188270a7973e0222875f93b8c | [
"MIT"
] | 9 | 2018-04-28T10:38:31.000Z | 2022-02-18T12:41:47.000Z | valispace/__init__.py | singleit-tech/ValispacePythonAPI | a5681779b3ee898188270a7973e0222875f93b8c | [
"MIT"
] | 7 | 2017-10-26T13:10:30.000Z | 2020-08-08T13:46:19.000Z | valispace/__init__.py | singleit-tech/ValispacePythonAPI | a5681779b3ee898188270a7973e0222875f93b8c | [
"MIT"
] | 13 | 2017-10-06T08:23:06.000Z | 2022-02-09T10:55:41.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import getpass
import json
import requests
import sys
import six
import re
class API:
    """
    Defines REST API endpoints for Valispace.
    """

    # Vali fields that callers are allowed to modify via update_vali().
    _writable_vali_fields = [
        'reference', 'margin_plus', 'margin_minus', 'unit',
        'formula', 'description', 'parent', 'tags', 'shortname',
        'minimum', 'maximum',
    ]
    def __init__(self, url=None, username=None, password=None, keep_credentials=False, warn_https=True):
        """
        Normalise the deployment URL, optionally warn about plain HTTP,
        then log in via OAuth2.

        :param url: deployment URL; prompted interactively when omitted.
        :param username: login name; prompted interactively when omitted.
        :param password: password; prompted interactively when omitted.
        :param keep_credentials: store credentials for transparent re-login.
        :param warn_https: ask for confirmation before using a non-SSL url.
        """
        print("\nAuthenticating Valispace...\n")
        if url is None:
            url = six.moves.input('Your Valispace url: ')
        url = url.strip().rstrip("/")
        # Default to HTTPS when no scheme was given.
        if not (url.startswith('http://') or url.startswith('https://')):
            url = 'https://' + url
        # Check for SSL connection before sending the username and password.
        if warn_https and url[:5] != "https":
            sys.stdout.write("Are you sure you want to use a non-SSL connection? "
                "This will expose your password to the network and might be a significant security risk [y/n]: ")
            while True:
                choice = six.moves.input().lower()
                if choice == "y":
                    break
                if choice == "n":
                    # Abort construction without connecting.
                    return
                print("Please answer 'y' or 'n'")
        self._url = url + '/rest/'
        self._oauth_url = url + '/o/token/'
        self._session = requests.Session()
        self.username, self.password = None, None
        if self.login(username, password):
            if keep_credentials:
                # Saved so request() can re-authenticate transparently on 401.
                self.username, self.password = username, password
            print("You have been successfully connected to the {} API.".format(self._url))
    def login(self, username=None, password=None):
        """
        Performs the password-based oAuth 2.0 login for read/write access.

        :param username: login name; prompted interactively when omitted.
        :param password: password; prompted interactively when omitted.
        :returns: True on success; raises on failure.
        """
        # clear out old auth headers
        self._session.headers = {}
        if username is None:
            username = six.moves.input('Username: ').strip()
        if password is None:
            password = getpass.getpass('Password: ').strip()
        try:
            client_id = "ValispaceREST"  # registered client-id in Valispace Deployment
            response = self._session.post(self._oauth_url, data={
                'grant_type': 'password',
                'username': username,
                'password': password,
                'client_id': client_id,
            })
        except requests.exceptions.RequestException as e:
            raise Exception("VALISPACE-ERROR: " + str(e))
        except:
            # TODO:
            # T: Capture specific exceptions, bc it is also possible that
            # the endpoint does not work or something like that...
            raise Exception("VALISPACE-ERROR: Invalid credentials or url.")

        response.raise_for_status()
        # NOTE(review): this local deliberately-or-not shadows the imported
        # `json` module for the rest of the method.
        json = response.json()
        if 'error' in json and json['error'] != None:
            if 'error_description' in json:
                raise Exception("VALISPACE-ERROR: " + json['error_description'])
            else:
                raise Exception("VALISPACE-ERROR: " + json['error'])
            return
        # All subsequent requests carry the bearer token.
        access = "Bearer " + json['access_token']
        self._session.headers = {
            'Authorization': access,
            'Content-Type': 'application/json'
        }
        return True
def get_all_data(self, type=None):
"""
Returns a dict of all component/vali/textvali/tags with their properties.
"""
# Check if valid argument was passed.
if type not in ('component', 'vali', 'textvali', 'tag'):
raise Exception("VALISPACE-ERROR: Type argument expected (component/vali/textvali/tags)")
if type in ('component', 'vali', 'textvali'):
url = type + 's/' # add an s to the end to get to the right endpoint
else:
url = type + '/'
get_data = self.get(url)
return_dictionary = {}
for data in get_data:
return_dictionary[str(data["id"])] = data
return return_dictionary
    def get_vali_list(self, workspace_id=None, workspace_name=None, project_id=None, project_name=None, parent_id=None,
                      parent_name=None, tag_id=None, tag_name=None, vali_marked_as_impacted=None):
        """
        Returns JSON with all the Valis that match the input arguments.
        All supplied filters are combined (ANDed together).
        Inputs are integers for IDs or vali_marked_as_impacted strings, and strings for names.
        Use the component 'unique_name' (not the 'name') in the parent_name argument.
        """
        # Validate that every ID-like argument is coercible to an integer.
        if workspace_id:
            try:
                workspace_id = int(workspace_id)
            except:
                raise Exception("VALISPACE-ERROR: Workspace id must be an integer.")
        if project_id:
            try:
                project_id = int(project_id)
            except:
                raise Exception("VALISPACE-ERROR: Project id must be an integer")
        if parent_id:
            try:
                parent_id = int(parent_id)
            except:
                raise Exception("VALISPACE-ERROR: Parent id must be an integer")
        if tag_id:
            try:
                tag_id = int(tag_id)
            except:
                raise Exception("VALISPACE-ERROR: Tag id must be an integer")
        if vali_marked_as_impacted:
            try:
                vali_marked_as_impacted = int(vali_marked_as_impacted)
            except:
                raise Exception("VALISPACE-ERROR: Vali_marked_as_impacted must be an integer")

        # Construct URL: each present filter appends one query parameter.
        url = "valis/?"
        if workspace_id:
            url += "parent__project__workspace={}".format(workspace_id)
        if workspace_name:
            url = self.__increment_url(url) + "parent__project__workspace__name={}".format(workspace_name)
        if project_id:
            url = self.__increment_url(url) + "project={}".format(project_id)
        if project_name:
            # Resolve the project name to an id first (raises if unknown).
            project = self.get_project_by_name(project_name)
            url = self.__increment_url(url) + "project={}".format(project[0]['id'])
        if parent_id:
            url = self.__increment_url(url) + "parent={}".format(parent_id)
        if parent_name:
            url = self.__increment_url(url) + "parent__unique_name={}".format(parent_name)
        if tag_id:
            url = self.__increment_url(url) + "tags__id={}".format(tag_id)
        if tag_name:
            url = self.__increment_url(url) + "tags__name={}".format(tag_name)
        if vali_marked_as_impacted:
            url = self.__increment_url(url) + "valis_marked_as_impacted={}".format(vali_marked_as_impacted)
        try:
            return self.get(url)
        except Exception as e:
            # Best-effort: report the failure and fall through (returns None).
            print('Something went wrong with the request. Details: {}'.format(e))

        # if response.status_code != 200:
        #     print('Response:', response)
        #     print('Status code:', response.status_code)
        #     print('Text:', response.text)
        #     return None
        # else:
        #     return response.json()
    def get_vali_names(self, project_name=None):
        """
        Returns a list of all Valis with only names and IDs, optionally
        restricted to a single project.

        :param project_name: optional project name used to filter the valis.
        :returns: JSON object.
        """
        if project_name:
            # NOTE(review): get_project_by_name raises when nothing matches,
            # so the falsy branch below looks unreachable -- confirm.
            project = self.get_project_by_name(project_name)
            if project:
                project = project[0]
                return self.get("valis/?fields=id,name&_project={}".format(project["id"]))
            else:
                return None
        else:
            return self.get("valis/?fields=id,name")
def get_vali(self, id):
"""
Returns JSON of a unique Vali.
:param id: ID of the Vali to fetch.
:returns: JSON object.
"""
if type(id) != int:
raise Exception("VALISPACE-ERROR: The function requires an ID (int) as parameter.")
return self.get("valis/{}/".format(id))
def get_vali_by_name(self, vali_name, project_name):
"""
Returns JSON of a unique Vali.
:param vali_name: unique name of the vali.
:returns: JSON object.
"""
if type(vali_name) != str:
raise Exception("VALISPACE-ERROR: The function requires a valid Vali name (str) as parameter.")
if type(project_name) != str:
raise Exception("VALISPACE-ERROR: The function requires a valid Project name (str) as parameter.")
valinames = self.get_vali_names()
for entry in valinames:
if entry['name'] == vali_name:
return self.get("valis/{}/".format(entry["id"]))
raise Exception("VALISPACE-ERROR: There is no Vali with this name and project, make sure you admit a "
"valid full name for the vali (e.g. ComponentX.TestVali) and a valid project name.")
def fuzzysearch_vali(self, searchterm):
"""
Returns JSON of a unique Vali given a similar name
:param searchterm: (not necessarily exact) name of vali
:returns: JSON object.
"""
if type(searchterm) != str:
raise Exception("VALISPACE-ERROR: The function requires a string as parameter.")
url = "fuzzysearch/Vali/name/{}/".format(searchterm)
result = self.get(url, allow_redirects=True)
if result == {}:
raise Exception("VALISPACE-ERROR: Could not find a matching vali for {}".format(searchterm))
else:
return result
def get_vali_value(self, id):
"""
Returns the value of a vali.
:param id: ID of Vali.
:returns: int.
"""
try:
vali = self.get_vali(id)
return vali["value"]
except:
raise Exception("VALISPACE-ERROR: Could not retrieve Vali value.")
# def create_vali(parent=None, type=None, shortname=None, description=None, formula=None, margin_plus=None,
# margin_minus=None, minimum=None, maximum=None, reference=None, tags=None, data={}):
# """
# Creates a new Vali.
# """
# # TBD...
def update_vali(self, id, shortname=None, formula=None, data=None):
"""
Finds the Vali that corresponds to the input id
and updates it with the input shortname, formula and/or data.
"""
if data == None :
data = {}
elif type(data) != dict:
raise Exception('VALISPACE-ERROR: data needs to be a dictionary. To update formula / value use "formula"')
if not id:
raise Exception("VALISPACE-ERROR: You need to pass an ID.")
if not shortname and not formula and not data:
raise Exception("VALISPACE-ERROR: You have to pass data to update.")
# Write Vali.
if shortname:
data["shortname"] = shortname
if formula:
data["formula"] = formula
if not data:
raise Exception("You have not entered any valid fields. Here is a list of all fields \
that can be updated:\n{}.".format(", ".join(self._writable_vali_fields)))
url = "valis/{}/".format(id)
return self.request('PATCH', url, data=data)
    def impact_analysis(self, id, target_vali_id, range_from, range_to, range_step_size):
        """
        Sweep vali *id* over [range_from, range_to] in steps of
        *range_step_size* and return the impact graph on *target_vali_id*.

        :returns: parsed JSON response.
        :raises Exception: when the server does not answer 200.
        """
        data = {}
        if not id:
            raise Exception("VALISPACE-ERROR: You need to pass an ID.")
        url = "valis/{}/impact-analysis-graph-for/{}/?range_min={}&range_max={}&range_step_size={}".format(id, target_vali_id, range_from, range_to, range_step_size)
        # FIXME: (patrickyeon) I special-cased this because there's some
        #        printing of returned values on error, but I suspect
        #        that is really better handled by the normal error-
        #        handling path and getting rid of these print()s
        print(url)
        # NOTE(review): a GET with a `data=` body is unusual -- confirm the
        # server ignores it before simplifying.
        result = self._session.get(self._url + url, data=data)
        if result.status_code != 200:
            print(result.text)
            raise Exception("Invalid Request.")
        return json.loads(result.text)
def what_if(self, vali_name, target_name, value):
if not id or not target_name or not value:
raise Exception("VALISPACE-ERROR: You need to pass an ID.")
url = "alexa_what_if/{}/{}/{}/".format(vali_name, target_name, value)
# FIXME: (patrickyeon) same comment as on impact_analysis()
print(url)
result = self._session.get(self._url + url)
if result.status_code != 200:
print(result.text)
raise Exception("Invalid Request.")
return json.loads(result.text)
def get_component_list(
self,
workspace_id=None,
workspace_name=None,
project_id=None,
project_name=None,
parent_id=None,
parent_name=None,
tag_id=None,
tag_name=None,
):
"""
Returns JSON with all the Components that match the input arguments.
Inputs are integers for IDs and strings for names.
Use the component 'unique_name' (not the 'name') in the parent_name argument.
:returns: JSON object.
"""
if workspace_id:
try:
workspace_id = int(workspace_id)
except:
raise Exception("VALISPACE-ERROR: Workspace id must be an integer.")
if project_id:
try:
project_id = int(project_id)
except:
raise Exception("VALISPACE-ERROR: Project id must be an integer.")
if parent_id:
try:
parent_id = int(parent_id)
except:
raise Exception("VALISPACE-ERROR: Parent id must be an integer.")
if tag_id:
try:
tag_id = int(tag_id)
except:
raise Exception("VALISPACE-ERROR: Tag id must be an integer.")
# Construct URL.
url = "components/?"
if workspace_id:
url += "project__workspace={}".format(workspace_id)
elif workspace_name:
url = self.__increment_url(url) + "workspace__name={}".format(workspace_name)
elif project_id:
url = self.__increment_url(url) + "project={}".format(project_id)
elif project_name:
url = self.__increment_url(url) + "project__name={}".format(project_name)
elif parent_id:
url = self.__increment_url(url) + "parent={}".format(parent_id)
elif parent_name:
url = self.__increment_url(url) + "parent__unique_name={}".format(parent_name)
elif tag_id:
url = self.__increment_url(url) + "tags__id={}".format(tag_id)
elif tag_name:
url = self.__increment_url(url) + "tags__name={}".format(tag_name)
return self.get(url)
def get_component(self, id):
"""
Returns JSON of a unique Component.
:param id: ID of the component to fetch.
:returns: JSON object.
"""
if type(id) != int:
raise Exception("VALISPACE-ERROR: The function requires an id (int) as argument.")
return self.get("components/{}/".format(id))
    def get_component_by_name(self, unique_name, project_name):
        """
        Returns JSON of a unique Component.

        :param unique_name: unique name of the component to fetch.
        :param project_name: name of the project to search in.
        :returns: JSON object.
        """
        if type(unique_name) != str:
            raise Exception("VALISPACE-ERROR: The function requires a component unique name (str) as argument.")
        if type(project_name) != str:
            raise Exception("VALISPACE-ERROR: The function requires a valid Project name (str) as argument.")
        # The return value is unused, but the call raises early when the
        # project does not exist -- it acts as validation.
        project = self.get_project_by_name(name=project_name)
        results = []
        component_list = self.get_component_list(project_name=project_name)
        for entry in component_list:
            if entry['unique_name'] == unique_name:
                results.append(entry)
                #return self.get("valis/{}/".format(entry["id"]))
        num_results = len(results)
        if num_results == 1:
            return results[0]
        if num_results == 0:
            raise Exception("VALISPACE-ERROR: A Component with this name does not exist. Please check for typos.")
        else:
            raise Exception("VALISPACE-ERROR: The name you admitted is ambiguous, are you sure you used the Component's full name?")
def get_project_list(self, workspace_id=None, workspace_name=None):
"""
Returns JSON with all the Projects that mach the input arguments.
Inputs are integers for IDs and strings for names.
:returns: JSON object.
"""
# Construct URL.
url = "project/?"
if workspace_id:
if type(workspace_id) != int:
raise Exception("VALISPACE-ERROR: workspace_id must be an integer.")
url += "workspace={}".format(workspace_id)
elif workspace_name:
if type(workspace_name) != str:
raise Exception("VALISPACE-ERROR: workspace_name must be a string.")
url = self.__increment_url(url) + "workspace__name={}".format(workspace_name)
return self.get(url)
def get_project(self, id):
"""
Retrieve a Project via ID.
:param id: ID of the project.
:returns: JSON object.
"""
if type(id) != int:
raise Exception("VALISPACE-ERROR: The function requires an id (int) as argument.")
return self.get("project/{}/".format(id))
def get_project_by_name(self, name):
"""
Retrieve a Project via name.
:param name: name of the project (unique).
:returns: JSON object.
"""
if type(name) != str:
raise Exception("VALISPACE-ERROR: The function requires a valid project name (str) as argument.")
# Construct URL.
url = "project/?name={}".format(name)
response = self.get(url)
if len(response) == 0:
raise Exception("VALISPACE-ERROR: A Project with this name does not exist. Please check for typos.")
else:
return response
    def post_data(self, type=None, data=None):
        """
        Post new component/vali/textvali/tags with the input data
        Data is expected to be a JSON string with some required fields like name.

        :param type: type of object to create/update.
        :param data: dict with key value pairs for the object attributes.
        :returns: JSON object.
        """
        # Check if no argument was passed
        if data is None:
            data = {}
        if type not in ('component', 'vali', 'textvali', 'tag'):
            raise Exception("VALISPACE-ERROR: Type argument expected (component/vali/textvali/tags).")
        if type in ('component', 'vali', 'textvali'):
            url = type + 's/'  # Add an s to the end of the type for component, vali and textvali to get to the correct endpoint
        else:
            url = type + '/'
        # FIXME: (patrickyeon) special-casing this, but maybe this whole
        #        method is not required now that post() exists?
        result = self._session.post(self._url + url, data=data)
        # NOTE(review): any status other than 201 raises below -- even 200.
        if result.status_code == 201:
            print("Successfully updated Vali:\n" + str(data) + "\n")
        elif result.status_code == 204:
            raise Exception(
                "The server successfully processed the request, but is not "
                "returning any content (status code: 204)\n"
            )
        elif result.status_code == 500:
            raise Exception(
                "The server encountered an unexpected condition which "
                "prevented it from fulfilling the request (status code: 500)\n"
            )
        else:
            raise Exception("Invalid Request (status code: {}): {}\n".format(result.status_code, result.content))
        return result.json()
def post(self, url, data=None, **kwargs):
"""
Posts data
:param url: the relative url
:param data: the data
:param **kwargs: additional args passed to the request call
:returns: JSON object.
"""
return self.request('POST', url, data, **kwargs)
def get(self, url, data=None, **kwargs):
"""
Posts data
:param url: the relative url
:param data: the data
:param **kwargs: additional args passed to the request call
:returns: JSON object.
"""
return self.request('GET', url, data, **kwargs)
    def request(self, method, url, data=None, **kwargs):
        """
        Generic request with transparent re-login on an expired token.

        :param method: the HTTP method.
        :param url: the relative url.
        :param data: the JSON payload (sent as the request's `json` body --
            callers must not pass a `json=` kwarg of their own, it would
            collide with the session call below).
        :param **kwargs: additional args passed to the request call.
        :returns: JSON object.
        """
        url = self._url + url
        result = self._session.request(method, url, json=data, **kwargs)
        if result.status_code == 401:
            # authentication expired
            if self.username is None:
                print("Authentication expired, please re-login")
            # otherwise, we've got it saved and this is transparent
            self.login(self.username, self.password)
            # try the request one more time
            result = self._session.request(method, url, json=data, **kwargs)
        result.raise_for_status()
        return result.json()
    def get_matrix(self, id):
        """
        Returns the correct Matrix, with every cell resolved to its full
        Vali object.

        :param id: ID of Matrix.
        :returns: list of lists.
        """
        url = "matrices/{}/".format(id)
        matrix_data = self.get(url)
        try:
            # TODO:
            # T: there is probably a faster and more efficient way...
            # Each cell holds a vali id; fetch the full vali per cell.
            matrix = []
            for row in range(matrix_data['number_of_rows']):
                matrix.append([])
                for col in range(matrix_data['number_of_columns']):
                    matrix[row].append(self.get_vali(matrix_data['cells'][row][col]))
            return matrix
        except KeyError:
            raise Exception("VALISPACE-ERROR: Matrix with id {} not found.".format(id))
        except:
            # NOTE(review): this bare except hides the real failure reason.
            raise Exception("VALISPACE-ERROR: Unknown error.")
    def get_matrix_str(self, id):
        """
        Returns the correct Matrix, with each cell reduced to its vali id
        and current value.

        :param id: ID of Matrix.
        :returns: list of lists of {'vali': id, 'value': value} dicts.
        """
        url = "matrices/{}/".format(id)
        matrix_data = self.get(url)
        try:
            matrix = []
            for row in range(matrix_data['number_of_rows']):
                matrix.append([])
                for col in range(matrix_data['number_of_columns']):
                    matrix[row].append({
                        "vali": matrix_data['cells'][row][col],
                        "value": self.get_vali(matrix_data['cells'][row][col])["value"],
                    })
            return matrix
        except KeyError:
            raise Exception("VALISPACE-ERROR: Matrix with id {} not found.".format(id))
        except:
            # NOTE(review): this bare except hides the real failure reason.
            raise Exception("VALISPACE-ERROR: Unknown error.")
def update_matrix_formulas(self, id, matrix_formula):
"""
Finds the Matrix that corresponds to the input id,
Finds each of the Valis that correspond to the vali id (contained in each cell of the matrix)
Updates the formula of each of the Valis with the formulas contained in each cell of the input matrix.
"""
# Read Matrix.
url = "matrices/{}/".format(id)
matrix_data = self.get(url)
# Check matrix dimensions.
if not len(matrix_formula) == matrix_data["number_of_rows"] and len(matrix_formula[0]) == matrix_data["number_of_columns"]:
raise Exception('VALISPACE-ERROR: The dimensions of the local and the remote matrix do not match.')
# Update referenced valis in each matrix cell
for row in range(matrix_data['number_of_rows']):
for col in range(matrix_data['number_of_columns']):
self.update_vali(id=matrix_data['cells'][row][col], formula=matrix_formula[row][col])
# Increment function to add multiple fields to url
def __increment_url(self, url):
# TODO:
# T: Replace this with a proper query param function...
if not url.endswith('?'):
url += "&"
return url
def vali_create_dataset(self, vali_id):
"""
Creates a new dataset in vali.
:param vali_id: Id of the vali where we want to create the dataset.
:returns: New datset id.
"""
url = 'rest/valis/' + vali_id + '/convert-to-dataset/'
data = {}
return self.post(url, data)
def create_dataset_and_set_values(self, vali_id, input_data):
"""
Sets a dataset.
:param vali_id: Id of the vali where we want to create the dataset.
:param data: Data, in the format of [[x0, y0...], [x1, y1...], ...]
"""
if type(input_data) != list:
raise Exception('input_data must be an array')
url = 'datasets/'
data = {
"vali": vali_id
}
try:
response = self.post(url, data)
except:
raise Exception("VALISPACE ERROR: Is the vali_id valid?")
dataset_id = response['id']
variable_id = response['points'][0]['variables'][0]['id']
point_id = response['points'][0]['id']
s = 0
for d in input_data:
if s != 0:
if len(d) != s:
raise Exception("Data members with inconsistent length. Found {}, expected {}.".format(len(d), s))
url = 'vali/functions/datasets/points/'
data = {
"dataset": dataset_id
}
response = self.post(url, data)
point_id = response['id']
variable_id = response['variables'][0]['id']
else:
s = len(d)
url = 'vali/functions/datasets/points/{}/'.format(point_id)
data = {
"value": d[0]
}
self.request('PATCH', url, json=data)
for v in d[1:]:
url = 'vali/functions/datasets/points/variables/{}/'.format(variable_id)
data = {
"value_number": v
}
self.request('PATCH', url, json=data)
return dataset_id
def vali_import_dataset(self, vali_id, data, headers=None):
    """Upload a dataset to a vali, generating alphabetical column headers when none are supplied."""
    if headers is None:
        # Default headers are 'a', 'b', 'c', ... -- one letter per column of the first row
        headers = [chr(ord('a') + column) for column in range(len(data[0]))]
    url = 'valis/' + str(vali_id) + '/import-dataset/'
    self.request('POST', url, data={'headers': headers, 'data': data})
| 36.861833 | 165 | 0.572627 | 26,820 | 0.995324 | 0 | 0 | 0 | 0 | 0 | 0 | 11,318 | 0.420025 |
bc2ade7603e85b28fb608daaa69727e893191a3a | 194 | py | Python | setup.py | khuong507/bluemix-cloudbase-init | 10628014687c5f0cfb183f5e8f182856078fa5fa | [
"Apache-2.0"
] | 3 | 2019-05-30T02:41:17.000Z | 2019-11-11T09:59:07.000Z | setup.py | khuong507/bluemix-cloudbase-init | 10628014687c5f0cfb183f5e8f182856078fa5fa | [
"Apache-2.0"
] | 1 | 2021-02-05T14:51:01.000Z | 2021-02-11T08:37:31.000Z | setup.py | khuong507/bluemix-cloudbase-init | 10628014687c5f0cfb183f5e8f182856078fa5fa | [
"Apache-2.0"
] | 2 | 2017-08-10T13:44:23.000Z | 2019-12-19T15:08:52.000Z | from distutils.core import setup
# Minimal distutils packaging metadata for the Bluemix cloudbase-init datasource.
setup(name='Bluemix',
      version='0.1',
      description='A bluemix datasource to be used with cloudbase-init',
      packages=['bluemix', 'bluemix.conf'])
| 27.714286 | 72 | 0.685567 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.463918 |
bc2c36f82b74e08da6f7b10aee3d76392a0e0a37 | 49,575 | py | Python | giant/relative_opnav/estimators/ellipse_matching.py | nasa/giant | 1e939272d9a0ca533b4da400d132f854520f3adc | [
"NASA-1.3"
] | 5 | 2021-09-10T14:29:19.000Z | 2022-01-13T20:15:01.000Z | giant/relative_opnav/estimators/ellipse_matching.py | nasa/giant | 1e939272d9a0ca533b4da400d132f854520f3adc | [
"NASA-1.3"
] | null | null | null | giant/relative_opnav/estimators/ellipse_matching.py | nasa/giant | 1e939272d9a0ca533b4da400d132f854520f3adc | [
"NASA-1.3"
] | 2 | 2021-10-01T18:39:13.000Z | 2021-12-30T08:53:08.000Z | # Copyright 2021 United States Government as represented by the Administrator of the National Aeronautics and Space
# Administration. No copyright is claimed in the United States under Title 17, U.S. Code. All Other Rights Reserved.
r"""
This module provides the capability to locate the relative position of a regular target body (well modelled by a
triaxial ellipsoid) by matching the observed ellipse of the limb in an image with the ellipsoid model of the target.
Description of the Technique
----------------------------
Ellipse matching is a form of OpNav which produces a full 3DOF relative position measurement between the target and the
camera. Conceptually, it does this by comparing the observed size of a target in an image to the known size of the
target in 3D space to determine the range, and fits an ellipse to the observed target to locate the center in the image.
As such, this can be a very powerful measurement because it is insensitive to errors in the a priori knowledge of your
range to the target, unlike cross correlation, provides more information than just the bearing to the target
for processing in a filter, and is more computationally efficient. That being said, the line-of-sight/bearing component
of the estimate is generally slightly less accurate than cross correlation (when there is good a priori knowledge of the
shape and the range to the target). This is because ellipse matching only makes use of the visible limb, while cross
correlation makes use of all of the visible target.
While conceptually the ellipse matching algorithm computes both a bearing and a range measurement, in actuality, a
single 3DOF position estimate is computed in a least squares sense, not 2 separate measurements. The steps to extract
this measurement are:
#. Identify the observed illuminated limb of the target in the image being processed using
:meth:`.ImageProcessing.identify_subpixel_limbs`
#. Solve the least squares problem
.. math::
\left[\begin{array}{c}\bar{\mathbf{s}}'^T_1 \\ \vdots \\ \bar{\mathbf{s}}'^T_m\end{array}\right]
\mathbf{n}=\mathbf{1}_{m\times 1}
where :math:`\bar{\mathbf{s}}'_i=\mathbf{B}\mathbf{s}_i`, :math:`\mathbf{s}_i`, is a unit vector in the camera frame
through an observed limb point in an image (computed using :meth:`~.CameraModel.pixels_to_unit`),
:math:`\mathbf{B}=\mathbf{Q}\mathbf{T}^C_P`, :math:`\mathbf{Q}=\text{diag}(1/a, 1/b, 1/c)`, :math:`a-c` are the size
of the principal axes of the tri-axial ellipsoid representing the target, and :math:`\mathbf{T}^C_P` is the rotation
matrix from the principal frame of the target shape to the camera frame.
#. Compute the position of the target in the camera frame using
.. math::
\mathbf{r}=-(\mathbf{n}^T\mathbf{n}-1)^{-0.5}\mathbf{T}_C^P\mathbf{Q}^{-1}\mathbf{n}
where :math:`\mathbf{r}` is the position of the target in camera frame, :math:`\mathbf{T}_C^P` is the rotation from
the principal frame of the target ellipsoid to the camera frame, and all else is as defined previously.
Further details on the algorithm can be found `here <https://arc.aiaa.org/doi/full/10.2514/1.G000708>`_.
.. note::
This implements limb based OpNav for regular bodies. For irregular bodies, like asteroids and comets, see
:mod:`.limb_matching`.
Typically this technique is used once the body is fully resolved in the image (around at least 50 pixels in apparent
diameter) and then can be used as long as the limb is visible in the image.
Tuning
------
There are a few parameters to tune for this method. The main thing that may make a difference is the choice and tuning
for the limb extraction routines. There are 2 categories of routines you can choose from. The first is image
processing, where the limbs are extracted using only the image and the sun direction. To tune the image processing limb
extraction routines you can adjust the following :class:`.ImageProcessing` settings:
========================================= ==============================================================================
Parameter Description
========================================= ==============================================================================
:attr:`.ImageProcessing.denoise_flag` A flag specifying to apply :meth:`~.ImageProcessing.denoise_image` to the
image before attempting to locate the limbs.
:attr:`.ImageProcessing.image_denoising` The routine to use to attempt to denoise the image
:attr:`.ImageProcessing.subpixel_method` The subpixel method to use to refine the limb points.
========================================= ==============================================================================
Other tunings are specific to the subpixel method chosen and are discussed in :mod:`.image_processing`.
The other option for limb extraction is limb scanning. In limb scanning predicted illumination values based on the
shape model and a prior state are correlated with extracted scan lines to locate the limbs in the image. This technique
can be quite accurate (if the shape model is accurate) but is typically much slower and the extraction must be repeated
each iteration. The general tunings to use for limb scanning are from the :class:`.LimbScanner` class:
============================================ ===========================================================================
Parameter Description
============================================ ===========================================================================
:attr:`.LimbScanner.number_of_scan_lines` The number of limb points to extract from the image
:attr:`.LimbScanner.scan_range` The extent of the limb to use centered on the sun line in radians (should
be <= np.pi/2)
:attr:`.LimbScanner.number_of_sample_points` The number of samples to take along each scan line
============================================ ===========================================================================
There are a few other things that can be tuned but they generally have limited effect. See the :class:`.LimbScanner`
class for more details.
In addition, there is one knob that can be tweaked on the class itself.
========================================== =============================================================================
Parameter Description
========================================== =============================================================================
:attr:`.LimbMatching.extraction_method` Chooses the limb extraction method to be image processing or limb scanning.
========================================== =============================================================================
Beyond this, you only need to ensure that you have a fairly accurate ellipsoid model of the target, the knowledge of the
sun direction in the image frame is good, and the knowledge of the rotation between the principal frame and the camera
frame is good.
Use
---
The class provided in this module is usually not used by the user directly, instead it is usually interfaced with
through the :class:`.RelativeOpNav` class using the identifier :attr:`~.RelativeOpNav.ellipse_matching`. For more
details on using the :class:`.RelativeOpNav` interface, please refer to the :mod:`.relnav_class` documentation. For
more details on using the technique class directly, as well as a description of the ``details`` dictionaries produced
by this technique, refer to the following class documentation.
"""
import warnings
from enum import Enum
from typing import List, Optional, Union, Tuple, Callable
import numpy as np
from scipy.interpolate import RegularGridInterpolator, interp1d
from giant.relative_opnav.estimators.estimator_interface_abc import RelNavEstimator, RelNavObservablesType
from giant.relative_opnav.estimators.moment_algorithm import MomentAlgorithm
from giant.ray_tracer.shapes import Ellipsoid
from giant.ray_tracer.rays import Rays
from giant.ray_tracer.illumination import IlluminationModel, McEwenIllumination
from giant.image_processing import ImageProcessing, parabolic_peak_finder_1d, fft_correlator_1d
from giant.camera import Camera
from giant.image import OpNavImage
from giant.ray_tracer.scene import Scene, SceneObject
from giant.point_spread_functions import PointSpreadFunction
from giant._typing import NONEARRAY, Real
class LimbExtractionMethods(Enum):
    """
    Enumerates the supported ways of extracting observed limb points from an image.
    """

    LIMB_SCANNING = "LIMB_SCANNING"
    """
    Locate limbs by 1D cross correlation of predicted and observed intensity profiles sampled along scan
    vectors.

    Because the scan lines depend on the a priori state vector, limbs found this way must be re-extracted
    after every iteration.
    """

    EDGE_DETECTION = "EDGE_DETECTION"
    """
    Locate limbs with standard edge-detection image processing.

    Only the image and the sun direction in the image are needed (no a priori state), so this extraction is
    performed a single time per image.  The particular edge detector and its settings are controlled through
    the :class:`.ImageProcessing` class, and the extraction itself is done with
    :meth:`.ImageProcessing.identify_subpixel_limbs`.
    """
class LimbScanner:
    """
    This class is used to extract limbs from an image and pair them to surface points on the target.

    This is done by first determining the surface points on the limb based on the shape model, the scan center vector,
    and the sun direction vector. Once these surface points have been identified (using :meth:`.Shape.find_limbs`) they
    are projected onto the image to generate the predicted limb locations in the image. Then the image is sampled
    along the scan line through each predicted limb location and the scan center location in the image using the
    ``image_interpolator`` input to get the observed intensity line. In addition, the scan line is rendered using
    ray tracing to generate the predicted intensity line. The predicted intensity lines and the extracted intensity
    lines are then compared using cross correlation to find the shift that best aligns them. This shift is then applied
    to the predicted limb locations in the image along the scan line to get the extracted limb location in the image.
    This is all handled by the :meth:`extract_limbs` method.

    There are a few tuning options for this class. The first collection affects the scan lines that are used to extract
    the limb locations from the image. The :attr:`number_of_scan_lines` sets the number of generated scan lines and
    directly corresponds to the number of limb points that will be extracted from the image. In addition,
    the :attr:`scan_range` attribute sets the angular extent about the sun direction vector that these scan lines will
    be evenly distributed. Finally, the :attr:`number_of_sample_points` specifies how many samples to take along the
    scan lines for both the extracted and predicted intensity lines and corresponds somewhat to how accurate the
    resulting limb location will be. (Generally a higher number will lead to a higher accuracy though this is also
    limited by the resolution of the image and the shape model itself. A higher number also will make things take
    longer.)

    In addition to the control over the scan lines, you can adjust the :attr:`brdf` which is used to generate the
    predicted intensity lines (although this will generally not make much difference) and you can change what peak
    finder is used to find the subpixel peaks of the correlation lines.

    This technique requires decent a priori knowledge of the relative state between the target and the camera for it to
    work. At minimum it requires that the scan center be located through both the observed target location in the image
    and the target shape model placed at the current relative position in the scene. If this isn't guaranteed by your
    knowledge then you can use something like the :mod:`.moment_algorithm` to correct the gross errors in your a priori
    knowledge as is done by :class:`.LimbMatching`.

    Generally you will not use this class directly as it is used by the :class:`.LimbMatching` class. If you want to
    use it for some other purpose however, simply provide the required initialization parameters, then use
    :meth:`extract_limbs` to extract the limbs from the image.
    """

    def __init__(self, scene: Scene, camera: Camera, psf: PointSpreadFunction, number_of_scan_lines: int = 51,
                 scan_range: Real = 3 * np.pi / 4, number_of_sample_points: int = 501,
                 brdf: Optional[IlluminationModel] = None, peak_finder: Callable = parabolic_peak_finder_1d):
        r"""
        :param scene: The scene containing the target(s) and the light source
        :param camera: The camera containing the camera model
        :param psf: The point spread function to apply to the predicted intensity lines. This should have a
                    :meth:`~.PointSpreadFunction.apply_1d`
        :param number_of_scan_lines: The number of scan lines to generate (number of extracted limb points)
        :param scan_range: The angular extent about the sun direction vector to distribute the scan lines through. This
                           Should be in units of radians and should generally be less than :math:`\pi`.
        :param number_of_sample_points: The number of points to sample along each scan line
        :param brdf: The illumination model to use to render the predicted scan intensity lines
        :param peak_finder: The peak finder to find the peak of each correlation line. This should assume that each
                            row of the input array is a correlation line that the peak needs to be found for.
        """

        self.scene: Scene = scene
        """
        The scene containing the target(s) and the light source
        """

        self.camera: Camera = camera
        """
        The camera containing the camera model
        """

        self.psf: PointSpreadFunction = psf
        """
        The point spread function to apply to the predicted intensity lines.

        This should provide a :meth:`~.PointSpreadFunction.apply_1d` that accepts in a numpy array where each
        row is an intensity line and returns the blurred intensity lines as a numpy array.
        """

        self.number_of_scan_lines: int = number_of_scan_lines
        """
        The number of scan lines to generate/limb points to extract
        """

        self.scan_range: float = float(scan_range)
        r"""
        The extent about the illumination direction in radians in which to distribute the scan lines.

        The scan lines are distributed +/- scan_range/2 about the illumination direction. This therefore should
        generally be less than :math:`\frac{\pi}{2}` unless you are 100% certain that the phase angle is perfectly 0
        """

        self.number_of_sample_points: int = number_of_sample_points
        """
        The number of points to sample each scan line along for the extracted/predicted intensity lines
        """

        self.brdf: IlluminationModel = brdf
        """
        The illumination function to use to render the predicted scan lines.
        """

        # set the default if it wasn't specified
        if self.brdf is None:
            self.brdf = McEwenIllumination()

        self.peak_finder: Callable = peak_finder
        """
        The callable to use to return the peak of the correlation lines.
        """

        self.predicted_illums: NONEARRAY = None
        """
        The predicted intensity lines from rendering the scan lines.

        This will be a ``number_of_scan_lines`` by ``number_of_sample_points`` 2d array where each row is a scan line.

        This will be ``None`` until :meth:`extract_limbs` is called
        """

        self.extracted_illums: NONEARRAY = None
        """
        The extracted intensity lines from sampling the image.

        This will be a ``number_of_scan_lines`` by ``number_of_sample_points`` 2d array where each row is a scan line.

        This will be ``None`` until :meth:`extract_limbs` is called
        """

        self.correlation_lines: NONEARRAY = None
        """
        The correlation lines resulting from doing 1D cross correlation between the predicted and extracted scan lines.

        This will be a ``number_of_scan_lines`` by ``number_of_sample_points`` 2d array where each row is a correlation
        line.

        This will be ``None`` until :meth:`extract_limbs` is called
        """

        self.correlation_peaks: NONEARRAY = None
        """
        The peaks of the correlation lines.

        This will be a ``number_of_scan_lines`` length 1d array where each element is the peak of the corresponding
        correlation line.

        This will be ``None`` until :meth:`extract_limbs` is called
        """

    def predict_limbs(self, scan_center: np.ndarray, line_of_sight_sun: np.ndarray, target: SceneObject,
                      camera_temperature: Real) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Predict the limb locations for a given target in the camera frame.

        This is done by

        #. get the angle between the illumination vector and the x axis of the image
        #. Generate :attr:`number_of_scan_lines` scan angles evenly distributed between the sun angle -
           :attr:`scan_range` /2 and the sun angle + :attr:`scan_range` /2
        #. convert the image scan line directions into directions in the camera frame
        #. use :meth:`.Shape.find_limbs` to find the limbs of the target given the scan center and the scan directions
           in the camera frame

        The limbs will be returned as a 3xn array in the camera frame.

        This method is automatically called by :meth:`extract_limbs` and will almost never be used directly, however,
        it is exposed for the adventurous types.

        :param scan_center: the beginning of the scan in the image (pixels)
        :param line_of_sight_sun: the line of sight to the sun in the image (pixels)
        :param target: The target the limbs are to be predicted for
        :param camera_temperature: The temperature of the camera
        :return: The predicted limb locations in the camera frame as a 3xn array, the scan directions in the image as
                 a 2xn array of pixel direction vectors, and the scan directions in the camera frame as a 3xn array of
                 unit vectors
        """

        # Get the angle of the illumination direction from the x axis in the image
        angle_sun = np.arctan2(line_of_sight_sun[1], line_of_sight_sun[0])

        # Set the scan angles +/- scan range around the sun direction
        scan_angles = np.linspace(angle_sun - self.scan_range / 2, angle_sun + self.scan_range / 2,
                                  self.number_of_scan_lines)

        # get the scan directions in the image
        scan_dirs_pixels = np.vstack([np.cos(scan_angles), np.sin(scan_angles)])

        # get the line of sight to the target in the camera frame
        scan_center_camera = self.camera.model.pixels_to_unit(scan_center, temperature=camera_temperature)
        # scale so the z component is 1 (image-plane coordinates)
        scan_center_camera /= scan_center_camera[-1]

        # get the scan directions in the camera frame by differencing the image-plane points offset one pixel
        # along each scan direction from the scan center point
        scan_dirs_camera = self.camera.model.pixels_to_unit(scan_center.reshape(2, 1) + scan_dirs_pixels,
                                                            temperature=camera_temperature)
        scan_dirs_camera /= scan_dirs_camera[-1]
        scan_dirs_camera -= scan_center_camera
        scan_dirs_camera /= np.linalg.norm(scan_dirs_camera, axis=0, keepdims=True)

        # get the limbs body centered
        limbs = target.shape.find_limbs(scan_center_camera, scan_dirs_camera, target.position.ravel())

        # return the limbs in the camera frame
        return limbs + target.position.reshape(3, 1), scan_dirs_pixels, scan_dirs_camera

    def extract_limbs(self, image_interpolator: Callable, camera_temperature: Real, target: SceneObject,
                      scan_center: np.ndarray, line_of_sight_sun: np.ndarray) -> \
            Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """
        This method extracts limb points in an image and pairs them to surface points that likely generated them.

        This is completed through the use of 1D cross correlation.

        #. The predicted limb locations in the image and the scan lines are determined using :meth:`predict_limbs`
        #. Scan lines are generated along the scan directions and used to create extracted intensity lines by sampling
           the image and predicted intensity lines by rendering the results of a ray trace along the scan line.
        #. The predicted and extracted intensity lines are cross correlated in 1 dimension using
           :func:`.fft_correlator_1d`
        #. The peak of each correlation line is found using :attr:`peak_finder`.
        #. The peak of the correlation surface is translated into a shift between the predicted and extracted limb
           location in the image and used to compute the extracted limb location.

        The resulting predicted surface points, predicted image points, observed image points, and scan directions
        in the camera frame are then all returned as numpy arrays.

        :param image_interpolator: A callable which returns the interpolated image values for provided [y, x] locations
                                   in the image
        :param camera_temperature: The temperature of the camera in degrees at the time the image was captured
        :param target: The target we are looking for limb points for
        :param scan_center: The center where all of our scan lines will start
        :param line_of_sight_sun: The line of sight of the sun in the image
        :return: The predicted surface points in the camera frame as a 3xn array, the predicted limbs in the image as a
                 2xn array, the observed limbs in the image as a 2xn array, and the scan directions in the camera frame
                 as a 3xn array of unit vectors where n is the :attr:`number_of_scan_lines`
        """

        # predict the limb locations
        predicted_limbs_camera, scan_dirs, scan_dirs_camera = self.predict_limbs(scan_center, line_of_sight_sun,
                                                                                 target, camera_temperature)
        predicted_limbs_pixels = self.camera.model.project_onto_image(predicted_limbs_camera,
                                                                      temperature=camera_temperature)

        # set the distance to search along each scan line 2 times the apparent radius of the target in the image
        # noinspection PyArgumentList
        apparent_radius_pixels = np.linalg.norm(predicted_limbs_pixels - scan_center.reshape(2, 1), axis=0).max()
        search_dist = 2 * apparent_radius_pixels

        # Create an array of where we want to interpolate the image at/shoot rays through
        search_distance_array = np.linspace(-search_dist, search_dist, self.number_of_sample_points)

        # Create an interpolator to figure out the distance from the scan center for the subpixel locations of
        # the correlation
        distance_interpolator = interp1d(np.arange(self.number_of_sample_points), search_distance_array)

        # Get the center of each scan line
        center = (self.number_of_sample_points - 1) // 2

        # Only take the middle of the predicted scan lines since we know the limb will lie in that region
        template_selection = self.number_of_sample_points // 4

        # Determine the deltas to apply to the limb locations
        search_deltas = scan_dirs[:2].T.reshape((-1, 2, 1), order='F') * search_distance_array

        # Get the pixels that we are sampling in the image along each scan line
        search_points_image = search_deltas + predicted_limbs_pixels.reshape((1, 2, -1), order='F')

        # Flatten everything to just 2d matrices instead of nd matrices
        sp_flat = np.hstack(search_points_image)

        # Select the template portion
        sp_flat_template = np.hstack(search_points_image[...,
                                     center - template_selection:center + template_selection + 1])

        # Compute the direction vector through each pixel we are sampling in the template
        direction_vectors = self.camera.model.pixels_to_unit(sp_flat_template, temperature=camera_temperature)

        # Build the rays we are going to trace to determine our predicted scan lines
        render_rays = Rays(np.zeros(3), direction_vectors)

        # Get the predicted scan line illumination inputs
        illum_inputs = self.scene.get_illumination_inputs(render_rays)

        # Compute the scan line illuminations
        illums = self.brdf(illum_inputs).reshape(search_points_image.shape[0], 2 * template_selection + 1)

        # Apply the psf to the predicted illuminations and store the scan lines
        # NOTE(review): the psf object is invoked via __call__ here, while the attribute documentation references
        # an ``apply_1d`` method -- presumably ``__call__`` blurs each row the same way; confirm against
        # :class:`.PointSpreadFunction`
        self.predicted_illums = self.psf(illums)

        # Extract the scan line DN values from the image
        # (the image interpolator expects [y, x] ordering, hence the [::-1])
        self.extracted_illums = image_interpolator(sp_flat[::-1].T).reshape(search_points_image.shape[0],
                                                                            search_points_image.shape[-1])

        # Do the 1d correlations between the extracted and predicted scan lines
        self.correlation_lines = fft_correlator_1d(self.extracted_illums, self.predicted_illums)

        # Find the peak of each correlation line
        self.correlation_peaks = self.peak_finder(self.correlation_lines)

        # Convert the correlation peak locations into signed pixel distances from the predicted limb along each
        # scan line, and shift the predicted limbs by those distances to get the observed limbs
        distances = distance_interpolator(self.correlation_peaks.ravel()).reshape(self.correlation_peaks.shape)
        observed_limbs_pixels = distances.reshape(1, -1) * scan_dirs + predicted_limbs_pixels

        return predicted_limbs_camera, predicted_limbs_pixels, observed_limbs_pixels, scan_dirs_camera
class EllipseMatching(RelNavEstimator):
"""
This class implements GIANT's version of limb based OpNav for regular bodies.
The class provides an interface to perform limb based OpNav for each target body that is predicted to be in an
image. It does this by looping through each target object contained in the :attr:`.Scene.target_objs` attribute
that is requested. For each of the targets, the algorithm:
#. If using limb scanning to extract the limbs, and requested with :attr:`recenter`, identifies the center of
brightness for each target using the :mod:`.moment_algorithm` and moves the a priori target to be along that line
of sight
#. Extracts the observed limbs from the image and pairs them to the target
#. Estimates the relative position between the target and the camera using the observed limbs and the steps discussed
   in the :mod:`.ellipse_matching` documentation
#. Uses the estimated position to get the predicted limb surface location and predicted limb locations in the image
When all of the required data has been successfully loaded into an instance of this class, the :meth:`estimate`
method is used to perform the estimation for the requested image. The results are stored into the
:attr:`observed_bearings` attribute for the observed limb locations and the :attr:`observed_positions` attribute for
the estimated relative position between the target and the camera. In addition, the predicted location for the limbs
for each target are stored in the :attr:`computed_bearings` attribute and the a priori relative position between the
target and the camera is stored in the :attr:`computed_positions` attribute. Finally, the details about the fit are
stored as a dictionary in the appropriate element in the :attr:`details` attribute. Specifically, these
dictionaries will contain the following keys.
=========================== ========================================================================================
Key Description
=========================== ========================================================================================
``'Covariance'`` The 3x3 covariance matrix for the estimated relative position in the camera frame based
on the residuals. This is only available if successful
``'Surface Limb Points'`` The surface points that correspond to the limb points in the target fixed target
centered frame.
``'Failed'`` A message indicating why the fit failed. This will only be present if the fit failed
(so you could do something like ``'Failed' in limb_matching.details[target_ind]`` to
check if something failed. The message should be a human readable description of what
called the failure.
=========================== ========================================================================================
.. warning::
Before calling the :meth:`estimate` method be sure that the scene has been updated to correspond to the correct
image time. This class does not update the scene automatically.
"""
technique: str = 'ellipse_matching'
"""
The name of the technique identifier in the :class:`.RelativeOpNav` class.
"""
observable_type: List[RelNavObservablesType] = [RelNavObservablesType.LIMB, RelNavObservablesType.RELATIVE_POSITION]
"""
The type of observables this technique generates.
"""
def __init__(self, scene: Scene, camera: Camera, image_processing: ImageProcessing,
limb_scanner: Optional[LimbScanner] = None,
extraction_method: Union[LimbExtractionMethods, str] = LimbExtractionMethods.EDGE_DETECTION,
interpolator: type = RegularGridInterpolator,
recenter: bool = True):
"""
:param scene: The :class:`.Scene` object containing the target, light, and obscuring objects.
:param camera: The :class:`.Camera` object containing the camera model and images to be utilized
:param image_processing: The :class:`.ImageProcessing` object to be used to process the images
:param limb_scanner: The :class:`.LimbScanner` object containing the limb scanning settings.
:param extraction_method: The method to use to extract the observed limbs from the image. Should be
``'LIMB_SCANNING'`` or ``'EDGE_DETECTION'``. See :class:`.LimbExtractionMethods` for
details.
:param interpolator: The type of image interpolator to use if the extraction method is set to LIMB_SCANNING.
:param recenter: A flag to estimate the center using the moment algorithm to get a fast rough estimate of the
center-of-figure
"""
# store the scene and camera in the class instance using the super class's init method
super().__init__(scene, camera, image_processing)
# interpret the limb extraction method into the enum
if isinstance(extraction_method, str):
extraction_method = extraction_method.upper()
self.extraction_method: LimbExtractionMethods = LimbExtractionMethods(extraction_method)
"""
The method to use to extract observed limb points from the image.
The valid options are provided in the :class:`LimbExtractionMethods` enumeration
"""
self._limb_scanner: LimbScanner = limb_scanner
"""
The limb scanning instance to use.
"""
self.interpolator: type = interpolator
"""
The type of interpolator to use for the image.
This is ignored if the :attr:`extraction_method` is not set to ``'LIMB_SCANNING'``.
"""
self._edge_detection_limbs: List[NONEARRAY] = [None] * len(self.scene.target_objs)
"""
The extracted limbs from the image in pixels before they have been paired to a target
Until :meth:`estimate` is called this list will be filled with ``None``.
"""
self.limbs_camera: List[NONEARRAY] = [None] * len(self.scene.target_objs)
"""
The limb surface points with respect to the center of the target
Until :meth:`estimate` is called this list will be filled with ``None``.
Each element of this list corresponds to the same element in the :attr:`.Scene.target_objs` list.
"""
self._image_interp: Optional[Callable] = None
"""
The interpolator for the image to use.
This is set on the call to estimate
"""
self._limbs_extracted: bool = False
"""
This flag specifies where limbs have already be extracted from the current image or not.
"""
self.recenter: bool = recenter
"""
A flag specifying whether to locate the center of the target using a moment algorithm before beginning.
If the a priori knowledge of the bearing to the target is poor (outside of the body) then this flag will help
to correct the initial error. See the :mod:`.moment_algorithm` module for details.
"""
# the moment algorithm instance to use if recentering has been requested.
self._moment_algorithm: MomentAlgorithm = MomentAlgorithm(scene, camera, image_processing,
apply_phase_correction=False,
use_apparent_area=True)
"""
The moment algorithm instance to use to recenter if we are using limb scanning
"""
    def extract_and_pair_limbs(self, image: OpNavImage, target: SceneObject, target_ind: int) \
            -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """
        Extract and pair limb points in an image to the surface point on a target that created it.
        For irregular bodies this is an approximate procedure that depends on the current estimate of the state vector.
        See :meth:`.Shape.find_limbs` for details.
        This technique extracts limbs in 2 ways. If :attr:`extraction_method` is ``EDGE_DETECTION``, then all limbs are
        extracted from the image using :meth:`.ImageProcessing.identify_subpixel_limbs`. These extracted limbs are then
        stored and paired to their corresponding targets based on the apparent diameter. This only happens once per
        image since the extracted limb locations in the image are independent of the relative position of the target to
        the camera. If :attr:`extraction_method` is ``LIMB_SCANNING`` then this will extract and pair the limbs for the
        requested target using :meth:`.LimbScanner.extract_limbs`. This is performed every iteration, as the extracted
        limb locations are dependent on the relative position of the target in the scene.
        For both techniques, the paired observed limb location in the image for the target are stored in the appropriate
        element of :attr:`observed_bearings` as a 2xn array of pixel locations.
        :param image: The image that the limbs are to be extracted from
        :param target: The target that the extracted limbs are to be paired to
        :param target_ind: The index of the target that the extracted limbs are to be paired to
        :return: The scan center, the scan center direction, the scan directions, the predicted limbs in the camera, and
                 the predicted limbs in the image.
        """
        # set the scan center: the predicted target center projected into the image
        scan_center = self.camera.model.project_onto_image(target.position,
                                                           temperature=image.temperature)
        # unit vector from the camera to the predicted target center
        scan_center_dir = target.position.ravel().copy()
        scan_center_dir /= np.linalg.norm(scan_center_dir)
        # Determine the illumination direction in the image (handed to the limb extractor,
        # presumably so limbs are only searched on the lit side — confirm in ImageProcessing)
        line_of_sight_sun = self.camera.model.project_directions(self.scene.light_obj.position.ravel())
        if self.extraction_method == LimbExtractionMethods.EDGE_DETECTION:
            if not self._limbs_extracted:
                n_objs = len(self.scene.target_objs)
                # extract the limbs from the image.  Done only once per image: the pixel
                # limb locations do not depend on the estimated target positions.
                self._edge_detection_limbs = self.image_processing.identify_subpixel_limbs(image, -line_of_sight_sun,
                                                                                           num_objs=n_objs)
                # match the limbs to each target
                self._match_limbs_to_targets(image.temperature)
                self._limbs_extracted = True
            extracted_limbs = self.observed_bearings[target_ind]
            # get the scan directions for each extracted limb point
            scan_dirs_pixels = extracted_limbs - scan_center.reshape(2, 1)
            scan_dirs_camera = self.camera.model.pixels_to_unit(scan_center.reshape(2, 1) + scan_dirs_pixels,
                                                                temperature=image.temperature)
            # normalize onto the z=1 image plane, remove the scan-center component, and renormalize
            # so only the in-plane direction from the center towards each limb point remains
            scan_dirs_camera /= scan_dirs_camera[2]
            scan_dirs_camera -= scan_center_dir.reshape(3, 1)/scan_center_dir[2]
            scan_dirs_camera /= np.linalg.norm(scan_dirs_camera, axis=0, keepdims=True)
            try:
                # find the corresponding limbs
                predicted_limbs = target.shape.find_limbs(scan_center_dir, scan_dirs_camera)
                # project them onto the image
                predicted_limbs_image = self.camera.model.project_onto_image(predicted_limbs,
                                                                             temperature=image.temperature)
            except ZeroDivisionError:
                # degenerate geometry inside find_limbs: fall back to zero-filled arrays with
                # the expected shapes (presumably so downstream pairing still iterates — confirm)
                predicted_limbs = np.zeros((3, extracted_limbs.shape[1]), dtype=np.float64)
                predicted_limbs_image = np.zeros(extracted_limbs.shape, dtype=np.float64)
        else:
            # LIMB_SCANNING: delegate extraction AND pairing to the limb scanner.  Redone on
            # every call because the scan depends on the current relative-position estimate.
            (predicted_limbs,
             predicted_limbs_image,
             self.observed_bearings[target_ind],
             scan_dirs_camera) = self._limb_scanner.extract_limbs(self._image_interp, image.temperature, target,
                                                                  scan_center, line_of_sight_sun)
        return scan_center, scan_center_dir, scan_dirs_camera, predicted_limbs, predicted_limbs_image
def _match_limbs_to_targets(self, temperature: Real):
"""
This matches the limb clumps returned by :meth:`.ImageProcessing.identify_subpixel_limbs` to the targets in
:attr:`.Scene.target_objs`.
The matching is done based on apparent size, therefore it is expected that the relative size of each target and
the relative range to each target is mostly correct. (i.e. if in real life target 1 is smaller but closer then
target 2, then in the scene this should also be the case. These can be wrong by a common scale factor, that is
if target 2 is 50% larger than what is truth, then target 1 should also ~50% larger than truth.).
The results are stored in :attr:`observed_bearings`.
"""
apparent_diameters_observed = [np.linalg.norm(limbs.T.reshape((-1, 2, 1)) -
limbs.reshape((1, 2, -1)), axis=1).max(initial=None)
for limbs in self._edge_detection_limbs]
apparent_diameters_predicted = []
for target in self.scene.target_objs:
apparent_diameters_predicted.append(target.get_apparent_diameter(self.camera.model,
temperature=temperature))
sorted_diameters_observed = np.argsort(apparent_diameters_observed)
sorted_diameters_predicted = np.argsort(apparent_diameters_predicted)
for target_ind, limb_ind in zip(sorted_diameters_predicted, sorted_diameters_observed):
self.observed_bearings[target_ind] = self._edge_detection_limbs[limb_ind]
    def estimate(self, image: OpNavImage, include_targets: Optional[List[bool]] = None):
        """
        This method identifies the position of each target in the camera frame using ellipse matching.
        This method first extracts limb observations from an image and matches them to the targets in the scene. Then,
        for each target, the position is estimated from the limb observations.
        .. warning::
            Before calling this method be sure that the scene has been updated to correspond to the correct
            image time. This method does not update the scene automatically.
        :param image: The image to locate the targets in
        :param include_targets: A list specifying whether to process the corresponding target in
                                :attr:`.Scene.target_objs` or ``None``. If ``None`` then all targets are processed.
        """
        if self.extraction_method == LimbExtractionMethods.LIMB_SCANNING:
            # build a subpixel interpolator over the image for use by the limb scanner
            self._image_interp = self.interpolator((np.arange(image.shape[0]), np.arange(image.shape[1])), image,
                                                   bounds_error=False, fill_value=None)
            # If we were requested to recenter using a moment algorithm then do it
            if self.recenter:
                # NOTE(review): leftover debug print — consider routing through logging
                print('recentering', flush=True)
                # Estimate the center using the moment algorithm to get a fast rough estimate of the cof
                self._moment_algorithm.estimate(image, include_targets=include_targets)
                for target_ind, target in self.target_generator(include_targets):
                    if np.isfinite(self._moment_algorithm.observed_bearings[target_ind]).all():
                        # replace only the bearing: keep the a priori range by rescaling the
                        # new unit vector to the old position's norm
                        new_position = self.camera.model.pixels_to_unit(
                            self._moment_algorithm.observed_bearings[target_ind]
                        )
                        new_position *= np.linalg.norm(target.position)
                        target.change_position(new_position)
        # loop through each object in the scene
        for target_ind, target in self.target_generator(include_targets):
            # record the a priori position used for this target as the "computed" position
            relative_position = target.position.copy()
            self.computed_positions[target_ind] = relative_position
            # get the observed limbs for the current target
            (scan_center, scan_center_dir,
             scan_dirs_camera, self.limbs_camera[target_ind],
             self.computed_bearings[target_ind]) = self.extract_and_pair_limbs(image, target, target_ind)
            if self.observed_bearings[target_ind] is None:
                warnings.warn('unable to find any limbs for target {}'.format(target_ind))
                self.details[target_ind] = {'Failed': "Unable to find any limbs for target in the image"}
                continue
            # Drop any invalid limbs (columns containing NaN)
            valid_test = ~np.isnan(self.observed_bearings[target_ind]).any(axis=0)
            self.observed_bearings[target_ind] = self.observed_bearings[target_ind][:, valid_test]
            # extract the shape from the object and see if it is an ellipsoid
            shape = target.shape
            if (not isinstance(shape, Ellipsoid)) and hasattr(shape, 'reference_ellipsoid'):
                # NOTE(review): the two warning fragments concatenate without a space after "ellipsoid."
                warnings.warn("The primary shape is not an ellipsoid but it has a reference ellipsoid."
                              "We're going to use the reference ellipsoid but maybe you should actually use "
                              "limb_matching instead.")
                # if the object isn't an ellipsoid, see if it has a reference ellipsoid we can use
                shape = shape.reference_ellipsoid
            elif not isinstance(shape, Ellipsoid):
                warnings.warn('Invalid shape. Unable to do ellipse matching')
                self.observed_bearings[target_ind] = np.array([np.nan, np.nan])
                self.observed_positions[target_ind] = np.array([np.nan, np.nan, np.nan])
                self.details[target_ind] = {'Failed': "The shape representing the target is not applicable to ellipse "
                                                      "matching"}
                continue
            # Extract the Q transformation matrix (principal frame).  Q maps the ellipsoid
            # into a unit sphere; limb lines of sight become tangents to that sphere.
            q_matrix = np.diag(1 / shape.principal_axes)
            # Get the inverse transformation matrix (principal frame)
            q_inv = np.diag(shape.principal_axes)
            # Now, we need to get the unit vectors in the direction of our measurements
            unit_vectors_camera = self.camera.model.pixels_to_unit(self.observed_bearings[target_ind],
                                                                   temperature=image.temperature)
            # Rotate the unit vectors into the principal axis frame
            unit_vectors_principal = shape.orientation.T @ unit_vectors_camera
            # Now, convert into the unit sphere space and make unit vectors
            right_sphere_cone_vectors = q_matrix @ unit_vectors_principal
            right_sphere_cone_size = np.linalg.norm(right_sphere_cone_vectors, axis=0, keepdims=True)
            right_sphere_cone_vectors /= right_sphere_cone_size
            # Now, we solve for the position in the sphere space: each tangent direction n_i
            # satisfies n_i @ x = 1 at the cone apex, giving an overdetermined linear system
            location_sphere = np.linalg.lstsq(right_sphere_cone_vectors.T,
                                              np.ones(right_sphere_cone_vectors.shape[1]),
                                              rcond=None)[0]
            # Finally, get the position in the camera frame and store it
            self.observed_positions[target_ind] = (shape.orientation @ q_inv @ location_sphere /
                                                   np.sqrt(np.inner(location_sphere, location_sphere) - 1)).ravel()
            # Get the limb locations in the camera frame, rescanning from the *solved* center
            scan_center_dir = self.observed_positions[target_ind].copy()
            scan_center_dir /= np.linalg.norm(scan_center_dir)
            scan_dirs = unit_vectors_camera/unit_vectors_camera[2] - scan_center_dir.reshape(3, 1) / scan_center_dir[2]
            scan_dirs /= np.linalg.norm(scan_dirs[:2], axis=0, keepdims=True)
            self.limbs_camera[target_ind] = shape.find_limbs(scan_center_dir, scan_dirs,
                                                             shape.center - self.observed_positions[target_ind].ravel())
            self.computed_bearings[target_ind] = self.camera.model.project_onto_image(self.limbs_camera[target_ind],
                                                                                      temperature=image.temperature)
            # express the limb points relative to the solved target center, in the body-fixed frame
            target_centered_fixed_limb = self.limbs_camera[target_ind] - \
                                         self.observed_positions[target_ind].reshape(3, 1)
            target_centered_fixed_limb = target.orientation.matrix.T @ target_centered_fixed_limb
            # compute the covariance matrix: propagate the observed-vs-computed pixel scatter
            # through the pixel->unit-vector Jacobian into sphere space, then back to camera frame
            delta_ns = location_sphere.reshape(3, 1) - right_sphere_cone_vectors
            decomp_matrix = (q_matrix@shape.orientation.T).reshape(1, 3, 3)
            meas_std = (self.observed_bearings[target_ind] - self.computed_bearings[target_ind]).std(axis=-1)
            model_jacobian = self.camera.model.compute_unit_vector_jacobian(self.observed_bearings[target_ind],
                                                                            temperature=image.temperature)
            meas_cov = model_jacobian@np.diag(meas_std**2)@model_jacobian.swapaxes(-1, -2)
            # per-measurement information weights for the weighted normal equations
            inf_eta = np.diag(right_sphere_cone_size.ravel()**2 *
                              (delta_ns.T.reshape(-1, 1, 3) @
                               decomp_matrix @
                               meas_cov @
                               decomp_matrix.swapaxes(-1, -2) @
                               delta_ns.reshape(-1, 3, 1)).squeeze()**(-1))
            cov_location_sphere = np.linalg.pinv(right_sphere_cone_vectors @
                                                 inf_eta @
                                                 right_sphere_cone_vectors.T)
            inner_m1 = location_sphere.T@location_sphere - 1
            # Jacobian of the sphere-space -> camera-frame position mapping
            transform_camera = (-shape.orientation @ q_inv @
                                (np.eye(3) - np.outer(location_sphere, location_sphere) / inner_m1) /
                                np.sqrt(inner_m1))
            covariance_camera = transform_camera@cov_location_sphere@transform_camera.T
            # store the details of the fit
            self.details[target_ind] = {"Surface Limb Points": target_centered_fixed_limb,
                                        "Covariance": covariance_camera}
def reset(self):
"""
This method resets the observed/computed attributes, the details attribute, and the limb attributes to have
``None``.
This method is called by :class:`.RelativeOpNav` between images to ensure that data is not accidentally applied
from one image to the next.
"""
super().reset()
self._edge_detection_limbs = [None] * len(self.scene.target_objs)
self.limbs_camera = [None] * len(self.scene.target_objs)
self._limbs_extracted = False
self._image_interp = None
| 56.657143 | 120 | 0.655915 | 41,023 | 0.827494 | 0 | 0 | 0 | 0 | 0 | 0 | 31,251 | 0.630378 |
bc2cee5c5a4a0949cd5f8bd23a23aa640050ca78 | 5,071 | py | Python | pytorch/probability/distributions_.py | NunoEdgarGFlowHub/autoregressive-energy-machines | eb5517a513cf4e99db674fa41170f018e212f1e2 | [
"MIT"
] | 83 | 2019-04-12T10:23:23.000Z | 2022-03-30T12:40:43.000Z | pytorch/probability/distributions_.py | sten2lu/autoregressive-energy-machines | eb5517a513cf4e99db674fa41170f018e212f1e2 | [
"MIT"
] | null | null | null | pytorch/probability/distributions_.py | sten2lu/autoregressive-energy-machines | eb5517a513cf4e99db674fa41170f018e212f1e2 | [
"MIT"
] | 11 | 2019-04-12T11:26:00.000Z | 2020-05-12T01:14:21.000Z | import math
import sys
import torch
from numbers import Number
from torch import distributions
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import _standard_normal, broadcast_all
q = sys.exit
class Normal_(ExponentialFamily):
    r"""
    Creates a normal (also called Gaussian) distribution parameterized by
    :attr:`loc` and :attr:`scale`.
    Example::
        >>> m = Normal(torch.tensor([0.0]), torch.tensor([1.0]))
        >>> m.sample() # normally distributed with loc=0 and scale=1
        tensor([ 0.1046])
    Args:
        loc (float or Tensor): mean of the distribution (often referred to as mu)
        scale (float or Tensor): standard deviation of the distribution
            (often referred to as sigma)
    NOTE(review): this appears to be a verbatim local fork of
    ``torch.distributions.Normal`` (it reuses torch's internal helpers) —
    presumably copied so it can be modified locally; confirm before diverging
    from upstream.
    """
    arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
    support = constraints.real
    has_rsample = True
    _mean_carrier_measure = 0  # the carrier measure of the normal exponential family is constant
    @property
    def mean(self):
        # mean of a normal is its location parameter
        return self.loc
    @property
    def stddev(self):
        # standard deviation is the scale parameter
        return self.scale
    @property
    def variance(self):
        return self.stddev.pow(2)
    def __init__(self, loc, scale, validate_args=None):
        # broadcast_all promotes python numbers to tensors and broadcasts shapes
        self.loc, self.scale = broadcast_all(loc, scale)
        if isinstance(loc, Number) and isinstance(scale, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.loc.size()
        super(Normal_, self).__init__(batch_shape, validate_args=validate_args)
    def expand(self, batch_shape, _instance=None):
        # expand parameters to a new batch shape without copying data
        new = self._get_checked_instance(Normal_, _instance)
        batch_shape = torch.Size(batch_shape)
        new.loc = self.loc.expand(batch_shape)
        new.scale = self.scale.expand(batch_shape)
        super(Normal_, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new
    def sample(self, sample_shape=torch.Size()):
        # non-reparameterized sampling (no gradients); use rsample for the pathwise estimator
        shape = self._extended_shape(sample_shape)
        with torch.no_grad():
            return torch.normal(self.loc.expand(shape), self.scale.expand(shape))
    def rsample(self, sample_shape=torch.Size()):
        # reparameterization trick: loc + eps * scale with eps ~ N(0, 1)
        shape = self._extended_shape(sample_shape)
        eps = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device)
        return self.loc + eps * self.scale
    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        log_scale = math.log(self.scale) if isinstance(self.scale,
                                                       Number) else self.scale.log()
        # closed form: -0.5 * ((x - mu) / sigma)^2 - log(sigma) - 0.5 * log(2*pi)
        return -0.5 * ((value - self.loc) / self.scale) ** 2 - log_scale - 0.5 * math.log(
            2 * math.pi)
    def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        # standard normal CDF via the error function
        return 0.5 * (1 + torch.erf(
            (value - self.loc) * self.scale.reciprocal() / math.sqrt(2)))
    def icdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return self.loc + self.scale * torch.erfinv(2 * value - 1) * math.sqrt(2)
    def entropy(self):
        # differential entropy: 0.5 * log(2*pi*e*sigma^2)
        return 0.5 + 0.5 * math.log(2 * math.pi) + torch.log(self.scale)
    @property
    def _natural_params(self):
        # natural parameterization (eta1, eta2) = (mu/sigma^2, -1/(2*sigma^2))
        return (self.loc / self.scale.pow(2), -0.5 * self.scale.pow(2).reciprocal())
    def _log_normalizer(self, x, y):
        return -0.25 * x.pow(2) / y + 0.5 * torch.log(-math.pi / y)
class MixtureSameFamily(distributions.Distribution):
    """
    Minimal mixture distribution over components of the same family.
    ``mixture_distribution`` supplies the mixture weights/assignments (its samples are used
    as a mask, so it is presumably a one-hot categorical — confirm against callers) and
    ``components_distribution`` supplies the per-component distributions.  Batch/event
    shapes are inherited from the components.  The shape letters in the comments below
    follow the code: S = samples, B = batch, D = dims, M = mixture components.
    """
    def __init__(self, mixture_distribution, components_distribution):
        self.mixture_distribution = mixture_distribution
        self.components_distribution = components_distribution
        super().__init__(
            batch_shape=self.components_distribution.batch_shape,
            event_shape=self.components_distribution.event_shape
        )
    def sample(self, sample_shape=torch.Size()):
        """Draw samples by masking per-component samples with the sampled mixture assignment."""
        mixture_mask = self.mixture_distribution.sample(sample_shape)  # [S, B, D, M]
        # pad a batch axis when the mixture sample comes back 3-D — assumes B collapses to 1 here
        if len(mixture_mask.shape) == 3:
            mixture_mask = mixture_mask[:, None, ...]
        # NOTE: rsample on the components, so component draws are reparameterized even
        # though the discrete mask is not
        components_samples = self.components_distribution.rsample(
            sample_shape)  # [S, B, D, M]
        # zero out all but the selected component and collapse the mixture axis
        samples = torch.sum(mixture_mask * components_samples, dim=-1)  # [S, B, D]
        return samples
    def log_prob(self, value):
        """Mixture log-density via logsumexp over components (value presumably [B, D, S] — confirm)."""
        # pad value for evaluation under component density
        value = value.permute(2, 0, 1)  # [S, B, D]
        value = value[..., None].repeat(1, 1, 1, self.batch_shape[-1])  # [S, B, D, M]
        log_prob_components = self.components_distribution.log_prob(value).permute(1, 2,
                                                                                   3, 0)
        # calculate numerically stable log coefficients, and pad
        log_prob_mixture = self.mixture_distribution.logits
        log_prob_mixture = log_prob_mixture[..., None]
        return torch.logsumexp(log_prob_mixture + log_prob_components, dim=-2)
def main():
    """Placeholder entry point; this module is intended to be imported, not run."""
    pass


if __name__ == '__main__':
    main()
| 36.482014 | 90 | 0.634589 | 4,717 | 0.930191 | 0 | 0 | 302 | 0.059554 | 0 | 0 | 693 | 0.136659 |
bc2e49c13ad370198ee4d1ae34071ef594b3447a | 376 | py | Python | basicplot.py | sirgogo/docker-meep | 4c41cec929ce829662fe7b2484aa8e8bf490a0a5 | [
"MIT"
] | 2 | 2020-02-22T05:50:14.000Z | 2022-02-26T20:39:34.000Z | basicplot.py | sirgogo/docker-meep | 4c41cec929ce829662fe7b2484aa8e8bf490a0a5 | [
"MIT"
] | null | null | null | basicplot.py | sirgogo/docker-meep | 4c41cec929ce829662fe7b2484aa8e8bf490a0a5 | [
"MIT"
] | 7 | 2017-09-05T11:08:31.000Z | 2021-11-16T00:56:30.000Z | import matplotlib
matplotlib.use('Agg') # this lets us do some headless stuff
import matplotlib.pylab as plt
import numpy as np
x = np.asarray([0,5,2])
y = np.asarray([0,1,3])
f = plt.figure()
ax = f.add_subplot(111)
ax.plot(x,y)
#plt.show() # we have a headless display, can't do this!
f.savefig('basicplot.eps',format='eps',orientation='portrait',transparent=True,dpi=5e4)
| 28.923077 | 87 | 0.720745 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.340426 |
bc2e6b8739ca8dec99ed92907eb1665877d1fd33 | 659 | py | Python | py-practice/py-practice-hackerrank/Algorithms/Implementation/utopian_tree.py | beenorgone-notebook/python-notebook | 48e42b5be70e7ca06e8440a75516c055c961371c | [
"MIT"
] | null | null | null | py-practice/py-practice-hackerrank/Algorithms/Implementation/utopian_tree.py | beenorgone-notebook/python-notebook | 48e42b5be70e7ca06e8440a75516c055c961371c | [
"MIT"
] | null | null | null | py-practice/py-practice-hackerrank/Algorithms/Implementation/utopian_tree.py | beenorgone-notebook/python-notebook | 48e42b5be70e7ca06e8440a75516c055c961371c | [
"MIT"
] | 1 | 2018-04-08T13:24:39.000Z | 2018-04-08T13:24:39.000Z | # https://www.hackerrank.com/challenges/utopian-tree
def tree_height(tree, N, start):
    """
    Return the height of a Utopian tree after ``N`` growth periods.

    The tree doubles each spring and grows by one metre each summer.
    ``tree`` is the starting height and ``start`` names the season of the
    first period (``'spring'`` or ``'summer'``).

    :raises ValueError: if ``start`` is not a recognised season (and N > 0)
    """
    if not N:
        return tree
    if start == 'spring':
        # one full year = a spring (double) followed by a summer (+1)
        for _ in range(N // 2):
            tree = 2 * tree + 1
        # a leftover odd period is one final spring
        return 2 * tree if N % 2 else tree
    if start == 'summer':
        # one full year = a summer (+1) followed by a spring (double)
        for _ in range(N // 2):
            tree = 2 * (tree + 1)
        # a leftover odd period is one final summer
        return tree + 1 if N % 2 else tree
    raise ValueError('start season must be spring or summer')
# read the number of test cases, then one period count per line
num_cases = int(input().strip())
for _ in range(num_cases):
    print(tree_height(1, int(input().strip()), start='spring'))
| 24.407407 | 65 | 0.502276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.174507 |
bc2f04c16bdbecd969f22b63a971bad1b03dcc0a | 56 | py | Python | piodispatch/__init__.py | Kiruse/PioDispatch | ae4b6199795d2cc0ad8451cc17b57a4b517fdbda | [
"MIT"
] | null | null | null | piodispatch/__init__.py | Kiruse/PioDispatch | ae4b6199795d2cc0ad8451cc17b57a4b517fdbda | [
"MIT"
] | null | null | null | piodispatch/__init__.py | Kiruse/PioDispatch | ae4b6199795d2cc0ad8451cc17b57a4b517fdbda | [
"MIT"
] | null | null | null | from .piodispatch import dispatch, ascoroutine, shutdown | 56 | 56 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
bc30514100a65cd97885d46696267a630becc87d | 266 | py | Python | Algorithms/Maximum_Number_of_Coins_You_Can_Get/main.py | ugurcan-sonmez-95/LeetCode | e463a424c2d781f67be31ae42bc3c5c896db6017 | [
"MIT"
] | 1 | 2020-10-01T09:12:09.000Z | 2020-10-01T09:12:09.000Z | Algorithms/Maximum_Number_of_Coins_You_Can_Get/main.py | ugurcan-sonmez-95/LeetCode | e463a424c2d781f67be31ae42bc3c5c896db6017 | [
"MIT"
] | null | null | null | Algorithms/Maximum_Number_of_Coins_You_Can_Get/main.py | ugurcan-sonmez-95/LeetCode | e463a424c2d781f67be31ae42bc3c5c896db6017 | [
"MIT"
] | null | null | null | ### Maximum Number of Coins You Can Get - Solution
class Solution:
    """Greedy solution to "Maximum Number of Coins You Can Get" (LeetCode 1561)."""

    def maxCoins(self, piles: List[int]) -> int:
        """
        Return the maximum coins you can collect when piles are split in triples:
        Alice takes the largest pile of each triple, you take the second largest,
        and Bob takes the smallest.

        Strategy: sort ascending, concede the smallest ``n // 3`` piles to Bob,
        then take every second pile from the remainder.  O(n log n) time.

        :param piles: pile sizes; ``len(piles)`` is a multiple of 3
        :return: the maximum number of coins you can collect
        """
        # sorted() instead of piles.sort(): do not mutate the caller's list
        ordered = sorted(piles)
        return sum(ordered[len(ordered) // 3::2])
bc3126c570e95df36a490d3edfbfb2c0283d8529 | 986 | py | Python | learner2/pupil/migrations/0012_sessiondata.py | Arunkumar99/learners | 744006a805fb65087e8c41333bd697ec24b7a392 | [
"BSD-3-Clause"
] | 3 | 2020-05-30T17:08:51.000Z | 2021-12-14T02:55:19.000Z | learner2/pupil/migrations/0012_sessiondata.py | Arunkumar99/learners | 744006a805fb65087e8c41333bd697ec24b7a392 | [
"BSD-3-Clause"
] | null | null | null | learner2/pupil/migrations/0012_sessiondata.py | Arunkumar99/learners | 744006a805fb65087e8c41333bd697ec24b7a392 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: creates the SessionData table,
    # which records a pupil's per-session results and performance images.
    # NOTE(review): the empty bytes literals (upload_to=b'') look like a
    # Python 2 artifact and place uploads directly under MEDIA_ROOT — confirm
    # this is intentional before editing.
    dependencies = [
        ('pupil', '0011_questions_set_id'),
    ]
    operations = [
        migrations.CreateModel(
            name='SessionData',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('user_id', models.IntegerField(default=0)),
                ('session_count', models.IntegerField(default=0)),
                ('result', models.CharField(max_length=10)),
                ('level', models.IntegerField(default=0)),
                ('performance1', models.ImageField(upload_to=b'')),
                ('performance2', models.ImageField(upload_to=b'')),
                ('performance3', models.ImageField(upload_to=b'')),
                ('csv_file', models.FileField(upload_to=b'')),
            ],
        ),
    ]
| 34 | 114 | 0.56998 | 877 | 0.889452 | 0 | 0 | 0 | 0 | 0 | 0 | 177 | 0.179513 |
bc328c1d04ee6b4b30d02e5e2fb637b331521485 | 23,497 | py | Python | mediapipe_utils.py | brightereyeslynxes/corpusLabe_prototype | 806af365d1256d0fadd7ab1f4f54a1233278fee5 | [
"MIT"
] | null | null | null | mediapipe_utils.py | brightereyeslynxes/corpusLabe_prototype | 806af365d1256d0fadd7ab1f4f54a1233278fee5 | [
"MIT"
] | null | null | null | mediapipe_utils.py | brightereyeslynxes/corpusLabe_prototype | 806af365d1256d0fadd7ab1f4f54a1233278fee5 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from collections import namedtuple
from math import ceil, sqrt, exp, pi, floor, sin, cos, atan2, gcd
import time
from collections import deque, namedtuple
# Dictionary that maps from joint names to keypoint indices.
KEYPOINT_DICT = {
    # face
    "nose": 0,
    "left_eye_inner": 1,
    "left_eye": 2,
    "left_eye_outer": 3,
    "right_eye_inner": 4,
    "right_eye": 5,
    "right_eye_outer": 6,
    "left_ear": 7,
    "right_ear": 8,
    "mouth_left": 9,
    "mouth_right": 10,
    # upper body / arms / hands
    "left_shoulder": 11,
    "right_shoulder": 12,
    "left_elbow": 13,
    "right_elbow": 14,
    "left_wrist": 15,
    "right_wrist": 16,
    "left_pinky": 17,
    "right_pinky": 18,
    "left_index": 19,
    "right_index": 20,
    "left_thumb": 21,
    "right_thumb": 22,
    # lower body / legs / feet
    "left_hip": 23,
    "right_hip": 24,
    "left_knee": 25,
    "right_knee": 26,
    "left_ankle": 27,
    "right_ankle": 28,
    "left_heel": 29,
    "right_heel": 30,
    "left_foot_index": 31,
    "right_foot_index": 32
}
class Body:
    def __init__(self, pd_score=None, pd_box=None, pd_kps=None):
        """
        Container for a single pose detection and the quantities derived from it.

        Set here:
            pd_score : detection confidence score
            pd_box   : detection box [x, y, w, h], normalized [0,1] in the squared image
            pd_kps   : detection keypoint coordinates [x, y], normalized [0,1] in the squared image

        Attached later by the pipeline:
            rect_x_center, rect_y_center : rotated bounding-rectangle center, normalized [0,1] (squared image)
            rect_w, rect_h               : rotated bounding-rectangle size, normalized (may exceed 1)
            rotation                     : rectangle rotation with respect to the y-axis, in radians
            rect_x_center_a, rect_y_center_a, rect_w_a, rect_h_a : same quantities in pixels (squared image)
            rect_points    : the 4 rectangle corners in pixels (in the squared image during
                             processing, in the original rectangular image when returned)
            lm_score       : global landmark score
            norm_landmarks : 3D landmarks in the rotated rectangle, normalized [0,1]
            landmarks      : 3D landmarks in pixels in the original rectangular image
        """
        self.pd_score = pd_score
        self.pd_box = pd_box
        self.pd_kps = pd_kps

    def print(self):
        # dump every attribute currently attached to this detection, one per line
        for attribute, value in vars(self).items():
            print("%s: %s" % (attribute, value))
# Configuration bundle for SSD anchor generation, mirroring the fields of
# mediapipe's SsdAnchorsCalculatorOptions proto.
SSDAnchorOptions = namedtuple('SSDAnchorOptions', [
    'num_layers', 'min_scale', 'max_scale',
    'input_size_height', 'input_size_width',
    'anchor_offset_x', 'anchor_offset_y',
    'strides', 'aspect_ratios',
    'reduce_boxes_in_lowest_layer',
    'interpolated_scale_aspect_ratio',
    'fixed_anchor_size'])
def calculate_scale(min_scale, max_scale, stride_index, num_strides):
    """
    Interpolate the SSD anchor scale for one stride layer.

    A single stride layer uses the midpoint of [min_scale, max_scale];
    otherwise the scale varies linearly from min_scale on the first layer
    to max_scale on the last.
    """
    if num_strides == 1:
        return 0.5 * (min_scale + max_scale)
    return min_scale + (max_scale - min_scale) * stride_index / (num_strides - 1)
def generate_anchors(options):
    """
    Generate the SSD anchor grid described by ``options``.

    Port of mediapipe's SsdAnchorsCalculator:
    https://github.com/google/mediapipe/blob/master/mediapipe/calculators/tflite/ssd_anchors_calculator.cc

    :param options: an :data:`SSDAnchorOptions` namedtuple with the layer strides, scales,
                    aspect ratios, and offsets
    :return: numpy array of shape (num_anchors, 4) holding [x_center, y_center, w, h],
             all normalized by the input size
    """
    anchors = []
    layer_id = 0
    n_strides = len(options.strides)
    while layer_id < n_strides:
        anchor_height = []
        anchor_width = []
        aspect_ratios = []
        scales = []
        # For same strides, we merge the anchors in the same order.
        last_same_stride_layer = layer_id
        while last_same_stride_layer < n_strides and \
                options.strides[last_same_stride_layer] == options.strides[layer_id]:
            scale = calculate_scale(options.min_scale, options.max_scale, last_same_stride_layer, n_strides)
            if last_same_stride_layer == 0 and options.reduce_boxes_in_lowest_layer:
                # For first layer, it can be specified to use predefined anchors.
                aspect_ratios += [1.0, 2.0, 0.5]
                scales += [0.1, scale, scale]
            else:
                aspect_ratios += options.aspect_ratios
                scales += [scale] * len(options.aspect_ratios)
            if options.interpolated_scale_aspect_ratio > 0:
                # extra anchor at the geometric mean of this and the next layer's scale
                if last_same_stride_layer == n_strides -1:
                    scale_next = 1.0
                else:
                    scale_next = calculate_scale(options.min_scale, options.max_scale, last_same_stride_layer+1, n_strides)
                scales.append(sqrt(scale * scale_next))
                aspect_ratios.append(options.interpolated_scale_aspect_ratio)
            last_same_stride_layer += 1
        # convert (scale, aspect ratio) pairs into per-anchor width/height
        for i,r in enumerate(aspect_ratios):
            ratio_sqrts = sqrt(r)
            anchor_height.append(scales[i] / ratio_sqrts)
            anchor_width.append(scales[i] * ratio_sqrts)
        stride = options.strides[layer_id]
        feature_map_height = ceil(options.input_size_height / stride)
        feature_map_width = ceil(options.input_size_width / stride)
        # one anchor per (cell, shape): centers lie on the feature-map grid
        for y in range(feature_map_height):
            for x in range(feature_map_width):
                for anchor_id in range(len(anchor_height)):
                    x_center = (x + options.anchor_offset_x) / feature_map_width
                    y_center = (y + options.anchor_offset_y) / feature_map_height
                    # new_anchor = Anchor(x_center=x_center, y_center=y_center)
                    if options.fixed_anchor_size:
                        new_anchor = [x_center, y_center, 1.0, 1.0]
                        # new_anchor.w = 1.0
                        # new_anchor.h = 1.0
                    else:
                        new_anchor = [x_center, y_center, anchor_width[anchor_id], anchor_height[anchor_id]]
                        # new_anchor.w = anchor_width[anchor_id]
                        # new_anchor.h = anchor_height[anchor_id]
                    anchors.append(new_anchor)
        layer_id = last_same_stride_layer
    return np.array(anchors)
def generate_blazepose_anchors():
    """
    Build the SSD anchor set used by mediapipe's pose detection model (224x224 input).

    The parameter values replicate those in pose_detection_cpu.pbtxt:
    https://github.com/google/mediapipe/blob/master/mediapipe/modules/pose_detection/pose_detection_cpu.pbtxt

    :return: numpy array of anchors [x_center, y_center, w, h]
    """
    blazepose_options = SSDAnchorOptions(
        num_layers=5,
        strides=[8, 16, 32, 32, 32],
        min_scale=0.1484375,
        max_scale=0.75,
        input_size_width=224,
        input_size_height=224,
        anchor_offset_x=0.5,
        anchor_offset_y=0.5,
        aspect_ratios=[1.0],
        reduce_boxes_in_lowest_layer=False,
        interpolated_scale_aspect_ratio=1.0,
        fixed_anchor_size=True)
    return generate_anchors(blazepose_options)
def decode_bboxes(score_thresh, scores, bboxes, anchors, best_only=False):
    """
    Decode the raw SSD detection tensors into a list of :class:`Body` detections.

    Mirrors mediapipe's TensorsToDetectionsCalculator as configured in
    https://github.com/google/mediapipe/blob/master/mediapipe/modules/pose_detection/pose_detection_cpu.pbtxt
    (version 0.8.4: num_boxes: 2254, num_coords: 12, sigmoid_score: true,
    reverse_output_order: true, x/y/w/h_scale: 224.0, min_score_thresh: 0.5).

    Each detection's box encloses the detected face, and 4 extra keypoints let the
    caller build a (rotated) body region of interest:
      Key point 0 - mid hip center
      Key point 1 - point that encodes size & rotation (for full body)
      Key point 2 - mid shoulder center
      Key point 3 - point that encodes size & rotation (for upper body)

    :param score_thresh: minimum (post-sigmoid) score for a detection to be kept
    :param scores: raw logits, shape (num_anchors,)
    :param bboxes: raw regressions, shape (num_anchors, 12): 4 box values (cx, cy, w, h)
                   followed by 4 keypoints (x, y each)
    :param anchors: SSD anchors, shape (num_anchors, 4) as [x_center, y_center, w, h]
    :param best_only: if True, decode only the single highest-scoring anchor
    :return: list of :class:`Body` (possibly empty)
    """
    bodies = []
    # the model outputs logits; squash to [0, 1] scores
    scores = 1 / (1 + np.exp(-scores))
    if best_only:
        best_id = np.argmax(scores)
        if scores[best_id] < score_thresh:
            return bodies
        det_scores = scores[best_id:best_id + 1]
        det_bboxes = bboxes[best_id:best_id + 1]
        det_anchors = anchors[best_id:best_id + 1]
    else:
        detection_mask = scores > score_thresh
        det_scores = scores[detection_mask]
        if det_scores.size == 0:
            return bodies
        det_bboxes = bboxes[detection_mask]
        det_anchors = anchors[detection_mask]
    scale = 224  # x_scale, y_scale, w_scale, h_scale from the pbtxt
    # Decode all 6 (x, y) pairs at once (box center + 4 keypoints):
    #   v = v * anchor_wh / scale + anchor_center
    det_bboxes = det_bboxes * np.tile(det_anchors[:, 2:4], 6) / scale + np.tile(det_anchors[:, 0:2], 6)
    # w and h should not have been offset by the anchor center - remove it again
    det_bboxes[:, 2:4] = det_bboxes[:, 2:4] - det_anchors[:, 0:2]
    # convert the center to the top-left corner: box = [cx - w*0.5, cy - h*0.5, w, h]
    # (bug fix: the x offset previously used h (det_bboxes[:, 3:4]) for both axes,
    # which was only correct when the predicted box happened to be square)
    det_bboxes[:, 0:2] = det_bboxes[:, 0:2] - det_bboxes[:, 2:4] * 0.5
    for i in range(det_bboxes.shape[0]):
        score = det_scores[i]
        box = det_bboxes[i, 0:4]
        # the 4 keypoints follow the box in (x, y) pairs
        kps = [det_bboxes[i, 4 + 2 * kp:6 + 2 * kp] for kp in range(4)]
        bodies.append(Body(float(score), box, kps))
    return bodies
def non_max_suppression(bodies, nms_thresh):
    """
    Filter overlapping detections with OpenCV's non-maximum suppression.

    cv2.dnn.NMSBoxes wants integer boxes [x, y, w, h]; our boxes are normalized floats
    in [0, 1], so they are scaled by an arbitrary factor of 1000 first (only the relative
    overlap matters for NMS).

    :param bodies: list of Body detections with pd_box and pd_score set
    :param nms_thresh: IoU threshold above which the lower-scoring box is suppressed
    :return: the surviving Body detections
    """
    boxes = [[int(x * 1000) for x in r.pd_box] for r in bodies]
    scores = [r.pd_score for r in bodies]
    indices = cv2.dnn.NMSBoxes(boxes, scores, 0, nms_thresh)
    # Bug fix: NMSBoxes returns () when nothing survives, an (N, 1) array before
    # OpenCV 4.5.4 and a flat (N,) array afterwards.  The old `i[0]` indexing assumed
    # the (N, 1) layout and crashed on newer OpenCV; reshape(-1) handles all cases.
    return [bodies[i] for i in np.asarray(indices).reshape(-1)]
def normalize_radians(angle):
    """Wrap ``angle`` (radians) into the interval [-pi, pi)."""
    full_turn = 2 * pi
    return angle - full_turn * floor((angle + pi) / full_turn)
def rot_vec(vec, rotation):
vx, vy = vec
return [vx * cos(rotation) - vy * sin(rotation), vx * sin(rotation) + vy * cos(rotation)]
def detections_to_rect(body, kp_pair=[0,1]):
# https://github.com/google/mediapipe/blob/master/mediapipe/modules/pose_landmark/pose_detection_to_roi.pbtxt
# # Converts pose detection into a rectangle based on center and scale alignment
# # points. Pose detection contains four key points: first two for full-body pose
# # and two more for upper-body pose.
# node {
# calculator: "SwitchContainer"
# input_side_packet: "ENABLE:upper_body_only"
# input_stream: "DETECTION:detection"
# input_stream: "IMAGE_SIZE:image_size"
# output_stream: "NORM_RECT:raw_roi"
# options {
# [mediapipe.SwitchContainerOptions.ext] {
# contained_node: {
# calculator: "AlignmentPointsRectsCalculator"
# options: {
# [mediapipe.DetectionsToRectsCalculatorOptions.ext] {
# rotation_vector_start_keypoint_index: 0
# rotation_vector_end_keypoint_index: 1
# rotation_vector_target_angle_degrees: 90
# }
# }
# }
# contained_node: {
# calculator: "AlignmentPointsRectsCalculator"
# options: {
# [mediapipe.DetectionsToRectsCalculatorOptions.ext] {
# rotation_vector_start_keypoint_index: 2
# rotation_vector_end_keypoint_index: 3
# rotation_vector_target_angle_degrees: 90
# }
# }
# }
# }
# }
# }
target_angle = pi * 0.5 # 90 = pi/2
# AlignmentPointsRectsCalculator : https://github.com/google/mediapipe/blob/master/mediapipe/calculators/util/alignment_points_to_rects_calculator.cc
x_center, y_center = body.pd_kps[kp_pair[0]]
x_scale, y_scale = body.pd_kps[kp_pair[1]]
# Bounding box size as double distance from center to scale point.
box_size = sqrt((x_scale-x_center)**2 + (y_scale-y_center)**2) * 2
body.rect_w = box_size
body.rect_h = box_size
body.rect_x_center = x_center
body.rect_y_center = y_center
rotation = target_angle - atan2(-(y_scale - y_center), x_scale - x_center)
body.rotation = normalize_radians(rotation)
def rotated_rect_to_points(cx, cy, w, h, rotation):
b = cos(rotation) * 0.5
a = sin(rotation) * 0.5
points = []
p0x = cx - a*h - b*w
p0y = cy + b*h - a*w
p1x = cx + a*h - b*w
p1y = cy - b*h - a*w
p2x = int(2*cx - p0x)
p2y = int(2*cy - p0y)
p3x = int(2*cx - p1x)
p3y = int(2*cy - p1y)
p0x, p0y, p1x, p1y = int(p0x), int(p0y), int(p1x), int(p1y)
return [[p0x,p0y], [p1x,p1y], [p2x,p2y], [p3x,p3y]]
def rect_transformation(body, w, h, scale = 1.25):
"""
w, h : image input shape
"""
# https://github.com/google/mediapipe/blob/master/mediapipe/modules/pose_landmark/pose_detection_to_roi.pbtxt
# # Expands pose rect with marging used during training.
# node {
# calculator: "RectTransformationCalculator"
# input_stream: "NORM_RECT:raw_roi"
# input_stream: "IMAGE_SIZE:image_size"
# output_stream: "roi"
# options: {
# [mediapipe.RectTransformationCalculatorOptions.ext] {
# Version 0831:
# scale_x: 1.5
# scale_y: 1.5
# Version 084:
# scale_x: 1.25
# scale_y: 1.25
# square_long: true
# }
# }
# }
scale_x = scale
scale_y = scale
shift_x = 0
shift_y = 0
width = body.rect_w
height = body.rect_h
rotation = body.rotation
if rotation == 0:
body.rect_x_center_a = (body.rect_x_center + width * shift_x) * w
body.rect_y_center_a = (body.rect_y_center + height * shift_y) * h
else:
x_shift = (w * width * shift_x * cos(rotation) - h * height * shift_y * sin(rotation))
y_shift = (w * width * shift_x * sin(rotation) + h * height * shift_y * cos(rotation))
body.rect_x_center_a = body.rect_x_center*w + x_shift
body.rect_y_center_a = body.rect_y_center*h + y_shift
# square_long: true
long_side = max(width * w, height * h)
body.rect_w_a = long_side * scale_x
body.rect_h_a = long_side * scale_y
body.rect_points = rotated_rect_to_points(body.rect_x_center_a, body.rect_y_center_a, body.rect_w_a, body.rect_h_a, body.rotation)
def warp_rect_img(rect_points, img, w, h):
src = np.array(rect_points[1:], dtype=np.float32) # rect_points[0] is left bottom point !
dst = np.array([(0, 0), (w, 0), (w, h)], dtype=np.float32)
mat = cv2.getAffineTransform(src, dst)
return cv2.warpAffine(img, mat, (w, h))
def distance(a, b):
"""
a, b: 2 points in 3D (x,y,z)
"""
return np.linalg.norm(a-b)
def angle(a, b, c):
# https://stackoverflow.com/questions/35176451/python-code-to-calculate-angle-between-three-point-using-their-3d-coordinates
# a, b and c : points as np.array([x, y, z])
ba = a - b
bc = c - b
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
angle = np.arccos(cosine_angle)
return np.degrees(angle)
# Filtering
class LowPassFilter:
def __init__(self, alpha=1.0):
self.alpha = alpha
self.initialized = False
def apply(self, value):
# Note that value can be a scalar or a numpy array
if self.initialized:
v = self.alpha * value + (1.0 - self.alpha) * self.stored_value
else:
v = value
self.initialized = True
self.stored_value = v
return v
def apply_with_alpha(self, value, alpha):
self.alpha = alpha
return self.apply(value)
# RelativeVelocityFilter : https://github.com/google/mediapipe/blob/master/mediapipe/util/filtering/relative_velocity_filter.cc
# This filter keeps track (on a window of specified size) of
# value changes over time, which as result gives us velocity of how value
# changes over time. With higher velocity it weights new values higher.
# Use @window_size and @velocity_scale to tweak this filter.
# - higher @window_size adds to lag and to stability
# - lower @velocity_scale adds to lag and to stability
WindowElement = namedtuple('WindowElement', ['distance', 'duration'])
class RelativeVelocityFilter:
def __init__(self, window_size, velocity_scale, shape=1):
self.window_size = window_size
self.velocity_scale = velocity_scale
self.last_value = np.zeros(shape)
self.last_value_scale = np.ones(shape)
self.last_timestamp = -1
self.window = deque()
self.lpf = LowPassFilter()
def apply(self, value_scale, value, timestamp=None):
# Applies filter to the value.
# timestamp - timestamp associated with the value (for instance,
# timestamp of the frame where you got value from)
# value_scale - value scale (for instance, if your value is a distance
# detected on a frame, it can look same on different
# devices but have quite different absolute values due
# to different resolution, you should come up with an
# appropriate parameter for your particular use case)
# value - value to filter
if timestamp is None:
timestamp = time.perf_counter()
if self.last_timestamp == -1:
alpha = 1.0
else:
distance = value * value_scale - self.last_value * self.last_value_scale
duration = timestamp - self.last_timestamp
cumul_distance = distance.copy()
cumul_duration = duration
# Define max cumulative duration assuming
# 30 frames per second is a good frame rate, so assuming 30 values
# per second or 1 / 30 of a second is a good duration per window element
max_cumul_duration = (1 + len(self.window)) * 1/30
for el in self.window:
if cumul_duration + el.duration > max_cumul_duration:
break
cumul_distance += el.distance
cumul_duration += el.duration
velocity = cumul_distance / cumul_duration
alpha = 1 - 1 / (1 + self.velocity_scale * np.abs(velocity))
self.window.append(WindowElement(distance, duration))
if len(self.window) > self.window_size:
self.window.popleft()
self.last_value = value
self.last_value_scale = value_scale
self.last_timestamp = timestamp
return self.lpf.apply_with_alpha(value, alpha)
def get_object_scale(landmarks):
# Estimate object scale to use its inverse value as velocity scale for
# RelativeVelocityFilter. If value will be too small (less than
# `options_.min_allowed_object_scale`) smoothing will be disabled and
# landmarks will be returned as is.
# Object scale is calculated as average between bounding box width and height
# with sides parallel to axis.
# landmarks : numpy array of shape nb_landmarks x 3
lm_min = np.min(landmarks[:2], axis=1) # min x + min y
lm_max = np.max(landmarks[:2], axis=1) # max x + max y
return np.mean(lm_max - lm_min) # average of object width and object height
class LandmarksSmoothingFilter:
def __init__(self, window_size, velocity_scale, shape=1):
# 'shape' is shape of landmarks (ex: (33, 3))
self.window_size = window_size
self.velocity_scale = velocity_scale
self.shape = shape
self.init = True
def apply(self, landmarks):
# landmarks = numpy array of shape nb_landmarks x 3 (3 for x, y, z)
# Here landmarks are absolute landmarks (pixel locations)
if self.init: # Init or reset
self.filters = RelativeVelocityFilter(self.window_size, self.velocity_scale, self.shape)
self.init = False
out_landmarks = landmarks
else:
value_scale = 1 / get_object_scale(landmarks)
out_landmarks = self.filters.apply(value_scale, landmarks)
return out_landmarks
def reset(self):
if not self.init: self.init = True
#
def find_isp_scale_params(size, is_height=True):
"""
Find closest valid size close to 'size' and and the corresponding parameters to setIspScale()
This function is useful to work around a bug in depthai where ImageManip is scrambling images that have an invalid size
is_height : boolean that indicates if the value is the height or the width of the image
Returns: valid size, (numerator, denominator)
"""
# We want size >= 288
if size < 288:
size = 288
# We are looking for the list on integers that are divisible by 16 and
# that can be written like n/d where n <= 16 and d <= 63
if is_height:
reference = 1080
other = 1920
else:
reference = 1920
other = 1080
size_candidates = {}
for s in range(288,reference,16):
f = gcd(reference, s)
n = s//f
d = reference//f
if n <= 16 and d <= 63 and int(round(other * n / d) % 2 == 0):
size_candidates[s] = (n, d)
# What is the candidate size closer to 'size' ?
min_dist = -1
for s in size_candidates:
dist = abs(size - s)
if min_dist == -1:
min_dist = dist
candidate = s
else:
if dist > min_dist: break
candidate = s
min_dist = dist
return candidate, size_candidates[candidate] | 40.303602 | 153 | 0.621101 | 5,421 | 0.23071 | 0 | 0 | 0 | 0 | 0 | 0 | 10,563 | 0.449547 |
bc32a3234dd90ad352aba5adb527d79b901adc29 | 303 | py | Python | examples/fixture.no_background/features/steps/use_steplib_behave4cmd.py | wombat70/behave | c54493b0531795d946ac6754bfc643248cf3056a | [
"BSD-2-Clause"
] | 13 | 2019-10-03T19:15:14.000Z | 2019-10-16T02:01:57.000Z | examples/fixture.no_background/features/steps/use_steplib_behave4cmd.py | wombat70/behave | c54493b0531795d946ac6754bfc643248cf3056a | [
"BSD-2-Clause"
] | 2 | 2020-03-21T22:37:54.000Z | 2021-10-04T17:14:14.000Z | examples/fixture.no_background/features/steps/use_steplib_behave4cmd.py | fluendo/behave | eeffde083456dcf1a0ea9b6139b32091970118c0 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Use behave4cmd0 step library (predecessor of behave4cmd).
"""
from __future__ import absolute_import
# -- REGISTER-STEPS FROM STEP-LIBRARY:
# import behave4cmd0.__all_steps__
# import behave4cmd0.failing_steps
import behave4cmd0.passing_steps
import behave4cmd0.note_steps
| 23.307692 | 57 | 0.785479 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 194 | 0.640264 |
bc3318ee93392f473d8dec2ba7ec5b3d52f0589d | 4,524 | py | Python | build/python/modules/assembly/package_manifest.py | fabio-d/fuchsia-stardock | e57f5d1cf015fe2294fc2a5aea704842294318d2 | [
"BSD-2-Clause"
] | 5 | 2022-01-10T20:22:17.000Z | 2022-01-21T20:14:17.000Z | build/python/modules/assembly/package_manifest.py | fabio-d/fuchsia-stardock | e57f5d1cf015fe2294fc2a5aea704842294318d2 | [
"BSD-2-Clause"
] | null | null | null | build/python/modules/assembly/package_manifest.py | fabio-d/fuchsia-stardock | e57f5d1cf015fe2294fc2a5aea704842294318d2 | [
"BSD-2-Clause"
] | null | null | null | # Copyright 2022 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from dataclasses import dataclass
from typing import Dict, List, Optional
from serialization import serialize_fields_as
__all__ = ['PackageManifest', 'PackageMetaData', 'BlobEntry']
from .common import FilePath
@dataclass(init=False)
@serialize_fields_as(version=str)
class PackageMetaData:
"""The metadata that describes a package.
This is the package manifest's metadata section, which includes both
intrinsic data like abi_revision, and extrinsic data like the name that it's
published under and repo that it's for publishing to, etc.
"""
name: str
version: int = 0
def __init__(self, name: str, version: Optional[int] = None) -> None:
self.name = name
if version is not None:
self.version = version
else:
self.version = 0
@dataclass
class BlobEntry:
"""A blob that's in the package.
path - The path that the blob has within the package's namespace.
merkle - The merkle of the blob
size - The (uncompressed) size of the blob, in bytes.
source_path - The path to where the blob was found when the package was
being created.
"""
path: FilePath
merkle: str
size: Optional[int] = None
source_path: Optional[FilePath] = None
def compare_with(
self,
other: "BlobEntry",
allow_source_path_differences=False) -> List[str]:
"""Compare this BlobEntry with the other, reporting any differences.
If 'allow_source_path_differences' is True, then the source_paths of
blobs are not compared, just the path, size, and merkle.
"""
errors = []
if self.size != other.size:
errors.append(
f"blob has a different size ({self.size} vs {other.size}) for: {self.path}"
)
elif self.merkle != other.merkle:
errors.append(
f"blob has a different merkle ({self.merkle} vs {other.merkle}) for: {self.path}"
)
elif not allow_source_path_differences:
if self.source_path != other.source_path:
errors.append(
f"blob has a different source ({self.source_path} vs {other.source_path}) for: {self.path}"
)
return errors
@dataclass
class PackageManifest:
"""The output manifest for a Fuchsia package."""
package: PackageMetaData
blobs: List[BlobEntry]
version: str = "1"
blob_sources_relative: Optional[str] = None
def set_blob_sources_relative(self, relative_to_file: bool):
self.blob_sources_relative = "file" if relative_to_file else "working_dir"
def blobs_by_path(self) -> Dict[FilePath, BlobEntry]:
return {blob.path: blob for blob in self.blobs}
def compare_with(
self,
other: "PackageManifest",
allow_source_path_differences=False) -> List[str]:
"""Compare this package manifest with the other, reporting any
differences.
If 'allow_source_path_differences' is True, then the source_paths of
blobs are not compared, just the path, size, and merkle.
"""
errors = []
if self.package.name != other.package.name:
errors.append(
f"package publishing names differ: {self.package.name} vs. {other.package.name}"
)
self_blobs_by_path = self.blobs_by_path()
other_blobs_by_path = other.blobs_by_path()
missing_paths = set(self_blobs_by_path.keys()).difference(
other_blobs_by_path.keys())
extra_paths = set(other_blobs_by_path.keys()).difference(
self_blobs_by_path.keys())
common_paths = set(self_blobs_by_path.keys()).intersection(
other_blobs_by_path.keys())
for missing_path in missing_paths:
errors.append(f"missing a blob for path: {missing_path}")
for extra_path in extra_paths:
errors.append(f"extra blob found at path: {extra_path}")
for common_path in sorted([str(path) for path in common_paths]):
self_blob = self_blobs_by_path[common_path]
other_blob = other_blobs_by_path[common_path]
errors.extend(
self_blob.compare_with(
other_blob, allow_source_path_differences))
return errors
| 34.8 | 111 | 0.641689 | 4,061 | 0.897657 | 0 | 0 | 4,140 | 0.915119 | 0 | 0 | 1,753 | 0.387489 |
bc33b82e9c92fada4f9a24afcaa8e16bbc692cff | 6,025 | py | Python | annotation/black_action/test/move_ry.py | windfall-shogi/feature-annotation | 83ff7c3fa31e542221cf45186b2ea3ef2a10310f | [
"MIT"
] | null | null | null | annotation/black_action/test/move_ry.py | windfall-shogi/feature-annotation | 83ff7c3fa31e542221cf45186b2ea3ef2a10310f | [
"MIT"
] | null | null | null | annotation/black_action/test/move_ry.py | windfall-shogi/feature-annotation | 83ff7c3fa31e542221cf45186b2ea3ef2a10310f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from itertools import product
from pathlib import Path
import numpy as np
import tensorflow as tf
from dotenv import load_dotenv
from annotation.direction import (Direction, get_diagonal_directions,
get_cross_directions)
from annotation.piece import Piece
from ..ry import BlackRyMoveLayer
__author__ = 'Yasuhiro'
__date__ = '2018/3/14'
class TestBlackRyMove(tf.test.TestCase):
@classmethod
def setUpClass(cls):
dotenv_path = Path(__file__).parents[3] / '.env'
load_dotenv(str(dotenv_path))
cls.data_format = os.environ.get('DATA_FORMAT')
cls.use_cudnn = bool(os.environ.get('USE_CUDNN'))
def test_ry_move(self):
"""
RYについて成り、成らずの判定のテスト
利きが通るかどうかは別のところで判定しているので、ここでは考えない
:return:
"""
shape = (1, 1, 9, 9) if self.data_format == 'NCHW' else (1, 9, 9, 1)
# 移動距離ごとに用意
effect = {
direction: [np.empty(shape, dtype=np.bool) for _ in range(8)]
for direction in get_cross_directions()
}
effect.update({
direction: np.empty(shape, dtype=np.bool)
for direction in get_diagonal_directions()
})
board = np.empty(shape, dtype=np.int32)
ph_board = tf.placeholder(tf.int32, shape=shape)
ry_effect = {
direction: [
tf.placeholder(tf.bool, shape=shape) for _ in range(8)
] for direction in get_cross_directions()
}
ry_effect.update({
direction: tf.placeholder(tf.bool, shape=shape)
for direction in get_diagonal_directions()
})
non_promoting = BlackRyMoveLayer()(ph_board, ry_effect)
# アクセスしやすいように次元を下げる
non_promoting = {key: tf.squeeze(value)
for key, value in non_promoting.items()}
feed_dict = {}
for direction, ph_list in ry_effect.items():
if direction in get_cross_directions():
for ph, e in zip(ph_list, effect[direction]):
feed_dict[ph] = e
else:
feed_dict[ph_list] = effect[direction]
feed_dict[ph_board] = board
with self.test_session() as sess:
for i, j, piece in product(range(9), range(9), range(Piece.SIZE)):
for direction, effect_list in effect.items():
if direction in get_cross_directions():
for e in effect_list:
e[:] = False
if self.data_format == 'NCHW':
e[0, 0, i, j] = True
else:
e[0, i, j, 0] = True
else:
effect_list[:] = False
if self.data_format == 'NCHW':
effect_list[0, 0, i, j] = True
else:
effect_list[0, i, j, 0] = True
piece = Piece(piece)
board[:] = piece
n = sess.run(non_promoting, feed_dict=feed_dict)
b = np.squeeze(board)
for direction, distance in product(effect.keys(), range(8)):
if direction in get_diagonal_directions():
if distance > 0:
continue
if (direction == Direction.RIGHT_UP and
(i == 8 or j == 8)):
continue
elif (direction == Direction.RIGHT_DOWN and
(i == 8 or j == 0)):
continue
elif (direction == Direction.LEFT_UP and
(i == 8 or j == 8)):
continue
elif (direction == Direction.LEFT_DOWN and
(i == 0 or j == 0)):
continue
if direction == Direction.RIGHT:
if i + distance >= 8:
continue
elif direction == Direction.UP:
if j + distance >= 8:
continue
elif direction == Direction.DOWN:
if j - distance <= 0:
continue
elif direction == Direction.LEFT:
if i - distance <= 0:
continue
n_move = n[direction]
if direction in get_cross_directions():
with self.subTest(i=i, j=j, piece=piece,
direction=direction,
distance=distance):
self.assertTupleEqual((8, 9, 9), n_move.shape)
if b[i, j] < Piece.WHITE_FU:
# 自身の駒があって動けない
self.assertFalse(np.all(n_move[distance]))
else:
self.assertTrue(n_move[distance, i, j])
n_move[distance, i, j] = False
self.assertFalse(np.all(n_move[distance]))
else:
with self.subTest(i=i, j=j, piece=piece,
direction=direction):
self.assertTupleEqual((9, 9), n_move.shape)
if b[i, j] < Piece.WHITE_FU:
# 自身の駒があって動けない
self.assertFalse(np.all(n_move))
else:
self.assertTrue(n_move[i, j])
n_move[i, j] = False
self.assertFalse(np.all(n_move))
| 38.870968 | 78 | 0.446473 | 5,790 | 0.93012 | 0 | 0 | 247 | 0.039679 | 0 | 0 | 474 | 0.076145 |
bc33e1a164ea02a1949fb5d46cf3c8b6e16f624c | 747 | py | Python | assignments/assignment2/solutions/Nkarnaud/devhub-0.1.0/src/account/forms.py | Nkarnaud/python-mentorship | a00735fc8b63e244b91575540c54d0eea942381f | [
"Apache-2.0"
] | 1 | 2019-04-22T18:42:51.000Z | 2019-04-22T18:42:51.000Z | assignments/assignment2/solutions/Nkarnaud/devhub-0.1.0/src/account/forms.py | Nkarnaud/python-mentorship | a00735fc8b63e244b91575540c54d0eea942381f | [
"Apache-2.0"
] | null | null | null | assignments/assignment2/solutions/Nkarnaud/devhub-0.1.0/src/account/forms.py | Nkarnaud/python-mentorship | a00735fc8b63e244b91575540c54d0eea942381f | [
"Apache-2.0"
] | null | null | null | from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.contrib.auth.models import User
from account.models import Account
class AccountCreateForm(UserCreationForm):
first_name = forms.CharField(max_length=30, required=False, help_text='Optional.')
last_name = forms.CharField(max_length=30, required=False, help_text='Optional.')
email = forms.EmailField(max_length=254, help_text='Required. Inform a valid email address.')
class Meta:
model = Account
fields = ('username','first_name', 'last_name' ,'email', 'password1', 'password2')
class AccountChangeForm(UserChangeForm):
class Meta:
model = Account
fields = UserChangeForm.Meta.fields | 41.5 | 97 | 0.745649 | 568 | 0.760375 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.167336 |
bc3513cb203b5eb746d7ca1948d927c26402a7f1 | 390 | py | Python | palsbet/migrations/0003_auto_20180323_0018.py | denis254/palsbetc | d70d0fadaa661ff36c046a4f0a87a88d890c0dc4 | [
"BSD-3-Clause"
] | null | null | null | palsbet/migrations/0003_auto_20180323_0018.py | denis254/palsbetc | d70d0fadaa661ff36c046a4f0a87a88d890c0dc4 | [
"BSD-3-Clause"
] | 11 | 2020-03-24T16:11:23.000Z | 2021-12-13T19:47:29.000Z | palsbet/migrations/0003_auto_20180323_0018.py | denis254/overtimebet | 063af2fc263580d96e396e953ef8658a75ac38a5 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.0.2 on 2018-03-22 21:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('palsbet', '0002_viptipsgames'),
]
operations = [
migrations.AlterField(
model_name='viptipsgames',
name='cathegory',
field=models.CharField(max_length=100),
),
]
| 20.526316 | 51 | 0.602564 | 297 | 0.761538 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.25641 |
bc3c7092993eb4e13af6dd3b1e0b4b31e2777061 | 18,700 | py | Python | Environment.py | mxrcfdez/TrafficLightsAutomation | fe5b3c1cebe585e4f9a3fb2d2a7653f4ac2edb8a | [
"Apache-2.0"
] | null | null | null | Environment.py | mxrcfdez/TrafficLightsAutomation | fe5b3c1cebe585e4f9a3fb2d2a7653f4ac2edb8a | [
"Apache-2.0"
] | null | null | null | Environment.py | mxrcfdez/TrafficLightsAutomation | fe5b3c1cebe585e4f9a3fb2d2a7653f4ac2edb8a | [
"Apache-2.0"
] | null | null | null | import gym
from gym import spaces
import numpy as np
import learning_data
from Simulation import Simulation
def get_value_or_delimiter(value, delimiter):
    """Clamp *value* into the inclusive interval given by *delimiter*.

    delimiter is a two-element sequence [lower, upper]. The value is first
    raised to the lower bound, then capped at the upper bound, so the result
    always lies within [lower, upper] for lower <= upper.
    """
    lower_bound, upper_bound = delimiter
    raised = max(lower_bound, value)
    return min(upper_bound, raised)
class Environment(gym.Env):
def __init__(self, simulation, training):
super(Environment, self).__init__()
self.simulation = simulation
self.training = training
self.action_space = spaces.Discrete(2)
observation_space_dictionary = dict()
observation_space_dictionary['lights_settings'] = spaces.Box(low=0, high=1, shape=(1,), dtype=np.uint8)
observation_space_dictionary['intersection_cars'] = spaces.Box(low=0, high=1000, shape=(2,), dtype=np.uint8)
observation_space_dictionary['input_cars'] = spaces.Box(low=0, high=1000, shape=(2,), dtype=np.uint8)
observation_space_dictionary['output_cars'] = spaces.Box(low=0, high=1000, shape=(2,), dtype=np.uint8)
self.observation_space = spaces.Dict(observation_space_dictionary)
def reset(self, reset_simulation=True):
print('\033[94m' + 'resetting environment...' + '\033[0;0m')
if reset_simulation:
self.simulation = Simulation(self.simulation.traffic_volume, self.simulation.rows, self.simulation.cols, self.simulation.road_length, self.simulation.simulation_time)
observation = self.simulation.get_observation()
learning_data.previous_observation = observation
return observation
def step(self, action):
if self.training:
previous_observation = learning_data.previous_observation
done = self.simulation.advance_step(action)
current_observation = self.simulation.get_observation()
learning_data.previous_observation = current_observation
reward = self.definitive2x2(previous_observation, action)
else:
done = self.simulation.advance_step(action)
current_observation = self.simulation.get_observation()
reward = 0
return current_observation, reward, done, {}
    def render(self, mode='human', close=False):
        """No-op: this environment does not implement gym rendering."""
        pass
def reward_function_1(self, previous_observation, current_observation):
previous_horizontal_waiting_time = 0
previous_vertical_waiting_time = 0
current_horizontal_waiting_time = 0
current_vertical_waiting_time = 0
for intersection in range(self.simulation.rows * self.simulation.cols):
for car in range(self.simulation.road_length):
if previous_observation['horizontal_waiting_time'][intersection][car] != -1:
previous_horizontal_waiting_time += previous_observation['horizontal_waiting_time'][intersection][car]
if previous_observation['vertical_waiting_time'][intersection][car] != -1:
previous_vertical_waiting_time += previous_observation['vertical_waiting_time'][intersection][car]
if current_observation['horizontal_waiting_time'][intersection][car] != -1:
current_horizontal_waiting_time += current_observation['horizontal_waiting_time'][intersection][car]
if current_observation['vertical_waiting_time'][intersection][car] != -1:
current_vertical_waiting_time += current_observation['vertical_waiting_time'][intersection][car]
previous_waiting_time = previous_horizontal_waiting_time + previous_vertical_waiting_time
current_waiting_time = current_horizontal_waiting_time + current_vertical_waiting_time
return previous_waiting_time - current_waiting_time
def reward_function_2(self, previous_observation, action):
reward = 0
num_of_intersections = self.simulation.rows * self.simulation.cols
for intersection in range(num_of_intersections):
# If the vertical lights turn green
if previous_observation['lights_settings'][intersection] == 1 and action[intersection] >= 0.5:
reward = reward + previous_observation['vertical_num_of_cars_waiting'][intersection] - previous_observation['horizontal_num_of_cars_waiting'][intersection]
# If the horizontal lights turn green
elif previous_observation['lights_settings'][intersection] == 0 and action[intersection] >= 0.5:
reward = reward + previous_observation['horizontal_num_of_cars_waiting'][intersection] - previous_observation['vertical_num_of_cars_waiting'][intersection]
return reward
def reward_function_3(self, previous_observation, action):
reward = 0
num_of_intersections = self.simulation.rows * self.simulation.cols
for intersection in range(num_of_intersections):
# If the vertical lights turn green
if previous_observation['lights_settings'][intersection] >= 1 and action[intersection] >= 0.5:
for car in range(self.simulation.road_length):
if previous_observation['vertical_waiting_time'][intersection][car] != -1:
reward += previous_observation['vertical_waiting_time'][intersection][car]
if previous_observation['horizontal_waiting_time'][intersection][car] != -1:
reward -= previous_observation['horizontal_waiting_time'][intersection][car]
# If the horizontal lights turn green
if previous_observation['lights_settings'][intersection] == 0 and action[intersection] >= 0.5:
for car in range(self.simulation.road_length):
if previous_observation['horizontal_waiting_time'][intersection][car] != -1:
reward += previous_observation['horizontal_waiting_time'][intersection][car]
if previous_observation['vertical_waiting_time'][intersection][car] != -1:
reward -= previous_observation['vertical_waiting_time'][intersection][car]
return reward
def reward_function_4(self, previous_observation, action):
reward = 0
num_of_intersections = self.simulation.rows * self.simulation.cols
for intersection in range(num_of_intersections):
# If the vertical lights turn green
if previous_observation['lights_settings'][intersection] == 1 and action[intersection] == 1:
if previous_observation['vertical_waiting_time'][intersection][0] != -1:
reward += previous_observation['vertical_waiting_time'][intersection][0]
if previous_observation['horizontal_waiting_time'][intersection][0] != -1:
reward -= previous_observation['horizontal_waiting_time'][intersection][0]
# If the horizontal lights turn green
if previous_observation['lights_settings'][intersection] == 0 and action[intersection] == 1:
if previous_observation['horizontal_waiting_time'][intersection][0] != -1:
reward += previous_observation['horizontal_waiting_time'][intersection][0]
if previous_observation['vertical_waiting_time'][intersection][0] != -1:
reward -= previous_observation['vertical_waiting_time'][intersection][0]
return reward
def reward_function_6(self, current_observation):
return - current_observation['average_waiting_time'][0]
def reward_function_7(self, previous_observation, action):
reward = 0
if action < self.simulation.rows * self.simulation.cols:
if previous_observation['ready_to_switch'][action] == 0:
reward -= 1
return reward
def reward_function_8(self, previous_observation, action):
reward = 0
if action < self.simulation.rows * self.simulation.cols:
if previous_observation['lights_settings'][action] == 1:
reward += (previous_observation['vertical_num_of_cars_waiting'][action] - previous_observation['horizontal_num_of_cars_waiting'][action]) / self.simulation.road_length
else:
reward += (previous_observation['horizontal_num_of_cars_waiting'][action] - previous_observation['vertical_num_of_cars_waiting'][action]) / self.simulation.road_length
if previous_observation['ready_to_switch'][action] == 0:
reward -= 1
return reward
def reward_function_9(self, previous_observation, action):
reward = -4
if action < self.simulation.rows * self.simulation.cols:
if previous_observation['lights_settings'][action] == 1:
reward += previous_observation['vertical_num_of_cars_waiting'][action] - previous_observation['horizontal_num_of_cars_waiting'][action]
else:
reward += previous_observation['horizontal_num_of_cars_waiting'][action] - previous_observation['vertical_num_of_cars_waiting'][action]
return reward
def reward_function_10(self, previous_observation, action):
if action < self.simulation.rows * self.simulation.cols:
# print('vertical cars: ' + str(previous_observation['vertical_num_of_cars'][action]), 'horizontal cars: ' + str(previous_observation['horizontal_num_of_cars'][action]))
# reward = -10 if previous_observation['ready_to_switch'][action] == 0 else 0
reward = - self.simulation.road_length / 2
if previous_observation['lights_settings'][action] == 1:
reward += min(previous_observation['vertical_num_of_cars'][action] - previous_observation['horizontal_num_of_cars'][action], self.simulation.road_length)
else:
reward += min(previous_observation['horizontal_num_of_cars'][action] - previous_observation['vertical_num_of_cars'][action], self.simulation.road_length)
else:
reward = 0
not_action = action - self.simulation.rows * self.simulation.cols
if previous_observation['lights_settings'][not_action] == 1:
reward += max(previous_observation['horizontal_num_of_cars'][not_action] - previous_observation['vertical_num_of_cars'][not_action], - self.simulation.road_length)
else:
reward += max(previous_observation['vertical_num_of_cars'][not_action] - previous_observation['horizontal_num_of_cars'][not_action], - self.simulation.road_length)
# print('action: ' + str(action) + ', reward: ' + str(reward))
return reward
def reward_function_11(self, prev_obs, action):
reward = 0
if action == 1:
reward = - self.simulation.road_length
if prev_obs['lights_settings'][0] == 1:
reward += get_value_or_delimiter(value=prev_obs['num_of_cars'][0][0] - prev_obs['num_of_cars'][0][1], delimiter=[-self.simulation.road_length, self.simulation.road_length])
# reward += get_value_or_delimiter(value=prev_obs['num_of_cars'][0][0] - prev_obs['num_of_cars'][1][0], delimiter=[-self.simulation.road_length, self.simulation.road_length])
# reward += get_value_or_delimiter(value=- prev_obs['num_of_cars'][0][1] + self.simulation.road_length, delimiter=[-self.simulation.road_length, 0])
else:
reward += get_value_or_delimiter(value=prev_obs['num_of_cars'][0][1] - prev_obs['num_of_cars'][0][0], delimiter=[-self.simulation.road_length, self.simulation.road_length])
# reward += get_value_or_delimiter(value=prev_obs['num_of_cars'][0][1] - prev_obs['num_of_cars'][1][1], delimiter=[-self.simulation.road_length, self.simulation.road_length])
# reward += get_value_or_delimiter(value=- prev_obs['num_of_cars'][0][0] + self.simulation.road_length, delimiter=[-self.simulation.road_length, 0])
else:
if prev_obs['lights_settings'][0] == 0:
reward += get_value_or_delimiter(value=prev_obs['num_of_cars'][0][0] - prev_obs['num_of_cars'][0][1], delimiter=[-self.simulation.road_length, self.simulation.road_length])
# reward += get_value_or_delimiter(value=prev_obs['num_of_cars'][0][0] - prev_obs['num_of_cars'][1][0], delimiter=[-self.simulation.road_length, self.simulation.road_length])
# reward += get_value_or_delimiter(value=- prev_obs['num_of_cars'][0][0] + self.simulation.road_length, delimiter=[-self.simulation.road_length, 0])
else:
reward += get_value_or_delimiter(value=prev_obs['num_of_cars'][0][1] - prev_obs['num_of_cars'][0][0], delimiter=[-self.simulation.road_length, self.simulation.road_length])
# reward += get_value_or_delimiter(value=prev_obs['num_of_cars'][0][1] - prev_obs['num_of_cars'][1][1], delimiter=[-self.simulation.road_length, self.simulation.road_length])
# reward += get_value_or_delimiter(value=- prev_obs['num_of_cars'][0][1] + self.simulation.road_length, delimiter=[-self.simulation.road_length, 0])
# print('action: ' + str(action) + ', reward: ' + str(reward))
# print('vertical cars: ' + str(prev_obs['num_of_cars'][0][0]), 'horizontal cars: ' + str(prev_obs['num_of_cars'][0][1]) + '\n')
return reward
def reward_function_12(self, prev_obs, action):
reward = 0
if prev_obs['ready_to_switch'] == 0:
if action == 1:
reward = -10
else:
if action == 1:
if prev_obs['lights_settings'][0] == 1:
reward += 1 if prev_obs['num_of_cars'][0][0] > 8 and prev_obs['num_of_cars'][0][1] < 2 else 0
# reward += 1 if prev_obs['num_of_cars'][1][0] < 2 else 0
else:
reward += 1 if prev_obs['num_of_cars'][0][1] > 8 and prev_obs['num_of_cars'][0][0] < 2 else 0
# reward += 1 if prev_obs['num_of_cars'][2][1] < 2 else 0
else:
if prev_obs['lights_settings'][0] == 0:
reward += 1 if prev_obs['num_of_cars'][0][0] > 8 else 0
else:
reward += 1 if prev_obs['num_of_cars'][0][1] > 8 else 0
# if self.simulation.intersection_to_process == 2:
# print('action: ' + str(action) + ', reward: ' + str(reward))
# print('vertical cars: ' + str(prev_obs['num_of_cars'][0][0]), 'horizontal cars: ' + str(prev_obs['num_of_cars'][0][1]) + '\n')
return reward
def definitive2x2(self, prev_obs, action):
reward = 0
vertical_load = prev_obs['intersection_cars'][0] # + prev_obs['input_cars'][0]
horizontal_load = prev_obs['intersection_cars'][1] # + prev_obs['input_cars'][1]
if prev_obs['lights_settings'][0] == 0:
if horizontal_load > 2 * vertical_load:
if action == 1:
reward = 1
else:
reward = -1
elif vertical_load > 2 * horizontal_load:
if action == 1:
reward = -1
else:
reward = 1
elif action == 1:
reward = -1
if prev_obs['lights_settings'][0] == 1:
if horizontal_load > 2 * vertical_load:
if action == 1:
reward = -1
else:
reward = 1
elif vertical_load > 2 * horizontal_load:
if action == 1:
reward = 1
else:
reward = -1
elif action == 1:
reward = -1
if self.simulation.intersection_to_process == 20:
print('The vertical load is: ' + str(vertical_load))
print('The horizontal load is: ' + str(horizontal_load))
print('The green light is: ' + str('VERTICAL' if prev_obs['lights_settings'][0] == 0 else 'HORIZONTAL'))
print('The action is: ' + str('MAINTAIN' if action == 0 else 'CHANGE'))
print('The reward is: ' + str(reward))
print('\n')
return reward
    def definitive3x3(self, prev_obs, action):
        """Hand-tuned threshold reward for a 3x3 grid intersection.

        Compares the cars queued on the intersection's own roads
        ('intersection_cars') with those on its outgoing roads
        ('output_cars') and rewards (+1) or punishes (-1) the chosen
        action; switching without any triggering condition costs -1.
        lights_settings[0] == 0 is printed as the VERTICAL direction
        being green (see the debug trace below).  NOTE(review): the debug
        prints run unconditionally — confirm that is intended.
        """
        reward = 0
        vertical_load = prev_obs['intersection_cars'][0]
        horizontal_load = prev_obs['intersection_cars'][1]
        vertical_output_load = prev_obs['output_cars'][0]
        horizontal_output_load = prev_obs['output_cars'][1]
        # Vertical direction currently green: reward switching when the
        # horizontal side (or the vertical output road) is congested.
        if prev_obs['lights_settings'][0] == 0:
            if horizontal_load - vertical_load > 6:
                reward = 1 if action == 1 else -1
            elif vertical_load - horizontal_load > 4:
                reward = -1 if action == 1 else 1
            elif vertical_output_load - horizontal_output_load > 6:
                reward = 1 if action == 1 else -1
            elif horizontal_output_load - vertical_output_load > 4:
                reward = -1 if action == 1 else 1
            elif vertical_load > 5 and horizontal_load > 5 and vertical_output_load - horizontal_output_load > 3:
                reward = 1 if action == 1 else -1
            elif vertical_output_load == 10:
                reward = 1 if action == 1 else -1
            elif action == 1:
                reward = -1
        # Horizontal direction currently green: exact mirror of the block
        # above with the roles of the two directions swapped.
        if prev_obs['lights_settings'][0] == 1:
            if vertical_load - horizontal_load > 6:
                reward = 1 if action == 1 else -1
            elif horizontal_load - vertical_load > 4:
                reward = -1 if action == 1 else 1
            elif horizontal_output_load - vertical_output_load > 6:
                reward = 1 if action == 1 else -1
            elif vertical_output_load - horizontal_output_load > 4:
                reward = -1 if action == 1 else 1
            elif horizontal_load > 5 and vertical_load > 5 and horizontal_output_load - vertical_output_load > 3:
                reward = 1 if action == 1 else -1
            elif horizontal_output_load == 10:
                reward = 1 if action == 1 else -1
            elif action == 1:
                reward = -1
        # Debug trace (unconditional).
        print('The intersection is: ' + str(self.simulation.intersection_to_process))
        print('The vertical load is: ' + str(vertical_load))
        print('The horizontal load is: ' + str(horizontal_load))
        print('The vertical output load is: ' + str(vertical_output_load))
        print('The horizontal output load is: ' + str(horizontal_output_load))
        print('The green light is: ' + str('VERTICAL' if prev_obs['lights_settings'][0] == 0 else 'HORIZONTAL'))
        print('The action is: ' + str('MAINTAIN' if action == 0 else 'CHANGE'))
        print('The reward is: ' + str(reward))
        print('\n')
        return reward
| 53.276353 | 190 | 0.634439 | 18,485 | 0.988503 | 0 | 0 | 0 | 0 | 0 | 0 | 4,745 | 0.253743 |
bc3d021b03a2c4dec39faa861c772e80b36950d7 | 1,311 | py | Python | test.py | nerdingitout/STT-- | 4544f89abd6c2a8c11c1cf3e309ceb6f5d9b6b15 | [
"Apache-2.0"
] | null | null | null | test.py | nerdingitout/STT-- | 4544f89abd6c2a8c11c1cf3e309ceb6f5d9b6b15 | [
"Apache-2.0"
] | null | null | null | test.py | nerdingitout/STT-- | 4544f89abd6c2a8c11c1cf3e309ceb6f5d9b6b15 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import json
import csv
# importing the module
import json
# Opening JSON file
with open('response.json') as json_file:
data = json.load(json_file)
# for reading nested data [0] represents
# the index value of the list
print(data['results'][0]['alternatives']['transcript'])
# for printing the key-value pair of
# nested dictionary for looop can be used
print("\nPrinting nested dicitonary as a key-value pair\n")
for i in data['people1']:
print("Name:", i['name'])
print("Website:", i['website'])
print("From:", i['from'])
print()
#def json_csv(filename):
# with open(filename) as data_file: #opening json file
# data = json.load(data_file) #loading json data
# normalized_df = pd.json_normalize(data)
# print(normalized_df['results'][0])
# normalized_df.to_csv('my_csv_file.csv',index=False)
# return pd.DataFrame(data['results'])
json_csv('response.json') #calling the json_csv function, paramter is the source json file
#file = open('response.json')
#obj = json.load(file)
#for element in obj['results']:
# for alternative in element['alternatives']:
# for stamp in alternative['timestamps']:
# name, value1, value2 = stamp
# print(stamp)
| 29.133333 | 91 | 0.650648 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 983 | 0.749809 |
70ad458eac22677ad1d19a761c4bf2d78f497f97 | 6,407 | py | Python | loldib/getratings/models/NA/na_sivir/na_sivir_bot.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_sivir/na_sivir_bot.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_sivir/na_sivir_bot.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | from getratings.models.ratings import Ratings
# NOTE(review): these appear to be auto-generated placeholder classes — one
# empty Ratings subclass per opposing champion for the NA Sivir bot-lane
# matchup table.  Each class only inherits from Ratings and adds nothing.
class NA_Sivir_Bot_Aatrox(Ratings):
    pass
class NA_Sivir_Bot_Ahri(Ratings):
    pass
class NA_Sivir_Bot_Akali(Ratings):
    pass
class NA_Sivir_Bot_Alistar(Ratings):
    pass
class NA_Sivir_Bot_Amumu(Ratings):
    pass
class NA_Sivir_Bot_Anivia(Ratings):
    pass
class NA_Sivir_Bot_Annie(Ratings):
    pass
class NA_Sivir_Bot_Ashe(Ratings):
    pass
class NA_Sivir_Bot_AurelionSol(Ratings):
    pass
class NA_Sivir_Bot_Azir(Ratings):
    pass
class NA_Sivir_Bot_Bard(Ratings):
    pass
class NA_Sivir_Bot_Blitzcrank(Ratings):
    pass
class NA_Sivir_Bot_Brand(Ratings):
    pass
class NA_Sivir_Bot_Braum(Ratings):
    pass
class NA_Sivir_Bot_Caitlyn(Ratings):
    pass
class NA_Sivir_Bot_Camille(Ratings):
    pass
class NA_Sivir_Bot_Cassiopeia(Ratings):
    pass
class NA_Sivir_Bot_Chogath(Ratings):
    pass
class NA_Sivir_Bot_Corki(Ratings):
    pass
class NA_Sivir_Bot_Darius(Ratings):
    pass
class NA_Sivir_Bot_Diana(Ratings):
    pass
class NA_Sivir_Bot_Draven(Ratings):
    pass
class NA_Sivir_Bot_DrMundo(Ratings):
    pass
class NA_Sivir_Bot_Ekko(Ratings):
    pass
class NA_Sivir_Bot_Elise(Ratings):
    pass
class NA_Sivir_Bot_Evelynn(Ratings):
    pass
class NA_Sivir_Bot_Ezreal(Ratings):
    pass
class NA_Sivir_Bot_Fiddlesticks(Ratings):
    pass
class NA_Sivir_Bot_Fiora(Ratings):
    pass
class NA_Sivir_Bot_Fizz(Ratings):
    pass
class NA_Sivir_Bot_Galio(Ratings):
    pass
class NA_Sivir_Bot_Gangplank(Ratings):
    pass
class NA_Sivir_Bot_Garen(Ratings):
    pass
class NA_Sivir_Bot_Gnar(Ratings):
    pass
class NA_Sivir_Bot_Gragas(Ratings):
    pass
class NA_Sivir_Bot_Graves(Ratings):
    pass
class NA_Sivir_Bot_Hecarim(Ratings):
    pass
class NA_Sivir_Bot_Heimerdinger(Ratings):
    pass
class NA_Sivir_Bot_Illaoi(Ratings):
    pass
class NA_Sivir_Bot_Irelia(Ratings):
    pass
class NA_Sivir_Bot_Ivern(Ratings):
    pass
class NA_Sivir_Bot_Janna(Ratings):
    pass
class NA_Sivir_Bot_JarvanIV(Ratings):
    pass
class NA_Sivir_Bot_Jax(Ratings):
    pass
class NA_Sivir_Bot_Jayce(Ratings):
    pass
class NA_Sivir_Bot_Jhin(Ratings):
    pass
class NA_Sivir_Bot_Jinx(Ratings):
    pass
class NA_Sivir_Bot_Kalista(Ratings):
    pass
class NA_Sivir_Bot_Karma(Ratings):
    pass
class NA_Sivir_Bot_Karthus(Ratings):
    pass
class NA_Sivir_Bot_Kassadin(Ratings):
    pass
class NA_Sivir_Bot_Katarina(Ratings):
    pass
class NA_Sivir_Bot_Kayle(Ratings):
    pass
class NA_Sivir_Bot_Kayn(Ratings):
    pass
class NA_Sivir_Bot_Kennen(Ratings):
    pass
class NA_Sivir_Bot_Khazix(Ratings):
    pass
class NA_Sivir_Bot_Kindred(Ratings):
    pass
class NA_Sivir_Bot_Kled(Ratings):
    pass
class NA_Sivir_Bot_KogMaw(Ratings):
    pass
class NA_Sivir_Bot_Leblanc(Ratings):
    pass
class NA_Sivir_Bot_LeeSin(Ratings):
    pass
class NA_Sivir_Bot_Leona(Ratings):
    pass
class NA_Sivir_Bot_Lissandra(Ratings):
    pass
class NA_Sivir_Bot_Lucian(Ratings):
    pass
class NA_Sivir_Bot_Lulu(Ratings):
    pass
class NA_Sivir_Bot_Lux(Ratings):
    pass
class NA_Sivir_Bot_Malphite(Ratings):
    pass
class NA_Sivir_Bot_Malzahar(Ratings):
    pass
class NA_Sivir_Bot_Maokai(Ratings):
    pass
class NA_Sivir_Bot_MasterYi(Ratings):
    pass
class NA_Sivir_Bot_MissFortune(Ratings):
    pass
class NA_Sivir_Bot_MonkeyKing(Ratings):
    pass
class NA_Sivir_Bot_Mordekaiser(Ratings):
    pass
class NA_Sivir_Bot_Morgana(Ratings):
    pass
class NA_Sivir_Bot_Nami(Ratings):
    pass
class NA_Sivir_Bot_Nasus(Ratings):
    pass
class NA_Sivir_Bot_Nautilus(Ratings):
    pass
class NA_Sivir_Bot_Nidalee(Ratings):
    pass
class NA_Sivir_Bot_Nocturne(Ratings):
    pass
class NA_Sivir_Bot_Nunu(Ratings):
    pass
class NA_Sivir_Bot_Olaf(Ratings):
    pass
class NA_Sivir_Bot_Orianna(Ratings):
    pass
class NA_Sivir_Bot_Ornn(Ratings):
    pass
class NA_Sivir_Bot_Pantheon(Ratings):
    pass
class NA_Sivir_Bot_Poppy(Ratings):
    pass
class NA_Sivir_Bot_Quinn(Ratings):
    pass
class NA_Sivir_Bot_Rakan(Ratings):
    pass
class NA_Sivir_Bot_Rammus(Ratings):
    pass
class NA_Sivir_Bot_RekSai(Ratings):
    pass
class NA_Sivir_Bot_Renekton(Ratings):
    pass
class NA_Sivir_Bot_Rengar(Ratings):
    pass
class NA_Sivir_Bot_Riven(Ratings):
    pass
class NA_Sivir_Bot_Rumble(Ratings):
    pass
class NA_Sivir_Bot_Ryze(Ratings):
    pass
class NA_Sivir_Bot_Sejuani(Ratings):
    pass
class NA_Sivir_Bot_Shaco(Ratings):
    pass
class NA_Sivir_Bot_Shen(Ratings):
    pass
class NA_Sivir_Bot_Shyvana(Ratings):
    pass
class NA_Sivir_Bot_Singed(Ratings):
    pass
class NA_Sivir_Bot_Sion(Ratings):
    pass
class NA_Sivir_Bot_Sivir(Ratings):
    pass
class NA_Sivir_Bot_Skarner(Ratings):
    pass
class NA_Sivir_Bot_Sona(Ratings):
    pass
class NA_Sivir_Bot_Soraka(Ratings):
    pass
class NA_Sivir_Bot_Swain(Ratings):
    pass
class NA_Sivir_Bot_Syndra(Ratings):
    pass
class NA_Sivir_Bot_TahmKench(Ratings):
    pass
class NA_Sivir_Bot_Taliyah(Ratings):
    pass
class NA_Sivir_Bot_Talon(Ratings):
    pass
class NA_Sivir_Bot_Taric(Ratings):
    pass
class NA_Sivir_Bot_Teemo(Ratings):
    pass
class NA_Sivir_Bot_Thresh(Ratings):
    pass
class NA_Sivir_Bot_Tristana(Ratings):
    pass
class NA_Sivir_Bot_Trundle(Ratings):
    pass
class NA_Sivir_Bot_Tryndamere(Ratings):
    pass
class NA_Sivir_Bot_TwistedFate(Ratings):
    pass
class NA_Sivir_Bot_Twitch(Ratings):
    pass
class NA_Sivir_Bot_Udyr(Ratings):
    pass
class NA_Sivir_Bot_Urgot(Ratings):
    pass
class NA_Sivir_Bot_Varus(Ratings):
    pass
class NA_Sivir_Bot_Vayne(Ratings):
    pass
class NA_Sivir_Bot_Veigar(Ratings):
    pass
class NA_Sivir_Bot_Velkoz(Ratings):
    pass
class NA_Sivir_Bot_Vi(Ratings):
    pass
class NA_Sivir_Bot_Viktor(Ratings):
    pass
class NA_Sivir_Bot_Vladimir(Ratings):
    pass
class NA_Sivir_Bot_Volibear(Ratings):
    pass
class NA_Sivir_Bot_Warwick(Ratings):
    pass
class NA_Sivir_Bot_Xayah(Ratings):
    pass
class NA_Sivir_Bot_Xerath(Ratings):
    pass
class NA_Sivir_Bot_XinZhao(Ratings):
    pass
class NA_Sivir_Bot_Yasuo(Ratings):
    pass
class NA_Sivir_Bot_Yorick(Ratings):
    pass
class NA_Sivir_Bot_Zac(Ratings):
    pass
class NA_Sivir_Bot_Zed(Ratings):
    pass
class NA_Sivir_Bot_Ziggs(Ratings):
    pass
class NA_Sivir_Bot_Zilean(Ratings):
    pass
class NA_Sivir_Bot_Zyra(Ratings):
    pass
| 15.364508 | 46 | 0.761667 | 5,806 | 0.906196 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
70adc0ef41a2821b404e088ae1c817c6c3515597 | 1,711 | py | Python | policy2tosca/build/lib.linux-x86_64-2.7/policy2tosca/del_type.py | Tosca-Projects/parser | 04dabfa15db432b24656d38628a846b73b658339 | [
"Apache-2.0"
] | 1 | 2018-05-14T09:59:51.000Z | 2018-05-14T09:59:51.000Z | policy2tosca/build/lib.linux-x86_64-2.7/policy2tosca/del_type.py | Tosca-Projects/parser | 04dabfa15db432b24656d38628a846b73b658339 | [
"Apache-2.0"
] | null | null | null | policy2tosca/build/lib.linux-x86_64-2.7/policy2tosca/del_type.py | Tosca-Projects/parser | 04dabfa15db432b24656d38628a846b73b658339 | [
"Apache-2.0"
] | 2 | 2019-04-20T15:00:58.000Z | 2020-06-16T14:42:35.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from cliff.command import Command
from cliff.show import ShowOne
import yaml
import json
class delete_type(Command):
"Delete policy type in a tosca file"
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(delete_type, self).get_parser(prog_name)
parser.add_argument('--policy_type', dest='policy_type', nargs='?',
help='delete policy type')
parser.add_argument('--source', dest='source', nargs='?',
help='source tosca yaml file')
return parser
def take_action(self, parsed_args):
yml = open(parsed_args.source, "r")
#lines=filehandle.readlines()
data = yaml.load(yml)
json_dump = json.dumps(data)
#print json_dump
tosca_json = json.loads(json_dump)
del tosca_json['topology_template']['policy_types'][parsed_args.policy_type]
tosca = json.dumps(tosca_json)
data2 = json.loads(tosca)
yml2 = yaml.safe_dump(data2, default_flow_style=False)
tosca_file = open(parsed_args.source, "w")
tosca_file.write(yml2)
| 35.645833 | 84 | 0.680304 | 1,048 | 0.612507 | 0 | 0 | 0 | 0 | 0 | 0 | 749 | 0.437756 |
70ae466b7e7db5a0d09e04c20a68c88f305a4aac | 77 | py | Python | build_start.py | sanman00/SpongeSkills | 7a54e424071a21a55a84479e0177ed7ebacc9c31 | [
"MIT"
] | null | null | null | build_start.py | sanman00/SpongeSkills | 7a54e424071a21a55a84479e0177ed7ebacc9c31 | [
"MIT"
] | 4 | 2017-05-05T15:51:09.000Z | 2017-05-08T17:18:27.000Z | build_start.py | sanman00/SpongeSkills | 7a54e424071a21a55a84479e0177ed7ebacc9c31 | [
"MIT"
] | null | null | null | from build import replace_text, version
replace_text("@{version}", version)
| 19.25 | 39 | 0.779221 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.155844 |
70b064cf7bb27004817810b3d9823f1d22968297 | 17,748 | py | Python | cuppa/cpp/create_version_file_cpp.py | pwj58/cuppa | 6c598a124c5aa52b459637ca1865cda2e2d300bd | [
"BSL-1.0"
] | 25 | 2015-09-24T07:04:45.000Z | 2022-02-19T03:31:03.000Z | cuppa/cpp/create_version_file_cpp.py | pwj58/cuppa | 6c598a124c5aa52b459637ca1865cda2e2d300bd | [
"BSL-1.0"
] | 46 | 2015-05-20T12:48:12.000Z | 2022-01-10T10:38:55.000Z | cuppa/cpp/create_version_file_cpp.py | pwj58/cuppa | 6c598a124c5aa52b459637ca1865cda2e2d300bd | [
"BSL-1.0"
] | 13 | 2015-07-12T09:55:03.000Z | 2021-07-02T15:32:12.000Z |
# Copyright Jamie Allsop 2011-2017
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#-------------------------------------------------------------------------------
# CreateVersionFileCpp
#-------------------------------------------------------------------------------
import os
from os.path import splitext, relpath, sep
from SCons.Script import File
import cuppa.location
from cuppa.utility.attr_tools import try_attr_as_str
def offset_path( path, env ):
    """Re-root *path*: strip the build-dir prefix and prepend the offset dir."""
    relative = relpath( path, env['build_dir'] )
    return env['offset_dir'] + sep + relative
def hpp_from_cpp( cpp_file ):
    """Return the companion header path for *cpp_file* (extension -> .hpp)."""
    base = splitext( cpp_file )[0]
    return base + '.hpp'
def txt_from_cpp( cpp_file ):
    """Return the companion text-file path for *cpp_file* (extension -> .txt)."""
    base = splitext( cpp_file )[0]
    return base + '.txt'
class CreateVersionHeaderCpp:
    """SCons action that writes the generated version header and text file.

    The files are written under ``base_path/build_dir``; the .hpp path is
    derived from the target .cpp path and both generated files are added
    to the action's source list so they participate in dependency
    tracking.
    """

    def __init__( self, env, namespaces, version, location, build_id=None ):
        self.__env = env
        self.__namespace_guard = "_".join( namespaces )
        self.__namespaces = namespaces
        self.__version = version
        self.__location = location
        self.__build_id = build_id
        self.__variant = env['variant'].name()
        self.__working_dir = os.path.join( env['base_path'], env['build_dir'] )
        # Ensure the output root exists up front.
        if not os.path.exists( self.__working_dir ):
            os.makedirs( self.__working_dir )

    def __call__( self, target, source, env ):
        cpp_file = offset_path( target[0].path, env )
        hpp_file = hpp_from_cpp( cpp_file )
        txt_file = txt_from_cpp( cpp_file )

        # Create any sub-directory the header path implies.
        sub_dir = os.path.split( hpp_file )[0]
        if sub_dir:
            full_sub_dir = os.path.join( self.__working_dir, sub_dir )
            if not os.path.exists( full_sub_dir ):
                os.makedirs( full_sub_dir )

        with open( os.path.join( self.__working_dir, hpp_file ), "w" ) as version_hpp:
            version_hpp.write( get_build_identity_header( self.__namespace_guard, self.__namespaces ) )

        with open( os.path.join( self.__working_dir, txt_file ), "w" ) as version_txt:
            version_txt.write( get_build_identity_txt( self.__version, relpath( env['base_path'], self.__location ), self.__namespaces ) )

        # Point the target at the offset cpp path and register the
        # generated files as additional sources.
        target[0] = File( cpp_file )
        source.append( hpp_file )
        source.append( txt_file )
        return target, source
def get_build_identity_txt( version, location, namespaces ):
    """Return the text contents of the generated version summary file."""
    header = '// v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v\n'
    footer = '// v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v v\n'
    body = ( '// Version File for product version [ ' + version + ' ]\n'
             '// Location for dependency versions [ ' + location + ' ]\n'
             '// Namespace                         [ ' + "::".join( namespaces ) + ' ]\n' )
    return header + body + footer
def get_build_identity_header( namespace_guard, namespaces ):
    """Return the C++ source text of the generated version header.

    The header declares the ``identity`` class inside the nested
    *namespaces* (with ``build`` innermost) and wraps everything in an
    include guard derived from *namespace_guard*.
    """
    lines = []
    # Include guard, standard includes and opening banner.
    lines += [ '// G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G\n'
               '#ifndef INCLUDED_' + namespace_guard.upper() + '_BUILD_GENERATED_VERSION_HPP\n'
               '#define INCLUDED_' + namespace_guard.upper() + '_BUILD_GENERATED_VERSION_HPP\n'
               '// G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G\n'
               '\n'
               '// I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I\n'
               '#include <string>\n'
               '#include <vector>\n'
               '#include <map>\n'
               '// I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I\n'
               '\n'
               '// n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n' ]
    # Open one C++ namespace per entry, outermost first.
    for namespace in namespaces:
        lines += [ 'namespace ' + namespace + ' {' ]
    lines += [ 'namespace build {\n'
               '// n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n\n'
               '\n'
               'class identity\n'
               '{\n'
               'public:\n'
               '\n'
               '    typedef std::string string_t;\n'
               '    typedef std::vector< string_t > revisions_t;\n'
               '\n'
               'private:\n'
               '\n'
               '    struct dependency\n'
               '    {\n'
               '        dependency()\n'
               '        {\n'
               '        }\n'
               '\n'
               '        dependency( const string_t& Name,\n'
               '                    const string_t& Version,\n'
               '                    const string_t& Repository,\n'
               '                    const string_t& Branch,\n'
               '                    const revisions_t& Revisions )\n'
               '        : name       ( Name )\n'
               '        , version    ( Version )\n'
               '        , repository ( Repository )\n'
               '        , branch     ( Branch )\n'
               '        , revisions  ( Revisions )\n'
               '        {\n'
               '        }\n'
               '\n'
               '        string_t name;\n'
               '        string_t version;\n'
               '        string_t repository;\n'
               '        string_t branch;\n'
               '        revisions_t revisions;\n'
               '    };\n'
               '\n'
               'public:\n'
               '\n'
               '    typedef dependency dependency_t;\n'
               '    typedef std::map< string_t, dependency > dependencies_t;\n'
               '\n'
               'public:\n' ]
    # Static accessor declarations for each piece of build identity.
    lines += [ function_declaration_from_variable( 'product_version' ) ]
    lines += [ function_declaration_from_variable( 'product_repository' ) ]
    lines += [ function_declaration_from_variable( 'product_branch' ) ]
    lines += [ function_declaration_from_variable( 'product_revision' ) ]
    lines += [ function_declaration_from_variable( 'build_variant' ) ]
    lines += [ function_declaration_from_variable( 'build_time' ) ]
    lines += [ function_declaration_from_variable( 'build_user' ) ]
    lines += [ function_declaration_from_variable( 'build_host' ) ]
    lines += [ function_declaration_from_variable( 'build_id' ) ]
    lines += [ function_declaration_dependencies() ]
    lines += [ function_declaration_report() ]
    lines += [ '\nprivate:\n'
               '    static const dependencies_t Dependencies_;\n'
               '    static const string_t       Report_;\n'
               '};\n'
               '\n'
               '// n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n\n'
               '} //end namespace build' ]
    # Close the namespaces opened above.
    for namespace in namespaces:
        lines += [ '} //end namespace ' + namespace ]
    lines += [ '// n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n\n'
               '\n'
               '// G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G\n'
               '#endif\n'
               '// G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G G\n'
               '\n' ]
    return "\n".join( lines )
def function_declaration_from_variable( name ):
    """Return the C++ declaration of a static ``const char*`` accessor *name*."""
    return '    static const char* {0}();'.format( name )
def function_declaration_dependencies():
    """Return the C++ declaration of the static dependencies() accessor."""
    return '    static const dependencies_t& dependencies();'
def function_declaration_report():
    """Return the C++ declaration of the static report() accessor."""
    return '    static const char* report();'
class CreateVersionFileCpp:
    """SCons action that writes the generated version .cpp source file.

    The emitted C++ defines the ``identity`` accessors declared by the
    generated header, including the dependency table and a formatted
    report string.
    """

    def __init__( self, env, namespaces, version, location, build_id=None ):
        self.__env = env
        self.__namespace_guard = "_".join( namespaces )
        self.__namespaces = namespaces
        self.__version = version
        self.__location = location
        self.__build_id = build_id
        # Resolve repository/branch/revision details from the location.
        location = cuppa.location.Location( env, location )
        self.__repository = location.repository()
        self.__branch = location.branch()
        self.__revision = location.revisions()[0]
        self.__variant = self.__env['variant'].name()

    def __call__( self, target, source, env ):
        """Write the generated .cpp next to the target path."""
        cpp_file = target[0].path
        hpp_file = hpp_from_cpp( cpp_file )
        #print "Create CPP Version File at [" + cpp_file + "]"
        version_cpp = open( cpp_file, "w" )
        version_cpp.write( self.get_build_identity_source( env['BUILD_WITH'], hpp_file, self.__build_id ) )
        version_cpp.close()
        return None

    def function_definition_from_variable( self, name, variable ):
        """Emit a C++ accessor *name* returning str(*variable*) as a literal."""
        lines = []
        lines += [ '\nconst char* identity::' + name + '()' ]
        lines += [ '{' ]
        lines += [ '    return "' + str( variable ) + '";' ]
        lines += [ '}\n' ]
        return "\n".join( lines )

    def function_definition_dependencies( self ):
        """Emit the C++ definition of identity::dependencies()."""
        lines = []
        lines += [ '\nconst identity::dependencies_t& identity::dependencies()\n'
                   '{\n'
                   '    return Dependencies_;\n'
                   '}\n' ]
        return "\n".join( lines )

    def initialise_dependencies_definition( self, dependencies ):
        """Emit the C++ initialiser that populates the dependency table.

        For each known dependency name a ``dependency_t`` entry is
        emitted with name/version/repository/branch (or "N/A") plus any
        revisions the dependency object can report.
        """
        lines = []
        lines += [ '\nidentity::dependencies_t initialise_dependencies()\n'
                   '{\n'
                   '    typedef identity::dependencies_t dependencies_t;\n'
                   '    typedef identity::dependency_t   dependency_t;\n'
                   '    typedef identity::revisions_t    revisions_t;\n'
                   '    dependencies_t Dependencies;' ]
        for name in dependencies:
            if name in self.__env['dependencies']:
                dependency_factory = self.__env['dependencies'][name]
                dependency = dependency_factory( self.__env )
                lines += [ '    Dependencies[ "' + name + '" ] = dependency_t( "'
                                + try_attr_as_str( dependency, "name", "N/A" ) + '", "'
                                + try_attr_as_str( dependency, "version", "N/A" ) + '", "'
                                + try_attr_as_str( dependency, "repository", "N/A" ) + '", "'
                                + try_attr_as_str( dependency, "branch", "N/A" )
                                + '", revisions_t() );' ]
                # Dependencies without a revisions() callable are simply
                # emitted with an empty revision list.
                try:
                    if callable( getattr( dependency, 'revisions' ) ):
                        revisions = dependency.revisions()
                        if revisions:
                            for revision in revisions:
                                lines += [ '    Dependencies[ "' + name + '" ].revisions.push_back( "' + str(revision) + '" );' ]
                except AttributeError:
                    pass
        lines += [ '    return Dependencies;\n'
                   '}\n'
                   '\n'
                   'const identity::dependencies_t identity::Dependencies_ = initialise_dependencies();\n' ]
        return "\n".join( lines )

    def function_definition_report( self ):
        """Emit the C++ definition of identity::report()."""
        lines = []
        lines += [ '\nconst char* identity::report()' ]
        lines += [ '{' ]
        lines += [ '    return Report_.c_str();' ]
        lines += [ '}\n' ]
        return "\n".join( lines )

    def initialise_report_definition( self ):
        """Emit the C++ initialiser that renders the human-readable report."""
        lines = []
        lines += [ '\nidentity::string_t initialise_report()\n'
                   '{\n'
                   '    std::ostringstream Report;\n'
                   '\n'
                   '    Report\n'
                   '        << "Product:\\n"\n'
                   '           "  |- Version    = " << identity::product_version() << "\\n"\n'
                   '           "  |- Repository = " << identity::product_repository() << "\\n"\n'
                   '           "  |- Branch     = " << identity::product_branch() << "\\n"\n'
                   '           "  +- Revision   = " << identity::product_revision() << "\\n"\n'
                   '           "Build:\\n"\n'
                   '           "  |- Variant = " << identity::build_variant() << "\\n"\n'
                   '           "  |- Time    = " << identity::build_time() << "\\n"\n'
                   '           "  |- User    = " << identity::build_user() << "\\n"\n'
                   '           "  |- Host    = " << identity::build_host() << "\\n"\n'
                   '           "  +- ID      = " << identity::build_id() << "\\n";\n'
                   '\n'
                   '    if( !identity::dependencies().empty() )\n'
                   '    {\n'
                   '        Report << "Dependencies:\\n";\n'
                   '    }\n'
                   '\n'
                   '    identity::dependencies_t::const_iterator Dependency = identity::dependencies().begin();\n'
                   '    identity::dependencies_t::const_iterator End        = identity::dependencies().end();\n'
                   '\n'
                   '    for( ; Dependency != End; ++Dependency )\n'
                   '    {\n'
                   '        Report\n'
                   '            << "  " << Dependency->second.name << "\\n"\n'
                   '            << "    |- Version    = " << Dependency->second.version << "\\n"\n'
                   '            << "    |- Repository = " << Dependency->second.repository << "\\n"\n'
                   '            << "    |- Branch     = " << Dependency->second.branch << "\\n";\n'
                   '\n'
                   '        identity::revisions_t::const_iterator Revision = Dependency->second.revisions.begin();\n'
                   '        identity::revisions_t::const_iterator End      = Dependency->second.revisions.end();\n'
                   '\n'
                   '        for( ; Revision != End; )\n'
                   '        {\n'
                   '            identity::string_t Value( *Revision );\n'
                   '            if( ++Revision != End )\n'
                   '            {\n'
                   '                Report << "    |";\n'
                   '            }\n'
                   '            else\n'
                   '            {\n'
                   '                Report << "    +";\n'
                   '            }\n'
                   '            Report << "- Revision = " << Value << "\\n";\n'
                   '        }\n'
                   '    }\n'
                   '\n'
                   '    return Report.str();\n'
                   '}\n'
                   '\n'
                   'const identity::string_t identity::Report_ = initialise_report();' ]
        return "\n".join( lines )

    def get_build_identity_source( self, dependencies, header_file, build_id ):
        """Assemble the complete generated .cpp source text.

        Captures the build time, user and host at generation time and
        stitches together the accessor definitions inside the configured
        namespaces.
        """
        from datetime import datetime
        from getpass import getuser
        from socket import gethostname

        build_time = datetime.utcnow()
        build_user = getuser()
        build_host = gethostname()

        lines = []
        lines += [ '// I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I\n'
                   '// Self Include' ]
        lines += [ '#include "' + header_file + '"' ]
        lines += [ ''
                   '// C++ Standard Includes\n'
                   '#include <sstream>\n'
                   '\n'
                   '// I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I I\n'
                   '\n'
                   '// n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n' ]
        # Open the configured namespaces, outermost first.
        for namespace in self.__namespaces:
            lines += [ 'namespace ' + namespace + ' {' ]
        lines += [ 'namespace build {\n'
                   '// n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n\n' ]
        lines += [ self.function_definition_from_variable( 'product_version', self.__version ) ]
        lines += [ self.function_definition_from_variable( 'product_repository', self.__repository ) ]
        lines += [ self.function_definition_from_variable( 'product_branch', self.__branch ) ]
        lines += [ self.function_definition_from_variable( 'product_revision', self.__revision ) ]
        lines += [ self.function_definition_from_variable( 'build_variant', self.__variant ) ]
        lines += [ self.function_definition_from_variable( 'build_time', build_time ) ]
        lines += [ self.function_definition_from_variable( 'build_user', build_user ) ]
        lines += [ self.function_definition_from_variable( 'build_host', build_host ) ]
        lines += [ self.function_definition_from_variable( 'build_id', build_id ) ]
        lines += [ self.initialise_dependencies_definition( dependencies ) ]
        lines += [ self.function_definition_dependencies() ]
        lines += [ self.initialise_report_definition() ]
        lines += [ self.function_definition_report() ]
        lines += [ '\n// n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n\n'
                   '} //end namespace build' ]
        # Close the namespaces opened above.
        for namespace in self.__namespaces:
            lines += [ '} //end namespace ' + namespace ]
        lines += [ '// n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n n\n'
                   '\n' ]
        return "\n".join( lines )
| 44.259352 | 133 | 0.46963 | 11,369 | 0.640579 | 0 | 0 | 0 | 0 | 0 | 0 | 7,409 | 0.417455 |
70b086468733c1b6ce77c49c47bc0d4d8412c1dd | 101 | py | Python | enthought/mayavi/components/implicit_plane.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 3 | 2016-12-09T06:05:18.000Z | 2018-03-01T13:00:29.000Z | enthought/mayavi/components/implicit_plane.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 1 | 2020-12-02T00:51:32.000Z | 2020-12-02T08:48:55.000Z | enthought/mayavi/components/implicit_plane.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | null | null | null | # proxy module
from __future__ import absolute_import
from mayavi.components.implicit_plane import *
| 25.25 | 46 | 0.851485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.138614 |
70b0ec3b0f20b624360319ad2024474c4a16cdca | 928 | py | Python | Learning/python_data_analysis11.py | VictoriaGuXY/MCO-Menu-Checker-Online | 706e2e1bf7395cc344f382ea2ac53d964d459f86 | [
"MIT"
] | null | null | null | Learning/python_data_analysis11.py | VictoriaGuXY/MCO-Menu-Checker-Online | 706e2e1bf7395cc344f382ea2ac53d964d459f86 | [
"MIT"
] | null | null | null | Learning/python_data_analysis11.py | VictoriaGuXY/MCO-Menu-Checker-Online | 706e2e1bf7395cc344f382ea2ac53d964d459f86 | [
"MIT"
] | null | null | null | import pandas as pd
from scipy.stats import ttest_rel
"""
output
"""
# Note: some output is shortened to save spaces.
# This file discusses statistical analysis (Part II).
# ------------------------------------------------------------------------------
# Data stored in form of xlsx with contents:
"""
group data
0 1 34
1 1 37
2 1 28
3 1 36
4 1 30
5 2 43
6 2 45
7 2 47
8 2 49
9 2 39
"""
# Assume these data are paired sample.
# ------------------------------------------------------------------------------
IS_t_test = pd.read_excel('E:\\IS_t_test.xlsx')
Group1 = IS_t_test[IS_t_test['group']==1]['data']
Group2 = IS_t_test[IS_t_test['group']==2]['data']
print (ttest_rel(Group1,Group2))
"""
(-5.6873679190073361, 0.00471961872448184)
"""
# The first element from output is the value of t
# The second element from output is p-value
| 21.090909 | 80 | 0.516164 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 717 | 0.772629 |
70b1ab24a4c3db1cf759b4299c57523f7d5eee15 | 6,640 | py | Python | pytorch_ares/third_party/free_adv_train/multi_restart_pgd_attack.py | thu-ml/realsafe | 474d549aa402b4cdd5e3629d23d035c31b60a360 | [
"MIT"
] | 107 | 2020-06-15T09:55:11.000Z | 2020-12-20T11:27:11.000Z | pytorch_ares/third_party/free_adv_train/multi_restart_pgd_attack.py | haichen-ber/ares | 474d549aa402b4cdd5e3629d23d035c31b60a360 | [
"MIT"
] | 7 | 2020-06-14T03:00:18.000Z | 2020-12-07T07:10:10.000Z | pytorch_ares/third_party/free_adv_train/multi_restart_pgd_attack.py | haichen-ber/ares | 474d549aa402b4cdd5e3629d23d035c31b60a360 | [
"MIT"
] | 19 | 2020-06-14T08:35:33.000Z | 2020-12-19T13:43:41.000Z | """
Implementation of attack methods. Running this file as a program will
evaluate the model and get the validation accuracy and then
apply the attack to the model specified by the config file and store
the examples in an .npy file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import sys
import cifar10_input
import cifar100_input
import config
from tqdm import tqdm
import os
# Parse command-line arguments once at import time.  Note this rebinds the
# name `config` from the imported module to its parsed-arguments object.
config = config.get_args()
_NUM_RESTARTS = config.num_restarts  # number of random PGD restarts per batch
class LinfPGDAttack:
    """Multi-step projected gradient descent (PGD) attack in l_infinity norm.

    The attack performs `num_steps` signed-gradient steps of size
    `step_size`, projecting back into the epsilon-ball around the original
    input (and the valid [0, 255] pixel range) after every step.
    """

    def __init__(self, model, epsilon, num_steps, step_size, loss_func,
                 num_classes=10):
        """Build the attack-loss gradient on top of `model`.

        Args:
          model: model exposing x_input, y_input, xent and pre_softmax.
          epsilon: l_infinity radius of the perturbation (0-255 pixel scale).
          num_steps: number of PGD iterations.
          step_size: magnitude of each signed-gradient update.
          loss_func: 'xent' for cross-entropy or 'cw' for the Carlini-Wagner
            margin loss; anything else falls back to cross-entropy.
          num_classes: number of output classes used to build the CW loss.
            Defaults to 10 for backward compatibility; the previous code
            hard-coded 10, which was wrong when attacking CIFAR-100 models.
        """
        self.model = model
        self.epsilon = epsilon
        self.num_steps = num_steps
        self.step_size = step_size

        if loss_func == 'xent':
            loss = model.xent
        elif loss_func == 'cw':
            label_mask = tf.one_hot(model.y_input,
                                    num_classes,
                                    on_value=1.0,
                                    off_value=0.0,
                                    dtype=tf.float32)
            correct_logit = tf.reduce_sum(label_mask * model.pre_softmax, axis=1)
            # Mask out the true class with a large negative offset so the max
            # picks the strongest *wrong* logit.
            wrong_logit = tf.reduce_max((1 - label_mask) * model.pre_softmax - 1e4 * label_mask, axis=1)
            # Negative hinge on the logit margin (confidence margin kappa = 0).
            loss = -tf.nn.relu(correct_logit - wrong_logit + 0)
        else:
            print('Unknown loss function. Defaulting to cross-entropy')
            loss = model.xent

        self.grad = tf.gradients(loss, model.x_input)[0]

    def perturb(self, x_nat, y, sess):
        """Given a set of examples (x_nat, y), returns a set of adversarial
        examples within epsilon of x_nat in l_infinity norm."""
        # Random start uniformly inside the epsilon-ball, clipped to valid pixels.
        x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
        x = np.clip(x, 0, 255)

        for i in range(self.num_steps):
            grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
                                                  self.model.y_input: y})
            # In-place signed-gradient ascent step.
            x = np.add(x, self.step_size * np.sign(grad), out=x, casting='unsafe')
            # Project back into the epsilon-ball, then the valid pixel range.
            x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
            x = np.clip(x, 0, 255)

        return x
def get_path_dir(data_dir, dataset, **_):
    """Join `data_dir` and `dataset` into a path, resolving it one level if
    the result is a symbolic link.  Extra keyword arguments are ignored."""
    candidate = os.path.join(data_dir, dataset)
    return os.readlink(candidate) if os.path.islink(candidate) else candidate
if __name__ == '__main__':
    import sys
    import math

    from free_model import Model

    # Locate the newest checkpoint in the configured model directory.
    model_file = tf.train.latest_checkpoint(config.model_dir)
    if model_file is None:
        print('No model found')
        sys.exit()

    dataset = config.dataset
    data_dir = config.data_dir
    data_path = get_path_dir(data_dir, dataset)
    model = Model(mode='eval', dataset=dataset)
    attack = LinfPGDAttack(model,
                           config.epsilon,
                           config.pgd_steps,
                           config.step_size,
                           config.loss_func)
    saver = tf.train.Saver()

    # Pick the matching dataset loader (CIFAR-10 vs CIFAR-100).
    if dataset == 'cifar10':
        cifar = cifar10_input.CIFAR10Data(data_path)
    else:
        cifar = cifar100_input.CIFAR100Data(data_path)

    with tf.Session() as sess:
        # Restore the checkpoint
        saver.restore(sess, model_file)

        # Iterate over the samples batch-by-batch
        num_eval_examples = config.eval_examples
        eval_batch_size = config.eval_size
        num_batches = int(math.ceil(num_eval_examples / eval_batch_size))

        x_adv = []  # adv accumulator

        # Pass 1: clean (un-attacked) validation accuracy.
        print('getting clean validation accuracy')
        total_corr = 0
        for ibatch in tqdm(range(num_batches)):
            bstart = ibatch * eval_batch_size
            bend = min(bstart + eval_batch_size, num_eval_examples)
            x_batch = cifar.eval_data.xs[bstart:bend, :].astype(np.float32)
            y_batch = cifar.eval_data.ys[bstart:bend]
            dict_val = {model.x_input: x_batch, model.y_input: y_batch}
            cur_corr = sess.run(model.num_correct, feed_dict=dict_val)
            total_corr += cur_corr
        print('** validation accuracy: %.3f **\n\n' % (total_corr / float(num_eval_examples) * 100))

        # Pass 2: multi-restart PGD; per example, keep the restart with the
        # highest loss as the final adversarial example.
        print('Iterating over {} batches'.format(num_batches))
        total_corr, total_num = 0, 0
        for ibatch in range(num_batches):
            bstart = ibatch * eval_batch_size
            bend = min(bstart + eval_batch_size, num_eval_examples)
            curr_num = bend - bstart
            total_num += curr_num
            print('mini batch: {}/{} -- batch size: {}'.format(ibatch + 1, num_batches, curr_num))
            sys.stdout.flush()
            x_batch = cifar.eval_data.xs[bstart:bend, :].astype(np.float32)
            y_batch = cifar.eval_data.ys[bstart:bend]

            # Baseline: the unperturbed batch and its per-example loss.
            best_batch_adv = np.copy(x_batch)
            dict_adv = {model.x_input: best_batch_adv, model.y_input: y_batch}
            cur_corr, y_pred_batch, best_loss = sess.run([model.num_correct, model.predictions, model.y_xent],
                                                         feed_dict=dict_adv)
            for ri in range(_NUM_RESTARTS):
                x_batch_adv = attack.perturb(x_batch, y_batch, sess)
                dict_adv = {model.x_input: x_batch_adv, model.y_input: y_batch}
                cur_corr, y_pred_batch, this_loss = sess.run([model.num_correct, model.predictions, model.y_xent],
                                                             feed_dict=dict_adv)
                # bw marks examples where this restart achieved a strictly
                # higher loss than the best so far.
                # NOTE(review): `bb` is computed but never used.
                bb = best_loss >= this_loss
                bw = best_loss < this_loss
                best_batch_adv[bw, :, :, :] = x_batch_adv[bw, :, :, :]

                # Re-evaluate the best-so-far adversarial batch.
                best_corr, y_pred_batch, best_loss = sess.run([model.num_correct, model.predictions, model.y_xent],
                                                              feed_dict={model.x_input: best_batch_adv,
                                                                         model.y_input: y_batch})
                print('restart %d: num correct: %d -- loss:%.4f' % (ri, best_corr, np.mean(best_loss)))

            total_corr += best_corr
            print('accuracy till now {:4}% \n\n'.format(float(total_corr) / total_num * 100))
            x_adv.append(best_batch_adv)

        x_adv = np.concatenate(x_adv, axis=0)
70b214dd70a108da0b34f14109cdc980a1bad769 | 166 | py | Python | run.py | envy7/project-dream-team-three | ebadf13ee0d76bd924bd1ba639d448f8e28ac01c | [
"Unlicense"
] | null | null | null | run.py | envy7/project-dream-team-three | ebadf13ee0d76bd924bd1ba639d448f8e28ac01c | [
"Unlicense"
] | null | null | null | run.py | envy7/project-dream-team-three | ebadf13ee0d76bd924bd1ba639d448f8e28ac01c | [
"Unlicense"
] | null | null | null | import os
from app import create_app
config_name = os.getenv('FLASK_CONFIG')
app = create_app('development')
if __name__ == '__main__':
app.run(host='0.0.0.0')
| 18.444444 | 39 | 0.710843 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.277108 |
70b25fd80453311c6bb986cd2ea2ef428837a8d3 | 30,392 | py | Python | tests/test_utils.py | jbradberry/universe | 109261f75c49435d118cf9c5238124466a03d2d3 | [
"MIT"
] | null | null | null | tests/test_utils.py | jbradberry/universe | 109261f75c49435d118cf9c5238124466a03d2d3 | [
"MIT"
] | 24 | 2020-04-14T20:32:21.000Z | 2021-08-03T13:29:45.000Z | tests/test_utils.py | jbradberry/universe | 109261f75c49435d118cf9c5238124466a03d2d3 | [
"MIT"
] | 3 | 2016-10-28T19:20:30.000Z | 2021-02-28T03:19:49.000Z | import unittest
from universe import components, engine, utils
class PlanetProductionTestCase(unittest.TestCase):
    """Checks utils.production over (population_per_r, population) pairs."""

    def test_values(self):
        # Minimal entity manager with only the components production reads.
        mgr = engine.Manager()
        mgr.register_entity_type('species', [components.SpeciesProductionComponent()])
        mgr.register_entity_type('planet', [components.PopulationComponent()])
        engine.Entity.register_manager(mgr)

        # (population_per_r, population, expected resource production)
        cases = [
            (700, 0, 0),
            (700, 699, 0),
            (700, 700, 1),
            (700, 1_000_000, 1428),
            (1000, 0, 0),
            (1000, 999, 0),
            (1000, 1000, 1),
            (1000, 1_000_000, 1000),
            (1500, 0, 0),
            (1500, 1499, 0),
            (1500, 1500, 1),
            (1500, 1_000_000, 666),
        ]

        for per_r, population, expected in cases:
            species = engine.Entity(
                type='species',
                population_per_r=per_r,
                minerals_per_m=5,
                mines_cost_r=2,
                mines_per_pop=5,
            )
            planet = engine.Entity(
                type='planet',
                population=population,
            )
            self.assertEqual(
                utils.production(species, planet), expected,
                f"population_per_r: {per_r}, population: {population}")
class PlanetValueTestCase(unittest.TestCase):
    """Regression tests for utils.planet_value against a large table of
    (gravity, temperature, radiation) -> habitability value samples."""

    def test_values(self):
        # Minimal entity manager with only the components planet_value reads.
        manager = engine.Manager()
        manager.register_entity_type('species', [
            components.SpeciesComponent(),
        ])
        manager.register_entity_type('planet', [
            components.EnvironmentComponent(),
        ])
        engine.Entity.register_manager(manager)

        # Reference species (Human) whose habitable ranges all expected
        # values below were computed against.
        species = engine.Entity(**
            {'type': 'species',
             'name': 'Human',
             'plural_name': 'Humans',
             'growth_rate': 15,
             'gravity_immune': False,
             'gravity_min': 32,
             'gravity_max': 86,
             'temperature_immune': False,
             'temperature_min': 10,
             'temperature_max': 64,
             'radiation_immune': False,
             'radiation_min': 38,
             'radiation_max': 90,}
        )

        # Each tuple is (gravity, temperature, radiation, expected value).
        data = [  # Taken from a historical Stars game.
            (59, 37, 64, 100),
            (59, 36, 64, 99),
            (57, 37, 64, 98),
            (59, 37, 66, 98),
            (59, 35, 64, 98),
            (62, 37, 64, 97),
            (59, 35, 63, 97),
            (59, 37, 61, 97),
            (59, 40, 64, 97),
            (59, 39, 66, 96),
            (59, 33, 64, 96),
            (59, 36, 68, 95),
            (59, 33, 63, 95),
            (59, 41, 66, 94),
            (53, 37, 64, 94),
            (59, 31, 64, 94),
            (62, 37, 61, 93),
            (59, 44, 64, 93),
            (59, 37, 57, 93),
            (59, 31, 63, 93),
            (59, 45, 64, 92),
            (67, 37, 64, 92),
            (59, 36, 70, 92),
            (59, 37, 72, 92),
            (61, 32, 64, 92),
            (59, 29, 63, 91),
            (59, 47, 64, 90),
            (59, 45, 66, 89),
            (66, 37, 61, 89),
            (69, 36, 64, 89),
            (67, 37, 62, 89),
            (59, 37, 53, 89),
            (59, 37, 76, 88),
            (59, 36, 75, 88),
            (60, 27, 63, 88),
            (59, 26, 65, 88),
            (61, 28, 64, 88),
            (47, 38, 64, 87),
            (59, 36, 76, 87),
            (59, 38, 51, 86),
            (71, 37, 62, 86),
            (68, 41, 64, 86),
            (70, 35, 65, 85),
            (45, 37, 64, 85),
            (73, 37, 64, 85),
            (59, 51, 64, 85),
            (59, 45, 70, 84),
            (52, 40, 68, 84),
            (61, 24, 63, 84),
            (71, 37, 60, 83),
            (63, 26, 65, 83),
            (60, 27, 60, 83),
            (52, 41, 68, 83),
            (68, 35, 68, 83),
            (61, 30, 58, 82),
            (46, 42, 64, 81),
            (56, 51, 64, 81),
            (60, 27, 58, 81),
            (73, 34, 64, 81),
            (52, 42, 68, 81),
            (68, 45, 64, 81),
            (73, 37, 61, 81),
            (51, 35, 57, 80),
            (53, 35, 55, 80),
            (72, 31, 64, 80),
            (60, 27, 57, 80),
            (68, 33, 68, 80),
            (59, 42, 51, 80),
            (59, 32, 77, 80),
            (66, 26, 65, 79),
            (51, 35, 56, 79),
            (59, 37, 49, 79),
            (59, 51, 60, 79),
            (73, 37, 60, 79),
            (68, 32, 68, 79),
            (59, 30, 77, 78),
            (72, 37, 57, 78),
            (61, 27, 57, 78),
            (59, 51, 59, 78),
            (52, 43, 69, 78),
            (60, 24, 70, 78),
            (59, 24, 71, 78),
            (61, 26, 58, 78),
            (57, 42, 77, 78),
            (52, 43, 70, 77),
            (60, 24, 71, 77),
            (73, 43, 64, 77),
            (62, 27, 57, 76),
            (58, 48, 73, 76),
            (59, 46, 51, 76),
            (73, 31, 65, 76),
            (72, 37, 55, 76),
            (60, 24, 72, 76),
            (61, 24, 70, 76),
            (59, 37, 48, 75),
            (52, 43, 72, 75),
            (68, 33, 56, 75),
            (68, 29, 68, 75),
            (73, 45, 64, 75),
            (60, 24, 73, 74),
            (52, 43, 73, 74),
            (59, 26, 77, 74),
            (68, 46, 68, 74),
            (61, 24, 56, 74),
            (46, 43, 68, 73),
            (68, 27, 68, 73),
            (68, 33, 74, 73),
            (63, 24, 70, 73),
            (71, 50, 64, 73),
            (72, 37, 51, 72),
            (52, 43, 74, 72),
            (68, 26, 68, 72),
            (73, 47, 64, 72),
            (73, 36, 54, 71),
            (62, 46, 51, 71),
            (52, 43, 75, 71),
            (51, 50, 68, 71),
            (43, 42, 64, 71),
            (55, 34, 49, 70),
            (59, 54, 61, 70),
            (68, 50, 68, 70),
            (65, 24, 70, 70),
            (66, 46, 74, 69),
            (41, 37, 64, 69),
            (73, 50, 64, 69),
            (52, 48, 73, 68),
            (75, 31, 65, 68),
            (67, 24, 70, 68),
            (68, 24, 69, 68),
            (56, 49, 77, 68),
            (68, 27, 72, 67),
            (43, 46, 64, 67),
            (63, 26, 51, 67),
            (74, 28, 61, 66),
            (65, 46, 51, 66),
            (68, 33, 78, 66),
            (73, 34, 53, 66),
            (41, 36, 61, 65),
            (47, 42, 77, 65),
            (51, 43, 78, 64),
            (59, 37, 45, 64),
            (73, 34, 51, 64),
            (43, 49, 64, 64),
            (64, 38, 81, 64),
            (73, 51, 66, 64),
            (53, 55, 64, 63),
            (49, 27, 54, 63),
            (50, 43, 78, 63),
            (59, 56, 61, 63),
            (57, 23, 78, 63),
            (73, 32, 53, 63),
            (66, 26, 51, 63),
            (57, 25, 79, 62),
            (59, 55, 71, 62),
            (69, 40, 79, 62),
            (75, 28, 61, 62),
            (77, 31, 65, 62),
            (65, 24, 51, 62),
            (53, 50, 77, 62),
            (60, 32, 46, 61),
            (73, 32, 51, 61),
            (56, 23, 78, 61),
            (47, 42, 78, 61),
            (41, 37, 56, 61),
            (47, 45, 77, 60),
            (68, 26, 51, 60),
            (76, 28, 61, 59),
            (66, 46, 49, 59),
            (41, 37, 74, 59),
            (56, 22, 78, 58),
            (74, 31, 53, 58),
            (62, 32, 46, 58),
            (43, 52, 64, 58),
            (64, 38, 45, 57),
            (59, 58, 61, 57),
            (64, 38, 83, 57),
            (73, 51, 58, 57),
            (78, 33, 68, 57),
            (70, 26, 51, 57),
            (41, 37, 52, 57),
            (77, 28, 61, 56),
            (74, 31, 51, 56),
            (79, 31, 65, 56),
            (69, 24, 51, 56),
            (73, 53, 67, 56),
            (49, 50, 77, 56),
            (66, 46, 48, 55),
            (67, 38, 45, 55),
            (72, 26, 51, 55),
            (52, 51, 50, 55),
            (58, 35, 43, 54),
            (56, 21, 78, 54),
            (45, 46, 77, 54),
            (79, 33, 68, 54),
            (78, 28, 61, 53),
            (76, 31, 53, 53),
            (39, 37, 74, 53),
            (47, 50, 77, 53),
            (41, 39, 51, 53),
            (56, 20, 78, 52),
            (63, 16, 67, 52),
            (66, 46, 47, 52),
            (62, 32, 44, 52),
            (80, 31, 65, 52),
            (47, 37, 83, 52),
            (72, 24, 51, 52),
            (64, 38, 85, 51),
            (47, 42, 81, 51),
            (69, 57, 66, 50),
            (76, 31, 51, 50),
            (59, 60, 61, 50),
            (46, 51, 77, 50),
            (48, 51, 50, 50),
            (70, 42, 82, 50),
            (66, 46, 46, 49),
            (67, 40, 44, 49),
            (77, 31, 53, 49),
            (47, 37, 84, 49),
            (47, 39, 83, 49),
            (77, 31, 52, 49),
            (41, 42, 51, 49),
            (47, 40, 83, 48),
            (43, 46, 77, 48),
            (73, 56, 67, 48),
            (82, 31, 65, 47),
            (47, 35, 84, 47),
            (37, 37, 74, 47),
            (81, 42, 68, 47),
            (48, 56, 70, 46),
            (65, 32, 43, 46),
            (62, 32, 42, 46),
            (47, 42, 83, 46),
            (78, 31, 52, 46),
            (72, 22, 51, 46),
            (82, 33, 68, 46),
            (64, 38, 87, 45),
            (78, 31, 51, 45),
            (73, 42, 82, 45),
            (59, 62, 61, 44),
            (79, 31, 53, 44),
            (43, 57, 64, 44),
            (47, 35, 85, 44),
            (84, 33, 64, 44),
            (72, 23, 49, 44),
            (41, 45, 51, 44),
            (70, 27, 82, 43),
            (75, 28, 79, 43),
            (75, 56, 67, 43),
            (46, 52, 78, 43),
            (68, 59, 69, 42),
            (66, 32, 42, 42),
            (35, 37, 74, 42),
            (84, 32, 63, 42),
            (84, 33, 66, 42),
            (47, 51, 48, 42),
            (84, 31, 65, 41),
            (80, 31, 53, 41),
            (47, 35, 86, 41),
            (67, 40, 41, 40),
            (81, 31, 53, 39),
            (41, 45, 49, 39),
            (47, 35, 87, 38),
            (40, 57, 64, 38),
            (85, 32, 62, 38),
            (77, 56, 67, 38),
            (46, 53, 79, 38),
            (77, 42, 82, 37),
            (77, 54, 57, 37),
            (85, 30, 62, 37),
            (33, 37, 74, 36),
            (48, 35, 88, 36),
            (85, 29, 62, 36),
            (72, 21, 48, 36),
            (40, 45, 48, 35),
            (32, 37, 74, 34),
            (83, 31, 53, 34),
            (59, 23, 89, 34),
            (59, 17, 44, 34),
            (62, 19, 44, 34),
            (79, 56, 67, 34),
            (47, 51, 45, 34),
            (67, 40, 38, 33),
            (72, 19, 49, 33),
            (38, 57, 64, 33),
            (61, 18, 44, 33),
            (75, 28, 83, 33),
            (46, 54, 80, 33),
            (84, 31, 53, 32),
            (78, 42, 83, 32),
            (66, 32, 38, 32),
            (57, 17, 44, 32),
            (62, 19, 43, 32),
            (83, 38, 48, 32),
            (71, 11, 68, 31),
            (56, 17, 44, 31),
            (72, 18, 49, 31),
            (37, 57, 64, 31),
            (41, 57, 57, 31),
            (72, 20, 47, 31),
            (38, 45, 48, 31),
            (83, 35, 48, 31),
            (69, 16, 49, 30),
            (81, 56, 67, 30),
            (83, 40, 48, 30),
            (72, 17, 49, 29),
            (79, 56, 57, 29),
            (65, 19, 43, 29),
            (32, 50, 68, 29),
            (46, 54, 82, 29),
            (37, 45, 48, 29),
            (84, 31, 78, 29),
            (83, 33, 48, 29),
            (67, 20, 43, 28),
            (68, 17, 82, 28),
            (47, 53, 44, 28),
            (49, 12, 52, 28),
            (35, 57, 64, 27),
            (69, 45, 38, 27),
            (32, 51, 68, 27),
            (41, 59, 57, 27),
            (38, 56, 57, 27),
            (37, 55, 57, 27),
            (36, 45, 48, 27),
            (84, 31, 79, 27),
            (71, 17, 47, 26),
            (34, 57, 64, 26),
            (86, 51, 70, 26),
            (32, 51, 70, 26),
            (41, 59, 56, 26),
            (61, 12, 46, 26),
            (72, 19, 46, 26),
            (68, 17, 83, 26),
            (47, 12, 52, 26),
            (48, 12, 52, 26),
            (84, 56, 67, 25),
            (83, 47, 49, 25),
            (46, 54, 84, 25),
            (72, 17, 47, 25),
            (35, 45, 48, 25),
            (46, 12, 52, 25),
            (33, 57, 64, 24),
            (86, 52, 70, 24),
            (73, 20, 44, 24),
            (59, 10, 46, 24),
            (41, 59, 54, 24),
            (61, 11, 46, 24),
            (84, 28, 79, 24),
            (47, 55, 44, 24),
            (45, 12, 52, 24),
            (46, 54, 85, 23),
            (86, 53, 70, 23),
            (73, 17, 47, 23),
            (45, 63, 53, 23),
            (61, 10, 46, 23),
            (67, 20, 40, 23),
            (72, 18, 45, 23),
            (34, 45, 48, 23),
            (84, 27, 79, 23),
            (70, 17, 83, 23),
            (32, 57, 64, 22),
            (83, 42, 84, 22),
            (44, 12, 52, 22),
            (65, 17, 41, 22),
            (32, 51, 74, 22),
            (73, 19, 44, 22),
            (33, 45, 48, 22),
            (76, 15, 78, 21),
            (84, 54, 74, 21),
            (84, 25, 79, 21),
            (47, 55, 42, 21),
            (72, 16, 83, 20),
            (41, 61, 53, 20),
            (32, 45, 48, 20),
            (72, 17, 44, 20),
            (73, 17, 83, 20),
            (36, 23, 81, 19),
            (49, 12, 46, 19),
            (42, 63, 53, 18),
            (73, 19, 41, 18),
            (72, 16, 44, 18),
            (86, 25, 79, 18),
            (47, 57, 42, 18),
            (48, 12, 46, 18),
            (86, 58, 69, 17),
            (72, 16, 43, 17),
            (47, 58, 42, 17),
            (47, 12, 46, 17),
            (72, 15, 43, 16),
            (71, 60, 85, 16),
            (73, 20, 39, 16),
            (73, 19, 40, 16),
            (75, 17, 83, 16),
            (47, 59, 42, 16),
            (32, 29, 84, 16),
            (46, 12, 46, 16),
            (85, 60, 71, 15),
            (83, 46, 41, 15),
            (45, 12, 46, 15),
            (76, 16, 83, 14),
            (47, 60, 42, 14),
            (44, 12, 46, 14),
            (47, 61, 42, 13),
            (47, 62, 41, 12),
            (47, 61, 41, 12),
            (43, 12, 46, 12),
            (32, 64, 68, 12),
            (42, 12, 46, 11),
            (77, 14, 83, 11),
            (86, 57, 49, 10),
            (70, 63, 38, 9),
            (77, 12, 83, 9),
            (34, 23, 90, 8),
            (77, 10, 83, 8),
            (75, 12, 88, 7),
            (34, 10, 79, 6),
            (39, 13, 42, 5),
            (32, 60, 85, 3),
            (32, 59, 85, 3),
            (32, 63, 85, 2),
            (75, 9, 88, -1),
            (47, 41, 37, -1),
            (87, 20, 51, -1),
            (87, 53, 70, -1),
            (83, 65, 60, -1),
            (45, 65, 53, -1),
            (31, 45, 48, -1),
            (32, 65, 85, -1),
            (87, 25, 79, -1),
            (49, 41, 37, -1),
            (50, 41, 37, -1),
            (73, 45, 37, -1),
            (31, 29, 84, -1),
            (51, 41, 37, -1),
            (52, 41, 37, -1),
            (76, 53, 36, -2),
            (88, 34, 73, -2),
            (75, 63, 36, -2),
            (30, 45, 48, -2),
            (88, 53, 70, -2),
            (48, 66, 48, -2),
            (48, 66, 50, -2),
            (54, 8, 46, -2),
            (48, 66, 51, -2),
            (55, 8, 46, -2),
            (47, 66, 53, -2),
            (57, 8, 46, -2),
            (59, 8, 46, -2),
            (67, 20, 36, -2),
            (77, 8, 83, -2),
            (72, 8, 44, -2),
            (89, 29, 62, -3),
            (89, 57, 49, -3),
            (77, 7, 83, -3),
            (89, 18, 53, -3),
            (50, 7, 46, -3),
            (84, 20, 35, -3),
            (48, 44, 35, -3),
            (52, 7, 46, -3),
            (89, 53, 70, -3),
            (53, 7, 46, -3),
            (67, 20, 35, -3),
            (31, 66, 85, -3),
            (89, 25, 79, -3),
            (73, 19, 35, -3),
            (51, 44, 35, -3),
            (53, 44, 35, -3),
            (51, 65, 36, -3),
            (54, 44, 35, -3),
            (58, 44, 35, -3),
            (59, 44, 35, -3),
            (89, 31, 78, -3),
            (46, 7, 90, -3),
            (46, 7, 89, -3),
            (63, 23, 34, -4),
            (90, 18, 76, -4),
            (30, 15, 36, -4),
            (56, 68, 70, -4),
            (90, 53, 70, -4),
            (44, 32, 94, -4),
            (66, 32, 34, -4),
            (73, 20, 34, -4),
            (30, 66, 85, -4),
            (90, 25, 79, -4),
            (67, 20, 34, -4),
            (72, 6, 38, -4),
            (51, 65, 35, -4),
            (72, 6, 42, -4),
            (59, 44, 34, -4),
            (28, 29, 84, -4),
            (83, 46, 34, -4),
            (46, 7, 91, -4),
            (70, 32, 34, -4),
            (36, 69, 54, -5),
            (66, 59, 33, -5),
            (91, 61, 52, -5),
            (36, 69, 77, -5),
            (27, 58, 48, -5),
            (66, 69, 78, -5),
            (91, 53, 70, -5),
            (63, 10, 95, -5),
            (27, 27, 69, -5),
            (27, 25, 90, -5),
            (27, 25, 89, -5),
            (91, 25, 79, -5),
            (27, 25, 88, -5),
            (27, 25, 86, -5),
            (27, 25, 85, -5),
            (27, 25, 84, -5),
            (51, 65, 34, -5),
            (27, 26, 84, -5),
            (27, 27, 84, -5),
            (27, 29, 84, -5),
            (59, 44, 33, -5),
            (46, 7, 92, -5),
            (91, 38, 47, -5),
            (32, 21, 32, -6),
            (67, 70, 64, -6),
            (73, 32, 32, -6),
            (92, 53, 70, -6),
            (86, 33, 32, -6),
            (36, 14, 96, -6),
            (26, 52, 88, -6),
            (67, 25, 96, -6),
            (83, 69, 37, -6),
            (92, 25, 79, -6),
            (30, 68, 85, -6),
            (73, 23, 32, -6),
            (73, 25, 32, -6),
            (73, 28, 32, -6),
            (85, 33, 32, -6),
            (83, 32, 32, -6),
            (46, 7, 93, -6),
            (92, 38, 38, -6),
            (92, 38, 40, -6),
            (92, 38, 41, -6),
            (92, 38, 43, -6),
            (26, 52, 87, -6),
            (92, 38, 45, -6),
            (26, 52, 85, -6),
            (92, 38, 46, -6),
            (26, 52, 84, -6),
            (39, 71, 72, -7),
            (28, 12, 35, -7),
            (73, 32, 31, -7),
            (64, 71, 56, -7),
            (75, 27, 31, -7),
            (73, 25, 31, -7),
            (25, 26, 85, -7),
            (73, 30, 31, -7),
            (75, 31, 31, -7),
            (25, 44, 55, -7),
            (53, 62, 97, -7),
            (73, 45, 31, -7),
            (77, 24, 31, -7),
            (27, 25, 92, -7),
            (93, 25, 79, -7),
            (84, 32, 31, -7),
            (30, 69, 85, -7),
            (67, 22, 31, -7),
            (67, 26, 31, -7),
            (62, 32, 31, -7),
            (66, 32, 31, -7),
            (73, 28, 31, -7),
            (93, 37, 61, -7),
            (73, 27, 31, -7),
            (46, 7, 94, -7),
            (59, 40, 31, -7),
            (78, 32, 31, -7),
            (59, 39, 31, -7),
            (75, 32, 31, -7),
            (73, 42, 31, -7),
            (80, 32, 31, -7),
            (73, 31, 31, -7),
            (70, 32, 31, -7),
            (73, 41, 31, -7),
            (63, 32, 31, -7),
            (67, 32, 31, -7),
            (78, 72, 71, -8),
            (72, 20, 30, -8),
            (42, 66, 96, -8),
            (77, 28, 98, -8),
            (39, 65, 31, -8),
            (30, 51, 32, -8),
            (39, 22, 98, -8),
            (84, 32, 30, -8),
            (72, 6, 34, -8),
            (86, 58, 98, -8),
            (71, 7, 33, -8),
            (24, 10, 87, -8),
            (88, 70, 69, -8),
            (80, 29, 98, -8),
            (30, 70, 90, -8),
            (27, 25, 93, -8),
            (30, 70, 88, -8),
            (30, 70, 87, -8),
            (30, 70, 85, -8),
            (92, 38, 36, -8),
            (75, 41, 30, -8),
            (73, 20, 30, -8),
            (24, 50, 86, -8),
            (46, 7, 95, -8),
            (73, 41, 30, -8),
            (73, 37, 30, -8),
            (67, 25, 99, -9),
            (36, 52, 99, -9),
            (23, 61, 65, -9),
            (74, 32, 99, -9),
            (68, 30, 99, -9),
            (32, 39, 99, -9),
            (58, 73, 70, -9),
            (95, 25, 79, -9),
            (78, 73, 88, -9),
            (62, 12, 29, -9),
            (93, 44, 36, -9),
            (32, 37, 29, -9),
            (30, 70, 91, -9),
            (27, 25, 94, -9),
            (92, 38, 35, -9),
            (23, 50, 86, -9),
            (23, 50, 50, -9),
            (46, 7, 96, -9),
            (81, 73, 91, -10),
            (27, 20, 95, -10),
            (22, 49, 82, -10),
            (27, 21, 95, -10),
            (84, 32, 28, -10),
            (27, 23, 95, -10),
            (30, 70, 92, -10),
            (27, 24, 95, -10),
            (92, 38, 34, -10),
            (23, 65, 68, -10),
            (22, 50, 50, -10),
            (28, 32, 31, -11),
            (92, 15, 33, -11),
            (92, 38, 33, -11),
            (84, 32, 27, -11),
            (30, 70, 93, -11),
            (62, 32, 27, -11),
            (82, 76, 75, -12),
            (93, 44, 33, -12),
            (84, 76, 62, -12),
            (65, 74, 92, -12),
            (88, 10, 28, -12),
            (73, 33, 26, -12),
            (88, 13, 28, -12),
            (75, 41, 26, -12),
            (30, 70, 94, -12),
            (93, 25, 95, -12),
            (20, 50, 86, -12),
            (69, 36, 25, -13),
            (78, 56, 25, -13),
            (98, 9, 90, -13),
            (19, 21, 39, -13),
            (84, 32, 25, -13),
            (51, 65, 26, -13),
            (47, 72, 95, -13),
            (46, 60, 25, -13),
            (19, 21, 61, -13),
            (46, 6, 99, -13),
            (19, 49, 46, -13),
            (19, 49, 49, -13),
            (19, 49, 50, -13),
            (19, 50, 50, -13),
            (19, 50, 45, -13),
            (30, 70, 96, -14),
            (37, 66, 26, -14),
            (18, 53, 63, -14),
            (55, 70, 30, -14),
            (18, 49, 46, -14),
            (18, 50, 86, -14),
            (62, 32, 24, -14),
            (65, 32, 24, -14),
            (73, 20, 24, -14),
            (69, 32, 24, -14),
            (14, 28, 85, -15),
            (84, 33, 22, -15),
            (78, 63, 15, -15),
            (74, 61, 12, -15),
            (54, 34, 9, -15),
            (72, 49, 13, -15),
            (52, 44, 16, -15),
            (10, 37, 56, -15),
            (80, 54, 20, -15),
            (17, 14, 89, -15),
            (67, 90, 66, -15),
            (57, 62, 7, -15),
            (85, 16, 19, -15),
            (9, 17, 66, -15),
            (55, 34, 22, -15),
            (58, 91, 89, -15),
            (86, 52, 15, -15),
            (66, 56, 6, -15),
            (57, 90, 89, -15),
            (46, 19, 2, -15),
            (13, 27, 90, -15),
            (70, 87, 65, -15),
            (41, 33, 7, -15),
            (59, 45, 6, -15),
            (84, 32, 23, -15),
            (35, 89, 65, -15),
            (61, 54, 12, -15),
            (26, 37, 99, -15),
            (72, 91, 56, -15),
            (82, 45, 18, -15),
            (11, 30, 85, -15),
            (52, 92, 63, -15),
            (15, 61, 46, -15),
            (69, 16, 17, -15),
            (79, 17, 22, -15),
            (45, 20, 22, -15),
            (85, 39, 17, -15),
            (84, 36, 18, -15),
            (63, 17, 4, -15),
            (6, 31, 61, -15),
            (80, 50, 20, -15),
            (80, 44, 20, -15),
            (58, 80, 44, -15),
            (22, 69, 44, -15),
            (79, 44, 20, -15),
            (51, 49, 14, -15),
            (53, 79, 62, -15),
            (34, 89, 89, -15),
            (10, 51, 72, -15),
            (35, 88, 85, -15),
            (43, 47, 8, -15),
            (23, 70, 68, -15),
            (35, 90, 56, -15),
            (38, 79, 61, -15),
            (17, 11, 69, -15),
            (74, 50, 1, -15),
            (79, 63, 5, -15),
            (78, 44, 20, -15),
            (77, 27, 21, -15),
            (69, 41, 6, -15),
            (15, 55, 90, -15),
            (59, 90, 46, -15),
            (17, 47, 46, -15),
            (17, 35, 76, -15),
            (39, 88, 51, -15),
            (6, 64, 66, -15),
            (47, 79, 87, -15),
            (11, 39, 38, -15),
            (79, 80, 82, -15),
            (59, 21, 23, -15),
            (12, 33, 60, -15),
            (12, 32, 71, -15),
            (9, 35, 43, -15),
            (40, 94, 76, -15),
            (58, 82, 88, -15),
            (7, 54, 48, -15),
            (16, 55, 90, -15),
            (15, 61, 45, -15),
            (82, 90, 86, -15),
            (16, 53, 90, -15),
            (60, 35, 9, -15),
            (16, 51, 90, -15),
            (23, 55, 96, -15),
            (34, 88, 89, -15),
            (61, 34, 18, -15),
            (59, 45, 7, -15),
            (17, 50, 90, -15),
            (59, 45, 12, -15),
            (17, 50, 88, -15),
            (59, 45, 17, -15),
            (17, 50, 87, -15),
            (62, 44, 18, -15),
            (58, 34, 22, -15),
            (60, 34, 22, -15),
            (63, 32, 19, -15),
            (64, 44, 19, -15),
            (70, 16, 17, -15),
            (62, 33, 22, -15),
            (65, 44, 19, -15),
            (73, 19, 17, -15),
            (53, 44, 16, -15),
            (11, 39, 39, -15),
            (65, 32, 21, -15),
            (70, 43, 19, -15),
            (73, 19, 19, -15),
            (54, 44, 16, -15),
            (11, 39, 40, -15),
            (65, 32, 22, -15),
            (70, 38, 21, -15),
            (73, 20, 23, -15),
            (61, 44, 16, -15),
            (7, 64, 66, -15),
            (11, 39, 41, -15),
            (70, 34, 21, -15),
            (62, 44, 16, -15),
            (11, 39, 43, -15),
            (24, 3, 81, -15),
            (8, 64, 66, -15),
            (11, 39, 44, -15),
            (10, 64, 66, -15),
            (11, 64, 66, -15),
            (63, 44, 18, -15),
            (13, 64, 66, -15),
            (63, 44, 19, -15),
            (16, 64, 66, -15),
            (63, 43, 19, -15),
            (37, 79, 54, -15),
            (17, 64, 68, -15),
            (17, 35, 75, -15),
            (17, 61, 68, -15),
            (86, 48, 13, -15),
            (12, 39, 45, -15),
            (63, 37, 19, -15),
            (17, 57, 68, -15),
            (86, 48, 14, -15),
            (14, 39, 45, -15),
            (73, 34, 21, -15),
            (63, 34, 19, -15),
            (17, 54, 68, -15),
            (16, 40, 45, -15),
            (73, 32, 21, -15),
            (65, 32, 23, -15),
            (17, 53, 68, -15),
            (16, 44, 45, -15),
            (86, 48, 16, -15),
            (16, 47, 45, -15),
            (16, 50, 45, -15),
            (24, 2, 78, -16),
            (27, 75, 63, -16),
            (84, 89, 37, -16),
            (25, 66, 31, -16),
            (31, 88, 38, -16),
            (63, 92, 37, -16),
            (31, 92, 88, -16),
            (31, 93, 79, -16),
            (62, 95, 37, -16),
            (52, 9, 2, -16),
            (27, 26, 27, -16),
            (87, 49, 13, -16),
            (65, 75, 96, -17),
            (61, 79, 36, -17),
            (4, 20, 92, -17),
            (88, 23, 16, -17),
            (90, 77, 85, -17),
            (88, 73, 32, -17),
            (88, 51, 13, -17),
            (88, 50, 13, -17),
            (59, 67, 6, -18),
            (3, 14, 93, -18),
            (44, 7, 21, -18),
            (89, 51, 13, -18),
            (5, 36, 94, -19),
            (44, 91, 34, -19),
            (89, 78, 92, -19),
            (90, 51, 13, -19),
            (72, 74, 29, -19),
            (28, 21, 5, -19),
            (16, 23, 95, -20),
            (27, 13, 11, -20),
            (85, 75, 99, -20),
            (91, 39, 17, -20),
            (27, 81, 65, -20),
            (86, 70, 16, -21),
            (44, 78, 97, -21),
            (50, 96, 96, -21),
            (92, 28, 10, -21),
            (95, 76, 68, -21),
            (50, 70, 16, -21),
            (10, 4, 54, -21),
            (47, 71, 2, -22),
            (41, 89, 31, -22),
            (32, 85, 31, -22),
            (29, 72, 27, -22),
            (32, 84, 31, -22),
            (17, 3, 73, -22),
            (32, 83, 31, -22),
            (23, 14, 24, -23),
            (68, 83, 98, -23),
            (55, 95, 98, -23),
            (72, 87, 30, -23),
            (95, 39, 17, -24),
            (23, 11, 10, -24),
            (12, 42, 29, -24),
            (20, 3, 95, -24),
            (96, 16, 22, -25),
            (81, 74, 15, -25),
            (21, 90, 90, -26),
            (21, 20, 21, -26),
            (67, 76, 23, -27),
            (91, 93, 30, -28),
            (14, 77, 72, -28),
            (72, 82, 25, -28),
            (19, 84, 91, -29),
            (13, 74, 34, -29),
            (61, 86, 24, -29),
            (88, 76, 1, -29),
            (9, 27, 24, -29),
            (18, 61, 7, -29),
            (12, 64, 21, -30),
            (3, 85, 69, -30),
            (7, 79, 54, -30),
            (42, 90, 9, -30),
            (15, 79, 77, -30),
            (9, 92, 86, -30),
            (48, 85, 4, -30),
            (4, 89, 68, -30),
            (66, 85, 3, -30),
            (17, 94, 69, -30),
            (12, 34, 18, -30),
            (9, 47, 11, -30),
            (10, 42, 7, -30),
            (7, 85, 65, -30),
            (12, 19, 14, -30),
            (9, 94, 72, -30),
            (12, 80, 85, -30),
            (6, 82, 82, -30),
            (46, 86, 21, -30),
            (7, 22, 6, -30),
            (17, 24, 12, -30),
            (39, 87, 6, -30),
            (40, 84, 10, -30),
            (8, 74, 97, -32),
            (25, 81, 18, -37),
            (25, 93, 19, -37),
            (19, 78, 3, -42),
        ]

        # Every sample planet must evaluate to the historically recorded value.
        for g, t, r, value in data:
            planet = engine.Entity(**
                {'type': 'planet',
                 'gravity': g,
                 'temperature': t,
                 'radiation': r}
            )
            self.assertEqual(utils.planet_value(species, planet), value)
| 30.240796 | 75 | 0.274184 | 30,322 | 0.997697 | 0 | 0 | 0 | 0 | 0 | 0 | 473 | 0.015563 |
70b42808fd3e5b69884acc32783742c4f3b80d18 | 12,044 | py | Python | models/model_search.py | WZzhaoyi/TF-NAS | f63e9fd3a5ca0d8c6400891baa19c2168b203513 | [
"MIT"
] | 62 | 2020-07-05T11:59:47.000Z | 2022-01-18T08:09:53.000Z | models/model_search.py | WZzhaoyi/TF-NAS | f63e9fd3a5ca0d8c6400891baa19c2168b203513 | [
"MIT"
] | 4 | 2020-08-17T09:13:47.000Z | 2021-10-01T03:21:28.000Z | models/model_search.py | WZzhaoyi/TF-NAS | f63e9fd3a5ca0d8c6400891baa19c2168b203513 | [
"MIT"
] | 11 | 2020-07-23T06:21:28.000Z | 2021-06-13T20:19:24.000Z | import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from .layers import *
# Candidate operations for every searchable layer.  Naming scheme:
#   MBI_k{K}_e{E}[_se] -> MobileNetV2-style inverted residual block with a
#   KxK depthwise kernel, expansion ratio E, and optionally a
#   squeeze-and-excitation (SE) module.
PRIMITIVES = [
    'MBI_k3_e3',
    'MBI_k3_e6',
    'MBI_k5_e3',
    'MBI_k5_e6',
    'MBI_k3_e3_se',
    'MBI_k3_e6_se',
    'MBI_k5_e3_se',
    'MBI_k5_e6_se',
    # 'skip',
]

# Factory per primitive: (in_channels, mid_channels, out_channels, stride,
# affine, act_func) -> nn.Module.  The third positional argument of
# MBInvertedResBlock is the SE channel count: 0 disables SE, while the
# '_se' variants size it from the input channel count.
OPS = {
    'MBI_k3_e3'    : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, 0,    oc, 3, s, affine=aff, act_func=act),
    'MBI_k3_e6'    : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, 0,    oc, 3, s, affine=aff, act_func=act),
    'MBI_k5_e3'    : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, 0,    oc, 5, s, affine=aff, act_func=act),
    'MBI_k5_e6'    : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, 0,    oc, 5, s, affine=aff, act_func=act),
    'MBI_k3_e3_se' : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, ic  , oc, 3, s, affine=aff, act_func=act),
    'MBI_k3_e6_se' : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, ic*2, oc, 3, s, affine=aff, act_func=act),
    'MBI_k5_e3_se' : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, ic  , oc, 5, s, affine=aff, act_func=act),
    'MBI_k5_e6_se' : lambda ic, mc, oc, s, aff, act: MBInvertedResBlock(ic, mc, ic*2, oc, 5, s, affine=aff, act_func=act),
    # 'skip'       : lambda ic, mc, oc, s, aff, act: IdentityLayer(ic, oc),
}
class MixedOP(nn.Module):
    """A searchable layer holding every candidate primitive in parallel.

    In the relaxed (non-sampling) pass the output is an architecture-weighted
    sum over all candidates, paired with the expected lookup-table latency.
    In sampling mode a single candidate is selected according to `mode` and
    executed alone (with a latency of 0).
    """

    def __init__(self, in_channels, out_channels, stride, affine, act_func, num_ops, mc_num_dict, lat_lookup):
        super(MixedOP, self).__init__()
        self.num_ops = num_ops
        self.lat_lookup = lat_lookup
        self.mc_num_dict = mc_num_dict

        # One module per candidate primitive, mid-channel width per op index.
        self.m_ops = nn.ModuleList(
            OPS[PRIMITIVES[op_idx]](in_channels, self.mc_num_dict[op_idx],
                                    out_channels, stride, affine, act_func)
            for op_idx in range(num_ops))

        self._initialize_log_alphas()
        self.reset_switches()

    def fink_ori_idx(self, idx):
        """Map `idx`, an index among the still-enabled (True) switches, back
        to its position in the full switch list."""
        remaining = idx + 1
        for ori_idx, enabled in enumerate(self.switches):
            if enabled:
                remaining -= 1
                if remaining == 0:
                    break
        return ori_idx

    def forward(self, x, sampling, mode):
        if not sampling:
            # Relaxed pass: Gumbel-softmax over all ops, weighted sum of both
            # the candidate outputs and their table latencies.
            probs = F.gumbel_softmax(self.log_alphas, self.T, hard=False)
            latencies = self.get_lookup_latency(x.size(-1))
            mixed_out = sum(p * op(x) for p, op in zip(probs, self.m_ops))
            mixed_lat = sum(p * lat for p, lat in zip(probs, latencies))
            return mixed_out, mixed_lat

        # Sampling pass: choose one op among the currently enabled ones.
        masked = self.log_alphas[self.switches]
        if mode == 'gumbel':
            scores = F.gumbel_softmax(F.log_softmax(masked, dim=-1), self.T, hard=False)
            chosen = torch.argmax(scores).item()
            # NOTE(review): `chosen` indexes the masked weights but is used to
            # flip the full switch list; verify this is intended once some
            # switches are already False.
            self.switches[chosen] = False
        elif mode == 'gumbel_2':
            scores = F.gumbel_softmax(F.log_softmax(masked, dim=-1), self.T, hard=False)
            chosen = self.fink_ori_idx(torch.argmax(scores).item())
            self.reset_switches()
        elif mode == 'min_alphas':
            chosen = self.fink_ori_idx(torch.argmin(masked).item())
            self.reset_switches()
        elif mode == 'max_alphas':
            chosen = self.fink_ori_idx(torch.argmax(masked).item())
            self.reset_switches()
        elif mode == 'random':
            chosen = self.fink_ori_idx(random.choice(range(len(masked))))
            self.reset_switches()
        else:
            raise ValueError('invalid sampling mode...')
        return self.m_ops[chosen](x), 0

    def get_lookup_latency(self, size):
        """Return the table latency of every candidate at spatial `size`;
        identity (skip) ops cost nothing."""
        latencies = []
        for op in self.m_ops:
            if isinstance(op, IdentityLayer):
                latencies.append(0)
                continue
            key = '{}_{}_{}_{}_{}_k{}_s{}_{}'.format(
                op.name,
                size,
                op.in_channels,
                op.se_channels,
                op.out_channels,
                op.kernel_size,
                op.stride,
                op.act_func)
            latencies.append(self.lat_lookup[key][op.mid_channels])
        return latencies

    def _initialize_log_alphas(self):
        """Register uniform (log-softmaxed zeros) architecture parameters."""
        uniform = F.log_softmax(torch.zeros((self.num_ops,)), dim=-1)
        self.register_parameter('log_alphas', nn.Parameter(uniform))

    def reset_switches(self):
        """Re-enable every candidate op."""
        self.switches = [True] * self.num_ops

    def set_temperature(self, T):
        """Set the Gumbel-softmax temperature."""
        self.T = T
class MixedStage(nn.Module):
    """A stage of consecutive MixedOP blocks with a searchable depth.

    `betas` parameterizes a softmax over candidate stage depths: the stage
    output is a weighted sum of the stage input (only when a shape-preserving
    residual is possible) and the outputs after 1..num_blocks blocks; the
    stage latency is the matching weighted sum of cumulative block latencies.

    stage_type encodes the block count: 0 -> stage6 (1 block),
    1 -> stage1 (2), 2 -> stage2 (3), 3 -> stage3/4/5 (4 blocks).
    """

    def __init__(self, ics, ocs, ss, affs, acts, mc_num_ddict, lat_lookup, stage_type):
        super(MixedStage, self).__init__()
        if stage_type not in (0, 1, 2, 3):
            raise ValueError('invalid stage_type...')
        self.lat_lookup = lat_lookup
        self.mc_num_ddict = mc_num_ddict
        self.stage_type = stage_type  # 0 for stage6 || 1 for stage1 || 2 for stage2 || 3 for stage3/4/5
        self.num_blocks = stage_type + 1
        # The raw input is itself a candidate output only when a residual
        # shortcut is shape-preserving (same channels, stride 1).
        self.start_res = 0 if ((ics[0] == ocs[0]) and (ss[0] == 1)) else 1
        self.num_res = len(ics) - self.start_res + 1

        # Keep the historical attribute names (block1..block4) so that
        # state_dict / checkpoint keys remain unchanged; the previous code
        # spelled out each stage_type branch by hand.
        for i in range(self.num_blocks):
            name = 'block{}'.format(i + 1)
            setattr(self, name, MixedOP(ics[i], ocs[i], ss[i], affs[i], acts[i],
                                        len(PRIMITIVES), mc_num_ddict[name], lat_lookup))

        self._initialize_betas()

    def forward(self, x, sampling, mode):
        """Run every block in sequence, collecting intermediate outputs and
        cumulative latencies, then blend them with softmax(betas)."""
        res_list = [x, ]
        lat_list = [0., ]

        out, cum_lat = x, 0.
        for i in range(self.num_blocks):
            block = getattr(self, 'block{}'.format(i + 1))
            out, lat = block(out, sampling, mode)
            cum_lat = cum_lat + lat
            res_list.append(out)
            lat_list.append(cum_lat)

        weights = F.softmax(self.betas, dim=-1)
        out = sum(w * res for w, res in zip(weights, res_list[self.start_res:]))
        out_lat = sum(w * lat for w, lat in zip(weights, lat_list[self.start_res:]))

        return out, out_lat

    def _initialize_betas(self):
        """Register the per-depth architecture weights (initialized to zero,
        i.e. a uniform softmax)."""
        betas = torch.zeros((self.num_res))
        self.register_parameter('betas', nn.Parameter(betas))
class Network(nn.Module):
def __init__(self, num_classes, mc_num_dddict, lat_lookup):
super(Network, self).__init__()
self.lat_lookup = lat_lookup
self.mc_num_dddict = mc_num_dddict
self.first_stem = ConvLayer(3, 32, kernel_size=3, stride=2, affine=False, act_func='relu')
self.second_stem = MBInvertedResBlock(32, 32, 8, 16, kernel_size=3, stride=1, affine=False, act_func='relu')
self.stage1 = MixedStage(
ics = [16,24],
ocs = [24,24],
ss = [2,1],
affs = [False, False],
acts = ['relu', 'relu'],
mc_num_ddict = mc_num_dddict['stage1'],
lat_lookup = lat_lookup,
stage_type = 1,)
self.stage2 = MixedStage(
ics = [24,40,40],
ocs = [40,40,40],
ss = [2,1,1],
affs = [False, False, False],
acts = ['swish', 'swish', 'swish'],
mc_num_ddict = mc_num_dddict['stage2'],
lat_lookup = lat_lookup,
stage_type = 2,)
self.stage3 = MixedStage(
ics = [40,80,80,80],
ocs = [80,80,80,80],
ss = [2,1,1,1],
affs = [False, False, False, False],
acts = ['swish', 'swish', 'swish', 'swish'],
mc_num_ddict = mc_num_dddict['stage3'],
lat_lookup = lat_lookup,
stage_type = 3,)
self.stage4 = MixedStage(
ics = [80,112,112,112],
ocs = [112,112,112,112],
ss = [1,1,1,1],
affs = [False, False, False, False],
acts = ['swish', 'swish', 'swish', 'swish'],
mc_num_ddict = mc_num_dddict['stage4'],
lat_lookup = lat_lookup,
stage_type = 3,)
self.stage5 = MixedStage(
ics = [112,192,192,192],
ocs = [192,192,192,192],
ss = [2,1,1,1],
affs = [False, False, False, False],
acts = ['swish', 'swish', 'swish', 'swish'],
mc_num_ddict = mc_num_dddict['stage5'],
lat_lookup = lat_lookup,
stage_type = 3,)
self.stage6 = MixedStage(
ics = [192,],
ocs = [320,],
ss = [1,],
affs = [False,],
acts = ['swish',],
mc_num_ddict = mc_num_dddict['stage6'],
lat_lookup = lat_lookup,
stage_type = 0,)
self.feature_mix_layer = ConvLayer(320, 1280, kernel_size=1, stride=1, affine=False, act_func='swish')
self.global_avg_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = LinearLayer(1280, num_classes)
self._initialization()
def forward(self, x, sampling, mode='max'):
out_lat = self.lat_lookup['base'] if not sampling else 0.0
x = self.first_stem(x)
x = self.second_stem(x)
x, lat = self.stage1(x, sampling, mode)
out_lat += lat
x, lat = self.stage2(x, sampling, mode)
out_lat += lat
x, lat = self.stage3(x, sampling, mode)
out_lat += lat
x, lat = self.stage4(x, sampling, mode)
out_lat += lat
x, lat = self.stage5(x, sampling, mode)
out_lat += lat
x, lat = self.stage6(x, sampling, mode)
out_lat += lat
x = self.feature_mix_layer(x)
x = self.global_avg_pooling(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x, out_lat
def set_temperature(self, T):
for m in self.modules():
if isinstance(m, MixedOP):
m.set_temperature(T)
def weight_parameters(self):
_weight_parameters = []
for k, v in self.named_parameters():
if not (k.endswith('log_alphas') or k.endswith('betas')):
_weight_parameters.append(v)
return _weight_parameters
def arch_parameters(self):
_arch_parameters = []
for k, v in self.named_parameters():
if k.endswith('log_alphas') or k.endswith('betas'):
_arch_parameters.append(v)
return _arch_parameters
def log_alphas_parameters(self):
_log_alphas_parameters = []
for k, v in self.named_parameters():
if k.endswith('log_alphas'):
_log_alphas_parameters.append(v)
return _log_alphas_parameters
def betas_parameters(self):
_betas_parameters = []
for k, v in self.named_parameters():
if k.endswith('betas'):
_betas_parameters.append(v)
return _betas_parameters
def reset_switches(self):
for m in self.modules():
if isinstance(m, MixedOP):
m.reset_switches()
def _initialization(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
| 32.907104 | 119 | 0.650614 | 10,759 | 0.893308 | 0 | 0 | 0 | 0 | 0 | 0 | 925 | 0.076802 |
70b8bfd3bed877715148c6075360f5dc2f1f654e | 6,001 | py | Python | code/decision.py | preethi2205/RND_SearchAndSampleReturn | a29ccfe62c74bac1dd2d387d018543a17b87d362 | [
"MIT"
] | null | null | null | code/decision.py | preethi2205/RND_SearchAndSampleReturn | a29ccfe62c74bac1dd2d387d018543a17b87d362 | [
"MIT"
] | null | null | null | code/decision.py | preethi2205/RND_SearchAndSampleReturn | a29ccfe62c74bac1dd2d387d018543a17b87d362 | [
"MIT"
] | null | null | null | import numpy as np
import random as random
def move_to_sample(Rover):
delX = 0; delY = 0;
if len(Rover.rock_angles) > 0:
dist_to_rock = np.mean(np.abs(Rover.rock_dist))
angle_to_rock = np.mean(Rover.rock_angles);
Rover.steer = np.clip(angle_to_rock* 180/np.pi, -15, 15)
if Rover.vel>0.5:
Rover.brake = 0.1;
else:
Rover.brake = 0;
Rover.throttle = 0;
if Rover.vel <0.2 and Rover.near_sample == 0:
Rover.throttle = 0.1;
Rover.brake = 0 ;
if Rover.Is_Stuck:
Rover.brake = 0;
Rover.throttle = Rover.StuckThrottle;
Rover.steer = Rover.StuckSteering;
if Rover.near_sample:
Rover.brake = Rover.brake_set;
return Rover
def is_terrain_navigable(Rover):
if len(Rover.nav_dists)>0:
if (len(Rover.nav_angles) < Rover.stop_forward):
terrain_navigable = 0;
else:
terrain_navigable = 1;
else:
terrain_navigable = 0;
return terrain_navigable;
def is_rover_stuck(Rover):
SteerVel = np.mean(np.diff(Rover.SteerVel[0:6]));
no_new_area_mapped = (np.abs(Rover.new_perc_mapped - Rover.old_perc_mapped) <= 0.25);
rover_unstucking = (np.abs(Rover.total_time - Rover.map_time) <= 2);
rover_steer_not_changing = (np.abs(SteerVel) <= 2);
is_rover_stuck = no_new_area_mapped and rover_steer_not_changing and rover_unstucking;
if no_new_area_mapped and rover_steer_not_changing and Rover.Is_Stuck == 0:
# Rover was not stuck before, but is stuck now
Rover.Is_Stuck = 1;
Rover.StuckSteering = np.random.randint(-15, 16);
Rover.StuckThrottle = np.random.randint(-1, 2);
if Rover.Is_Stuck and ~rover_unstucking:
# Rover unstucking is done
Rover.Is_Stuck = 0;
# This is where you can build a decision tree for determining throttle, brake and steer
# commands based on the output of the perception_step() function
def decision_step(Rover):
# Implement conditionals to decide what to do given perception data
# Here you're all set up with some basic functionality but you'll need to
# improve on this decision tree to do a good job of navigating autonomously!
# Example:
# Check if we have vision data to make decisions with
# If there are, we'll step through the known sample positions
# to confirm whether detections are real
is_rover_stuck(Rover);
if Rover.nav_angles is not None:
# Check for Rover.mode status
#if near a sample, navigate towards the sample and stop
if (Rover.samples_located > Rover.samples_collected):
Rover = move_to_sample(Rover);
elif Rover.mode == 'forward':
# Check the extent of navigable terrain
if Rover.Is_Stuck:
# Rover is stuck, unstuck it
Rover.brake = 0;
Rover.throttle = Rover.StuckThrottle;
Rover.steer = Rover.StuckSteering;
elif is_terrain_navigable(Rover):
# If mode is forward, navigable terrain looks good
# and velocity is below max, then throttle
if Rover.vel < Rover.max_vel:
# Set throttle value to throttle setting
Rover.throttle = Rover.throttle_set
else: # Else coast
Rover.throttle = 0
Rover.brake = 0
# Set steering to average angle clipped to the range +/- 15
Weighted_Angles = np.mean(Rover.nav_angles);
Rover.steer = np.clip(np.mean(Weighted_Angles * 180/np.pi), -15, 15)
else:
# Set mode to "stop" and hit the brakes!
Rover.throttle = 0
# Set brake to stored brake value
Rover.brake = Rover.brake_set
Rover.steer = -15;
Rover.mode = 'stop';
# If we're already in "stop" mode then make different decisions
elif Rover.mode == 'stop':
# If we're in stop mode but still moving keep braking
if Rover.vel > 0.2:
Rover.throttle = 0
Rover.brake = Rover.brake_set
Rover.steer = 0
# If we're not moving (vel < 0.2) then do something else
elif Rover.vel <= 0.2:
# Now we're stopped and we have vision data to see if there's a path forward
if len(Rover.nav_angles) < Rover.go_forward:
# Release the brake to allow turning
Rover.brake = 0;
# Turn range is +/- 15 degrees, when stopped the next line will induce 4-wheel turning
Rover.steer = -15 # Could be more clever here about which way to turn
# If we're stopped but see sufficient navigable terrain in front then go!
if is_terrain_navigable(Rover):
# Set throttle back to stored value
Rover.throttle = Rover.throttle_set
# Release the brake
Rover.brake = 0
# Set steer to mean angle
Rover.steer = np.clip(np.mean(Rover.nav_angles * 180/np.pi), -15, 15)
Rover.mode = 'forward'
# Just to make the rover do something
# even if no modifications have been made to the code
else:
Rover.throttle = Rover.throttle_set
Rover.steer = 0
Rover.brake = 0
# If in a state where want to pickup a rock send pickup command
if Rover.near_sample and Rover.vel == 0 and not Rover.picking_up:
Rover.send_pickup = True
Rover.mode = 'stop';
Rover.SteerVel[0:9] = Rover.SteerVel[1:10];
Rover.SteerVel[9] = Rover.steer;
if Rover.vel == 0:
Rover.Ok_To_Map = 0;
else:
Rover.Ok_To_Map = 1;
return Rover
| 40.275168 | 106 | 0.585902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,796 | 0.299283 |
70b8e8b51a58773bba3751b1c3d887e592c99dd0 | 869 | py | Python | ch17/yunqiCrawl/yunqiCrawl/scrapy_redis/connection.py | AaronZhengkk/SpiderBook | 48f82e4627f88f366bbaff71f01bac6412f78eca | [
"MIT"
] | 990 | 2017-04-20T07:58:39.000Z | 2022-03-28T08:52:46.000Z | base/bloom_redis/connection.py | Deep-Heart/Spider | 33fd548c025bd52571f52e4b53d8e7cb8570be6f | [
"Apache-2.0"
] | 119 | 2017-06-20T12:57:11.000Z | 2021-04-27T04:58:34.000Z | base/bloom_redis/connection.py | Deep-Heart/Spider | 33fd548c025bd52571f52e4b53d8e7cb8570be6f | [
"Apache-2.0"
] | 583 | 2017-05-12T04:05:45.000Z | 2022-03-18T02:40:13.000Z | import redis
# Default values.
REDIS_URL = None
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
FILTER_URL = None
FILTER_HOST = 'localhost'
FILTER_PORT = 6379
FILTER_DB = 0
def from_settings(settings):
url = settings.get('REDIS_URL', REDIS_URL)
host = settings.get('REDIS_HOST', REDIS_HOST)
port = settings.get('REDIS_PORT', REDIS_PORT)
# REDIS_URL takes precedence over host/port specification.
if url:
return redis.from_url(url)
else:
return redis.Redis(host=host, port=port)
def from_settings_filter(settings):
url = settings.get('FILTER_URL', FILTER_URL)
host = settings.get('FILTER_HOST', FILTER_HOST)
port = settings.get('FILTER_PORT', FILTER_PORT)
db = settings.get('FILTER_DB', FILTER_DB)
if url:
return redis.from_url(url)
else:
return redis.Redis(host=host, port=port, db=db)
| 24.138889 | 62 | 0.695052 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.208285 |
70b9709e5bc187bf4a8b2990ce0f982cea60bcb9 | 13,444 | py | Python | odoo-14.0/addons/hr_holidays/tests/test_automatic_leave_dates.py | Yomy1996/P1 | 59e24cdd5f7f82005fe15bd7a7ff54dd5364dd29 | [
"CC-BY-3.0"
] | null | null | null | odoo-14.0/addons/hr_holidays/tests/test_automatic_leave_dates.py | Yomy1996/P1 | 59e24cdd5f7f82005fe15bd7a7ff54dd5364dd29 | [
"CC-BY-3.0"
] | null | null | null | odoo-14.0/addons/hr_holidays/tests/test_automatic_leave_dates.py | Yomy1996/P1 | 59e24cdd5f7f82005fe15bd7a7ff54dd5364dd29 | [
"CC-BY-3.0"
] | null | null | null | # -*- coding: utf-8 -*-
from datetime import date, datetime
from odoo.tests.common import Form
from odoo.addons.hr_holidays.tests.common import TestHrHolidaysCommon
from odoo.exceptions import ValidationError
class TestAutomaticLeaveDates(TestHrHolidaysCommon):
def setUp(self):
super(TestAutomaticLeaveDates, self).setUp()
self.leave_type = self.env['hr.leave.type'].create({
'name': 'Automatic Test',
'time_type': 'leave',
'allocation_type': 'no',
'validity_start': False,
})
def test_no_attendances(self):
calendar = self.env['resource.calendar'].create({
'name': 'No Attendances',
'attendance_ids': [(5, 0, 0)],
})
employee = self.employee_emp
employee.resource_calendar_id = calendar
with Form(self.env['hr.leave'].with_context(default_employee_id=employee.id)) as leave_form:
leave_form.holiday_status_id = self.leave_type
leave_form.request_date_from = date(2019, 9, 2)
leave_form.request_date_to = date(2019, 9, 2)
leave_form.request_unit_half = True
leave_form.request_date_from_period = 'am'
self.assertEqual(leave_form.number_of_days_display, 0.5)
self.assertEqual(leave_form.number_of_hours_text, '4 Hours')
def test_single_attendance_on_morning_and_afternoon(self):
calendar = self.env['resource.calendar'].create({
'name': 'simple morning + afternoon',
'attendance_ids': [(5, 0, 0),
(0, 0, {
'name': 'monday morning',
'hour_from': 8,
'hour_to': 12,
'day_period': 'morning',
'dayofweek': '0',
}),
(0, 0, {
'name': 'monday afternoon',
'hour_from': 13,
'hour_to': 17,
'day_period': 'afternoon',
'dayofweek': '0',
})]
})
employee = self.employee_emp
employee.resource_calendar_id = calendar
with Form(self.env['hr.leave'].with_context(default_employee_id=employee.id)) as leave_form:
leave_form.holiday_status_id = self.leave_type
leave_form.request_date_from = date(2019, 9, 2)
leave_form.request_date_to = date(2019, 9, 2)
leave_form.request_unit_half = True
leave_form.request_date_from_period = 'am'
self.assertEqual(leave_form.number_of_days_display, .5)
self.assertEqual(leave_form.number_of_hours_text, '4 Hours')
leave_form.request_date_from_period = 'pm'
self.assertEqual(leave_form.number_of_days_display, .5)
self.assertEqual(leave_form.number_of_hours_text, '4 Hours')
def test_multiple_attendance_on_morning(self):
calendar = self.env['resource.calendar'].create({
'name': 'multi morning',
'attendance_ids': [(5, 0, 0),
(0, 0, {
'name': 'monday morning 1',
'hour_from': 8,
'hour_to': 10,
'day_period': 'morning',
'dayofweek': '0',
}),
(0, 0, {
'name': 'monday morning 2',
'hour_from': 10.25,
'hour_to': 12.25,
'day_period': 'morning',
'dayofweek': '0',
}),
(0, 0, {
'name': 'monday afternoon',
'hour_from': 13,
'hour_to': 17,
'day_period': 'afternoon',
'dayofweek': '0',
})]
})
employee = self.employee_emp
employee.resource_calendar_id = calendar
with Form(self.env['hr.leave'].with_context(default_employee_id=employee.id)) as leave_form:
leave_form.holiday_status_id = self.leave_type
leave_form.request_date_from = date(2019, 9, 2)
leave_form.request_date_to = date(2019, 9, 2)
leave_form.request_unit_half = True
leave_form.request_date_from_period = 'am'
self.assertEqual(leave_form.number_of_days_display, .5)
self.assertEqual(leave_form.number_of_hours_text, '4 Hours')
leave_form.request_date_from_period = 'pm'
self.assertEqual(leave_form.number_of_days_display, .5)
self.assertEqual(leave_form.number_of_hours_text, '4 Hours')
def test_attendance_on_morning(self):
calendar = self.env['resource.calendar'].create({
'name': 'Morning only',
'attendance_ids': [(5, 0, 0),
(0, 0, {
'name': 'Monday All day',
'hour_from': 8,
'hour_to': 16,
'day_period': 'morning',
'dayofweek': '0',
})],
})
employee = self.employee_emp
employee.resource_calendar_id = calendar
with Form(self.env['hr.leave'].with_context(default_employee_id=employee.id)) as leave_form:
leave_form.holiday_status_id = self.leave_type
leave_form.request_date_from = date(2019, 9, 2)
leave_form.request_date_to = date(2019, 9, 2)
leave_form.request_unit_half = True
# Ask for morning
leave_form.request_date_from_period = 'am'
self.assertEqual(leave_form.number_of_days_display, 0.5)
self.assertEqual(leave_form.number_of_hours_text, '8 Hours')
# Ask for afternoon
leave_form.request_date_from_period = 'pm'
self.assertEqual(leave_form.number_of_days_display, 0.5)
self.assertEqual(leave_form.number_of_hours_text, '8 Hours')
def test_attendance_next_day(self):
self.env.user.tz = 'Europe/Brussels'
calendar = self.env['resource.calendar'].create({
'name': 'auto next day',
'attendance_ids': [(5, 0, 0),
(0, 0, {
'name': 'tuesday morning',
'hour_from': 8,
'hour_to': 12,
'day_period': 'morning',
'dayofweek': '1',
})]
})
employee = self.employee_emp
employee.resource_calendar_id = calendar
with Form(self.env['hr.leave'].with_context(default_employee_id=employee.id)) as leave_form:
leave_form.holiday_status_id = self.leave_type
leave_form.request_date_from = date(2019, 9, 2)
leave_form.request_date_to = date(2019, 9, 2)
leave_form.request_unit_half = True
leave_form.request_date_from_period = 'am'
self.assertEqual(leave_form.number_of_days_display, 0.5)
self.assertEqual(leave_form.number_of_hours_text, '4 Hours')
self.assertEqual(leave_form.date_from, datetime(2019, 9, 2, 6, 0, 0))
self.assertEqual(leave_form.date_to, datetime(2019, 9, 2, 10, 0, 0))
def test_attendance_previous_day(self):
self.env.user.tz = 'Europe/Brussels'
calendar = self.env['resource.calendar'].create({
'name': 'auto next day',
'attendance_ids': [(5, 0, 0),
(0, 0, {
'name': 'monday morning',
'hour_from': 8,
'hour_to': 12,
'day_period': 'morning',
'dayofweek': '0',
})]
})
employee = self.employee_emp
employee.resource_calendar_id = calendar
with Form(self.env['hr.leave'].with_context(default_employee_id=employee.id)) as leave_form:
leave_form.holiday_status_id = self.leave_type
leave_form.request_date_from = date(2019, 9, 3)
leave_form.request_date_to = date(2019, 9, 3)
leave_form.request_unit_half = True
leave_form.request_date_from_period = 'am'
self.assertEqual(leave_form.number_of_days_display, 0.5)
self.assertEqual(leave_form.number_of_hours_text, '4 Hours')
self.assertEqual(leave_form.date_from, datetime(2019, 9, 3, 6, 0, 0))
self.assertEqual(leave_form.date_to, datetime(2019, 9, 3, 10, 0, 0))
def test_2weeks_calendar(self):
self.env.user.tz = 'Europe/Brussels'
calendar = self.env['resource.calendar'].create({
'name': 'auto next day',
'two_weeks_calendar': True,
'attendance_ids': [(5, 0, 0),
(0, 0, {
'name': 'monday morning odd week',
'hour_from': 8,
'hour_to': 12,
'day_period': 'morning',
'dayofweek': '0',
'week_type': '0',
}),
(0, 0, {
'name': 'monday morning even week',
'hour_from': 10,
'hour_to': 12,
'day_period': 'morning',
'dayofweek': '0',
'week_type': '1',
})]
})
employee = self.employee_emp
employee.resource_calendar_id = calendar
with Form(self.env['hr.leave'].with_context(default_employee_id=employee.id)) as leave_form:
leave_form.holiday_status_id = self.leave_type
# even week, works 2 hours
leave_form.request_date_from = date(2019, 9, 2)
leave_form.request_date_to = date(2019, 9, 2)
leave_form.request_unit_half = True
leave_form.request_date_from_period = 'am'
self.assertEqual(leave_form.number_of_days_display, 0.5)
self.assertEqual(leave_form.number_of_hours_text, '2 Hours')
self.assertEqual(leave_form.date_from, datetime(2019, 9, 2, 8, 0, 0))
self.assertEqual(leave_form.date_to, datetime(2019, 9, 2, 10, 0, 0))
with Form(self.env['hr.leave'].with_context(default_employee_id=employee.id)) as leave_form:
leave_form.holiday_status_id = self.leave_type
# odd week, works 4 hours
leave_form.request_date_from = date(2019, 9, 9)
leave_form.request_date_to = date(2019, 9, 9)
leave_form.request_unit_half = True
leave_form.request_date_from_period = 'am'
self.assertEqual(leave_form.number_of_days_display, 0.5)
self.assertEqual(leave_form.number_of_hours_text, '4 Hours')
self.assertEqual(leave_form.date_from, datetime(2019, 9, 9, 6, 0, 0))
self.assertEqual(leave_form.date_to, datetime(2019, 9, 9, 10, 0, 0))
def test_2weeks_calendar_next_week(self):
self.env.user.tz = 'Europe/Brussels'
calendar = self.env['resource.calendar'].create({
'name': 'auto next day',
'two_weeks_calendar': True,
'attendance_ids': [(5, 0, 0),
(0, 0, {
'name': 'monday morning odd week',
'hour_from': 8,
'hour_to': 12,
'day_period': 'morning',
'dayofweek': '0',
'week_type': '0',
})]
})
employee = self.employee_emp
employee.resource_calendar_id = calendar
with Form(self.env['hr.leave'].with_context(default_employee_id=employee.id)) as leave_form:
leave_form.holiday_status_id = self.leave_type
# even week, does not work
leave_form.request_date_from = date(2019, 9, 2)
leave_form.request_date_to = date(2019, 9, 2)
leave_form.request_unit_half = True
leave_form.request_date_from_period = 'am'
self.assertEqual(leave_form.number_of_days_display, 0.5)
self.assertEqual(leave_form.number_of_hours_text, '4 Hours')
self.assertEqual(leave_form.date_from, datetime(2019, 9, 2, 6, 0, 0))
self.assertEqual(leave_form.date_to, datetime(2019, 9, 2, 10, 0, 0))
| 46.041096 | 100 | 0.507885 | 13,230 | 0.984082 | 0 | 0 | 0 | 0 | 0 | 0 | 1,973 | 0.146757 |
70ba13d0c276d8e93931e5d84090ba6194a1d012 | 445 | py | Python | oface/model/base.py | 007gzs/oface | 9e4ca8ee572783cff4417c58217cdcb8c7af07f4 | [
"MIT"
] | 2 | 2021-06-01T01:33:32.000Z | 2021-11-23T10:39:35.000Z | oface/model/base.py | 007gzs/oface | 9e4ca8ee572783cff4417c58217cdcb8c7af07f4 | [
"MIT"
] | null | null | null | oface/model/base.py | 007gzs/oface | 9e4ca8ee572783cff4417c58217cdcb8c7af07f4 | [
"MIT"
] | 1 | 2021-07-16T22:42:55.000Z | 2021-07-16T22:42:55.000Z | # encoding: utf-8
from __future__ import absolute_import, unicode_literals
import onnxruntime
class ONNXModel:
def __init__(self, model_file=None, session=None, task_name=''):
self.model_file = model_file
self.session = session
self.task_name = task_name
if self.session is None:
assert self.model_file is not None
self.session = onnxruntime.InferenceSession(self.model_file, None)
| 29.666667 | 78 | 0.698876 | 347 | 0.779775 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.042697 |
70bcf6e650bbb3a26c3ae8d0c7700d683160246d | 3,881 | py | Python | fabrun/views.py | agepoly/azimut-gestion | 76da15a55086fdfacf605c139a1a64884ab0de40 | [
"MIT"
] | null | null | null | fabrun/views.py | agepoly/azimut-gestion | 76da15a55086fdfacf605c139a1a64884ab0de40 | [
"MIT"
] | null | null | null | fabrun/views.py | agepoly/azimut-gestion | 76da15a55086fdfacf605c139a1a64884ab0de40 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.template import RequestContext
from django.core.context_processors import csrf
from django.views.decorators.csrf import csrf_exempt
from django.http import Http404, HttpResponse, HttpResponseForbidden, HttpResponseNotFound
from django.utils.encoding import smart_str
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.db import connections
from django.core.paginator import InvalidPage, EmptyPage, Paginator
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.contrib import messages
import subprocess
from django.utils import timezone
from servers.models import Server
from fabrun.models import Task
from fabrun.tasks import run_task
import datetime
KEYWORDS = ('[$AG:NeedGestion]', '[$AG:NeedKM]', '[$AG:NeedUser]', '[$AG:NeedKomUser]', '[$AG:NeedSudo]', '[$AG:NeedMysqlPassword]', '[$AG:NeedSrvIp]')
@login_required
@staff_member_required
def home(request):
"""Show the page to execute scripts"""
if request.method == 'POST':
task = request.POST.get('script')
if task:
for spk in request.POST.getlist('server'):
server = get_object_or_404(Server, pk=spk)
t = Task(creation_date=timezone.now(), server=server, command=task)
t.save()
run_task.delay(t.pk)
messages.success(request, "Created task for server " + str(server))
liste = []
out, __ = subprocess.Popen(['fab', '--shortlist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=settings.FABRIC_FOLDER).communicate()
# for command in out.split('\n'):
# if command:
# out2, __ = subprocess.Popen(['fab', '-d', command], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=settings.FABRIC_FOLDER).communicate()
# description = out2.split('\n')[2]
# for keyword in KEYWORDS:
# description = description.replace(keyword, '')
# description = description.strip()
# liste.append((command, description))
liste = out.split('\n')
servers = Server.objects.exclude(ssh_connection_string_from_gestion=None).order_by('name').all()
tasks = Task.objects.order_by('-creation_date').all()
return render_to_response('fabrun/home.html', {'liste': liste, 'tasks': tasks, 'servers': servers}, context_instance=RequestContext(request))
@login_required
@staff_member_required
def show_run(request, pk):
"""Show output for a run"""
task = get_object_or_404(Task, pk=pk)
return render_to_response('fabrun/show_run.html', {'task': task}, context_instance=RequestContext(request))
@login_required
@staff_member_required
def clean_up(request):
Task.objects.filter(creation_date__lt=timezone.now() - datetime.timedelta(days=1)).delete()
messages.success(request, "Old fabric runs have been deleted")
return HttpResponseRedirect(reverse('fabrun.views.home'))
@login_required
@staff_member_required
def get_description(request):
command = request.GET.get('task')
out, __ = subprocess.Popen(['fab', '--shortlist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=settings.FABRIC_FOLDER).communicate()
if command in out.split('\n'):
out2, __ = subprocess.Popen(['fab', '-d', command], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=settings.FABRIC_FOLDER).communicate()
description = out2.split('\n')[2]
for keyword in KEYWORDS:
description = description.replace(keyword, '')
description = description.strip()
return HttpResponse(description)
raise Http404
| 31.552846 | 155 | 0.707034 | 0 | 0 | 0 | 0 | 2,737 | 0.705231 | 0 | 0 | 899 | 0.231641 |
70be7dcca787cb83953f057618abd33581e4ef7e | 1,687 | py | Python | psets/set1/perceptron_helper.py | ichakraborty/CS155-iniproject | e149d82586a7c6b79a71a2e56213daa7dfacbff0 | [
"BSD-2-Clause"
] | 14 | 2021-01-05T06:54:16.000Z | 2022-01-24T23:49:01.000Z | psets/set1/perceptron_helper.py | ichakraborty/CS155-iniproject | e149d82586a7c6b79a71a2e56213daa7dfacbff0 | [
"BSD-2-Clause"
] | null | null | null | psets/set1/perceptron_helper.py | ichakraborty/CS155-iniproject | e149d82586a7c6b79a71a2e56213daa7dfacbff0 | [
"BSD-2-Clause"
] | 25 | 2021-01-06T19:15:00.000Z | 2022-01-10T14:31:15.000Z | ########################################
# CS/CNS/EE 155 2018
# Problem Set 1
#
# Author: Joey Hong
# Description: Set 1 Perceptron helper
########################################
import numpy as np
import matplotlib.pyplot as plt
def predict(x, w, b):
'''
The method takes the weight vector and bias of a perceptron model, and
predicts the label for a single point x.
Inputs:
x: A (D, ) shaped numpy array containing a single point.
w: A (D, ) shaped numpy array containing the weight vector.
b: A float containing the bias term.
Output:
The label (1 or -1) for the point x.
'''
prod = np.dot(w, x) + b
return 1 if prod >= 0 else -1
def plot_data(X, Y, ax):
# This method plots a labeled (with -1 or 1) 2D dataset.
ax.scatter(X[Y == 1, 0], X[Y == 1, 1], c = 'green', marker='+')
ax.scatter(X[Y == -1, 0], X[Y == -1, 1], c = 'red')
def boundary(x_1, w, b):
# Gets the corresponding x_2 value given x_1 and the decision boundary of a
# perceptron model. Note this only works for a 2D perceptron.
if w[1] == 0.0:
denom = 1e-6
else:
denom = w[1]
return (-w[0] * x_1 - b) / denom
def plot_perceptron(w, b, ax):
# This method plots a perceptron decision boundary line. Note this only works for
# 2D perceptron.
xlim = ax.get_xlim(); ylim = ax.get_ylim()
x_2s = [boundary(x_1, w, b) for x_1 in xlim]
ax.plot(xlim, x_2s)
if predict([xlim[0], ylim[0]], w, b) == -1:
ax.fill_between(xlim, ylim[0], x_2s, facecolor='red', alpha=0.5)
else:
ax.fill_between(xlim, x_2s, ylim[-1], facecolor='red', alpha=0.5)
| 29.086207 | 86 | 0.566686 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 886 | 0.525193 |
70c12af7d29c0b551f062c5e568d21efaf3953fe | 5,894 | py | Python | 1D_2D_arrays/1D_2D_arrays.py | Speedy905/ICS4U-Scripts | 4867410658f1ddc8e509edfc725058996019ab5d | [
"MIT"
] | null | null | null | 1D_2D_arrays/1D_2D_arrays.py | Speedy905/ICS4U-Scripts | 4867410658f1ddc8e509edfc725058996019ab5d | [
"MIT"
] | null | null | null | 1D_2D_arrays/1D_2D_arrays.py | Speedy905/ICS4U-Scripts | 4867410658f1ddc8e509edfc725058996019ab5d | [
"MIT"
] | null | null | null | #Antonio Karlo Mijares
#ICS4U-01
#November 24 2016
#1D_2D_arrays.py
# 1-D arrays filled in by the stat-reading functions below:
# characteristics holds stat names, num holds their values (as strings).
characteristics = []
num = []
# base is the maximum possible stat score; percentage scales a stat to 0-100.
base = 20
percentage = 100
# Holders for the 2-D array demonstrations.
# "Ugly" arrays (returned without formatted printing by notreal_two_d):
ugly_one_D = []
ugly_one_D_two = []
ugly_two_D = []
# "Nice" array (printed row by row by real_two_d):
nice_two_D = []
# Base name (without the .txt extension) of the character file to read.
filename = ('character')
#Strength function that will write to a 1D Array
def strength():
    """Read the Strength stat from the character file.

    Parses line 4 of '<filename>.txt' (expected form "Strength: <value>")
    and appends the stat name to the module-level ``characteristics`` list
    and its value (kept as a string) to the module-level ``num`` list.
    """
    with open(str(filename)+'.txt','r') as s:
        # Read the whole file into a list of lines.
        line = s.read().splitlines()
        # Split on the first ": ".  The original used str.strip(': 17'),
        # which removes a *set of characters* from both ends and only
        # happened to work for the hard-coded value 17.
        name, _, number = line[3].partition(': ')
        # Append the info to the 1-D arrays.
        characteristics.append(name)
        num.append(number)
#Constitution function that will write to a 1D Array
def constitution():
    """Read the Constitution stat from the character file.

    Parses line 5 of '<filename>.txt' (expected form "Constitution: <value>")
    and appends the stat name to the module-level ``characteristics`` list
    and its value (kept as a string) to the module-level ``num`` list.
    """
    with open(str(filename)+'.txt','r') as s:
        # Read the whole file into a list of lines.
        line = s.read().splitlines()
        # Split on the first ": ".  The original used str.strip(': 10'),
        # which removes a *set of characters* from both ends and only
        # happened to work for the hard-coded value 10.
        name, _, number = line[4].partition(': ')
        # Append the info to the 1-D arrays.
        characteristics.append(name)
        num.append(number)
#Dexerity function that will write to a 1D Array
def dexerity():
    """Read the Dexerity stat from the character file.

    Parses line 6 of '<filename>.txt' (expected form "Dexerity: <value>")
    and appends the stat name to the module-level ``characteristics`` list
    and its value (kept as a string) to the module-level ``num`` list.
    (Note: "Dexerity" is the file's spelling; renaming would break callers.)
    """
    with open(str(filename)+'.txt','r') as s:
        # Read the whole file into a list of lines.
        line = s.read().splitlines()
        # Split on the first ": ".  The original used str.strip(': 8'),
        # which removes a *set of characters* from both ends and only
        # happened to work for the hard-coded value 8.
        name, _, number = line[5].partition(': ')
        # Append the info to the 1-D arrays.
        characteristics.append(name)
        num.append(number)
#Intelligence function that will write to
def intelligence():
    """Read the Intelligence stat from the character file.

    Parses line 7 of '<filename>.txt' (expected form "Intelligence: <value>")
    and appends the stat name to the module-level ``characteristics`` list
    and its value (kept as a string) to the module-level ``num`` list.
    """
    with open(str(filename)+'.txt','r') as s:
        # Read the whole file into a list of lines.
        line = s.read().splitlines()
        # Split on the first ": ".  The original used str.strip(': 19'),
        # which removes a *set of characters* from both ends and only
        # happened to work for the hard-coded value 19.
        name, _, number = line[6].partition(': ')
        # Append the info to the 1-D arrays.
        characteristics.append(name)
        num.append(number)
#Wisdom function that will write to
def wisdom():
    """Read the Wisdom stat from the character file.

    Parses line 8 of '<filename>.txt' (expected form "Wisdom: <value>")
    and appends the stat name to the module-level ``characteristics`` list
    and its value (kept as a string) to the module-level ``num`` list.
    """
    with open(str(filename)+'.txt','r') as s:
        # Read the whole file into a list of lines.
        line = s.read().splitlines()
        # Split on the first ": ".  The original used str.strip(': 2'),
        # which removes a *set of characters* from both ends and only
        # happened to work for the hard-coded value 2.
        name, _, number = line[7].partition(': ')
        # Append the info to the 1-D arrays.
        characteristics.append(name)
        num.append(number)
#Charisma function that will write to
def charisma():
    """Read the Charisma stat from the character file.

    Parses line 9 of '<filename>.txt' (expected form "Charisma: <value>")
    and appends the stat name to the module-level ``characteristics`` list
    and its value (kept as a string) to the module-level ``num`` list.
    """
    with open(str(filename)+'.txt','r') as s:
        # Read the whole file into a list of lines.
        line = s.read().splitlines()
        # Split on the first ": ".  The original used str.strip(': 9'),
        # which removes a *set of characters* from both ends and only
        # happened to work for the hard-coded value 9.
        name, _, number = line[8].partition(': ')
        # Append the info to the 1-D arrays.
        characteristics.append(name)
        num.append(number)
#2d function Array that creates the 2D Array (Nice Way)
def real_two_d(nice_two_D):
    """Build [characteristics, num] and pretty-print it row by row.

    The incoming argument is ignored; the table is always rebuilt from
    the module-level ``characteristics`` and ``num`` lists.  Returns the
    freshly built table.
    """
    table = [characteristics, num]
    for row in table:
        # Each cell is followed by a space, then a newline ends the row
        # (same layout as printing every element with end=" ").
        print("".join("{} ".format(cell) for cell in row))
    return table
#2d function Array that creates the 2D Array (UGLY Way)
def notreal_two_d(ugly_two_D):
    """Return [characteristics, num] without printing.

    The incoming argument is ignored; a new two-row table built from the
    module-level lists is returned.
    """
    return [characteristics, num]
#Percentage function calculation that determines the
#percentage for each stat
def percentagecalc():
    """Print each stat as a percentage of the global ``base`` score.

    Reads the stat values from the global ``num`` list (filled in the
    order Strength..Charisma) and prints ``<label>: <value>%`` for each,
    preceded by a blank line.
    """
    # Labels parallel the order in which the stat functions append to num.
    labels = ('Strength', 'Constitution', 'Dexerity', 'Intelligence',
              'Wisdom', 'Charisma')
    #Displays the percentage results
    print('')
    # Convert each stored value to an integer, then scale it by
    # percentage/base exactly as the original per-stat lines did.
    for label, value in zip(labels, num):
        print(label + ': ' + str(int(value) / base * percentage) + '%')
menu = True
#Runs the menu loop until the user quits
while menu:
    #Menu Screen
    print('')
    print('Welcome to the 2D Array Creation!')
    print('Press:')
    print('S to start')
    print('H for help')
    print('Q to quit')
    #Waits for player input
    menuinput = input('Response: ')
    # Normalise once so each branch only checks a single lowercase letter.
    choice = menuinput.lower()
    #States if user inputs s
    if choice == 's':
        #Runs the 1D array functions (each appends to characteristics/num)
        strength()
        constitution()
        dexerity()
        intelligence()
        wisdom()
        charisma()
        #Runs the percentage function
        percentagecalc()
        #Runs the ugly 2d function
        uglytwo_d = notreal_two_d(ugly_two_D)
        #Displays the ugly 2d function
        print('')
        print('Ugly Two D Array:')
        print(uglytwo_d)
        print('')
        print('Nice Two D Array: ')
        #Runs the nice 2d function (it prints the table itself)
        nice_two_D2 = real_two_d(nice_two_D)
    #States if user inputs h
    elif choice == 'h':
        print('')
        print('This program creates a 2D array')
        print('')
        # Return value intentionally discarded; input() is just a pause.
        input('Press Enter to continue.')
    #States if user inputs q
    elif choice == 'q':
        #Quits the program
        print('')
        print('You have quit the program')  # fixed typo: "hage" -> "have"
        print('Have a nice day!')
        menu = False
    #States if user inputs anything else
    else:
        #Error screen
        print("Sorry, that's an invalid input!")
| 30.225641 | 65 | 0.610282 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,401 | 0.407363 |
70c55b64383890933df21310cef6c32194158ea0 | 407 | py | Python | gym_viewshed/envs/__init__.py | baimukashev/gym-viewshed | 802b613433c15f6f8582aa42c8af9b1d2b2c591d | [
"Apache-2.0"
] | null | null | null | gym_viewshed/envs/__init__.py | baimukashev/gym-viewshed | 802b613433c15f6f8582aa42c8af9b1d2b2c591d | [
"Apache-2.0"
] | null | null | null | gym_viewshed/envs/__init__.py | baimukashev/gym-viewshed | 802b613433c15f6f8582aa42c8af9b1d2b2c591d | [
"Apache-2.0"
] | null | null | null | # from gym_viewshed.envs.viewshed_env import ViewshedEnv
# from gym_viewshed.envs.viewshed_basic_env import ViewshedBasicEnv
# from gym_viewshed.envs.viewshed_random_env import ViewshedRandomEnv
# from gym_viewshed.envs.viewshed_greedy_env import ViewshedGreedyEnv
# from gym_viewshed.envs.viewshed_coverage_env import ViewshedCoverageEnv
from gym_viewshed.envs.vector_coverage_env import VectorCoverageEnv
| 58.142857 | 73 | 0.886978 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 334 | 0.820639 |
70c5e600c8b44bccf3f53b8bb4d07e06e41833dc | 4,081 | py | Python | syn/base/b/tests/test_wrapper.py | mbodenhamer/syn | aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258 | [
"MIT"
] | 1 | 2021-07-15T08:55:12.000Z | 2021-07-15T08:55:12.000Z | syn/base/b/tests/test_wrapper.py | mbodenhamer/syn | aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258 | [
"MIT"
] | 7 | 2021-01-07T23:51:57.000Z | 2021-12-13T19:50:57.000Z | syn/base/b/tests/test_wrapper.py | mbodenhamer/syn | aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258 | [
"MIT"
] | 2 | 2016-07-11T08:46:31.000Z | 2017-12-13T13:30:51.000Z | import collections
from copy import deepcopy
from nose.tools import assert_raises
from syn.base.b import ListWrapper, Attr
from syn.base.b.tests.test_base import check_idempotence
from syn.types.a import generate
from syn.type.a import Schema
from syn.schema.b.sequence import Sequence
from syn.base_utils import assert_equivalent
#-------------------------------------------------------------------------------
# ListWrapper
def test_listwrapper():
    """Exercise ListWrapper's copy semantics, the full MutableSequence
    protocol, min/max length validation, and str/pretty rendering."""
    obj = ListWrapper(1, 2, 3)
    check_idempotence(obj)
    # A copy compares equal but must not share the underlying list object.
    objc = obj.copy()
    assert_equivalent(obj, objc)
    assert_equivalent(obj._list, objc._list)
    assert obj._list is not objc._list
    assert isinstance(obj, collections.MutableSequence)
    assert issubclass(ListWrapper, collections.MutableSequence)
    # Mutating-sequence protocol: every step below depends on the state
    # produced by the previous step, so the order is significant.
    cobj = deepcopy(obj)
    _list = list(range(10))
    cobj._list = list(_list)
    assert list(cobj) == _list
    cobj.append(11)
    assert list(cobj) == _list + [11]
    cobj.extend([12,13])
    assert list(cobj) == _list + [11, 12, 13]
    cobj.insert(0, 14)
    assert list(cobj) == [14] + _list + [11, 12, 13]
    cobj.pop()
    assert list(cobj) == [14] + _list + [11, 12]
    cobj.remove(12)
    assert list(cobj) == [14] + _list + [11]
    assert cobj.count(1) == 1
    assert cobj.index(0) == 1
    cobj.sort()
    assert list(cobj) == _list + [11, 14]
    assert cobj[-1] == 14
    cobj[-1] = 15
    assert list(cobj) == _list + [11, 15]
    del cobj[-1]
    cobj.reverse()
    _list.reverse()
    assert list(cobj) == [11] + _list
    # Length constraints: _list currently has 10 items, which violates
    # both a max_len of 1 and a min_len of 50.
    class LW1(ListWrapper):
        _opts = dict(max_len = 1,
                     min_len = None)
    class LW2(ListWrapper):
        _opts = dict(max_len = None,
                     min_len = 50)
    assert_raises(ValueError, LW1(_list=_list).validate)
    assert_raises(ValueError, LW2(_list=_list).validate)
    # Rendering: str() is single-line, pretty() is one item per line.
    class LW3(ListWrapper):
        _attrs = dict(a = Attr(int),
                      b = Attr(float))
    lw = LW3(1, 2, 3, a = 1, b = 1.2)
    assert str(lw) == "LW3(1, 2, 3, a = 1, b = 1.2)"
    pretty_lw = '''LW3(1,
    2,
    3,
    a = 1,
    b = 1.2)'''
    assert lw.pretty() == pretty_lw
#-------------------------------------------------------------------------------
# Test ListWrapper Positional Args
class LWPA(ListWrapper):
    """ListWrapper fixture for positional-argument handling: the wrapped
    list takes at most 3 items, after which positional arguments fill the
    declared args 'a' then 'b' (see _opts['args'])."""
    # max_len: at most 3 list items; init_validate: validate in __init__.
    _opts = dict(max_len = 3,
                 args = ['a', 'b'],
                 init_validate = True)
    # 'a' is a required int; 'b' is a float defaulting to 3.2.
    _attrs = dict(a = Attr(int),
                  b = Attr(float, default=3.2))
def test_listwrapper_positional_args():
    """LWPA fills its (max 3) list slots first, then the declared args
    'a' and 'b'; too many positionals raise TypeError."""
    # (constructor args, expected value of b) -- b keeps its 3.2 default
    # when only four positionals are supplied.
    cases = [((1, 1.2, 'abc', 4), 3.2),
             ((1, 1.2, 'abc', 4, 5.3), 5.3)]
    for ctor_args, expected_b in cases:
        wrapper = LWPA(*ctor_args)
        assert list(wrapper) == [1, 1.2, 'abc']
        assert wrapper.a == 4
        assert wrapper.b == expected_b
        check_idempotence(wrapper)
    # Six positionals: 3 list slots + args 'a' and 'b' are already full.
    assert_raises(TypeError, LWPA, 1, 1.2, 'abc', 4, 5.3, 6.5)
#-------------------------------------------------------------------------------
# Test ListWrapper Generation
class LWGT1(ListWrapper):
    """Generation fixture: one int attr 'a', no length constraints."""
    _opts = dict(init_validate = True)
    _attrs = dict(a = Attr(int))
class LWGT2(LWGT1):
    """Generation fixture: adds a minimum list length of 2."""
    _opts = dict(min_len = 2)
class LWGT3(LWGT1):
    """Generation fixture: constrains _list to (int, float, int) via a Schema."""
    _attrs = dict(_list = Attr(Schema(Sequence(int, float, int))))
class LWGT4(LWGT2):
    """Generation fixture: bounds the list length to [2, 4] (min from LWGT2)."""
    _opts = dict(max_len = 4)
def test_listwrapper_generation():
    """generate() honours min_len/max_len opts and the _list Schema."""
    lw1 = generate(LWGT1)
    assert isinstance(lw1.a, int)
    assert len(lw1) == 0          # no length constraints -> empty list
    lw2 = generate(LWGT2)
    assert isinstance(lw2.a, int)
    assert len(lw2) == 2          # min_len = 2
    lw3 = generate(LWGT3)
    # Bug fix: this assertion previously re-checked lw2.a (copy-paste),
    # leaving lw3's inherited attr untested.
    assert isinstance(lw3.a, int)
    assert len(lw3) == 3          # Schema fixes the sequence length
    assert isinstance(lw3[0], int)
    assert isinstance(lw3[1], float)
    assert isinstance(lw3[2], int)
    lw4 = generate(LWGT4)
    assert 2 <= len(lw4) <= 4     # min_len = 2, max_len = 4
    lw41 = generate(LWGT4, max_len=3)
    assert 2 <= len(lw41) <= 3    # call-site max_len overrides the class opt
    assert_raises(TypeError, LWGT1, [], a=1.2)  # 'a' must be an int
#-------------------------------------------------------------------------------
if __name__ == '__main__': # pragma: no cover
    # Run every test_* function in this module when executed directly.
    from syn.base_utils import run_all_tests
    run_all_tests(globals(), verbose=True, print_errors=False)
| 27.02649 | 80 | 0.559667 | 779 | 0.190885 | 0 | 0 | 0 | 0 | 0 | 0 | 535 | 0.131095 |
70c72d20e3d2306963f8dc57ddf61ae7eaf753fe | 21,974 | py | Python | code_dense_hmm/experiment.py | fraunhofer-iais/dense-hmm | 28a93fe987e3c4ec2876221a90ba93ac304e1e09 | [
"MIT"
] | null | null | null | code_dense_hmm/experiment.py | fraunhofer-iais/dense-hmm | 28a93fe987e3c4ec2876221a90ba93ac304e1e09 | [
"MIT"
] | null | null | null | code_dense_hmm/experiment.py | fraunhofer-iais/dense-hmm | 28a93fe987e3c4ec2876221a90ba93ac304e1e09 | [
"MIT"
] | null | null | null |
from models import StandardHMM, DenseHMM, HMMLoggingMonitor
from utils import prepare_data, check_random_state, create_directories, dict_get, Timer, timestamp_msg, check_dir, is_multinomial, compute_stationary, check_sequences
from data import penntreebank_tag_sequences, protein_sequences, train_test_split
from datetime import datetime
import os
import copy
import numpy as np
""" Initializes a StandardHMM and a DenseHMM and fits given data to it
"""
def _standard_vs_dense(train_X, test_X, standard_params=None, dense_params=None, gt_AB=None):
    """Fit a DenseHMM and one or more StandardHMMs on the same data.

    Args:
        train_X, test_X: raw sequence collections; converted via prepare_data.
        standard_params: None (default StandardHMM), a single kwargs dict,
            or a list/tuple of kwargs dicts (one StandardHMM per dict).
        dense_params: kwargs dict for DenseHMM; its 'opt_schemes' entry
            selects which dense fits run ('em' and/or 'cooc').
        gt_AB: optional ground-truth (A, B) matrices forwarded to the
            co-occurrence fit.
    """
    t = Timer()
    # prepare_data returns (sequences, lengths, unique symbols); the
    # unique-symbol results are not used here.
    train_X, train_lengths, train_unique = prepare_data(train_X)
    test_X, test_lengths, test_unique = prepare_data(test_X)
    # Build the list of StandardHMMs to compare against.
    standard_hmms = []
    if standard_params is None:
        standard_hmms.append(StandardHMM())
    elif type(standard_params) is list or type(standard_params) is tuple:
        for params in standard_params:
            standard_hmms.append(StandardHMM(**params))
    else:
        standard_params = dict(standard_params)
        standard_hmms.append(StandardHMM(**standard_params))
    dense_params = {} if dense_params is None else dict(dense_params)
    dense_hmm = DenseHMM(**dense_params)
    # By default both dense optimization schemes are run.
    opt_schemes = dict_get(dense_params, 'opt_schemes', default=('em', 'cooc'))
    if 'em' in opt_schemes:
        t.tic("Fitting dense HMM in mode 'em' ...")
        dense_hmm.fit(train_X, train_lengths, test_X, test_lengths)
        t.toc("Fitting finished.")
    if 'cooc' in opt_schemes:
        t.tic("Fitting dense HMM in mode 'cooc' ...")
        dense_hmm.fit_coocs(train_X, train_lengths, test_X, test_lengths, gt_AB)
        t.toc("Fitting finished.")
    for i, standard_hmm in enumerate(standard_hmms):
        t.tic("Fitting standard hmm %d/%d" % (i+1, len(standard_hmms)))
        standard_hmm.fit(train_X, train_lengths, test_X, test_lengths)
        t.toc("Fitting finished.")
def _dirichlet_random_numbers(alpha_size, sample_size, dirichlet_param, random_state):
return random_state.dirichlet(np.ones(alpha_size) * dirichlet_param,
size=(sample_size,))
""" Initializes the transition matrices of given hmm to dirichlet distributions.
Assumes that random_state is an instance of np.RandomState """
def _dirichlet_matrix_initializer(dirichlet_param, n_hidden_states, n_observables, random_state):
    """Sample HMM parameters: uniform pi plus Dirichlet-distributed rows.

    Returns (pi, A, B) with A of shape (n_hidden_states, n_hidden_states)
    and B of shape (n_hidden_states, n_observables); each row of A and B
    is drawn from a symmetric Dirichlet with concentration dirichlet_param.
    """
    pi = np.full(n_hidden_states, 1.0 / n_hidden_states)
    A = random_state.dirichlet(dirichlet_param * np.ones(n_hidden_states),
                               size=(n_hidden_states,))
    # Note: this yields an n x m matrix (rows = hidden states).
    B = random_state.dirichlet(dirichlet_param * np.ones(n_observables),
                               size=(n_hidden_states,))
    return pi, A, B
def _stationary_matrix_init(n, m, rng, matrix_init_func):
    """Run `matrix_init_func(n, m, rng)` and replace pi by the stationary
    distribution of the sampled transition matrix A."""
    _, A, B = matrix_init_func(n, m, rng)
    return compute_stationary(A), A, B
def _default_standard_hmm_init():
return dict(n_hidden_states=1, startprob_prior=1.0, transmat_prior=1.0,
random_state=None, em_iter=10, convergence_tol=1e-2, verbose=False)
def _default_dense_hmm_init():
return dict(n_hidden_states=1, n_observables=None, startprob_prior=1.0, transmat_prior=1.0,
random_state=None, em_iter=10, convergence_tol=1e-10, verbose=False,
params="ste", init_params="ste", logging_monitor=None, mstep_config=None)
def _compute_fair_standard_n(m, n_dense, l_dense):
pre = - (m - 1)/2
discriminant = pre**2 + l_dense*(3*n_dense + m + 1)
if discriminant < 0:
raise Exception("Complex solution")
n_plus = pre + np.sqrt(discriminant)
n_minus = pre - np.sqrt(discriminant)
n = np.max((n_plus, n_minus))
if n <= 0:
raise Exception("Only negative solutions")
return int(np.around(n))
def _parse_base_parameters(exp_params, path_dict):
    """Fill in defaults for the options shared by every experiment type.

    Works on (shallow) copies of both dicts and returns the completed
    experiment-parameter dict.
    """
    path_dict = dict(path_dict)
    params = dict(exp_params)
    params['standard_params'] = dict_get(params, 'standard_params', default=_default_standard_hmm_init(), cast=dict)
    params['dense_params'] = dict_get(params, 'dense_params', default=_default_dense_hmm_init(), cast=dict)
    params['dense_opt_schemes'] = dict_get(params, 'dense_opt_schemes', default=('em',))
    params['compare_to_fair_standard'] = dict_get(params, 'compare_to_fair_standard', default=False)
    return params
def _parse_syntheticgt_parameters(exp_params, path_dict):
    """Extend base parameters with ground-truth-HMM options for synthetic data.

    Adds sampling sizes, the GT HMM constructor kwargs ('gt_params') and,
    when an experiment directory is given, a logging monitor for the GT EM
    optimization.  Requires exp_params['n_emissions'] to be set by the caller.
    """
    exp_params = _parse_base_parameters(exp_params, path_dict)
    exp_params['gt_params'] = dict_get(exp_params, 'gt_params', default=_default_standard_hmm_init(), cast=dict)
    exp_params['n_seqs_train'] = dict_get(exp_params, 'n_seqs_train', default=10, cast=int)
    exp_params['seqlen_train'] = dict_get(exp_params, 'seqlen_train', default=10, cast=int)
    exp_params['n_seqs_test'] = dict_get(exp_params, 'n_seqs_test', default=10, cast=int)
    exp_params['seqlen_test'] = dict_get(exp_params, 'seqlen_test', default=10, cast=int)
    exp_params['gt_stationary'] = dict_get(exp_params, 'gt_stationary', default=False)
    exp_params['gt_params']['n_observables'] = exp_params['n_emissions']
    # Making sure the initializer returns stationary pi (if gt_stationary = true)...
    init_params = dict_get(exp_params['gt_params'], 'init_params', default=None)
    if init_params is not None and callable(init_params) and exp_params['gt_stationary']:
        # Bug fix: the previous code closed over the *variable* `init_params`
        # and then rebound that variable to the lambda itself, so calling it
        # recursed forever.  Binding the original initializer as a default
        # argument freezes the intended callee.
        init_params = lambda n, m, rng, _inner=init_params: _stationary_matrix_init(n, m, rng, _inner)
    exp_params['gt_params']['init_params'] = init_params
    if 'experiment_directory' in path_dict:
        exp_dir = str(path_dict['experiment_directory'])
        check_dir(exp_dir)
        # Log GT EM optimization by default
        gt_log_config = dict_get(exp_params, 'gt_log_config', default=dict(), cast=dict)
        gt_log_config['exp_folder'] = dict_get(gt_log_config, 'exp_folder', default=exp_dir)
        gt_log_config['log_folder'] = dict_get(gt_log_config, 'log_folder', default='/gt_logs/em_opt')
        gt_logmon = HMMLoggingMonitor(gt_log_config)
        exp_params['gt_params']['logging_monitor'] = gt_logmon
    return exp_params
def _parse_syntheticgt_dirichlet_parameters(exp_params, path_dict):
    """Parse options for a synthetic-GT experiment with Dirichlet-sampled matrices.

    Builds on _parse_syntheticgt_parameters and installs a default ground-truth
    initializer drawing A and B from symmetric Dirichlet distributions with
    concentration 'dirichlet_param'.
    """
    exp_params = _parse_syntheticgt_parameters(exp_params, path_dict)
    exp_params['dirichlet_param'] = dict_get(exp_params, 'dirichlet_param', default=0.1, cast=float)
    # NOTE(review): the callee above already read exp_params['n_emissions'],
    # so this default only matters for later readers -- confirm callers
    # always provide 'n_emissions' up front.
    exp_params['n_emissions'] = dict_get(exp_params, 'n_emissions', default=None)
    # Initialize ground truth hmm
    def _dirichlet_matrix_init(n, m, rng):
        # Closure over exp_params: picks up the parsed dirichlet_param.
        return _dirichlet_matrix_initializer(exp_params['dirichlet_param'], n, m, rng)
    init_params = dict_get(exp_params['gt_params'], 'init_params', default=_dirichlet_matrix_init)
    if init_params is not None and callable(init_params) and exp_params['gt_stationary']:
        # Bind the current initializer under a separate name before wrapping,
        # so the lambda does not capture its own rebinding.
        init_params_ = copy.deepcopy(init_params)
        init_params = lambda n, m, rng: _stationary_matrix_init(n, m, rng, init_params_)
    exp_params['gt_params']['init_params'] = init_params
    return exp_params
def _parse_standard_and_dense(exp_params, path_dict, n_emissions):
    """Finalize StandardHMM/DenseHMM options and attach logging monitors.

    Sets the shared emission count, propagates the dense opt schemes,
    optionally derives a parameter-count-'fair' StandardHMM configuration,
    and wires per-model HMMLoggingMonitor instances when an experiment
    directory is available.
    """
    exp_params['n_emissions'] = n_emissions
    # Number of emissions must be the same for all models
    exp_params['standard_params']['n_observables'] = n_emissions
    exp_params['dense_params']['n_observables'] = n_emissions
    # Set opt_schemes that are needed
    exp_params['dense_params']['opt_schemes'] = exp_params['dense_opt_schemes']
    # Setup fair standard hmm
    if exp_params['compare_to_fair_standard']:
        # TODO check l_uz = l_vw
        # NOTE(review): assumes dense_params has an 'mstep_config' with key
        # 'l_uz' whenever compare_to_fair_standard is set; otherwise this
        # raises a KeyError -- confirm with callers.
        n_dense, l_dense = exp_params['dense_params']['n_hidden_states'], exp_params['dense_params']['mstep_config']['l_uz']
        n_fair = _compute_fair_standard_n(exp_params['n_emissions'], n_dense, l_dense)
        exp_params['fair_standard_params'] = copy.deepcopy(exp_params['standard_params'])
        exp_params['fair_standard_params']['n_hidden_states'] = n_fair
    if 'experiment_directory' in path_dict:
        exp_dir = str(path_dict['experiment_directory'])
        check_dir(exp_dir)
        # Build one log config per model, defaulting folders under exp_dir.
        standard_log_config = dict_get(exp_params, 'standard_log_config', default=dict(), cast=dict)
        dense_log_config = dict_get(exp_params, 'dense_log_config', default=dict(), cast=dict)
        standard_log_config['exp_folder'] = dict_get(standard_log_config, 'exp_folder', default=exp_dir)
        standard_log_config['log_folder'] = dict_get(standard_log_config, 'log_folder', default='/standard_logs')
        dense_log_config['exp_folder'] = dict_get(dense_log_config, 'exp_folder', default=exp_dir)
        dense_log_config['log_folder'] = dict_get(dense_log_config, 'log_folder', default='/dense_logs')
        standard_logmon, dense_logmon = HMMLoggingMonitor(standard_log_config), HMMLoggingMonitor(dense_log_config)
        exp_params['standard_params']['logging_monitor'] = standard_logmon
        exp_params['dense_params']['logging_monitor'] = dense_logmon
        fair_standard_logmon = None
        if 'fair_standard_params' in exp_params:
            fair_standard_log_config = dict_get(exp_params, 'fair_standard_log_config', default=dict(), cast=dict)
            fair_standard_log_config['exp_folder'] = dict_get(fair_standard_log_config, 'exp_folder', default=exp_dir)
            fair_standard_log_config['log_folder'] = dict_get(fair_standard_log_config, 'log_folder', default='/fair_standard_logs')
            fair_standard_logmon = HMMLoggingMonitor(fair_standard_log_config)
            exp_params['fair_standard_params']['logging_monitor'] = fair_standard_logmon
    return exp_params
def _sample_sequences_from_gt_hmm(exp_params, path_dict, gt_hmm=None, sample_retries=100):
    """Sample train/test sequences from a ground-truth HMM and persist them.

    Retries sampling up to `sample_retries` times until every one of the
    `n_observables` symbols occurs in the sample (is_multinomial).

    Returns:
        (train_X, test_X, gt_AB) where gt_AB is (transmat, emissionprob)
        when the GT start distribution is stationary, else None.

    Raises:
        Exception: if a multinomial train or test sample could not be drawn.
    """
    t = Timer()
    n_emissions = exp_params['gt_params']['n_observables']
    if gt_hmm is None:
        gt_hmm = StandardHMM(**exp_params['gt_params'])
    # Sample train and test sequences, save them
    t.tic()
    cur_sample_try = 0
    train_X = None
    while cur_sample_try < sample_retries and not is_multinomial(train_X, min_symbols=n_emissions):
        train_X = gt_hmm.sample_sequences(exp_params['n_seqs_train'], exp_params['seqlen_train'])
        cur_sample_try += 1
    if not is_multinomial(train_X, min_symbols=n_emissions):
        raise Exception("Could not sample a multinomial distribution. Try to increase sequence length and number of sequences. Or change the dirichlet parameter")
    cur_sample_try = 0
    test_X = None
    while cur_sample_try < sample_retries and not is_multinomial(test_X, min_symbols=n_emissions):
        test_X = gt_hmm.sample_sequences(exp_params['n_seqs_test'], exp_params['seqlen_test'])
        cur_sample_try += 1
    t.toc("Generated train and test sequences")
    # Bug fix: this guard previously re-checked train_X, so a degenerate
    # test sample could slip through silently.
    if not is_multinomial(test_X, min_symbols=n_emissions):
        raise Exception("Could not sample a multinomial distribution. Try to increase sequence length and number of sequences. Or change the dirichlet parameter.")
    t.tic()
    if 'gt_dir' in path_dict:
        gt_dir = str(path_dict['gt_dir'])
        check_dir(gt_dir)
        np.save(gt_dir + '/transmat', gt_hmm.transmat_)
        np.save(gt_dir + '/emissionprob', gt_hmm.emissionprob_)
        np.save(gt_dir + '/startprob', gt_hmm.startprob_)
        # (removed an unused `gt_samples = dict_get(...)` lookup here)
    t.toc("Ground truth parameters logged")
    gt_AB = None
    if exp_params['gt_stationary']:
        gt_AB = (gt_hmm.transmat_, gt_hmm.emissionprob_)
    _save_data(path_dict, train_X, test_X, gt_AB)
    return train_X, test_X, gt_AB
def _save_data(path_dict, train_X, test_X=None, gt_AB=None):
    """Persist sequence arrays (and optional GT matrices) as .npy files
    under path_dict['data_dir']; a no-op when that key is absent."""
    if 'data_dir' not in path_dict:
        return
    data_dir = str(path_dict['data_dir'])
    check_dir(data_dir)
    np.save(data_dir + '/train_X', train_X)
    if test_X is not None:
        np.save(data_dir + '/test_X', test_X)
    if gt_AB is not None:
        np.save(data_dir + '/gt_A', gt_AB[0])
        np.save(data_dir + '/gt_B', gt_AB[1])
    timestamp_msg("Saved data in %s" % data_dir)
def _save_experiment_parameters(exp_params, path_dict):
    """Serialize `exp_params` to <experiment_directory>/exp_params.npy.

    Non-picklable members (initializer callables, HMMLoggingMonitor
    instances) are replaced by readable stand-ins (function name /
    log_config dict) in a deep copy; the caller's dict is untouched.

    Returns:
        The sanitized copy, or None when path_dict has no
        'experiment_directory' entry.
    """
    if 'experiment_directory' in path_dict:
        exp_dir = str(path_dict['experiment_directory'])
        check_dir(exp_dir)
        _exp_params = copy.deepcopy(exp_params)
        gt_params = dict_get(_exp_params, 'gt_params', default=None, cast=dict)
        if gt_params is not None:
            _exp_params['gt_params'] = gt_params
            init_params = dict_get(gt_params, 'init_params', default=None)
            if callable(init_params):
                _exp_params['gt_params']['init_params'] = str(init_params.__name__)
            gt_logmon = dict_get(gt_params, 'logging_monitor', default=None)
            if gt_logmon is not None and isinstance(gt_logmon, HMMLoggingMonitor):
                _exp_params['gt_params']['logging_monitor'] = dict(gt_logmon.log_config)
        standard_params = dict_get(_exp_params, 'standard_params', default=None, cast=dict)
        standard_logmon = dict_get(standard_params, 'logging_monitor', default=None)
        if standard_logmon is not None and isinstance(standard_logmon, HMMLoggingMonitor):
            _exp_params['standard_params']['logging_monitor'] = dict(standard_logmon.log_config)
        dense_params = dict_get(_exp_params, 'dense_params', default=None, cast=dict)
        # Bug fix: this previously looked up 'logging_monitor' in
        # standard_params, copying the wrong monitor config into dense_params.
        dense_logmon = dict_get(dense_params, 'logging_monitor', default=None)
        if dense_logmon is not None and isinstance(dense_logmon, HMMLoggingMonitor):
            _exp_params['dense_params']['logging_monitor'] = dict(dense_logmon.log_config)
        fair_standard_params = dict_get(_exp_params, 'fair_standard_params', default=None, cast=dict)
        fair_standard_logmon = dict_get(fair_standard_params, 'logging_monitor', default=None)
        if fair_standard_logmon is not None and isinstance(fair_standard_logmon, HMMLoggingMonitor):
            _exp_params['fair_standard_params']['logging_monitor'] = dict(fair_standard_logmon.log_config)
        np.save(exp_dir + '/exp_params', _exp_params)
        timestamp_msg("Saved experiment parameters in %s" % exp_dir)
        return _exp_params
def synthetic_sequences_experiment(exp_params, path_dict, sample_retries=100, reuse_sequences=None):
    """Run the full 'synthetic sequences' experiment.

    Samples train/test data from a Dirichlet-initialized ground-truth HMM
    (or reuses `reuse_sequences` = (train_X, test_X, gt_AB)), then fits and
    compares the standard and dense models, logging under path_dict.
    """
    t_exp = Timer()
    start_time = t_exp.tic("Starting a 'synthetic sequences' experiment.")
    # Get parameters
    t = Timer()
    t.tic("Parsing parameters ...")
    exp_params = _parse_syntheticgt_dirichlet_parameters(exp_params, path_dict)
    exp_params = _parse_standard_and_dense(exp_params, path_dict, exp_params['n_emissions'])
    _exp_params = _save_experiment_parameters(exp_params, path_dict)
    t.toc("Parameters parsed. Using parameters: %s" % str(_exp_params))
    # Only a well-formed (train_X, test_X, gt_AB) triple is reused.
    train_X, test_X, gt_AB = None, None, None
    if reuse_sequences is None or type(reuse_sequences) != tuple or len(reuse_sequences) != 3:
        train_X, test_X, gt_AB = _sample_sequences_from_gt_hmm(exp_params, path_dict, sample_retries=sample_retries)
    else:
        train_X, test_X, gt_AB = reuse_sequences
        timestamp_msg("Reusing sequences")
    # Compare against the 'fair' standard HMM as well, when configured.
    if 'fair_standard_params' in exp_params:
        _standard_vs_dense(train_X, test_X, (exp_params['standard_params'], exp_params['fair_standard_params']),
                           exp_params['dense_params'], gt_AB)
    else:
        _standard_vs_dense(train_X, test_X, exp_params['standard_params'], exp_params['dense_params'], gt_AB)
    fin_time, diff = t_exp.toc("Finished a 'synthetic sequences' experiment.")
# Dataset identifiers accepted by get_dataset_sequences().
SUPPORTED_DATASETS = frozenset(('penntree_tag','protein'))
def get_dataset_sequences(ident, ds_params=None, log_dir=None):
    """Load sequences and tag<->symbol maps for a supported dataset.

    Args:
        ident: one of SUPPORTED_DATASETS ('penntree_tag' or 'protein').
        ds_params: optional kwargs forwarded to the dataset loader.
        log_dir: if given, the symbol maps are saved there as .npy files.

    Returns:
        (sequences, tag_to_symb, symb_to_tag).

    Raises:
        Exception: if `ident` is not a supported dataset.
    """
    if ident not in SUPPORTED_DATASETS:
        raise Exception("Given Dataset %s is not supported." % str(ident))
    # Bug fix: avoid the shared-mutable-default pitfall of `ds_params={}`.
    ds_params = {} if ds_params is None else ds_params
    sequences, tag_to_symb, symb_to_tag = None, None, None
    if ident == 'penntree_tag':
        sequences, tag_to_symb, symb_to_tag = penntreebank_tag_sequences(**ds_params)
    elif ident == 'protein':
        sequences, tag_to_symb, symb_to_tag = protein_sequences(**ds_params)
    if log_dir is not None:
        np.save(log_dir + '/symb_to_tag.npy', symb_to_tag)
        np.save(log_dir + '/tag_to_symb.npy', tag_to_symb)
    return sequences, tag_to_symb, symb_to_tag
def dataset_synthetic_sequences_experiment(exp_params, path_dict, sample_retries=100):
    """Run the 'dataset synthetic sequences' experiment.

    Fits a ground-truth HMM to a real dataset, then samples synthetic
    train/test sequences from that fitted model and compares standard vs
    dense models on them.
    """
    t_exp = Timer()
    exp_params = dict(exp_params)
    ident = dict_get(exp_params, 'dataset_ident', default='', cast=str)
    start_time = t_exp.tic("Starting a 'dataset synthetic sequences' experiment. (%s)" % str(ident))
    gt_dir = dict_get(path_dict, 'gt_dir', default=None)
    check_dir(gt_dir)
    ds_params = dict_get(exp_params, 'dataset_params', default=dict(), cast=dict)
    gt_sequences, _, _ = get_dataset_sequences(ident, ds_params, gt_dir)
    # Get parameters
    t = Timer()
    t.tic("Parsing parameters ...")
    # Check gt_sequences
    sequences, lengths, n_emissions = check_sequences(gt_sequences)
    exp_params['n_emissions'] = n_emissions
    exp_params = _parse_syntheticgt_parameters(exp_params, path_dict)
    exp_params = _parse_standard_and_dense(exp_params, path_dict, exp_params['n_emissions'])
    _exp_params = _save_experiment_parameters(exp_params, path_dict)
    t.toc("Parameters parsed. Using parameters: %s" % str(_exp_params))
    # Fit the ground-truth model on the real data before sampling from it.
    t.tic("Fitting GT HMM...")
    gt_hmm = StandardHMM(**exp_params['gt_params'])
    gt_hmm.fit(sequences, lengths)
    t.toc("Fitting finished")
    train_X, test_X, gt_AB = _sample_sequences_from_gt_hmm(exp_params, path_dict, gt_hmm=gt_hmm, sample_retries=sample_retries)
    if 'fair_standard_params' in exp_params:
        _standard_vs_dense(train_X, test_X, (exp_params['standard_params'], exp_params['fair_standard_params']),
                           exp_params['dense_params'], gt_AB)
    else:
        _standard_vs_dense(train_X, test_X, exp_params['standard_params'], exp_params['dense_params'], gt_AB)
    fin_time, diff = t_exp.toc("Finished a 'dataset synthetic sequences' experiment.")
def dataset_sequences_experiment(exp_params, path_dict, reuse_sequences=None):
    """Run the 'dataset sequences' experiment on real (non-synthetic) data.

    Splits the dataset into train/test by 'train_perc' (or reuses
    `reuse_sequences` = (train_X, test_X)) and compares standard vs dense
    models directly on the real sequences.
    """
    t_exp = Timer()
    exp_params = dict(exp_params)
    ident = dict_get(exp_params, 'dataset_ident', default='', cast=str)
    start_time = t_exp.tic("Starting a 'dataset sequences' experiment. (%s)" % str(ident))
    # Get parameters
    t = Timer()
    t.tic("Parsing parameters ...")
    train_perc = dict_get(exp_params, 'train_perc', default=1., cast=float)
    gt_dir = dict_get(path_dict, 'gt_dir', default=None)
    check_dir(gt_dir)
    ds_params = dict_get(exp_params, 'dataset_params', default=dict(), cast=dict)
    # Only a well-formed (train_X, test_X) pair is reused.
    if reuse_sequences is None or type(reuse_sequences) != tuple or len(reuse_sequences) != 2:
        gt_sequences, _, _ = get_dataset_sequences(ident, ds_params, gt_dir)
        train_X, test_X = train_test_split(gt_sequences, train_perc)
    else:
        train_X, test_X = reuse_sequences
        timestamp_msg("Reusing sequences ...")
    # Check gt_sequences
    _, _, n_train_emissions = check_sequences(train_X)
    n_test_emissions = None
    if test_X is not None and len(test_X) > 0:
        _, _, n_test_emissions = check_sequences(test_X)
    _save_data(path_dict, train_X, test_X)
    # Both splits must use the same emission alphabet.
    if n_test_emissions is not None and n_train_emissions != n_test_emissions:
        raise Exception("Number of emissions in train and test sequence differs")
    exp_params['n_emissions'] = n_train_emissions
    exp_params = _parse_base_parameters(exp_params, path_dict)
    exp_params = _parse_standard_and_dense(exp_params, path_dict, exp_params['n_emissions'])
    _exp_params = _save_experiment_parameters(exp_params, path_dict)
    t.toc("Parameters parsed. Using parameters: %s" % str(_exp_params))
    if 'fair_standard_params' in exp_params:
        _standard_vs_dense(train_X, test_X, (exp_params['standard_params'], exp_params['fair_standard_params']),
                           exp_params['dense_params'])
    else:
        _standard_vs_dense(train_X, test_X, exp_params['standard_params'], exp_params['dense_params'])
    fin_time, diff = t_exp.toc("Finished a 'dataset sequences' experiment.")
def run_experiment(exp_type, exp_name, exp_params, reuse_setup=None):
    """Dispatch one experiment run and return its experiment directory.

    `reuse_setup`, when given as (experiment_directory, path_dict), skips
    creating a fresh experiment directory layout.
    """
    if reuse_setup is not None and type(reuse_setup) == tuple and len(reuse_setup) == 2:
        experiment_directory, path_dict = reuse_setup
    else:
        experiment_directory, path_dict = setup_experiment(exp_name, exp_params)
    supported_exp_types = ('synthetic_sequences', 'dataset_synthetic_sequences', 'dataset_sequences')
    # Map each supported type to its runner instead of chaining if/elif.
    runners = {
        'synthetic_sequences': synthetic_sequences_experiment,
        'dataset_synthetic_sequences': dataset_synthetic_sequences_experiment,
        'dataset_sequences': dataset_sequences_experiment,
    }
    runner = runners.get(exp_type)
    if runner is None:
        raise Exception('Given experiment type "%s" is not supported. \n'
                        'It has to be one of the following: %s' % (str(exp_type), str(supported_exp_types)))
    runner(exp_params, path_dict)
    print(experiment_directory)
    return experiment_directory
def setup_experiment(exp_name, exp_params):
    """Assemble a timestamped experiment-directory path and its sub-path map.

    Nothing is created on disk here; only the path strings are built.
    Returns (experiment_directory, path_dict).
    """
    stamp = datetime.now().strftime('%Y%m%d_%H-%M-%S')
    experiment_directory = os.getcwd() + '/' + exp_name + stamp
    path_dict = {
        'experiment_directory': experiment_directory,
        'data_dir': experiment_directory + '/data',
        'gt_dir': experiment_directory + '/gt_logs',
    }
    return experiment_directory, path_dict
| 47.562771 | 166 | 0.705834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,889 | 0.22249 |
70c846852061594e0e9914e39b2035cd28c6dd88 | 2,167 | py | Python | jetson/ballrunner.py | Reslix/Lohbot | 4920c0d0fcda64ec7b6438dd848c789e4fd15cc4 | [
"MIT"
] | 1 | 2018-02-21T03:49:54.000Z | 2018-02-21T03:49:54.000Z | jetson/ballrunner.py | Reslix/Lohbot | 4920c0d0fcda64ec7b6438dd848c789e4fd15cc4 | [
"MIT"
] | null | null | null | jetson/ballrunner.py | Reslix/Lohbot | 4920c0d0fcda64ec7b6438dd848c789e4fd15cc4 | [
"MIT"
] | null | null | null | from newcamera import TrackingCameraRunner
from serial_io import SerialIO
from show import imshow
import cv2
import math
# Ball-following robot loop: track a tennis ball with the camera and steer
# the wheel motors (via the Arduino serial link) toward it.
print("Initializing serial connection with Arduino")
ard = SerialIO()
ard.start()
print("Initializing camera")
c = TrackingCameraRunner(0)
print("Tracking Ball...")
# Target horizontal pixel position of the ball (image centre).
tcenterx = 640
# Target ball radius in pixels -- NOTE(review): currently unused.
tradius = 40
# Motor speed cap used when clamping the differential term.
speed = 40
im = None
last = (0, cv2.getTickCount())
try:
    while True:
        c.step_frame()
        center, radius = c.track_tennis_ball()
        #im = imshow(c.frame, im=im)
        if center:
            # This should al lbe in cm...
            # NOTE(review): 2131 and the 1.02 exponent look like empirical
            # camera calibration constants -- confirm against the rig.
            distance = 2131/(radius ** 1.02)
            horizontal = 3.5/radius * (tcenterx - center[0])
            # Ratio of sideways offset to distance; clamp to [-1, 1] so it
            # is a valid asin() argument.
            oh = horizontal/distance
            if oh < -1:
                oh = -1
            elif oh > 1:
                oh = 1
            angle = math.asin(oh)
            print('distance: {}\nhorizontal: {}\nangle: {}'.format(distance, horizontal, angle))
            #TODO make sure angle is in degrees
            '''
            if angle < -.1:
                differential = 89.1 * angle + 43.7
            elif angle > .1:
                differential = 126 * angle - 28.9
            else:
                differential = 0
            differential *= 1
            '''
            # Proportional steering term plus a small derivative term
            # (angle change per second, from the OpenCV tick clock).
            differential = angle * 130
            deriv = (angle - last[0])/((cv2.getTickCount() - last[1])/cv2.getTickFrequency())
            print('deriv: {}'.format(deriv))
            last = (angle, cv2.getTickCount())
            differential = differential + 0.01 * deriv
            # Forward drive proportional to remaining distance (stops ~30).
            translate = (distance - 30)/6
            left = + differential
            right = - differential
            # Clamp the turn; turns to the left are tripled.
            # NOTE(review): the asymmetric *3 branch looks like a tuning
            # hack -- confirm it is intentional.
            if angle > 0:
                left = max(-speed, min(speed, left))
                right = max(-speed, min(speed, right))
            else:
                left = max(-speed, min(speed, left))*3
                right = max(-speed, min(speed, right))*3
            left += translate
            right += translate
            print(left,right)
            ard.direct(int(right), int(left))
        else:
            # No ball in view: halt the motors.
            ard.stop()
            print("stop")
except KeyboardInterrupt:
    # Ctrl-C: stop the motors and release the camera before exiting.
    ard.stop()
    c.close()
    pass
| 30.957143 | 97 | 0.513152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 492 | 0.227042 |
70c959da6388e937e02687ad9571d92c479badd1 | 8,247 | py | Python | simple_rl/tasks/taxi/TaxiOOMDPClass.py | KorlaMarch/simple_rl | 30086b5cf4fd3e9dee76ddfb5ae4f565593ce191 | [
"Apache-2.0"
] | 10 | 2021-11-22T12:29:30.000Z | 2022-03-28T10:23:16.000Z | simple_rl/tasks/taxi/TaxiOOMDPClass.py | samlobel/simple_rl_mbrl | ed868916d06dbf68f4af23bea83b0e852e88df6e | [
"Apache-2.0"
] | null | null | null | simple_rl/tasks/taxi/TaxiOOMDPClass.py | samlobel/simple_rl_mbrl | ed868916d06dbf68f4af23bea83b0e852e88df6e | [
"Apache-2.0"
] | 2 | 2022-03-19T07:42:56.000Z | 2022-03-28T10:36:33.000Z | '''
TaxiMDPClass.py: Contains the TaxiMDP class.
From:
Dietterich, Thomas G. "Hierarchical reinforcement learning with the
MAXQ value function decomposition." J. Artif. Intell. Res.(JAIR) 13
(2000): 227-303.
Author: David Abel (cs.brown.edu/~dabel/)
'''
# Python imports.
from __future__ import print_function
import random
import copy
# Other imports.
from simple_rl.mdp.oomdp.OOMDPClass import OOMDP
from simple_rl.mdp.oomdp.OOMDPObjectClass import OOMDPObject
from simple_rl.tasks.taxi.TaxiStateClass import TaxiState
from simple_rl.tasks.taxi import taxi_helpers
class TaxiOOMDP(OOMDP):
    ''' Class for a Taxi OO-MDP '''
    # Static constants.
    # Primitive actions available in every state.
    ACTIONS = ["up", "down", "left", "right", "pickup", "dropoff"]
    # Object attributes used by the OO-MDP state representation.
    ATTRIBUTES = ["x", "y", "has_passenger", "in_taxi", "dest_x", "dest_y"]
    # Object classes that make up a taxi state.
    CLASSES = ["agent", "wall", "passenger"]
    def __init__(self, width, height, agent, walls, passengers, slip_prob=0, gamma=0.99):
        '''
        Args:
            width (int): number of grid columns (x in 1..width)
            height (int): number of grid rows (y in 1..height)
            agent (dict): attribute dict for the taxi agent
            walls (list of dict): attribute dicts for wall objects
            passengers (list of dict): attribute dicts for passenger objects
            slip_prob (float): probability a movement action is inverted
            gamma (float): discount factor
        '''
        self.height = height
        self.width = width
        # Wrap the raw attribute dicts in OOMDPObject instances.
        agent_obj = OOMDPObject(attributes=agent, name="agent")
        wall_objs = self._make_oomdp_objs_from_list_of_dict(walls, "wall")
        pass_objs = self._make_oomdp_objs_from_list_of_dict(passengers, "passenger")
        init_state = self._create_state(agent_obj, wall_objs, pass_objs)
        OOMDP.__init__(self, TaxiOOMDP.ACTIONS, self._taxi_transition_func, self._taxi_reward_func, init_state=init_state, gamma=gamma)
        self.slip_prob = slip_prob
def _create_state(self, agent_oo_obj, walls, passengers):
'''
Args:
agent_oo_obj (OOMDPObjects)
walls (list of OOMDPObject)
passengers (list of OOMDPObject)
Returns:
(OOMDP State)
TODO: Make this more egneral and put it in OOMDPClass.
'''
objects = {c : [] for c in TaxiOOMDP.CLASSES}
objects["agent"].append(agent_oo_obj)
# Make walls.
for w in walls:
objects["wall"].append(w)
# Make passengers.
for p in passengers:
objects["passenger"].append(p)
return TaxiState(objects)
def _taxi_reward_func(self, state, action):
'''
Args:
state (OOMDP State)
action (str)
Returns
(float)
'''
_error_check(state, action)
# Stacked if statements for efficiency.
if action == "dropoff":
# If agent is dropping off.
agent = state.get_first_obj_of_class("agent")
# Check to see if all passengers at destination.
if agent.get_attribute("has_passenger"):
for p in state.get_objects_of_class("passenger"):
if p.get_attribute("x") != p.get_attribute("dest_x") or p.get_attribute("y") != p.get_attribute("dest_y"):
return 0 - self.step_cost
return 1 - self.step_cost
return 0 - self.step_cost
def _taxi_transition_func(self, state, action):
'''
Args:
state (State)
action (str)
Returns
(State)
'''
_error_check(state, action)
if self.slip_prob > random.random():
# Flip dir.
if action == "up":
action = "down"
elif action == "down":
action = "up"
elif action == "left":
action = "right"
elif action == "right":
action = "left"
if action == "up" and state.get_agent_y() < self.height:
next_state = self.move_agent(state, self.slip_prob, dy=1)
elif action == "down" and state.get_agent_y() > 1:
next_state = self.move_agent(state, self.slip_prob, dy=-1)
elif action == "right" and state.get_agent_x() < self.width:
next_state = self.move_agent(state, self.slip_prob, dx=1)
elif action == "left" and state.get_agent_x() > 1:
next_state = self.move_agent(state, self.slip_prob, dx=-1)
elif action == "dropoff":
next_state = self.agent_dropoff(state)
elif action == "pickup":
next_state = self.agent_pickup(state)
else:
next_state = state
# Make terminal.
if taxi_helpers.is_taxi_terminal_state(next_state):
next_state.set_terminal(True)
# All OOMDP states must be updated.
next_state.update()
return next_state
def __str__(self):
return "taxi_h-" + str(self.height) + "_w-" + str(self.width)
def visualize_agent(self, agent):
from ...utils.mdp_visualizer import visualize_agent
from taxi_visualizer import _draw_state
visualize_agent(self, agent, _draw_state)
_ = input("Press anything to quit ")
sys.exit(1)
def visualize_interaction(self):
from simple_rl.utils.mdp_visualizer import visualize_interaction
from taxi_visualizer import _draw_state
visualize_interaction(self, _draw_state)
raw_input("Press anything to quit ")
sys.exit(1)
# ----------------------------
# -- Action Implementations --
# ----------------------------
def move_agent(self, state, slip_prob=0, dx=0, dy=0):
'''
Args:
state (TaxiState)
dx (int) [optional]
dy (int) [optional]
Returns:
(TaxiState)
'''
if taxi_helpers._is_wall_in_the_way(state, dx=dx, dy=dy):
# There's a wall in the way.
return state
next_state = copy.deepcopy(state)
# Move Agent.
agent_att = next_state.get_first_obj_of_class("agent").get_attributes()
agent_att["x"] += dx
agent_att["y"] += dy
# Move passenger.
taxi_helpers._move_pass_in_taxi(next_state, dx=dx, dy=dy)
return next_state
def agent_pickup(self, state):
'''
Args:
state (TaxiState)
'''
next_state = copy.deepcopy(state)
agent = next_state.get_first_obj_of_class("agent")
# update = False
if agent.get_attribute("has_passenger") == 0:
# If the agent does not have a passenger.
for i, passenger in enumerate(next_state.get_objects_of_class("passenger")):
if agent.get_attribute("x") == passenger.get_attribute("x") and agent.get_attribute("y") == passenger.get_attribute("y"):
# Pick up passenger at agent location.
agent.set_attribute("has_passenger", 1)
passenger.set_attribute("in_taxi", 1)
return next_state
def agent_dropoff(self, state):
'''
Args:
state (TaxiState)
Returns:
(TaxiState)
'''
next_state = copy.deepcopy(state)
# Get Agent, Walls, Passengers.
agent = next_state.get_first_obj_of_class("agent")
# agent = OOMDPObject(attributes=agent_att, name="agent")
passengers = next_state.get_objects_of_class("passenger")
if agent.get_attribute("has_passenger") == 1:
# Update if the agent has a passenger.
for i, passenger in enumerate(passengers):
if passenger.get_attribute("in_taxi") == 1:
# Drop off the passenger.
passengers[i].set_attribute("in_taxi", 0)
agent.set_attribute("has_passenger", 0)
return next_state
def _error_check(state, action):
    '''
    Args:
        state (State)
        action (str)

    Summary:
        Validates that the action is a legal taxi action and the state is a
        TaxiState; raises ValueError otherwise.
    '''
    action_is_known = action in TaxiOOMDP.ACTIONS
    if not action_is_known:
        raise ValueError("Error: the action provided (" + str(action) + ") was invalid.")

    if isinstance(state, TaxiState):
        return

    raise ValueError("Error: the given state (" + str(state) + ") was not of the correct class.")
def main():
    # Demo world: 10x10 grid, agent at (1, 1), no walls, and a single
    # passenger at (8, 4) bound for (2, 2).
    agent_attrs = {"x": 1, "y": 1, "has_passenger": 0}
    passenger_attrs = [{"x": 8, "y": 4, "dest_x": 2, "dest_y": 2, "in_taxi": 0}]
    taxi_world = TaxiOOMDP(10, 10, agent=agent_attrs, walls=[], passengers=passenger_attrs)


if __name__ == "__main__":
    main()
| 31.477099 | 137 | 0.587486 | 6,937 | 0.841154 | 0 | 0 | 0 | 0 | 0 | 0 | 2,625 | 0.318298 |
70c96909cdb1ba2109525248284a595c630feada | 2,176 | py | Python | tests/bugs/core_2923_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2022-02-05T11:37:13.000Z | 2022-02-05T11:37:13.000Z | tests/bugs/core_2923_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-09-03T11:47:00.000Z | 2021-09-03T12:42:10.000Z | tests/bugs/core_2923_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-06-30T14:14:16.000Z | 2021-06-30T14:14:16.000Z | #coding:utf-8
#
# id: bugs.core_2923
# title: Problem with dependencies between a procedure and a view using that procedure
# decription:
# tracker_id: CORE-2923
# min_versions: ['2.5.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
set term ^;
create procedure sp_test returns (i smallint) as
begin
i = 32767;
suspend;
end
^
create view v0 as
select i
from sp_test
^
alter procedure sp_test returns (i int) as
begin
i = 32768;
suspend;
end
^
set term ;^
commit;
---
create table t1 (n1 smallint);
insert into t1(n1) values(32767);
commit;
create view v1 as
select *
from t1;
alter table t1 alter n1 type integer;
commit;
insert into t1(n1) values(32768);
commit;
---
create table t2 (n2 smallint);
insert into t2(n2) values(32767);
commit;
create domain d2 integer;
create view v2 as
select * from t2;
alter table t2 alter n2 type d2;
insert into t2(n2) values(32768);
commit;
---
set list on;
select '0' as test_no, v.* from v0 v
union all
select '1', v.* from v1 v
union all
select '2', v.* from v2 v
;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
TEST_NO 0
I 32768
TEST_NO 1
I 32767
TEST_NO 1
I 32768
TEST_NO 2
I 32767
TEST_NO 2
I 32768
"""
# Regression test for CORE-2923: run only against Firebird 3.0+.
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
    # Execute the DDL/DML script and compare isql output with the expected
    # view contents after each column's type was widened via ALTER.
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout
| 19.428571 | 93 | 0.529412 | 0 | 0 | 0 | 0 | 183 | 0.084099 | 0 | 0 | 1,717 | 0.789063 |
70c98f390066819db18b2eabe5323591400b47e2 | 5,582 | py | Python | day_09.py | bob-white/advent_2018 | 4139359b322eb42ac6cfef9e574e43bc91caa62d | [
"MIT"
] | 1 | 2018-12-02T05:41:56.000Z | 2018-12-02T05:41:56.000Z | day_09.py | bob-white/advent_2018 | 4139359b322eb42ac6cfef9e574e43bc91caa62d | [
"MIT"
] | null | null | null | day_09.py | bob-white/advent_2018 | 4139359b322eb42ac6cfef9e574e43bc91caa62d | [
"MIT"
] | null | null | null | """
--- Day 9: Marble Mania ---
You talk to the Elves while you wait for your navigation system to initialize. To pass the time, they introduce you to their favorite marble game.
The Elves play this game by taking turns arranging the marbles in a circle according to very particular rules. The marbles are numbered starting with 0 and increasing by 1 until every marble has a number.
First, the marble numbered 0 is placed in the circle. At this point, while it contains only a single marble, it is still a circle: the marble is both clockwise from itself and counter-clockwise from itself. This marble is designated the current marble.
Then, each Elf takes a turn placing the lowest-numbered remaining marble into the circle between the marbles that are 1 and 2 marbles clockwise of the current marble. (When the circle is large enough, this means that there is one marble between the marble that was just placed and the current marble.) The marble that was just placed then becomes the current marble.
However, if the marble that is about to be placed has a number which is a multiple of 23, something entirely different happens. First, the current player keeps the marble they would have placed, adding it to their score. In addition, the marble 7 marbles counter-clockwise from the current marble is removed from the circle and also added to the current player's score. The marble located immediately clockwise of the marble that was removed becomes the new current marble.
For example, suppose there are 9 players. After the marble with value 0 is placed in the middle, each player (shown in square brackets) takes a turn. The result of each of those turns would produce circles of marbles like this, where clockwise is to the right and the resulting current marble is in parentheses:
[-] (0)
[1] 0 (1)
[2] 0 (2) 1
[3] 0 2 1 (3)
[4] 0 (4) 2 1 3
[5] 0 4 2 (5) 1 3
[6] 0 4 2 5 1 (6) 3
[7] 0 4 2 5 1 6 3 (7)
[8] 0 (8) 4 2 5 1 6 3 7
[9] 0 8 4 (9) 2 5 1 6 3 7
[1] 0 8 4 9 2(10) 5 1 6 3 7
[2] 0 8 4 9 2 10 5(11) 1 6 3 7
[3] 0 8 4 9 2 10 5 11 1(12) 6 3 7
[4] 0 8 4 9 2 10 5 11 1 12 6(13) 3 7
[5] 0 8 4 9 2 10 5 11 1 12 6 13 3(14) 7
[6] 0 8 4 9 2 10 5 11 1 12 6 13 3 14 7(15)
[7] 0(16) 8 4 9 2 10 5 11 1 12 6 13 3 14 7 15
[8] 0 16 8(17) 4 9 2 10 5 11 1 12 6 13 3 14 7 15
[9] 0 16 8 17 4(18) 9 2 10 5 11 1 12 6 13 3 14 7 15
[1] 0 16 8 17 4 18 9(19) 2 10 5 11 1 12 6 13 3 14 7 15
[2] 0 16 8 17 4 18 9 19 2(20)10 5 11 1 12 6 13 3 14 7 15
[3] 0 16 8 17 4 18 9 19 2 20 10(21) 5 11 1 12 6 13 3 14 7 15
[4] 0 16 8 17 4 18 9 19 2 20 10 21 5(22)11 1 12 6 13 3 14 7 15
[5] 0 16 8 17 4 18(19) 2 20 10 21 5 22 11 1 12 6 13 3 14 7 15
[6] 0 16 8 17 4 18 19 2(24)20 10 21 5 22 11 1 12 6 13 3 14 7 15
[7] 0 16 8 17 4 18 19 2 24 20(25)10 21 5 22 11 1 12 6 13 3 14 7 15
The goal is to be the player with the highest score after the last marble is used up. Assuming the example above ends after the marble numbered 25, the winning score is 23+9=32 (because player 5 kept marble 23 and removed marble 9, while no other player got any points in this very short example game).
Here are a few more examples:
10 players; last marble is worth 1618 points: high score is 8317
13 players; last marble is worth 7999 points: high score is 146373
17 players; last marble is worth 1104 points: high score is 2764
21 players; last marble is worth 6111 points: high score is 54718
30 players; last marble is worth 5807 points: high score is 37305
What is the winning Elf's score?
Your puzzle answer was 398242.
--- Part Two ---
Amused by the speed of your answer, the Elves are curious:
What would the new winning Elf's score be if the number of the last marble were 100 times larger?
Your puzzle answer was 3273842452.
Both parts of this puzzle are complete! They provide two gold stars: **
"""
import re
from itertools import cycle
from typing import List, Tuple, Dict
from collections import deque, defaultdict
# Part 1 and 2 have been placed in the input file, so running once will work.


def winning_score(players: int, marbles: int) -> int:
    """Return the winning elf's score for one marble game.

    Args:
        players: Number of elves taking turns.
        marbles: Value of the last marble played (marbles 0..marbles are used).

    Returns:
        The highest score across all players (0 if no marble ever scored).
    """
    # Rotating a deque is way faster than inserting nodes into a list.
    circle: deque = deque()
    scores: Dict[int, int] = defaultdict(int)
    for marble, player in zip(range(marbles + 1), cycle(range(players))):
        if marble and not marble % 23:
            # Keep this marble, rotate back by 7, and claim that marble too.
            circle.rotate(-7)
            scores[player] += marble + circle.pop()
        else:
            # Rotate the circle by 2, then place the new marble.
            circle.rotate(2)
            circle.append(marble)
    # default=0 guards games too short for any multiple-of-23 marble.
    return max(scores.values(), default=0)


def main() -> None:
    """Parse each game line in 'day_09.input' and print its result.

    Sample lines include the expected high score (printing True/False);
    real puzzle lines print the computed winning score.
    """
    with open('day_09.input', 'r') as f:
        data = f.read()

    games: List[Tuple[int, ...]] = [tuple(map(int, re.findall(r'\d+', line)))
                                    for line in data.split('\n')]
    for game in games:
        # high_score is present only for the worked examples.
        players, marbles, *high_score = game
        score = winning_score(players, marbles)
        if high_score:
            print(score == high_score[0])
        else:
            print(score)


# Bug fix: the original ran all of this at import time; guard the entry point.
if __name__ == '__main__':
    main()
| 52.168224 | 473 | 0.67431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,752 | 0.851308 |
70cae01a3bbf35c083ab9e6723658c85595554ce | 983 | py | Python | problems/test_ic_1_stock_prices.py | gregdferrell/algo | 974ae25b028d49bcb7ded6655a7e11dcf6aa221d | [
"MIT"
] | null | null | null | problems/test_ic_1_stock_prices.py | gregdferrell/algo | 974ae25b028d49bcb7ded6655a7e11dcf6aa221d | [
"MIT"
] | null | null | null | problems/test_ic_1_stock_prices.py | gregdferrell/algo | 974ae25b028d49bcb7ded6655a7e11dcf6aa221d | [
"MIT"
] | null | null | null | from .ic_1_stock_prices import stock_prices_1_brute_force, stock_prices_2_greedy
def test_stock_price_algorithms_lose():
    # Strictly falling prices: both solvers must report the best "loss" of -1.
    prices = [10, 9, 7]
    for solver in (stock_prices_1_brute_force, stock_prices_2_greedy):
        assert solver(prices) == -1
def test_stock_price_algorithms_no_gain():
    # Flat-to-falling prices: best achievable profit is exactly zero.
    prices = [2, 2, 1, 1]
    for solver in (stock_prices_1_brute_force, stock_prices_2_greedy):
        assert solver(prices) == 0
def test_stock_price_algorithms_gain_1():
    # Monotonically rising prices: buy at 1, sell at 5 -> profit 4.
    prices = [1, 2, 3, 4, 5]
    for solver in (stock_prices_1_brute_force, stock_prices_2_greedy):
        assert solver(prices) == 4
def test_stock_price_algorithms_gain_2():
    # Dip then rise: buy at 5, sell at 11 -> profit 6.
    prices = [10, 7, 5, 8, 11, 9]
    for solver in (stock_prices_1_brute_force, stock_prices_2_greedy):
        assert solver(prices) == 6
def test_stock_price_algorithms_gain_3():
    # Zig-zag prices: buy at 6, sell at 12 -> profit 6.
    prices = [9, 8, 10, 7, 11, 6, 12]
    for solver in (stock_prices_1_brute_force, stock_prices_2_greedy):
        assert solver(prices) == 6
| 30.71875 | 80 | 0.787386 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
70cd06f1a4e2360da81c2eda5df37352f1b549eb | 762 | py | Python | qctrl_api/control_api/models.py | bibek-Neupane/back-end-challenge | d5a7b33adaa59e5ad566ac7435132c990f80a740 | [
"Apache-2.0"
] | null | null | null | qctrl_api/control_api/models.py | bibek-Neupane/back-end-challenge | d5a7b33adaa59e5ad566ac7435132c990f80a740 | [
"Apache-2.0"
] | null | null | null | qctrl_api/control_api/models.py | bibek-Neupane/back-end-challenge | d5a7b33adaa59e5ad566ac7435132c990f80a740 | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
class Control(models.Model):
    """A quantum control pulse with a type, Rabi rate, and polar angle."""
    # Explicit default manager; Django would add `objects` automatically,
    # but it is declared here for clarity.
    objects=models.Manager()
    # (stored value, display label) pairs for the `type` field.
    # NOTE(review): the label 'CORPSE' is cased differently from its stored
    # value 'Corpse' and from the other entries -- confirm this is intended.
    TYPE_CHOICES=(
        ('Primitive','Primitive'),
        ('Corpse','CORPSE'),
        ('Gaussian','Gaussian'),
        ('CinBB','CinBB'),
    )
    #pk i.e id --> Referred to as pk while we use it as a lookup variable
    # Human-readable name; also used as the model's str() representation.
    name = models.CharField(max_length=200)
    # One of TYPE_CHOICES; defaults to 'Primitive'.
    type = models.CharField(max_length=200, choices=TYPE_CHOICES, default='Primitive')
    # Validated ranges: rabi rate in [0, 100], polar angle in [0, 1].
    maximum_rabi_rate = models.FloatField(validators = [MinValueValidator(0), MaxValueValidator(100)])
    polar_angle = models.FloatField(validators = [MinValueValidator(0), MaxValueValidator(1)])
    def __str__(self):
        # Show the control's name in the admin and shell.
        return self.name
| 34.636364 | 102 | 0.692913 | 659 | 0.864829 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.198163 |
70cf5a0e46bf75e6b641daf22edae36bebfe4306 | 1,542 | py | Python | pyNastran/bdf/mesh_utils/mesh.py | numenic/pyNastran | fd5d3f0bf18db6595d85b9ac152f611e23122a68 | [
"BSD-3-Clause"
] | null | null | null | pyNastran/bdf/mesh_utils/mesh.py | numenic/pyNastran | fd5d3f0bf18db6595d85b9ac152f611e23122a68 | [
"BSD-3-Clause"
] | null | null | null | pyNastran/bdf/mesh_utils/mesh.py | numenic/pyNastran | fd5d3f0bf18db6595d85b9ac152f611e23122a68 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from pyNastran.bdf.cards.aero.utils import (
points_elements_from_quad_points, create_axisymmetric_body)
def create_structured_cquad4s(model, pid,
                              p1, p2, p3, p4, nx, ny, nid=1, eid=1, theta_mcid=0.):
    """
    Adds a structured grid of GRID points and CQUAD4 elements to the model.

    Parameters
    ----------
    model : BDF
        the model the nodes/elements are added to
    pid : int
        property id assigned to each new CQUAD4
    p1 / p2 / p3 / p4 : (3, ) float ndarray
        points defining the quad
    nx : int
        points in the p1-p2 direction
    ny : int
        points in the p1-p4 direction
    nid / eid : int
        node / element id offset
    theta_mcid : float; default=0.
        passed through to each CQUAD4 (material angle / coordinate id)

    Returns
    -------
    nid : int
        the next unused node id
    eid : int
        the next unused element id
    """
    nid0 = nid
    # Parametric coordinates in [0, 1] along each edge of the quad.
    x = np.linspace(0., 1., nx + 1)
    y = np.linspace(0., 1., ny + 1)
    points, elements = points_elements_from_quad_points(p1, p2, p3, p4, x, y, dtype='int32')
    for point in points:
        model.add_grid(nid, point)
        nid += 1
    # Element connectivity is 0-based; shift it by the starting node id.
    for node_ids in elements + nid0:
        model.add_cquad4(eid, pid, node_ids, theta_mcid=theta_mcid)
        eid += 1
    return nid, eid
#def cone3d(model, pid,
#xs, radius, nx=5, ntheta=10, endpoint=True):
#"""
#create a cone by segments in 3d
#xs = [0., 1., 2.]
#radius = [1., 2., 4.]
#>>> cone(model, pid, xs, radius1, radius2)
#"""
#xstation = np.asarray(xstation)
#ystation = np.zeros(xstation.shape)
#zstation = np.zeros(xstation.shape)
#aspect_ratio = 1.0
#xyz_elems = create_axisymmetric_body(
#nx, aspect_ratio,
#xstation, ystation, zstation, radii,
#p1, dy, dz)
#return
| 27.052632 | 92 | 0.56096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 787 | 0.510376 |
70cf7afe354a978de44df2e05db5b47dec3c12de | 19,755 | py | Python | trtools/dumpSTR/tests/test_filters.py | Kulivox/TRTools | ea05f9126f5145405cced8fd85821ce929657b3a | [
"MIT"
] | 14 | 2020-04-20T15:38:52.000Z | 2022-02-07T11:45:23.000Z | trtools/dumpSTR/tests/test_filters.py | Kulivox/TRTools | ea05f9126f5145405cced8fd85821ce929657b3a | [
"MIT"
] | 74 | 2020-03-02T23:34:53.000Z | 2022-03-21T18:32:10.000Z | trtools/dumpSTR/tests/test_filters.py | Kulivox/TRTools | ea05f9126f5145405cced8fd85821ce929657b3a | [
"MIT"
] | 15 | 2018-10-29T19:41:33.000Z | 2020-02-21T18:41:51.000Z | import argparse
import os,sys
import pytest
from ..dumpSTR import *
from ..filters import *
def base_argparse(tmpdir):
    """Return an argument object with every dumpSTR option at its default.

    Tests overwrite individual attributes before building filters.

    Fix: the original instantiated ``argparse.ArgumentParser()`` and used it
    as a plain attribute bag; ``argparse.Namespace`` is the container actually
    intended for that and supports identical attribute access.

    Args:
        tmpdir: pytest tmpdir/Path fixture; ``args.out`` is placed beneath it.

    Returns:
        argparse.Namespace: default dumpSTR arguments.
    """
    args = argparse.Namespace()
    args.vcf = None
    args.vcftype = "auto"
    args.out = str(tmpdir / "test")
    args.min_locus_callrate = None
    args.min_locus_hwep = None
    args.min_locus_het = None
    args.max_locus_het = None
    args.use_length = False
    args.filter_regions = None
    args.filter_regions_names = None
    args.filter_hrun = False
    args.drop_filtered = False
    args.hipstr_min_call_DP = None
    args.hipstr_max_call_DP = None
    args.hipstr_min_call_Q = None
    args.hipstr_max_call_flank_indel = None
    args.hipstr_max_call_stutter = None
    args.hipstr_min_supp_reads = None
    args.gangstr_expansion_prob_het = None
    args.gangstr_expansion_prob_hom = None
    args.gangstr_expansion_prob_total = None
    args.gangstr_filter_span_only = False
    args.gangstr_filter_spanbound_only = False
    args.gangstr_filter_badCI = None
    args.gangstr_require_support = None
    args.gangstr_readlen = None
    args.gangstr_min_call_DP = None
    args.gangstr_max_call_DP = None
    args.gangstr_min_call_Q = None
    args.advntr_min_call_DP = None
    args.advntr_max_call_DP = None
    args.advntr_min_spanning = None
    args.advntr_min_flanking = None
    args.advntr_min_ML = None
    args.eh_min_ADFL = None
    args.eh_min_ADIR = None
    args.eh_min_ADSP = None
    args.eh_min_call_LC = None
    args.eh_max_call_LC = None
    args.popstr_min_call_DP = None
    args.popstr_max_call_DP = None
    args.popstr_require_support = None
    args.num_records = None
    args.die_on_warning = False
    args.verbose = False
    args.zip = False
    return args
class EmptyLocInfo:
    """Stand-in for the per-locus stats mapping used by ApplyLocusFilters.

    Reads always yield 0 (so counters can be incremented) and writes are
    silently discarded -- no state is kept.
    """

    def __getitem__(self, key):
        # Zero works for any counter the filters try to increment.
        return 0

    def __setitem__(self, key, value):
        # Discard the write; this dummy deliberately keeps nothing.
        return None
class VCFRec:
    """Minimal stand-in for a VCF record: exposes only the FILTER string."""

    def __init__(self):
        # Starts unfiltered; filters append their names here.
        self.FILTER: str = ''
class DummyRecBase:
    """Shared scaffolding for the fake TR records used by the filter tests."""

    def __init__(self):
        self.vcfrecord = VCFRec()  # carries the FILTER string
        self.info = {}             # stand-in for VCF INFO fields
        self.format = {}           # stand-in for VCF FORMAT fields

    def GetNumSamples(self):
        # Three samples total; see GetCalledSamples for which are called.
        return 3

    def GetCalledSamples(self):
        # Samples 0 and 1 are called, sample 2 is a no-call.
        return np.array([True, True, False])
def test_CallRateFilter(tmpdir):
    # Loci with call rate below --min-locus-callrate are filtered;
    # loci at or above the threshold pass.
    class TestCallRateRec(DummyRecBase):
        def GetCallRate(self):
            # Fixed call rate for the fake locus.
            return 0.5
    args = base_argparse(tmpdir)
    args.min_locus_callrate = 0.7
    locus_filters = BuildLocusFilters(args)
    # 0.5 < 0.7 -> locus is filtered.
    assert ApplyLocusFilters(
        TestCallRateRec(), locus_filters, EmptyLocInfo(), False
    )
    args = base_argparse(tmpdir)
    args.min_locus_callrate = 0.3
    locus_filters = BuildLocusFilters(args)
    # 0.5 >= 0.3 -> locus passes.
    assert not ApplyLocusFilters(
        TestCallRateRec(), locus_filters, EmptyLocInfo(), False
    )
def test_HWEFilter(tmpdir):
    # Loci whose Hardy-Weinberg p-value falls below --min-locus-hwep are
    # filtered.  Sequence- vs length-based genotypes give different p-values.
    class TestHWERec(DummyRecBase):
        def __init__(self):
            super().__init__()
        def GetGenotypeCounts(self, uselength=False):
            if not uselength:
                return {
                    ('ATATAT', 'ATATAT'): 2,
                    ('ATATAT', 'ATAAAT'): 2,
                    ('ATATAT', 'ATATATAT'): 1,
                    ('ATAAAT', 'ATAAAT'): 2,
                    ('ATAAAT', 'ATATATAT'): 1,
                    ('ATATATAT', 'ATATATAT'): 2
                }
                # actual het = 0.4
                # exp het : 1-2*(7/20)**2-(6/20)**2 = 0.665
                # p = 0.95
            else:
                return {
                    (3, 3): 6,
                    (3, 4): 2,
                    (4, 4): 2
                }
                # actual het = 0.2
                # exp het : 1 - .7**2 - .3**2 = 0.42
                # p = 0.21
        def GetAlleleFreqs(self, uselength=False):
            if not uselength:
                return {'ATATAT': .35, 'ATAAAT': .35, 'ATATATAT': .3}
            else:
                return {3: .7, 4: .3}
    def run_test(thresh, passes, uselength=False):
        # passes == True means the locus should NOT be filtered at `thresh`.
        args = base_argparse(tmpdir)
        args.min_locus_hwep = thresh
        args.use_length = uselength
        locus_filters = BuildLocusFilters(args)
        assert passes != ApplyLocusFilters(
            TestHWERec(), locus_filters, EmptyLocInfo(), False
        )
    # Length-based genotypes (p ~= 0.21 per the comments above).
    run_test(0.05, True, uselength = True)
    run_test(0.1, True, uselength = True)
    run_test(0.3, False, uselength = True)
    # Sequence-based genotypes.
    run_test(0.05, True)
    run_test(0.1, False)
    run_test(0.3, False)
def test_HETFilter(tmpdir):
    # Heterozygosity thresholds: --min-locus-het filters loci below the
    # threshold, --max-locus-het filters loci above it.  The same record is
    # run through both to check they are complementary.
    class TestHETRec(DummyRecBase):
        def __init__(self, count_3_1, count_3_2, count_4, count_5):
            super().__init__()
            # Allele counts: two distinct 3-copy sequences, one 4-copy, one 5-copy.
            self.count_3_1 = count_3_1
            self.count_3_2 = count_3_2
            self.count_4 = count_4
            self.count_5 = count_5
        def GetAlleleFreqs(self, uselength=False):
            total = (self.count_3_1 + self.count_3_2 + self.count_4 +
                     self.count_5)
            if not uselength:
                return {
                    'ATATAT': self.count_3_1/total,
                    'ATAAAT': self.count_3_2/total,
                    'ATATATAT': self.count_4/total,
                    'ATATATATAT': self.count_5/total
                }
            else:
                # By length the two 3-copy sequences collapse into one allele.
                return {
                    3: (self.count_3_1 + self.count_3_2)/total,
                    4: self.count_4/total,
                    5: self.count_5/total
                }
    def run_test(freq_list, thresh, higher, uselength=False):
        # higher == True means the locus heterozygosity exceeds `thresh`,
        # so min-het passes it and max-het filters it.
        args = base_argparse(tmpdir)
        args.min_locus_het = thresh
        args.use_length = uselength
        locus_filters = BuildLocusFilters(args)
        assert higher != ApplyLocusFilters(
            TestHETRec(*freq_list), locus_filters, EmptyLocInfo(), False
        )
        args = base_argparse(tmpdir)
        args.max_locus_het = thresh
        args.use_length = uselength
        locus_filters = BuildLocusFilters(args)
        assert higher == ApplyLocusFilters(
            TestHETRec(*freq_list), locus_filters, EmptyLocInfo(), False
        )
    run_test([0.25, 0.25, 0.25, 0.25], 0.7, True)
    run_test([0.25, 0.25, 0.25, 0.25], 0.7, False, uselength=True)
    run_test([0.25, 0.25, 0.25, 0.25], 0.8, False)
def test_RegionFilter(tmpdir, vcfdir):
    # Loci overlapping a user-supplied BED region get that region's name in
    # FILTER; loci overlapping several regions get all names, ';'-joined.
    class TestRegionRec(DummyRecBase):
        def __init__(self, chrom, pos):
            super().__init__()
            self.pos = pos
            self.chrom = chrom
            self.ref_allele_length = 10
    args = base_argparse(tmpdir)
    # Two BED files mapped to the names 'foo' and 'bar' respectively.
    args.filter_regions = (vcfdir + '/dumpSTR_vcfs/sample_region.bed.gz,'
                           + vcfdir + '/dumpSTR_vcfs/sample_region2.bed.gz')
    args.filter_regions_names = 'foo,bar'
    locus_filters = BuildLocusFilters(args)
    # Inside region 'foo' only.
    test_foo = TestRegionRec('chr21', 9487191)
    assert ApplyLocusFilters(test_foo, locus_filters, EmptyLocInfo(), False)
    assert test_foo.vcfrecord.FILTER == 'foo'
    # Inside neither region -> passes.
    test_not = TestRegionRec('chr21', 9487171)
    assert not ApplyLocusFilters(test_not, locus_filters, EmptyLocInfo(), False)
    assert test_not.vcfrecord.FILTER == 'PASS'
    # Overlaps both regions -> both names recorded.
    test_both = TestRegionRec('chr21', 9487291)
    assert ApplyLocusFilters(test_both, locus_filters, EmptyLocInfo(), False)
    assert test_both.vcfrecord.FILTER == 'foo;bar'
    # Positions on chr20 covered only by the 'bar' BED file.
    test_bar1 = TestRegionRec('chr20', 30)
    assert ApplyLocusFilters(test_bar1, locus_filters, EmptyLocInfo(), False)
    assert test_bar1.vcfrecord.FILTER == 'bar'
    test_bar2 = TestRegionRec('chr20', 230)
    assert ApplyLocusFilters(test_bar2, locus_filters, EmptyLocInfo(), False)
    assert test_bar2.vcfrecord.FILTER == 'bar'
    # chr20 position between the 'bar' intervals -> passes.
    test_not_bar = TestRegionRec('chr20', 130)
    assert not ApplyLocusFilters(test_not_bar, locus_filters, EmptyLocInfo(), False)
    assert test_not_bar.vcfrecord.FILTER == 'PASS'
def test_HRUNFilter(tmpdir):
    """--filter-hrun: loci are filtered based on homopolymer runs, judged on
    the full-string alleles when present, otherwise on the reference allele."""
    class TestHRUNRec(DummyRecBase):
        def __init__(self, ref, period, full=None):
            super().__init__()
            self.ref_allele = ref
            if full is not None:
                self.full_alleles = (full, None)
            # Bug fix: the original first assigned `self.full = full is not None`
            # and then immediately overwrote it with `self.full = full`; the
            # dead first assignment has been removed.
            self.full = full
            self.info['PERIOD'] = period
        def HasFullStringGenotypes(self):
            # Truthy exactly when a full-string allele was supplied.
            return self.full
    args = base_argparse(tmpdir)
    args.filter_hrun = True
    locus_filters = BuildLocusFilters(args)
    for bp in {'A', 'T', 'G', 'C'}:
        # Pure homopolymer of length == period -> expected filtered.
        assert ApplyLocusFilters(
            TestHRUNRec(bp*5, 5), locus_filters, EmptyLocInfo(), False
        )
        # Run shorter than the period -> expected to pass.
        assert not ApplyLocusFilters(
            TestHRUNRec(bp*5, 6), locus_filters, EmptyLocInfo(), False
        )
        assert ApplyLocusFilters(
            TestHRUNRec(bp*6, 6), locus_filters, EmptyLocInfo(), False
        )
    # Heteropolymer repeat unit at period 5 -> expected to pass.
    assert not ApplyLocusFilters(
        TestHRUNRec('TTTTATTTT', 5), locus_filters, EmptyLocInfo(), False
    )
    # Contains an internal TTTTT run -> expected filtered.
    assert ApplyLocusFilters(
        TestHRUNRec('ATTTTATTTTATTTTATTTTTATTTTATTTTATTTT', 5), locus_filters,
        EmptyLocInfo(), False
    )
    # Full-string allele (not the ref) carries the TTTTT run -> filtered.
    assert ApplyLocusFilters(
        TestHRUNRec('TTTTATTTTATTTTA', 5, full='TTTTTATTTTATTTTA'),
        locus_filters, EmptyLocInfo(), False
    )
def test_HipstrMaxCallFlankIndel(tmpdir):
    """Calls whose flank-indel read fraction (DFLANKINDEL/DP) exceeds the
    threshold are flagged with that fraction; no-calls stay untouched."""
    class TestRec(DummyRecBase):
        def __init__(self):
            super().__init__()
            # Sample 0: 10/20 = 0.5 (over threshold); sample 1: 5/20 = 0.25
            # (under); sample 2: no-call.
            self.format['DFLANKINDEL'] = np.array([10, 5, np.nan]).reshape(-1, 1)
            self.format['DP'] = np.array([20, 20, np.nan]).reshape(-1, 1)
    args = base_argparse(tmpdir)
    args.hipstr_max_call_flank_indel = 0.4
    call_filters = BuildCallFilters(args)
    assert len(call_filters) == 1
    out = call_filters[0](TestRec())
    # Fix: removed a leftover debug print(out) present in the original.
    assert out[0] == pytest.approx(0.5)
    assert np.isnan(out[1])
    assert np.isnan(out[2])  # don't apply filter to nocalls
def test_HipstrMaxCallStutter(tmpdir):
    # Calls whose stutter-read fraction (DSTUTTER/DP) exceeds the threshold
    # are flagged with that fraction; no-call (nan) samples are untouched.
    class TestRec(DummyRecBase):
        def __init__(self):
            super().__init__()
            # Sample 0: 10/20 = 0.5 (over); sample 1: 5/20 = 0.25 (under);
            # sample 2: no-call.
            self.format['DSTUTTER'] = np.array([10, 5, np.nan]).reshape(-1, 1)
            self.format['DP'] = np.array([20, 20, np.nan]).reshape(-1,1)
    args = base_argparse(tmpdir)
    args.hipstr_max_call_stutter = 0.4
    call_filters = BuildCallFilters(args)
    assert len(call_filters) == 1
    out = call_filters[0](TestRec())
    assert out[0] == pytest.approx(0.5)
    assert np.isnan(out[1])
    assert np.isnan(out[2])  # don't apply filter to nocalls
def test_HipstrMinSuppReads(tmpdir):
    # Calls where a genotyped allele has fewer supporting reads (per
    # ALLREADS) than --hipstr-min-supp-reads are flagged with the offending
    # read count; passing calls yield nan.
    class TestRec(DummyRecBase):
        def __init__(self):
            super().__init__()
            # ALLREADS: 'allele|readcount' pairs; '' / '.' mean missing.
            self.format['ALLREADS'] = np.array([
                '0|23;1|123;2|5', '0|15;1|23;2|7',
                '0|23;1|444;2|12', '0|23;1|32;2|66',
                '0|867;1|23;2|13', '0|848;1|92;2|483',
                '', '', '.'])
            # GB: the genotyped allele pair for each sample.
            self.format['GB'] = np.array(['1|1', '1|1', '1|2', '2|1', '2|0',
                                          '0|2', '1|1', '0|0', '1|0'])
        def GetNumSamples(self):
            return 9
        def GetCalledSamples(self):
            # Last two samples are no-calls.
            return np.array([True, True, True, True, True, True, True, False, False])
    args = base_argparse(tmpdir)
    args.hipstr_min_supp_reads = 50
    call_filters = BuildCallFilters(args)
    assert len(call_filters) == 1
    out = call_filters[0](TestRec())
    # nan = passed; otherwise the minimum support among genotyped alleles.
    assert np.isnan(out[0])
    assert out[1] == 23
    assert out[2] == 12
    assert out[3] == 32
    assert out[4] == 13
    assert np.isnan(out[5])
    assert out[6] == 0  # If ALLREADS is missing, filter
    assert np.isnan(out[7])  # don't apply filter to nocalls
    assert np.isnan(out[8])  # don't apply filter to nocalls
def test_HipstrMinSuppReads_no_called_samples_with_reads(tmpdir):
    # Edge case: every sample with usable ALLREADS data is a no-call, and
    # the only called samples have missing ('' / '.') ALLREADS entries.
    class TestRec(DummyRecBase):
        def __init__(self):
            super().__init__()
            self.format['ALLREADS'] = np.array([
                '0|23;1|123;2|5', '0|15;1|23;2|7',
                '0|23;1|444;2|12', '0|23;1|32;2|66',
                '0|867;1|23;2|13', '0|848;1|92;2|483',
                '', '', '.'])
            self.format['GB'] = np.array(['1|1', '1|1', '1|2', '2|1', '2|0',
                                          '0|2', '1|1', '0|0', '1|0'])
        def GetNumSamples(self):
            return 9
        def GetCalledSamples(self):
            # Only samples 6 and 8 are called -- both lack ALLREADS.
            return np.array([False, False, False, False, False, False, True, False, True])
    args = base_argparse(tmpdir)
    args.hipstr_min_supp_reads = 50
    call_filters = BuildCallFilters(args)
    assert len(call_filters) == 1
    out = call_filters[0](TestRec())
    assert out.shape == (9,)
    # Called samples with missing ALLREADS are filtered with value 0;
    # all no-calls are left as nan.
    assert np.all(out[[6,8]] == 0)
    assert np.all(np.isnan(out[[0,1,2,3,4,5,7]]))
def test_HipstrDP(tmpdir):
    # Depth (DP) thresholds: min flags calls below the cutoff, max flags
    # calls above it; flagged calls report their DP, passing calls are nan,
    # and no-calls are never flagged.
    class TestRec(DummyRecBase):
        def __init__(self):
            super().__init__()
            self.format['DP'] = np.array([10,20, np.nan]).reshape(-1,1)
    args = base_argparse(tmpdir)
    args.hipstr_min_call_DP = 15
    call_filters = BuildCallFilters(args)
    assert len(call_filters) == 1
    out = call_filters[0](TestRec())
    # DP=10 < 15 -> flagged with the offending depth.
    assert out[0] == 10
    assert np.isnan(out[1])
    assert np.isnan(out[2])  # don't apply filter to nocalls
    args = base_argparse(tmpdir)
    args.hipstr_max_call_DP = 15
    call_filters = BuildCallFilters(args)
    assert len(call_filters) == 1
    out = call_filters[0](TestRec())
    # DP=20 > 15 -> flagged.
    assert out[1] == 20
    assert np.isnan(out[0])
    assert np.isnan(out[2])  # don't apply filter to nocalls
def test_HipstrMinCallQ(tmpdir):
    # Calls with quality Q below --hipstr-min-call-Q are flagged with their
    # Q value; passing calls and no-calls yield nan.
    class TestRec(DummyRecBase):
        def __init__(self):
            super().__init__()
            self.format['Q'] = np.array([.5, .9, np.nan]).reshape(-1, 1)
    args = base_argparse(tmpdir)
    args.hipstr_min_call_Q = 0.6
    call_filters = BuildCallFilters(args)
    assert len(call_filters) == 1
    out = call_filters[0](TestRec())
    # Q=0.5 < 0.6 -> flagged; Q=0.9 passes.
    assert out[0] == pytest.approx(0.5)
    assert np.isnan(out[1])
    assert np.isnan(out[2])  # don't apply filter to nocalls
'''
Test needs to be updated from old pyvcf
syntax to new cyvcf2 syntax
def test_GangSTRFilters(tmpdir, vcfdir):
args = base_argparse(tmpdir)
fname = os.path.join(vcfdir, "artificial_gangstr.vcf")
args.vcf = fname
args.vcftype = "gangstr"
artificial_vcf = vcf.Reader(filename=args.vcf)
## Test1: call passes with no filter
vcfcall = vcf.model._Call # Blank call
call_filters = []
reasons1 = FilterCall(vcfcall, call_filters)
assert reasons1==[]
# Line 1 of artificial vcf
record1 = next(artificial_vcf)
call1 = record1.samples[0]
## Check call1 attributes:
assert record1.CHROM=='chr1'
assert record1.POS==3004986
assert record1.REF=='tctgtctgtctg'
assert record1.INFO['RU']=='tctg'
assert call1['DP']==31
assert call1['Q']==0.999912
assert call1['QEXP']==[0.0, 5.1188e-05, 0.999949]
assert call1['RC']=='17,12,0,2'
## Test2: call filter: GangSTRCallMinDepth
vcfcall = call1
args = base_argparse(tmpdir)
args.gangstr_min_call_DP = 50
call_filters = BuildCallFilters(args)
reasons2 = FilterCall(vcfcall, call_filters)
assert reasons2==['GangSTRCallMinDepth']
## Test3: call filter: GangSTRCallMaxDepth
vcfcall = call1
args = base_argparse(tmpdir)
args.gangstr_max_call_DP = 10
call_filters = BuildCallFilters(args)
reasons3 = FilterCall(vcfcall, call_filters)
assert reasons3==['GangSTRCallMaxDepth']
## Test4: call filter: GangSTRCallMinQ
vcfcall = call1
args = base_argparse(tmpdir)
args.gangstr_min_call_Q = 1
call_filters = BuildCallFilters(args)
reasons4 = FilterCall(vcfcall, call_filters)
assert reasons4==['GangSTRCallMinQ']
## Test5: call filter: GangSTRCallExpansionProbHom
vcfcall = call1
args = base_argparse(tmpdir)
args.gangstr_expansion_prob_hom = 1
call_filters = BuildCallFilters(args)
reasons5 = FilterCall(vcfcall, call_filters)
assert reasons5==['GangSTRCallExpansionProbHom']
## Test6: call filter: ProbHet
vcfcall = call1
args = base_argparse(tmpdir)
args.gangstr_expansion_prob_het = 0.8
call_filters = BuildCallFilters(args)
reasons6 = FilterCall(vcfcall, call_filters)
assert reasons6==['GangSTRCallExpansionProbHet']
args = base_argparse(tmpdir)
args.gangstr_expansion_prob_het = 0.0
call_filters = BuildCallFilters(args)
reasons61 = FilterCall(vcfcall, call_filters)
assert reasons61==[]
# Line 2 of artificial vcf
record2 = next(artificial_vcf)
call2 = record2.samples[0]
## Check call2 attributes:
assert record2.CHROM=='chr1'
assert record2.POS==3005549
assert record2.REF=='aaaacaaaacaaaacaaaac'
assert record2.INFO['RU']=='aaaac'
assert call2['DP']==11
assert call2['Q']==1
assert call2['QEXP']==[0.8, 0.2, 0]
assert call2['RC']=='0,11,0,0'
## Test7: call filter: GangSTRCallExpansionProTotal
vcfcall = call2
args = base_argparse(tmpdir)
args.gangstr_expansion_prob_total = 1
call_filters = BuildCallFilters(args)
reasons7 = FilterCall(vcfcall, call_filters)
assert reasons7==['GangSTRCallExpansionProbTotal']
## Test8: call filter: filter span only
vcfcall = call1
args = base_argparse(tmpdir)
args.filter_span_only = True
call_filters = BuildCallFilters(args)
reasons71 = FilterCall(vcfcall, call_filters)
assert reasons71==[]
vcfcall = call2
args = base_argparse(tmpdir)
args.gangstr_filter_span_only = True
call_filters = BuildCallFilters(args)
reasons72 = FilterCall(vcfcall, call_filters)
assert reasons72==['GangSTRCallSpanOnly']
# Line 3 of artificial vcf
record3 = next(artificial_vcf)
call3 = record3.samples[0]
## Check call2 attributes:
assert record3.CHROM=='chr1'
assert record3.POS==3009351
assert record3.REF=='tgtgtgtgtgtgtgtgtgtgtgtgtgtgtgtgtgtgtgtgtgtgtgtgtgtg'
assert record3.INFO['RU']=='tg'
assert call3['DP']==20
assert call3['RC']=='0,10,0,10'
## Test8: call filter: filter span-bound only
vcfcall = call1
args = base_argparse(tmpdir)
args.gangstr_filter_spanbound_only = True
call_filters = BuildCallFilters(args)
reasons81 = FilterCall(vcfcall, call_filters)
assert reasons81==[]
vcfcall = call2
args = base_argparse(tmpdir)
args.gangstr_filter_spanbound_only = True
call_filters = BuildCallFilters(args)
reasons82 = FilterCall(vcfcall, call_filters)
assert reasons82==['GangSTRCallSpanBoundOnly']
vcfcall = call3
args = base_argparse(tmpdir)
args.gangstr_filter_spanbound_only = True
call_filters = BuildCallFilters(args)
reasons83 = FilterCall(vcfcall, call_filters)
assert reasons83==['GangSTRCallSpanBoundOnly']
# Line 4 of artificial vcf
record4 = next(artificial_vcf)
call4 = record4.samples[0]
assert record4.POS==3011925
# Check min supporting reads
# long compared to read length
vcfcall = call4
args = base_argparse(tmpdir)
args.gangstr_require_support = 2
args.gangstr_readlen = 20
call_filters = BuildCallFilters(args)
reason84 = FilterCall(vcfcall, call_filters)
assert reason84==['GangSTRCallRequireSupport']
# intermediate range compared to read length
vcfcall = call4
args = base_argparse(tmpdir)
args.gangstr_require_support = 2
args.gangstr_readlen = 400
call_filters = BuildCallFilters(args)
reason85 = FilterCall(vcfcall, call_filters)
assert reason85==['GangSTRCallRequireSupport']
# should see enclosing for everything. ultra-long read
vcfcall = call4
args = base_argparse(tmpdir)
args.gangstr_require_support = 2
args.gangstr_readlen = 10000
call_filters = BuildCallFilters(args)
reason85 = FilterCall(vcfcall, call_filters)
assert reason85==['GangSTRCallRequireSupport']
'''
| 33.711604 | 90 | 0.631891 | 5,315 | 0.269046 | 0 | 0 | 0 | 0 | 0 | 0 | 7,157 | 0.362288 |
70d0af6ad9af6dd93cea341a74bb165569785d3c | 1,749 | py | Python | src/tower.py | stove41/screeps | d3756785403757aa2cb7b2831c1ddbfb3b0026cd | [
"MIT"
] | null | null | null | src/tower.py | stove41/screeps | d3756785403757aa2cb7b2831c1ddbfb3b0026cd | [
"MIT"
] | 2 | 2022-02-01T04:41:55.000Z | 2022-02-01T06:39:08.000Z | src/tower.py | stove41/screeps | d3756785403757aa2cb7b2831c1ddbfb3b0026cd | [
"MIT"
] | null | null | null | from defs import *
__pragma__('noalias', 'name')
__pragma__('noalias', 'undefined')
__pragma__('noalias', 'Infinity')
__pragma__('noalias', 'keys')
__pragma__('noalias', 'get')
__pragma__('noalias', 'set')
__pragma__('noalias', 'type')
__pragma__('noalias', 'update')
class Tower:
def __init__(self, tower):
self.tower = tower
def get_target(self):
target = self.tower.pos.findClosestByRange(FIND_HOSTILE_CREEPS)
return target
def find_repairs(self):
nearest = self.tower.pos.findClosestByRange(FIND_STRUCTURES,
{"filter": lambda s:
((s.structureType == STRUCTURE_WALL and s.hits <= 50000)
or (s.structureType == STRUCTURE_RAMPART and s.hits <= 50000)
or s.structureType == STRUCTURE_ROAD
and s.hits < s.hitsMax)})
return nearest
def find_wounded(self):
wounded_creeps = self.tower.room.find(FIND_MY_CREEPS,
{"filter": lambda s:
s.hits / s.hitsMax <= 0.55})
return wounded_creeps
def run_tower(self):
if len(self.tower.room.find(FIND_HOSTILE_CREEPS)) != 0:
target = self.get_target()
self.tower.attack(target)
wounded_creeps = self.find_wounded()
if len(wounded_creeps) > 0:
for creep in wounded_creeps:
self.tower.heal(creep)
nearest = self.find_repairs()
if len(nearest) > 0:
self.tower.repair(nearest)
| 36.4375 | 114 | 0.520869 | 1,476 | 0.843911 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.082905 |
70d17a8fc5d57f5a8b35aadcc5b4ae250e597c06 | 6,081 | py | Python | code/crawling/glowpick.py | DataNetworkAnalysis/OliveYoung_for_Man | bdd2c432746309d71437855186a6aa0c12a60f7e | [
"MIT"
] | 6 | 2020-01-28T08:23:55.000Z | 2021-11-12T11:19:56.000Z | code/crawling/glowpick.py | DataNetworkAnalysis/OliveYoung_for_Man | bdd2c432746309d71437855186a6aa0c12a60f7e | [
"MIT"
] | null | null | null | code/crawling/glowpick.py | DataNetworkAnalysis/OliveYoung_for_Man | bdd2c432746309d71437855186a6aa0c12a60f7e | [
"MIT"
] | 1 | 2020-01-26T18:28:30.000Z | 2020-01-26T18:28:30.000Z | '''
실행 방법 : 아나콘다 프롬프트에서 main.py가 있는 폴더 경로에 아래 명령어 입력
python glowpick.py
데이터 목록
1. Category
2. Brand_Name
3. Product_Name
4. volume
5. price
6. Sales Rank
7. rate
8. nb_reviews
7. product_number
'''
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import pandas as pd
import os
import time
class GlowPick:
def __init__(self):
# url
self.url = 'https://www.glowpick.com/'
# self.driver setting
self.driver = webdriver.Chrome()
self.li_lst = None
def run(self):
# make total dataframe
total_df = pd.DataFrame()
# if there's file, load file and concatenate
if os.path.isfile('../dataset/glowpick_products.csv'):
df = pd.read_csv('../dataset/glowpick_products.csv')
total_df = pd.concat([total_df, df], axis=0)
print('total_df.shape: ',total_df.shape)
titla_nb = 17 # 크게 17가지 범주
for cat in range(1,titla_nb+1):
# glowpick main page
self.get_main(cat=cat)
# get title
title = self.driver.find_element_by_xpath(f'//*[@id="gp-home"]/section[1]/div[1]/form/fieldset[1]/div/div/ul/li[{cat}]/div[1]/span[1]').text
print('title: ',title)
print('len(self.li_lst): ',len(self.li_lst))
for i in range(len(self.li_lst)):
self.get_main(cat=cat)
self.driver.implicitly_wait(5)
category = self.li_lst[i].text
print()
print(f'{i} category: ',category)
# if category in total df, continue
if total_df.shape[0] > 0:
title_df = total_df[total_df['title'] == title]
if ('category' in title_df.columns) and (self.li_lst[i].text in title_df.category.unique()):
continue
# select category
self.driver.execute_script("arguments[0].click();", self.li_lst[i])
self.driver.implicitly_wait(5)
# search click
element = self.driver.find_element_by_xpath('//*[@id="gp-home"]/section[1]/div[1]/form/div/button').send_keys(Keys.ENTER)
self.driver.implicitly_wait(5)
# scroll down
self.scroll_down()
# crawling
df = self.crawling()
df['category'] = category
df['title'] = title
total_df = pd.concat([total_df, df], axis=0)
total_df.to_csv('../dataset/glowpick_products.csv', index=False)
print(total_df.tail())
print()
print('Complete')
self.driver.quit()
def get_main(self, cat=17):
'''
args:
- get_lst : sub category li list
- cat : category index. default 17: 남자화장품
'''
# glowpick main page
self.driver.get(self.url)
self.driver.implicitly_wait(5)
# click category list
self.driver.find_element_by_xpath('//*[@id="gp-home"]/section[1]/div[1]/form/fieldset[1]/div/button').send_keys(Keys.ENTER)
time.sleep(1)
self.driver.find_element_by_xpath(f'//*[@id="gp-home"]/section[1]/div[1]/form/fieldset[1]/div/div/ul/li[{cat}]').click()
self.driver.implicitly_wait(5)
time.sleep(1)
li_ul = self.driver.find_element_by_xpath(f'//*[@id="gp-home"]/section[1]/div[1]/form/fieldset[1]/div/div/ul/li[{cat}]/div[2]/ul')
self.li_lst = li_ul.find_elements_by_css_selector('.list-item')
def scroll_down(self):
# scoll down
ul = self.driver.find_element_by_xpath('//*[@id="gp-list"]/div/section[2]/ul')
start_height = ul.size['height']
target = self.driver.find_element_by_link_text('사업자정보 확인')
start = self.driver.find_element_by_link_text('브랜드')
loop = True
actions = ActionChains(self.driver)
while loop:
actions.move_to_element(start)
actions.perform()
actions.move_to_element(target)
actions.perform()
time.sleep(0.5)
# check loop
ul = self.driver.find_element_by_xpath('//*[@id="gp-list"]/div/section[2]/ul')
current_height = ul.size['height']
if start_height == current_height:
loop = False
else:
start_height = current_height
def crawling(self):
# crawling list
brand_lst = []
product_lst = []
vol_price_lst = []
sales_rank_lst = []
rate_lst = []
nb_reviews_lst = []
product_url_lst = []
divs = self.driver.find_elements_by_xpath('//*[@id="gp-list"]/div/section[2]/ul/li')
for div in divs:
lst = div.text.split('\n') # exclude a last element '-'
product_url = div.find_element_by_css_selector('div > div > div').get_attribute('data-url')
product_url_lst.append(product_url)
print(lst, end=" ")
print(product_url)
brand_lst.append(lst[0])
product_lst.append(lst[1])
vol_price_lst.append(lst[2])
rate_lst.append(lst[3])
nb_reviews_lst.append(lst[4])
sales_rank_lst.append(lst[5])
# save dataframe
df = pd.DataFrame({'brand':brand_lst,
'product':product_lst,
'vol_price':vol_price_lst,
'rate':rate_lst,
'nb_reviews':nb_reviews_lst,
'sales_rank':sales_rank_lst,
'product_url':product_url_lst})
return df
if __name__ == '__main__':
gp = GlowPick()
gp.run() | 33.229508 | 153 | 0.535274 | 5,641 | 0.911161 | 0 | 0 | 0 | 0 | 0 | 0 | 1,727 | 0.278953 |
70d1f8b647267479a9bcde7a989424cc5e099952 | 3,651 | py | Python | engine/engine.py | farouqzaib/Personify | ae7c837b5539ccc1f0d4a8f327783814228fe4d7 | [
"MIT"
] | 1 | 2018-05-09T21:56:08.000Z | 2018-05-09T21:56:08.000Z | engine/engine.py | farouqzaib/Personify | ae7c837b5539ccc1f0d4a8f327783814228fe4d7 | [
"MIT"
] | null | null | null | engine/engine.py | farouqzaib/Personify | ae7c837b5539ccc1f0d4a8f327783814228fe4d7 | [
"MIT"
] | 1 | 2018-05-09T21:56:46.000Z | 2018-05-09T21:56:46.000Z | from algorithms.factorization_machine import FactorizationMachine
from algorithms.latent_dirichlet_allocation import LatentDirichletAllocation
from app.config import db
from app.config.config import engine
import datetime
import numpy as np
import pandas as pd
import pickle
class Engine:
def __init__(self):
self.users_to_int = {}
self.items_to_int = {}
self.users_to_int_counter = 0
self.items_to_int_counter = 0
def load_data(self):
'''
Loads the data from the data source
'''
today = datetime.datetime.today()
query = {
"range": {
"created_at": {
"gte": (today - datetime.timedelta(days=engine["training_data_age"])).strftime("%Y-%m-%d"),
"lte": today.strftime('%Y-%m-%d')
}
}
}
#TODO investigate performance of scroll instead of fetching large records in one go
res = db.es.search(index="events", doc_type="event", body={"query": query}, size=100)
events = []
data = res["hits"]["hits"]
# #check the dtype of the essential features
# first_record = record[0]["_source"]
# if type(first_record["user"]) == "string":
# users_to_int = self.transform_feature_to_int()
# if type(first_record["item"]) == "string":
# items_to_int = self.transform_feature_to_int()
for record in data:
event = record["_source"]
events.append((self.transform_feature_to_int(event["user"]), self.transform_feature_to_int(event["item"], "item"), event["rating"])
+ self.apply_feature_engineering(event["created_at"], "date"))
print np.array(events).shape
return np.array(events)
def transform_feature_to_int(self, feature, type='user'):
'''
Assigns a unique integer to an element in a feature
'''
if type == 'user':
if feature not in self.users_to_int:
self.users_to_int[feature] = self.users_to_int_counter
self.users_to_int_counter += 1
return self.users_to_int_counter
else:
if feature not in self.items_to_int:
self.items_to_int[feature] = self.items_to_int_counter
self.items_to_int_counter += 1
return self.items_to_int_counter
def apply_feature_engineering(self, feature, type="date"):
if type == "date":
transformed_feature = pd.to_datetime(feature)
return (transformed_feature.year, transformed_feature.month, transformed_feature.day)
def train(self):
self.fm = FactorizationMachine()
events = self.load_data()
features = events[:, 0:events.shape[1] - 1] #means to select every data in every row for the first and second to last column
target = events[:, -1] #selects every data in every row for the last column
self.fm.fit(features, target)
self.save_model()
def get_recommendations(self):
self.fm.predict(features)
def save_recommendations(self):
pass
def save_model(self):
'''
Saves the trained model to disk
'''
today = datetime.datetime.today()
file_name = 'models/model-{0}.fm'.format(today)
f = open('engine/{0}'.format(file_name), 'w')
pickle.dump(self.fm, f)
res = db.es.index(index="events", doc_type='models', id='', body={'model': file_name, 'created_at': today})
def load_model(self):
pass | 36.148515 | 143 | 0.599836 | 3,375 | 0.924404 | 0 | 0 | 0 | 0 | 0 | 0 | 941 | 0.257738 |
70d273f42e3df714bfd7e79274d26f4a44433731 | 3,179 | py | Python | tests/test_parse_string.py | zalmane/copybook | 1afe405ee8466ca87706c6067b227a62ecd61203 | [
"MIT"
] | 12 | 2020-10-27T11:54:51.000Z | 2022-03-06T18:38:05.000Z | tests/test_parse_string.py | zalmane/copybook | 1afe405ee8466ca87706c6067b227a62ecd61203 | [
"MIT"
] | 5 | 2021-01-11T16:01:04.000Z | 2022-03-10T06:37:36.000Z | tests/test_parse_string.py | zalmane/copybook | 1afe405ee8466ca87706c6067b227a62ecd61203 | [
"MIT"
] | 1 | 2021-11-27T03:34:30.000Z | 2021-11-27T03:34:30.000Z | import pytest
# Reader imports
import copybook
#
# Tests
#
tests = {
"extra header text":"""
10 IDENTIFICATION DIVISION.
PROGRAM-ID. 8-REPORT.
AUTHOR. DDT. MODIFIED BY OREN.
DATE WRITTEN. 10/13/2010.
DATE COMPILED. 10/13/2010.
01 WORK-BOOK.
10 TAX-RATE PIC 9(3).
""",
"no header text":"""
01 RECORD.
05 TAX-RATE PIC 9(3).
""",
"numeric pic no precision":"""
01 WORK-BOOK.
10 TAX-RATE PIC 9(3).
""",
"numeric pic precision only":"""
01 WORK-BOOK.
10 TAX-RATE PIC V9(3).
""",
"numeric pic length and precision":"""
01 WORK-BOOK.
10 TAX-RATE PIC 9(13)V9(3).
""",
"numeric pic length and precision with dot":"""
01 WORK-BOOK.
10 TAX-RATE PIC 9(13).9(3).
""",
"numeric pic signed S":"""
01 WORK-BOOK.
10 TAX-RATE PIC S9(13)V9(3).
""",
"numeric pic signed minus":"""
01 WORK-BOOK.
10 TAX-RATE PIC -9(13)V9(3).
""",
"numeric pic with 9s":"""
01 WORK-BOOK.
10 TAX-RATE PIC 9999.
""",
"numeric pic with 9s and precision":"""
01 WORK-BOOK.
10 TAX-RATE PIC 9(2)V99.
""",
"numeric pic with 9s and precision and sign":"""
01 WORK-BOOK.
10 TAX-RATE PIC -9999.99.
""",
"numeric pic with 9s and precision and separate sign":"""
01 WORK-BOOK.
10 TAX-RATE PIC S9(13)V9(2)
SIGN TRAILING SEPARATE.
""",
"numeric pic with 9s and precision and separate leading sign":"""
01 WORK-BOOK.
10 TAX-RATE PIC S9(13)V9(2)
SIGN LEADING SEPARATE.
""",
"string pic with leading 0s":"""
01 WORK-BOOK.
10 TAX-RATE PIC X(005).
""",
}
from pyparsing import ParseException
def test_parse_string():
failed =0
for test_name,test in tests.items():
print(test_name)
try:
root = copybook.parse_string(test)
except ParseException as pe:
failed += 1
print(f"test failed: {test_name}")
print(pe)
assert failed==0
| 36.125 | 82 | 0.35766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,761 | 0.868512 |
70d28b57c47afe1c70e1683910f82209fac17a5b | 451 | py | Python | decorators.py | ir0nfelix/async_fileserver | 84d5f329982923855e0e0dc0b12fedcaaf26f1f7 | [
"MIT"
] | 1 | 2020-09-07T22:38:47.000Z | 2020-09-07T22:38:47.000Z | decorators.py | ir0nfelix/minio-async-fileserver | 84d5f329982923855e0e0dc0b12fedcaaf26f1f7 | [
"MIT"
] | null | null | null | decorators.py | ir0nfelix/minio-async-fileserver | 84d5f329982923855e0e0dc0b12fedcaaf26f1f7 | [
"MIT"
] | null | null | null | from werkzeug import exceptions
import settings
from utils import get_class_by_path
def authenticate(f):
def decorator(request):
try:
authentication_backend = get_class_by_path(settings.AUTHENTICATION_BACKEND)
authenticator = authentication_backend()
except Exception:
raise exceptions.Unauthorized()
authenticator.authenticate(request)
return f(request)
return decorator
| 26.529412 | 87 | 0.707317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
70d40f732da818242831ed4b95c44dbd678d72f8 | 1,189 | py | Python | quadboost/data_preprocessing/mean_haar.py | jsleb333/quadboost | b4b980ff4af727d5cec0348484a34f34e82168cd | [
"MIT"
] | 1 | 2018-08-27T22:56:30.000Z | 2018-08-27T22:56:30.000Z | quadboost/data_preprocessing/mean_haar.py | jsleb333/quadboost | b4b980ff4af727d5cec0348484a34f34e82168cd | [
"MIT"
] | null | null | null | quadboost/data_preprocessing/mean_haar.py | jsleb333/quadboost | b4b980ff4af727d5cec0348484a34f34e82168cd | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import skimage.transform as skit
import sys, os
sys.path.append(os.getcwd())
from quadboost.datasets import MNISTDataset
from quadboost.utils import *
from haar_preprocessing import *
def plot_images(images, titles):
fig, axes = make_fig_axes(len(images))
for im, title, ax in zip(images, titles, axes):
ax.imshow(im, cmap='gray_r')
ax.set_title(title)
plt.get_current_fig_manager().window.showMaximized()
plt.show()
mnist = MNISTDataset.load('haar_mnist_pad.pkl')
(Xtr, Ytr), (Xts, Yts) = mnist.get_train_test(center=False, reduce=False)
mean_haar = []
for i in range(10):
X_i = np.array([x for x, y in zip(Xtr, Ytr) if y == i])
mean_haar.append(np.mean(X_i, axis=0))
# show_im(mean_haar, range(10))
mean_haar = [2*(mh-np.min(mh))/(np.max(mh)-np.min(mh))-1 for mh in mean_haar]
mean_haar = [skit.resize(mh, (8,8), order=1, mode='reflect', anti_aliasing=True) for mh in mean_haar]
mean_haar = [2*(mh-np.min(mh))/(np.max(mh)-np.min(mh))-1 for mh in mean_haar]
plot_images(mean_haar, range(10))
encodings = {str(i):mh.reshape(-1).tolist() for i, mh in enumerate(mean_haar)}
print(encodings)
| 31.289474 | 101 | 0.70143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.057191 |
70d4dd142230f9c4175d163a6058e4aebffb4747 | 1,233 | py | Python | gpgraph/pyplot/utils.py | lperezmo/gpgraph | 7c62a8181734be8a6bc46a434839a06808736a2c | [
"Unlicense"
] | null | null | null | gpgraph/pyplot/utils.py | lperezmo/gpgraph | 7c62a8181734be8a6bc46a434839a06808736a2c | [
"Unlicense"
] | 1 | 2020-07-02T21:55:17.000Z | 2020-07-02T21:55:17.000Z | gpgraph/pyplot/utils.py | lperezmo/gpgraph | 7c62a8181734be8a6bc46a434839a06808736a2c | [
"Unlicense"
] | 3 | 2019-04-24T20:42:43.000Z | 2020-06-25T04:12:37.000Z | import numpy as np
import matplotlib.colors as colors
import matplotlib.pyplot as plt
def despine(ax=None):
"""Despine axes."""
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.set_xticks([])
ax.set_yticks([])
def truncate_colormap(cmap_str, minval=0.0, maxval=1.0, n=100):
"""Truncate a colormap to a narrower subspace of the spectrum."""
cmap = plt.get_cmap(cmap_str)
new_cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
def bins(G):
"""Bin genotypes by hamming distance from wildtype.
Parameters
----------
G : GenotypePhenotypeGraph object.
A GenotypePhenotypeGraph object.
"""
temp_bins = {}
for i in range(0, len(G.nodes("binary")[0]) + 1):
temp_bins[i] = []
for node in range(len(list(G.nodes()))):
node_attr = G.node[node]
# Calculate the level of each node
level = node_attr["binary"].count("1")
temp_bins[level].append(node)
return temp_bins
| 28.022727 | 77 | 0.638281 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 361 | 0.292782 |
70d587780f92ec710f858bf4bc668d0621a5b699 | 4,034 | py | Python | plugin.video.saltsrd.lite/scrapers/rlssource_scraper.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | 1 | 2019-03-05T09:37:15.000Z | 2019-03-05T09:37:15.000Z | plugin.video.saltsrd.lite/scrapers/rlssource_scraper.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | null | null | null | plugin.video.saltsrd.lite/scrapers/rlssource_scraper.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | 1 | 2021-11-05T20:48:09.000Z | 2021-11-05T20:48:09.000Z | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
import kodi
import dom_parser2
import log_utils # @UnusedImport
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
import scraper
BASE_URL = 'http://allrls.me'
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'RLSSource.net'
def get_sources(self, video):
hosters = []
source_url = self.get_url(video)
if not source_url or source_url == FORCE_NO_MATCH: return hosters
url = scraper_utils.urljoin(self.base_url, source_url)
html = self._http_get(url, require_debrid=False, cache_limit=.5)
q_str = ''
match = re.search('class="entry-title">([^<]+)', html)
if match:
q_str = match.group(1)
pattern = 'href="?([^" ]+)(?:[^>]+>){2}\s+\|'
for match in re.finditer(pattern, html, re.DOTALL):
url = match.group(1)
if 'adf.ly' in url:
continue
hoster = {'multi-part': False, 'class': self, 'views': None, 'url': url, 'rating': None, 'quality': None, 'direct': False}
hoster['host'] = urlparse.urlsplit(url).hostname
hoster['quality'] = scraper_utils.blog_get_quality(video, q_str, hoster['host'])
hosters.append(hoster)
return hosters
def get_url(self, video):
return self._blog_get_url(video, delim=' ')
@classmethod
def get_settings(cls):
settings = super(cls, cls).get_settings()
settings = scraper_utils.disable_sub_check(settings)
name = cls.get_name()
settings.append(' <setting id="%s-filter" type="slider" range="0,180" option="int" label=" Filter results older than (0=No Filter) (days)" default="30" visible="eq(-3,true)"/>' % (name))
settings.append(' <setting id="%s-select" type="enum" label=" Automatically Select" values="Most Recent|Highest Quality" default="0" visible="eq(-4,true)"/>' % (name))
return settings
def search(self, video_type, title, year, season=''): # @UnusedVariable
html = self._http_get(self.base_url, params={'s': title, 'go': 'Search'}, require_debrid=False, cache_limit=1)
posts = []
for post in dom_parser2.parse_dom(html, 'div', {'id': re.compile('post-\d+')}):
match = dom_parser2.parse_dom(post.content, 'a', req='href')
if not match: continue
match_url = match[0].attrs['href']
match_title = match[0].content
match_date = dom_parser2.parse_dom(post, 'span', {'class': 'entry-date'})
posts.append('<url>%s</url><title>%s</title><date>%s</date>' % (match_url, match_title, match_date))
pattern = '<url>(?P<url>.*?)</url><title>(?P<post_title>.*?)</title><date>(?P<post_date>.*?)</date>'
date_format = '%B %d, %Y'
return self._blog_proc_results('\n'.join(posts), pattern, date_format, video_type, title, year)
| 42.020833 | 206 | 0.636093 | 3,054 | 0.757065 | 0 | 0 | 767 | 0.190134 | 0 | 0 | 1,498 | 0.371344 |
70d5a45e4ad316255e38a153523ae1f59fe462d2 | 3,940 | py | Python | visualization/experiments/plot_calibration_curves.py | silasbrack/special-course | 47dc396f97b2027d366e90add115d4ed2bc0f1de | [
"MIT"
] | null | null | null | visualization/experiments/plot_calibration_curves.py | silasbrack/special-course | 47dc396f97b2027d366e90add115d4ed2bc0f1de | [
"MIT"
] | null | null | null | visualization/experiments/plot_calibration_curves.py | silasbrack/special-course | 47dc396f97b2027d366e90add115d4ed2bc0f1de | [
"MIT"
] | null | null | null | from typing import List
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from visualization.helper_functions import (
FIGURE_FOLDER,
TYPE_DICT,
calibration_curves,
)
from visualization.load_results import load_results, save_results_to_table
def plot_calibration_curves(
results: pd.DataFrame,
eval_datasets: List[str],
types: List[str],
file_name: str,
):
bins = 10
fig, ax = plt.subplots(
ncols=len(eval_datasets),
nrows=2,
sharex="all",
sharey="all",
figsize=(4 * len(eval_datasets), 6),
)
for i, dataset in enumerate(eval_datasets):
ax_curve = ax[0, i] if len(eval_datasets) > 1 else ax[0]
ax_hist = ax[1, i] if len(eval_datasets) > 1 else ax[1]
for type in types:
criteria = (
f"`Evaluated on` == '{dataset.upper()}' and Type == '{type}'"
)
data = results.query(criteria)
targets = data["Test targets"].values[0][:, None]
probs = data["Test probabilities"].values[0]
# confidences = np.max(probs, axis=1)
ece, prob_true, prob_pred, n_in_bins = calibration_curves(
targets, probs, bins=bins
)
error = 2 * np.sqrt(
(prob_true * (1 - prob_true)) / n_in_bins[n_in_bins > 0]
)
results.loc[results.eval(criteria), "ECE"] = ece
(line,) = ax_curve.plot(
prob_pred,
prob_true,
label=f"{type}",
c=TYPE_DICT[type]["color"],
linestyle=TYPE_DICT[type]["style"],
)
ax_curve.errorbar(
x=prob_pred,
y=prob_true,
yerr=error,
c=line.get_color(),
linestyle=TYPE_DICT[type]["style"],
)
ax_hist.bar(
np.linspace(0, 1, bins + 1)[-bins:] - 1 / (2 * bins),
n_in_bins / np.sum(n_in_bins),
width=1 / bins,
color=line.get_color(),
alpha=0.5,
label=f"{type}",
)
ls = np.linspace(0, 1)
ax_curve.plot(ls, ls, "--", color="grey", alpha=0.3)
ax_curve.set(
xlim=(0, 1),
ylim=(0, 1),
title=dataset.upper(),
)
ax_hist.set(
xlim=(0, 1),
ylim=(0, 1),
)
if i == 0:
ax_curve.set(ylabel="True probability")
ax_hist.set(
xlabel="Predicted probability",
ylabel="Frequency",
)
if i == len(eval_datasets) - 1:
ax_hist.legend(*ax_curve.get_legend_handles_labels())
ax_hist.set(xlabel="Predicted probability")
fig.tight_layout()
fig.savefig(f"{FIGURE_FOLDER}/{file_name}")
if __name__ == "__main__":
for train_set, eval_datasets in (
("mnist", ["mnist"]), # "svhn"
("mura", ["mura"]),
):
results = load_results(train_set, eval_datasets)
to_print = results.drop(
["Type", "Test targets", "Test probabilities"], axis=1
)
print(to_print)
save_results_to_table(to_print, f"{train_set}_full.tex")
plot_calibration_curves(
results,
eval_datasets,
types=["nn", "laplace", "meanfield", "radial", "lowrank"],
file_name=f"{train_set.capitalize()}VI.png",
)
plot_calibration_curves(
results,
eval_datasets,
types=["ensemble_5", "ensemble_10"],
file_name=f"{train_set.capitalize()}Ensembles.png",
)
plot_calibration_curves(
results,
eval_datasets,
types=["multiswag_5", "multiswag_10"],
file_name=f"{train_set.capitalize()}MultiSwag.png",
)
| 29.62406 | 77 | 0.515228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 614 | 0.155838 |
70d80cd9fad27c72a8556e30ec8f23dd7965d36e | 11,794 | py | Python | project/exact_distributions_covariance/exact_distributions_covariance_tools.py | juanhenao21/exact_distributions_financial | 02eb058e5f963fbccb9029aae3fb6e15def7a93a | [
"MIT"
] | 2 | 2021-03-20T18:24:22.000Z | 2021-04-15T07:25:50.000Z | project/exact_distributions_covariance/exact_distributions_covariance_tools.py | juanhenao21/exact_distributions_financial | 02eb058e5f963fbccb9029aae3fb6e15def7a93a | [
"MIT"
] | null | null | null | project/exact_distributions_covariance/exact_distributions_covariance_tools.py | juanhenao21/exact_distributions_financial | 02eb058e5f963fbccb9029aae3fb6e15def7a93a | [
"MIT"
] | null | null | null | """Exact distributions covariance tools module.
The functions in the module do small repetitive tasks, that are used along the
whole implementation. These tools improve the way the tasks are standardized
in the modules that use them.
This script requires the following modules:
* os
* pickle
* typing
* matplotlib
* numpy
* scipy
The module contains the following functions:
* save_data - saves computed data.
* save_plot - saves figures.
* function_header_print_data - prints info about the function running.
* function_header_print_plot - prints info about the plot.
* start_folders - creates folders to save data and plots.
* initial_message - prints the initial message with basic information.
* gaussian_distribution - computes gaussian distribution values.
* pdf_gaussian_gaussian - computes the one dimensional Gaussian-Gaussian
PDF.
* pdf_gaussian_algebraic - computes the one dimensional Gaussian-Algebraic
PDF.
* pdf_algebraic_gaussian - computes the one dimensional Algebraic-Gaussian
PDF.
* pdf_algebraic_algebraic - computes the one dimensional
Algebraic-Algebraic PDF.
* main - the main function of the script.
.. moduleauthor:: Juan Camilo Henao Londono <www.github.com/juanhenao21>
"""
# -----------------------------------------------------------------------------
# Modules
import os
import pickle
from typing import Any, List
from matplotlib import pyplot as plt # type: ignore
import numpy as np # type: ignore
# Gamma function
from scipy.special import gamma # type: ignore
# Modified Bessel function of the second kind of real order v
from scipy.special import kv # type: ignore
# Gauss hypergeometric function 2F1(a, b; c; z)
from scipy.special import hyp2f1 # type: ignore
# Confluent hypergeometric function U
from scipy.special import hyperu # type: ignore
# -----------------------------------------------------------------------------
def save_data(data: Any, function_name: str, dates: List[str], time_step: str) -> None:
"""Saves computed data in pickle files.
Saves the data generated in the functions of the
exact_distributions_covariance_analysis module in pickle files.
:param data: data to be saved. The data can be of different types.
:param function_name: name of the function that generates the plot.
:param dates: List of the interval of dates to be analyzed
(i.e. ['1980-01', '2020-12']).
:param time_step: time step of the data (i.e. '1m', '2m', '5m', ...).
:return: None -- The function saves the data in a file and does not return
a value.
"""
# Saving data
pickle.dump(
data,
open(
f"../data/exact_distributions_covariance/{function_name}_{dates[0]}"
+ f"_{dates[1]}_step_{time_step}.pickle",
"wb",
),
protocol=4,
)
print("Data Saved")
print()
# -----------------------------------------------------------------------------
def save_plot(
figure: plt.Figure, function_name: str, dates: List[str], time_step: str
) -> None:
"""Saves plot in png files.
Saves the plot generated in the functions of the
exact_distributions_covariance_analysis module in png files.
:param figure: figure object that is going to be save.
:param function_name: name of the function that generates the plot.
:param dates: List of the interval of dates to be analyzed
(i.e. ['1980-01', '2020-12']).
:param time_step: time step of the data (i.e. '1m', '2m', '5m', ...).
:return: None -- The function save the plot in a file and does not return
a value.
"""
# Saving plot data
figure.savefig(
f"../plot/exact_distributions_covariance/{function_name}"
+ f"_{dates[0]}_{dates[1]}_step_{time_step}.png"
)
print("Plot Saved")
print()
# -----------------------------------------------------------------------------
def function_header_print_data(
function_name: str, dates: List[str], time_step: str
) -> None:
"""Prints a header of a function that generates data when it is running.
:param function_name: name of the function that generates the data.
:param dates: List of the interval of dates to be analyzed
(i.e. ['1980-01', '2020-12']).
:param time_step: time step of the data (i.e. '1m', '2m', '5m', ...).
:return: None -- The function prints a message and does not return a
value.
"""
print("Exact Distributions")
print(function_name)
print(
"Computing the results of the data in the interval time from the "
+ f"years {dates[0]} to {dates[1]} in time steps of {time_step}"
)
print()
# -----------------------------------------------------------------------------
def function_header_print_plot(
function_name: str, dates: List[str], time_step: str
) -> None:
"""Prints a header of a function that generates a plot when it is running.
:param function_name: name of the function that generates the data.
:param dates: List of the interval of dates to be analyzed
(i.e. ['1980-01', '2020-12']).
:param time_step: time step of the data (i.e. '1m', '2m', '5m', ...).
:return: None -- The function prints a message and does not return a
value.
"""
print("Exact Distributions")
print(function_name)
print(
"Computing the plots of the data in the interval time from the "
+ f"years {dates[0]} to {dates[1]} in time steps of {time_step}"
)
print()
# -----------------------------------------------------------------------------
def start_folders() -> None:
"""Creates the initial folders to save the data and plots.
:return: None -- The function creates folders and does not return a value.
"""
try:
os.mkdir("../data/exact_distributions_covariance")
os.mkdir("../plot/exact_distributions_covariance")
print("Folder to save data created")
print()
except FileExistsError as error:
print("Folder exists. The folder was not created")
print(error)
print()
# -----------------------------------------------------------------------------
def initial_message() -> None:
"""Prints the initial message with basic information.
:return: None -- The function prints a message and does not return a value.
"""
print()
print("###################")
print("Exact Distributions")
print("###################")
print("AG Guhr")
print("Faculty of Physics")
print("University of Duisburg-Essen")
print("Author: Juan Camilo Henao Londono")
print("More information in:")
print("* https://juanhenao21.github.io/")
print("* https://github.com/juanhenao21/exact_distributions")
# print('* https://forex-response_spread-year.readthedocs.io/en/latest/')
print()
# -----------------------------------------------------------------------------
def gaussian_distribution(
mean: float, variance: float, x_values: np.ndarray
) -> np.ndarray:
"""Computes the Gaussian distribution values.
:param mean: mean of the Gaussian distribution.
:param variance: variance of the Gaussian distribution.
:param x: array of the values to compute the Gaussian
distribution
"""
return (1 / (2 * np.pi * variance) ** 0.5) * np.exp(
-((x_values - mean) ** 2) / (2 * variance)
)
# -----------------------------------------------------------------------------
def pdf_gaussian_gaussian(returns: np.ndarray, N: float, Lambda: float) -> np.ndarray:
"""Computes the one dimensional Gaussian-Gaussian PDF.
:param returns: numpy array with the returns values.
:param N: strength of the fluctuations around the mean.
:param Lambda: variance of the returns.
:return: numpy array with the pdf values.
"""
first_part: np.float = 1 / (
2 ** ((N - 1) / 2) * gamma(N / 2) * np.sqrt((np.pi * Lambda) / N)
)
second_part: np.ndarray = np.sqrt((N * returns ** 2) / Lambda) ** ((N - 1) / 2)
third_part: np.ndarray = kv((1 - N) / 2, np.sqrt(N * returns ** 2) / Lambda)
return first_part * second_part * third_part
# -----------------------------------------------------------------------------
def pdf_gaussian_algebraic(
    returns: np.ndarray, K: float, L: float, N: float, Lambda: float
) -> np.ndarray:
    """Computes the one dimensional Gaussian-Algebraic PDF.

    :param returns: numpy array with the returns values.
    :param K: number of companies.
    :param L: shape parameter of the algebraic distribution.
    :param N: strength of the fluctuations around the mean.
    :param Lambda: variance of the returns.
    :return: numpy array with the pdf values.
    """
    # Scale constant derived from the shape parameter L.
    M: np.float = 2 * L - K - N - 1
    # Gamma-function normalisation of the distribution.
    numerator: np.float = gamma(L - (K + N) / 2 + 1) * gamma(L - (K - 1) / 2)
    denominator: np.float = (
        gamma(L - (K + N - 1) / 2) * gamma(N / 2) * np.sqrt(2 * np.pi * Lambda * M / N)
    )
    frac: np.float = numerator / denominator
    # Tricomi confluent hypergeometric function U(a, b, z).
    # NOTE(review): formula not independently verified against the reference
    # papers; confirm the hyperu arguments before relying on the tails.
    function: np.ndarray = hyperu(
        L - (K + N) / 2 + 1, (1 - N) / 2 + 1, (N * returns ** 2) / (2 * M * Lambda)
    )
    return frac * function
# -----------------------------------------------------------------------------
def pdf_algebraic_gaussian(
    returns: np.ndarray, K: float, l: float, N: float, Lambda: float
) -> np.ndarray:
    """Computes the one dimensional Algebraic-Gaussian PDF.

    :param returns: numpy array with the returns values.
    :param K: number of companies.
    :param l: shape parameter of the algebraic market distribution
        (note: single-letter name, easy to confuse with ``1``).
    :param N: strength of the fluctuations around the mean.
    :param Lambda: variance of the returns.
    :return: numpy array with the pdf values.
    """
    # Scale constant derived from the shape parameter l.
    m: np.float = 2 * l - K - 2
    # Gamma-function normalisation of the distribution.
    numerator: np.float = gamma(l - (K - 1) / 2) * gamma(l - (K - N) / 2)
    denominator: np.float = (
        gamma(l - K / 2) * gamma(N / 2) * np.sqrt(2 * np.pi * Lambda * m / N)
    )
    frac: np.float = numerator / denominator
    # Tricomi confluent hypergeometric function U(a, b, z).
    # NOTE(review): formula not independently verified against the reference.
    function: np.ndarray = hyperu(
        l - (K - 1) / 2, (1 - N) / 2 + 1, (N * returns ** 2) / (2 * m * Lambda)
    )
    return frac * function
# -----------------------------------------------------------------------------
def pdf_algebraic_algebraic(
    returns: np.ndarray, K: float, L: float, l: float, N: float, Lambda: float
) -> np.ndarray:
    """Computes the one dimensional Algebraic-Algebraic PDF.

    :param returns: numpy array with the returns values.
    :param K: number of companies.
    :param L: shape parameter of the fluctuation distribution.
    :param l: shape parameter of the market distribution.
    :param N: strength of the fluctuations around the mean.
    :param Lambda: variance of the returns.
    :return: numpy array with the pdf values.
    """
    # Scale constants derived from the two shape parameters.
    M: np.float = 2 * L - K - N - 1
    m: np.float = 2 * l - K - 2
    # Gamma-function normalisation built from both shape parameters.
    numerator: np.float = (
        gamma(l - (K - 1) / 2)
        * gamma(l - (K - N) / 2)
        * gamma(L - (K - 1) / 2)
        * gamma(L - (K + N) / 2 + 1)
    )
    denominator: np.float = (
        np.sqrt(np.pi * Lambda * M * m / N)
        * gamma(l - K / 2)
        * gamma(L + l - (K - 1))
        * gamma(L - (K + N - 1) / 2)
        * gamma(N / 2)
    )
    frac: np.float = numerator / denominator
    # Gauss hypergeometric function 2F1(a, b; c; z).
    # NOTE(review): formula not independently verified against the reference.
    function: np.ndarray = hyp2f1(
        l - (K - 1) / 2,
        L - (K + N) / 2 + 1,
        L + l - (K - 1),
        1 - (N * returns ** 2) / (M * m * Lambda),
    )
    return frac * function
# -----------------------------------------------------------------------------
def main() -> None:
    """Entry point: placeholder used to exercise the functions in this module.

    :return: None.
    """
# -----------------------------------------------------------------------------
# Run the self-test entry point only when executed as a script.
if __name__ == "__main__":
    main()
| 30.475452 | 87 | 0.567916 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,652 | 0.648804 |
70d8fcb9af98e95db88cc389a427fda9e1bc42e5 | 3,272 | py | Python | Mission_to_Mars/Flask Application/scrape_mars.py | bigoshunane/Web-Scraping-Challenge-HM-10 | 98ae94b5db9246082fbb600e02b91064e5d3513e | [
"ADSL"
] | null | null | null | Mission_to_Mars/Flask Application/scrape_mars.py | bigoshunane/Web-Scraping-Challenge-HM-10 | 98ae94b5db9246082fbb600e02b91064e5d3513e | [
"ADSL"
] | null | null | null | Mission_to_Mars/Flask Application/scrape_mars.py | bigoshunane/Web-Scraping-Challenge-HM-10 | 98ae94b5db9246082fbb600e02b91064e5d3513e | [
"ADSL"
] | null | null | null | # Import Dependencies
import pandas as pd
from splinter import Browser
from bs4 import BeautifulSoup as bs
from webdriver_manager.chrome import ChromeDriverManager
def scrape():
# Featured Image scrape
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
url = "https://spaceimages-mars.com/"
browser.visit(url)
html = browser.html
soup = bs(html, "html.parser")
img = [i.get("src") for i in soup.find_all(
"img", class_="headerimage fade-in")]
featured_image_url = url + img[0]
# Top News Scrape
news_url = "https://redplanetscience.com/"
browser.visit(news_url)
html_1 = browser.html
soup_1 = bs(html_1, "html.parser")
latest_news = soup_1.find_all("div", class_="content_title")[0]
latest_news_title = latest_news.text
paragraph = soup_1.find_all("div", class_="article_teaser_body")[0]
latest_news_paragraph = paragraph.text
# Hemisphere Images Scrape
hemi_url = "https://marshemispheres.com/"
browser.visit(hemi_url)
hemisphere_image_urls = []
for i in range(4):
html = browser.html
soup = bs(html, "html.parser")
title = soup.find_all("h3")[i].get_text()
browser.find_by_tag('h3')[i].click()
html = browser.html
soup = bs(html, "html.parser")
img_url = soup.find("img", class_="wide-image")["src"]
hemisphere_image_urls.append({
"title": title,
"img_url": hemi_url + img_url
})
browser.back()
title1 = hemisphere_image_urls[0]["title"]
image1 = hemisphere_image_urls[0]["img_url"]
title2 = hemisphere_image_urls[1]["title"]
image2 = hemisphere_image_urls[1]["img_url"]
title3 = hemisphere_image_urls[2]["title"]
image3 = hemisphere_image_urls[2]["img_url"]
title4 = hemisphere_image_urls[3]["title"]
image4 = hemisphere_image_urls[3]["img_url"]
# Mars Facts Scrape
table_url = "https://galaxyfacts-mars.com/"
browser.visit(table_url)
html_3 = browser.html
soup_3 = bs(html_3, "html.parser")
table = soup_3.find_all("table", class_="table")[0]
table_header = [i.text for i in table("th")]
mars_column = [i.text for i in table("span", class_="orange")]
earth_column = [i.text for i in table("span", class_="purple")]
table_df = {"Description": table_header,
"Mars": mars_column, "Earth": earth_column}
df = pd.DataFrame(table_df)
df.set_index("Description", inplace=True)
df["Earth"] = df["Earth"].str.replace("\t", "")
comparison_table = df.to_html(classes="table table-striped")
browser.quit()
# Store all of the scraped data into a Python dictionary
final_mars_data = {
"latest_title": latest_news_title,
"latest_paragraph": latest_news_paragraph,
"featured_image": featured_image_url,
"html_table": comparison_table,
"hemisphere_scrape": hemisphere_image_urls,
"title1": title1,
"title2": title2,
"title3": title3,
"title4": title4,
"image1": image1,
"image2": image2,
"image3": image3,
"image4": image4,
}
return final_mars_data
| 31.161905 | 74 | 0.645171 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 831 | 0.253973 |
70d940d807d775cf236b4b2decb10d2c5a7f64f8 | 15,391 | py | Python | Snake.py | RodneyTheProgrammer/snkgame | 4fdeb1a1c2c871f307ce0984949693506292ec1c | [
"BSD-2-Clause"
] | null | null | null | Snake.py | RodneyTheProgrammer/snkgame | 4fdeb1a1c2c871f307ce0984949693506292ec1c | [
"BSD-2-Clause"
] | null | null | null | Snake.py | RodneyTheProgrammer/snkgame | 4fdeb1a1c2c871f307ce0984949693506292ec1c | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import curses
import time
import random
from operator import getitem,attrgetter
# Curses bootstrap: runs at import time and puts the terminal in game mode.
stdscr = curses.initscr()
L,W= stdscr.getmaxyx()
curses.start_color()
curses.noecho()
curses.cbreak()
curses.curs_set(0)
stdscr.keypad(1)
# NOTE(review): L and W were already assigned above; this second query is
# redundant unless the terminal resized during initialisation.
L,W = stdscr.getmaxyx()
# Phases of the rat's state machine, used as module-level constants.
normal,infiltrate,get2goal,runaway=0,1,2,3
def start_page(window):
    """Show the centred title screen and block until any key is pressed.

    Relies on Python 2 integer division to compute the text position.
    """
    maxy,maxx = window.getmaxyx()
    s = 'Snake'
    paktc = 'Press any key to continue'
    window.addstr(maxy/2,maxx/2-len(s)/2,s,curses.A_BOLD)
    window.addstr(maxy/2+1,maxx/2-len(paktc)/2,paktc)
    window.refresh()
    window.getch()
    window.erase()
def MID(L,D): #move in direction
    """Return a copy of point ``L`` shifted one step in direction ``D``.

    ``D`` is an angle in degrees (multiples of 45); 360 is the null move.
    """
    moved = L.copy()
    if D == 360:
        return moved
    if 0 < D < 180:
        dy = -1
    elif 180 < D < 360:
        dy = 1
    else:
        dy = 0
    if 90 < D < 270:
        dx = -1
    elif D > 270 or D < 90:
        dx = 1
    else:
        dx = 0
    moved.y = moved.y + dy
    moved.x = moved.x + dx
    return moved
#point-related stuff
class point(object):
    """A mutable (y, x) grid coordinate."""
    def __init__(self,y,x):
        self.y =y
        self.x =x
    def __cmp__(self,other):
        # Python 2 only: 0 means equal, anything else unequal. This never
        # reports a real ordering, only (in)equality, which is all the game
        # uses (``==`` comparisons between points).
        if self.x == other.x and self.y == other.y :
            return 0
        else:
            return -1
    def __str__(self):
        return str((self.y,self.x))
    def __repr__(self):
        return str(self)
    def copy(self):
        # Independent copy so callers can move it without aliasing.
        return point(self.y,self.x)
def pointgen(L,W):
    """Return a random point strictly inside an L-by-W bordered area."""
    y = random.randint(1, L - 2)
    x = random.randint(1, W - 2)
    return point(y, x)
class dummy(object):
    """A static drawable item (coin, gate, status text): a glyph plus a position."""
    def __init__(self, char, location=None):
        """:param char: character drawn for this item.
        :param location: starting point; defaults to a fresh point(1, 1).
        """
        # Bug fix: the original used ``location=point(1,1)`` as a default,
        # so every dummy created without an explicit location aliased the
        # same mutable point object.
        if location is None:
            location = point(1, 1)
        self.location = location
        self.char = char
    def draw(self, window):
        """Render the glyph at the item's position on the curses window."""
        window.addstr(self.location.y, self.location.x, self.char)
#living animals
class man(object):
    """The player: a position, a score, and border-bounded movement."""
    def __init__(self):
        self.score = 0
        self.location=point(1,1)
    def move(self,D):
        # Step in direction D but refuse to walk onto the border
        # (globals L, W hold the terminal size).
        X = MID(self.location,D)
        if X.y != 0 and X.x != 0:
            if X.y != L-1 and X.x != W-1:
                self.location = X
    def live(self):
        # The player has no AI; input is handled by the game loop.
        pass
    def chance(self):
        # Percent probability (Python 2 integer division) that a chasing
        # snake moves randomly instead of homing in; shrinks as score grows.
        return chance((500-self.score)/5)
    def draw(self,window):
        window.addstr(self.location.y,self.location.x,'@')
class rat(object):
    """A purchasable decoy that fetches the coin and drags it to its master.

    Phase machine (module-level constants): normal -> get2goal ->
    infiltrate, with runaway entered when a snake gets close.
    """
    def __init__(self,coin,dude):
        self.master = dude
        self.coin = coin
        self.location = pointgen(L,W)
        self.attached=False
        self.previous_escape=45
        self.phase=normal
    def move(self,D):
        # Step in direction D unless it would land on the border.
        X = MID(self.location,D)
        if X.y != 0 and X.x != 0:
            if X.y !=L-1 and X.x != W-1:
                self.location = X
                return True
        return False
    def draw(self,window):
        window.addstr(self.location.y,self.location.x,'r',curses.A_REVERSE)
    def live(self,snakes):
        # One AI tick: pick a direction d, move, and maybe drag the coin.
        rat = self.location          # NOTE(review): local shadows the class name
        snk = nearest_snake(snakes,rat).pieces[-1]
        threats = danger_directions(snakes,rat)
        sdistance = distance(rat,snk)
        isPhase = self.isPhase
        self.isAttached()
        escape = False
        if isPhase(infiltrate):
            # NOTE(review): ``self.goal`` is the bound method object, not
            # ``self.goal()``; a method is never contained in ``threats`` (a
            # list of ints), so this always resets the phase to normal and
            # the else branch below is dead (it would crash inside closest).
            if not self.goal in threats:
                self.phase = normal
            else:
                escape = True
                d = closest(self.goal)[0]
        if isPhase(runaway):
            if sdistance > 8:
                self.phase = normal
        if sdistance <= 8:
            if isPhase(runaway):
                escape = True
                if self.previous_escape not in threats:
                    d = self.previous_escape
                else:
                    d = opposite(direction(rat,snk))
        # NOTE(review): unreachable -- sdistance <= 4 implies sdistance <= 8,
        # which is matched by the branch above, so this elif is dead code.
        elif sdistance <= 4:
            escape = True
            if isPhase(infiltrate):
                d = closest(self.previous_escape)[0]
            else:
                if self.previous_escape not in threats:
                    d = self.previous_escape
                d = opposite(direction(rat,snk))
                self.phase = runaway
        if isPhase(normal):
            self.phase = get2goal
        if self.phase == get2goal and self.goal() == direction(rat,snk):
            self.phase = infiltrate
            escape = True
            d = closest(self.goal())[0]
        if self.phase == get2goal:
            d = self.goal()
        if self.goal() == 360:
            # Standing on the target: latch on (coin, or master when dragging).
            self.attached = True
        if not self.move(d) :
            # Blocked by the border: try neighbouring directions, nearest first.
            s = closest(d)
            for n in s:
                d = n
                if self.move(d):
                    break
        if self.attached:
            # Drag the coin one step along the direction just taken.
            self.coin.location = MID(self.location,d)
        if escape :
            self.previous_escape = d
    def isAttached(self):
        # Detach from the coin when it is no longer adjacent.
        if distance(self.location,self.coin.location) > 1:
            self.attached = False
    def isPhase(self,phase):
        if self.phase == phase:
            return True
        else:
            return False
    def goal(self):
        # Direction towards the current objective: the master once attached,
        # otherwise the coin.
        if self.attached:
            return direction(self.location,self.master.location)
        else:
            return direction(self.location,self.coin.location)
    def chance(self):
        # Same random-move probability rule as the player (Python 2 division).
        return chance((500-self.master.score)/5)
class snake(object):
    """An AI snake: a trail of points whose last element is the head."""
    def __init__(self):
        # ``lenght`` (sic) is the maximum trail length; kept misspelled
        # because it is referenced by that name throughout the class.
        self.lenght=random.randint(3,7)
        self.pieces=[point(1,1)]
    def move(self,D):
        # Advance the head; trim the tail when over length. Returns True on
        # success and (implicitly) None when blocked by the border.
        X=MID(self.pieces[-1],D)
        if X.y != 0 and X.x != 0:
            if X.y!= L-1 and X.x !=W-1:
                self.pieces.append(X)
                if len(self.pieces) > self.lenght:
                    del(self.pieces[0])
                return True
    def draw(self,window):
        # Body as 's', head as 'S'.
        for p in self.pieces:
            window.addstr(p.y,p.x,'s')
        p = self.pieces[-1]
        window.addstr(p.y,p.x,'S')
    def live(self,dude,pact,rats):
        """One AI tick chasing ``dude``; returns True when the player is eaten.

        :param dude: current prey (a man or a rat instance).
        :param pact: list of all snakes; the first one acts as the leader.
        :param rats: live rats list, mutated here when a rat is eaten.
        """
        m = dude.location
        snk = self.pieces[-1]
        l = pact[0]
        ldng = l.pieces[-1]
        h = distance(ldng,m)
        k = distance(snk,ldng)
        # Keep out of the leader's way while it is closing in on the prey.
        if self !=l and h < 5 and k < 14 :
            d = opposite(direction(snk,ldng))
        elif dude.chance():
            # Occasionally wander at random (more often early in the game).
            d= random.randint(0,8)*45
        else:
            d= direction(snk,m)
        if not self.move(d) :
            # Blocked by the border: try the nearest alternative directions.
            for n in closest(d):
                if self.move(n):
                    break
        if m in self.pieces:
            # Prey caught: play the animation. Eating a rat removes it and
            # the game continues (False); eating the player ends it (True).
            snakeeaten(m,stdscr)
            if type(dude)==rat:
                del(rats[rats.index(dude)])
                return False
            else:
                return True
        # NOTE(review): executed on every tick in which nothing was eaten,
        # so snakes grow continuously over time.
        self.lenght= self.lenght+1
def danger_directions(snakes,target):
    """List, for every snake, the direction from ``target`` towards its head."""
    return [direction(target, s.pieces[-1]) for s in snakes]
def nearest_snake(_snakes,target):
    """Return the snake whose head lies closest to ``target``."""
    snakes = _snakes[:]
    head_distances = [distance(s.pieces[-1], target) for s in snakes]
    return snakes[head_distances.index(min(head_distances))]
def direction(point,target):
    """Heading (degrees, multiple of 45) pointing from ``point`` to ``target``.

    Returns 360 (the null direction) when the two coincide.
    """
    sx = (point.x > target.x) - (point.x < target.x)
    sy = (point.y > target.y) - (point.y < target.y)
    headings = {
        (1, 1): 135, (1, 0): 180, (1, -1): 225,
        (0, 1): 90, (0, 0): 360, (0, -1): 270,
        (-1, 1): 45, (-1, 0): 0, (-1, -1): 315,
    }
    return headings[(sx, sy)]
def angle_delta(angle1,angle2):
    """Absolute difference between two angles in degrees (no wrap-around)."""
    delta = angle1 - angle2
    return -delta if delta < 0 else delta
def opposite(drctn):
    """Direction 180 degrees away; the null direction 360 is its own opposite."""
    if drctn == 360:
        return 360
    # Equivalent to closest(drctn)[-1]: the last candidate is the reverse.
    return (drctn + 180) % 360
def distance(point,target):
    """Euclidean distance between two points, truncated to an int."""
    dx = point.x - target.x
    dy = point.y - target.y
    return int((dx * dx + dy * dy) ** 0.5)
def closest(drctn):
    """Candidate directions ordered by closeness to ``drctn``.

    Alternates clockwise/counter-clockwise in 45-degree steps; the reverse
    direction appears (twice) at the end. 360 only yields itself.
    """
    if drctn == 360:
        return [360]
    candidates = []
    for step in range(1, 5):
        candidates.append((drctn + 45 * step) % 360)
        candidates.append((drctn - 45 * step) % 360)
    return candidates
def drw_ln(r,D,L,win,m='#'):
    """Draw a line of ``r`` cells from point ``L`` in direction ``D``.

    :param r: number of cells to draw.
    :param D: direction in degrees (multiple of 45).
    :param L: starting point (not mutated; MID returns copies).
    :param win: curses window to draw on.
    :param m: character used for the line.
    """
    h = L
    c = 0
    while c != r:
        try:
            win.addstr(h.y, h.x, m)
        # Bug fix: was a bare ``except`` that also hid programming errors;
        # only curses.error (cell outside the window) should be skipped.
        except curses.error:
            pass
        h = MID(h, D)
        c = c + 1
def drw_sqr(r,location,window):
    """Draw a hollow square of side ``r`` centred on ``location``.

    Uses Python 2 integer division for the half-side offset.
    """
    half = r / 2
    top_left = point(location.y - half, location.x - half)
    drw_ln(r, 0, top_left, window)
    drw_ln(r, 270, top_left, window)
    bottom_right = point(location.y + half, location.x + half)
    drw_ln(r, 180, bottom_right, window)
    drw_ln(r, 90, bottom_right, window)
def gatelight(location,window):
    """Animate the gate: concentric squares expanding around ``location``."""
    for size in range(6):
        time.sleep(0.05)
        drw_sqr(size, location, window)
        window.refresh()
def snakefoo(location,lenght,window):
    """Animate a snake face raising and lowering its body below ``location``.

    :param lenght: (sic) how many body segments to extend downwards.
    NOTE(review): not referenced anywhere in this script -- appears unused.
    """
    # Open eyes, then blink.
    window.addstr(location.y-1,location.x-1,'0-0')
    window.addstr(location.y,location.x-1,'\\_/')
    window.refresh()
    time.sleep(0.5)
    window.addstr(location.y-1,location.x-1,'>-<')
    window.addstr(location.y,location.x-1, '\\_/')
    window.refresh()
    time.sleep(0.2)
    # Grow the body downwards one segment at a time.
    c = 1
    while c <= lenght:
        window.addstr(location.y+c,location.x,'^')
        if c>=2:
            window.addstr(location.y+c-1,location.x,'|')
        window.refresh()
        c = c+1
        time.sleep(0.2)
    # Retract the body again.
    while 1 <= c:
        window.addstr(location.y+c,location.x,' ')
        if c == 2:
            window.addstr(location.y+c-1,location.x,'^')
        window.refresh()
        c = c-1
        time.sleep(0.2)
    window.addstr(location.y-1,location.x-1,'0-0')
    window.addstr(location.y,location.x-1, '\\_/')
    window.refresh()
    time.sleep(2.5)
def sparkle(location,window):
    """Draw a two-frame sparkle effect at ``location`` (rat purchase cue)."""
    window.addstr(location.y,location.x,'*')
    window.refresh()
    time.sleep(0.2)
    window.addstr(location.y-1,location.x-1,'\\|/')
    window.addstr(location.y,location.x-1,'- -')
    window.addstr(location.y+1,location.x-1,'/|\\')
    window.refresh()
def snakeeaten(location,window):
    """Play the "snake ate something" animation at ``location``.

    Flashes the eaten glyph, then shows the snake swallowing and winking.
    """
    window.addstr(location.y-1,location.x-1,'/-\\')
    window.addstr(location.y,location.x-1, '.@.')
    try:
        window.addstr(location.y+1,location.x-1,'\\-/')
    # Bug fix: was a bare ``except`` -- only the curses out-of-window error
    # (cell below may be off-screen) should be ignored here.
    except curses.error:
        pass
    window.refresh()
    # Blink the prey glyph a few times.
    c=0
    while c != 10:
        time.sleep(0.1)
        if c % 2 == 1:
            window.addstr(location.y,location.x,'@')
            window.refresh()
        else:
            window.addstr(location.y,location.x,' ')
            window.refresh()
        c = c+1
    # Satisfied snake face, then a slow wink.
    window.addstr(location.y-1,location.x-1, '0_0')
    window.addstr(location.y,location.x-1, '\\_/')
    window.addstr(location.y+1,location.x-1,' ')
    window.refresh()
    time.sleep(0.5)
    window.addstr(location.y-1,location.x-1,'-')
    window.refresh()
    time.sleep(0.5)
    window.addstr(location.y-1,location.x-1,'0')
    window.refresh()
    time.sleep(0.5)
def snakestare(snakes,window):
    """Game-over stare: every snake head blinks at the player for ~3 seconds."""
    locations=[]
    for s in snakes:
        locations.append(s.pieces[-1])
    for l in locations:
        window.addstr(l.y-1,l.x-1,'0_0!!')
        window.addstr(l.y,l.x-1,'\\_/')
    window.refresh()
    time.sleep(1)
    c=0
    while c != 22:
        # Alternate wide-eyed / squinting frames.
        for l in locations:
            if c%2:
                window.addstr(l.y-1,l.x-1,'0_0 ')
            else:
                window.addstr(l.y-1,l.x-1,'-_-')
        window.refresh()
        time.sleep(0.1)
        c=c+1
    time.sleep(1.5)
def snakehappy(snakes,target,window):
    """Show every snake smiling briefly (played when a rat is bought).

    :param target: unused in the body; kept for call-site compatibility.
    """
    ls=[]
    for s in snakes:
        ls.append(s.pieces[-1])
    for l in ls:
        window.addstr(l.y-1,l.x-1,'0-0')
        window.addstr(l.y,l.x-1,'\\_/')
    window.refresh()
    time.sleep(0.2)
    for l in ls:
        window.addstr(l.y-1,l.x-1,'^-^')
    window.refresh()
    time.sleep(1)
def chance(percent):
    """Return True with probability ``percent``/100, floored at 4 percent."""
    effective = percent if percent >= 4 else 4
    return random.randint(1, 100) <= effective
def border(window):
    """Draw the rectangular playing-field border using the globals L and W."""
    maxy,maxx=L,W
    maxx=maxx-1
    maxy=maxy-1
    # Left and top edges starting at the origin.
    _0 = point(0,0)
    drw_ln(maxy,270,_0,window,'|')
    drw_ln(maxx,0,_0,window,'-')
    # Right and bottom edges starting at the far corner.
    _M= point(maxy,maxx)
    drw_ln(maxy,90,_M,window,'|')
    drw_ln(maxx,180,_M,window,'-')
    window.addstr(0,0,'+')
    window.addstr(0,maxx,'+')
    window.addstr(maxy,0,'+')
    # NOTE(review): drawn one row above the true corner -- presumably because
    # writing the bottom-right cell of a curses window raises an error.
    window.addstr(maxy-1,maxx,'+')
def windowsave(window):
    """Block until the terminal is resized back to at least L rows by W cols.

    Shows a "please resize" prompt while the terminal is smaller than the
    playing field created at start-up.

    :param window: curses window (note: the loop body writes to the global
        ``stdscr`` directly, as in the original code).
    """
    while curses.is_term_resized(L,W):
        # Bug fix: getmaxyx() returns (rows, cols); the original unpacked
        # them as ``maxx, maxy``, comparing rows against W and cols against L.
        maxy, maxx = stdscr.getmaxyx()
        if maxy < L or maxx < W:
            stdscr.addstr(0,0,'Please resize the window',curses.A_BOLD)
            stdscr.refresh()
            time.sleep(0.5)
def TheGame(window):
    """Run the main game loop until the player quits, escapes, or is eaten.

    :param window: curses window to draw everything on.
    :return: tuple ``(outcome, score)`` -- outcome is 0 (player quit),
        1 (escaped through the gate) or -1 (eaten by a snake).
    """
    tb = dummy('',point(0,0))      # status text drawn at the top-left corner
    dude = man()
    dude.location = pointgen(L,W)
    snakes=[snake(),snake(),snake()]
    dude.location=pointgen(L,W)
    rats = []
    reached300=False               # once True, the player may buy rats with 'r'
    Q=map(ord,('Q','q'))
    R=map(ord,('R','r'))
    # Scatter the snakes anywhere except on top of the player.
    for s in snakes:
        s.pieces[0]=pointgen(L,W)
        while s.pieces[0] == dude.location:
            s.pieces[0]=pointgen(L,W)
    coin = dummy('$',pointgen(L,W))
    gate = dummy('#',pointgen(L,W))
    while gate.location == coin.location:
        gate.location = pointgen(L,W)
    input=''
    while 1:
        if dude.score >= 300:
            reached300 = True
        window.erase()
        there_are_rats= len(rats)>0
        try:
            border(window)
            for s in snakes:
                s.draw(window)
            if there_are_rats:
                for r in rats:
                    r.draw(window)
            dude.draw(window)
            gate.draw(window)
            coin.draw(window)
            tb.draw(window)
            window.refresh()
        # Bug fix: was a bare ``except`` -- only curses drawing errors
        # (terminal smaller than the playing field) should trigger the
        # resize-wait helper.
        except curses.error:
            windowsave(window)
        # Only the newest rat runs its AI; older rats just wander randomly.
        if len(rats) >= 1:
            rats[-1].live(snakes)
        for r in rats[:-1]:
            r.move(random.randint(0,8)*45)
        input = window.getch()
        if input == curses.KEY_UP:
            dude.move(90)
        if input == curses.KEY_DOWN:
            dude.move(270)
        if input == curses.KEY_LEFT:
            dude.move(180)
        if input == curses.KEY_RIGHT:
            dude.move(0)
        if input in Q:
            # Player gives up.
            tb.char='chicken heart!'
            tb.draw(window)
            window.refresh()
            snakestare(snakes,window)
            return (0,dude.score)
        if input in R:
            # Buy a decoy rat for 200 gold (unlocked after reaching 300).
            if reached300 and dude.score >= 200:
                dude.score=dude.score-200
                tb.char='real food!'
                tb.draw(window)
                window.refresh()
                rats.append(rat(coin,dude))
                sparkle(rats[-1].location,window)
                window.addstr(rats[-1].location.y,rats[-1].location.x,'r')
                snakehappy(snakes,rats[-1].location,window)
        # Snakes chase the newest rat when one exists, otherwise the player.
        for s in snakes:
            if len(rats) > 0 :
                h=rats[-1]
            else:
                h=dude
            if s.live(h,snakes,rats) == True:
                return (-1,dude.score)
        if dude.location == coin.location:
            # Coin value scales inversely with the playing-field size.
            dude.score =dude.score+int(3000*(1.0/(W+L)))
            tb.char=str(dude.score)+'$'
            coin.location=pointgen(L,W)
        if dude.location == gate.location:
            gatelight(gate.location,window)
            snakestare(snakes,window)
            return (1,dude.score)
        # Bug fix: the original ended the loop body with an unconditional
        # ``break``, which stopped the game after a single frame and made
        # TheGame return None -- the caller indexes the returned tuple
        # (``x[0]``), so every game would crash.
# Script entry: show the title screen, play one game, restore the terminal,
# then report the outcome (Python 2 print statements).
start_page(stdscr)
x=TheGame(stdscr)
stdscr.erase()
# Undo the curses modes set at import time. NOTE(review): endwin is called
# twice here; the second call is redundant but harmless.
curses.nocbreak(),curses.echo(),curses.endwin(),curses.endwin()
if x[0]==-1:
    print "You\'ve been eaten with %s grams of gold you found"%x[1]
if x[0]==0:
    print "You left the game embarrassingly while having only %s grams of gold"%x[1]
if x[0] == 1:
    print "You escaped with %s grams of gold"%x[1]
| 27.681655 | 84 | 0.523553 | 5,367 | 0.34871 | 0 | 0 | 0 | 0 | 0 | 0 | 544 | 0.035345 |
70d993efbc1f1e5a1f92a001568acf76b80283cd | 266 | py | Python | Jawaban/4.py | Rakhid16/pibiti-himatifa-2020 | 734ae59d0ffb3cf41d14fe4fbcaba7b317f5c870 | [
"Apache-2.0"
] | null | null | null | Jawaban/4.py | Rakhid16/pibiti-himatifa-2020 | 734ae59d0ffb3cf41d14fe4fbcaba7b317f5c870 | [
"Apache-2.0"
] | null | null | null | Jawaban/4.py | Rakhid16/pibiti-himatifa-2020 | 734ae59d0ffb3cf41d14fe4fbcaba7b317f5c870 | [
"Apache-2.0"
] | null | null | null | kamus = {"elephant" : "gajah", "zebra" : "zebra", "dog" : "anjing", "camel" : "unta"}
kata = input("Masukan kata berbahasa inggris : ")
if kata in kamus:
print("Terjemahan dari " + kata + " adalah " + kamus[kata])
else:
print("Kata tersebt belum ada di kamus") | 33.25 | 85 | 0.631579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.575188 |
70d9fd3c3648f0a4459448b9661eee37f5884a7a | 2,521 | py | Python | pureskillgg_dsdk/tome/reader_fs.py | pureskillgg/dsdk | 2e91a815dc06cc37ac8272d87014301c64c1b46e | [
"MIT"
] | null | null | null | pureskillgg_dsdk/tome/reader_fs.py | pureskillgg/dsdk | 2e91a815dc06cc37ac8272d87014301c64c1b46e | [
"MIT"
] | 1 | 2022-03-31T15:16:17.000Z | 2022-03-31T19:38:05.000Z | pureskillgg_dsdk/tome/reader_fs.py | pureskillgg/dsdk | 2e91a815dc06cc37ac8272d87014301c64c1b46e | [
"MIT"
] | null | null | null | import pandas as pd
import structlog
import rapidjson
from .constants import (
get_page_key_fs,
get_tome_manifest_key_fs,
get_tome_path_fs,
)
class TomeReaderFs:
    """Filesystem-backed reader for a "tome": a manifest plus parquet pages.

    When ``has_header`` is true, a nested reader for the tome's ``header``
    sub-tome is created at ``self.header``.
    """

    def __init__(self, *, root_path, prefix=None, tome_name, log=None, has_header=True):
        """:param root_path: base directory holding the tomes.
        :param prefix: optional sub-path between root and tome.
        :param tome_name: name of the tome to read.
        :param log: structlog-style logger; a default is created when None.
        :param has_header: whether to attach a nested header reader.
        """
        self._log = log if log is not None else structlog.get_logger()
        self._log = self._log.bind(
            client="tome_reader_fs",
            root_path=root_path,
            prefix=prefix,
            tome_name=tome_name,
        )
        self._path = get_tome_path_fs(root_path, prefix, tome_name)
        self.has_header = has_header
        self.header = None
        if self.has_header:
            # The header is itself a tome stored under this tome's path.
            self.header = TomeReaderFs(
                root_path=self._path,
                tome_name="header",
                has_header=False,
                log=self._log,
            )

    @property
    def exists(self):
        """True when the tome's manifest can be read from disk."""
        try:
            self.read_manifest()
        except FileNotFoundError:
            return False
        # Bug fix (idiom): was a bare ``except`` -- narrow to Exception so
        # BaseException (KeyboardInterrupt, SystemExit) is not intercepted.
        except Exception:
            self._log.error("There was an error while loading the loader")
            raise
        return True

    def read_manifest(self):
        """Load and return the tome manifest (JSON) as a dict."""
        key = get_tome_manifest_key_fs(self._path)
        with open(key, "r", encoding="utf-8") as file:
            data = rapidjson.loads(file.read())
        return data

    # pylint: disable=no-self-use
    def read_metadata(self):
        """Return tome metadata; the filesystem backend stores none."""
        return {}

    def read_page(self, page):
        """Return ``(dataframe, keyset)`` for one manifest page entry."""
        dataframe = self.read_page_dataframe(page)
        keyset = self.read_page_keyset(page)
        return dataframe, keyset

    def read_page_keyset(self, page):
        """Read one page's keyset (first parquet column) as a list.

        :raises Exception: when the page's content type is not parquet.
        """
        key = self._get_page_key("keyset", page)
        content_type = page["keysetContentType"]
        if content_type != "application/x-parquet":
            raise Exception(f"Unknown content type {content_type}")
        self._log.info("Read keyset: Start", page_number=page["number"])
        df = pd.read_parquet(key)
        return list(df.iloc[:, 0])

    def read_page_dataframe(self, page):
        """Read one page's dataframe from its parquet file.

        :raises Exception: when the page's content type is not parquet.
        """
        key = self._get_page_key("dataframe", page)
        content_type = page["dataframeContentType"]
        if content_type != "application/x-parquet":
            raise Exception(f"Unsupported content type {content_type}")
        self._log.info("Read Dataframe: Start", page_number=page["number"])
        df = pd.read_parquet(key)
        return df

    def _get_page_key(self, subtype, page):
        # Delegate path construction to the shared key helper.
        return get_page_key_fs(self._path, subtype, page)
| 30.373494 | 88 | 0.611265 | 2,364 | 0.937723 | 0 | 0 | 298 | 0.118207 | 0 | 0 | 377 | 0.149544 |
70da002befd824161b1b4d3ae29fa75a282470f6 | 7,713 | py | Python | scripts/google_client.py | hypothe/dialogflow_ros | 9e804acf31c384286ebf15e666a952740e2341d9 | [
"MIT"
] | null | null | null | scripts/google_client.py | hypothe/dialogflow_ros | 9e804acf31c384286ebf15e666a952740e2341d9 | [
"MIT"
] | null | null | null | scripts/google_client.py | hypothe/dialogflow_ros | 9e804acf31c384286ebf15e666a952740e2341d9 | [
"MIT"
] | 2 | 2021-09-09T07:33:27.000Z | 2021-09-09T07:37:28.000Z | #!/usr/bin/env python
# from google.cloud import speech
from google.cloud import speech_v1p1beta1 as speech
from google.cloud.speech_v1p1beta1 import enums
from google.cloud.speech_v1p1beta1 import types
from google.api_core.exceptions import InvalidArgument, OutOfRange
import pyaudio
import Queue
import rospy
import rospkg
import signal
import yaml
from std_msgs.msg import String
class GspeechClient:
    """Stream microphone audio to Google Cloud Speech and publish final
    transcripts on the ``/google_client/text`` ROS topic.
    """

    def __init__(self):
        # Audio stream input setup
        FORMAT = pyaudio.paInt16
        CHANNELS = 1
        RATE = 44100
        self.CHUNK = 4096
        self.audio = pyaudio.PyAudio()
        self.stream = self.audio.open(format=FORMAT, channels=CHANNELS,
                                      rate=RATE, input=True,
                                      frames_per_buffer=self.CHUNK,
                                      stream_callback=self.get_data)
        self._buff = Queue.Queue()  # Buffer to hold audio data
        self.closed = False
        # ROS Text Publisher
        self.text_pub = rospy.Publisher('/google_client/text', String, queue_size=10)
        # Context clues (phrase hints) in yaml file
        rospack = rospkg.RosPack()
        yamlFileDir = rospack.get_path('dialogflow_ros') + '/config/context.yaml'
        with open(yamlFileDir, 'r') as f:
            # Fix: yaml.load without an explicit Loader is deprecated and can
            # execute arbitrary constructors; the context file only contains
            # plain phrase lists, so safe_load is sufficient.
            self.context = yaml.safe_load(f)

    def get_data(self, in_data, frame_count, time_info, status):
        """PyAudio callback to continuously get audio data from the server and put it in a buffer.
        """
        self._buff.put(in_data)
        return None, pyaudio.paContinue

    def generator(self):
        """Generator function that continuously yields audio chunks from the buffer.
        Used to stream data to the Google Speech API Asynchronously.
        """
        while not self.closed:
            # Blocking get: wait for at least one chunk (None signals shutdown).
            chunk = self._buff.get()
            if chunk is None:
                return
            data = [chunk]
            # Drain whatever else accumulated using a non-blocking get().
            while True:
                try:
                    chunk = self._buff.get(block=False)
                    if chunk is None:
                        return
                    data.append(chunk)
                except Queue.Empty:
                    break
            yield b''.join(data)

    def _listen_print_loop(self, responses):
        """Iterates through server responses and publishes final transcripts.

        The responses passed is a generator that will block until a response
        is provided by the server.
        Each response may contain multiple results, and each result may contain
        multiple alternatives; for details, see https://goo.gl/tjCPAU. Here we
        use only the transcription for the top alternative of the top result.
        """
        try:
            for response in responses:
                # If not a valid response, move on to next potential one
                if not response.results:
                    continue
                # The `results` list is consecutive. For streaming, we only care
                # about the first result being considered, since once it's
                # `is_final`, it moves on to considering the next utterance.
                result = response.results[0]
                if not result.alternatives:
                    continue
                # Transcription of the top alternative.
                transcript = result.alternatives[0].transcript
                # Parse the final utterance
                if result.is_final:
                    rospy.logdebug("Google Speech result: {}".format(transcript))
                    # Received data is Unicode, convert it to string
                    transcript = transcript.encode('utf-8')
                    # Strip the initial space if any
                    if transcript.startswith(' '):
                        transcript = transcript[1:]
                    # Exit if needed
                    if transcript.lower() == 'exit' or rospy.is_shutdown():
                        self.shutdown()
                    # Bug fix: publish the cleaned transcript. The original
                    # published ``result[1]``, which indexes a recognition
                    # result object that is not subscriptable and discards
                    # the transcript prepared above.
                    self.text_pub.publish(transcript)
        except InvalidArgument as e:
            # Restart streaming after recoverable API argument errors.
            rospy.logwarn("{} caught in Mic. Client".format(e))
            self.gspeech_client()
        except OutOfRange as e:
            # Streaming sessions are time-limited; reconnect when they lapse.
            rospy.logwarn("{} caught in Mic. Client".format(e))
            self.gspeech_client()

    def gspeech_client(self):
        """Creates the Google Speech API client, configures it, and sends/gets
        audio/text data for parsing.
        """
        language_code = 'en-US'
        # Hints for the API
        context = types.SpeechContext(phrases=self.context)
        client = speech.SpeechClient()
        # Create metadata object, helps processing
        metadata = types.RecognitionMetadata()
        # Interaction Type:
        # VOICE_SEARCH: Transcribe spoken questions and queries into text.
        # VOICE_COMMAND: Transcribe voice commands, such as for controlling a device.
        metadata.interaction_type = (enums.RecognitionMetadata.InteractionType.VOICE_COMMAND)
        # Microphone Distance:
        # NEARFIELD: The audio was captured from a closely placed microphone.
        # MIDFIELD: The speaker is within 3 meters of the microphone.
        # FARFIELD: The speaker is more than 3 meters away from the microphone.
        metadata.microphone_distance = (enums.RecognitionMetadata.MicrophoneDistance.MIDFIELD)
        # Device Type:
        # PC: Speech was recorded using a personal computer or tablet.
        # VEHICLE: Speech was recorded in a vehicle.
        # OTHER_OUTDOOR_DEVICE: Speech was recorded outdoors.
        # OTHER_INDOOR_DEVICE: Speech was recorded indoors.
        metadata.recording_device_type = (enums.RecognitionMetadata.RecordingDeviceType.PC)
        # Media Type:
        # AUDIO: The speech data is an audio recording.
        # VIDEO: The speech data originally recorded on a video.
        metadata.original_media_type = (enums.RecognitionMetadata.OriginalMediaType.AUDIO)
        config = types.RecognitionConfig(
            encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
            sample_rate_hertz=44100,
            language_code=language_code,
            speech_contexts=[context],
            use_enhanced=True,
            model='command_and_search',
            metadata=metadata)
        streaming_config = types.StreamingRecognitionConfig(
            config=config,
            single_utterance=False,
            interim_results=False)
        # Hack from Google Speech Python docs, very pythonic c:
        requests = (types.StreamingRecognizeRequest(audio_content=content) for content in self.generator())
        responses = client.streaming_recognize(streaming_config, requests)
        self._listen_print_loop(responses)

    def shutdown(self):
        """Shut down as cleanly as possible, then terminate the process.

        Bug fix: ``_listen_print_loop`` already called ``self.shutdown()``,
        but no such method existed (AttributeError at runtime); the cleanup
        previously lived only in ``__del__``.
        """
        rospy.loginfo("Google STT shutting down")
        self.closed = True
        self._buff.put(None)   # unblock generator()
        self.stream.close()
        self.audio.terminate()
        exit()

    def __del__(self):
        """Delegate finaliser clean-up to shutdown()."""
        self.shutdown()

    def start_client(self):
        """Entry function to start the client"""
        try:
            rospy.loginfo("Google STT started")
            self.gspeech_client()
        except KeyboardInterrupt:
            self.shutdown()
def signal_handler(signal, frame):
    # SIGINT handler: ask ROS to shut down, then terminate the interpreter.
    # NOTE(review): the parameter ``signal`` shadows the imported ``signal``
    # module inside this function body.
    rospy.signal_shutdown("Order 66 Received")
    exit("Order 66 Received")
# Script entry: start a ROS node, install the Ctrl-C handler, and run the
# streaming speech client until shutdown.
if __name__ == '__main__':
    # rospy.init_node('frasier_mic_client', log_level=rospy.DEBUG)
    rospy.init_node('google_client')
    signal.signal(signal.SIGINT, signal_handler)
    g = GspeechClient()
    g.start_client()
| 40.594737 | 107 | 0.615714 | 6,984 | 0.905484 | 751 | 0.097368 | 0 | 0 | 0 | 0 | 2,763 | 0.358226 |
70de539c35a70303d9276df027cc058eb5448bce | 6,158 | py | Python | huaweicloud-sdk-as/huaweicloudsdkas/v1/model/callback_life_cycle_hook_option.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-as/huaweicloudsdkas/v1/model/callback_life_cycle_hook_option.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-as/huaweicloudsdkas/v1/model/callback_life_cycle_hook_option.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
class CallbackLifeCycleHookOption:
    """Request body for a scaling lifecycle-hook callback (Huawei Cloud AS SDK).

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'lifecycle_action_key': 'str',
        'instance_id': 'str',
        'lifecycle_hook_name': 'str',
        'lifecycle_action_result': 'str'
    }

    attribute_map = {
        'lifecycle_action_key': 'lifecycle_action_key',
        'instance_id': 'instance_id',
        'lifecycle_hook_name': 'lifecycle_hook_name',
        'lifecycle_action_result': 'lifecycle_action_result'
    }
    def __init__(self, lifecycle_action_key=None, instance_id=None, lifecycle_hook_name=None, lifecycle_action_result=None):
        """CallbackLifeCycleHookOption - a model defined in huaweicloud sdk

        :param lifecycle_action_key: lifecycle action token; preferred way to
            identify the callback target when supplied.
        :param instance_id: ID of the scaling instance being called back.
        :param lifecycle_hook_name: name of the lifecycle hook.
        :param lifecycle_action_result: callback action (ABANDON / CONTINUE /
            EXTEND); effectively required -- always assigned below.
        """
        self._lifecycle_action_key = None
        self._instance_id = None
        self._lifecycle_hook_name = None
        self._lifecycle_action_result = None
        self.discriminator = None
        if lifecycle_action_key is not None:
            self.lifecycle_action_key = lifecycle_action_key
        if instance_id is not None:
            self.instance_id = instance_id
        if lifecycle_hook_name is not None:
            self.lifecycle_hook_name = lifecycle_hook_name
        # Assigned unconditionally (required field), via the property setter.
        self.lifecycle_action_result = lifecycle_action_result
    @property
    def lifecycle_action_key(self):
        """Gets the lifecycle_action_key of this CallbackLifeCycleHookOption.

        Lifecycle action token, obtained from the API that queries a scaling
        instance's suspension info. Identifies the callback target; required
        when ``instance_id`` is not supplied. If both are supplied, this
        token takes precedence for the callback.

        :return: The lifecycle_action_key of this CallbackLifeCycleHookOption.
        :rtype: str
        """
        return self._lifecycle_action_key

    @lifecycle_action_key.setter
    def lifecycle_action_key(self, lifecycle_action_key):
        """Sets the lifecycle_action_key of this CallbackLifeCycleHookOption.

        Lifecycle action token; see the getter for the full semantics.

        :param lifecycle_action_key: The lifecycle_action_key of this CallbackLifeCycleHookOption.
        :type: str
        """
        self._lifecycle_action_key = lifecycle_action_key
    @property
    def instance_id(self):
        """Gets the instance_id of this CallbackLifeCycleHookOption.

        Instance ID. Identifies the callback target; required when
        ``lifecycle_action_key`` is not supplied.

        :return: The instance_id of this CallbackLifeCycleHookOption.
        :rtype: str
        """
        return self._instance_id

    @instance_id.setter
    def instance_id(self, instance_id):
        """Sets the instance_id of this CallbackLifeCycleHookOption.

        Instance ID; see the getter for the full semantics.

        :param instance_id: The instance_id of this CallbackLifeCycleHookOption.
        :type: str
        """
        self._instance_id = instance_id
    @property
    def lifecycle_hook_name(self):
        """Gets the lifecycle_hook_name of this CallbackLifeCycleHookOption.

        Lifecycle hook name. Identifies the callback target; required when
        ``lifecycle_action_key`` is not supplied.

        :return: The lifecycle_hook_name of this CallbackLifeCycleHookOption.
        :rtype: str
        """
        return self._lifecycle_hook_name

    @lifecycle_hook_name.setter
    def lifecycle_hook_name(self, lifecycle_hook_name):
        """Sets the lifecycle_hook_name of this CallbackLifeCycleHookOption.

        Lifecycle hook name; see the getter for the full semantics.

        :param lifecycle_hook_name: The lifecycle_hook_name of this CallbackLifeCycleHookOption.
        :type: str
        """
        self._lifecycle_hook_name = lifecycle_hook_name
@property
def lifecycle_action_result(self):
    """Gets the lifecycle_action_result of this CallbackLifeCycleHookOption.

    Lifecycle callback action. ABANDON: terminate. CONTINUE: continue.
    EXTEND: extend the timeout, one hour per call.

    :return: The lifecycle_action_result of this CallbackLifeCycleHookOption.
    :rtype: str
    """
    return self._lifecycle_action_result

@lifecycle_action_result.setter
def lifecycle_action_result(self, lifecycle_action_result):
    """Sets the lifecycle_action_result of this CallbackLifeCycleHookOption.

    Lifecycle callback action (see getter for allowed values).

    :param lifecycle_action_result: The lifecycle_action_result of this CallbackLifeCycleHookOption.
    :type: str
    """
    self._lifecycle_action_result = lifecycle_action_result
def to_dict(self):
    """Returns the model properties as a dict"""
    def _serialize(val):
        # Recursively convert nested models and containers of models.
        if isinstance(val, list):
            return [v.to_dict() if hasattr(v, "to_dict") else v for v in val]
        if hasattr(val, "to_dict"):
            return val.to_dict()
        if isinstance(val, dict):
            return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in val.items()}
        return val

    result = {}
    for attr, _ in six.iteritems(self.openapi_types):
        value = getattr(self, attr)
        if isinstance(value, (list, dict)) or hasattr(value, "to_dict"):
            result[attr] = _serialize(value)
        elif attr in self.sensitive_list:
            # Plain sensitive values are masked, matching the SDK convention.
            result[attr] = "****"
        else:
            result[attr] = value
    return result
def to_str(self):
    """Returns the string representation of the model"""
    as_dict = self.to_dict()
    return pprint.pformat(as_dict)
def __repr__(self):
    """For `print` and `pprint`"""
    # Delegate to the pretty-printed dict form.
    text = self.to_str()
    return text
def __eq__(self, other):
    """Returns true if both objects are equal"""
    if isinstance(other, CallbackLifeCycleHookOption):
        return self.__dict__ == other.__dict__
    return False
def __ne__(self, other):
    """Returns true if both objects are not equal"""
    return not (self == other)
| 32.240838 | 124 | 0.650698 | 6,783 | 0.991377 | 0 | 0 | 3,714 | 0.542824 | 0 | 0 | 3,631 | 0.530693 |
70dec0077e310c765f9ecb7dbeebc0c662a8b197 | 9,621 | py | Python | gui.py | TheAlienBee/Search_and_Apply | a62dcad45df09a38ab41c2f450a3202f3def6b3b | [
"MIT"
] | null | null | null | gui.py | TheAlienBee/Search_and_Apply | a62dcad45df09a38ab41c2f450a3202f3def6b3b | [
"MIT"
] | null | null | null | gui.py | TheAlienBee/Search_and_Apply | a62dcad45df09a38ab41c2f450a3202f3def6b3b | [
"MIT"
] | null | null | null | import tkinter as tk #tkinter is our gui lib.
import webbrowser #webbrowser allows us to open user's default web browser. good for clicking on links.
import jsonlines
import io
import genCoverLetter as gcl
from Search_and_Apply.Search_and_Apply.spiders.IndeedSpider import searchFor
from Search_and_Apply.Search_and_Apply.IndeedExpressApply import ExpressApply
if __name__ == '__main__':#not sure what this does. might delete later.
root = tk.Tk()  # root Tk window; all widgets are gridded into it.
# Module-level state shared by the nested handler functions below.
keywords = []  # last search keywords entered by the user
links = ["link1","link2","link3","link4","link5"]  # placeholder link list
name = "empty"  # applicant name (set via Profile / CoverLetter pages)
email = "empty"  # applicant email
position = "A fine position"  # target position for the cover letter
company = "A fine company"  # target company for the cover letter
phone = "777"  # applicant phone number
def BuildMenu():
    """(Re)build the top-left menu button.

    Called every time a page clears the window, since clearing destroys
    all widgets including the menu itself.
    """
    mb = tk.Menubutton(root, text="Menu")
    mb.grid(row=0, column=0)
    mb.menu = tk.Menu(mb, tearoff=0)
    mb["menu"] = mb.menu
    # One entry per page; each command swaps the window content.
    mb.menu.add_command(label="Search", command=Keywords)
    mb.menu.add_command(label="Resume", command=Resume)
    mb.menu.add_command(label="Cover Letter", command=CoverLetter)
    mb.menu.add_command(label="Profile", command=Profile)
    mb.menu.add_command(label="Job Listings", command=Listings)
    mb.menu.add_command(label="Additional Information", command=AdditionalInfo)
    mb.menu.add_command(label="Quit", command=Quit)
def Keywords():
    """Render the Search page: keyword entry plus previously scraped links."""
    for widget in root.winfo_children():  # clear the window
        widget.destroy()
    # Re-show scraped listings if the spider output file has content
    # and no link labels are currently rendered.
    with open("URLs_from_IndeedSpider.json", mode='r') as reader:
        file_data = reader.read().strip('\n []{}')
    if hyperlinks == [] and file_data != '':
        display_links()
    BuildMenu()
    tk.Message(root, text="Search", width=3000).grid(row=1, column=1)  # page title
    tk.Label(root, text="Search Keywords").grid(row=2, column=1)
    key_ent = tk.Entry(root)  # keyword input field
    key_ent.grid(row=2, column=2)
    tk.Button(root, text="Search", command=lambda: search(key_ent.get())).grid(row=2, column=3)
def search(new_keywords):
    """Run the Indeed spider for the given keyword string and show results."""
    global keywords  # module-level keyword store
    keywords = new_keywords  # remember what the user searched for
    searchFor([keywords])  # scrapy spider writes URLs_from_IndeedSpider.json
    print("searching... %s" % keywords)
    display_links()
def clettergen():
    """Persist profile fields to info.csv and generate the cover letter."""
    # `global` declarations give to_csv() the current module-level values;
    # nothing is assigned here, so they are technically redundant.
    global position
    global company
    global name
    global email
    global phone
    to_csv()
    gcl.write_cover_letter()
def apply(L):
    """Apply to the job link(s) L via the Indeed express-apply bot.

    Shows an error popup when the bot raises AttributeError, which the
    original comments associate with a missing profile (name/email not set).
    """
    global applyBot
    # Fixed: the original had `try:` commented out, leaving an orphan
    # `except` clause -- a SyntaxError that made the whole script unrunnable.
    try:
        applyBot.applyTo(L)
    except AttributeError as e:
        print(e)
        print(L)
        popup = tk.Toplevel()
        tk.Message(popup, text="Error: Please create a profile,", width=3000).grid(row=0, column=0)
        tk.Button(popup, text="OK", command=popup.destroy).grid(row=1, column=1)
def open_link(event):
    """Open the given URL in the user's default browser (new tab).

    `event` is actually a URL string -- callers bind it explicitly rather
    than passing the Tk event object.
    """
    webbrowser.open_new_tab(event)
def Resume():
    """Render the (placeholder) Resume page."""
    for widget in root.winfo_children():  # clear the window
        widget.destroy()
    BuildMenu()
    tk.Message(root, text="Resume", width=3000).grid(row=1, column=1)
def CoverLetter():
    """Render the Cover Letter page.

    Collects name/email/position/company/phone into module globals and
    offers a button that writes info.csv and generates the letter PDF.
    """
    for widget in root.winfo_children():  # clear the window
        widget.destroy()
    BuildMenu()
    tk.Message(root, text="Cover Letter", width=3000).grid(row=1, column=1)
    tk.Label(root, text="Name").grid(row=3, column=1)
    name_ent = tk.Entry(root)
    name_ent.grid(row=3, column=2)
    tk.Button(root, text="Save Name", command=lambda: save_name(name_ent.get())).grid(row=3, column=3)
    tk.Label(root, text="Email").grid(row=4, column=1)
    email_ent = tk.Entry(root)
    email_ent.grid(row=4, column=2)
    tk.Button(root, text="Save Email", command=lambda: save_email(email_ent.get())).grid(row=4, column=3)
    tk.Label(root, text="Position").grid(row=5, column=1)
    position_ent = tk.Entry(root)
    position_ent.grid(row=5, column=2)
    tk.Button(root, text="Save Position", command=lambda: save_position(position_ent.get())).grid(row=5, column=3)
    tk.Label(root, text="Company").grid(row=6, column=1)
    company_ent = tk.Entry(root)
    company_ent.grid(row=6, column=2)
    tk.Button(root, text="Save Company", command=lambda: save_company(company_ent.get())).grid(row=6, column=3)
    tk.Label(root, text="Phone").grid(row=7, column=1)
    phone_ent = tk.Entry(root)
    phone_ent.grid(row=7, column=2)
    tk.Button(root, text="Phone", command=lambda: save_phone(phone_ent.get())).grid(row=7, column=3)
    tk.Button(root, text="Generate Cover Letter", command=clettergen).grid(row=9, column=2)
    # clettergen() writes the globals to CSV and produces the PDF.
def save_phone(new_phone):
    """Store the phone number in the module global and confirm via popup."""
    global phone
    phone = new_phone
    popup = tk.Toplevel()
    tk.Message(popup, text="Profile updated with phone number,", width=3000).grid(row=0, column=0)
    tk.Button(popup, text="OK", command=popup.destroy).grid(row=1, column=1)

def save_position(new_position):
    """Store the target job position in the module global and confirm via popup."""
    global position
    position = new_position
    popup = tk.Toplevel()
    tk.Message(popup, text="Profile updated with job position,", width=3000).grid(row=0, column=0)
    tk.Button(popup, text="OK", command=popup.destroy).grid(row=1, column=1)

def save_company(new_company):
    """Store the target company name in the module global and confirm via popup."""
    global company
    company = new_company
    popup = tk.Toplevel()
    tk.Message(popup, text="Profile updated with company name,", width=3000).grid(row=0, column=0)
    tk.Button(popup, text="OK", command=popup.destroy).grid(row=1, column=1)
def to_csv():
    """Write the cover-letter fields to info.csv as one comma-separated line.

    Output (no header, no trailing newline):
        position,company,name,email,phone

    The original joined a single pre-joined string and streamed it through
    StringIO line by line; that produced exactly this string, so the
    round-trip is removed. NOTE(review): fields containing commas would
    corrupt the row -- switch to the csv module if that input is possible.
    """
    row = ",".join([position, company, name, email, phone])
    with open('info.csv', 'w') as f:
        f.write(row)
def Profile():
    """Render the Profile page: name and email entries with save buttons."""
    for widget in root.winfo_children():  # clear the window
        widget.destroy()
    BuildMenu()
    tk.Message(root, text="Profile", width=3000).grid(row=1, column=1)
    tk.Label(root, text="Name").grid(row=2, column=1)
    name_ent = tk.Entry(root)
    name_ent.grid(row=2, column=2)
    tk.Button(root, text="Save Name", command=lambda: save_name(name_ent.get())).grid(row=2, column=3)
    tk.Label(root, text="Email").grid(row=3, column=1)
    email_ent = tk.Entry(root)
    email_ent.grid(row=3, column=2)
    tk.Button(root, text="Save Email", command=lambda: save_email(email_ent.get())).grid(row=3, column=3)
    # Possible extension point: multiple named profiles.
def save_name(new_name):
    """Store the applicant name globally, push it to the apply bot, confirm."""
    global name
    name = new_name
    applyBot.addName(name)  # keep the express-apply bot in sync
    popup = tk.Toplevel()
    tk.Message(popup, text="Profile updated with your name,", width=3000).grid(row=0, column=0)
    tk.Button(popup, text="OK", command=popup.destroy).grid(row=1, column=1)

def save_email(new_email):
    """Store the applicant email globally, push it to the apply bot, confirm."""
    global email
    email = new_email
    applyBot.addEmail(email)  # keep the express-apply bot in sync
    popup = tk.Toplevel()
    tk.Message(popup, text="Profile updated with your email,", width=3000).grid(row=0, column=0)
    tk.Button(popup, text="OK", command=popup.destroy).grid(row=1, column=1)
def Listings():
    """Render the Job Listings page from the spider's JSON output."""
    for widget in root.winfo_children():  # clear the window
        widget.destroy()
    BuildMenu()
    tk.Message(root, text="Job Listings", width=3000).grid(row=1, column=1)
    display_links()
def display_links():
    """Render one clickable row per scraped job listing.

    Reads URLs_from_IndeedSpider.json and, for every record, adds a
    hyperlink-styled label (left-click opens the posting) plus an
    "Apply" button wired to apply().
    """
    with jsonlines.open("URLs_from_IndeedSpider.json", mode='r') as reader:
        distros_dict = reader.iter(type=dict)
        i = 0
        # NOTE(review): this local list shadows the module-level `hyperlinks`
        # that Keywords() inspects -- confirm whether `global hyperlinks`
        # was intended.
        hyperlinks = []
        for link in distros_dict:
            hyperlinks.append(tk.Label(root, text=link['Title'], fg="blue", cursor="hand2"))
            hyperlinks[i].grid(row=i + 10, column=1)
            # Fixed: bind the URL as a default argument. A plain closure over
            # `link` is late-binding, so every row's click handler and every
            # Apply button would act on the LAST listing after the loop ends.
            hyperlinks[i].bind("<Button-1>", lambda e, url=link['Link']: open_link(url))
            tk.Button(root, text="Apply",
                      command=lambda url=link['Link']: apply(url)).grid(row=i + 10, column=2)
            i += 1
def AdditionalInfo():
    """Render the (placeholder) Additional Information page."""
    for widget in root.winfo_children():  # clear the window
        widget.destroy()
    BuildMenu()
    tk.Message(root, text="Additional Information", width=3000).grid(row=1, column=1)
    # Content intentionally left empty for now.
def Quit():
    """Destroy the Tk root and terminate the process."""
    root.destroy()
    exit()
hyperlinks = []  # labels currently rendered as listing hyperlinks
print("loading...")
applyBot = ExpressApply()  # shared Indeed express-apply bot instance
BuildMenu()
root.geometry("640x480")
tk.Message(root, text="welcome to Search and Apply.", width=3000).grid(row=1, column=1)
root.mainloop()  # enter the Tk event loop (blocks until Quit)
| 41.291845 | 122 | 0.624571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,279 | 0.236878 |
70e03d9e1b93f27a85964cb57647f8629b96f070 | 3,900 | py | Python | followthemoney/model.py | dschulz-pnnl/followthemoney | 0b28240f156bfef0efa2ab636b95fb1bf62cd919 | [
"MIT"
] | null | null | null | followthemoney/model.py | dschulz-pnnl/followthemoney | 0b28240f156bfef0efa2ab636b95fb1bf62cd919 | [
"MIT"
] | null | null | null | followthemoney/model.py | dschulz-pnnl/followthemoney | 0b28240f156bfef0efa2ab636b95fb1bf62cd919 | [
"MIT"
] | null | null | null | import os
import yaml
from followthemoney.types import registry
from followthemoney.schema import Schema
from followthemoney.mapping import QueryMapping
from followthemoney.proxy import EntityProxy
from followthemoney.exc import InvalidModel, InvalidData
class Model(object):
    """A collection of schemata."""

    def __init__(self, path):
        """Load every YAML schema file found under *path*, then link them."""
        self.path = path
        self.schemata = {}    # schema name -> Schema
        self.properties = set()
        self.qnames = {}      # qualified property name -> Property
        for (path, _, filenames) in os.walk(self.path):
            for filename in filenames:
                self._load(os.path.join(path, filename))
        self.generate()

    def generate(self):
        """Resolve cross-schema references and index/propagate properties."""
        for schema in self:
            schema.generate()
        for prop in self.properties:
            self.qnames[prop.qname] = prop
            # FIXME: stubs are not correctly assigned
            # Push each property down to every descendant schema that does
            # not already define one with the same name.
            for schema in prop.schema.descendants:
                if prop.name not in schema.properties:
                    schema.properties[prop.name] = prop

    def _load(self, filepath):
        # Parse one YAML file; the top-level mapping is schema-name -> config.
        with open(filepath, "r") as fh:
            data = yaml.safe_load(fh)
            if not isinstance(data, dict):
                raise InvalidModel("Model file is not a mapping.")
            for name, config in data.items():
                self.schemata[name] = Schema(self, name, config)

    def get(self, name):
        """Return the schema for *name*; Schema instances pass through."""
        if isinstance(name, Schema):
            return name
        return self.schemata.get(name)

    def get_qname(self, qname):
        """Look up a property by qualified name, or None if unknown."""
        return self.qnames.get(qname)

    def __getitem__(self, name):
        """Like get(), but raise KeyError for unknown schema names."""
        schema = self.get(name)
        if schema is None:
            raise KeyError("No such schema: %s" % name)
        return schema

    def get_type_schemata(self, type_):
        """Return all the schemata which have a property of the given type."""
        schemata = set()
        for schema in self.schemata.values():
            for prop in schema.properties.values():
                if prop.type == type_:
                    schemata.add(schema)
        return schemata

    def make_mapping(self, mapping, key_prefix=None):
        """Parse a mapping that applies (tabular) source data to the model."""
        return QueryMapping(self, mapping, key_prefix=key_prefix)

    def map_entities(self, mapping, key_prefix=None):
        """Given a mapping, yield a series of entities from the data source."""
        mapping = self.make_mapping(mapping, key_prefix=key_prefix)
        for record in mapping.source.records:
            for entity in mapping.map(record).values():
                yield entity

    def common_schema(self, left, right):
        """Select the most narrow of two schemata.

        When indexing data from a dataset, an entity may be declared as a
        LegalEntity in one query, and as a Person in another. This function
        will select the most specific of two schemata offered. In the example,
        that would be Person.
        """
        # Fall back to the other operand when one lookup fails.
        left = self.get(left) or self.get(right)
        right = self.get(right) or self.get(left)
        if left.is_a(right):
            return left
        if right.is_a(left):
            return right
        # for schema in self.schemata.values():
        #     if schema.is_a(left) and schema.is_a(right):
        #         return schema
        msg = "No common schema: %s and %s"
        raise InvalidData(msg % (left, right))

    def make_entity(self, schema, key_prefix=None):
        """Create an empty EntityProxy of the given schema."""
        return EntityProxy(self, {"schema": schema}, key_prefix=key_prefix)

    def get_proxy(self, data, cleaned=True):
        """Wrap an entity dict in an EntityProxy bound to this model."""
        return EntityProxy.from_dict(self, data, cleaned=cleaned)

    def to_dict(self):
        """Serialize all schemata and registered types to plain dicts."""
        return {
            "schemata": {s.name: s.to_dict() for s in self.schemata.values()},
            "types": {t.name: t.to_dict() for t in registry.types},
        }

    def __iter__(self):
        """Iterate over all Schema objects in the model."""
        return iter(self.schemata.values())
| 35.454545 | 79 | 0.610769 | 3,641 | 0.93359 | 328 | 0.084103 | 0 | 0 | 0 | 0 | 816 | 0.209231 |
70e0a6a13cc5a05c0b7485409fd0055a7d8eb46a | 93 | py | Python | constants.py | supro200/sdwan-auto-upgrade | 3339d56203c678398da98ac2a6398f2edd66c245 | [
"MIT"
] | 2 | 2021-01-18T18:42:24.000Z | 2021-02-01T01:21:18.000Z | constants.py | supro200/sdwan-auto-upgrade | 3339d56203c678398da98ac2a6398f2edd66c245 | [
"MIT"
] | null | null | null | constants.py | supro200/sdwan-auto-upgrade | 3339d56203c678398da98ac2a6398f2edd66c245 | [
"MIT"
] | null | null | null | JUMPHOST = "jumphost"
VMANAGE = "10.121.6.35"
AZURE_STORAGE_ACCOUNT = "azure-storage-account" | 31 | 47 | 0.763441 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.494624 |
70e1500de54114c53300f7cf878cd9e1343b3276 | 1,423 | py | Python | api_v1/urls.py | yogoh31/Repath-App-Backend | c2cc85b05457ae7338de6f299ce369358402ce12 | [
"MIT"
] | null | null | null | api_v1/urls.py | yogoh31/Repath-App-Backend | c2cc85b05457ae7338de6f299ce369358402ce12 | [
"MIT"
] | null | null | null | api_v1/urls.py | yogoh31/Repath-App-Backend | c2cc85b05457ae7338de6f299ce369358402ce12 | [
"MIT"
] | null | null | null | from django.urls import path
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
from .views import (
RegisterView,
UserDetail,
LocationList,
LocationDetail,
FavoritePlaceList,
FavoritePlaceDetail,
ObstacleList,
ObstacleDetail,
UserObstacleCommentList,
UserObstacleCommentDetail,
)
# Route table. Django matches patterns in order, so literal "users/..."
# routes MUST precede the users/<str:email>/ catch-all.
urlpatterns = [
    path('locations/', LocationList.as_view(), name='location-list'),
    path('locations/<uuid:pk>/',
         LocationDetail.as_view(), name='location-detail'),
    path('obstacles/', ObstacleList.as_view(), name='obstacle-list'),
    path('obstacles/<uuid:pk>/',
         ObstacleDetail.as_view(), name='obstacle-detail'),
    path('comments/',
         UserObstacleCommentList.as_view(), name='comment-list'),
    path('comments/<uuid:pk>/',
         UserObstacleCommentDetail.as_view(), name='comment-detail'),
    path('users/', RegisterView.as_view(), name='auth-register'),
    path('users/token/', TokenObtainPairView.as_view(), name='token-obtain-pair'),
    path('users/token/refresh/', TokenRefreshView.as_view(), name='token-refresh'),
    # Fixed: these two were listed AFTER users/<str:email>/, so a request to
    # /users/favorites/ resolved to UserDetail with email="favorites".
    path('users/favorites/', FavoritePlaceList.as_view(),
         name='user-favorite-place-list'),
    path('users/favorites/<uuid:pk>/', FavoritePlaceDetail.as_view(),
         name='user-favorite-place-detail'),
    path('users/<str:email>/', UserDetail.as_view(), name='user-detail'),
]
| 35.575 | 83 | 0.683767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 420 | 0.295151 |
70e3213acde89557a8953f051282e36ffe6c2163 | 5,868 | py | Python | vp_suite/models/precipitation_nowcasting/ef_conv_lstm.py | AIS-Bonn/vp-suite | 479bd48185b9a93a6bc6bff2dfe226e9c65800d8 | [
"MIT"
] | 3 | 2022-03-05T15:27:25.000Z | 2022-03-25T18:47:35.000Z | vp_suite/models/precipitation_nowcasting/ef_conv_lstm.py | Flunzmas/vp-suite | 391570121b5bd9e3fd23aca9a0945a63c4173a24 | [
"MIT"
] | 16 | 2022-01-06T08:38:26.000Z | 2022-02-23T19:19:28.000Z | vp_suite/models/precipitation_nowcasting/ef_conv_lstm.py | AIS-Bonn/vp-suite | 479bd48185b9a93a6bc6bff2dfe226e9c65800d8 | [
"MIT"
] | 3 | 2022-02-07T22:34:45.000Z | 2022-02-22T11:14:37.000Z | from collections import OrderedDict
from vp_suite.model_blocks import ConvLSTM
from vp_suite.models.precipitation_nowcasting.ef_blocks import Encoder_Forecaster
class EF_ConvLSTM(Encoder_Forecaster):
    r"""
    This is a reimplementation of the Encoder-Forecaster model based on ConvLSTMs, as introduced in
    "Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting" by Shi et al.
    (https://arxiv.org/abs/1506.04214). This implementation is based on the PyTorch implementation on
    https://github.com/Hzzone/Precipitation-Nowcasting which implements the encoder-forecaster structure from
    "Deep Learning for Precipitation Nowcasting: A Benchmark and A New Model" by Shi et al.
    (https://arxiv.org/abs/1706.03458).

    The Encoder-Forecaster Network stacks multiple convolutional/up-/downsampling and recurrent layers
    that operate on different spatial scales.

    Note:
        The default hyperparameter configuration is intended for input frames of size (64, 64).
        For considerably larger or smaller image sizes, you might want to adjust the architecture.
    """

    # model-specific constants
    NAME = "EF-ConvLSTM (Shi et al.)"
    PAPER_REFERENCE = "https://arxiv.org/abs/1506.04214"
    CODE_REFERENCE = "https://github.com/Hzzone/Precipitation-Nowcasting"
    MATCHES_REFERENCE = "Yes"

    # model hyperparameters (c=channels, h=height, w=width, k=kernel_size, s=stride, p=padding)
    num_layers = 3  #: Number of recurrent cell layers
    enc_c = [16, 64, 64, 96, 96, 96]  #: Channels for conv and rnn; Length should be 2*num_layers
    dec_c = [96, 96, 96, 96, 64, 16]  #: Channels for conv and rnn; Length should be 2*num_layers
    # convs
    enc_conv_names = ["conv1_leaky_1", "conv2_leaky_1", "conv3_leaky_1"]  #: Encoder conv block layer names (for internal initialization)
    enc_conv_k = [3, 3, 3]  #: Encoder conv block kernel sizes per layer
    enc_conv_s = [1, 2, 2]  #: Encoder conv block strides per layer
    enc_conv_p = [1, 1, 1]  #: Encoder conv block paddings per layer
    dec_conv_names = ["deconv1_leaky_1", "deconv2_leaky_1", "deconv3_leaky_1"]  #: Decoder conv block layer names (for internal initialization)
    dec_conv_k = [4, 4, 3]  #: Decoder conv block kernel sizes per layer
    dec_conv_s = [2, 2, 1]  #: Decoder conv block strides per layer
    dec_conv_p = [1, 1, 1]  #: Decoder conv block paddings per layer
    # rnns
    enc_rnn_k = [3, 3, 3]  #: Encoder recurrent block kernel sizes per layer
    enc_rnn_s = [1, 1, 1]  #: Encoder recurrent block strides per layer
    enc_rnn_p = [1, 1, 1]  #: Encoder recurrent block paddings per layer
    dec_rnn_k = [3, 3, 3]  #: Decoder recurrent block kernel sizes per layer
    dec_rnn_s = [1, 1, 1]  #: Decoder recurrent block strides per layer
    dec_rnn_p = [1, 1, 1]  #: Decoder recurrent block paddings per layer
    # final convs
    final_conv_1_name = "identity"  #: Final conv block 1 name
    final_conv_1_c = 16  #: Final conv block 1 out channels
    final_conv_1_k = 3  #: Final conv block 1 kernel size
    final_conv_1_s = 1  #: Final conv block 1 stride
    final_conv_1_p = 1  #: Final conv block 1 padding
    final_conv_2_name = "conv3_3"  #: Final conv block 2 name
    final_conv_2_k = 1  #: Final conv block 2 kernel size
    final_conv_2_s = 1  #: Final conv block 2 stride
    final_conv_2_p = 0  #: Final conv block 2 padding

    def __init__(self, device, **model_kwargs):
        """Delegate construction entirely to the Encoder_Forecaster base."""
        super(EF_ConvLSTM, self).__init__(device, **model_kwargs)

    def _build_encoder_decoder(self):
        """Assemble the per-layer conv specs and ConvLSTM cells.

        Returns (enc_convs, enc_rnns, dec_convs, dec_rnns) where the conv
        entries are OrderedDicts of layer-name -> [in_c, out_c, k, s, p]
        and the rnn entries are ConvLSTM modules.

        NOTE(review): self.enc_rnn_state_h/w, dec_rnn_state_h/w and img_c
        are not defined in this class -- presumably set by the
        Encoder_Forecaster base from the input image size; confirm there.
        """
        # build enc layers and encoder
        layer_in_c = self.img_c
        enc_convs, enc_rnns = [], []
        for n in range(self.num_layers):
            layer_mid_c = self.enc_c[2 * n]
            layer_out_c = self.enc_c[2 * n + 1]
            enc_convs.append(OrderedDict(
                {self.enc_conv_names[n]: [layer_in_c, layer_mid_c, self.enc_conv_k[n],
                                          self.enc_conv_s[n], self.enc_conv_p[n]]}
            ))
            enc_rnns.append(ConvLSTM(device=self.device, in_channels=layer_mid_c, enc_channels=layer_out_c,
                                     state_h=self.enc_rnn_state_h[n], state_w=self.enc_rnn_state_w[n],
                                     kernel_size=self.enc_rnn_k[n], stride=self.enc_rnn_s[n],
                                     padding=self.enc_rnn_p[n]))
            layer_in_c = layer_out_c

        # build dec layers and decoder, including final convs
        dec_convs, dec_rnns = [], []
        for n in range(self.num_layers):
            layer_mid_c = self.dec_c[2 * n]
            layer_out_c = self.dec_c[2 * n + 1]
            dec_rnns.append(ConvLSTM(device=self.device, in_channels=layer_in_c, enc_channels=layer_mid_c,
                                     state_h=self.dec_rnn_state_h[n], state_w=self.dec_rnn_state_w[n],
                                     kernel_size=self.dec_rnn_k[n], stride=self.dec_rnn_s[n],
                                     padding=self.dec_rnn_p[n]))
            dec_conv_dict = {
                self.dec_conv_names[n]: [layer_mid_c, layer_out_c, self.dec_conv_k[n],
                                         self.dec_conv_s[n], self.dec_conv_p[n]]
            }
            # The last decoder layer also maps features back to image channels.
            if n == self.num_layers - 1:
                dec_conv_dict[self.final_conv_1_name] = [layer_out_c, self.final_conv_1_c, self.final_conv_1_k,
                                                         self.final_conv_1_s, self.final_conv_1_p]
                dec_conv_dict[self.final_conv_2_name] = [self.final_conv_1_c, self.img_c, self.final_conv_2_k,
                                                         self.final_conv_2_s, self.final_conv_2_p]
            dec_convs.append(OrderedDict(dec_conv_dict))
            layer_in_c = layer_out_c

        return enc_convs, enc_rnns, dec_convs, dec_rnns
| 53.834862 | 143 | 0.644853 | 5,703 | 0.971881 | 0 | 0 | 0 | 0 | 0 | 0 | 2,441 | 0.415985 |
70e3447c4bcf181b14f0d6dcbb687ab8db72f47b | 560 | py | Python | dataPreparation/src/so_helper.py | boneyag/msr-2022 | 624267325bdfc97bb93c89f205b3df1c3db279ac | [
"CC0-1.0"
] | null | null | null | dataPreparation/src/so_helper.py | boneyag/msr-2022 | 624267325bdfc97bb93c89f205b3df1c3db279ac | [
"CC0-1.0"
] | null | null | null | dataPreparation/src/so_helper.py | boneyag/msr-2022 | 624267325bdfc97bb93c89f205b3df1c3db279ac | [
"CC0-1.0"
] | null | null | null | from bs4 import BeautifulSoup
from bs4 import SoupStrainer
import sys
def get_paragraphs(post_content):
    """Return the text of every paragraph, heading and list item in the post HTML."""
    wanted = SoupStrainer(["p", "h1", "h2", "h3", "h4", "h5", "h6", "li", ])
    soup = BeautifulSoup(post_content, "lxml", parse_only=wanted)
    # Document order is preserved by iterating the strained soup directly.
    return [tag.get_text() for tag in soup]
| 21.538462 | 78 | 0.680357 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.371429 |
70e49ef3673c84333e4a25074d53220adfed4c79 | 23,920 | py | Python | src/xiyanghong.py | 1332927388/- | a470c92d489dd31f580dd9ad9e54783089034219 | [
"Apache-2.0"
] | null | null | null | src/xiyanghong.py | 1332927388/- | a470c92d489dd31f580dd9ad9e54783089034219 | [
"Apache-2.0"
] | null | null | null | src/xiyanghong.py | 1332927388/- | a470c92d489dd31f580dd9ad9e54783089034219 | [
"Apache-2.0"
] | 1 | 2020-06-08T12:14:01.000Z | 2020-06-08T12:14:01.000Z | from iFinDPy import *
import datetime as dt
import time
import datetime
import pandas as pd
import statsmodels.api as sm
import numpy as np
import talib
def initialize(account):
    """Platform init hook: configure strategy state and schedule daily logic."""
    account.a_periods=10 # holding-period cap (days) for the short-line strategy
    account.b_periods=3 # holding-period cap (days) for the limit-up chase strategy
    account.hold={} # symbol -> days held
    account.holdSl=[] # symbols held by the short-line (swing) strategy
    account.holdHb=[] # symbols held by the ultra-short chase strategy
    account.security = '000016.SH' # SSE 50 index used for market risk control
    account.maxStock=20 # daily cap on the number of selected stocks
    account.defend=False # market-wide risk-control flag
    account.N = 20 # look-back window (days) for RSRS input
    account.M = 400 # RSRS indicator M parameter (regression history length)
    #account.execPoint=['1000','1015','1030','1045','1100','1115','1129','1400','1415','1430','1445','1456'] # sell-check times (denser variant)
    account.execPoint=['1000','1030','1100','1129','1400','1430','1456'] # intraday sell-check timestamps (HHMM)
    # iwencai screening condition for the short-line swing strategy
    # (large caps, non-ST, PE>0, close above Boll mid, not new, not suspended)
    condition_sl='''
    股本规模是大盘股,
    股票简称不包含 st,
    市盈率(pe)>0 倍,
    boll<收盘价,
    非新股,
    非停牌
    '''
    # iwencai screening condition for the ultra-short chase strategy
    # (non large caps, limit-up yesterday, not new, non-ST, close above
    # Boll mid, not suspended)
    condition_hb='''
    股本规模不是大盘股,
    昨天涨停,
    非新股,
    股票简称不包含 st,
    boll<收盘价,
    非停牌
    '''
    # Fetch wencai screening results into account attributes:
    # sl: short-line swing candidates
    get_iwencai(condition_sl,"sl")
    # hb: ultra-short chase candidates
    get_iwencai(condition_hb,"hb")
    run_daily(func=dayPart, time_rule='after_open', hours=0, minutes=1)
    return
# 盘前处理函数
def before_trading_start(account,data):
    """Pre-market hook: evaluate daily risk flag and frame the hb candidates."""
    # Daily risk assessment (True -> liquidate everything in dayPart)
    account.defend=secureRatio_d(account,data)
    #account.defend=False
    if account.defend==True:
        log.info("日级风控平仓")
        #account.defend=5
    # Wrap the wencai candidate list in a DataFrame with scoring columns
    # filled in by dayPart (buypoint, market cap, KDJ, order id).
    account.hb=pd.DataFrame(
        {
            "symbol":account.hb,
            "buypoint":np.zeros(len(account.hb)),
            "market":np.zeros(len(account.hb)),
            "kdj":np.zeros(len(account.hb)),
            "order_id":np.zeros(len(account.hb))
        })
    return
def handle_data(account,data):
    """Per-bar hook: at scheduled times, run minute risk check and hb sells."""
    # Only act at configured checkpoints, and only when daily risk is off.
    if get_time() in account.execPoint and account.defend==False:
        if secureRatio_m(account,data)==True:
            # Minute-level risk triggered: liquidate all positions.
            pourAll(account)
        else:
            # b_sellCheck is defined elsewhere in this file -- per-stock
            # sell logic for the ultra-short chase positions.
            for stock in account.holdHb:
                b_sellCheck(stock,data,account)
    return
def dayPart(account,data):
    """Daily main routine: risk gate, expiry sells, then A or B strategy.

    RSRS regime detection picks between the large-cap swing strategy (A)
    and the small-cap limit-up chase strategy (B).
    """
    # Market-wide risk control
    if account.defend==True:
        # Liquidate everything
        pourAll(account)
    else:
        delayCheck(data,account)
        if getTend(account,trade_signal(account,data))==True:
            #pourB(account)
            log.info("偏好大盘")
            # Large-cap regime: run strategy A
            # called shortly after open each day
            for stock in account.sl:
                a_buyCheck(stock,account)
            # Sell A positions matching the exit conditions
            a_sellCheck(account,data)
            # Risk control for A
            a_riskDefend(account)
        else:
            #pourA(account)
            log.info("偏好小盘")
            # Small-cap regime: run strategy B
            # Previous-day risk control
            # NOTE(review): this loop pops from account.holdHb while
            # iterating it, which can skip elements -- iterate a copy.
            for stock in account.holdHb:
                if b_prevRiskDefend(stock,data)==True and stock in account.hold:
                    order(stock,0,style="MarketOrder")
                    log.info("超短前日风控卖出"+stock)
                    account.holdHb.pop(dictLoc(account.holdHb,stock))
                    account.hold.pop(stock)
            # Score candidates during/after the call auction.
            # NOTE(review): DataFrame.ix is removed in modern pandas -- .loc
            # would be the replacement if this ever runs on a newer stack.
            for i in range(len(account.hb)):
                account.hb.ix[i,'buypoint']=b_buyCheck(account.hb.ix[i,'symbol'],data)
                account.hb.ix[i,'market']=b_getMarket(account.hb.ix[i,'symbol'])
                account.hb.ix[i,'kdj']=b_getKDJ(account.hb.ix[i,'symbol'])
            #account.hb=account.hb[account.hb.market<=8000000000]
            # Rank: buypoint desc, KDJ asc; keep the top 5.
            account.hb=account.hb.sort_values(by=['buypoint','kdj'],ascending=[False,True])
            account.hb=account.hb[0:min(5,len(account.hb))]
            # Buy high-priority candidates (buypoint == 1) first
            for i in account.hb.index:
                if account.hb.ix[i,'buypoint']==1 and len(account.holdHb)<15 and account.hb.ix[i,'symbol'] not in account.holdHb:
                    atr = getATR(account.hb.ix[i,'symbol'])
                    if not isNaN(atr[-1]):
                        #amount = min(1200000,int(account.cash/(atr[-1]*20)))
                        stock = account.hb.ix[i,'symbol']
                        log.info("超短高优先买入"+stock)
                        trade_value(stock,min(1000000,500000*atr[-1]))
                        account.holdHb.append(account.hb.ix[i,'symbol'])
            # Then buy low-priority candidates (buypoint == 0)
            for i in account.hb.index :
                if account.hb.ix[i,'buypoint']==0 and len(account.holdHb)<15 and account.hb.ix[i,'symbol'] not in account.holdHb:
                    atr = getATR(account.hb.ix[i,'symbol'])
                    if not isNaN(atr[-1]):
                        #amount = int(account.cash/(atr[-1]*20))
                        trade_value(account.hb.ix[i,'symbol'],min(1000000,500000*atr[-1]))
                        log.info("超短低优先买入"+account.hb.ix[i,'symbol'])
                        account.holdHb.append(account.hb.ix[i,'symbol'])
    return
# 盘后处理函数
def after_trading_end(account, data):
    """Post-market hook: age held positions and drop stale records.

    - increments account.hold day counters for every held symbol
    - removes hold/holdHb/holdSl entries whose symbol is no longer held

    Fixed: the original mutated account.hold while iterating it (which
    raises "dictionary changed size during iteration") and removed items
    from holdHb/holdSl while iterating them (which silently skips
    elements). All loops now work on snapshots / rebuilt lists.
    """
    held = list(account.positions)
    for stock in held:
        if stock not in account.hold:
            account.hold[stock] = 0
        else:
            account.hold[stock] += 1
    for stock in list(account.hold):
        if stock not in held:
            account.hold.pop(stock)
    # Filter in place (slice assignment) to keep the original list objects.
    account.holdHb[:] = [s for s in account.holdHb if s in held]
    account.holdSl[:] = [s for s in account.holdSl if s in held]
    return
# =======================
# Shared utility functions
# --------------------------------------
# All trading goes through these wrappers, which also cancel stale orders.
def trade_target(stock,aim):
    """Order *stock* to a target share count; cancel still-open orders.

    NOTE(review): the guard is `len(orders)>1`, so a single open order is
    never cancelled -- confirm whether `>=1` was intended.
    """
    id=order_target(stock,aim)
    orders = get_open_orders(id)
    if orders and len(orders)>1:
        cancel_order(orders)
    return
def trade_value(stock,value):
    """Order *stock* for a target cash value; returns the order id."""
    id=trade_value_id=order_value(stock,value)
    orders = get_open_orders(id)
    if orders and len(orders)>1:  # same >1 caveat as trade_target
        cancel_order(orders)
    return id
def trade_amount(stock,amount):
    """Order a fixed share amount of *stock*."""
    id=order(stock,amount)
    orders = get_open_orders(id)
    if orders and len(orders)>1:  # same >1 caveat as trade_target
        cancel_order(orders)
    return
def secureRatio_d(account,data):
    """Daily market risk check on the SSE composite ('000001.SH').

    Returns True (defensive) when the daily MA5 has fallen for two
    consecutive bars.
    """
    close = history('000001.SH', ['close'], 20, '1d', False, 'pre').iloc[:,0]
    MA5 = talib.MA(np.array(close), timeperiod=5)
    if((MA5[-1]<MA5[-2])and(MA5[-2]<MA5[-3])):
    #if MA5[-1]<MA5[-2]:
        return True
    else:
        return False
def secureRatio_m(account,data):
    """Intraday market risk check on the SSE composite.

    Defensive (True) when the latest 1-minute quote rate is below -0.8%
    (or missing), or when the 30-minute MA5 just turned down.
    """
    q_rate = history('000001.SH', ['quote_rate'], 20, '1m', False, 'pre')['quote_rate']
    if(len(q_rate)<1 or q_rate[-1]<-0.8):
        return True
    else:
        close = history('000001.SH', ['close'], 20, '30m', False, 'pre').iloc[:,0]
        MA5 = talib.MA(np.array(close), timeperiod=5)
        #if((MA5[-1]<MA5[-2])and(MA5[-2]<MA5[-3])):
        if (MA5[-1]<MA5[-2]):
            return True
        else:
            return False
# NaN detection
def isNaN(params):
    """True iff params is NaN -- NaN is the only value unequal to itself."""
    return not (params == params)
# Liquidate a single stock
# (covers positions from either strategy)
def pourStock(stock,account):
    """Sell the whole position in *stock* and purge it from all records."""
    trade_target(stock,0)
    if stock in account.hold:
        account.hold.pop(stock)
    # Fixed: the original tested `holdHb` here but removed from `holdSl`,
    # raising ValueError whenever a hb-held stock was not also in holdSl
    # and never cleaning up genuine holdSl entries.
    if stock in account.holdSl:
        account.holdSl.remove(stock)
    if stock in account.holdHb:
        account.holdHb.remove(stock)
# Liquidate whole strategy buckets.
# Stocks bought today cannot be sold (T+1), so iteration is over the
# recorded holdings rather than account.positions directly.
def pourA(account):
    """Liquidate every short-line (Sl) position and clear its records."""
    temp=[]
    for i in account.holdSl:
        stock=account.positions[i].symbol
        trade_target(stock,0)
        temp.append(stock)
    # NOTE(review): this wipes ALL hold counters, including hb positions
    # that were not sold here -- confirm this is intentional.
    account.hold={}
    # symmetric difference == holdSl minus the just-sold symbols
    account.holdSl=list(set(account.holdSl) ^ set(temp))
    return
def pourB(account):
    """Liquidate every ultra-short (Hb) position and clear its records."""
    temp=[]
    for i in account.holdHb:
        stock=account.positions[i].symbol
        trade_target(stock,0)
        temp.append(stock)
    # NOTE(review): same full account.hold wipe caveat as pourA.
    account.hold={}
    account.holdHb=list(set(account.holdHb) ^ set(temp))
    return
def pourAll(account):
    """Liquidate every tracked position from both strategies."""
    temp=[]
    for i in account.hold:
        stock=account.positions[i].symbol
        trade_target(stock,0)
        temp.append(stock)
    account.hold={}
    account.holdSl=list(set(account.holdSl) ^ set(temp))
    account.holdHb=list(set(account.holdHb) ^ set(temp))
    return
# Position lookup helper
def dictLoc(dict, value):
    """Return the 0-based position of *value* in the iterable *dict*
    (iterates dict keys when given a mapping), or -1 when absent.
    """
    for idx, item in enumerate(dict):
        if item == value:
            return idx
    return -1
# RSI computation
def getRSI(df):
    """Relative Strength Index from a frame with 'up'/'down' move columns."""
    return 100-100/(1+rs(df))
def rs(df):
    """Relative strength: -(mean up-move)/(mean down-move) over rows 1..end.

    Returns 0 when the mean down-move is exactly zero, avoiding a
    ZeroDivisionError in getRSI. Fixed: the original computed `up`,
    ignored it, and recomputed both means inside the return expression.
    """
    up=df['up'][1:].mean()
    down=df['down'][1:].mean()
    if down == 0:
        return 0
    return -up/down
# ATR computation
def getATR(stock):
    """14-period Average True Range array from the last 20 daily bars."""
    price = history(stock, ['close', 'high', 'low'], 20, '1d', False, 'pre', is_panel=1)
    high = price['high']
    low = price['low']
    close = price['close']
    # Leading entries are NaN until 14 periods accumulate; callers check
    # atr[-1] with isNaN before using it.
    return talib.ATR(np.array(high), np.array(low), np.array(close), timeperiod=14)
def trade_signal(account,data):
    """Compute yesterday's RSRS for HS300 and ZZ500.

    Returns [hs300_RSRS, zz500_RSRS]; getTend() compares the pair to pick
    the large-cap vs small-cap regime.
    """
    N = account.N
    M = account.M
    # RSRS for each benchmark index
    hz300_RSRS = get_RSRS(data,'000300.SH',N,M)
    zz500_RSRS = get_RSRS(data,'000905.SH',N,M)
    return [hz300_RSRS,zz500_RSRS]
# 5. RSRS computation
def get_RSRS(data,stock,n,m):
    """RSRS score for *stock*: z-scored high~low regression slope times R^2.

    For each of the last m windows, regress `high` on `low` (OLS with
    intercept) and record the slope; the score is the latest slope's
    standard score scaled by the latest regression's R-squared.
    """
    values = data.attribute_history(stock,['high','low'],n+m-1,'1d', skip_paused=True)
    high_array = values.high.values[-(n+m-1):]
    low_array = values.low.values[-(n+m-1):]
    scores = np.zeros(m) # slope of each window's regression
    for i in range(m):
        # NOTE(review): the window length is hard-coded to 30 rather than
        # the parameter n (=account.N=20), so late windows are silently
        # truncated to n bars -- confirm whether `i:i+n` was intended.
        high = high_array[i:i+30]
        low = low_array[i:i+30]
        # Single-window regression: low as regressor, high as response.
        x = low
        X = sm.add_constant(x) # add intercept column
        y = high
        model = sm.OLS(y,X) # ordinary least squares
        results = model.fit()
        score = results.params[1]
        scores[i] = score
        # Keep the final window's coefficient of determination.
        if i==m-1:
            R_squared = results.rsquared
    # Standard score of the most recent slope
    z_score = (scores[-1]-scores.mean())/scores.std()
    # RSRS score
    RSRS_socre = z_score*R_squared
    return RSRS_socre
def getTend(account, signals):
    """True when the large-cap RSRS (signals[0]) beats the mid/small-cap
    RSRS (signals[1]); ties favour the small-cap regime.

    `account` is unused but kept for interface compatibility with callers.
    """
    # Dead code removed: the original computed `signal = max(signals)`
    # and never used it.
    return bool(signals[0] > signals[1])
# Detect whether series a crossed above/below series b recently.
def checkthrough(a,b,terms=20):
    """Return (1, i) for an upward cross, (-1, i) for a downward cross
    within the last `terms` bars, or (0, -1) when none is found.

    `i` counts bars back from the latest one where the opposite ordering
    last held. NOTE(review): the scan starts at i=0, which re-compares the
    latest bar against itself-condition and can never trigger; the
    effective lookback therefore begins one bar earlier than `terms`
    suggests -- confirm whether range(1, ...) was intended.
    """
    # a currently above b: look back for a bar where a was below b (up-cross)
    if a[len(a)-1]>b[len(b)-1]:
        for i in range(min(terms-1,len(a)-1,len(b)-1)):
            if a[len(a)-i-1]<b[len(b)-i-1]:
                return 1,i
    # a currently below b: look back for a bar where a was above b (down-cross)
    elif a[len(a)-1]<b[len(b)-1]:
        for i in range(min(terms-1,len(a)-1,len(b)-1)):
            if a[len(a)-i-1]>b[len(b)-i-1]:
                return -1,i
    return 0,-1
#获取 stock 股票过去 terms 期,以 step 为步长的的收盘价
def getHistory(stock, terms, start=0, step='1d'):
    # Closing prices for `terms` bars ending `start` bars ago, at `step` resolution.
    series = history(stock, ['close'], terms + start, step, True, None)['close']
    return series[:terms]
#持仓到期处理
def delayCheck(data,account):
temp=[]
for stock in account.hold:
if (stock in account.holdSl) and (account.hold[stock] > account.a_periods):
log.info("持有到期卖出:"+stock)
trade_target(stock, 0)
if stock in account.hold:
temp.append(stock)
if stock in account.holdSl:
account.holdSl.remove(stock)
for stock in account.hold:
if (stock in account.holdSl) and (account.hold[stock] > account.b_periods):
log.info("持有到期卖出:"+stock)
trade_target(stock, 0)
if stock in account.hold:
temp.append(stock)
if stock in account.holdHb:
account.holdHb.remove(stock)
for stock in temp:
account.hold.pop(stock)
#大盘安全函数
# 时间获取用封装的函数
# 获取当前日期
def get_date():
    # Current backtest/trading date formatted as YYYYMMDD.
    return format(get_datetime(), "%Y%m%d")
# 获取星期几
def get_weekday():
date = get_date()
return datetime.datetime.strptime(date, "%Y%m%d").weekday()+1
# 获取时间
def get_time():
datatime=get_datetime()
datatime=pd.to_datetime(datatime, unit='s')
datatime=datatime.strftime('%Y-%m-%d %H:%M:%S')
timeArray=time.strptime(datatime,"%Y-%m-%d %H:%M:%S")
return time.strftime("%H%M", timeArray)
# =======================
# 策略 A(shortLine)
# --------------------------------------
# 买入判断
def a_buyCheck(stock, account):
    # Score five independent entry conditions (each 0/1); buy when more than
    # three fire and the strategy-A book holds fewer than 10 names.
    checks = (a_condition_MA_b(stock),
              a_condition_Flow_b(stock),
              a_condition_Volume_b(stock),
              a_condition_KDJ_b(stock),
              a_condition_WeekTor_b(stock))
    if sum(checks) > 3 and len(account.holdSl) < 10:
        atr = getATR(stock)
        if not isNaN(atr[-1]):
            # Position size scales with ATR, capped at 1,000,000.
            trade_value(stock, min(1000000, 800000 * atr[-1]))
            account.holdSl.append(stock)  # record the strategy-A holding
            log.info("波段买入" + stock)
    return
# MA 条件
def a_condition_MA_b(stock):
    # 1 when the daily moving averages are bullishly stacked
    # (MA5 > MA10 > MA20), else 0.
    closes = np.array(history(stock, ['open', 'close'], 20, '1d', False, 'pre', is_panel=1)['close'])
    ma5 = talib.MA(closes, timeperiod=5)
    ma10 = talib.MA(closes, timeperiod=10)
    ma20 = talib.MA(closes, timeperiod=20)
    return 1 if ma5[-1] > ma10[-1] and ma10[-1] > ma20[-1] else 0
# 资金净流条件
def a_condition_Flow_b(stock):
    # 1 when main-capital net inflow was positive for both the current week
    # and the previous week, else 0.
    today = get_date()
    weekday = get_weekday()
    # Current week: from last Monday-ish boundary up to today.
    week_start = (get_last_datetime() - datetime.timedelta(days=weekday)).strftime("%Y%m%d")
    this_flow = get_money_flow([stock], week_start, today, ['net_flow_rate'], count=None, is_panel=0)
    this_week = sum(this_flow[stock].net_flow_rate)
    # Previous week: the seven days before the current-week boundary.
    prev_start = (get_last_datetime() - datetime.timedelta(days=weekday + 7)).strftime("%Y%m%d")
    prev_end = (get_last_datetime() - datetime.timedelta(days=weekday + 1)).strftime("%Y%m%d")
    prev_flow = get_money_flow([stock], prev_start, prev_end, ['net_flow_rate'], count=None, is_panel=0)
    last_week = sum(prev_flow[stock].net_flow_rate)
    return 1 if this_week > 0 and last_week > 0 else 0
# 成交量条件
def a_condition_Volume_b(stock):
# 当前周成交量大于上一周成交量的 1.5 倍
date = get_date()
# 获取历史周级数据
weekdata = get_candle_stick(stock, end_date=date, fre_step='week', fields=['volume'], skip_paused=False, fq='pre', bar_count=20, is_panel=1)
volume = weekdata.iloc[:, 0]
if volume[-1] > 1.5*volume[-2]:
return 1
else:
return 0
# KDJ 条件
def a_condition_KDJ_b(stock):
price = history(stock, ['close', 'high', 'low'], 20, '1d', False, 'pre', is_panel=1)
high = price['high']
low = price['low']
close = price['close']
K, D = talib.STOCH(np.array(high), np.array(low),
np.array(close), 9, 3, 0, 3, 0)
if(K[-1] > K[-2]) and (K[-2] < D[-2]) and (K[-1] > D[-1]):
return 1
else:
return 0
# 换手条件
def a_condition_WeekTor_b(stock):
# 周换手率超过 15%,符合则赋值 1,否则赋值 0
j = get_weekday()
price = history(stock, ['turnover_rate'], 20, '1d', False, 'pre', is_panel=1)
turnover_rate = price['turnover_rate']
weektor = sum(turnover_rate[-5:])
if(weektor > 0.15):
return 1
else:
return 0
# --------------------------------------
# 卖出判断
def a_sellCheck(account,data):
    """Strategy-A exit scan: close any short-line holding whose sell score >= 1.

    NOTE(review): account.holdSl is reassigned inside the loop that iterates
    it; the loop keeps walking the original list object, so this works, but
    it is fragile — confirm before refactoring.
    """
    for stock in account.holdSl:
        if stock in account.hold:
            # Sum of four independent sell signals (each returns 0 or 1).
            sellPoint = a_condition_MA_s(stock)+a_condition_Flow_s(stock)+a_condition_RSI_s(stock)+a_condition_Quate_s(stock,data)
            temp=[]
            if sellPoint >= 1:
                if stock in account.holdSl:
                    # Close the position.
                    id=order_target(stock,0)
                    orders = get_open_orders(id)
                    if orders and len(orders)>1:
                        # Order still open — cancel it and keep the holding records.
                        cancel_order(orders)
                    else:
                        log.info("波段卖点卖出"+stock)
                        temp.append(stock)
                        # Symmetric difference drops this stock from holdSl.
                        account.holdSl=list(set(account.holdSl) ^ set(temp))
                        account.hold.pop(stock)
    return
# MA 条件
def a_condition_MA_s(stock):
# 符合 MA5 下穿 MA10
price = history(stock, ['close'],20, '1d', False, 'pre', is_panel=1)
close = price['close']
MA5 = talib.MA(np.array(close), timeperiod=5)
MA10 = talib.MA(np.array(close), timeperiod=10)
if checkthrough(MA5,MA10)[0] ==-1:
return 1
else:
return 0
# 资金净流条件
def a_condition_Flow_s(stock):
# 周主力资金净流入小于 0
delta = datetime.timedelta(days=get_weekday())
start = (get_datetime() - delta).strftime("%Y%m%d")
flow = get_money_flow([stock], start, get_date(), ['net_flow_rate'], count=None, is_panel=0)
this_week = sum(flow[stock].net_flow_rate)
if this_week < 0:
return 1
else:
return 0
# RSI 条件
def a_condition_RSI_s(stock):
# 日线 RSI 出现卖点信号
price = history(stock, ['close'],20, '1d', False, 'pre', is_panel=1)
close = price['close']
RSI1 = talib.RSI(np.array(close), timeperiod=5)
RSI2 = talib.RSI(np.array(close), timeperiod=13)
if(RSI1[-1] < RSI1[-2]) and (RSI1[-1] < RSI2[-1]) and (RSI1[-2] > RSI2[-2]):
return 1
else:
return 0
def a_condition_Quate_s(stock, data):
    """Sell signal: 1 when the open sits more than 3% below the close of two
    bars back; 0 otherwise (including when no history is available).

    Fix: the 'close' history was queried twice with identical arguments
    (once for the length check, once for the value); query once and reuse.
    """
    closes = data.history(stock, 'close', 2, '1d', True, None)[stock]['close']
    if len(closes) > 0:
        begin = closes[0]
        now = data.history(stock, 'open', 2, '1d', True, None)[stock]['open'][0]
        if now / begin < 0.97:
            return 1
        else:
            return 0
    return 0
# --------------------------------------
# 风控
def a_riskDefend(account):
    """Market-level risk control for strategy A: liquidate all short-line
    holdings when the index drops hard or too many stocks are suspended.

    Fixes: iterate over a snapshot of account.holdSl (the original removed
    elements from the list while iterating it, which skips entries) and
    dropped the unused `temp` list.
    """
    condition = 0
    # Index drawdown: -2.5% today, or two consecutive days below -2%.
    quote_rate = history('000001.SH', ['quote_rate'], 10, '1d', False, 'pre', is_panel=1)['quote_rate']
    if len(quote_rate) > 1:
        if quote_rate[-1] < -2.5:
            condition += 1
        if quote_rate[-1] < -2 and quote_rate[-2] < -2:
            condition += 1
    # Suspension sweep: count currently-paused stocks across the market.
    suspension = 0
    for stock in (get_all_securities('stock', get_date()).index):
        # 1m bars so today's suspension flag is seen (1d would show yesterday's).
        paused = history(stock, ['is_paused'], 5, '1m', False, 'pre', is_panel=1)['is_paused']
        if len(paused) > 0 and paused[0] == 1:
            suspension += 1
    if suspension > 20:
        condition += 1
    if condition > 0:
        # Snapshot: holdSl is mutated inside the loop body.
        for stock in list(account.holdSl):
            if stock in account.hold:
                id = order_target(stock, 0)
                orders = get_open_orders(id)
                if orders and len(orders) > 1:
                    cancel_order(orders)
                else:
                    log.info("波段风控卖出" + stock)
                    account.holdSl.remove(stock)
                    account.hold.pop(stock)
# =======================
# 策略 B(hitBoard)
# --------------------------------------
# 买入判断
def b_buyCheck(stock, data):
    # Classify today's opening gap relative to yesterday's close:
    #   -1: avoid (gap above +5% or below -2%)
    #    0: neutral (gap within [-2%, +2%])
    #    1: candidate (gap within (+2%, +5%])
    snapshot = data.current(stock)[stock]
    gap = (snapshot.open - snapshot.prev_close) / snapshot.prev_close
    if gap > 0.05 or gap < -0.02:
        return -1
    if gap <= 0.02:
        return 0
    return 1
# 流通股本
def b_getMarket(stock):
q = query(
factor.date,
# factor.circulating_cap
factor.current_market_cap
).filter(
factor.symbol == stock,
factor.date == get_date()
)
if len(get_factors(q)['factor_current_market_cap'])==0:
return 0
elif get_factors(q)['factor_current_market_cap'][0] is None:
return 0
else:
return get_factors(q)['factor_current_market_cap'][0]
def b_getKDJ(stock):
    # Mean of the latest K, D and J values, where J = 3K - 2D.
    bars = history(stock, ['close', 'high', 'low'], 20, '1d', False, 'pre', is_panel=1)
    K, D = talib.STOCH(np.array(bars['high']), np.array(bars['low']),
                       np.array(bars['close']), 9, 3, 0, 3, 0)
    J = 3 * K - 2 * D
    return (K[-1] + D[-1] + J[-1]) / 3
# --------------------------------------
# 卖出判断
def b_sellCheck(stock, data, account):
    """Strategy-B exit: market-sell `stock` on a risk-control hit, or on any
    of the RSI / turnover / take-profit signals.

    Fixes: removed the unused `amount` local; replaced
    holdHb.pop(dictLoc(holdHb, stock)) with a guarded remove — dictLoc
    returns -1 when the stock is absent, and pop(-1) silently removed the
    wrong (last) element.
    """
    if stock in account.hold:
        if b_riskDefend(stock, data):
            order(stock, 0, style="MarketOrder")
            log.info("超短风控卖出" + stock)
            if stock in account.holdHb:
                account.holdHb.remove(stock)
            account.hold.pop(stock)
        elif b_rsiCheck(stock, data) or b_runtimeTrCheck(stock, data) or b_stopGains(stock, data):
            order(stock, 0, style="MarketOrder")
            log.info("超短卖点卖出" + stock)
            if stock in account.holdHb:
                account.holdHb.remove(stock)
            account.hold.pop(stock)
    return
# 止盈
def b_stopGains(stock,data):
if(len(data.history(stock, 'close', 1, '1d', True, None)[stock]['close']))>0:
begin = data.history(stock, 'close', 1, '1d', True, None)[stock]['close'][0]
v=data.current(stock)[stock]
now=v.open
if now/begin >= 1.09:
# log.info("b_1d")
return True
else:
return False
return True
# rsi 判断
def b_rsiCheck(stock,data):
terms=8
RSI=pd.DataFrame({'rsi1':np.zeros(terms),'rsi2':np.zeros(terms)})
for i in range(terms):
rsi1=getRSI(b_deltaCalc(getHistory(stock,6,i,'60m'),data))
rsi2=getRSI(b_deltaCalc(getHistory(stock,12,i,'60m'),data))
RSI.ix[i,'rsi1']=rsi1
RSI.ix[i,'rsi2']=rsi2
flag=checkthrough(RSI['rsi1'],RSI['rsi2'])[0]
if flag==-1:
return True
else:
return False
#换手率
def b_runtimeTrCheck(stock,data):
#log.info(get_datetime())
if len(data.history(stock,'turnover_rate',1,'1m',True,None)[stock]['turnover_rate'])==0 or len(data.history(stock,'turnover_rate',1,'1d',True,None)[stock]['turnover_rate'])==0:
return False
#前日换手量
else:
exc_y=data.history(stock,'turnover_rate',1,'1d',True,None)[stock]['turnover_rate'][0]/(4*60)
exc_n=data.history(stock,'turnover_rate',1,'1m',True,None)[stock]['turnover_rate'][0]
if exc_n>1.3*exc_y:
return True
else:
return False
#涨跌量的计算
def b_deltaCalc(close,data):
df=pd.DataFrame({'close':close,'up':np.zeros(len(close)),'down':np.zeros(len(close))})
for i in range(len(df)):
if i==0:
df.ix[i,'up']=0
df.ix[i,'down']=0
else:
if df.ix[i,'close']-df.ix[i-1,'close']>0:
df.ix[i,'up']=df.ix[i,'close']-df.ix[i-1,'close']
if df.ix[i,'close']-df.ix[i-1,'close']<0:
df.ix[i,'down']=df.ix[i,'close']-df.ix[i-1,'close']
return df
# --------------------------------------
# 风控
#单日跌幅 2.5%,双日跌 4%
def b_riskDefend(stock, data):
    # Intraday risk control: True on a single-day drop beyond 2.5% (today's
    # open vs yesterday's close) or a two-day drop beyond 3.5% (today's open
    # vs the close from two sessions back). Falls through (returning None)
    # when no close history is available.
    closes = data.history(stock, 'close', 1, '1d', True, None)[stock]['close']
    if len(closes) > 0:
        today_open = data.current(stock)[stock].open
        if today_open / closes[0] < 0.975:
            return True
        two_day_base = data.history(stock, 'close', 2, '1d', True, None)[stock]['close'][0]
        today_open = data.current(stock)[stock].open
        if today_open / two_day_base < 0.965:
            return True
        return False
def b_prevRiskDefend(stock, data):
    # True when the previous session opened more than 3% below the close of
    # two sessions back; False otherwise (including when history is empty).
    closes = data.history(stock, 'close', 2, '1d', True, None)[stock]['close']
    if len(closes) > 0:
        prev_open = data.history(stock, 'open', 2, '1d', True, None)[stock]['open'][0]
        if prev_open / closes[0] < 0.97:
            return True
    return False