# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration describing how inputs will be received at serving time."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
SINGLE_FEATURE_DEFAULT_NAME = 'feature'
SINGLE_RECEIVER_DEFAULT_NAME = 'input'
class ServingInputReceiver(collections.namedtuple('ServingInputReceiver',
['features',
'receiver_tensors'])):
"""A return type for a serving_input_receiver_fn.
The expected return values are:
features: A dict of string to `Tensor` or `SparseTensor`, specifying the
features to be passed to the model.
receiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying
input nodes where this receiver expects to be fed. Typically, this is a
single placeholder expecting serialized `tf.Example` protos.
"""
# TODO(soergel): add receiver_alternatives when supported in serving.
def __new__(cls, features, receiver_tensors):
if features is None:
raise ValueError('features must be defined.')
if not isinstance(features, dict):
features = {SINGLE_FEATURE_DEFAULT_NAME: features}
for name, tensor in features.items():
if not isinstance(name, str):
raise ValueError('feature keys must be strings: {}.'.format(name))
if not (isinstance(tensor, ops.Tensor)
or isinstance(tensor, sparse_tensor.SparseTensor)):
raise ValueError(
'feature {} must be a Tensor or SparseTensor.'.format(name))
if receiver_tensors is None:
raise ValueError('receiver_tensors must be defined.')
if not isinstance(receiver_tensors, dict):
receiver_tensors = {SINGLE_RECEIVER_DEFAULT_NAME: receiver_tensors}
for name, tensor in receiver_tensors.items():
if not isinstance(name, str):
raise ValueError(
'receiver_tensors keys must be strings: {}.'.format(name))
if not isinstance(tensor, ops.Tensor):
raise ValueError(
'receiver_tensor {} must be a Tensor.'.format(name))
return super(ServingInputReceiver, cls).__new__(
cls, features=features, receiver_tensors=receiver_tensors)
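# --- Hedged usage sketch (added, not part of the original module) ---
# A serving_input_receiver_fn that feeds serialized tf.Example protos through a
# single string placeholder, as described in the class docstring above. The
# feature spec and tensor names are illustrative assumptions; the TF 1.x API
# (tf.placeholder, tf.parse_example, tf.FixedLenFeature) is used.
def _example_serving_input_receiver_fn():
  import tensorflow as tf
  feature_spec = {'x': tf.FixedLenFeature(shape=[1], dtype=tf.float32)}
  serialized = tf.placeholder(dtype=tf.string, shape=[None],
                              name='input_example_tensor')
  features = tf.parse_example(serialized, feature_spec)
  return ServingInputReceiver(features=features, receiver_tensors=serialized)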
|
"""empty message
Revision ID: d371705de5f2
Revises: 94d2c442fe79
Create Date: 2017-10-01 13:23:19.220477
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd371705de5f2'
down_revision = '94d2c442fe79'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('last_seen_at', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'last_seen_at')
# ### end Alembic commands ###
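# Hedged usage note (added): apply this migration with `alembic upgrade head`
# (or `alembic upgrade d371705de5f2`); roll it back with
# `alembic downgrade 94d2c442fe79`.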
|
import random
for i in range(20):
print('%05.4f' % random.random(), end=' ')
print()
random.seed(1)
for i in range(20):
print('%05.4f' % random.random(), end=' ')
print()
for i in range(20):
print('%6.4f' %random.uniform(1, 100), end=' ')
print()
for i in range(20):
print(random.randint(-100, 100), end=' ')
print()
for i in range(20):
print(random.randrange(0, 100,5), end=' ')
print()
CitiesList = ['Rome', 'New York', 'London', 'Berlin', 'Moscow', 'Los Angeles', 'Paris', 'Madrid', 'Tokyo', 'Toronto']
for i in range(10):
CitiesItem = random.choice(CitiesList)
print ("Randomly selected item from Cities list is - ", CitiesItem)
DataList = list(range(10, 100, 10))
print("Initial Data List = ", DataList)
DataSample = random.sample(DataList,k=5)
print("Sample Data List = ",DataSample)
|
from project.hardware.hardware import Hardware
class HeavyHardware(Hardware):
def __init__(self, name: str, capacity: int, memory):
Hardware.__init__(self, name, "Heavy", capacity * 2, memory * 0.75)
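# Hedged usage note (added): HeavyHardware passes "Heavy" as the second
# Hardware.__init__ argument (presumably the hardware type), doubles the
# requested capacity and keeps 75% of the requested memory, e.g.
# HeavyHardware("HW1", capacity=200, memory=64) -> capacity 400, memory 48.0.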
|
# A junior magician has picked a secret number.
# He has hidden it in a variable named "numeroSecreto".
# He wants everyone who runs his program to play the Guess the Secret Number game
# and to guess which number he has picked for them.
# Those who don't guess the number will be stuck in an endless loop forever!
# Unfortunately, he doesn't know how to complete the code.
# Your task is to help the magician complete the code in the editor so that the code:
# - asks the user to enter an integer;
# - uses a while loop;
# - checks whether the number entered by the user is the same as the number picked by the magician.
#   If the number chosen by the user is different from the magician's secret number,
#   the user should see the message "Ha ha! You're stuck in my loop!"
#   and be prompted to enter a number again.
#   If the number entered by the user matches the number picked by the magician,
#   the number should be printed on the screen, and the magician should say the following words:
#   "Well done, muggle! You are free now."
# The magician is counting on you! Don't let him down.
numeroSecreto = 777
print(
"""
+==================================+
|  Welcome to my game, muggle!     |
|  Enter an integer number         |
|  and guess what number I've      |
|  picked for you.                 |
|  So,                             |
|  what is the secret number?      |
+==================================+
""")
numero = int(input())
bandera = True
while bandera:
    # check whether the number does NOT match the magician's number
    if numero != numeroSecreto:
        print("Ha ha! You're stuck in my loop!")
        # read the next number
        numero = int(input("What is the secret number?\n"))
    else:
        # the number matches numeroSecreto
        print("You guessed it! The secret number was: " + str(numero))
        print("Well done, muggle! You are free now.")
        bandera = False
|
import numpy as np
import numba as nb
from .matrices import dict_from_matrix, parasail_aa_alphabet, identity_nb_distance_matrix, tcr_nb_distance_matrix
__all__ = ['nb_running_editdistance',
'nb_running_tcrdist']
"""TODO:
- The way that nndist and neighbors is dynamically expanded when it gets full
is not compatible with parallelization. The fact that only some of the distance
computations return a result (d <= R) really doesn't lend itself to simple
for-loop parallelization. These functions are best run on one CPU, with the tasks
for multiple query sequences spread to multiple CPUs using multiprocessing,
or ideally multithreading with shared seqs_mat memory, since numba can release the GIL"""
def nb_running_tcrdist(query_i, seqs_mat, seqs_L, radius, density_est=0.05, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):
"""Compute "tcrdist" distance between two TCR CDR3 sequences. Using default weight, gap penalty, ntrim and ctrim is equivalent to the
original distance published in Dash et al, (2017). By setting ntrim and ctrim to 0 and adjusting the dist_weight, it is also possible
to compute the CDR1/2 loop distances which can be combined with the CDR3 distance for overall distance. See tcrdist2 package for details.
NOTE: the same alphabet must be used to encode the sequences as integer vectors and to create the distance matrix.
Parameters
----------
query_i : int
Index of seqs_mat for the sequence to be compared to all other seqs in seqs_mat
seqs_mat : np.ndarray dtype=int16 [nseqs, seq_length]
        Created by pwsd.seqs2mat with padding to accommodate
sequences of different lengths (-1 padding)
seqs_L : np.ndarray [nseqs]
A vector containing the length of each sequence,
without the padding in seqs_mat
radius : scalar
Maximum threshold distance at which a sequence is included in the returned indices.
density_est : float, [0, 1]
Estimate of the fraction of seqs that are expected to be within the radius. Used to set an initial
size for the vector of neighbor indices. Also used to grow the vector in chunks.
distance_matrix : ndarray [alphabet x alphabet]
        Square symmetric DISTANCE matrix with zeros along the diagonal. A similarity substitution matrix such as BLOSUM62 cannot be used here
(see kernel2dist for a converter). Each element_ij contains the distance between the symbols at position i and j in the symbol
alphabet that was used to create the matrix. The function make_numba_matrix can create this matrix and returns dtype=np.int16
dist_weight : int
Weight applied to the mismatch distances before summing with the gap penalties
gap_penalty : int
Distance penalty for the difference in the length of the two sequences
ntrim/ctrim : int
Positions trimmed off the N-terminus (0) and C-terminus (L-1) ends of the peptide sequence. These symbols will be ignored
in the distance calculation.
fixed_gappos : bool
        If True, insert gaps at a fixed position after the cysteine residue starting the CDR3 (typically position 6).
If False, find the "optimal" position for inserting the gaps to make up the difference in length
Returns
-------
indices : np.ndarray, dtype=np.uint32
Positional indices into seqs_mat of neighbors within radius R
nndists : np.ndarray, dtype=np.uint32
Distances to query seq of neighbors within radius R"""
return _nb_running_tcrdist(query_i, seqs_mat, seqs_L, radius, density_est, distance_matrix, dist_weight, gap_penalty, ntrim, ctrim, fixed_gappos)
@nb.jit(nopython=True, parallel=False)
def _nb_running_tcrdist(query_i, seqs_mat, seqs_L, radius, density_est=0.05, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):
assert seqs_mat.shape[0] == seqs_L.shape[0]
"""Chunk size for allocating array space to hold neighbors: should be a minimum of 100 and a max of seqs_mat.shape[0]"""
chunk_sz = min(max(int((density_est/2) * seqs_mat.shape[0]) + 1, 100), seqs_mat.shape[0])
q_L = seqs_L[query_i]
neighbor_count = 0
neighbors = np.zeros(chunk_sz, dtype=np.uint32)
nndists = np.zeros(chunk_sz, dtype=np.int16)
for seq_i in range(seqs_mat.shape[0]):
s_L = seqs_L[seq_i]
short_len = min(q_L, s_L)
len_diff = abs(q_L - s_L)
tot_gap_penalty = len_diff * gap_penalty
if len_diff == 0:
"""No gaps: substitution distance"""
tmp_dist = 0
for i in range(ntrim, q_L - ctrim):
tmp_dist += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]]
"""if tmp_dist > radius:
break"""
if tmp_dist * dist_weight <= radius:
neighbors[neighbor_count] = seq_i
nndists[neighbor_count] = tmp_dist * dist_weight
neighbor_count += 1
if neighbor_count >= neighbors.shape[0]:
neighbors = np.concatenate((neighbors, np.zeros(chunk_sz, dtype=np.uint32)))
nndists = np.concatenate((nndists, np.zeros(chunk_sz, dtype=np.int16)))
continue
#print(f'quiting1 on {seq_i}: dist={tmp_dist * dist_weight}')
continue
elif tot_gap_penalty > radius:
#print(f'quiting2 on {seq_i}: gap_penalty={tot_gap_penalty}')
continue
if fixed_gappos:
min_gappos = min(6, 3 + (short_len - 5) // 2)
max_gappos = min_gappos
else:
min_gappos = 5
max_gappos = short_len - 1 - 4
while min_gappos > max_gappos:
min_gappos -= 1
max_gappos += 1
min_dist = -1
for gappos in range(min_gappos, max_gappos + 1):
tmp_dist = 0
remainder = short_len - gappos
for n_i in range(ntrim, gappos):
"""n_i refers to position relative to N term"""
tmp_dist += distance_matrix[seqs_mat[query_i, n_i], seqs_mat[seq_i, n_i]]
if tmp_dist * dist_weight + tot_gap_penalty > radius:
#print(f'quiting3 on {seq_i}: dist={tmp_dist * dist_weight + tot_gap_penalty}')
min_dist = tmp_dist
continue
for c_i in range(ctrim, remainder):
"""c_i refers to position relative to C term, counting upwards from C term"""
tmp_dist += distance_matrix[seqs_mat[query_i, q_L - 1 - c_i], seqs_mat[seq_i, s_L - 1 - c_i]]
if tmp_dist < min_dist or min_dist == -1:
min_dist = tmp_dist
tot_distance = min_dist * dist_weight + tot_gap_penalty
if tot_distance <= radius:
neighbors[neighbor_count] = seq_i
nndists[neighbor_count] = tot_distance
neighbor_count += 1
if neighbor_count >= neighbors.shape[0]:
neighbors = np.concatenate((neighbors, np.zeros(chunk_sz, dtype=np.uint32)))
nndists = np.concatenate((nndists, np.zeros(chunk_sz, dtype=np.int16)))
else:
#print(f'quiting4 on {seq_i}: dist={tot_distance}')
pass
return neighbors[:neighbor_count], nndists[:neighbor_count]
def nb_running_editdistance(query_i, seqs_mat, seqs_L, radius, density_est=0.05, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):
"""Computes the Levenshtein edit distance between the query sequence and sequences in seqs_mat.
    Returns a vector of positional indices of seqs that were within the radius of the query seq, and their edit distances.
Parameters
----------
query_i : int
Index of seqs_mat for the sequence to be compared to all other seqs in seqs_mat
seqs_mat : np.ndarray dtype=int16 [nseqs, seq_length]
        Created by pwsd.seqs2mat with padding to accommodate
sequences of different lengths (-1 padding)
seqs_L : np.ndarray [nseqs]
A vector containing the length of each sequence,
without the padding in seqs_mat
radius : scalar
Maximum threshold distance at which a sequence is included in the returned indices.
density_est : float, [0, 1]
Estimate of the fraction of seqs that are expected to be within the radius. Used to set an initial
size for the vector of neighbor indices. Also used to grow the vector in chunks.
distance_matrix : np.ndarray [alphabet, alphabet] dtype=int32
A square distance matrix (NOT a similarity matrix).
Matrix must match the alphabet that was used to create
seqs_mat, where each AA is represented by an index into the alphabet.
gap_penalty : int
Penalty for insertions and deletions in the optimal alignment.
Returns
-------
indices : np.ndarray, dtype=np.uint32
Positional indices into seqs_mat of neighbors within radius R
nndists : np.ndarray, dtype=np.uint32
Distances to query seq of neighbors within radius R"""
return _nb_running_editdistance(query_i, seqs_mat, seqs_L, radius, density_est, distance_matrix, gap_penalty)
@nb.jit(nopython=True, parallel=False)
def _nb_running_editdistance(query_i, seqs_mat, seqs_L, radius, density_est=0.05, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):
assert seqs_mat.shape[0] == seqs_L.shape[0]
q_L = seqs_L[query_i]
mx_L = np.max(seqs_L)
"""Chunk size for allocating array space to hold neighbors: should be a minimum of 100 and a max of seqs_mat.shape[0]"""
chunk_sz = min(max(int((density_est/2) * seqs_mat.shape[0]) + 1, 100), seqs_mat.shape[0])
neighbor_count = 0
neighbors = np.zeros(chunk_sz, dtype=np.uint32)
nndists = np.zeros(chunk_sz, dtype=np.int16)
"""As long as ldmat is big enough to accomodate the largest sequence
its OK to only use part of it for the smaller sequences
NOTE that to create a 2D array it must be created 1D anfd reshaped"""
ldmat = np.zeros(nb.int_(q_L) * nb.int_(mx_L), dtype=np.int16).reshape((q_L, mx_L))
for seq_i in range(seqs_mat.shape[0]):
# query_i = indices[ind_i, 0]
# seq_i = indices[ind_i, 1]
s_L = seqs_L[seq_i]
len_diff = abs(q_L - s_L)
tot_gap_penalty = len_diff * gap_penalty
if len_diff == 0:
"""No gaps: substitution distance
This will make it differ from a strict edit-distance since
the optimal edit-distance may insert same number of gaps in both sequences"""
tmp_dist = 0
for i in range(q_L):
tmp_dist += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]]
if tmp_dist <= radius:
neighbors[neighbor_count] = seq_i
nndists[neighbor_count] = tmp_dist
neighbor_count += 1
if neighbor_count >= neighbors.shape[0]:
neighbors = np.concatenate((neighbors, np.zeros(chunk_sz, dtype=np.uint32)))
nndists = np.concatenate((nndists, np.zeros(chunk_sz, dtype=np.int16)))
#print(f'quiting1 on {seq_i}: dist={tmp_dist}')
continue
elif tot_gap_penalty > radius:
#print(f'quiting2 on {seq_i}: gap_penalty={tot_gap_penalty}')
continue
"""Do not need to re-zero each time"""
# ldmat = np.zeros((q_L, s_L), dtype=np.int16)
for row in range(1, q_L):
ldmat[row, 0] = row * gap_penalty
for col in range(1, s_L):
ldmat[0, col] = col * gap_penalty
for col in range(1, s_L):
for row in range(1, q_L):
ldmat[row, col] = min(ldmat[row-1, col] + gap_penalty,
ldmat[row, col-1] + gap_penalty,
ldmat[row-1, col-1] + distance_matrix[seqs_mat[query_i, row-1], seqs_mat[seq_i, col-1]]) # substitution
if ldmat[row, col] <= radius:
"""Means that the nested loops finished withour BREAKing"""
neighbors[neighbor_count] = seq_i
nndists[neighbor_count] = ldmat[row, col]
neighbor_count += 1
if neighbor_count >= neighbors.shape[0]:
neighbors = np.concatenate((neighbors, np.zeros(chunk_sz, dtype=np.uint32)))
nndists = np.concatenate((nndists, np.zeros(chunk_sz, dtype=np.int16)))
else:
pass
#print(f'quiting3 on {seq_i}: dist={ldmat[row, col]}')
return neighbors[:neighbor_count], nndists[:neighbor_count]
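# --- Hedged usage sketch (added, not part of the original module) ---
# Illustrates the expected call pattern: sequences are encoded as integer
# vectors with -1 padding (normally via pwsd.seqs2mat; the inline encoder below
# stands in for it and assumes parasail_aa_alphabet is an indexable sequence of
# amino-acid symbols).
def _example_radius_search():
    seqs = ['CASSLAPGATNEKLFF', 'CASSLAPGATNEKLF', 'CASSIRSSYEQYF']
    seqs_L = np.array([len(s) for s in seqs], dtype=np.int64)
    seqs_mat = -1 * np.ones((len(seqs), seqs_L.max()), dtype=np.int16)
    for i, s in enumerate(seqs):
        seqs_mat[i, :len(s)] = [parasail_aa_alphabet.index(aa) for aa in s]
    # Neighbors of the first sequence within an edit distance of 2
    return nb_running_editdistance(query_i=0, seqs_mat=seqs_mat,
                                   seqs_L=seqs_L, radius=2)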
|
#!/usr/bin/env python
import os
import sys
import click
from credstash import getSecret, listSecrets
from jinja2 import Environment, FileSystemLoader
def render_with_credentials(file):
"""Render file argument with credstash credentials
Load file as jinja2 template and render it with context where keys are
credstash keys and values are credstash values
Args:
file (str): jinja2 template file path
Returns:
str: Rendered string
"""
env = Environment(loader=FileSystemLoader(os.path.dirname(file)))
template = env.get_template(os.path.basename(file))
context = {secret['name']: getSecret(secret['name'])
for secret in listSecrets()}
return template.render(**context)
@click.command()
@click.argument('file')
def main(file):
"""Output rendered template
Args:
file (str): jinja2 template file path
"""
sys.stdout.write(render_with_credentials(file))
if __name__ == '__main__':
main()
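# Hedged usage sketch (added; the script filename is an assumption):
#   $ credstash put db_password hunter2
#   $ echo 'password: {{ db_password }}' > config.yml.j2
#   $ python render_credentials.py config.yml.j2 > config.yml
# Every credstash secret name becomes a template variable, so
# {{ db_password }} is replaced with the stored value.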
|
import datetime
from tkinter import *
from tkinter import ttk
import bq_method
from dateutil.relativedelta import relativedelta
setting = {}
ws = Tk()
ws.title("GUI data olap")
# ws.geometry('500x500')
ws["bg"] = "gray26"
def verify(bqjsonservicefile="polar.json", bqdataset="DB2019", bqtable="ozon_wb_1c"):
setting["bqjsonservicefile"] = bqjsonservicefile
setting["bqdataset"] = bqdataset
setting["bqtable"] = bqtable
    b1 = ttk.Button(text="Verify last month")
b1.bind("<Button-1>", verify_last_month)
b1.pack(side=LEFT)
b2 = ttk.Button(text="Verify last week")
b2.bind("<Button-1>", verify_last_week)
b2.pack(side=LEFT)
ws.mainloop()
def verify_last_month(b):
print("month")
filterList = []
date_start = datetime.date.today() - relativedelta(months=1)
date_end = datetime.date.today() - relativedelta(days=1)
unitlist = '("OZON", "WILDBERRIES", "ЯНДЕКС")'
field = "Unit"
filterList.append(
{
"fieldname": field,
"operator": "in",
"value": unitlist,
}
)
filterList.append(
{
"fieldname": "date",
"operator": ">=",
"value": date_start.strftime("%Y-%m-%d"),
}
)
filterList.append(
{
"fieldname": "date",
"operator": "<=",
"value": date_end.strftime("%Y-%m-%d"),
}
)
operationlist = ("Продажи WB-OZON-YM ЛК", "Продажи WB-OZON ЛК (old)")
field = "Operation"
querytotal = ""
fieldlist = "date,day,week,month,year,Operation,Unit,Value,0 as ValueOld"
for oper in operationlist:
if querytotal != "":
querytotal = querytotal + " UNION ALL "
fieldlist = "date,day,week,month,year,Operation,Unit,0,Value"
newfilter = filterList.copy()
newfilter.append(
{
"fieldname": "Operation",
"operator": "=",
"value": oper,
}
)
query = bq_method.get_selectquery_for_table(
setting["bqjsonservicefile"],
setting["bqdataset"],
setting["bqtable"],
newfilter,
fieldlist,
)
querytotal = querytotal + query
querytotal = (
"select date,day,week,month,year,Unit,Sum(Value) Value,Sum(ValueOld) ValueOld from ("
+ querytotal
+ ") as grp Group by date,day,week,month,year,Unit Order by Unit,date"
)
resultquery = bq_method.SelectQuery(
setting["bqjsonservicefile"],
setting["bqdataset"],
setting["bqtable"],
filterList,
querytotal,
)
game_frame = Frame(ws)
game_frame.pack(expand=True, fill="y")
my_game = ttk.Treeview(game_frame)
my_game["columns"] = ("Period", "Unit", "Value_LK", "Value_LK_OLD", "DeltaPercent")
my_game.column("#0", width=0, stretch=NO)
my_game.column("Period", anchor=CENTER, width=80)
my_game.column("Unit", anchor=CENTER, width=80)
my_game.column("Value_LK", anchor=CENTER, width=80)
my_game.column("Value_LK_OLD", anchor=CENTER, width=80)
my_game.column("DeltaPercent", anchor=CENTER, width=80)
my_game.heading("#0", text="", anchor=CENTER)
my_game.heading("Period", text="Period", anchor=CENTER)
my_game.heading("Unit", text="Unit", anchor=CENTER)
my_game.heading("Value_LK", text="Value LK", anchor=E)
my_game.heading("Value_LK_OLD", text="Value manual", anchor=E)
my_game.heading("DeltaPercent", text="Delta, %", anchor=E)
count = 0
for row in resultquery:
tag = "normal"
DeltaPercent = "N/A"
if row.ValueOld != 0.0:
if row.Value != 0.0:
DeltaPercent = 100 - row.ValueOld / row.Value * 100
if DeltaPercent > 10 or DeltaPercent < -10:
tag = "red"
my_game.insert(
parent="",
index="end",
iid=count,
text="",
values=(row.date, row.Unit, row.Value, row.ValueOld, DeltaPercent),
tags=tag,
)
count = count + 1
my_game.tag_configure("red", background="red")
my_game.pack(expand=True, fill="y")
ws.mainloop()
pass
def verify_last_week(b):
pass
if __name__ == "__main__":
verify()
|
"""
"""
from typing import List, Dict
import time
import random
import requests
from requests_middleware.base import Base
class SmartRequests(Base):
SLEEP_TIME = 30
def __init__(self, proxies: List[Dict[str, str]], use_proxy: bool) -> None:
super().__init__(proxies=proxies)
self.use_proxy = use_proxy
def get(self, url: str, cookies: Dict[str, str]= {}, params: Dict[str, str]= {}, headers: Dict[str, str] = {}, timeout: int = 15, verify: bool = True) -> requests.Response:
proxy = random.choice(self.proxies)
try:
response = requests.get(
url=url,
headers=headers,
params=params,
cookies=cookies,
timeout=timeout,
verify=verify,
                proxies=proxy if self.use_proxy else None,
)
return response
except Exception as e:
self.proxy_management[proxy] = time.time()
raise e
|
import pygame
import math
import os
import palletts as p
import accessors as a
class text:
def __init__(self, color=-1, font=a.monoid, fontSize=10, text=-1):
self.color = color
self.font = self.return_font(font)
self.fontSize = fontSize
self.text = text
self.parent = None
self.rendering = True
self.default = False
def return_font(self, font):
dir_path = os.path.dirname(os.path.realpath(__file__))
p = dir_path + font
if os.path.exists( p ):
return p
elif os.path.exists( font ):
return font
return dir_path + a.monoid
def __reinit__(self, parent):
self.parent = parent
if self.color == -1:
self.default = True
self.color = parent.pallett.text_RGB
def __color__(self, color):
if color == - 1 or not self.default:
return self.color
else:
return color
def __text__(self, text):
if self.text == -1:
return text
else:
return self.text
def render(self, message, pos, color=-1, alignmentX="center", alignmentY="center"):
if self.rendering:
font = pygame.font.Font(self.font, self.fontSize)
text = font.render(self.__text__(message),
True, self.__color__(color))
textRect = text.get_rect()
y = self.parent.graph.handler.height - pos[1]
textRect.center = (pos[0], y)
if alignmentX == "left":
textRect.left = pos[0]
elif alignmentX == "right":
textRect.right = pos[0]
if alignmentY == "top":
textRect.top = y
elif alignmentY == "bottom":
textRect.bottom = y
self.parent.graph.handler.surface.blit(text, textRect)
# USER FUNCTIONS
def update_visibility(self, vis):
self.rendering = vis
return self.parent
class line:
def __init__(self, color=-1, stroke=1):
self.color = color
self.stroke = stroke
self.parent = None
self.rendering = True
self.default = False
def __reinit__(self, parent):
self.parent = parent
if self.color == -1:
self.default = True
self.color = parent.pallett.text_RGB
def __color__(self, color):
if color == - 1 or not self.default:
return self.color
else:
return color
def render(self, p, p2, color=-1):
if self.rendering:
pos = (p[0], self.parent.graph.handler.height - p[1])
pos2 = (p2[0], self.parent.graph.handler.height - p2[1])
pygame.draw.line(self.parent.graph.handler.screen,
self.__color__(color), pos, pos2, self.stroke)
# USER FUNCTIONS
def update_visibility(self, vis):
self.rendering = vis
return self.parent
class point:
def __init__(self, color=-1, radius=2, stroke=0, stroke_color=-1, shape=-1):
self.color = color
self.radius = radius
self.stroke = stroke
self.stroke_color = stroke_color
self.shape = shape
self.handler = None
self.parent = None
self.rendering = True
self.default = False
def __reinit__(self, parent):
self.parent = parent
self.handler = parent.graph.handler
if self.stroke_color == -1:
self.stroke_color = parent.pallett.text_RGB
if self.color == -1:
self.default = True
self.color = parent.pallett.prim_RGB
def __color__(self, color):
if color == - 1 or not self.default:
return self.color
else:
return color
def __render_polygon__(self, center, color):
coords = []
outlines = []
for side in range(0, self.shape):
theta = 2 * math.pi * (side / self.shape)
point = ((math.cos(theta) * self.radius),
(math.sin(theta) * self.radius))
outline = (math.cos(theta) * (self.radius + self.stroke),
math.sin(theta) * (self.radius + self.stroke))
coords.append((point[0]+center[0], point[1]+center[1]))
outlines.append((outline[0]+center[0], outline[1]+center[1]))
pygame.draw.polygon(self.handler.surface, self.stroke_color, outlines)
pygame.draw.polygon(self.handler.surface, color, coords)
def render(self, p, color=-1):
if self.rendering:
pos = (p[0], self.handler.height - p[1])
if self.shape == -1:
pygame.draw.circle(
self.handler.surface, self.stroke_color, pos, self.radius + self.stroke)
pygame.draw.circle(self.handler.surface,
self.__color__(color), pos, self.radius)
else:
self.__render_polygon__(pos, self.__color__(color))
# USER FUNCTIONS
def update_visibility(self, vis):
self.rendering = vis
return self.parent
class domain:
def __init__(self, pos, size, padding=(75, 30), parent=None, pallett=p.green_tea):
self.pos = pos
self.size = size
self.padding = padding
self.pallett = pallett
self.parent = parent
self.rendering = True
def __reinit__(self, parent):
self.parent = parent
self.pallett = parent.pallett
def render(self):
if self.rendering:
rect = pygame.Rect(
self.pos[0] - self.padding[0],
self.pos[1] + self.size[1] + self.padding[1],
self.size[0] + (self.padding[0] * 2),
self.size[1] + (self.padding[1] * 2)
)
self.parent.graph.handler.render_rect(rect, self.pallett.back_RGB)
# User Functions
def update_padding(self, padding):
self.padding = padding
return self.parent
def update_visibility(self, vis):
self.rendering = vis
return self.parent
default_domain = domain(
pos=(100, 100),
size=(500, 500),
)
|
import torch.nn as nn
import torch
class SelfTrainLoss(nn.Module):
def __init__(self):
super(SelfTrainLoss, self).__init__()
self.l1_loss = nn.L1Loss()
self.mse_loss = nn.MSELoss()
self.is_train = False
self.iteres = {
'self_supervised_common_mix': 0,
'self_supervised_upper_mix': 0,
'self_supervised_lower_mix': 0,
'self_supervised_fusion_mix': 0,
'total_loss': 0
}
def inital_losses(self, b_input_type, losses, compute_num):
if 'self_supervised_mixup' in b_input_type:
tmp = b_input_type.count('self_supervised_mixup')
losses['self_supervised_common_mix'] = 0
compute_num['self_supervised_common_mix'] = tmp
losses['self_supervised_upper_mix'] = 0
compute_num['self_supervised_upper_mix'] = tmp
losses['self_supervised_lower_mix'] = 0
compute_num['self_supervised_lower_mix'] = tmp
losses['self_supervised_fusion_mix'] = 0
compute_num['self_supervised_fusion_mix'] = tmp
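    # Descriptive note (added): annealing schedule used in forward(), keyed on
    # self.iteres['total_loss'] (the number of completed forward passes):
    #   * < 3000 passes: the common/upper/lower predictions are hard-masked by
    #     their region masks before the L1 losses are computed.
    #   * >= 3000 passes: a soft blend a * pred + (1 - a) * masked_pred, where
    #     a = (min(iter, 7000) / 7000) ** 2 * 0.15, further scaled by 0.1 after
    #     40000 passes.
    #   * the fusion L1 loss is zero until 17000 passes, then weighted by
    #     (min(iter - 10000, 14000) / 14000) ** 2.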
def forward(self, img1, img2, gt_img, common_part, upper_part, lower_part, fusion_part, b_input_type):
losses = {}
compute_num = {}
losses['total_loss'] = 0
self.inital_losses(b_input_type, losses, compute_num)
for index, input_type in enumerate(b_input_type):
common_part_i = common_part[index].unsqueeze(0)
upper_part_i = upper_part[index].unsqueeze(0)
lower_part_i = lower_part[index].unsqueeze(0)
fusion_part_i = fusion_part[index].unsqueeze(0)
img1_i = img1[index].unsqueeze(0)
img2_i = img2[index].unsqueeze(0)
gt_i = gt_img[index].unsqueeze(0)
if input_type == 'self_supervised_mixup':
mask1 = gt_i[:, 0:1, :, :]
mask2 = gt_i[:, 1:2, :, :]
gt_img1_i = gt_i[:, 2:5, :, :]
gt_img2_i = gt_i[:, 5:8, :, :]
common_mask = ((mask1 == 1.) & (mask2 == 1.)).float()
gt_common_part = common_mask * gt_img1_i
gt_upper_part = (mask1 - common_mask).abs() * gt_img1_i
gt_lower_part = (mask2 - common_mask).abs() * gt_img2_i
if self.iteres['total_loss'] < 3000:
common_part_pre = common_part_i * common_mask
upper_part_pre = upper_part_i * (mask1 - common_mask).abs()
lower_part_pre = lower_part_i * (mask2 - common_mask).abs()
common_part_post = 0
upper_part_post = 0
lower_part_post = 0
else:
annel_alpha = min(self.iteres['total_loss'], 7000) / 7000
annel_alpha = annel_alpha ** 2
annel_alpha = annel_alpha * 0.15
lower_annel_beta = 1
if self.iteres['total_loss'] > 40000:
annel_alpha *= 0.1
common_part_pre = common_part_i * annel_alpha + common_part_i * common_mask * (1 - annel_alpha)
upper_part_pre = upper_part_i * annel_alpha + upper_part_i * (mask1 - common_mask).abs() * (1 - annel_alpha)
lower_part_pre = lower_part_i * annel_alpha * lower_annel_beta + lower_part_i * (mask2 - common_mask).abs() * (1 - annel_alpha * lower_annel_beta)
self_supervised_common_mix_loss = self.l1_loss(common_part_pre, gt_common_part) #\
losses['self_supervised_common_mix'] += self_supervised_common_mix_loss #+ self_supervised_common_mix_loss_a_channel
self_supervised_upper_mix_loss = self.l1_loss(upper_part_pre, gt_upper_part) #+ 5 * \
losses['self_supervised_upper_mix'] += self_supervised_upper_mix_loss #+ self_supervised_upper_mix_loss_a_channel
self_supervised_lower_mix_loss = self.l1_loss(lower_part_pre, gt_lower_part) #+ 5 * \
losses['self_supervised_lower_mix'] += self_supervised_lower_mix_loss #+ self_supervised_lower_mix_loss_a_channel
if self.iteres['total_loss'] >= 17000:
annel_beta = min(self.iteres['total_loss'] - 10000, 14000) / 14000
annel_beta = annel_beta ** 2
self_supervised_fusion_mix_loss = 1 * self.l1_loss(gt_img1_i, fusion_part_i) * annel_beta
#+ 0 * self.ssim_loss(gt_img1_i, fusion_part_i))
else:
self_supervised_fusion_mix_loss = torch.tensor(0.0).cuda()
losses['self_supervised_fusion_mix'] += self_supervised_fusion_mix_loss
losses['total_loss'] += self_supervised_common_mix_loss + self_supervised_upper_mix_loss \
+ self_supervised_lower_mix_loss + self_supervised_fusion_mix_loss #\
for k, v in losses.items():
if k in self.iteres.keys():
self.iteres[k] += 1
if k != 'total_loss':
losses[k] = v / compute_num[k]
return losses, self.iteres.copy()
|
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import os
import sys
import threading
from ._types import str_cls, type_name
from .errors import LibraryNotFoundError
__version__ = '0.17.2'
__version_info__ = (0, 17, 2)
_backend_lock = threading.Lock()
_module_values = {
'backend': None,
'backend_config': None
}
def backend():
"""
:return:
A unicode string of the backend being used: "openssl", "osx", "win"
"""
if _module_values['backend'] is not None:
return _module_values['backend']
with _backend_lock:
if _module_values['backend'] is not None:
return _module_values['backend']
if sys.platform == 'win32':
_module_values['backend'] = 'win'
elif sys.platform == 'darwin':
_module_values['backend'] = 'osx'
else:
_module_values['backend'] = 'openssl'
return _module_values['backend']
def _backend_config():
"""
:return:
A dict of config info for the backend. Only currently used by "openssl",
        it may contain zero or more of the following keys:
- "libcrypto_path"
- "libssl_path"
"""
if backend() != 'openssl':
return {}
if _module_values['backend_config'] is not None:
return _module_values['backend_config']
with _backend_lock:
if _module_values['backend_config'] is not None:
return _module_values['backend_config']
_module_values['backend_config'] = {}
return _module_values['backend_config']
def use_openssl(libcrypto_path, libssl_path, trust_list_path=None):
"""
Forces using OpenSSL dynamic libraries on OS X (.dylib) or Windows (.dll),
or using a specific dynamic library on Linux/BSD (.so).
This can also be used to configure oscrypto to use LibreSSL dynamic
libraries.
This method must be called before any oscrypto submodules are imported.
:param libcrypto_path:
A unicode string of the file path to the OpenSSL/LibreSSL libcrypto
dynamic library.
:param libssl_path:
A unicode string of the file path to the OpenSSL/LibreSSL libssl
dynamic library.
:param trust_list_path:
An optional unicode string of the path to a file containing
OpenSSL-compatible CA certificates in PEM format. If this is not
provided and the platform is OS X or Windows, the system trust roots
will be exported from the OS and used for all TLS connections.
:raises:
ValueError - when one of the paths is not a unicode string
OSError - when the trust_list_path does not exist on the filesystem
        oscrypto.errors.LibraryNotFoundError - when one of the paths does not exist on the filesystem
RuntimeError - when this function is called after another part of oscrypto has been imported
"""
if not isinstance(libcrypto_path, str_cls):
raise ValueError('libcrypto_path must be a unicode string, not %s' % type_name(libcrypto_path))
if not isinstance(libssl_path, str_cls):
raise ValueError('libssl_path must be a unicode string, not %s' % type_name(libssl_path))
if not os.path.exists(libcrypto_path):
raise LibraryNotFoundError('libcrypto does not exist at %s' % libcrypto_path)
if not os.path.exists(libssl_path):
raise LibraryNotFoundError('libssl does not exist at %s' % libssl_path)
if trust_list_path is not None:
if not isinstance(trust_list_path, str_cls):
raise ValueError('trust_list_path must be a unicode string, not %s' % type_name(trust_list_path))
if not os.path.exists(trust_list_path):
raise OSError('trust_list_path does not exist at %s' % trust_list_path)
with _backend_lock:
if _module_values['backend'] is not None:
raise RuntimeError('Another part of oscrypto has already been imported, unable to force use of OpenSSL')
_module_values['backend'] = 'openssl'
_module_values['backend_config'] = {
'libcrypto_path': libcrypto_path,
'libssl_path': libssl_path,
'trust_list_path': trust_list_path,
}
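# Hedged usage sketch (added, not part of the original module): forcing the
# OpenSSL backend before any other oscrypto submodule is imported, as the
# use_openssl() docstring requires. The library paths are illustrative.
#
#   import oscrypto
#   oscrypto.use_openssl('/usr/local/lib/libcrypto.dylib',
#                        '/usr/local/lib/libssl.dylib')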
|
import asyncio
import random
import discord
import youtube_dl
from discord import Embed, Colour
from discord.ext import commands
from discord_slash import cog_ext
from discord_slash.utils.manage_commands import create_option, create_choice
class Music(commands.Cog):
def __init__(self, client):
"""Initialisation client"""
self.client = client
self.FFMPEG_OPTIONS = {
'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',
'options': '-vn'
}
self.songs, self.current = [], ""
@cog_ext.cog_slash(name="clearplaylist", description="Clear Music Playlist")
async def clearplaylist(self, ctx):
"""Clear Music Playlist"""
if (
ctx.author.voice is None
or ctx.author.voice.channel != ctx.voice_client.channel
):
await ctx.send(
embed=Embed(
title="**Parzibot** isn't connected to your **Voice Channel**",
color=Colour(0xd95959)
)
)
        elif self.songs:
self.songs = []
discord.utils.get(self.client.voice_clients, guild=ctx.guild).stop()
await ctx.send(
embed=Embed(
title="**The Playlist** has been cleared",
color=Colour(0x59d9b9)
)
)
else:
await ctx.send(
embed=Embed(
title="**The Playlist** is empty",
color=Colour(0x59d9b9)
)
)
@cog_ext.cog_slash(name="join", description="Parzibot Joins to Your Current Voice Channel")
async def join(self, ctx):
"""Parzibot Joins to Your Current Voice Channel"""
voice = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
        if ctx.author.voice is not None and (voice is None or not voice.is_connected()):
channel = ctx.author.voice.channel
await channel.connect()
await ctx.send(
embed=Embed(
title="**Parzibot** has been connected to **Voice Channel**",
color=Colour(0x59d9b9)
)
)
else:
await ctx.send(
embed=Embed(
title="**Parzibot** already connected to **Voice Channel**",
color=Colour(0xd95959)
)
)
@cog_ext.cog_slash(name="leave", description="Parzibot Leaves from Your Current Voice Channel")
async def leave(self, ctx):
"""Parzibot Leaves Your Current Voice Channel"""
voice = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
if ctx.author.voice is None or (
ctx.author.voice.channel != ctx.voice_client.channel or voice.is_connected() is None):
await ctx.send(
embed=Embed(
title="**Parzibot** isn't connected to your **Voice Channel**",
color=Colour(0xd95959)
)
)
else:
self.songs, self.current = [], ""
await voice.disconnect()
await ctx.send(
embed=Embed(
title="**Parzibot** has left **Voice Channel**",
color=Colour(0x59d9b9)
))
@cog_ext.cog_slash(
name="musichelp",
description="The List of Parzibot Music Commands",
options=[
create_option(
name="command",
description="The Help Message for Specific Music Command",
option_type=3,
required=False,
choices=[
create_choice(name="clearplaylist", value="clearplaylist"),
create_choice(name="join", value="join"),
create_choice(name="leave", value="leave"),
create_choice(name="musichelp", value="musichelp"),
create_choice(name="next", value="next"),
create_choice(name="pause", value="pause"),
create_choice(name="play", value="play"),
create_choice(name="playlist", value="playlist"),
create_choice(name="replay", value="replay"),
create_choice(name="resume", value="resume"),
create_choice(name="shuffle", value="shuffle"),
create_choice(name="stop", value="stop")
])
])
async def musichelp(self, ctx, command=None):
"""The List of Parzibot Music Commands"""
if command is None:
await ctx.send(
embed=Embed(
title=f"Music commands",
description=(
' - **/clearplaylist** - Clear Music Playlist\n'
' - **/join** - Parzibot Joins to Your Current Voice Channel\n'
' - **/leave** - Parzibot Leaves Your Current Voice Channel\n'
' - **/musichelp** `command` - The List of Parzibot Music Commands\n'
' - **/next** - Play The Next Song in The Playlist\n'
' - **/pause** - Pause The Current Song\n'
' - **/play** `url` - Play The Song in The Current Voice Channel\n'
' - **/playlist** - The Number of Songs in The Playlist\n'
' - **/replay** - Replay The Current Song\n'
' - **/resume** - Resume The Current Song\n'
' - **/shuffle** - Shuffle The Playlist of Songs\n'
' - **/stop** - Stop The Current Song'
),
color=Colour(0x59d9b9)
)
)
elif command == "clearplaylist":
await ctx.send(
embed=Embed(
title="**/clearplaylist** command - Clear Music Playlist",
description=(
'**Syntax:** **/clearplaylist**'
),
color=Colour(0x59d9b9)
)
)
elif command == "join":
await ctx.send(
embed=Embed(
title="**/join** command - Parzibot Joins to Your Current Voice Channel",
description=(
'**Syntax:** **/join**'
),
color=Colour(0x59d9b9)
)
)
elif command == "leave":
await ctx.send(
embed=Embed(
title="**/leave** command - Parzibot Leaves Your Current Voice Channel",
description=(
'**Syntax:** **/leave**'
),
color=Colour(0x59d9b9)
)
)
elif command == "musichelp":
await ctx.send(
embed=Embed(
title="**/musichelp** command - The List of Parzibot Music Commands",
description=(
'**Syntax:** **/musichelp** `command`\n'
'**Options:** `command` - The Help Message for Specific Music Command **(Optional)**'
),
color=Colour(0x59d9b9)
)
)
elif command == "next":
await ctx.send(
embed=Embed(
title="**/next** command - Play The Next Song in The Playlist",
description=(
'**Syntax:** **/next**'
),
color=Colour(0x59d9b9)
)
)
elif command == "pause":
await ctx.send(
embed=Embed(
title="**/pause** command - Pause The Current Song",
description=(
'**Syntax:** **/pause**'
),
color=Colour(0x59d9b9)
)
)
elif command == "play":
await ctx.send(
embed=Embed(
title="**/play** command - Play The Song in The Current Voice Channel",
description=(
'**Syntax:** **/play** `url`\n'
'**Options:** `url` - YouTube Video URL **(Required)**'
),
color=Colour(0x59d9b9)
)
)
elif command == "playlist":
await ctx.send(
embed=Embed(
title="**/playlist** command - The Number of Songs in The Playlist",
description=(
'**Syntax:** **/playlist**'
),
color=Colour(0x59d9b9)
)
)
elif command == "replay":
await ctx.send(
embed=Embed(
title="**/replay** command - Replay The Current Song",
description=(
'**Syntax:** **/replay**'
),
color=Colour(0x59d9b9)
)
)
elif command == "resume":
await ctx.send(
embed=Embed(
title="**/resume** command - Resume The Current Song",
description=(
'**Syntax:** **/resume**'
),
color=Colour(0x59d9b9)
)
)
elif command == "shuffle":
await ctx.send(
embed=Embed(
title="**/shuffle** command - Shuffle The List of Songs",
description=(
'**Syntax:** **/shuffle**'
),
color=Colour(0x59d9b9)
)
)
elif command == "stop":
await ctx.send(
embed=Embed(
title="**/stop** command - Stop The Current Song",
description=(
'**Syntax:** **/stop**'
),
color=Colour(0x59d9b9)
)
)
@cog_ext.cog_slash(name="next", description="Play The Next Song in The Playlist")
async def next(self, ctx):
"""Play The Next Song in The Playlist"""
if (
ctx.author.voice is None
or ctx.author.voice.channel != ctx.voice_client.channel
):
await ctx.send(
embed=Embed(
title="**Parzibot** isn't connected to your **Voice Channel**",
color=Colour(0xd95959)
)
)
return
voice = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
voice.stop()
if self.songs: await self.play_song(ctx)
else:
await ctx.send(
embed=Embed(
title="**The Playlist** is empty",
color=Colour(0x59d9b9)
)
)
@cog_ext.cog_slash(name="pause", description="Pause The Current Song")
async def pause(self, ctx):
"""Pause The Current Song"""
if ctx.author.voice is None or ctx.author.voice.channel != ctx.voice_client.channel:
await ctx.send(
embed=Embed(
title="**Parzibot** isn't connected to your **Voice Channel**",
color=Colour(0xd95959)
)
)
return
voice = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
if voice.is_playing():
voice.pause()
await ctx.send(
embed=Embed(
title="**The Song** has been paused",
color=Colour(0x59d9b9)
)
)
else:
await ctx.send(
embed=Embed(
title="**The Song** isn't playing right now",
color=Colour(0x59d9b9)
)
)
@cog_ext.cog_slash(
name="play",
description="Play The Song in The Current Voice Channel",
options=[
create_option(
name="url",
description="YouTube Video URL",
option_type=3,
required=True
)
])
async def play(self, ctx, url: str):
"""Play The Song in The Current Voice Channel"""
voice = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
if (
ctx.author.voice is None or voice is not None and ctx.author.voice.channel != ctx.voice_client.channel
):
await ctx.send(
embed=Embed(
title="**Parzibot** isn't connected to your **Voice Channel**",
color=Colour(0xd95959)
)
)
return
self.songs.append(str(url))
channel = ctx.author.voice.channel
if channel and channel is not None:
if voice is not None and voice.is_connected() is not None:
await voice.move_to(channel)
else: voice = await channel.connect()
if not voice.is_playing() or voice.is_paused():
await self.play_song(ctx)
else:
await ctx.send(
embed=Embed(
title="**The Song** added to playlist",
description="If you want to play song right now write **/next**",
color=Colour(0x59d9b9)
)
)
else:
await ctx.send(
embed=Embed(
title="You're not connected to any **Voice Channel**",
color=Colour(0xd95959)
)
)
async def play_song(self, ctx):
def search(url):
with youtube_dl.YoutubeDL({'format': 'bestaudio', 'noplaylist': 'True'}) as ydl:
info = ydl.extract_info(f"ytsearch:{url}", download=False)['entries'][0]
return {'source': info['formats'][0]['url'], 'title': info['title']}
voice = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
if self.songs:
self.current = self.songs.pop(0)
data = search(self.current)
voice.play(discord.FFmpegPCMAudio(data['source'], **self.FFMPEG_OPTIONS),
after=lambda e: asyncio.run_coroutine_threadsafe(self.play_song(ctx), self.client.loop))
voice.is_playing()
await ctx.send(
embed=Embed(
title=f"**{data['title']}** is playing now",
color=Colour(0x59d9b9)
)
)
@cog_ext.cog_slash(name="playlist", description="The Number of Songs in The Playlist")
async def playlist(self, ctx):
"""The Number of Songs in The Playlist"""
if (
ctx.author.voice is None or ctx.voice_client is None
or ctx.author.voice.channel != ctx.voice_client.channel
):
await ctx.send(
embed=Embed(
title="**Parzibot** isn't connected to your **Voice Channel**",
color=Colour(0xd95959)
)
)
return
if self.songs:
await ctx.send(
embed=Embed(
title=f"**The Playlist** contains about **{len(self.songs)}** song(-s)",
color=Colour(0x59d9b9)
)
)
else:
await ctx.send(
embed=Embed(
title="**The Playlist** is empty",
color=Colour(0x59d9b9)
)
)
@cog_ext.cog_slash(name="replay", description="Replay The Current Song")
async def replay(self, ctx):
"""Replay The Current Song"""
if (
ctx.author.voice is None
or ctx.author.voice.channel != ctx.voice_client.channel
):
await ctx.send(
embed=Embed(
title="**Parzibot** isn't connected to your **Voice Channel**",
color=Colour(0xd95959)
)
)
return
voice = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
voice.stop()
await self.replay_song(ctx)
async def replay_song(self, ctx):
def search(url):
with youtube_dl.YoutubeDL({'format': 'bestaudio', 'noplaylist': 'True'}) as ydl:
info = ydl.extract_info(f"ytsearch:{url}", download=False)['entries'][0]
return {'source': info['formats'][0]['url'], 'title': info['title']}
voice = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
data = search(self.current)
voice.play(discord.FFmpegPCMAudio(data['source'], **self.FFMPEG_OPTIONS),
after=lambda e: asyncio.run_coroutine_threadsafe(self.play_song(ctx), self.client.loop))
voice.is_playing()
await ctx.send(
embed=Embed(
title=f"**{data['title']}** is playing now",
color=Colour(0x59d9b9)
)
)
@cog_ext.cog_slash(name="resume", description="Resume The Current Song")
async def resume(self, ctx):
"""Resume The Current Song"""
if ctx.author.voice is None or ctx.author.voice.channel != ctx.voice_client.channel:
await ctx.send(
embed=Embed(
title="**Parzibot** isn't connected to your **Voice Channel**",
color=Colour(0xd95959)
)
)
return
voice = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
if voice.is_paused():
voice.resume()
await ctx.send(
embed=Embed(
title="**The Song** has been resumed",
color=Colour(0x59d9b9)
)
)
else:
await ctx.send(
embed=Embed(
title="**The Song** isn't paused",
color=Colour(0x59d9b9)
)
)
@cog_ext.cog_slash(name="shuffle", description="Shuffle The Playlist of Songs")
async def shuffle(self, ctx):
"""Shuffle The Playlist of Songs"""
if (
ctx.author.voice is None
or ctx.author.voice.channel != ctx.voice_client.channel
):
await ctx.send(
embed=Embed(
title="**Parzibot** isn't connected to your **Voice Channel**",
color=Colour(0xd95959)
)
)
return
elif self.songs:
random.shuffle(self.songs)
await ctx.send(
embed=Embed(
title=f"**The Playlist** has been shuffled",
color=Colour(0x59d9b9)
)
)
else:
await ctx.send(
embed=Embed(
title=f"**The Playlist** is empty",
color=Colour(0x59d9b9)
)
)
@cog_ext.cog_slash(name="stop", description="Stop The Current Song")
async def stop(self, ctx):
"""Stop The Current Song"""
if ctx.author.voice is None or ctx.author.voice.channel != ctx.voice_client.channel:
await ctx.send(
embed=Embed(
title="**Parzibot** isn't connected to your **Voice Channel**",
color=Colour(0xd95959)
)
)
return
voice = discord.utils.get(self.client.voice_clients, guild=ctx.guild)
voice.stop()
await ctx.send(
embed=Embed(
title=f"**The Song** has been stopped",
color=Colour(0x59d9b9)
)
)
def setup(client):
"""Setup function"""
client.add_cog(Music(client))
|
# -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
# Dimitri Desvillechabrol <dimitri.desvillechabrol@pasteur.fr>,
# <d.desvillechabrol@gmail.com>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
import itertools
import colorlog
logger = colorlog.getLogger(__name__)
def build_kmer(length=6, letters='CG'):
"""Return list of kmer of given length based on a set of letters
:return: list of kmers
"""
# build permutations of CG letters with a sequence of given lengths
# TODO include N other letters
combos = list(itertools.product(letters, repeat=length))
return ["".join(this) for this in combos]
def get_kmer(sequence, k=7):
"""Given a sequence, return consecutive kmers
:return: iterator of kmers
"""
for i in range(0, len(sequence)-k+1):
yield sequence[i:i+k]
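# Hedged usage note (added), based directly on the two functions above:
#   build_kmer(length=2, letters='CG')  -> ['CC', 'CG', 'GC', 'GG']
#   list(get_kmer('ACGTAC', k=3))       -> ['ACG', 'CGT', 'GTA', 'TAC']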
|
def save_model(model,nets):
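    # Descriptive note (added): save each of the `nets` Keras models in `model`
    # to ../model/1.h5 ... ../model/<nets>.h5.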
for i in range(nets):
nome_file = str(i+1)+".h5"
model[i].save("../model/"+nome_file)
|
############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
#
# written in python3, (c) 2019-2021 by mworion
#
# Licence APL2.0
#
###########################################################
# standard libraries
# external packages
# local imports
from base.ascomClass import AscomClass
class DomeAscom(AscomClass):
"""
"""
__all__ = ['DomeAscom']
shutterStates = ['Open', 'Closed', 'Opening', 'Closing', 'Error']
def __init__(self, app=None, signals=None, data=None):
super().__init__(app=app, data=data, threadPool=app.threadPool)
self.signals = signals
self.data = data
def processPolledData(self):
"""
:return: true for test purpose
"""
azimuth = self.data.get('ABS_DOME_POSITION.DOME_ABSOLUTE_POSITION', 0)
self.signals.azimuth.emit(azimuth)
return True
def workerPollData(self):
"""
:return: true for test purpose
"""
azimuth = self.getAscomProperty('Azimuth')
self.storePropertyToData(azimuth, 'ABS_DOME_POSITION.DOME_ABSOLUTE_POSITION')
self.signals.azimuth.emit(azimuth)
self.getAndStoreAscomProperty('Slewing', 'Slewing')
self.getAndStoreAscomProperty('CanSetAltitude', 'CanSetAltitude')
self.getAndStoreAscomProperty('CanSetAzimuth', 'CanSetAzimuth')
self.getAndStoreAscomProperty('CanSetShutter', 'CanSetShutter')
state = self.getAscomProperty('ShutterStatus')
if state == 0:
stateText = self.shutterStates[state]
self.storePropertyToData(stateText, 'Status.Shutter')
self.storePropertyToData(True,
'DOME_SHUTTER.SHUTTER_OPEN',
elementInv='DOME_SHUTTER.SHUTTER_CLOSED')
elif state == 1:
stateText = self.shutterStates[state]
self.storePropertyToData(stateText, 'Status.Shutter')
self.storePropertyToData(False,
'DOME_SHUTTER.SHUTTER_OPEN',
elementInv='DOME_SHUTTER.SHUTTER_CLOSED')
else:
self.data['DOME_SHUTTER.SHUTTER_OPEN'] = None
self.data['DOME_SHUTTER.SHUTTER_CLOSED'] = None
return True
def slewToAltAz(self, altitude=0, azimuth=0):
"""
:param altitude:
:param azimuth:
:return: success
"""
if not self.deviceConnected:
return False
if self.data.get('CanSetAzimuth'):
self.callMethodThreaded(self.client.SlewToAzimuth, azimuth)
if self.data.get('CanSetAltitude'):
self.callMethodThreaded(self.client.SlewToAltitude, altitude)
return True
def openShutter(self):
"""
:return: success
"""
if not self.deviceConnected:
return False
if self.data.get('CanSetShutter'):
self.callMethodThreaded(self.client.OpenShutter)
return True
def closeShutter(self):
"""
:return: success
"""
if not self.deviceConnected:
return False
if self.data.get('CanSetShutter'):
self.callMethodThreaded(self.client.CloseShutter)
return True
def abortSlew(self):
"""
:return: success
"""
if not self.deviceConnected:
return False
self.callMethodThreaded(self.client.AbortSlew)
return True
|
from flask import Flask, render_template, request, redirect, url_for, session
import pandas as pd
import time
import pymongo
def getCollectionObject(collectionName):
connection = pymongo.MongoClient("ds127044.mlab.com", 27044)
db = connection['adaptive_web']
status = db.authenticate(username, password)
if status == True:
return db[collectionName]
else:
print("Authentication Error!")
return
def validateLogin(userName, password):
userLoginDetails = getCollectionObject('userLoginDetails')
userData = userLoginDetails.find_one({'username': userName})
if userData is None:
return "Username doesn't exist, it seems!"
if password == userData['password']:
return "Logged In"
return "Incorrect Login"
def createUserLogin(userName, password, rePassword):
if password != rePassword:
return "No Match"
userLoginDetails = getCollectionObject('userLoginDetails')
checkData = userLoginDetails.find_one({'username': userName})
if checkData is None:
userData = {'username': userName, 'password': password}
userLoginDetails.insert_one(userData)
return "Account Created"
else:
return "Username Exists"
return
def getCurrentTime():
timestamp = time.strftime('%Y-%m-%d %H:%M:%S')
return timestamp
app = Flask(__name__, template_folder = 'templates')
app.secret_key = 'NoSoupForYou!'
@app.route("/")
def index():
if 'username' in session:
return redirect(url_for('user', userName = session['username']))
return render_template('index.html')
@app.route('/login', methods = ['POST'])
def login():
userName = request.form['userName']
password = request.form['password']
status = validateLogin(userName, password)
if status == 'Logged In':
timestamp = getCurrentTime()
session['username'] = userName
print(session['username'])
userLog = getCollectionObject('userLog')
logData = {'Username': userName, 'Timestamp': timestamp, 'Action': 'Login'}
userLog.insert_one(logData)
return redirect(url_for('user', userName = userName))
else:
return status
@app.route('/register', methods = ['POST'])
def register():
userName = request.form['newUserName']
password = request.form['newPassword']
rePassword = request.form['reNewPassword']
status = createUserLogin(userName, password, rePassword)
if status == 'Username Exists':
return status
if status == 'No Match':
return "Passwords do not match!"
session['username'] = userName
timestamp = getCurrentTime()
userLog = getCollectionObject('userLog')
logData = {'Username': userName, 'Timestamp': timestamp, 'Action': 'Register'}
userLog.insert_one(logData)
return redirect(url_for('user', userName = userName))
@app.route('/user/logout')
def logout():
timestamp = getCurrentTime()
userName = session['username']
userLog = getCollectionObject('userLog')
logData = {'Username': userName, 'Timestamp': timestamp, 'Action': 'Logout'}
userLog.insert_one(logData)
session.pop('username', None)
return redirect(url_for('index'))
@app.route('/user/<userName>')
def user(userName):
userLog = getCollectionObject('userLog')
cursor = userLog.find()
df = pd.DataFrame(list(cursor))
df = df[df['Username'] == userName]
df = df[['Action', 'Timestamp']]
df = df.to_html(classes = "table is-bordered has-text-centered", index = False)
return render_template('userProfile.html', userName = userName, userlog = df)
@app.route('/user/<userName>/clicks', methods = ['POST'])
def userClicks(userName):
clickDetails = request.form['clickDetails'];
print(session['username'])
print(clickDetails)
return redirect(url_for('user', userName = userName))
@app.route('/user/stackoverflow/tags', methods = ['POST'])
def pageTags():
if 'username' not in session:
return redirect(url_for('index'))
tags = request.form['tags']
if tags is None or tags == '':
tags = "NA"
url = request.form['url']
timestamp = request.form['timeStamp']
print(tags)
username = session['username']
tagsLog = getCollectionObject('tagsLog')
logData = {'username': username, 'url': url, 'timestamp': timestamp, 'tags': tags}
tagsLog.insert_one(logData)
return redirect(url_for('user', userName = session['username']))
@app.route('/user/stackoverflow/searchBox', methods = ['POST'])
def searchField():
if 'username' not in session:
return redirect(url_for('index'))
url = request.form['url']
timestamp = request.form['timeStamp']
print("search" + url)
username = session['username']
searchLog = getCollectionObject('searchLog')
logData = {'username': username, 'url': url, 'timestamp': timestamp}
searchLog.insert_one(logData)
return redirect(url_for('user', userName = session['username']))
@app.route('/user/stackoverflow/scroll', methods = ['POST'])
def scrollEvent():
if 'username' not in session:
return redirect(url_for('index'))
url = request.form['url']
scrollRatio = request.form['scrollRatio']
timestamp = request.form['timeStamp']
username = session['username']
print(scrollRatio)
row = username + "," + url + "," + timestamp + "," + scrollRatio + "\n"
scrollLog = getCollectionObject('scrollLog')
logData = {'username': username, 'url': url, 'timestamp': timestamp, 'scrollRatio': scrollRatio}
scrollLog.insert_one(logData)
return redirect(url_for('user', userName = session['username']))
@app.route('/user/stackoverflow/idleTime', methods = ['POST'])
def idleEvent():
if 'username' not in session:
return redirect(url_for('index'))
url = request.form['url']
timestamp = request.form['timeStamp']
print("Idle" + url)
username = session['username']
idleLog = getCollectionObject('idleLog')
logData = {'username': username, 'url': url, 'timestamp': timestamp}
idleLog.insert_one(logData)
return redirect(url_for('user', userName = session['username']))
@app.route('/user/stackoverflow/clicks', methods = ['POST'])
def clicks():
if 'username' not in session:
return redirect(url_for('index'))
url = request.form['url']
targetClass = request.form['targetClass']
timestamp = request.form['timeStamp']
username = session['username']
clicksLog = getCollectionObject('clicksLog')
logData = {'username': username, 'url': url, 'timestamp': timestamp, 'targetClass': targetClass}
clicksLog.insert_one(logData)
return redirect(url_for('user', userName = session['username']))
@app.route('/user/stackoverflow/copiedElement', methods = ['POST'])
def copiedElement():
if 'username' not in session:
return redirect(url_for('index'))
url = request.form['url']
timestamp = request.form['timeStamp']
copiedClass = request.form['elementClass']
copiedText = request.form['elementText']
print(copiedClass)
username = session['username']
copyLog = getCollectionObject('copyLog')
logData = {'username': username, 'url': url, 'timestamp': timestamp, 'copiedClass': copiedClass, 'copiedText': copiedText}
copyLog.insert_one(logData)
return redirect(url_for('user', userName = session['username']))
@app.route('/user/stackoverflow/submitButton', methods = ['POST'])
def submitButton():
if 'username' not in session:
return redirect(url_for('index'))
url = request.form['url']
timestamp = request.form['timeStamp']
username = session['username']
submitLog = getCollectionObject('submitLog')
logData = {'username': username, 'url': url, 'timestamp': timestamp}
submitLog.insert_one(logData)
return redirect(url_for('user', userName = session['username']))
if __name__ == '__main__':
app.run()
|
from django import forms
from core.models import Reserva, Quarto
class QuartoChangeForm(forms.ModelForm):
class Meta:
model = Quarto
fields = (
'name',
'endereco',
'tratar',
'telefone',
'description',
'imagem',
'color',
)
class ReservaChangeForm(forms.ModelForm):
start_date = forms.DateField(input_formats=['%d/%m/%Y'])
end_date = forms.DateField(input_formats=['%d/%m/%Y'])
class Meta:
model = Reserva
fields = (
'quarto',
'name',
'telefone',
'status',
'start_date',
'end_date',
'comprovante',
)
|
# coding=utf-8
#
# Copyright © 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from itertools import chain
from .internals import ConfigurationSettingsType, json_encode_string
from .decorators import ConfigurationSetting, Option
from .streaming_command import StreamingCommand
from .search_command import SearchCommand
from .validators import Set
from .. import six
class ReportingCommand(SearchCommand):
""" Processes search result records and generates a reporting data structure.
Reporting search commands run as either reduce or map/reduce operations. The reduce part runs on a search head and
is responsible for processing a single chunk of search results to produce the command's reporting data structure.
The map part is called a streaming preop. It feeds the reduce part with partial results and by default runs on the
search head and/or one or more indexers.
You must implement a :meth:`reduce` method as a generator function that iterates over a set of event records and
yields a reporting data structure. You may implement a :meth:`map` method as a generator function that iterates
over a set of event records and yields :class:`dict` or :class:`list(dict)` instances.
ReportingCommand configuration
==============================
Configure the :meth:`map` operation using a Configuration decorator on your :meth:`map` method. Configure it like
you would a :class:`StreamingCommand`. Configure the :meth:`reduce` operation using a Configuration decorator on
your :meth:`ReportingCommand` class.
You can configure your command for operation under Search Command Protocol (SCP) version 1 or 2. SCP 2 requires
Splunk 6.3 or later.
"""
# region Special methods
def __init__(self):
SearchCommand.__init__(self)
# endregion
# region Options
phase = Option(doc='''
**Syntax:** phase=[map|reduce]
**Description:** Identifies the phase of the current map-reduce operation.
''', default='reduce', validate=Set('map', 'reduce'))
# endregion
# region Methods
def map(self, records):
""" Override this method to compute partial results.
:param records:
:type records:
You must override this method, if :code:`requires_preop=True`.
"""
return NotImplemented
def prepare(self):
phase = self.phase
if phase == 'map':
# noinspection PyUnresolvedReferences
self._configuration = self.map.ConfigurationSettings(self)
return
if phase == 'reduce':
streaming_preop = chain((self.name, 'phase="map"', str(self._options)), self.fieldnames)
self._configuration.streaming_preop = ' '.join(streaming_preop)
return
raise RuntimeError('Unrecognized reporting command phase: {}'.format(json_encode_string(six.text_type(phase))))
def reduce(self, records):
""" Override this method to produce a reporting data structure.
You must override this method.
"""
raise NotImplementedError('reduce(self, records)')
def _execute(self, ifile, process):
SearchCommand._execute(self, ifile, getattr(self, self.phase))
# endregion
# region Types
class ConfigurationSettings(SearchCommand.ConfigurationSettings):
""" Represents the configuration settings for a :code:`ReportingCommand`.
"""
# region SCP v1/v2 Properties
required_fields = ConfigurationSetting(doc='''
List of required fields for this search which back-propagates to the generating search.
Setting this value enables selected fields mode under SCP 2. Under SCP 1 you must also specify
:code:`clear_required_fields=True` to enable selected fields mode. To explicitly select all fields,
specify a value of :const:`['*']`. No error is generated if a specified field is missing.
Default: :const:`None`, which implicitly selects all fields.
Supported by: SCP 1, SCP 2
''')
requires_preop = ConfigurationSetting(doc='''
Indicates whether :meth:`ReportingCommand.map` is required for proper command execution.
If :const:`True`, :meth:`ReportingCommand.map` is guaranteed to be called. If :const:`False`, Splunk
considers it to be an optimization that may be skipped.
Default: :const:`False`
Supported by: SCP 1, SCP 2
''')
streaming_preop = ConfigurationSetting(doc='''
Denotes the requested streaming preop search string.
Computed.
Supported by: SCP 1, SCP 2
''')
# endregion
# region SCP v1 Properties
clear_required_fields = ConfigurationSetting(doc='''
:const:`True`, if required_fields represent the *only* fields required.
If :const:`False`, required_fields are additive to any fields that may be required by subsequent commands.
In most cases, :const:`True` is appropriate for reporting commands.
Default: :const:`True`
Supported by: SCP 1
''')
retainsevents = ConfigurationSetting(readonly=True, value=False, doc='''
Signals that :meth:`ReportingCommand.reduce` transforms _raw events to produce a reporting data structure.
Fixed: :const:`False`
Supported by: SCP 1
''')
streaming = ConfigurationSetting(readonly=True, value=False, doc='''
Signals that :meth:`ReportingCommand.reduce` runs on the search head.
Fixed: :const:`False`
Supported by: SCP 1
''')
# endregion
# region SCP v2 Properties
maxinputs = ConfigurationSetting(doc='''
Specifies the maximum number of events that can be passed to the command for each invocation.
This limit cannot exceed the value of `maxresultrows` in limits.conf_. Under SCP 1 you must specify this
value in commands.conf_.
Default: The value of `maxresultrows`.
Supported by: SCP 2
.. _limits.conf: http://docs.splunk.com/Documentation/Splunk/latest/admin/Limitsconf
''')
run_in_preview = ConfigurationSetting(doc='''
:const:`True`, if this command should be run to generate results for preview; not wait for final output.
This may be important for commands that have side effects (e.g., outputlookup).
Default: :const:`True`
Supported by: SCP 2
''')
type = ConfigurationSetting(readonly=True, value='reporting', doc='''
Command type name.
Fixed: :const:`'reporting'`.
Supported by: SCP 2
''')
# endregion
# region Methods
@classmethod
def fix_up(cls, command):
""" Verifies :code:`command` class structure and configures the :code:`command.map` method.
Verifies that :code:`command` derives from :class:`ReportingCommand` and overrides
:code:`ReportingCommand.reduce`. It then configures :code:`command.reduce`, if an overriding implementation
of :code:`ReportingCommand.reduce` has been provided.
:param command: :code:`ReportingCommand` class
Exceptions:
:code:`TypeError` :code:`command` class is not derived from :code:`ReportingCommand`
:code:`AttributeError` No :code:`ReportingCommand.reduce` override
"""
if not issubclass(command, ReportingCommand):
                raise TypeError('{} is not a ReportingCommand'.format(command))
if command.reduce == ReportingCommand.reduce:
raise AttributeError('No ReportingCommand.reduce override')
if command.map == ReportingCommand.map:
cls._requires_preop = False
return
f = vars(command)[b'map'] # Function backing the map method
# EXPLANATION OF PREVIOUS STATEMENT: There is no way to add custom attributes to methods. See [Why does
# setattr fail on a method](http://stackoverflow.com/questions/7891277/why-does-setattr-fail-on-a-bound-method) for a discussion of this issue.
try:
settings = f._settings
except AttributeError:
f.ConfigurationSettings = StreamingCommand.ConfigurationSettings
return
# Create new StreamingCommand.ConfigurationSettings class
module = command.__module__ + b'.' + command.__name__ + b'.map'
name = b'ConfigurationSettings'
bases = (StreamingCommand.ConfigurationSettings,)
f.ConfigurationSettings = ConfigurationSettingsType(module, name, bases)
ConfigurationSetting.fix_up(f.ConfigurationSettings, settings)
del f._settings
pass
# endregion
pass
# endregion
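# Illustrative sketch (not part of the SDK): a minimal ReportingCommand subclass,
# assuming the public splunklib.searchcommands package, which exports the
# Configuration decorator and dispatch function alongside this class.
#
#   from splunklib.searchcommands import dispatch, Configuration, ReportingCommand
#
#   @Configuration(requires_preop=True)
#   class CountMatchesCommand(ReportingCommand):
#       @Configuration()
#       def map(self, records):        # streaming preop: may run on the indexers
#           for record in records:
#               yield {'count': 1}
#       def reduce(self, records):     # reduce: runs on the search head
#           yield {'count': sum(int(record['count']) for record in records)}
#
#   dispatch(CountMatchesCommand, module_name=__name__)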
|
#! /usr/bin/env python
import os, errno
import cv
import threading
import winsound
import shutil
from datetime import datetime
import Image
import ImageFont, ImageDraw, ImageOps
import strip_printer
# dependencies:
# python 2.7
# winsound (on Windows); on Linux, change the playAudio function to something else
# openCV python bindings
# PIL (Python Imaging Library)
# PyWin32 - for printing
PHOTOBOOTH_WINDOW = "photobooth"
PHOTO_COUNT = 4
PHOTO_FILE_EXTENSION = 'png'
PHOTO_FORMAT = 'PNG'
PHOTO_FOLDER = 'photos/'
ORIGINAL_FOLDER = 'photos/originals/'
STRIPE_FOLDER = 'photos/stripes/'
COLOR_FOLDER = 'photos/stripes/color/'
GREYSCALE_FOLDER = 'photos/stripes/greyscale/'
SOUND_FOLDER = 'sounds/'
HALF_WIDTH = 175
HALF_HEIGHT = 200
PHOTO_WIDTH = HALF_WIDTH * 2
PHOTO_HEIGHT = HALF_HEIGHT * 2
PAGE_WIDTH = 1400
PAGE_HEIGHT = 1800
FOOTER_HEIGHT = 130
BORDER_WIDTH = 10
BG_COLOR = (255,255,255)
def main():
create_folder_struct()
cv.NamedWindow(PHOTOBOOTH_WINDOW , 1)
capture = cv.CaptureFromCAM(1)
#when the program starts the booth needs to be empty
is_booth_empty = True
#capture a few frames to let the light levels adjust
for i in range(100):
cv.QueryFrame(capture)
#now create a histogram of the empty booth to compare against in the future
empty_booth_hist = get_hsv_hist(cv.QueryFrame(capture))
while(True):
img = cv.QueryFrame(capture)
#check if button is pressed(enter)
key = cv.WaitKey(10)
if(key == 32):
playAudio('start')
take_picture(capture, 1)
take_picture(capture, 2)
take_picture(capture, 3)
take_picture(capture, 4)
playAudio('end')
path = create_photo_strips()
strip_printer.print_strip(path)
archive_images()
elif(key == 27):
break
#check for movement
booth_empty_check = check_is_booth_empty(img, empty_booth_hist)
if booth_empty_check != None and is_booth_empty != booth_empty_check:
print 'hello' if is_booth_empty else 'goodbye'
#playAudio('hello' if is_booth_empty else 'goodbye')
is_booth_empty = not is_booth_empty
cv.ShowImage(PHOTOBOOTH_WINDOW , img)
def create_folder_struct():
create_folder(PHOTO_FOLDER)
create_folder(ORIGINAL_FOLDER)
create_folder(STRIPE_FOLDER)
create_folder(COLOR_FOLDER)
create_folder(GREYSCALE_FOLDER)
def create_folder(folderPath):
try:
os.makedirs(folderPath)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def take_picture(capture, i):
playAudio('cheese-' + str(i))
    #capture a couple of frames to get the current frame.
    #The blocking calls seem to disturb OpenCV's capture process;
    #requesting a few frames works around that, and it also creates
    #a nice delay between the audio cue and the capture.
for j in range(5):
img = cv.QueryFrame( capture )
cv.ShowImage(PHOTOBOOTH_WINDOW , img)
cv.WaitKey(100)
playAudio('click')
cv.SaveImage(PHOTO_FOLDER + str(i) + '.png',img)
def get_hsv_hist(img):
hsv = cv.CloneImage(img)
cv.CvtColor(img, hsv, cv.CV_BGR2HSV)
h_plane = cv.CreateImage ((cv.GetSize(img)[0],cv.GetSize(img)[1]), 8, 1)
s_plane = cv.CreateImage ((cv.GetSize(img)[0],cv.GetSize(img)[1]), 8, 1)
cv.Split(hsv, h_plane, s_plane, None, None)
hist = cv.CreateHist([32,64], cv.CV_HIST_ARRAY, [[0,180], [0,255]], 1)
cv.CalcHist([h_plane, s_plane], hist)
return hist
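#check_is_booth_empty compares the live frame's hue/saturation histogram against the
#histogram taken of the empty booth, using correlation. The two thresholds act as
#hysteresis: a correlation above 0.90 means the frame still looks like the empty booth,
#below 0.80 means someone is probably in front of the camera, and anything in between
#is treated as unknown (None) so the hello/goodbye state does not flicker.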
def check_is_booth_empty(img, empty_booth_hist):
hist = get_hsv_hist(img)
difference = cv.CompareHist(empty_booth_hist, hist, cv.CV_COMP_CORREL)
print difference
if difference > 0.90:
return True
elif difference < 0.80:
return False
    else:
        #too hard to say, so return None
        return None
def create_photo_strips():
    '''using the original images, build a color and a greyscale photo strip and save them under photos/stripes'''
strip = Image.new('RGB', (PHOTO_HEIGHT + (BORDER_WIDTH * 2) + FOOTER_HEIGHT, (PHOTO_WIDTH * PHOTO_COUNT) + (BORDER_WIDTH * 2)), BG_COLOR)
for i in range(PHOTO_COUNT):
photo = Image.open(PHOTO_FOLDER + str(i+1) + '.' + PHOTO_FILE_EXTENSION)
w, h = map(lambda x: x/2, photo.size)
photo = ImageOps.fit(photo, (PHOTO_WIDTH, PHOTO_HEIGHT), centering=(0.5, 0.5))
photo = photo.rotate(270)
photo = ImageOps.autocontrast(photo, cutoff=0)
strip.paste(photo, (FOOTER_HEIGHT, (i * PHOTO_WIDTH) + (i * BORDER_WIDTH)))
#append footer
font = ImageFont.truetype('font_1.ttf', 40)
footer_img = Image.new("RGB", ((PHOTO_COUNT * PHOTO_WIDTH) + (PHOTO_COUNT * BORDER_WIDTH), FOOTER_HEIGHT), BG_COLOR)
draw = ImageDraw.Draw(footer_img)
draw.text((220, 40), "ashley & david's wedding, july 28, 2012", font=font, fill=(100,100,0))
strip.paste(footer_img.rotate(270), (0,0))
strip.save(COLOR_FOLDER + current_timestamp() + '.png', PHOTO_FORMAT)
ImageOps.grayscale(strip).save(GREYSCALE_FOLDER + current_timestamp() + '.png', PHOTO_FORMAT)
strip_to_print = Image.new('RGB', (PAGE_WIDTH, PAGE_HEIGHT), BG_COLOR)
strip_to_print.paste(ImageOps.grayscale(strip), (-BORDER_WIDTH, -BORDER_WIDTH))
strip_to_print.save('to_print.png', PHOTO_FORMAT)
return 'to_print.png'
def current_timestamp():
return datetime.now().strftime("%d.%m.%y-%H.%M.%S")
def archive_images():
    '''move the original images to photos/originals, renaming them with a timestamp, and delete the already-printed strip'''
    for i in range(1, PHOTO_COUNT + 1):
shutil.move(PHOTO_FOLDER + str(i) + '.png', ORIGINAL_FOLDER + current_timestamp() + ' ' + str(i) + '.png')
os.remove('to_print.png')
def playAudio(audio_name):
    '''play the audio file associated with the given name; this blocks while the sound plays'''
winsound.PlaySound(SOUND_FOLDER + audio_name + '.wav', winsound.SND_FILENAME)
if __name__=="__main__":
main()
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.test import TestCase
from django.core.urlresolvers import reverse
class CrumbsTest(TestCase):
def setUp(self):
self.view_url = reverse('test_view')
def test_crumbs(self):
response = self.client.get(self.view_url)
        self.assertTrue(hasattr(response.context['request'], 'breadcrumbs'))
def test_context_data(self):
response = self.client.get(self.view_url)
self.assertEqual(response.context['show_crumbs'], True)
class NoCrumbsTest(TestCase):
def setUp(self):
self.view_url = reverse('test_view')
        self.old_MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)  # copy, so tearDown restores the original list
middleware_class = 'breadcrumbs.middleware.BreadcrumbsMiddleware'
for x, m in enumerate(settings.MIDDLEWARE_CLASSES):
if m.startswith(middleware_class):
settings.MIDDLEWARE_CLASSES.pop(x)
def tearDown(self):
settings.MIDDLEWARE_CLASSES = self.old_MIDDLEWARE_CLASSES
def test_context_data(self):
response = self.client.get(self.view_url)
self.assertEqual(response.context['show_crumbs'], False)
|
import os
from .base import SourceTestCase
HOST = os.environ.get('MYSQL_HOST', 'localhost')
USER = os.environ.get('MYSQL_USER', '')
PASSWORD = os.environ.get('MYSQL_PASSWORD', '')
class TestCase(SourceTestCase):
generator = 'mysql'
output_name = 'chinook_mysql.json'
def generate(self):
client = self.module.Client(database='chinook',
host=HOST,
user=USER,
password=PASSWORD)
return client.generate()
|
import dectate
class App(dectate.App):
pass
class Other(dectate.App):
pass
class R:
pass
@App.directive("foo")
class FooAction(dectate.Action):
def __init__(self, name):
self.name = name
def identifier(self):
return self.name
def perform(self, obj):
pass
@Other.directive("foo")
class OtherFooAction(dectate.Action):
def __init__(self, name):
self.name = name
def identifier(self):
return self.name
def perform(self, obj):
pass
|
from django.db import models
import datetime
from datetime import date
from django import forms
from django.http import Http404, HttpResponse
from django.utils.dateformat import DateFormat
from django.utils.formats import date_format
import wagtail
from wagtail.admin.edit_handlers import (FieldPanel, FieldRowPanel,
InlinePanel, MultiFieldPanel,
PageChooserPanel, StreamFieldPanel)
from wagtail.contrib.forms.models import AbstractEmailForm, AbstractFormField
from wagtail.contrib.routable_page.models import RoutablePageMixin, route
from wagtail.core import blocks
from wagtail.core.fields import RichTextField, StreamField
from wagtail.core.models import Page
from wagtail.embeds.blocks import EmbedBlock
from wagtail.images.blocks import ImageChooserBlock
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.documents.models import Document, AbstractDocument
from wagtail.documents.edit_handlers import DocumentChooserPanel
from wagtail.snippets.models import register_snippet
from portfolio.blocks import TwoColumnBlock
from modelcluster.fields import ParentalKey, ParentalManyToManyField
from modelcluster.tags import ClusterTaggableManager
from taggit.models import Tag as TaggitTag
from taggit.models import TaggedItemBase
from wagtailmd.utils import MarkdownField, MarkdownPanel
# Create your models here.
@register_snippet
class BlogCategory(models.Model):
name = models.CharField(max_length=255)
slug = models.SlugField(unique=True, max_length=80)
panels = [
FieldPanel('name'),
FieldPanel('slug'),
]
def __str__(self):
return self.name
class Meta:
verbose_name = "Category"
verbose_name_plural = "Categories"
class BlogPageTag(TaggedItemBase):
content_object = ParentalKey('PostPage', related_name='post_tags')
@register_snippet
class Tag(TaggitTag):
class Meta:
proxy = True
class BlogPage(RoutablePageMixin, Page):
description = models.CharField(max_length=255, blank=True,)
content_panels = Page.content_panels + [
FieldPanel('description', classname="full")
]
def get_context(self, request, *args, **kwargs):
context = super(BlogPage, self).get_context(request, *args, **kwargs)
context['posts'] = self.posts
context['blog_page'] = self
context['search_type'] = getattr(self, 'search_type', "")
context['search_term'] = getattr(self, 'search_term', "")
return context
def get_posts(self):
return PostPage.objects.descendant_of(self).live().order_by('-date')
@route(r'^(\d{4})/$')
@route(r'^(\d{4})/(\d{2})/$')
@route(r'^(\d{4})/(\d{2})/(\d{2})/$')
def post_by_date(self, request, year, month=None, day=None, *args, **kwargs):
self.posts = self.get_posts().filter(date__year=year)
self.search_type = 'date'
self.search_term = year
if month:
self.posts = self.posts.filter(date__month=month)
df = DateFormat(date(int(year), int(month), 1))
self.search_term = df.format('F Y')
if day:
self.posts = self.posts.filter(date__day=day)
self.search_term = date_format(date(int(year), int(month), int(day)))
return Page.serve(self, request, *args, **kwargs)
@route(r'^(\d{4})/(\d{2})/(\d{2})/(.+)/$')
def post_by_date_slug(self, request, year, month, day, slug, *args, **kwargs):
post_page = self.get_posts().filter(slug=slug).first()
if not post_page:
raise Http404
return Page.serve(post_page, request, *args, **kwargs)
@route(r'^tag/(?P<tag>[-\w]+)/$')
def post_by_tag(self, request, tag, *args, **kwargs):
self.search_type = 'tag'
self.search_term = tag
self.posts = self.get_posts().filter(tags__slug=tag)
return Page.serve(self, request, *args, **kwargs)
@route(r'^category/(?P<category>[-\w]+)/$')
def post_by_category(self, request, category, *args, **kwargs):
self.search_type = 'category'
self.search_term = category
self.posts = self.get_posts().filter(categories__slug=category)
return Page.serve(self, request, *args, **kwargs)
@route(r'^$')
def post_list(self, request, *args, **kwargs):
self.posts = self.get_posts()
return Page.serve(self, request, *args, **kwargs)
@route(r'^search/$')
def post_search(self, request, *args, **kwargs):
search_query = request.GET.get('q', None)
self.posts = self.get_posts()
if search_query:
self.posts = self.posts.filter(body__contains=search_query)
self.search_term = search_query
self.search_type = 'search'
return Page.serve(self, request, *args, **kwargs)
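# Illustrative examples of the URLs the BlogPage routes above resolve, relative to the
# blog page's own URL: /2018/, /2018/07/ and /2018/07/28/ hit post_by_date,
# /2018/07/28/my-post-slug/ hits post_by_date_slug, /tag/django/ and /category/python/
# filter by tag and category, /search/?q=wagtail hits post_search, and the bare /
# serves the full post list.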
class PostPage(Page):
body = StreamField([('body', blocks.RichTextBlock()),])
description = models.CharField(max_length=255, blank=True,)
date = models.DateTimeField(verbose_name="Post date", default=datetime.datetime.today)
excerpt = RichTextField(
verbose_name='excerpt', blank=True,
)
header_image = models.ForeignKey(
'wagtailimages.Image',
null=True, blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
categories = ParentalManyToManyField('portfolio.BlogCategory', blank=True)
tags = ClusterTaggableManager(through='portfolio.BlogPageTag', blank=True)
content_panels = Page.content_panels + [
ImageChooserPanel('header_image'),
StreamFieldPanel("body"),
FieldPanel("excerpt"),
FieldPanel('categories', widget=forms.CheckboxSelectMultiple),
FieldPanel('tags'),
]
settings_panels = Page.settings_panels + [
FieldPanel('date'),
]
@property
def blog_page(self):
return self.get_parent().specific
def get_context(self, request, *args, **kwargs):
context = super(PostPage, self).get_context(request, *args, **kwargs)
context['blog_page'] = self.blog_page
context['post'] = self
context['posts'] = self.get_posts
return context
def get_posts(self):
return PostPage.objects.exclude(id = self.id).live().order_by('?')[:3]
class LandingPage(Page):
body = StreamField([
('heading', blocks.CharBlock(classname="full title")),
('paragraph', blocks.RichTextBlock()),
('image', ImageChooserBlock(icon="image")),
('two_columns', TwoColumnBlock()),
('embedded_video', EmbedBlock(icon="media")),
], null=True, blank=True)
content_panels = Page.content_panels + [
StreamFieldPanel('body'),
]
@property
def blog_page(self):
return self.get_parent().specific
def get_context(self, request, *args, **kwargs):
context = super(LandingPage, self).get_context(request, *args, **kwargs)
context['blog_page'] = self.blog_page
return context
class FormField(AbstractFormField):
page = ParentalKey('FormPage', related_name='custom_form_fields')
class FormPage(AbstractEmailForm):
thank_you_text = RichTextField(blank=True)
content_panels = AbstractEmailForm.content_panels + [
InlinePanel('custom_form_fields', label="Form fields"),
FieldPanel('thank_you_text', classname="full"),
MultiFieldPanel([
FieldRowPanel([
FieldPanel('from_address', classname="col6"),
FieldPanel('to_address', classname="col6"),
]),
FieldPanel('subject'),
], "Email Notification Config"),
]
def get_context(self, request, *args, **kwargs):
context = super(FormPage, self).get_context(request, *args, **kwargs)
context['blog_page'] = self.blog_page
return context
def get_form_fields(self):
return self.custom_form_fields.all()
@property
def blog_page(self):
return self.get_parent().specific
class PortfolioPage(Page):
# Title Page
name = models.CharField(max_length=150, blank=True,)
phoneno = models.CharField(max_length=150, blank=True,)
profile_image = models.ForeignKey( 'wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+' )
designation = models.CharField(max_length=150, blank=True,)
age = models.DateField(verbose_name="Date of Birth", blank=True,)
email = models.EmailField( max_length=70, blank=True, unique=True )
location = models.CharField(max_length=150, blank=True,)
# Header Page
header_title = models.CharField(max_length=150, blank=True,)
header_content = MarkdownField( verbose_name='Header Content', blank=True, )
resume_csv = models.ForeignKey(
'wagtaildocs.Document',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
# Education Page
resume_title = models.CharField(max_length=150, blank=True,)
resume_content = MarkdownField( verbose_name='Resume Content', blank=True, )
education_title = models.CharField(max_length=150, blank=True,)
class EducationBlock(blocks.StructBlock):
university = blocks.CharBlock(classname="full title")
year_passing = blocks.CharBlock(classname="full title")
education_content = blocks.CharBlock(classname="full title")
education = StreamField([
('education', EducationBlock()),
], null=True, blank=True)
#Employment Page
employment_title = models.CharField(max_length=150, blank=True,)
class EmploymentBlock(blocks.StructBlock):
company_name = blocks.CharBlock(classname="full title")
experience = blocks.CharBlock(classname="full title")
designation = blocks.CharBlock(classname="full title")
employment = StreamField([
('employment', EmploymentBlock()),
], null=True, blank=True)
#Skills Page
skills_title = models.CharField(max_length=150, blank=True,)
class SkillsBlock(blocks.StructBlock):
skills = blocks.CharBlock(classname="full title")
percentage = blocks.CharBlock(classname="full title")
skills = StreamField([
('skills', SkillsBlock()),
], null=True, blank=True)
#Testimonials
class TestimonialBlock(blocks.StructBlock):
title = blocks.CharBlock(classname="full title")
sub_title = blocks.CharBlock(classname="full title")
content = blocks.RichTextBlock(blank=True)
testimonial = StreamField([
('testimonial', TestimonialBlock()),
], null=True, blank=True)
content_panels = Page.content_panels + [
MultiFieldPanel([
FieldPanel('name'),
ImageChooserPanel('profile_image'),
FieldPanel('phoneno'),
FieldPanel('designation'),
FieldPanel('age'),
FieldPanel('email'),
FieldPanel('location')
], "Heading"),
MultiFieldPanel([
FieldPanel('header_title'),
MarkdownPanel('header_content'),
DocumentChooserPanel('resume_csv'),
], "Header"),
MultiFieldPanel([
FieldPanel('resume_title'),
MarkdownPanel('resume_content'),
FieldPanel('education_title'),
StreamFieldPanel('education'),
FieldPanel('employment_title'),
StreamFieldPanel('employment'),
FieldPanel('skills_title'),
StreamFieldPanel('skills'),
], "Resume"),
MultiFieldPanel([
StreamFieldPanel('testimonial'),
], "Testimonial"),
]
def get_context(self, request, *args, **kwargs):
context = super(PortfolioPage, self).get_context(request, *args, **kwargs)
context['portfolio'] = self
context['posts'] = self.get_posts
context['blog_page'] = self.get_blogs
context['projects'] = self.get_projects
context['parent_projects'] = self.get_parent_project
context['gits'] = self.get_gits
context['parent_gits'] = self.get_parent_git
return context
def get_posts(self):
return PostPage.objects.live().order_by('date')[:6]
def get_blogs(self):
return BlogPage.objects.live().first()
def get_projects(self):
return ProjectPage.objects.live().order_by('?')[:3]
def get_parent_project(self):
return ProjectParentPage.objects.live().first()
def get_gits(self):
return GitPage.objects.live().order_by('?')[:3]
def get_parent_git(self):
return GitParentPage.objects.live().first()
class ProjectParentPage(RoutablePageMixin, Page):
description = models.CharField(max_length=255, blank=True,)
content_panels = Page.content_panels + [
FieldPanel('description', classname="full")
]
def get_context(self, request, *args, **kwargs):
context = super(ProjectParentPage, self).get_context(request, *args, **kwargs)
context['parent_project'] = self
context['projects'] = self.get_projects
return context
def get_projects(self):
return ProjectPage.objects.live().order_by('-date')
class ProjectPage(Page):
#Project Portfolio Page
project_title = models.CharField(max_length=150, blank=True,)
date = models.DateTimeField(verbose_name="Project date", default=datetime.datetime.today)
class ProjectBlock(blocks.StructBlock):
name = blocks.CharBlock(classname="full title")
description = blocks.RichTextBlock()
excerpt = blocks.RichTextBlock(blank=True)
menu_title = blocks.CharBlock(classname="full title")
project_url = blocks.URLBlock(classname="full title")
project_image = ImageChooserBlock()
image_text = blocks.CharBlock(classname="full title")
start_date = blocks.CharBlock(classname="full title")
end_date = blocks.CharBlock(classname="full title")
language = blocks.StreamBlock([
('skills', blocks.CharBlock()),
],icon='user')
project = StreamField([
('project', ProjectBlock()),
], null=True, blank=True)
content_panels = Page.content_panels + [
MultiFieldPanel([
FieldPanel('project_title'),
StreamFieldPanel('project'),
], "Project"),
]
settings_panels = Page.settings_panels + [
FieldPanel('date'),
]
@property
def parent_project_page(self):
return self.get_parent().specific
def get_context(self, request, *args, **kwargs):
context = super(ProjectPage, self).get_context(request, *args, **kwargs)
context['parent_project'] = self.parent_project_page
context['project'] = self
context['projects'] = self.get_projects
return context
def get_projects(self):
return ProjectPage.objects.exclude(id = self.id).live().order_by('?')[:3]
class GitParentPage(RoutablePageMixin, Page):
description = models.CharField(max_length=255, blank=True,)
content_panels = Page.content_panels + [
FieldPanel('description', classname="full")
]
def get_context(self, request, *args, **kwargs):
context = super(GitParentPage, self).get_context(request, *args, **kwargs)
context['parent_git'] = self
context['gits'] = self.get_projects
return context
def get_projects(self):
return GitPage.objects.live().order_by('-date')
class GitPage(Page):
#Git Portfolio Page
git_title = models.CharField(max_length=150, blank=True,)
date = models.DateTimeField(verbose_name="Project date", default=datetime.datetime.today)
class GitBlock(blocks.StructBlock):
name = blocks.CharBlock(classname="full title")
description = blocks.RichTextBlock()
excerpt = blocks.RichTextBlock(blank=True)
menu_title = blocks.CharBlock(classname="full title")
git_url = blocks.URLBlock(classname="full title")
git_image = ImageChooserBlock()
image_text = blocks.CharBlock(classname="full title")
start_date = blocks.CharBlock(classname="full title")
end_date = blocks.CharBlock(classname="full title")
language = blocks.StreamBlock([
('skills', blocks.CharBlock()),
],icon='user')
GIT = StreamField([
('GIT', GitBlock()),
], null=True, blank=True)
content_panels = Page.content_panels + [
MultiFieldPanel([
FieldPanel('git_title'),
StreamFieldPanel('GIT'),
], "GIT"),]
settings_panels = Page.settings_panels + [
FieldPanel('date'),
]
@property
def parent_git_page(self):
return self.get_parent().specific
def get_context(self, request, *args, **kwargs):
context = super(GitPage, self).get_context(request, *args, **kwargs)
context['parent_project_git'] = self.parent_git_page
context['git'] = self
context['gits'] = self.get_projects
return context
def get_projects(self):
return GitPage.objects.exclude(id = self.id).live().order_by('?')[:3]
class URLPage(RoutablePageMixin, Page):
git_url = models.URLField(blank=True, null=True)
linkedin_url = models.URLField(blank=True, null=True)
bitbucket_url = models.URLField(blank=True, null=True)
facebook_url = models.URLField(blank=True, null=True)
copy_rights = models.CharField(max_length=150,blank=True, null=True)
content_panels = Page.content_panels + [
FieldPanel('git_url', classname="full"),
FieldPanel('linkedin_url', classname="full"),
FieldPanel('bitbucket_url', classname="full"),
FieldPanel('facebook_url', classname="full"),
FieldPanel('copy_rights', classname="full"),
]
def get_context(self, request, *args, **kwargs):
context = super(URLPage, self).get_context(request, *args, **kwargs)
context['url'] = self
return context
|
"""Unit tests for the OWASP Dependency Check Jenkins plugin source."""
from datetime import datetime
from tests.source_collectors.source_collector_test_case import SourceCollectorTestCase
from collector_utilities.functions import days_ago
class OWASPDependencyCheckJenkinsPluginTest(SourceCollectorTestCase):
"""Unit tests for the OWASP Dependency Check Jenkins plugin metrics."""
def setUp(self):
self.sources = dict(
sourceid=dict(
type="owasp_dependency_check_jenkins_plugin",
parameters=dict(url="https://jenkins/job", severities=["critical", "high", "normal"])))
async def test_warnings(self):
"""Test that the number of security warnings is returned."""
metric = dict(type="security_warnings", addition="sum", sources=self.sources)
response = await self.collect(
metric,
get_request_json_return_value=dict(
warnings=[
dict(fileName="/f1", priority="NORMAL"),
dict(fileName="/f1", priority="HIGH"),
dict(fileName="/f2", priority="NORMAL"),
dict(fileName="/f3", priority="LOW"),
dict(fileName="/f4", priority="CRITICAL")]))
expected_entities = [
dict(key="-f1", file_path="/f1", highest_severity="High", nr_vulnerabilities="2"),
dict(key="-f2", file_path="/f2", highest_severity="Normal", nr_vulnerabilities="1"),
dict(key="-f4", file_path="/f4", highest_severity="Critical", nr_vulnerabilities="1")]
self.assert_measurement(response, value="3", entities=expected_entities)
async def test_up_to_dateness(self):
"""Test that the source age in days is returned."""
metric = dict(type="source_up_to_dateness", addition="max", sources=self.sources)
response = await self.collect(
metric, get_request_json_return_value=dict(timestamp="1565284457173"))
expected_age = days_ago(datetime.fromtimestamp(1565284457173 / 1000.))
self.assert_measurement(response, value=str(expected_age))
|
import attach
import unittest
class TestKnowValues(unittest.TestCase):
list_of_test_foldernames = (
("hello", "hello_2"),
("hello2", "hello2_2"),
("hello545", "hello545_2"),
("hello904352", "hello904352_2"),
("hello1", "hello1_2"),
("hello53431", "hello53431_2"),
("hello_", "hello_2"),
("hello_2", "hello_3"),
("hello_4324", "hello_4324_2"),
("hello__", "hello__2"),
("hello__2", "hello__3"),
("hello__4234", "hello__4234_2")
)
def test_attach_input_output_conversion(self):
for oldfolder, newfolder in self.list_of_test_foldernames:
test_answer = attach.destination_path_correction(oldfolder)
self.assertEqual(newfolder, test_answer)
if __name__ == '__main__':
unittest.main()
|
from enum import Enum
class IndicatorType(Enum):
CLEAR_CONTEXT = 1
YES = 2
NO = 3
PLACES_NEARBY = 4
RELATIVE_LANDMARK = 5
EMPTY_AFTER_FILTERING = 6
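# INDICATORS maps each IndicatorType either to a plain set of trigger words or to a
# dict of {word: list of follow-up word sequences}. An empty sequence ([[]]) appears to
# mean the word matches on its own; e.g. 'sure' matches alone, while 'lets' presumably
# only counts as YES when followed by 'roll', 'go' or 'leave'.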
INDICATORS = {
IndicatorType.CLEAR_CONTEXT:
set(['hm', 'hmm', 'hrm', 'oops', 'sorry', 'actually']),
IndicatorType.YES: {
'yes': [[]], 'sure': [[]], 'alright': [[]], 'definitely': [[]],
'ok': [[]], 'okay': [[]], 'yep': [[]], 'yeah': [[]], 'yah': [[]],
'perfect': [[]], 'great': [[]],
'lets': [['roll'], ['go'], ['leave']],
'sounds': [['good']],
'thats': [['it'], ['right'], ['the', 'one']],
},
IndicatorType.NO:
set(['not', 'no']),
IndicatorType.PLACES_NEARBY:
set([
'any',
'anything',
'anywhere',
'nearby',
'nearer',
'nearest',
'closer',
'closest',
'farther',
'farthest',
'further',
'furthest',
'another',
'other',
'others',
'places',
'around',
'option',
'options',
'someplace',
'suggest',
'suggestion',
'suggestions',
'recommend',
'recommendation',
'recommendations',
]),
IndicatorType.RELATIVE_LANDMARK: {
'across': [['the', 'street']],
'next': [['to']],
'near': [[]],
'by': [[]],
'close': [['to']],
'from': [[]],
},
}
PLACES_NEARBY_WORD_TO_INDEX_MAP = {
"first": 0,
"second": 1,
"third": 2,
"1st": 0,
"2nd": 1,
"3rd": 2
}
|
# write your code here
|
# -*- coding: utf-8 -*-
from flask import json
from flask import request
from flask_api import status
from flask_restful import Resource
from app import token_auth, db
from app.models.capsule_model import is_capsule_open, get_capsules_by_id
from app.modules.capsule.serialize.capsule import serialize_capsule
from app.modules import capsule
_URL = '/capsules/<prefix>'
class CapsulesInfo(Resource):
@capsule.API
@token_auth.login_required
def get(self, prefix):
capsule_id = prefix
if is_capsule_open(capsule_id):
capsule = get_capsules_by_id(capsule_id)
return serialize_capsule(capsule), status.HTTP_200_OK
else:
return "Capsule locked", status.HTTP_400_BAD_REQUEST
|
import os
import cv2
import numpy as np
face_cascade = cv2.CascadeClassifier('/Users/zhancheng-ibm/anaconda2/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml')
eye_cascade = cv2.CascadeClassifier('/Users/zhancheng-ibm/anaconda2/share/OpenCV/haarcascades/haarcascade_eye.xml')
face_recognizer = cv2.face.createLBPHFaceRecognizer()
images = []
labels = []
os.chdir('./face_detect/train_mike')
for idx in range(1,113):
img_location = "/Users/zhancheng-ibm/Desktop/opencv-projects/face_detect/train_mike/img-"+str(idx)+".jpg"
print(img_location)
image = cv2.imread(img_location,0)
if image is not None:
images.append(image)
labels.append(1)
face_recognizer.train(images,np.array(labels))
for idx in range(1,151):
img_location = "/Users/zhancheng-ibm/Desktop/opencv-projects/face_detect/train_matt/img-"+str(idx)+".jpg"
print(img_location)
image = cv2.imread(img_location,0)
if image is not None:
images.append(image)
labels.append(2)
face_recognizer.train(images,np.array(labels))
for idx in range(1,130):
img_location = "/Users/zhancheng-ibm/Desktop/opencv-projects/face_detect/train_york/img-"+str(idx)+".jpg"
print(img_location)
image = cv2.imread(img_location,0)
if image is not None:
images.append(image)
labels.append(3)
face_recognizer.train(images,np.array(labels))
print("Face Recongnition Trainning done")
face_recognizer.save("faceRecong.xml")
camera = cv2.VideoCapture(0)
camera.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
camera.set(cv2.CAP_PROP_FRAME_HEIGHT,480)
while True:
key = cv2.waitKey(1) & 0xFF
# if the 'q' key is pressed, stop the loop
if key == ord("q"):
break
(grabbed, frame)=camera.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30))
for (x,y,w,h) in faces :
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
label = face_recognizer.predict(gray[y:y+h, x:x+w])
if label == 1:
cv2.putText(frame, 'Mike', (x, y), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
if label == 2:
cv2.putText(frame, 'MATTTTTT', (x, y), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
if label == 3:
cv2.putText(frame, 'York', (x, y), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
cv2.imshow("Frame", frame)
|
from .logic import AND, NOT, OR, Predicate
from .action import Action
from .utils import TextTree, TypeTree
import re
from .strips import Domain, KnowledgeState, Problem
supported_requirements = {":strips", ":typing", ":disjunctive-preconditions", ":negative-preconditions"}
def load_pddl(domain_file, problem_file):
domain = load_domain(domain_file)
problem = load_problem(domain, problem_file)
return domain, problem
def strip_comments(lines):
    stripped = []
    for l in lines:
        idx = l.find(";")
        if idx == -1:
            stripped.append(l)
        else:
            stripped.append(l[:idx])
    return stripped
def load_textTree(text_file):
all_text = ""
with open(text_file, "r") as df:
lines = df.readlines()
lines = strip_comments(lines)
all_text = ''.join(lines)
all_text = all_text.replace('\r', '').replace('\n', '')
return TextTree(all_text)
def load_problem(domain, problem_file):
t = load_textTree(problem_file)
if t.root.text.replace(" ", "").lower() != "define":
raise SyntaxError("Incorrectly formatted PDDL file.")
problem_name = ""
objects = {}
initial_state = KnowledgeState()
goal_state = None
for child in t.root.children:
text_split = list(filter(None, child.text.split()))
if text_split[0].lower() == "problem":
problem_name = text_split[1]
elif text_split[0].lower() == ":domain":
domain_name = text_split[1]
if domain_name != domain.name:
raise SyntaxError(
"Domain supplied in problem file does not match the domain supplied in the domain file.")
elif text_split[0].lower() == ":objects":
objs = []
skip_next = True
for i, o in enumerate(text_split):
if skip_next:
skip_next = False
elif o == "-":
objects[text_split[i+1]] = objs
objs = []
skip_next = True
else:
objs.append(o)
if len(objs) != 0:
objects[None] = objs
elif text_split[0].lower() == ":init":
initial = []
for pred in child.children:
i = grounded_pred_from_str(pred.text, domain.predicates.values())
if i.check_grounded():
initial.append(i)
else:
raise SyntaxError(
"Initial state must be completely grounded.")
initial_state = initial_state.teach(initial)
elif text_split[0].lower() == ":goal":
goal_state = process_proposition_nodes(child.children[0], domain.predicates.values())
if not goal_state.check_grounded():
raise SyntaxError("Goal state must be completely grounded.")
else:
raise SyntaxError("Unrecognized keyword: {}".format(text_split[0]))
return Problem(problem_name, domain, objects, initial_state, goal_state)
def load_domain(domain_file):
t = load_textTree(domain_file)
if t.root.text.replace(" ", "").lower() != "define":
raise SyntaxError("Incorrectly formatted PDDL file.")
domain_name = ""
predicates = []
actions = []
types = TypeTree()
for child in t.root.children:
text_split = list(filter(None, child.text.split()))
if text_split[0].lower() == "domain":
domain_name = text_split[1]
elif text_split[0].lower() == ":requirements":
for req in text_split[1:]:
if req.lower() not in supported_requirements:
raise NotImplementedError(
"The requirement '{}' is not yet supported.".format(req))
elif text_split[0].lower() == ":types":
tps = []
skip_next = True
for i, t in enumerate(text_split):
if skip_next:
skip_next = False
elif t == "-":
types.add_types(tps, text_split[i+1])
tps = []
skip_next = True
else:
tps.append(t)
if len(tps) != 0:
types.add_types(tps)
elif text_split[0].lower() == ":predicates":
for pred in child.children:
predicates.append(Predicate.from_str(pred.text))
elif text_split[0].lower() == ":action":
action_name = text_split[1]
parameters = None
precondition = None
effect = None
for i, item in enumerate(text_split[2:]):
if item.lower() == ":parameters":
ws_pattern = re.compile(r'\s+')
params = list(
filter(None, re.sub(ws_pattern, '', child.children[i].text).split("?")))
parameters = []
for p in params:
splits = p.split("-")
pname = splits[0]
ptype = splits[1] if len(splits) == 2 else None
parameters.append((pname, ptype))
elif item.lower() == ":precondition":
precondition = process_proposition_nodes(child.children[i], predicates)
elif item.lower() == ":effect":
effect = process_proposition_nodes(child.children[i], predicates)
else:
raise SyntaxError(
"Unrecognized keyword in action definition: {}".format(item))
actions.append(
Action(action_name, parameters, precondition, effect))
else:
raise SyntaxError("Unrecognized keyword: {}".format(text_split[0]))
return Domain(domain_name, types, predicates, actions)
def process_proposition_nodes(t, predicates):
txt = t.text.replace(" ", "").lower()
if txt == "and":
return AND([process_proposition_nodes(c, predicates) for c in t.children])
elif txt == "or":
return OR([process_proposition_nodes(c, predicates) for c in t.children])
elif txt == "not":
if len(t.children) != 1:
raise SyntaxError(
"Incorrect number of arguments for NOT statement.")
return NOT(process_proposition_nodes(t.children[0], predicates))
else:
return grounded_pred_from_str(t.text, predicates)
def grounded_pred_from_str(s, predicates):
s = s.replace('\r', '').replace('\n', '')
pred = list(filter(None, s.split()))
if len(pred) < 2:
raise ValueError(
"Incorrect formatting for PDDL-style predicate string.")
name = pred[0]
pred_match = None
for p in predicates:
if p.name == name:
pred_match = p
break
if pred_match is None:
raise SyntaxError("Predicate not yet defined: {}".format(name))
if len(pred[1:]) != len(pred_match.variables):
raise SyntaxError("Incorrect number of arguments for the predicate with name {}".format(pred.name))
var_names = []
grounding = {}
for i, p in enumerate(pred[1:]):
if p[0] == "?":
if len(p) < 2:
raise ValueError(
"Incorrect formatting for PDDL-style predicate string.")
if (p[1:], pred_match.types[i]) in var_names:
raise ValueError("Duplicate variable name found: {}".format(p[1:]))
var_names.append((p[1:], pred_match.types[i]))
else:
vn = "x{}".format(i)
if (vn, pred_match.types[i]) in var_names:
raise ValueError("Duplicate variable name found: {}".format(vn))
var_names.append((vn, pred_match.types[i]))
grounding[vn] = p
return Predicate(name, var_names, grounding)
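# Illustrative usage sketch (the file names are assumptions; any PDDL domain/problem
# pair limited to the supported_requirements listed above should load):
#
#   domain, problem = load_pddl("blocksworld-domain.pddl", "blocksworld-problem.pddl")
#   # 'domain' is a strips.Domain and 'problem' is a strips.Problem built from the files.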
|
from libs.datasets import voc as voc
def get_train_dataset(CONFIG, p_split=None):
    if CONFIG.DATASET == 'VOC2012':
train_dataset = voc.VOC(
root=CONFIG.ROOT,
split='trainaug' if p_split is None else p_split ,
image_size=CONFIG.IMAGE.SIZE.TRAIN,
crop_size=CONFIG.IMAGE.SIZE.TRAIN,
scale=True,
flip=True,
)
else:
        raise ValueError('Dataset name ' + str(CONFIG.DATASET) + ' does not match with implemented datasets.')
return train_dataset
def get_val_dataset(CONFIG):
    if CONFIG.DATASET == 'VOC2012' or CONFIG.DATASET == 'VOC2012_mmi':
val_dataset = voc.VOC(
root=CONFIG.ROOT,
split='val',
image_size=CONFIG.IMAGE.SIZE.VAL,
crop_size=CONFIG.IMAGE.SIZE.VAL,
scale=False,
flip=False,
)
else:
        raise ValueError('Dataset name ' + str(CONFIG.DATASET) + ' does not match with implemented datasets.')
return val_dataset
|
from urllib.parse import urljoin
import requests
from crosswalk_client.exceptions import BadResponse
from crosswalk_client.objects.entity import EntityObject
from crosswalk_client.validators.entity import (
validate_block_attrs_kwarg,
validate_domain_kwarg,
)
class GetEntities(object):
@validate_block_attrs_kwarg
@validate_domain_kwarg
def get_entities(self, block_attrs={}, domain=None):
if domain is None:
domain = self.domain
response = requests.get(
urljoin(
self.service_address, "domains/{}/entities/".format(domain)
),
headers=self.headers,
params=block_attrs,
)
if response.status_code != requests.codes.ok:
raise BadResponse(
"The service responded with a {}: {}".format(
response.status_code, response.content
)
)
entities = response.json()
return [
EntityObject({"entity": entity}, client=self)
for entity in entities
]
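# Illustrative usage sketch: GetEntities is written as a mixin, so it assumes a client
# object providing self.domain, self.service_address and self.headers. The client class
# name and constructor below are hypothetical:
#
#   client = CrosswalkClient(service_address="https://crosswalk.example.com/api/", token="...")
#   counties = client.get_entities(block_attrs={"state": "KS"}, domain="counties")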
|
"""
vocareum
python3 train.py
local
spark-submit train.py
"""
import json
import os
import platform
import re
import sys
import time
import numpy as np
from pyspark import SparkConf, SparkContext, StorageLevel
from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating
import support
system_type = platform.system()
if system_type == 'Linux':
print(system_type)
# for run on vocareum
os.environ['PYSPARK_PYTHON'] = '/usr/local/bin/python3.6'
os.environ['PYSPARK_DRIVER_PYTHON'] = '/usr/local/bin/python3.6'
train_file = "../resource/asnlib/publicdata/train_review.json"
model_file = "model.json"
elif system_type == 'Darwin':
print(system_type)
# run for local macos
stopwords_file = "file:///Users/markduan/duan/USC_course/USC_APDS/INF553/project/dev/stopwords"
train_file = "file:///Users/markduan/duan/USC_course/USC_APDS/INF553/project/data/train_review.json"
user_avg_file = "/Users/markduan/duan/USC_course/USC_APDS/INF553/project/data/user_avg.json"
business_avg_file = "/Users/markduan/duan/USC_course/USC_APDS/INF553/project/data/business_avg.json"
user_json = "file:///Users/markduan/duan/USC_course/USC_APDS/INF553/project/data/user.json"
business_json = "file:///Users/markduan/duan/USC_course/USC_APDS/INF553/project/data/business.json"
model_dir = './model/'
als_model_file = model_dir + "als.json"
checkpoint_file = "file:///Users/markduan/duan/USC_course/USC_APDS/INF553/project/dev/checkpoint"
agm_train_file = model_dir + "agm_train.json"
else:
print('wrong system type.')
sys.exit()
JS_THRESHOLD = 0.7
AGM_USER_THRESHOLD = 8
AGM_THRESHOLD = 3
UNK = 3.7961611526341503
LONELY_USER_THRESHOLD = 5
LONELY_BUSINESS_THRESHOLD = 8
# # for tuning
# CORATED_LIMIT = int(sys.argv[1])
# LONELY_USER_THRESHOLD = int(sys.argv[2])
# LONELY_BUSINESS_THRESHOLD = int(sys.argv[3])
# itemcf_model_file = '/Users/markduan/duan/USC_course/USC_APDS/INF553/project/model/model_itemCF.json'
# als_model_file = "/Users/markduan/duan/USC_course/USC_APDS/INF553/project/model/als.json"
def processCategories(v, stopwords):
# v - "Arcades, Arts & Entertainment"
v = v.lower()
pattern = r"[a-z]+" # only words
words_without_punc_num = re.findall(pattern, v)
words_clean = set([word for word in words_without_punc_num if word not in stopwords])
return words_clean
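# computeJaccardSimilarity below returns the size of the set intersection divided by
# the size of the set union; 'fenzi' and 'fenmu' are pinyin for numerator/denominator.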
def computeJaccardSimilarity(i_set, j_set):
fenzi = len(i_set.intersection(j_set))
fenmu = len(i_set.union(j_set))
return fenzi / fenmu
def getJS(i_b, b_profile, b_list):
i_set = b_profile[i_b]
l_ = []
for j in range(len(b_list)):
j_b = b_list[j]
if j_b > i_b:
j_set = b_profile[j_b]
sim = computeJaccardSimilarity(i_set, j_set)
if sim >= JS_THRESHOLD and sim != 0.0:
new_1 = (i_b, [(j_b, sim)])
new_2 = (j_b, [(i_b, sim)])
l_.append(new_1)
l_.append(new_2)
return l_
def adjustedSim(sim, target, accord, n_b_avg):
t_avg = n_b_avg.get(target, UNK)
a_avg = n_b_avg.get(accord, UNK)
if a_avg > t_avg:
return sim
else:
return 1 / sim
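# processValues augments sparse users: when a user has at least AGM_THRESHOLD but fewer
# than AGM_USER_THRESHOLD rated businesses, pseudo-ratings are generated for unrated
# businesses whose category profiles are Jaccard-similar to ones the user has rated,
# using a similarity-weighted average of the user's stars (adjusted via adjustedSim and
# capped at 5.0). Users outside that range are returned unchanged.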
def processValues(vs, jaccard_sim, n_b_avg):
# vs - [(n_b, star), ...]
# jaccard_sim - [(0, {1:0.7, ...}), ...]
if len(vs) >= AGM_USER_THRESHOLD or len(vs) < AGM_THRESHOLD:
return vs
v_d = {k: v for k, v in vs}
v_d_keys = set(v_d.keys())
vs_agm = []
for x in jaccard_sim:
target_b = x[0]
if target_b not in v_d_keys:
sim_b = x[1]
sim_b_keys = set(sim_b.keys())
inter = list(v_d_keys.intersection(sim_b_keys))
if len(inter) >= AGM_THRESHOLD and len(inter) != 0:
v_vct = np.array([v_d[k] for k in inter])
b_vct_fenzi = np.array([adjustedSim(sim_b[k], target_b, k, n_b_avg) for k in inter])
b_vct = np.array([sim_b[k] for k in inter])
agm_stars = np.dot(v_vct, b_vct_fenzi) / b_vct.sum()
if agm_stars > 5.0:
agm_stars = 5.0
vs_agm.append((target_b, agm_stars))
return vs + vs_agm
def collectAlsModel(modelRDD, u_table, b_table):
user_featrue = modelRDD.userFeatures() \
.map(lambda x: (u_table[x[0]], list(x[1])[0])) \
.collectAsMap()
product_feature = modelRDD.productFeatures() \
.map(lambda x: (b_table[x[0]], list(x[1])[0])) \
.collectAsMap()
return [user_featrue, product_feature]
def saveAlsModel(modelRDD, u_table, b_table, model_file):
model = collectAlsModel(modelRDD, u_table, b_table)
with open(model_file, 'w', encoding='utf-8') as fp:
json.dump(model, fp)
def train():
conf = SparkConf() \
.setAppName("project") \
.setMaster("local[*]") \
.set("spark.driver.memory","4g")
sc = SparkContext(conf=conf)
# check model dir
if not os.path.exists(model_dir):
os.mkdir(model_dir)
    # rename: map user/business string ids to integer indexes
raw_data = sc.textFile(train_file).map(json.loads).persist(StorageLevel.MEMORY_AND_DISK)
u_table1 = raw_data.map(lambda x: x['user_id']).distinct().collect()
u_set1 = set(u_table1)
b_table1 = raw_data.map(lambda x: x['business_id']).distinct().collect()
b_set1 = set(b_table1)
user_avg = support.getAvg(user_avg_file)
business_avg = support.getAvg(business_avg_file)
u_set2 = set(user_avg.keys())
b_set2 = set(business_avg.keys())
b_table3 = sc.textFile(business_json).map(json.loads).map(lambda x: x['business_id']).collect()
b_set3 = set(b_table3)
u_table = list(u_set1.union(u_set2))
b_table = list(b_set1.union(b_set2).union(b_set3))
u_d = {u_table[i]: i for i in range(len(u_table))}
b_d = {b_table[i]: i for i in range(len(b_table))}
    # augmentation
business_avg = support.getAvg(business_avg_file)
n_b_avg = {b_d[k]: business_avg[k] for k in business_avg}
# get stopwords
stopwords = sc.textFile(stopwords_file).collect()
b_profile = sc.textFile(business_json) \
.map(json.loads) \
.map(lambda x: (x['business_id'], x['categories'])) \
.map(lambda x: (b_d[x[0]], x[1])) \
.mapValues(lambda v: processCategories(v, stopwords)) \
.collectAsMap()
b_list = list(sorted(b_profile.keys()))
b_length = len(b_profile)
jaccard_sim = sc.parallelize(b_list) \
.flatMap(lambda x: getJS(x, b_profile, b_list)) \
.reduceByKey(lambda x, y: x + y) \
.mapValues(lambda vs: {k: v for k, v in vs}) \
.collect()
agm_data = raw_data.map(lambda r: (r['user_id'], r['business_id'], r['stars'])) \
.map(lambda x: (u_d[x[0]], b_d[x[1]], x[2])) \
.map(lambda x: (x[0], [(x[1], x[2])])) \
.reduceByKey(lambda x, y: x + y) \
.mapValues(lambda vs: processValues(vs, jaccard_sim, n_b_avg)) \
.flatMap(lambda x: [(x[0], b, star) for b, star in x[1]]) \
.persist(StorageLevel.MEMORY_AND_DISK)
    # als
agm_train = agm_data.map(lambda x: ((u_table[x[0]], b_table[x[1]]), x[2])).collect()
support.writeDownRenameTable(agm_train, agm_train_file)
lonely_user = agm_data.map(lambda x: (x[0], 1)) \
.reduceByKey(lambda x, y: x + y) \
.filter(lambda x: x[1] < LONELY_USER_THRESHOLD) \
.map(lambda x: x[0]) \
.collect()
lonely_business = agm_data.map(lambda x: (x[1], 1)) \
.reduceByKey(lambda x, y: x + y) \
.filter(lambda x: x[1] < LONELY_BUSINESS_THRESHOLD) \
.map(lambda x: x[0]) \
.collect()
stars_data = agm_data.filter(lambda x: x[0] not in lonely_user and x[1] not in lonely_business) \
.map(lambda x: Rating(x[0], x[1], x[2])).persist(StorageLevel.MEMORY_AND_DISK)
sc.setCheckpointDir(checkpoint_file)
ALS.checkpointInterval = 2
modelRDD = ALS.train(ratings=stars_data, rank=1, iterations=70, lambda_=0.01, nonnegative=True)
saveAlsModel(modelRDD, u_table, b_table, als_model_file)
if __name__ == "__main__":
t_1 = time.time()
train()
print('Time %fs.' % (time.time() - t_1))
|
""" Exceptions used throughout this package """
class AuthRequiredError(Exception):
""" Error raised when authentication is required """
class InputRequiredError(Exception):
""" Error raised if input is required """
class InvalidComponentError(Exception):
""" Error raised if invalid component is provided """
class InvalidComponentVersionError(Exception):
""" Error raised if invalid component version is provided """
class HTTPError(Exception):
""" Error raised http error occurs """
class FileLoadError(Exception):
""" Error raised if file load error occurs """
class SSHCommandStdError(Exception):
""" Error raised if ssh client command response contains stderr """
class DeviceReadyError(Exception):
""" Error raised if device ready check fails """
class RetryInterruptedError(Exception):
""" Error raised if method retry is intentionally interrupted """
class InvalidAuthError(Exception):
""" Error raised if authentication fails """
|
from app.utils.safe_dict import safeDict
|
"""
## Classes that define the cross section of a pipe or duct.
"""
from typing import Optional, Type
import math
import quantities as qty
from pypeflow.core.pipe_schedules import PipeSchedule
class CrossSection:
"""
Base class from which different shapes of cross sections are derived.
"""
@property
def area(self) -> qty.Area:
"""
Get the area (*quantities.Area*) of the cross section.
"""
return qty.Area()
@property
def diameter(self) -> qty.Length:
"""
Get/set the (equivalent) diameter (*quantities.Length*) of the cross section.
"""
return qty.Length()
@diameter.setter
def diameter(self, di_th: qty.Length):
pass
class Circular(CrossSection):
"""Class that models a circular cross section."""
def __init__(self):
self._di: float = math.nan # inside diameter that corresponds with nominal diameter
self._dn: float = math.nan # nominal diameter according to pipe schedule
self._di_th: float = math.nan # theoretical or calculated inside diameter
self._pipe_schedule: Optional[Type[PipeSchedule]] = None
@classmethod
def create(cls, pipe_schedule: Type[PipeSchedule], dn: Optional[qty.Length] = None,
di_th: Optional[qty.Length] = None):
"""
Create a circular cross section.
To create the cross section, either the nominal diameter or a calculated, theoretical diameter must be passed
to the method.
**Parameters:**
- `pipe_schedule` : *type of core.pipe_schedules.PipeSchedule*<br>
The pipe schedule that defines the dimensions of the pipe's cross section.
- `dn` : object of *quantities.Length* (optional, default None)<br>
The nominal diameter of the cross section that belongs to the pipe schedule being used.
- `di_th` : object of *quantities.Length* (optional, default None)<br>
The calculated or theoretical inside diameter of the cross section.
"""
c = cls()
c.pipe_schedule = pipe_schedule
if dn is not None:
c.nominal_diameter = dn
elif di_th is not None:
c.diameter = di_th
return c
@property
def nominal_diameter(self) -> qty.Length:
"""
Get/set the nominal diameter (*quantities.Length*) of the cross section.
The inside diameter that corresponds with the nominal diameter is also set based on the pipe schedule that
was passed when the CrossSection object was created.
"""
return qty.Length(self._dn)
@nominal_diameter.setter
def nominal_diameter(self, dn: qty.Length):
self._dn = dn()
# when nominal diameter is set, the corresponding inside diameter is also set
self._di = self._di_th = self._pipe_schedule.inside_diameter(DN=dn).get()
@property
def area(self) -> qty.Area:
"""
Get the area of the cross section.
**Returns:**
- object of *quantities.Area*
"""
return qty.Area(math.pi * self._di ** 2.0 / 4.0)
@property
def diameter(self) -> qty.Length:
"""
Get/set the inside diameter (*quantities.Length*) of the cross section.
This will also set the nearest nominal diameter and corresponding inside diameter based on the pipe schedule
that was passed when creating the cross section.
"""
return qty.Length(self._di)
@diameter.setter
def diameter(self, di_th: qty.Length):
self._di_th = di_th()
# get the nearest nominal diameter
dn = self._pipe_schedule.nominal_diameter(d_int=di_th)
self._dn = dn()
# get the inside diameter that corresponds with the nominal diameter
self._di = self._pipe_schedule.inside_diameter(dn).get()
@property
def calculated_diameter(self) -> qty.Length:
"""
Get the calculated or theoretical inside diameter (*quantities.Length*) of the cross section.
"""
return qty.Length(self._di_th)
@property
def pipe_schedule(self) -> Type[PipeSchedule]:
"""
Get/set the pipe schedule (*core.pipe_schedules.PipeSchedule*) of the cross section.
"""
return self._pipe_schedule
@pipe_schedule.setter
def pipe_schedule(self, schedule: Type[PipeSchedule]):
self._pipe_schedule = schedule
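# --- Illustrative usage sketch (not part of the original module) ---
# Assuming some concrete pipe schedule class, e.g. `MySchedule`, is importable from
# pypeflow.core.pipe_schedules, a cross section can be created either from a nominal
# diameter or from a calculated (theoretical) inside diameter:
#
#     cs = Circular.create(pipe_schedule=MySchedule, di_th=qty.Length(0.05))
#     cs.nominal_diameter    # nearest DN available in the schedule
#     cs.diameter            # inside diameter that belongs to that DN
#     cs.area                # flow area derived from the inside diameter
#
# The exact `qty.Length` constructor arguments depend on the quantities package and
# are shown here only as an assumption.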
|
__author__ = 'mpetyx'
from django.contrib import admin
from .models import OpeniNutrition
class NutritionAdmin(admin.ModelAdmin):
pass
admin.site.register(OpeniNutrition, NutritionAdmin)
|
#!/usr/bin/env python3
# Copyright (c) 2015-2021 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
feature_llmq_rotation.py
Checks LLMQs Quorum Rotation
'''
from io import BytesIO
from test_framework.test_framework import DashTestFramework
from test_framework.messages import CBlock, CBlockHeader, CCbTx, CMerkleBlock, FromHex, hash256, msg_getmnlistd, QuorumId
from test_framework.mininode import P2PInterface
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
connect_nodes,
sync_blocks,
wait_until,
)
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
def extract_quorum_members(quorum_info):
return [d['proTxHash'] for d in quorum_info["members"]]
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.last_mnlistdiff = None
def on_mnlistdiff(self, message):
self.last_mnlistdiff = message
def wait_for_mnlistdiff(self, timeout=30):
def received_mnlistdiff():
return self.last_mnlistdiff is not None
return wait_until(received_mnlistdiff, timeout=timeout)
def getmnlistdiff(self, baseBlockHash, blockHash):
msg = msg_getmnlistd(baseBlockHash, blockHash)
self.last_mnlistdiff = None
self.send_message(msg)
self.wait_for_mnlistdiff()
return self.last_mnlistdiff
class LLMQQuorumRotationTest(DashTestFramework):
def set_test_params(self):
self.set_dash_test_params(16, 15, fast_dip3_enforcement=True)
self.set_dash_llmq_test_params(4, 4)
def run_test(self):
llmq_type=103
llmq_type_name="llmq_test_dip0024"
self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn())
# Connect all nodes to node1 so that we always have the whole network connected
# Otherwise only masternode connections will be established between nodes, which won't propagate TXs/blocks
# Usually node0 is the one that does this, but in this test we isolate it multiple times
for i in range(len(self.nodes)):
if i != 1:
connect_nodes(self.nodes[i], 0)
self.activate_dip8()
self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
self.wait_for_sporks_same()
self.activate_dip0024(expected_activation_height=900)
self.log.info("Activated DIP0024 at height:" + str(self.nodes[0].getblockcount()))
#At this point, we need to move forward 3 cycles (3 x 24 blocks) so the first 3 quarters can be created (without DKG sessions)
#self.log.info("Start at H height:" + str(self.nodes[0].getblockcount()))
self.move_to_next_cycle()
self.log.info("Cycle H height:" + str(self.nodes[0].getblockcount()))
self.move_to_next_cycle()
self.log.info("Cycle H+C height:" + str(self.nodes[0].getblockcount()))
self.move_to_next_cycle()
self.log.info("Cycle H+2C height:" + str(self.nodes[0].getblockcount()))
b_0 = self.nodes[0].getbestblockhash()
(quorum_info_0_0, quorum_info_0_1) = self.mine_cycle_quorum(llmq_type_name=llmq_type_name, llmq_type=llmq_type)
quorum_members_0_0 = extract_quorum_members(quorum_info_0_0)
quorum_members_0_1 = extract_quorum_members(quorum_info_0_1)
assert_equal(len(intersection(quorum_members_0_0, quorum_members_0_1)), 0)
self.log.info("Quorum #0_0 members: " + str(quorum_members_0_0))
self.log.info("Quorum #0_1 members: " + str(quorum_members_0_1))
q_100_0 = QuorumId(100, int(quorum_info_0_0["quorumHash"], 16))
q_102_0 = QuorumId(102, int(quorum_info_0_0["quorumHash"], 16))
q_104_0 = QuorumId(104, int(quorum_info_0_0["quorumHash"], 16))
q_103_0_0 = QuorumId(103, int(quorum_info_0_0["quorumHash"], 16))
q_103_0_1 = QuorumId(103, int(quorum_info_0_1["quorumHash"], 16))
b_1 = self.nodes[0].getbestblockhash()
expectedDeleted = []
expectedNew = [q_100_0, q_102_0, q_104_0, q_103_0_0, q_103_0_1]
quorumList = self.test_getmnlistdiff_quorums(b_0, b_1, {}, expectedDeleted, expectedNew)
(quorum_info_1_0, quorum_info_1_1) = self.mine_cycle_quorum(llmq_type_name=llmq_type_name, llmq_type=llmq_type)
quorum_members_1_0 = extract_quorum_members(quorum_info_1_0)
quorum_members_1_1 = extract_quorum_members(quorum_info_1_1)
assert_equal(len(intersection(quorum_members_1_0, quorum_members_1_1)), 0)
self.log.info("Quorum #1_0 members: " + str(quorum_members_1_0))
self.log.info("Quorum #1_1 members: " + str(quorum_members_1_1))
q_100_1 = QuorumId(100, int(quorum_info_1_0["quorumHash"], 16))
q_102_1 = QuorumId(102, int(quorum_info_1_0["quorumHash"], 16))
q_103_1_0 = QuorumId(103, int(quorum_info_1_0["quorumHash"], 16))
q_103_1_1 = QuorumId(103, int(quorum_info_1_1["quorumHash"], 16))
b_2 = self.nodes[0].getbestblockhash()
expectedDeleted = [q_103_0_0, q_103_0_1]
expectedNew = [q_100_1, q_102_1, q_103_1_0, q_103_1_1]
quorumList = self.test_getmnlistdiff_quorums(b_1, b_2, quorumList, expectedDeleted, expectedNew)
(quorum_info_2_0, quorum_info_2_1) = self.mine_cycle_quorum(llmq_type_name=llmq_type_name, llmq_type=llmq_type)
quorum_members_2_0 = extract_quorum_members(quorum_info_2_0)
quorum_members_2_1 = extract_quorum_members(quorum_info_2_1)
assert_equal(len(intersection(quorum_members_2_0, quorum_members_2_1)), 0)
self.log.info("Quorum #2_0 members: " + str(quorum_members_2_0))
self.log.info("Quorum #2_1 members: " + str(quorum_members_2_1))
q_100_2 = QuorumId(100, int(quorum_info_2_0["quorumHash"], 16))
q_102_2 = QuorumId(102, int(quorum_info_2_0["quorumHash"], 16))
q_103_2_0 = QuorumId(103, int(quorum_info_2_0["quorumHash"], 16))
q_103_2_1 = QuorumId(103, int(quorum_info_2_1["quorumHash"], 16))
b_3 = self.nodes[0].getbestblockhash()
expectedDeleted = [q_100_0, q_102_0, q_103_1_0, q_103_1_1]
expectedNew = [q_100_2, q_102_2, q_103_2_0, q_103_2_1]
quorumList = self.test_getmnlistdiff_quorums(b_2, b_3, quorumList, expectedDeleted, expectedNew)
mninfos_online = self.mninfo.copy()
nodes = [self.nodes[0]] + [mn.node for mn in mninfos_online]
sync_blocks(nodes)
quorum_list = self.nodes[0].quorum("list", llmq_type)
quorum_blockhash = self.nodes[0].getbestblockhash()
fallback_blockhash = self.nodes[0].generate(1)[0]
self.log.info("h("+str(self.nodes[0].getblockcount())+") quorum_list:"+str(quorum_list))
assert_greater_than_or_equal(len(intersection(quorum_members_0_0, quorum_members_1_0)), 3)
assert_greater_than_or_equal(len(intersection(quorum_members_0_1, quorum_members_1_1)), 3)
assert_greater_than_or_equal(len(intersection(quorum_members_0_0, quorum_members_2_0)), 2)
assert_greater_than_or_equal(len(intersection(quorum_members_0_1, quorum_members_2_1)), 2)
assert_greater_than_or_equal(len(intersection(quorum_members_1_0, quorum_members_2_0)), 3)
assert_greater_than_or_equal(len(intersection(quorum_members_1_1, quorum_members_2_1)), 3)
self.log.info("Mine a quorum to invalidate")
(quorum_info_3_0, quorum_info_3_1) = self.mine_cycle_quorum(llmq_type_name=llmq_type_name, llmq_type=llmq_type)
new_quorum_list = self.nodes[0].quorum("list", llmq_type)
assert_equal(len(new_quorum_list[llmq_type_name]), len(quorum_list[llmq_type_name]) + 2)
new_quorum_blockhash = self.nodes[0].getbestblockhash()
self.log.info("h("+str(self.nodes[0].getblockcount())+") new_quorum_blockhash:"+new_quorum_blockhash)
self.log.info("h("+str(self.nodes[0].getblockcount())+") new_quorum_list:"+str(new_quorum_list))
assert new_quorum_list != quorum_list
self.log.info("Invalidate the quorum")
self.bump_mocktime(5)
self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 4070908800)
self.wait_for_sporks_same()
self.nodes[0].invalidateblock(fallback_blockhash)
assert_equal(self.nodes[0].getbestblockhash(), quorum_blockhash)
assert_equal(self.nodes[0].quorum("list", llmq_type), quorum_list)
self.log.info("Reconsider the quorum")
self.bump_mocktime(5)
self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0)
self.wait_for_sporks_same()
self.nodes[0].reconsiderblock(fallback_blockhash)
wait_until(lambda: self.nodes[0].getbestblockhash() == new_quorum_blockhash, sleep=1)
assert_equal(self.nodes[0].quorum("list", llmq_type), new_quorum_list)
def test_getmnlistdiff_quorums(self, baseBlockHash, blockHash, baseQuorumList, expectedDeleted, expectedNew):
d = self.test_getmnlistdiff_base(baseBlockHash, blockHash)
assert_equal(set(d.deletedQuorums), set(expectedDeleted))
assert_equal(set([QuorumId(e.llmqType, e.quorumHash) for e in d.newQuorums]), set(expectedNew))
newQuorumList = baseQuorumList.copy()
for e in d.deletedQuorums:
newQuorumList.pop(e)
for e in d.newQuorums:
newQuorumList[QuorumId(e.llmqType, e.quorumHash)] = e
cbtx = CCbTx()
cbtx.deserialize(BytesIO(d.cbTx.vExtraPayload))
if cbtx.version >= 2:
hashes = []
for qc in newQuorumList.values():
hashes.append(hash256(qc.serialize()))
hashes.sort()
merkleRoot = CBlock.get_merkle_root(hashes)
assert_equal(merkleRoot, cbtx.merkleRootQuorums)
return newQuorumList
def test_getmnlistdiff_base(self, baseBlockHash, blockHash):
hexstr = self.nodes[0].getblockheader(blockHash, False)
header = FromHex(CBlockHeader(), hexstr)
d = self.test_node.getmnlistdiff(int(baseBlockHash, 16), int(blockHash, 16))
assert_equal(d.baseBlockHash, int(baseBlockHash, 16))
assert_equal(d.blockHash, int(blockHash, 16))
# Check that the merkle proof is valid
proof = CMerkleBlock(header, d.merkleProof)
proof = proof.serialize().hex()
assert_equal(self.nodes[0].verifytxoutproof(proof), [d.cbTx.hash])
# Check if P2P messages match with RPCs
d2 = self.nodes[0].protx("diff", baseBlockHash, blockHash)
assert_equal(d2["baseBlockHash"], baseBlockHash)
assert_equal(d2["blockHash"], blockHash)
assert_equal(d2["cbTxMerkleTree"], d.merkleProof.serialize().hex())
assert_equal(d2["cbTx"], d.cbTx.serialize().hex())
assert_equal(set([int(e, 16) for e in d2["deletedMNs"]]), set(d.deletedMNs))
assert_equal(set([int(e["proRegTxHash"], 16) for e in d2["mnList"]]), set([e.proRegTxHash for e in d.mnList]))
assert_equal(set([QuorumId(e["llmqType"], int(e["quorumHash"], 16)) for e in d2["deletedQuorums"]]), set(d.deletedQuorums))
assert_equal(set([QuorumId(e["llmqType"], int(e["quorumHash"], 16)) for e in d2["newQuorums"]]), set([QuorumId(e.llmqType, e.quorumHash) for e in d.newQuorums]))
return d
if __name__ == '__main__':
LLMQQuorumRotationTest().main()
|
import os
import keras
import tensorflow as tf
import numpy as np
from chord.chord_generator import ChordGenerator
from note.chord_to_note_generator import ChordToNoteGenerator
from keras import backend as K
import pandas as pd
from pandas import DataFrame
from tensorflow.contrib.tensorboard.plugins import projector
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
def main():
ROOT_DIR = './graphs/'
os.makedirs(ROOT_DIR, exist_ok=True)
OUTPUT_MODEL_FILE_NAME = os.path.join(ROOT_DIR,'tf.ckpt')
# get the keras model
# ChordToNote embeddings didn't reveal anything. Trying the chord generator instead
cg = ChordGenerator()
model = cg.build_model()
model.load_weights('../chord/chord_weights_bidem.h5')
# ctng.load_model(model_name="bidem", is_fast_load=True)
# model = ctng.model
layer = model.layers[0] # embedding layer
print(layer)
inp = model.input # input placeholder
output = layer.output # embedding layer outputs
functor = K.function([inp, K.learning_phase()], [output]) # evaluation functions
# Testing
test = np.arange(2, 26)
print(len(test))
# test = np.pad(test, (0, 1200 - len(test)), 'constant', constant_values=0)
layer_outs = np.array(functor([test, 0])).squeeze()  # 0 = test-time learning phase
# Get working directory
PATH = os.getcwd()
# Path to save the embedding and checkpoints generated
LOG_DIR = PATH + '/graphs/'
# Load data
df = pd.DataFrame(layer_outs)
df.to_csv('test.csv')
# Load the metadata file. Metadata consists your labels. This is optional.
# Metadata helps us visualize(color) different clusters that form t-SNE
# metadata = os.path.join(LOG_DIR, 'df_labels.tsv')
# Generating a PCA projection of the embedding outputs (reduce to 32 components)
pca = PCA(n_components=32,
random_state=123,
svd_solver='full'
)
df_pca = pd.DataFrame(pca.fit_transform(df))
df_pca = df_pca.values
# TensorFlow Variable from data
tf_data = tf.Variable(df_pca)
# Running TensorFlow Session
with tf.Session() as sess:
saver = tf.train.Saver([tf_data])
sess.run(tf_data.initializer)
saver.save(sess, os.path.join(LOG_DIR, 'tf_data.ckpt'))
config = projector.ProjectorConfig()
# One can add multiple embeddings.
embedding = config.embeddings.add()
embedding.tensor_name = tf_data.name
# Link this tensor to its metadata(Labels) file
# embedding.metadata_path = metadata
# Saves a config file that TensorBoard will read during startup.
projector.visualize_embeddings(tf.summary.FileWriter(LOG_DIR), config)
if __name__ == "__main__":
main()
|
from .core import KobotoolboxSeleniumMixin # noqa
|
# -*- coding: utf-8 -*-
import sys
import os
import yaml
from sqlalchemy import Table,literal_column,select
import csv
def importVolumes(connection,metadata,sourcePath):
invVolumes = Table('invVolumes',metadata)
invTypes = Table('invTypes',metadata)
with open(os.path.join(sourcePath,'invVolumes1.csv'), 'r') as groupVolumes:
volumereader=csv.reader(groupVolumes, delimiter=',')
for group in volumereader:
connection.execute(invVolumes.insert().from_select(['typeID','volume'],select([invTypes.c.typeID,literal_column(group[0])]).where(invTypes.c.groupID == literal_column(group[1]))))
with open(os.path.join(sourcePath,'invVolumes2.csv'), 'r') as groupVolumes:
volumereader=csv.reader(groupVolumes, delimiter=',')
for group in volumereader:
connection.execute(invVolumes.insert(),typeID=group[1],volume=group[0])
|
import logging
from datetime import datetime
import dask
import dask.dataframe as dd
import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
logging.basicConfig()
logger = logging.getLogger("Helpers")
logger.setLevel(logging.DEBUG)
class Helpers:
# Returns log file data as a Dataframe
def ReadLog(self, listFiles):
logger.info("Reading Data")
cols = [
"Timestamp",
"Elapsed Time",
"Client",
"Log_TagHTTP_Code",
"Size",
"Method",
"URI",
"UserID",
"HierarchyHostname",
"Content_type",
]
logger.debug("Reading " + listFiles[0])
df = dd.read_csv(listFiles[0], names=cols, delim_whitespace=True, header=None)
for filePath in listFiles[1:]:
logger.debug("Reading " + filePath)
# Reading File
df = df.append(
dd.read_csv(filePath, names=cols, delim_whitespace=True, header=None)
)
# Separating Log_Tag and HTTP_Code
logger.debug("Splitting LogTag and HTTP Code")
logTag = df.Log_TagHTTP_Code.apply(lambda x: str(x).split("/")[0], meta=object)
httpCode = df.Log_TagHTTP_Code.apply(lambda x: x.split("/")[1], meta=object)
df = df.assign(Log_Tag=logTag)
df = df.assign(HTTP_Code=httpCode)
# Separating Hostname and Hierarchy
logger.debug("Splitting Hierarchy and hostname")
hierarchy = df.HierarchyHostname.apply(lambda x: x.split("/")[0], meta=object)
hostname = df.HierarchyHostname.apply(lambda x: x.split("/")[1], meta=object)
df = df.assign(Hierarchy=hierarchy)
df = df.assign(Hostname=hostname)
# Extracting Domain from URI
logger.debug("Extracting Domain Names")
m = df["URI"].str.extract("(?<=http://)(.*?)(?=/)|(?<=https://)(.*?)(?=/)")
m = m[0].fillna(m[1])
df["Domain_Name"] = m
df["Domain_Name"] = df["Domain_Name"].fillna(
df["URI"].str.extract("()(.*?)(?=:)")[1]
)
# Dropping Useless Data to reduce RAM usage
logger.debug("Dropping Useless Columns")
# Converting TimeStamp datatype
df["Timestamp"] = dd.to_datetime(df["Timestamp"], unit="s")
domainsToDrop = {
"gateway.iitmandi.ac.in": "IIT Mandi Login Portal",
"ak.staticimgfarm.com": "Free Anti-Malware by Chrome",
"login.live.com": "Microsoft Login",
"ctldl.windowsupdate.com": "Windows Update",
"www.msftconnecttest.com": "Microsoft Connection Test",
"ssw.live.com": "Windows Sneaking data from Us (Windows just can't stop talking to Microsoft)",
"splunkci.gingersoftware.com": "Language Checking sofware",
"in.archive.ubuntu.com": "ubuntu connecting behind peoples back",
}
# Storing Information about filtered Contents
rv = {
"labels": [
"Filtered Domains",
"Number of Hits",
"Percentage Hits",
"Description",
],
"values": [],
}
self.totalHits = len(df)
self.domainCounts = df.Domain_Name.value_counts().compute()
for domain in domainsToDrop.keys():
# Skip step if domain not present
if domain not in self.domainCounts.keys():
continue
count = self.domainCounts[domain]
rv["values"].append(
[domain, count, count * 1.0 / self.totalHits, domainsToDrop[domain]]
)
self.totalHits -= self.domainCounts[domain]
self.domainCounts[domain] = 0
df = df[df["Domain_Name"] != domain]
self.df = df
logger.info("Data Read Successfully")
return rv
def CountWebsite(self, ax):
logger.info("Counting Most Visited Domains")
columnList = self.domainCounts
elementList = columnList.nlargest(n=20)
mostVisited = columnList.max()
leastVisited = columnList.min()
websites = [key for key, val in elementList.items()]
frequency = [val for key, val in elementList.items()]
ax.bar(websites, frequency)
ax.set_title("Most Frequently Visited Websites")
ax.set_xlabel("Domain_Name")
ax.set_ylabel("Frequency")
ax.tick_params(labelrotation=60)
return ax
def PlotHttpCode(self, ax):
logger.info("Plotting HTTP Status Code")
columnList = self.df["HTTP_Code"].value_counts().compute()
httpcode = [key for key, val in columnList.items()]
frequency = [val for key, val in columnList.items()]
ax.bar(httpcode, frequency)
ax.set_title("HTTP Response Occurence")
ax.set_xlabel("HTTP Code")
ax.set_ylabel("Frequency")
ax.tick_params(labelrotation=60)
return ax
def PlotAcceptedDeniedCount(self, ax):
logger.info("Counting Total Requests")
countAll = [0] * 24
countDenied = [0] * 24
time = self.df["Timestamp"]
logTag = self.df["Log_Tag"]
allSeries = self.df["Timestamp"].dt.hour.value_counts().compute()
deniedSeries = (
self.df[self.df["Log_Tag"] == "TCP_DENIED"]["Timestamp"]
.dt.hour.value_counts()
.compute()
)
logger.debug("Counting Hourly Denied Requests ")
for i in range(24):
try:
countDenied[i] = deniedSeries[i + 1]
except KeyError:
continue
try:
countAll[i] = allSeries[i + 1]
except KeyError:
continue
barWidth = 0.25
ax.set_ylabel("Number of Requests", fontweight="bold")
ax.bar(
np.arange(24),
countAll,
color="blue",
width=barWidth,
edgecolor="white",
label="All",
)
ax.bar(
np.arange(24) + barWidth,
countDenied,
color="red",
width=barWidth,
edgecolor="white",
label="Denied",
)
ax.set_xlabel("Time(Hours of day)", fontweight="bold")
ax.set_xticks([r + barWidth for r in range(len(countAll))])
ax.set_xticklabels([str(x) for x in range(1, 25)])
return ax
def GetTopClients(self):
logger.info("Calculating Top Clients")
clientsRequestCounts = self.df["Client"].value_counts()
topClients = clientsRequestCounts.nlargest(50).compute()
data = {"labels": ["Client IP", "Number of Hits"], "values": []}
for client, hits in topClients.items():
data["values"].append([client, hits])
return data
def PeakHourForEachWebsites(self, ax):
logger.info("Calculating Peak time for each Domain")
Websites = self.df["Domain_Name"]
times = self.df["Timestamp"]
WebsitesList = {}
MostActiveHour = {}
for i in Websites:
WebsitesList[i] = [0] * 24
MostActiveHour[i] = [0, 0]
for i, j in zip(Websites, times):
temp = j.hour
WebsitesList[i][temp] += 1
if MostActiveHour[i][1] < WebsitesList[i][temp]:
MostActiveHour[i] = [temp, WebsitesList[i][temp]]
# Hours = []
# for i in WebsitesList:
# Hours.append(sum(WebsitesList[i]))
# Hours.sort(reverse=True)
# Hours = Hours[:20]
# TopTwenty = {}
# Count = 0
# for i in WebsitesList:
# if sum(WebsitesList[i]) in Hours and Count < 20:
# TopTwenty[i] = MostActiveHour[i]
# Count += 1
# plt.plot([x for x in range(0,24)],WebsitesList[i],label = i)
# plt.bar(TopTwenty.keys(),TopTwenty.values())
# plt.title("Peak Hours For Top 20 Visited websites : ")
# plt.xlabel("Domain_Name")
# plt.ylabel("Hour")
# plt.xticks(rotation=90)
# plt.subplots_adjust(bottom=0.3)
# plt.show()
return MostActiveHour
def GetNumberOfUniqueWebsites(self, time1, time2):
logger.info("Calculating Number of Unique Websites in the given time-frame.")
# sample formats
# time1 = "24/12/12 12:33:22"
# time2 = "25/12/20 12:12:12"
# dd/mm/yy hh:mm:ss
start = datetime.strptime(time1, "%d/%m/%y %H:%M:%S")
end = datetime.strptime(time2, "%d/%m/%y %H:%M:%S")
tmp = self.df.loc[
(self.df["Timestamp"] <= end) & (self.df["Timestamp"] >= start)
]
# alternate(slower) implementation
# times = self.df["Timestamp"].values
# names = self.df["Domain_Name"].values
# d=set()
# for i in range(len(times)):
# hr = datetime.fromtimestamp(times[i])
# if(i==0 or i==len(times)-1):
# print(hr)
# if hr<=end and hr>=start :
# d.add(names[i])
# print(tmp.tail())
denied_requests = len(tmp.loc[tmp["Log_Tag"] == "TCP_DENIED"])
different_clients = len(tmp.drop_duplicates(subset=["Client"]))
different_websites = len(tmp.drop_duplicates(subset=["Domain_Name"]))
mylist = [denied_requests, different_clients, different_websites]
mylist = dask.compute(*mylist)
# print(
# "between %s and %s :\nnumber of different clients: %s , number of different websites: %s, number of denied requests: %s"
# % (time1, time2, mylist[1], mylist[2], mylist[0])
# )
d = {"labels":["Label","Value"],"values":[]}
d["values"].append(["start time",time1])
d["values"].append(["end time",time2])
d["values"].append(["different clients",mylist[1]])
d["values"].append(["different websites",mylist[2]])
d["values"].append(["number of denied requests",mylist[0]])
return d
def GetURICategories(self):
logger.info("Categorizing Domains")
urlCounts = self.df.URI.value_counts().compute()
logger.debug("Loading Model")
model = joblib.load("model.pkl")
logger.debug("Predicting Categories")
pred = model.predict(urlCounts.keys())
categories = np.unique(pred)
logger.debug("Counting Domains")
rv = {
"labels": [
"Category",
"Number of Unique Hits",
"Total Number of Hits",
"Percentage Hits",
],
"values": [],
}
for category in categories:
indices = np.where(pred == category)[0]
rv["values"].append(
[
category,
len(indices),
np.sum(urlCounts[indices]),
np.sum(urlCounts[indices]) / self.totalHits,
]
)
logger.info("Categorization Complete")
return rv
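# --- Illustrative usage sketch (not part of the original module) ---
# The class is driven by first calling ReadLog with a list of whitespace-delimited
# proxy log files; the remaining methods then operate on the cached dask dataframe:
#
#     helpers = Helpers()
#     filtered = helpers.ReadLog(["access.log.1", "access.log.2"])  # placeholder paths
#     fig, ax = plt.subplots()
#     helpers.CountWebsite(ax)
#     top_clients = helpers.GetTopClients()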
|
# -*- coding: utf-8 -*-
'''
@Author : Xu
@Software: PyCharm
@File : tc_test.py
@Time : 2019-05-30 20:05
@Desc : Test
'''
from Text_auto_correct_v1 import auto_correct_sentence
def test(msg):
print("Test case 1:")
correct_sent = auto_correct_sentence(msg)
print("original sentence:" + msg + "\n==>\n" + "corrected sentence:" + correct_sent)
err_sent_1 = '我想买奥地'
test(msg=err_sent_1)
|
import tempfile
from collections import defaultdict
from threading import Lock
from PyPDF2 import PdfFileMerger
from PyPDF2.utils import PdfReadError
from telegram import (
ChatAction,
ParseMode,
ReplyKeyboardMarkup,
ReplyKeyboardRemove,
Update,
)
from telegram.ext import (
CallbackContext,
CommandHandler,
ConversationHandler,
Filters,
MessageHandler,
)
from pdf_bot.constants import (
CANCEL,
DONE,
PDF_INVALID_FORMAT,
PDF_TOO_LARGE,
REMOVE_LAST,
TEXT_FILTER,
)
from pdf_bot.language import set_lang
from pdf_bot.utils import (
cancel,
check_pdf,
check_user_data,
reply_with_cancel_btn,
send_file_names,
write_send_pdf,
)
WAIT_MERGE = 0
MERGE_IDS = "merge_ids"
MERGE_NAMES = "merge_names"
merge_locks = defaultdict(Lock)
def merge_cov_handler() -> ConversationHandler:
handlers = [
MessageHandler(Filters.document, check_doc, run_async=True),
MessageHandler(TEXT_FILTER, check_text, run_async=True),
]
conv_handler = ConversationHandler(
entry_points=[CommandHandler("merge", merge, run_async=True)],
states={
WAIT_MERGE: handlers,
ConversationHandler.WAITING: handlers,
},
fallbacks=[CommandHandler("cancel", cancel, run_async=True)],
allow_reentry=True,
)
return conv_handler
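# Illustrative registration sketch (an assumption, not part of the original module):
# the conversation handler returned above is typically added to a
# python-telegram-bot dispatcher during bot setup, e.g.
#     dispatcher.add_handler(merge_cov_handler())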
def merge(update: Update, context: CallbackContext) -> int:
update.effective_message.chat.send_action(ChatAction.TYPING)
user_id = update.effective_message.from_user.id
merge_locks[user_id].acquire()
context.user_data[MERGE_IDS] = []
context.user_data[MERGE_NAMES] = []
merge_locks[user_id].release()
return ask_first_doc(update, context)
def ask_first_doc(update: Update, context: CallbackContext) -> int:
_ = set_lang(update, context)
text = _(
"Send me the PDF files that you'll like to merge\n\n"
"Note that the files will be merged in the order that you send me"
)
reply_with_cancel_btn(update, context, text)
return WAIT_MERGE
def check_doc(update: Update, context: CallbackContext) -> int:
message = update.effective_message
message.chat.send_action(ChatAction.TYPING)
result = check_pdf(update, context, send_msg=False)
if result in [PDF_INVALID_FORMAT, PDF_TOO_LARGE]:
return process_invalid_pdf(update, context, result)
user_id = message.from_user.id
merge_locks[user_id].acquire()
context.user_data[MERGE_IDS].append(message.document.file_id)
context.user_data[MERGE_NAMES].append(message.document.file_name)
result = ask_next_doc(update, context)
merge_locks[user_id].release()
return result
def process_invalid_pdf(
update: Update, context: CallbackContext, pdf_result: int
) -> int:
_ = set_lang(update, context)
if pdf_result == PDF_INVALID_FORMAT:
text = _("The file you've sent is not a PDF file")
else:
text = _("The PDF file you've sent is too large for me to download")
update.effective_message.reply_text(text)
user_id = update.effective_message.from_user.id
merge_locks[user_id].acquire()
if not context.user_data[MERGE_NAMES]:
result = ask_first_doc(update, context)
else:
result = ask_next_doc(update, context)
merge_locks[user_id].release()
return result
def ask_next_doc(update: Update, context: CallbackContext) -> int:
_ = set_lang(update, context)
send_file_names(update, context, context.user_data[MERGE_NAMES], _("PDF files"))
reply_markup = ReplyKeyboardMarkup(
[[_(DONE)], [_(REMOVE_LAST), _(CANCEL)]],
resize_keyboard=True,
one_time_keyboard=True,
)
update.effective_message.reply_text(
_(
"Press <b>Done</b> if you've sent me all the PDF files that "
"you'll like to merge or keep sending me the PDF files"
),
reply_markup=reply_markup,
parse_mode=ParseMode.HTML,
)
return WAIT_MERGE
def check_text(update: Update, context: CallbackContext) -> int:
message = update.effective_message
message.chat.send_action(ChatAction.TYPING)
_ = set_lang(update, context)
text = message.text
if text in [_(REMOVE_LAST), _(DONE)]:
user_id = message.from_user.id
lock = merge_locks[user_id]
if not check_user_data(update, context, MERGE_IDS, lock):
return ConversationHandler.END
if text == _(REMOVE_LAST):
return remove_doc(update, context, lock)
elif text == _(DONE):
return preprocess_merge_pdf(update, context, lock)
elif text == _(CANCEL):
return cancel(update, context)
def remove_doc(update: Update, context: CallbackContext, lock: Lock) -> int:
_ = set_lang(update, context)
lock.acquire()
file_ids = context.user_data[MERGE_IDS]
file_names = context.user_data[MERGE_NAMES]
file_ids.pop()
file_name = file_names.pop()
update.effective_message.reply_text(
_("<b>{}</b> has been removed for merging").format(file_name),
parse_mode=ParseMode.HTML,
)
if len(file_ids) == 0:
result = ask_first_doc(update, context)
else:
result = ask_next_doc(update, context)
lock.release()
return result
def preprocess_merge_pdf(update: Update, context: CallbackContext, lock: Lock) -> int:
_ = set_lang(update, context)
lock.acquire()
num_files = len(context.user_data[MERGE_IDS])
if num_files == 0:
update.effective_message.reply_text(_("You haven't sent me any PDF files"))
result = ask_first_doc(update, context)
elif num_files == 1:
update.effective_message.reply_text(_("You've only sent me one PDF file."))
result = ask_next_doc(update, context)
else:
result = merge_pdf(update, context)
lock.release()
return result
def merge_pdf(update: Update, context: CallbackContext) -> int:
_ = set_lang(update, context)
update.effective_message.reply_text(
_("Merging your PDF files"), reply_markup=ReplyKeyboardRemove()
)
# Setup temporary files
user_data = context.user_data
file_ids = user_data[MERGE_IDS]
file_names = user_data[MERGE_NAMES]
temp_files = [tempfile.NamedTemporaryFile() for _ in range(len(file_ids))]
merger = PdfFileMerger()
# Merge PDF files
for i, file_id in enumerate(file_ids):
file_name = temp_files[i].name
file = context.bot.get_file(file_id)
file.download(custom_path=file_name)
try:
merger.append(open(file_name, "rb"))
except PdfReadError:
update.effective_message.reply_text(
_(
"I can't merge your PDF files as I couldn't open and read \"{}\". "
"Ensure that it is not encrypted"
).format(file_names[i])
)
return ConversationHandler.END
# Send result file
write_send_pdf(update, context, merger, "files.pdf", "merged")
# Clean up memory and files
if user_data[MERGE_IDS] == file_ids:
del user_data[MERGE_IDS]
if user_data[MERGE_NAMES] == file_names:
del user_data[MERGE_NAMES]
for tf in temp_files:
tf.close()
return ConversationHandler.END
|
INSTRUMENT_TOPIC_SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"active": {
"type": "boolean"
},
"exchangeIds": {
"type": "array",
"items": [
{
"type": "string"
},
{
"type": "string"
}
]
},
"exchanges": {
"type": "array",
"items": [
{
"type": "object",
"properties": {
"slug": {
"type": "string"
},
"active": {
"type": "boolean"
},
"nameAtExchange": {
"type": "string"
},
"symbolAtExchange": {
"type": "string"
},
"firstSeen": {
"type": "integer"
},
"lastSeen": {
"type": "integer"
},
"firstTradingDay": {
"type": "null"
},
"lastTradingDay": {
"type": "null"
},
"tradingTimes": {
"type": "null"
}
},
"required": [
"slug",
"active",
"nameAtExchange",
"symbolAtExchange",
"firstSeen",
"lastSeen",
"firstTradingDay",
"lastTradingDay",
"tradingTimes"
]
},
{
"type": "object",
"properties": {
"slug": {
"type": "string"
},
"active": {
"type": "boolean"
},
"nameAtExchange": {
"type": "string"
},
"symbolAtExchange": {
"type": "string"
},
"firstSeen": {
"type": "integer"
},
"lastSeen": {
"type": "integer"
},
"firstTradingDay": {
"type": "null"
},
"lastTradingDay": {
"type": "null"
},
"tradingTimes": {
"type": "null"
}
},
"required": [
"slug",
"active",
"nameAtExchange",
"symbolAtExchange",
"firstSeen",
"lastSeen",
"firstTradingDay",
"lastTradingDay",
"tradingTimes"
]
}
]
},
"dividends": {
"type": "array",
"items": {}
},
"splits": {
"type": "array",
"items": {}
},
"cfi": {
"type": "string"
},
"name": {
"type": "string"
},
"typeId": {
"type": "string"
},
"wkn": {
"type": "string"
},
"legacyTypeChar": {
"type": "string"
},
"isin": {
"type": "string"
},
"priceFactor": {
"type": "integer"
},
"shortName": {
"type": "string"
},
"homeSymbol": {
"type": "string"
},
"intlSymbol": {
"type": "string"
},
"homeNsin": {
"type": "string"
},
"tags": {
"type": "array",
"items": [
{
"type": "object",
"properties": {
"type": {
"type": "string"
},
"id": {
"type": "string"
},
"name": {
"type": "string"
},
"icon": {
"type": "string"
}
},
"required": [
"type",
"id",
"name",
"icon"
]
},
{
"type": "object",
"properties": {
"type": {
"type": "string"
},
"id": {
"type": "string"
},
"name": {
"type": "string"
},
"icon": {
"type": "string"
}
},
"required": [
"type",
"id",
"name",
"icon"
]
},
{
"type": "object",
"properties": {
"type": {
"type": "string"
},
"id": {
"type": "string"
},
"name": {
"type": "string"
},
"icon": {
"type": "string"
}
},
"required": [
"type",
"id",
"name",
"icon"
]
},
{
"type": "object",
"properties": {
"type": {
"type": "string"
},
"id": {
"type": "string"
},
"name": {
"type": "string"
},
"icon": {
"type": "string"
}
},
"required": [
"type",
"id",
"name",
"icon"
]
}
]
},
"derivativeProductCategories": {
"type": "array",
"items": [
{
"type": "string"
},
{
"type": "string"
}
]
},
"company": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"description": {
"type": "string"
},
"ipoDate": {
"type": "null"
}
},
"required": [
"name",
"description",
"ipoDate"
]
},
"marketCap": {
"type": "object",
"properties": {
"value": {
"type": "string"
},
"currencyId": {
"type": "null"
}
},
"required": [
"value",
"currencyId"
]
},
"lastDividend": {
"type": "null"
},
"shareType": {
"type": "string"
},
"custodyType": {
"type": "string"
},
"kidRequired": {
"type": "boolean"
},
"kidLink": {
"type": "null"
},
"tradable": {
"type": "boolean"
},
"fundInfo": {
"type": "null"
},
"derivativeInfo": {
"type": "null"
},
"targetMarket": {
"type": "object",
"properties": {
"investorType": {
"type": "null"
},
"investorExperience": {
"type": "null"
}
},
"required": [
"investorType",
"investorExperience"
]
},
"savable": {
"type": "boolean"
},
"fractionalTradingAllowed": {
"type": "boolean"
},
"issuer": {
"type": "null"
}
},
"required": [
"active",
"exchangeIds",
"exchanges",
"dividends",
"splits",
"cfi",
"name",
"typeId",
"wkn",
"legacyTypeChar",
"isin",
"priceFactor",
"shortName",
"homeSymbol",
"intlSymbol",
"homeNsin",
"tags",
"derivativeProductCategories",
"company",
"marketCap",
"lastDividend",
"shareType",
"custodyType",
"kidRequired",
"kidLink",
"tradable",
"fundInfo",
"derivativeInfo",
"targetMarket",
"savable",
"fractionalTradingAllowed",
"issuer"
]
}
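# --- Illustrative usage sketch (not part of the original module) ---
# A decoded instrument payload could be checked against this schema with the
# third-party `jsonschema` package, e.g.:
#
#     import jsonschema
#     jsonschema.validate(instance=payload, schema=INSTRUMENT_TOPIC_SCHEMA)
#
# where `payload` is the JSON dict received on the instrument topic.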
|
import os
import tkinter as tk
from utils.data import *
from utils.utilities import *
from utils.statusbar import StatusBar
from utils.oreoide import OreoIDE
from utils.oreoeditor import OreoEditor
from utils.oreomenu import OreoMenu
from utils.terminal import Terminal
if __name__ == "__main__":
root = OreoIDE()
center(root, width=1000, height=700)
oreoeditor = OreoEditor(_root=root, background="#ffffff")
root.add_editor(oreoeditor)
oreomenu = OreoMenu(root, relief=tk.FLAT)
root.add_oreomenu(oreomenu)
terminal = Terminal(root, fira_code)
terminal.pack(fill=tk.BOTH, expand=True)
root.add_terminal(terminal)
statusbar = StatusBar(root, bg="#007acc")
statusbar.pack(fill=tk.BOTH, side=tk.BOTTOM)
root.add_statusbar(statusbar)
root.mainloop()
|
# encoding: utf-8
from .extended import *
from ..models import GoogleAPISettings
# from django import template
# from django.conf import settings
# from django.db.models import Count
# from django.utils import timezone
# from taggit_templatetags.templatetags.taggit_extras import get_queryset
# register = template.Library()
@register.inclusion_tag('cargo/pagination/_digg_pagination.html')
def digg_pagination(objects):
return {
'objects': objects,
'page': objects.number if hasattr(objects, 'number') else None,
'name': u'Résultat',
'name_plural': u'Résultats',
}
########################### METAS
from django import template
from django.template import resolve_variable
from django.template.base import FilterExpression
from django.template.loader import get_template
from django.template import engines
from django.conf import settings
import re
# @register.simple_tag
# def meta_title(value):
# global TEMPLATE_META_TITLE = value
def _setup_metas_dict(parser):
try:
parser._metas
except AttributeError:
parser._metas = {}
class DefineMetaNode(template.Node):
def __init__(self, name, nodelist, args):
self.name = name
self.nodelist = nodelist
self.args = args
def render(self, context):
## Empty string - the {% meta %} tag produces no output of its own
return ''
@register.tag(name="meta")
def do_meta(parser, token):
try:
args = token.split_contents()
tag_name, meta_name, args = args[0], args[1], args[2:]
except IndexError:
raise template.TemplateSyntaxError, "'%s' tag requires at least one argument (macro name)" % token.contents.split()[0]
# TODO: check that 'args' are all simple strings ([a-zA-Z0-9_]+)
r_valid_arg_name = re.compile(r'^[a-zA-Z0-9_]+$')
for arg in args:
if not r_valid_arg_name.match(arg):
raise template.TemplateSyntaxError, "Argument '%s' to macro '%s' contains illegal characters. Only alphanumeric characters and '_' are allowed." % (arg, macro_name)
nodelist = parser.parse(('endmeta', ))
parser.delete_first_token()
## Metadata of each macro are stored in a new attribute
## of 'parser' class. That way we can access it later
## in the template when processing 'usemacro' tags.
_setup_metas_dict(parser)
if not meta_name in parser._metas:
parser._metas[meta_name] = DefineMetaNode(meta_name, nodelist, args)
return parser._metas[meta_name]
class UseMetaNode(template.Node):
def __init__(self, meta, filter_expressions, truncate=None):
self.nodelist = meta.nodelist
self.args = meta.args
self.filter_expressions = filter_expressions
self.truncate = truncate
def render(self, context):
for (arg, fe) in [(self.args[i], self.filter_expressions[i]) for i in range(len(self.args))]:
context[arg] = fe.resolve(context)
return self.nodelist.render(context)
class NoopNode(template.Node):
def render(self, context):
return ''
@register.tag(name="usemeta")
def do_usemeta(parser, token, truncate=None):
try:
args = token.split_contents()
tag_name, meta_name, values = args[0], args[1], args[2:]
except IndexError:
raise template.TemplateSyntaxError, "'%s' tag requires at least one argument (macro name)" % token.contents.split()[0]
try:
meta = parser._metas[meta_name]
except (AttributeError, KeyError):
# Gracefully fall back to a no-op node when the meta is undefined.
return NoopNode()
# raise template.TemplateSyntaxError, "Macro '%s' is not defined" % meta_name
if (len(values) != len(meta.args)):
raise template.TemplateSyntaxError, "Macro '%s' was declared with %d parameters and used with %d parameter" % (
meta_name,
len(meta.args),
len(values))
filter_expressions = []
for val in values:
if (val[0] == "'" or val[0] == '"') and (val[0] != val[-1]):
raise template.TemplateSyntaxError, "Non-terminated string argument: %s" % val[1:]
filter_expressions.append(FilterExpression(val, parser))
return UseMetaNode(meta, filter_expressions, truncate)
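# Illustrative template usage sketch (an assumption, mirroring the macro-style tags
# defined above): {% meta %} only stores its node list, {% usemeta %} renders it.
#
#     {% meta title %}My page title{% endmeta %}
#     ...
#     {% usemeta title %}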
@register.inclusion_tag('cargo/metas/header.html', takes_context=True)
def meta_headers(context):
return context
########################### END METAS
import json
from oauth2client.client import SignedJwtAssertionCredentials
@register.inclusion_tag('cargo/analytics/analytics.html', takes_context=True)
def analytics(context, view_id=None, next = None):
# The scope for the OAuth2 request.
SCOPE = 'https://www.googleapis.com/auth/analytics.readonly'
token = ""
ggsettings = GoogleAPISettings.objects.first()
if ggsettings and ggsettings.account_key_file:
if not view_id:
view_id = "%s" % int(ggsettings.analytics_default_view_id)
_key_data = json.load(ggsettings.account_key_file)
# Construct a credentials objects from the key data and OAuth2 scope.
try:
_credentials = SignedJwtAssertionCredentials(
_key_data['client_email'],
_key_data['private_key'],
'https://www.googleapis.com/auth/analytics.readonly',
# token_uri='https://accounts.google.com/o/oauth2/token'
)
token = _credentials.get_access_token().access_token
except Exception, e:
print e.message
token = ""
return {
'token': token,
'view_id': view_id
}
|
import mimetypes
from urllib.parse import quote_plus, urljoin, urlencode
import prs_utility as utility
import requests
__all__ = ['hash_request', 'sign_request', 'get_auth_header', 'request']
def hash_request(path, payload, hash_alg='keccak256'):
prefix = 'path={}'.format(quote_plus(path))
sorted_qs = utility.get_sorted_qs(payload or {})
sep = '&' if sorted_qs else ''
data = f'{prefix}{sep}{sorted_qs}'
return utility.hash_text(data, hash_alg=hash_alg)
def sign_request(path, payload, private_key, hash_alg='keccak256'):
return utility.sign_hash(hash_request(path, payload, hash_alg=hash_alg), private_key)
def get_auth_header(path, payload, private_key, hash_alg='keccak256'):
sign = sign_request(path, payload, private_key, hash_alg=hash_alg)
signature, _hash = sign['signature'], sign['hash']
address = utility.sig_to_address(_hash, signature)
return {
'Content-Type': 'application/json',
'X-Po-Auth-Address': address,
'X-Po-Auth-Sig': signature,
'X-Po-Auth-Msghash': _hash
}
def create_api_url(host, version='v2', path=None):
if version == 'v1':
version = None
lst = ['api', version, path]
query_path = '/'.join(x for x in lst if x)
return urljoin(host, query_path)
def create_api_path(path, query=None):
if path[0] != '/':
path = f'/{path}'
if query:
query_string = urlencode(query, doseq=True, quote_via=quote_plus)
path = f'{path}?{query_string}'
return path
def request(
host, version='v2', method='GET', path=None, query=None, data=None,
headers=None, auth_opts=None, fields=None, file_data=None,
timeout=None, debug=False
):
"""
:param host: str, host
:param version: str, the default value is `v2`
:param method: str
:param path: str
:param query:
:param data:
:param headers:
:param auth_opts: dict, {'private_key': 'xx', 'token': 'xxx'}
:param fields:
:param file_data: {
'field': 'field name', 'filename': 'file name', 'file': 'file object'
}
"""
session = requests.Session()
headers = headers if headers else {}
path = create_api_path(path, query)
url = create_api_url(host, version=version, path=path)
if data:
data = {'payload': data}
if auth_opts and auth_opts.get('private_key'):
payload = data and data.get('payload')
headers.update(
get_auth_header(path, payload, auth_opts['private_key'])
)
elif auth_opts and auth_opts.get('token'):
headers.update({
'authorization': f'Bearer {auth_opts["token"]}'
})
session.headers = headers
if debug:
print(
f'request {method} {url}\n'
f'query: {query}\ndata: {data}\n'
f'fields: {fields}\nfile_data: {file_data}\n'
f'headers: {session.headers}\n'
)
if (
isinstance(file_data, dict)
and file_data.get('field')
and file_data.get('file')
and file_data.get('filename')
):
filename = file_data['filename']
mimetype = mimetypes.guess_type(filename)[0]
if mimetype is None:
filename_lower = file_data['filename'].lower()
if filename_lower.endswith('.md'):
mimetype = 'text/markdown'
elif filename_lower.endswith('.webp'):
mimetype = 'image/webp'
files = {
file_data['field']: (
file_data['filename'], file_data['file'], mimetype
),
}
resp = session.request(
method, url, data=fields, files=files, timeout=timeout
)
else:
resp = session.request(
method, url, json=data, timeout=timeout
)
if debug:
print(f'response {resp.content}')
return resp
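# --- Illustrative usage sketch (not part of the original module) ---
# A signed request could look like the call below; the host, path and private key
# are placeholders/assumptions:
#
#     resp = request(
#         'https://prs.example.com',
#         method='GET',
#         path='/users/me',
#         auth_opts={'private_key': '<hex-encoded private key>'},
#         debug=True,
#     )
#     resp.json()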
|
#!/usr/bin/env python
from setuptools import setup
import ssha
setup(
name='ssha',
version=ssha.__version__,
description='SSH into AWS EC2 instances',
author='Raymond Butcher',
author_email='ray.butcher@claranet.uk',
url='https://github.com/claranet/ssha',
license='MIT License',
packages=(
'ssha',
),
scripts=(
'bin/ssha',
),
install_requires=(
'boto-source-profile-mfa>=0.0.8',
'botocore>=1.5.8',
'boto3',
'cryptography',
'paramiko',
'pyhcl',
),
)
|
"""
copyright bijan shokrollahi
10.06.2021
"""
import unittest
from dynamic_programming_and_greedy_algos.fast_fourier_transformation import *
from random import randint
class MyTestCase(unittest.TestCase):
def test_max_subarray(self):
self.assertEqual(25, maxSubArray([100, -2, 5, 10, 11, -4, 15, 9, 18, -2, 21, -11]))
self.assertEqual(26, maxSubArray([-5, 1, 10, 4, 11, 4, 15, 9, 18, 0, 21, -11]))
self.assertEqual(18, maxSubArray([26, 0, 5, 18, 11, -1, 15, 9, 13, 5, 16, -11]))
def get_random_array(n):
assert (n > 100)
lst = [randint(0, 25) for j in range(n)]
lst[0] = 1000
lst[10] = -15
lst[25] = 40
lst[n - 10] = 60
lst[n - 3] = -40
return lst
self.assertEqual(75, maxSubArray(get_random_array(50000)))
self.assertEqual(75, maxSubArray(get_random_array(500000)))
print('All tests passed (10 points!)')
def test_multiplication_ftt(self):
def check_poly(lst1, lst2):
print(f'Your code found: {lst1}')
print(f'Expected: {lst2}')
self.assertEqual(len(lst1), len(lst2))
for (k, j) in zip(lst1, lst2):
self.assertTrue(abs(k - j) <= 1E-05)
print('Passed!')
print('-------')
print('Test # 1')
# multiply (1 + x - x^3) with (2 - x + x^2)
a = [1, 1, 0, -1]
b = [2, -1, 1]
c = polynomial_multiply(a, b)
self.assertEqual(6, len(c))
print(f'c={c}')
check_poly(c, [2, 1, 0, -1, 1, -1])
print('-------')
print('Test # 2')
# multiply 1 - x + x^2 + 2 x^3 + 3 x^5 with
# -x^2 + x^4 + x^6
a = [1, -1, 1, 2, 0, 3]
b = [0, 0, -1, 0, 1, 0, 1]
c = polynomial_multiply(a, b)
self.assertEqual(12, len(c))
print(f'c={c}')
check_poly(c, [0, 0, -1, 1, 0, -3, 2, -2, 1, 5, 0, 3])
print('-------')
print('Test # 3')
# multiply 1 - 2x^3 + x^7 - 11 x^11
# with 2 - x^4 - x^6 + x^8
a = [1, 0, 0, -2, 0, 0, 0, 1, 0, 0, 0, -11]
b = [2, 0, 0, 0, -1, 0, -1, 0, 1]
c = polynomial_multiply(a, b)
self.assertEqual(20, len(c))
print(f'c={c}')
check_poly(c, [2, 0, 0, -4, -1, 0, -1, 4, 1, 2, 0, -25, 0, -1, 0, 12, 0, 11, 0, -11])
print('All tests passed (10 points!)')
def test_checksum_exists(self):
print('-- Test 1 --')
a = {1, 2, 10, 11}
b = {2, 5, 8, 10}
c = {1, 2, 5, 8}
self.assertFalse(check_sum_exists(a, b, c, 12))
print('Passed')
print('-- Test 2 --')
a = {1, 2, 10, 11}
b = {2, 5, 8, 10}
c = {1, 2, 5, 8, 11}
self.assertTrue(check_sum_exists(a, b, c, 12))
print('Passed')
print('-- Test 3 --')
a = {1, 4, 5, 7, 11, 13, 14, 15, 17, 19, 22, 23, 24, 28, 34, 35, 37, 39, 42, 44}
b = {0, 1, 4, 9, 10, 11, 12, 15, 18, 20, 25, 31, 34, 36, 38, 40, 43, 44, 47, 49}
c = {3, 4, 5, 7, 8, 10, 19, 20, 21, 24, 31, 35, 36, 37, 38, 39, 42, 44, 46, 49}
self.assertTrue(check_sum_exists(a, b, c, 50))
print('-- Test 4 --')
a = {98, 2, 99, 40, 77, 79, 87, 88, 89, 27}
b = {64, 66, 35, 69, 70, 40, 76, 45, 12, 60}
c = {36, 70, 10, 44, 15, 16, 83, 20, 84, 55}
self.assertFalse(check_sum_exists(a, b, c, 100))
print('All Tests Passed (15 points)!')
if __name__ == '__main__':
unittest.main()
|
'''
Given two sentences words1, words2 (each represented as an array of strings), and a list of similar word pairs pairs, determine if two sentences are similar.
For example, "great acting skills" and "fine drama talent" are similar, if the similar word pairs are pairs = [["great", "fine"], ["acting","drama"], ["skills","talent"]].
Note that the similarity relation is not transitive. For example, if "great" and "fine" are similar, and "fine" and "good" are similar, "great" and "good" are not necessarily similar.
However, similarity is symmetric. For example, "great" and "fine" being similar is the same as "fine" and "great" being similar.
Also, a word is always similar with itself. For example, the sentences words1 = ["great"], words2 = ["great"], pairs = [] are similar, even though there are no specified similar word pairs.
Finally, sentences can only be similar if they have the same number of words. So a sentence like words1 = ["great"] can never be similar to words2 = ["doubleplus","good"].
Note:
The length of words1 and words2 will not exceed 1000.
The length of pairs will not exceed 2000.
The length of each pairs[i] will be 2.
The length of each words[i] and pairs[i][j] will be in the range [1, 20].
'''
class Solution(object):
def areSentencesSimilar(self, words1, words2, pairs):
"""
:type words1: List[str]
:type words2: List[str]
:type pairs: List[List[str]]
:rtype: bool
"""
if len(words1) != len(words2):
return False
dic = {}
for pair in pairs:
if pair[0] not in dic:
dic[pair[0]] = set()
dic[pair[0]].add(pair[1])
if pair[1] not in dic:
dic[pair[1]] = set()
dic[pair[1]].add(pair[0])
for i in range(len(words1)):
if words1[i] == words2[i]:
continue
if words1[i] in dic and words2[i] in dic[words1[i]]:
continue
if words2[i] in dic and words1[i] in dic[words2[i]]:
continue
return False
return True
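# Illustrative usage (mirrors the example from the problem statement above):
#
#     words1 = ["great", "acting", "skills"]
#     words2 = ["fine", "drama", "talent"]
#     pairs = [["great", "fine"], ["acting", "drama"], ["skills", "talent"]]
#     Solution().areSentencesSimilar(words1, words2, pairs)  # -> True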
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class Openloops(Package):
"""The OpenLoops 2 program is a fully automated implementation of the
Open Loops algorithm combined with on-the-fly reduction methods,
which allows for the fast and stable numerical evaluation of tree
and one-loop matrix elements for any Standard Model process
at NLO QCD and NLO EW. """
homepage = "https://openloops.hepforge.org/"
url = "https://openloops.hepforge.org/downloads?f=OpenLoops-2.1.1.tar.gz"
tags = ['hep']
version('2.1.1', sha256='f1c47ece812227eab584e2c695fef74423d2f212873f762b8658f728685bcb91')
all_processes = (
"tbln", "tbln_ew", "tbqq", "tbw", "pptttt", "pptttt_ew", "pptt",
"pptt_ew", "ppttbb", "ppttj", "ppttj_ew", "ppttjj",
"pptaj", "pptajj", "pptllj", "pptlljj", "pptln", "pptw", "pptwj",
"pptzj", "pptzjj", "ppthj", "ppthjj", "pptj",
"pptjj", "ppjj", "ppjj_ew", "ppjjj", "ppjjj_ew", "ppjjj_nf5", "ppjjjj",
"pplllvvv_ew", "ppatt", "ppatt_ew",
"ppattj", "pplltt", "pplltt_ew", "ppllttj", "ppllttj_ew", "pplntt",
"pplnttj", "ppwtt", "ppwtt_ew", "ppwttj",
"ppwttj_ew", "ppztt", "ppztt_ew", "ppzttj", "ppaatt", "ppwwtt",
"ppzatt", "ppzztt", "ppvvvv", "ppaaaj2", "ppllaa",
"ppllaaj", "pplllla", "ppvvv", "ppvvv2", "ppvvv_ew", "ppvvvj",
"ppaajj", "ppaajj2", "ppaajjj", "pplla", "pplla2",
"pplla_ew", "ppllaj", "ppllaj2", "ppllaj_ew", "ppllaj_nf5", "ppllajj",
"ppllll", "ppllll2", "ppllll2_nf5",
"ppllll2_onlyh", "ppllll_ew", "ppllllj", "ppllllj2", "ppllllj2_nf5",
"ppllllj2_nf5_notridr", "ppllllj2_nf5_sr",
"ppllllj2_onlyh", "ppllnnjj_ew", "ppllnnjj_vbs", "pplnajj", "ppvv",
"ppvv2", "ppvv_ew", "ppvvj", "ppvvj2",
"ppvvj_ew", "ppwajj", "ppwwjj", "ppzajj", "ppzwj_ew", "ppzwjj",
"ppzzjj", "ppajj", "ppajj2", "ppajj_ew", "ppajjj",
"ppllj", "ppllj2", "ppllj_ew", "ppllj_nf5", "pplljj", "pplljj_ew",
"pplljjj", "pplnj_ckm", "pplnjj", "pplnjj_ckm",
"pplnjj_ew", "pplnjjj", "ppnnjj_ew", "ppnnjjj", "ppvj", "ppvj2",
"ppvj_ew", "ppwj_ckm", "ppwjj", "ppwjj_ckm",
"ppwjj_ew", "ppwjjj", "ppzjj", "ppzjj_ew", "ppzjjj", "pphtt",
"pphtt_ew", "pphttj", "pphlltt", "pphll", "pphll2",
"pphll_ew", "pphllj", "pphllj2", "pphllj_ew", "pphlljj", "pphlljj_top",
"pphlnj_ckm", "pphlnjj", "pphv", "pphv_ew",
"pphwjj", "pphz2", "pphzj2", "pphzjj", "pphhtt", "pphhv", "pphhh2",
"heftpphh", "heftpphhj", "heftpphhjj", "pphh2",
"pphhj2", "pphhjj2", "pphhjj_vbf", "bbhj", "heftpphj", "heftpphjj",
"heftpphjjj", "pphbb", "pphbbj", "pphj2",
"pphjj2", "pphjj_vbf", "pphjj_vbf_ew", "pphjjj2", "eetttt", "eettttj",
"eellllbb", "eett", "eett_ew", "eettj",
"eettjj", "eevtt", "eevttj", "eevttjj", "eevvtt", "eevvttj",
"eellll_ew", "eevv_ew", "eevvjj", "eell_ew", "eevjj",
"eehtt", "eehttj", "eehll_ew", "eehvtt", "eehhtt", "heftppllj",
"heftpplljj", "heftpplljjj")
variant('compile_extra', default=False,
description='Compile real radiation tree amplitudes')
variant('processes', description='Processes to install. See https://' +
'openloops.hepforge.org/process_' +
'library.php?repo=public for details',
values=disjoint_sets(('all.coll',), ('lhc.coll',), ('lcg.coll',),
all_processes).with_default('lhc.coll'))
variant('num_jobs', description='Number of parallel jobs to run. ' +
'Set to 1 if compiling a large number ' +
'of processes (e.g. lcg.coll)', default=0)
depends_on('python', type=("build", "run"))
phases = ['configure', 'build', 'build_processes', 'install']
def configure(self, spec, prefix):
spack_env = ('PATH LD_LIBRARY_PATH CPATH C_INCLUDE_PATH ' +
'CPLUS_INCLUDE_PATH INTEL_LICENSE_FILE').split()
for k in env.keys():
if k.startswith('SPACK_'):
spack_env.append(k)
spack_env = ' '.join(spack_env)
is_intel = self.spec.satisfies('%intel')
njobs = self.spec.variants['num_jobs'].value
with open('openloops.cfg', 'w') as f:
f.write('[OpenLoops]\n')
f.write('import_env={0}\n'.format(spack_env))
f.write('num_jobs = {0}\n'.format(njobs))
f.write('process_lib_dir = {0}\n'.format(self.spec.prefix.proclib))
f.write('cc = {0}\n'.format(env['SPACK_CC']))
f.write('cxx = {0}\n'.format(env['SPACK_CXX']))
f.write('fortran_compiler = {0}\n'.format(env['SPACK_FC']))
if self.spec.satisfies('@1.3.1') and not is_intel:
f.write('gfortran_f_flags = -ffree-line-length-none\n')
if self.spec.satisfies('@2.1.1') and not is_intel:
f.write('gfortran_f_flags = -ffree-line-length-none ' +
'-fdollar-ok -mcmodel=medium\n')
if self.spec.satisfies('@:1.999.999 processes=lcg.coll'):
copy(join_path(os.path.dirname(__file__), 'sft1.coll'), 'lcg.coll')
elif self.spec.satisfies('@2:2.999.999 processes=lcg.coll'):
copy(join_path(os.path.dirname(__file__), 'sft2.coll'), 'lcg.coll')
def build(self, spec, prefix):
scons = Executable('./scons')
scons('generator=1', 'compile=2')
def build_processes(self, spec, prefix):
ol = Executable('./openloops')
processes = self.spec.variants['processes'].value
if '+compile_extra' in self.spec:
ce = 'compile_extra=1'
else:
ce = ''
ol('libinstall', ce, *processes)
def install(self, spec, prefix):
install_tree(join_path(self.stage.path, 'spack-src'),
self.prefix,
ignore=lambda x: x in ('process_obj', 'process_src'))
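# Illustrative spec (an assumption, using standard Spack variant syntax): building
# only a couple of process libraries might look like
#     spack install openloops@2.1.1 processes=pptt,ppttj
# while the default spec builds the `lhc.coll` collection.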
|
import bs4, json
soup = bs4.BeautifulSoup(open('text.html'), 'html.parser')
important_bits = soup.findAll(['h3', 'a', 'i'])
count = 0
started = False
data = []
for bit in important_bits:
start_len = len(data)
if bit.name == 'h3':
if bit.text[:3] == 'ACT':
started = True
data.append({
'type': 'act',
'name': bit.text
})
elif bit.text[:5] == 'SCENE':
data.append({
'type': 'scene',
'name': bit.text
})
elif started:
if bit.name == 'a':
if not bit.has_attr('name'):
raise Exception('Something\'s wrong with the file: it seems to have a non-line before the first act starts or after the play is over')
if bit['name'][:6] == 'speech':
data.append({
'type': 'speaker tag',
'speaker': bit.text
})
else:
data.append({
'type': 'line',
'identifier': bit['name'],
'text': bit.text
})
elif bit.name == 'i':
data.append({
'type': 'stage direction',
'text': bit.text
})
json.dump({'data': data}, open('text.json', 'w'))
|
"""
"""
from __future__ import absolute_import
import subprocess
from os import makedirs, listdir
from os.path import exists, join, basename
from abc import ABCMeta, abstractmethod
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import requests
import yaml
from pkg_resources import parse_version
from six import with_metaclass
from .io import warn, info, debug, fatal
class BaseCacher(with_metaclass(ABCMeta, object)):
def __init__(self, cache_path):
if not exists(cache_path):
makedirs(cache_path)
self.cache_path = cache_path
def abspath(self, name):
return join(self.cache_path, name)
@abstractmethod
def check(self, name, **kwargs):
"""
"""
@abstractmethod
def cache(self, name, **kwargs):
"""
"""
class TarballCacher(BaseCacher):
def __init__(self, cache_path):
cache_path = join(cache_path, 'tarballs')
super(TarballCacher, self).__init__(cache_path)
def check(self, name, version=None):
if version is None:
cfpath = self.abspath(name)
if exists(cfpath):
return cfpath
else:
name = name.lower()
for cfile in listdir(self.cache_path):
if cfile.lower().startswith(name + '-'):
cver = cfile[len(name + '-'):]
if cver.endswith('.tar.gz'):
ext = len('.tar.gz')
elif cver.endswith('.tgz'):
ext = len('.tgz')
elif cver.endswith('.tar.bz2'):
ext = len('.tar.bz2')
elif cver.endswith('.zip'):
ext = len('.zip')
else:
warn('Unknown extension on cached file: %s', cfile)
continue
cver = cver[:-ext]
if parse_version(cver) == parse_version(version):
return self.abspath(cfile)
return None
class PlatformStringCacher(BaseCacher):
cache_file = '__platform_cache.yml'
def __init__(self, cache_path):
super(PlatformStringCacher, self).__init__(cache_path)
self.cache_file = join(cache_path, PlatformStringCacher.cache_file)
if not exists(self.cache_file):
with open(self.cache_file, 'w') as handle:
handle.write(yaml.dump({}))
def check(self, name, **kwargs):
platforms = yaml.safe_load(open(self.cache_file).read())
return platforms.get(name, None)
def cache(self, name, execctx=None, buildpy='python', plat_specific=False, **kwargs):
platforms = yaml.safe_load(open(self.cache_file).read())
if name not in platforms:
with execctx() as run:
# ugly...
cmd = "python -c 'import os; print os.uname()[4]'"
arch = run(cmd, capture_output=True).splitlines()[0].strip()
if plat_specific:
cmd = ("{buildpy} -c 'import starforge.interface.wheel; "
"print starforge.interface.wheel.get_platforms"
"(major_only=True)[0]'".format(buildpy=buildpy))
else:
cmd = ("{buildpy} -c 'import wheel.pep425tags; "
"print wheel.pep425tags.get_platforms"
"(major_only=True)[0]'".format(buildpy=buildpy))
cmd = cmd.format(arch=arch)
platform = run(
cmd,
capture_output=True
).splitlines()[0].strip()
platforms[name] = platform
with open(self.cache_file, 'w') as handle:
handle.write(yaml.dump(platforms))
return platforms[name]
class UrlCacher(TarballCacher):
def check(self, name, **kwargs):
tgz = basename(urlparse(name).path)
return super(UrlCacher, self).check(tgz)
def cache(self, name, **kwargs):
cfpath = self.check(name)
tgz = basename(urlparse(name).path)
if cfpath is not None:
info('Using cached file: %s', cfpath)
else:
cfpath = self.abspath(tgz)
r = requests.get(name)
with open(cfpath, 'wb') as handle:
for chunk in r.iter_content(chunk_size=1024):
handle.write(chunk)
class PipSourceCacher(TarballCacher):
def cache(self, name, version=None, fail_ok=False, **kwargs):
if version is None:
fatal('A version must be provided when caching from pip')
cfpath = self.check(name, version=version)
if cfpath is not None:
info('Using cached sdist: %s', cfpath)
else:
try:
# TODO: use the pip API
cmd = [
'pip', '--no-cache-dir', 'install', '-d', self.cache_path,
'--no-binary', ':all:', '--no-deps', name + '==' + version
]
info('Fetching sdist: %s', name)
debug('Executing: %s', ' '.join(cmd))
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
if not fail_ok:
raise
class CacheManager(object):
def __init__(self, cache_path):
self.cache_path = cache_path
self.cachers = {}
self.load_cachers()
def load_cachers(self):
self.cachers['pip'] = PipSourceCacher(self.cache_path)
self.cachers['url'] = UrlCacher(self.cache_path)
self.cachers['platform'] = PlatformStringCacher(self.cache_path)
def pip_check(self, name, version):
return self.cachers['pip'].check(name, version=version)
def url_check(self, name):
return self.cachers['url'].check(name)
def platform_check(self, name):
return self.cachers['platform'].check(name)
def pip_cache(self, name, version, fail_ok=False):
return self.cachers['pip'].cache(
name,
version=version,
fail_ok=fail_ok)
def url_cache(self, name):
return self.cachers['url'].cache(name)
def platform_cache(self, name, execctx, buildpy, plat_specific=False):
return self.cachers['platform'].cache(
name,
execctx=execctx,
buildpy=buildpy,
plat_specific=plat_specific)
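# Hedged usage sketch (not part of the original module; the cache directory and the
# package name/version below are illustrative assumptions only).
def _example_cache_manager(cache_path='/tmp/starforge-cache'):
    cm = CacheManager(cache_path)
    # Returns the path of a cached sdist, or None if it has not been cached yet.
    return cm.pip_check('requests', '2.20.0')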
|
## Utilities file for Greenland modelling functions
## 30 Nov 2017 EHU
## 10 Jul 2019 Adding visualization tools
from numpy import *
#from netCDF4 import Dataset
import numpy as np
#from scipy import interpolate
from scipy import spatial
#from scipy.ndimage import gaussian_filter
from shapely.geometry import *
from mpl_toolkits.basemap import Basemap
# import mpl_toolkits.basemap.pyproj as pyproj
import pyproj
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import shapefile
from osgeo import gdal
import cPickle as pickle
from plastic_utilities_v2 import *
import collections
##-------------------------------
## FINDING AND WRITING LINES
##-------------------------------
def Continuous_DownVStep(startcoord_x, startcoord_y, surface, xarr, yarr, Vx, Vy):
"""Traces flowlines down from an upstream point.
surface: gridded surface
xarr: gridded x-values for area of interest
yarr: gridded y-vals
Vx: 2d interpolated function for x-component of velocity
Vy: ditto y-component
"""
outarr = []
currentpt = (startcoord_x, startcoord_y)
nstep = 0
while nstep<1000:
#dt = 10 #days, we integrate velocity presented in m/day to follow path up day by day
dx = 150 #m, normalizing our step size
vmax = 50 # m/day, the maximum velocity we believe
vx = Vx(currentpt[0], currentpt[1])
vy = Vy(currentpt[0], currentpt[1])
vm = np.linalg.norm((vx, vy))
if vx > vmax:
print 'X-velocity exceeds maximum recognised. Exiting step routine.'
break
elif vy > vmax:
print 'Y-velocity exceeds maximum recognised. Exiting step routine.'
break
else:
x_n = float(currentpt[0] + (vx/vm)*dx)
y_n = float(currentpt[1] + (vy/vm)*dx)
nextpt = (x_n, y_n)
print nextpt
currentpt = nextpt
outarr.append((currentpt[0], currentpt[1]))
nstep += 1
outarr = np.asarray(outarr)
return outarr
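## Hedged usage sketch (not part of the original module): trace one flowline through a
## synthetic, spatially uniform velocity field. The grid extents, speeds and start point
## below are illustrative assumptions only.
def _example_trace_flowline():
    from scipy import interpolate
    xg = np.linspace(0., 50000., 100)
    yg = np.linspace(0., 50000., 100)
    vx_grid = np.full((len(yg), len(xg)), 5.)  # m/day, flow in +x
    vy_grid = np.full((len(yg), len(xg)), 1.)  # m/day, slight +y component
    Vx = interpolate.interp2d(xg, yg, vx_grid)
    Vy = interpolate.interp2d(xg, yg, vy_grid)
    return Continuous_DownVStep(25000., 25000., None, xg, yg, Vx, Vy)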
#def WriteNetworkDict()
##-------------------------------
## READING IN SAVED LINES
##-------------------------------
## Read in line from CSV
def Flowline_CSV(filename, nlines=None, has_width=False, flip_order=True):
"""Function to read in flowlines in CSV format, similar to those sent by C. Kienholz for Alaska.
Input: filename; nlines=number of flowlines. Edited to make this argument unnecessary, but leaving it in for backward compatibility
has_width: default False for older files that have only (x,y) rather than (x,y,width) saved
    flip_order: default True, reversing each line so it runs from terminus to peak; set False for lines that already run from terminus to peak
Output: list of flowlines
"""
f = open(filename,'r')
header = f.readline() #header line
hdr = header.strip('\r\n')
keys = hdr.split(',') #get names of variables
#keys[-1] = keys[-1].strip('\r\n')
data = {k : [] for k in keys} #end of line has hidden characters, so 'point_m' does not get read
#data['Line number'] = []
data['Length_ID'] = collections.OrderedDict() #new dictionary that counts how many points (i.e. lines of file) are in each flowline. Must be ordered for later iteration!
#if nlines is not None:
# data['Lineslist'] = [[] for k in range(nlines)]
data['Lineslist'] = [] #initialize as empty list
lines = f.readlines()
f.close()
temp = []
j = 0
for i,l in enumerate(lines):
linstrip = l.strip('\r\n')
parts = linstrip.split(',')
#data['Line-number'].append(parts[0])
#data['x-coord'].append(parts[1])
#data['y-coord'].append(parts[2])
x_coord = float(parts[1])
y_coord = float(parts[2])
if parts[0] not in data['Length_ID'].keys(): #finding out where lines separate
temp = []
data['Lineslist'].append(temp) #initialize new empty array that can be modified in-place later
data['Length_ID'][parts[0]] = 1
j+=1
else:
data['Length_ID'][parts[0]] += 1
#if xbounds[0]<x_coord<xbounds[1]: #taking out values outside of map area
# if ybounds[0]<y_coord<ybounds[1]:
if has_width:
width = float(parts[3])
temp.append((x_coord, y_coord, width))
else:
temp.append((x_coord, y_coord))
data['Lineslist'][j-1] = np.array(temp) #need to modify an existing array rather than append to keep correct indexing
#data['Lineslist'][j] = np.array(temp)
if nlines is None:
nlines = len(data['Length_ID'].keys())
if flip_order:
centrelines_list = [np.array(data['Lineslist'][j])[::-1] for j in range(nlines)] #making arrays, reversed to start at terminus rather than peak
else:
centrelines_list = [np.array(data['Lineslist'][j]) for j in range(nlines)] # arrays already start at terminus
return centrelines_list
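## Hedged usage sketch (not part of the original module): write a tiny CSV in the format
## described above (line ID, x, y) and read it back. The filename is an assumption.
def _example_flowline_csv(tmpname='_example_flowline.csv'):
    with open(tmpname, 'w') as f:
        f.write('LineID,x-coord,y-coord\n')
        f.write('0,100.0,200.0\n')
        f.write('0,150.0,250.0\n')
        f.write('1,300.0,400.0\n')
    return Flowline_CSV(tmpname, has_width=False, flip_order=True)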
def TopToTerm(branchlist):
mainline = branchlist[0]
maintree = spatial.KDTree(mainline)
full_lines = {}
j = 0
while j<len(branchlist):
branch = branchlist[j]
pt = branch[0]
dist, idx = maintree.query(pt, distance_upper_bound=5000) #find distances and indices of nearest neighbours along main line
#print dist, idx
if idx==len(mainline): #if branch does not intersect with the main line
print 'Branch {} does not intersect main line. Searching nearest trib.'.format(j)
tribtree = spatial.KDTree(full_lines[j-1]) #line of nearest trib
dist_t, idx_t = tribtree.query(pt, distance_upper_bound=1000)
            if idx_t==len(full_lines[j-1]):
print 'Branch {} also does not intersect tributary {}. Appending raw line. Use with caution.'.format(j, j-1)
full_lines[j] = branch
else:
tribfrag = branchlist[j-1][:idx_t]
fullbranch = np.concatenate((tribfrag, branch))
full_lines[j] = fullbranch
j+=1
else:
print mainline[idx]
mainfrag = mainline[:idx]
fullbranch = np.concatenate((mainfrag, branch))
full_lines[j] = fullbranch
j+=1
return full_lines
##-------------------------------
## YIELD STRENGTH OPTIMISATION
##-------------------------------
# Dimensional and Dimensionless parameters
H0=1e3 #characteristic height for nondimensionalisation
L0=10e3 #characteristic length (10km)
#tau_yield = 100e3 #initial guess to initialize
#tau_0 = 100e3
g = 9.8
rho_ice = 920.0 #ice density kg/m^3
rho_sea=1020.0 #seawater density kg/m^3
#Bingham stress function
def B_var(tau_0, elev, thick, pos=None, time=None): #variation by altitude and ice thickness (effective pressure at base)...pos, time arguments required by plasticmodel
if elev<0:
D = -elev #Water depth D the nondim bed topography value when Z<0
else:
D = 0
N = rho_ice*g*thick*H0 - rho_sea*g*D*H0
mu = 0.01 #Coefficient between 0 and 1
tau_y = tau_0 + mu*N
return tau_y/(rho_ice*g*H0**2/L0)
def B_const(tau_yield, elev, thick, pos=None, time=None): #functional form of B if using a constant yield strength
return tau_yield/(rho_ice*g*H0**2/L0)
def plasticmodel_error(bedfunction, tau_val, Bfunction, startpoint, hinit, endpoint, Npoints, obsheightfunction, allow_upstream_breakage=True):
"""Arguments used:
bedfunction should be function of arclength returning bed elevation of the glacier.
Bfunction is nondim yield strength. Should be function with arguments elevation, ice thickness, position, and time (can just not use last two if no variation)
Startpoint is where (in arclength space) integration should start.
hinit is nondim initial height. Could be given by water balance, obs, or some thinning from reference height.
Endpoint is where integration should stop.
Npoints is how many model points to use (suggested 25000+)
#Resolution (in m) is how closely we want to space the model sample points (CURRENTLY USING NPOINTS INSTEAD OF RESOLUTION)
Obsheightfunction is the observations for comparison. May need to process from a thickness measurement. (working on functionality to make this argument optional)
allow_upstream_breakage: determines whether profiles should be allowed to break (due to yield) when stepping upstream--default is True, but False may allow optimization with more sparse data
plasticmodel_error function returns values:
RMS error
CV(RMSE)
"""
#N = ceil(abs((endpoint-startpoint)*L0/resolution))
N = Npoints
horiz = linspace(startpoint, endpoint, N)
dx = mean(diff(horiz))
#if dx<0:
# print 'Detected: running from upglacier down to terminus.'
#elif dx>0:
# print 'Detected: running from terminus upstream.'
SEarr = []
thickarr = []
basearr = []
obsarr = []
SEarr.append(hinit)
thickarr.append(hinit-(bedfunction(startpoint)/H0))
basearr.append(bedfunction(startpoint)/H0)
obsarr.append((obsheightfunction(startpoint))/H0)
for x in horiz[1::]:
bed = bedfunction(x)/H0 # value of interpolated bed function
obsheight = (obsheightfunction(x))/H0
modelthick = thickarr[-1]
B = Bfunction(tau_val, bed, modelthick, None, None)
#Break statements for thinning below yield, water balance, or flotation
if dx<0:
if modelthick<BalanceThick(bed,B):
print 'Thinned below water balance at x=' + str(10*x)+'km'
break
if modelthick<FlotationThick(bed) and allow_upstream_breakage: #new control on whether breaking happens
print 'Thinned below flotation at x=' + str(10*x)+'km'
break
if modelthick<4*B*H0/L0 and allow_upstream_breakage:
print 'Thinned below yield at x=' +str(10*x)+'km'
break
else:
basearr.append(bed)
SEarr.append(SEarr[-1]+(B/modelthick)*dx)
thickarr.append(SEarr[-1]-basearr[-1])
obsarr.append(obsheight)
error = np.sqrt(((np.array(SEarr)-np.array(obsarr))**2).mean())
CVrms = error/mean(SEarr)
print 'RMS error: '+ str(error) +', CV(RMSE): ' + str(CVrms)
return (error, CVrms)
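## Hedged usage sketch (not part of the original module): evaluate the misfit for one
## constant yield strength, mirroring the call made in CV_Optimise below. bedf and sef are
## assumed to be interpolated bed and surface functions (in m) of nondimensional arclength.
def _example_plastic_error(bedf, sef, arcmax):
    tau_try = 150e3  # Pa, illustrative value
    return plasticmodel_error(bedf, tau_try, B_const, 0, sef(0)/H0, arcmax, 25000, sef)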
def CV_Optimise(linename, lineref, testrange):
    """For one flowline dict `lineref`, sweep `testrange` of yield strengths and return the values minimising CV(RMSE) for the constant and variable yield laws."""
#ty_arr = arange(100e3, 451e3, 5e3)
#t0_arr = arange(80e3, 431e3, 5e3)
CV_const_arr = []
CV_var_arr = []
bedf = lineref['bed']
thickf = lineref['thickness']
sef = lineref['surface']
arcmax = ArcArray(lineref['line'])[-1]
for tau in testrange:
#tau_yield = ty_arr[j]
#tau_0 = t0_arr[j]
#tau_yield = tau
#tau_0 = tau
print str(linename) +', no smoothing, Ty = {} Pa'.format(tau)
tau_yield = tau
#model_const = plasticmodel_error(tau, bedf, B_const, 0, BalanceThick((bedf(0)/H0), B_const(tau, bedf(0)/H0, thickf(0)/H0, 0, 0))+(bedf(0)/H0), arcmax, 25000, sef)
model_const = plasticmodel_error(bedf, tau, B_const, 0, sef(0)/H0, arcmax, 25000, sef) #prescribed terminus thickness
#RMS_const = model_const[0]
CV_const = model_const[1]
print str(linename) + ', no smoothing, variable with T0 = {} Pa'.format(tau)
tau_0 = tau
#model_var = plasticmodel_error(tau, bedf, B_var, 0, BalanceThick((bedf(0)/H0), B_var(tau, bedf(0)/H0, thickf(0)/H0, 0, 0))+(bedf(0)/H0), arcmax, 25000, sef)
model_var = plasticmodel_error(bedf, tau, B_var, 0, sef(0)/H0, arcmax, 25000, sef) #prescribed terminus thickness
#RMS_var = model_var[0]
CV_var = model_var[1]
CV_const_arr.append(CV_const)
CV_var_arr.append(CV_var)
constopt_index = np.argmin(CV_const_arr)
varopt_index = np.argmin(CV_var_arr)
constopt = testrange[constopt_index]
varopt = testrange[varopt_index]
print 'OPTIMAL VALUE FOR CONSTANT TY: '+str(0.001*constopt)+' kPa'
print 'OPTIMAL VALUE FOR VARIABLE T0: '+str(0.001*varopt)+' kPa'
return constopt, varopt
def Network_CV_Optimise(networklist, taurange, glaciername='Glacier'):
"""networklist = list of dictionaries, where each dictionary is a flowline
    taurange = range of yield strength values to try; 50-500 kPa is a good choice
Modifies the dictionaries in networklist to add entries for optimal tau values
Returns list of best-fit tau_y, tau_0 values for all branches tested
"""
bestfitarr = []
for j, d in enumerate(networklist):
optimal_ty, optimal_t0 = CV_Optimise(glaciername+str(j), d, taurange)
d['Best tau_y'] = optimal_ty
d['Best tau_0'] = optimal_t0
bestfitarr.append((optimal_ty, optimal_t0))
return bestfitarr
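## Hedged usage sketch (not part of the original module): optimise yield strength over a
## coarse test range for an already-prepared list of flowline dictionaries, following the
## 50-500 kPa guidance in the docstring above. The glacier label is illustrative.
def _example_network_optimise(networklist):
    taurange = arange(50e3, 500e3, 25e3)  # Pa
    return Network_CV_Optimise(networklist, taurange, glaciername='ExampleGlacier')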
##-------------------------------
## TIME EVOLUTION MODELLING - superseded by flowline_class_hierarchy code
##-------------------------------
#
#def ProcessDicts(linedicts, keys, fields, bestfit_tau):
# """Processing list of flowline dicts to be ready for PlasticEvol"""
# for d in linedicts:
# for j,k in enumerate(keys):
# d[k] = FlowProcess(d['line'], fields[j])
#
# for n,d in enumerate(linedicts):
# tau_0 = bestfit_tau[n][0]
# tau_y = bestfit_tau[n][1]
# arcmax = ArcArray(d['line'])[-1]
# modelprof = PlasticProfile(d['bed'], tau_0, B_var, 0, d['surface'](0)/H0, arcmax, 10000, d['surface'])
# modelint = interpolate.interp1d(modelprof[0], modelprof[1], kind='linear', copy=True)
# d['Modelled'] = modelprof
# d['Ref-profile-func'] = modelint
# d['Best tau_y'] = tau_y
# d['Best tau_0'] = tau_0
#
# return linedicts
#
#
#def PlasticEvol(linedicts, testyears, upgl_ref=15000/L0, thinrate=10/H0, thinvalues=None):
# """linedicts: a list of flowline dictionaries. These should be already optimised and include reference profiles from a ref model run
# testyears: a range of years to test
# upgl_ref: where to apply upglacier thinning. Default is 15km upstream, or top of glacier if flowline <15km
# thinrate: thinning rate (constant) to apply at reference point
# thinfunc: the option to define thinning as a function fit to obs (e.g. sinusoid) or as extreme climate scenario (e.g. exponential increase in thinning)
#
# returns list of dictionaries with model output
# """
# if thinvalues is None:
# thinvals = np.full(len(testyears), thinrate)
# else:
# thinvals = thinvalues
#
# modeldicts = [{} for j in range(len(linedicts))]
# for j,d in enumerate(linedicts):
# print 'Currently running line {}'.format(j)
# sarr = d['Modelled'][0] #calling list of master glacier dicts for initialization before getting into modeldicts...
# amax = sarr[-1] #can change if want to model shorter profile
# refpt = min(amax, upgl_ref)
# refht = d['Ref-profile-func'](refpt)
#
# bedf = d['bed']
# sef = d['surface']
#
# tau_j = d['Best tau_0']
#
# dmodel = modeldicts[j]
# dmodel['Termini'] = [L0*min(sarr)]
# dmodel['Termrates'] = []
#
# for j, yr in enumerate(testyears):
# #thinning = yr*thinrate
# thinning = np.sum(thinvals[:j])
# fwdmodel = PlasticProfile(bedf, tau_j, B_var, refpt, refht-thinning, 0, 25000, sef)
# bkmodel = PlasticProfile(bedf, tau_j, B_var, refpt, refht-thinning, amax, 25000, sef)
# modelterm = L0*min(fwdmodel[0]) #in m
# dL = modelterm - dmodel['Termini'][-1]
# #dmodel[yr] = fwdmodel #showing profile downstream of refpt
# dmodel['Termini'].append(modelterm)
# dmodel['Termrates'].append(dL) #dt = 1 by definition
#
# return modeldicts
#
#
##--------------------------
## VISUALIZATION - MAPPING
##--------------------------
def Greenland_map(service='ESRI_Imagery_World_2D', epsg=3413, xpixels=5000):
"""Function using Basemap to plot map for all of Greenland.
Input:
service: map appearance selected from ['World_Physical_Map', 'World_Shaded_Relief', 'World_Topo_Map', 'NatGeo_World_Map', 'ESRI_Imagery_World_2D', 'World_Street_Map', 'World_Imagery', 'Ocean_Basemap']
epsg: identifier of specific map projection to use in plotting. Default is 3413 (Polar Stereographic North).
"""
m = Basemap(projection='npstere', boundinglat=70, lon_0=315, epsg=epsg, llcrnrlon=300, llcrnrlat=57, urcrnrlon=20, urcrnrlat=80, resolution='h')
plt.figure()
m.arcgisimage(service=service, xpixels=xpixels)
plt.show()
return m
##Convert coords into lat/lon so that Basemap can convert them back (don't know why this is necessary, but it works)
def flowline_latlon(coords, fromproj=pyproj.Proj("+init=epsg:3413"), toproj=pyproj.Proj("+init=EPSG:4326")):
"""Convert coords into lat/lon so that Basemap can convert them back for plotting (don't know why this is necessary, but it works)
Defaults:
fromproj = NSIDC Polar Stereographic North, EPSG 3413
toproj = WGS84 lat-lon, EPSG 4326
"""
xs = coords[:,0]
ys = coords[:,1]
x_lon, y_lat = pyproj.transform(fromproj, toproj, xs, ys)
latlon_coords = np.asarray(zip(x_lon, y_lat))
return latlon_coords
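## Hedged usage sketch (not part of the original module): draw the default Greenland
## basemap and overlay one flowline, assumed to be an Nx2 array of EPSG 3413 coordinates.
def _example_plot_flowline(coords_xy):
    m = Greenland_map()
    ll = flowline_latlon(coords_xy)
    m.plot(ll[:, 0], ll[:, 1], latlon=True, color='k')
    return m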
# set the colormap and centre the colorbar - from Joe Kington (StOv)
class MidpointNormalize(colors.Normalize):
"""
    Normalise the colorbar so that diverging bars work their way either side from a prescribed midpoint value,
e.g. im=ax1.imshow(array, norm=MidpointNormalize(midpoint=0.,vmin=-100, vmax=100))
"""
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y), np.isnan(value))
##--------------------------
## GREENLAND-SPECIFIC FILE I/O
##--------------------------
def read_termini(filename, year):
"""Make and return a dictionary of terminus positions, indexed by MEaSUREs ID. These can then be plotted on a Greenland_map instance
Input:
filename = name of MEaSUREs terminus position shapefile to read
year: year of the terminus position observations"""
print 'Reading in MEaSUREs terminus positions for year ' + str(year)
sf = shapefile.Reader(filename)
fields = sf.fields[1:] #excluding the mute "DeletionFlag"
field_names = [field[0] for field in fields]
term_recs = sf.shapeRecords()
termpts_dict = {}
for r in term_recs:
atr = dict(zip(field_names, r.record)) #dictionary of shapefile fields, so we can access GlacierID by name rather than index. Index changes in later years.
key = atr['GlacierID'] #MEaSUREs ID number for the glacier, found by name rather than index
termpts_dict[key] = np.asarray(r.shape.points) #save points spanning terminus to dictionary
return termpts_dict
# Finding intersection of terminus points with mainline--modified from hubbard-mainline-advance-v2.py
def projected_term_obs(termset, linestr):
'''Given a termset from file input and LineString representation of a flowline, termline constructs a Shapely LineString of the terminus and returns the intersection of the two'''
termarr = np.array(termset)
termline = LineString(termarr)
centrpt = termline.centroid
arcdist = linestr.project(centrpt)
if arcdist>0:
return arcdist/1000
else:
near = linestr.distance(termline) #in case terminus listed in MEaSUREs is farther advanced than max seaward extent of saved flowline
return -near/1000
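## Hedged usage sketch (not part of the original module; the shapefile name and GlacierID
## are placeholder assumptions): read one year of MEaSUREs terminus positions and project
## a single terminus onto a flowline.
def _example_terminus_projection(flowline_coords, shapefile_name='termini_2007.shp'):
    termini = read_termini(shapefile_name, 2007)
    linestr = LineString(flowline_coords)
    return projected_term_obs(termini[1], linestr)  # GlacierID 1 assumed present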
def advterm(termset, linestr):
'''Given termset and LineString representation of a flowline, advterm finds which terminus position projects most advanced along central flowline and returns its arclength position'''
x_term = termset[:, 0] #Note need to change from [:, 1] to [:, 0] for x-coord, due to different data format for Hubbard
y_term = termset[:, 1]
projections = []
for i in xrange(len(x_term)):
proji = linestr.project(Point(x_term[i], y_term[i]))
projections.append(proji)
termmax = min(projections) #need minimum rather than max here because we are interested in the most advanced, i.e. lowest arc-length value projection of terminus
return termmax/1000
def retterm(termset, linestr):
'''Given termset (from file input above), retterm finds which terminus position projects most retreated (rel. 2007 terminus) along central flowline and returns its arclength position'''
x_term = termset[:, 0]
y_term = termset[:, 1]
projections = []
for i in xrange(len(x_term)):
proji = linestr.project(Point(x_term[i], y_term[i]))
projections.append(proji)
termmin = max(projections) #max is the most retreated, i.e. highest arc-length value projection of terminus
return termmin/1000
##Function to read MEaSUREs velocity GeoTIFFs
def read_velocities(filename, return_grid=True, return_proj=False):
"""Extract x, y, v from a MEaSUREs GeoTIFF.
Input:
filename = GeoTIFF to be read
Optional args:
return_grid = whether to return x-y grid (default True) or only the velocity field (False)
return_proj = whether to return the gdal projection parameters (default False)"""
ds = gdal.Open(filename)
#Get dimensions
nc = ds.RasterXSize
nr = ds.RasterYSize
geotransform = ds.GetGeoTransform()
xOrigin = geotransform[0]
xPix = geotransform[1] #pixel width in x-direction
yOrigin = geotransform[3]
yPix = geotransform[5] #pixel height in y-direction
lons = xOrigin + np.arange(0, nc)*xPix
lats = yOrigin + np.arange(0, nr)*yPix
x, y = np.meshgrid(lons, lats)
vband = ds.GetRasterBand(1)
varr = vband.ReadAsArray()
if return_grid and return_proj:
return x, y, varr, ds.GetProjection()
elif return_grid:
return x, y, varr
else:
return varr
### Load-in functionality to read only terminus position and flux, lifted from Greenland-automated_summary_plots.py
def lightload(filename, glacier_name, output_dictionary):
"""Function to read only terminus position and flux from stored plastic model output.
Input:
filename = the name of a pickle file with stored model output
glacier_name = name or other identifier of the glacier to be read
output_dictionary = name of an existing dictionary where we should put this output.
Returns:
output_dictionary modified to add the requested model output
"""
output_dictionary[glacier_name] = {}
with open(filename, 'rb') as handle:
loadin = pickle.load(handle)
N_Flowlines = loadin['N_Flowlines']
mainline_termini = loadin['mainline_model_output']['Termini']
mainline_flux = loadin['mainline_model_output']['Terminus_flux']
output_dictionary[glacier_name][0] ={'Termini': mainline_termini, 'Terminus_flux': mainline_flux}
if N_Flowlines >1:
for n in range(N_Flowlines)[1::]:
key_n = 'model_output_'+str(n)
termini_n = loadin[key_n]['Termini']
termflux_n = loadin[key_n]['Terminus_flux']
output_dictionary[glacier_name][n] = {'Termini': termini_n, 'Terminus_flux': termflux_n}
else:
pass
return output_dictionary
def scenario_cumulative_SLE(scenario_dictionary):
    """Sum cumulative terminus flux and sea-level equivalent contribution for one scenario.
    NB: relies on a module-level sequence `glaciers_simulated` of MEaSUREs GlacierIDs."""
    sd = scenario_dictionary
pernetwork_cumul_fx = []
pernetwork_cumul_sle = []
for j, gid in enumerate(glaciers_simulated):
branch_fx = [np.nan_to_num(sd['GID{}'.format(gid)][k]['Terminus_flux']) for k in range(len(sd['GID{}'.format(gid)]))]
total_fx = sum(branch_fx, axis=0)
total_sle = (1E-12)*np.array(total_fx)/(361.8) #Gt ice/mm SLE conversion
cumul_fx = np.cumsum(total_fx)
cumul_sle = np.cumsum(total_sle)
pernetwork_cumul_fx.append(cumul_fx)
pernetwork_cumul_sle.append(cumul_sle)
scenario_sle = np.cumsum(pernetwork_cumul_sle, axis=0)
return scenario_sle
def compare_scenario_SLE(full_output_dictionary):
"""Calculate scenario_cumulative_SLE for all scenarios simulated, and compare them"""
perscenario_SLE = []
for s in full_output_dictionary.keys():
print 'Scenario {}'.format(s)
        perscenario_SLE.append(scenario_cumulative_SLE(full_output_dictionary[s])[-1])
return perscenario_SLE
|
#
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import os
from builders.builder import Builder
from lib.logger import LOGGER
from builders.constants import PLATFORM_PARENT_PATH
class GoBuilder(Builder):
def __init__(self, app_info):
Builder.__init__(self, app_info)
def build(self):
LOGGER.info('Building {} project using godep'.format(self.name))
with open(self.build_log_path, 'a') as build_log, \
open(self.err_log_path, 'a') as err_log:
try:
subprocess.check_call(['godep', 'go', 'build', './...'],
cwd=self.sources_path, stdout=build_log, stderr=err_log)
except Exception as e:
LOGGER.error('Cannot build {} project using godep'.format(self.name))
raise e
LOGGER.info('Building {} project using godep has been finished'.format(self.name))
def download_project_sources(self, snapshot=None, url=None):
Builder.download_project_sources(self, snapshot, url)
godep_path = os.path.join(os.environ["GOPATH"], 'src/github.com/trustedanalytics/')
if not os.path.exists(godep_path):
os.makedirs(godep_path)
if not os.path.exists(os.path.join(godep_path, self.name)):
os.symlink(os.path.join(PLATFORM_PARENT_PATH, self.name), os.path.join(godep_path, self.name))
|
import requests
import json
requests.packages.urllib3.disable_warnings()
from requests.packages.urllib3.exceptions import InsecureRequestWarning
SDWAN_IP = "10.10.20.90"
SDWAN_USERNAME = "admin"
SDWAN_PASSWORD = "C1sco12345"
class rest_api_lib:
def __init__(self, vmanage_ip, username, password):
self.vmanage_ip = vmanage_ip
self.session = {}
self.login(self.vmanage_ip, username, password)
def login(self, vmanage_ip, username, password):
#GET SESSION
base_url_str = 'https://%s:8443/'%vmanage_ip
login_action = 'j_security_check'
login_data = {'j_username' : username, 'j_password' : password}
login_url = base_url_str + login_action
        response = requests.post(url=login_url, data=login_data, verify=False)
try:
cookies = response.headers["Set-Cookie"]
jsessionid = cookies.split(";")
return(jsessionid[0])
except:
print(response.text)
def get_token(self, vmanage_ip, jsessionid):
#GET TOKEN
headers = {'Cookie': jsessionid}
base_url_str = 'https://%s:8443/'%vmanage_ip
api = "dataservice/client/token"
url = base_url_str + api
response = requests.get(url = url, headers=headers,verify=False)
if response.status_code == 200:
return(response.text)
else:
return None
Sdwan = rest_api_lib(SDWAN_IP, SDWAN_USERNAME, SDWAN_PASSWORD)
jsessionid = Sdwan.login(SDWAN_IP, SDWAN_USERNAME, SDWAN_PASSWORD)
token = Sdwan.get_token(SDWAN_IP, jsessionid)
def reboot_History():
    # NB: despite its name, this function POSTs a device-template attach request to vManage.
    headers = {'Cookie': jsessionid, 'X-XSRF-TOKEN': token, 'Content-Type': 'application/json'}
payload = {"deviceTemplateList":[{"templateId":"c566d38e-2219-4764-a714-4abeeab607dc","device":[{"csv-status":"complete","csv-deviceId":"CSR-807E37A3-537A-07BA-BD71-8FB76DE9DC38","csv-deviceIP":"10.10.1.13","csv-host-name":"site1-cedge01","//system/host-name":"man","//system/system-ip":"10.10.1.13","//system/site-id":"1009","/1/vpn_1_if_name/interface/if-name":"GigabitEthernet3","/1/vpn_1_if_name/interface/description":"port.site1-sw01","/1/vpn_1_if_name/interface/ip/address":"10.10.21.1/24","/512/vpn-instance/ip/route/0.0.0.0/0/next-hop/vpn_512_next_hop_ip_address/address":"10.10.20.254","/512/vpn_512_if_name/interface/if-name":"GigabitEthernet1","/512/vpn_512_if_name/interface/description":"port.sbx-mgmt","/512/vpn_512_if_name/interface/ip/address":"10.10.20.175/24","/0/vpn-instance/ip/route/0.0.0.0/0/next-hop/vpn_0_next_hop_ip_address/address":"10.10.23.9","/0/vpn-instance/ip/route/0.0.0.0/0/next-hop/public_internet_vpn_0_next_hop_ip_address/address":"10.10.23.41","/0/internet_vpn_0_if_name/interface/if-name":"GigabitEthernet4","/0/internet_vpn_0_if_name/interface/description":"internet-link","/0/internet_vpn_0_if_name/interface/ip/address":"10.10.23.42/30","/0/vpn_0_if_name/interface/if-name":"GigabitEthernet2","/0/vpn_0_if_name/interface/description":"GigabitEthernet5.wan-rtr01","/0/vpn_0_if_name/interface/ip/address":"10.10.23.10/30","//system/gps-location/latitude":"35.852","//system/gps-location/longitude":"-78.869","csv-templateId":"c566d38e-2219-4764-a714-4abeeab607dc"}],"isEdited":False,"isMasterEdited":False}]}
#payload = {"deviceTemplateList":[{"templateId":"c566d38e-2219-4764-a714-4abeeab607dc","device":[{"csv-status":"complete","csv-deviceId":"CSR-807E37A3-537A-07BA-BD71-8FB76DE9DC38","csv-deviceIP":"10.10.1.13","csv-host-name":"site1-cedge01","//system/host-name":"mantext","//system/system-ip":"10.10.1.13","//system/site-id":"1009","/1/vpn_1_if_name/interface/if-name":"GigabitEthernet3","/1/vpn_1_if_name/interface/description":"port.site1-sw01","/1/vpn_1_if_name/interface/ip/address":"10.10.21.1/24","/512/vpn-instance/ip/route/0.0.0.0/0/next-hop/vpn_512_next_hop_ip_address/address":"10.10.20.254","/512/vpn_512_if_name/interface/if-name":"GigabitEthernet1","/512/vpn_512_if_name/interface/description":"port.sbx-mgmt","/512/vpn_512_if_name/interface/ip/address":"10.10.20.175/24","/0/vpn-instance/ip/route/0.0.0.0/0/next-hop/vpn_0_next_hop_ip_address/address":"10.10.23.9","/0/vpn-instance/ip/route/0.0.0.0/0/next-hop/public_internet_vpn_0_next_hop_ip_address/address":"10.10.23.41","/0/internet_vpn_0_if_name/interface/if-name":"GigabitEthernet4","/0/internet_vpn_0_if_name/interface/description":"internet-link","/0/internet_vpn_0_if_name/interface/ip/address":"10.10.23.42/30","/0/vpn_0_if_name/interface/if-name":"GigabitEthernet2","/0/vpn_0_if_name/interface/description":"GigabitEthernet5.wan-rtr01","/0/vpn_0_if_name/interface/ip/address":"10.10.23.10/30","//system/gps-location/latitude":"35.852","//system/gps-location/longitude":"-78.869","csv-templateId":"c566d38e-2219-4764-a714-4abeeab607dc"}],"isEdited":False,"isMasterEdited":False}]}
payload = json.dumps(payload)
url = 'https://%s:8443/dataservice/'%SDWAN_IP
api = 'template/device/config/attachfeature'
url = url + api
resp = requests.post(url, headers=headers, data=payload, verify=False)
data = resp.json()
print(data)
reboot_History()
|
#coding: utf-8
''' mbinary
#######################################################################
# File : fastPow.py
# Author: mbinary
# Mail: zhuheqin1@gmail.com
# Blog: https://mbinary.xyz
# Github: https://github.com/mbinary
# Created Time: 2018-12-17 21:39
# Description: fast power
#######################################################################
'''
def fastPow(a,n):
'''a^n'''
rst = 1
while n:
if n%2:
rst *=a
n>>=1
a*=a
return rst
def fastMul(a,b):
'''a*b'''
rst = 0
while b:
if b&1:
rst +=a
b>>=1
        a*=2
    return rst
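# Hedged usage sketch (not in the original file): both helpers use the same
# binary-decomposition idea; check them against the builtin operators.
def _example_fast_ops():
    assert fastPow(3, 13) == 3**13
    assert fastMul(7, 25) == 7*25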
|
import pytest
from pypart.messenger import Email, EmailFormatError
class TestEmail:
@pytest.mark.parametrize(
'email, exception',
[
('username@domain.com', None),
('user.name@domain.com', None),
('user-name@domain.com', None),
('user_name@domain.com', None),
('@domain.com', EmailFormatError),
('username@', EmailFormatError),
('username', EmailFormatError),
('username@domain.f', EmailFormatError),
('username@domain', EmailFormatError),
('username@domain.verylongtopleveldomainname', EmailFormatError),
('username@domain.co-m', EmailFormatError),
('user!name@domain.co-m', EmailFormatError),
],
)
def test_validator(
self, email: str, exception: EmailFormatError | None
) -> None:
if exception:
with pytest.raises(exception):
Email(email)
else:
Email(email)
def test_domain(self) -> None:
email: Email = Email('username@domain.com')
assert email.domain == 'domain.com'
def test_username(self) -> None:
email: Email = Email('username@domain.com')
assert email.username == 'username'
def test___str__(self) -> None:
email: Email = Email('username@domain.com')
assert str(email) == 'username@domain.com'
|
import unittest
import os
import sys
from maskgen import plugins
from maskgen.video_tools import get_shape_of_video
from maskgen import tool_set
from maskgen.cv2api import cv2api_delegate
from tests.test_support import TestSupport
class TestDroneShake(TestSupport):
filesToKill = []
def test_plugin(self):
plugins.loadPlugins()
filename = self.locateFile("tests/videos/sample1.mov")
filename_output = os.path.join(os.path.split(filename)[0], "sample1_out.avi")
self.filesToKill.append(filename_output)
file = os.path.join(os.path.split(filename)[0], "sample1.csv")
self.filesToKill.append(file)
args, errors = plugins.callPlugin('DroneShake',
None, #image would go here for image manipulations
filename,
filename_output,
fps=13.53,
height=360,
width=480)
# checking to make sure there are no errors
self.assertEqual(errors, None)
# Get the output video to compare the height and width
video = cv2api_delegate.videoCapture(filename_output)
width = int(video.get(cv2api_delegate.prop_frame_width))
height = int(video.get(cv2api_delegate.prop_frame_height))
self.assertTrue(int(width) == 480)
self.assertTrue(int(height) == 360)
def tearDown(self):
for f in self.filesToKill:
if os.path.exists(f):
os.remove(f)
if __name__ == '__main__':
unittest.main()
|
import numpy as np
import pytorch_lightning as pl
import torch
import torch_geometric.transforms as transforms
from torch_geometric.data import DataLoader
from torch_geometric.datasets import TUDataset
from src import project_dir
class EnzymesDataModule(pl.LightningDataModule):
def __init__(
self,
data_dir="/data/",
batch_size=64,
num_workers=0,
splits=[0.7, 0.15, 0.15],
seed=42,
):
super(EnzymesDataModule, self).__init__()
self.data_dir = project_dir + data_dir
self.batch_size = batch_size
self.num_workers = num_workers
self.splits = splits
self.seed = seed
self.transform = transforms.Compose(
[
transforms.NormalizeFeatures(),
]
)
# Number of graphs, classes and features
self.num_graphs = 600
self.num_classes = 6
self.num_features = 21
def prepare_data(self):
# Download data
TUDataset(
root=self.data_dir,
name="ENZYMES",
use_node_attr=True,
use_edge_attr=True,
pre_transform=self.transform,
)
def setup(self, stage=None):
initial_seed = torch.initial_seed()
torch.manual_seed(self.seed)
dataset = TUDataset(
root=self.data_dir,
name="ENZYMES",
use_node_attr=True,
use_edge_attr=True,
pre_transform=self.transform,
).shuffle()
split_idx = np.cumsum(
[int(len(dataset) * prop) for prop in self.splits])
self.data_train = dataset[: split_idx[0]]
self.data_val = dataset[split_idx[0]: split_idx[1]]
self.data_test = dataset[split_idx[1]:]
torch.manual_seed(initial_seed)
def train_dataloader(self):
return DataLoader(
self.data_train,
batch_size=self.batch_size,
num_workers=self.num_workers,
shuffle=True,
pin_memory=True,
)
def val_dataloader(self):
return DataLoader(
self.data_val,
batch_size=self.batch_size,
num_workers=self.num_workers,
shuffle=False,
pin_memory=True,
)
def test_dataloader(self):
return DataLoader(
self.data_test,
batch_size=self.batch_size,
num_workers=self.num_workers,
shuffle=False,
pin_memory=True,
)
@staticmethod
def add_model_specific_args(parent_parser):
parser = parent_parser.add_argument_group("EnzymesDataModule")
parser.add_argument(
"--data_dir", default=project_dir + "/data/", type=str)
parser.add_argument("--batch_size", default=64, type=int)
parser.add_argument("--num_workers", default=0, type=int)
parser.add_argument(
"--splits", default=[0.7, 0.15, 0.15], nargs=3, type=float)
parser.add_argument("--seed", default=42, type=int)
return parent_parser
@staticmethod
def from_argparse_args(namespace):
ns_dict = vars(namespace)
args = {
"data_dir": ns_dict.get("data_dir", project_dir + "/data/"),
"batch_size": ns_dict.get("batch_size", 64),
"num_workers": ns_dict.get("num_workers", 0),
"splits": ns_dict.get("splits", [0.7, 0.15, 0.15]),
"seed": ns_dict.get("seed", 42),
}
return args
if __name__ == "__main__":
dm = EnzymesDataModule(data_dir=project_dir + "/data/")
dm.prepare_data()
|
from tqdm import tqdm
import time
for i in (t:=tqdm(range(1000))):
t.set_description(f'MEDITATING')
time.sleep(1)
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""The training script that runs the party.
This script requires tensorflow 1.1.0-rc1 or beyond.
As of 04/05/17 this requires installing tensorflow from source,
(https://github.com/tensorflow/tensorflow/releases)
So that it works locally, the default worker_replicas and total_batch_size are
set to 1. For training in 200k iterations, they both should be 32.
"""
import tensorflow as tf
import tensorflow_probability as tfp
from magenta.models.nsynth import utils
slim = tf.contrib.slim
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("master", "",
"BNS name of the TensorFlow master to use.")
tf.app.flags.DEFINE_string("config", "h512_bo16", "Model configuration name")
tf.app.flags.DEFINE_integer("task", 0,
"Task id of the replica running the training.")
tf.app.flags.DEFINE_integer("worker_replicas", 1,
"Number of replicas. We train with 32.")
tf.app.flags.DEFINE_integer("ps_tasks", 0,
"Number of tasks in the ps job. If 0 no ps job is "
"used. We typically use 11.")
tf.app.flags.DEFINE_integer("total_batch_size", 1,
"Batch size spread across all sync replicas."
"We use a size of 32.")
tf.app.flags.DEFINE_integer("sample_length", 64000,
"Raw sample length of input.")
tf.app.flags.DEFINE_integer("num_iters", 200000,
"maximum number of iterations to train for")
tf.app.flags.DEFINE_string("logdir", "/tmp/nsynth",
"The log directory for this experiment.")
tf.app.flags.DEFINE_string("problem", "nsynth",
"Which problem setup (i.e. dataset) to use")
tf.app.flags.DEFINE_string("train_path", "", "The path to the train tfrecord.")
tf.app.flags.DEFINE_string("log", "INFO",
"The threshold for what messages will be logged."
"DEBUG, INFO, WARN, ERROR, or FATAL.")
tf.app.flags.DEFINE_bool("vae", False,
"Whether or not to train variationally")
tf.app.flags.DEFINE_bool("small", False,
"Whether to use full model i.e. 30 layers in decoder/encoder or reduced model")
tf.app.flags.DEFINE_integer("asymmetric", 0,
"Whether to have equal number of layers in decoder/encoder or a weaker decoder")
tf.app.flags.DEFINE_bool("kl_annealing", False,
"Whether to use kl_annealing")
tf.app.flags.DEFINE_float("aux_coefficient", 0,
"coefficient for auxilliary loss")
tf.app.flags.DEFINE_float("annealing_loc", 1750.,
"params of normal cdf for annealing")
tf.app.flags.DEFINE_float("annealing_scale", 150.,
"params of normal cdf for annealing")
tf.app.flags.DEFINE_float("kl_threshold", None,
"Threshold with which to bound KL-Loss")
tf.app.flags.DEFINE_float("input_dropout", 1,
"How much dropout at input to add")
def main(unused_argv=None):
tf.logging.set_verbosity(FLAGS.log)
if FLAGS.config is None:
raise RuntimeError("No config name specified.")
if FLAGS.vae:
config = utils.get_module("wavenet." + FLAGS.config).VAEConfig(
FLAGS.train_path, sample_length=FLAGS.sample_length, problem=FLAGS.problem, small=FLAGS.small, asymmetric=FLAGS.asymmetric, num_iters=FLAGS.num_iters, aux=FLAGS.aux_coefficient, dropout=FLAGS.input_dropout)
else:
config = utils.get_module("wavenet." + FLAGS.config).Config(
FLAGS.train_path, sample_length=FLAGS.sample_length, problem=FLAGS.problem, small=FLAGS.small, asymmetric=FLAGS.asymmetric, num_iters=FLAGS.num_iters)
logdir = FLAGS.logdir
tf.logging.info("Saving to %s" % logdir)
with tf.Graph().as_default():
total_batch_size = FLAGS.total_batch_size
assert total_batch_size % FLAGS.worker_replicas == 0
worker_batch_size = total_batch_size / FLAGS.worker_replicas
# Run the Reader on the CPU
cpu_device = "/job:localhost/replica:0/task:0/cpu:0"
if FLAGS.ps_tasks:
cpu_device = "/job:worker/cpu:0"
with tf.device(cpu_device):
inputs_dict = config.get_batch(worker_batch_size)
with tf.device(
tf.train.replica_device_setter(ps_tasks=FLAGS.ps_tasks,
merge_devices=True)):
global_step = tf.get_variable(
"global_step", [],
tf.int32,
initializer=tf.constant_initializer(0),
trainable=False)
# pylint: disable=cell-var-from-loop
lr = tf.constant(config.learning_rate_schedule[0])
for key, value in config.learning_rate_schedule.iteritems():
lr = tf.cond(
tf.less(global_step, key), lambda: lr, lambda: tf.constant(value))
# pylint: enable=cell-var-from-loop
tf.summary.scalar("learning_rate", lr)
# build the model graph
outputs_dict = config.build(inputs_dict, is_training=True)
if FLAGS.vae:
if FLAGS.kl_annealing:
dist = tfp.distributions.Normal(loc=FLAGS.annealing_loc, scale=FLAGS.annealing_scale)
annealing_rate = dist.cdf(tf.to_float(global_step)) # how to adjust the annealing
else:
annealing_rate = 0.
kl = outputs_dict["loss"]["kl"]
rec = outputs_dict["loss"]["rec"]
aux = outputs_dict["loss"]["aux"]
tf.summary.scalar("kl", kl)
tf.summary.scalar("rec", rec)
tf.summary.scalar("annealing_rate", annealing_rate)
if FLAGS.kl_threshold is not None:
kl = tf.maximum(tf.cast(FLAGS.kl_threshold, dtype=kl.dtype), kl)
if FLAGS.aux_coefficient > 0:
tf.summary.scalar("aux", aux)
loss = rec + annealing_rate*kl + tf.cast(FLAGS.aux_coefficient, dtype=tf.float32)*aux
else:
loss = outputs_dict["loss"]
tf.summary.scalar("train_loss", loss)
worker_replicas = FLAGS.worker_replicas
ema = tf.train.ExponentialMovingAverage(
decay=0.9999, num_updates=global_step)
opt = tf.train.SyncReplicasOptimizer(
tf.train.AdamOptimizer(lr, epsilon=1e-8),
worker_replicas,
total_num_replicas=worker_replicas,
variable_averages=ema,
variables_to_average=tf.trainable_variables())
train_op = slim.learning.create_train_op(total_loss = loss,
optimizer = opt,
global_step = global_step,
colocate_gradients_with_ops=True)
# train_op = opt.minimize(
# loss,
# global_step=global_step,
# name="train",
# colocate_gradients_with_ops=True)
session_config = tf.ConfigProto(allow_soft_placement=True)
is_chief = (FLAGS.task == 0)
local_init_op = opt.chief_init_op if is_chief else opt.local_step_init_op
slim.learning.train(
train_op=train_op,
logdir=logdir,
is_chief=is_chief,
master=FLAGS.master,
number_of_steps=config.num_iters,
global_step=global_step,
log_every_n_steps=250,
local_init_op=local_init_op,
save_interval_secs=300,
sync_optimizer=opt,
session_config=session_config,)
if __name__ == "__main__":
tf.app.run()
|
# import
import os
import pandas as pd
import numpy as np
#%%
def get_depth_df(path,subjects):
# Loop through each Testsubject folder
depth_df = pd.DataFrame()
for i,elem in enumerate(subjects):
filepath = os.path.join(path, "TestSubject"+str(elem)+"\\FAP.txt")
depth_df_raw = pd.read_csv(filepath,header=1,delimiter=",",
quotechar=";",
usecols=['Depth','PicIndex'],
# index_col="PicIndex",
skipinitialspace=True)
depth_df_raw = depth_df_raw.set_index('PicIndex')
if i==0:
depth_df = depth_df_raw
depth_df.columns = [elem]
else:
depth_df[elem] = depth_df_raw
return depth_df
#%%
def get_mean(depth_df):
mean_df = pd.DataFrame()
column_name = ['D'+str(i) for i in range(1,len(depth_df.columns)+1)]
for i in range(1,depth_df.index.max()+1):
mean = depth_df.loc[i].mean().values
sample_length = depth_df.loc[i].shape[0]
# create temporary mean df to concat to the original df
mean = np.tile(mean.transpose(),(sample_length,1))
mean = pd.DataFrame(mean)
mean.columns = column_name
idx = [i for j in range(sample_length)]
mean['index'] = idx
mean = mean.set_index('index')
if i == 1:
mean_df = mean
else:
mean_df = mean_df.append(mean)
depth_df = pd.concat([depth_df,mean_df],axis=1)
return depth_df
#%%
def get_min_depth(depth_df):
min_list = []
for col in range(1,len(depth_df.columns)+1):
min_list = min_list + [depth_df[col].min() for i in range(70)]
return min_list
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-15 17:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0044_auto_20160115_1704'),
]
operations = [
migrations.AddField(
model_name='holesaw',
name='quality',
field=models.CharField(blank=True, max_length=255, verbose_name='Stahlsorte'),
),
migrations.AddField(
model_name='holesaw',
name='toothing',
field=models.CharField(blank=True, max_length=255, verbose_name='Verzahnung'),
),
]
|
from django.contrib.postgres.fields import JSONField
from django.db import models
from django.db.models.deletion import ProtectedError
from django.urls import reverse
from .utils import generate_unique_slug
class ExcludeDeletedManager(models.Manager):
def get_queryset(self):
return super(ExcludeDeletedManager, self).get_queryset().filter(_deleted=False)
class BaseModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
_deleted = models.BooleanField(default=False)
objects = ExcludeDeletedManager()
admin_manager = models.Manager()
class Meta:
abstract = True
ordering = ['-created_at']
def delete(self, using=None):
try:
super(BaseModel, self).delete(using)
except ProtectedError:
self._deleted = True
self.save()
class ApiDummy(BaseModel):
name = models.CharField(max_length=100)
slug = models.SlugField(editable=False, unique=True)
html_response = models.TextField(blank=True, null=True)
json_response = JSONField(blank=True, null=True)
@property
def endpoint(self):
return reverse('api-dummy', kwargs={'slug': self.slug})
def save(self, *args, **kwargs):
if not self.slug:
self.slug = generate_unique_slug(ApiDummy, [self.name])
return super(ApiDummy, self).save(*args, **kwargs)
|
# ##### BEGIN MIT LICENSE BLOCK #####
#
# MIT License
#
# Copyright (c) 2022 Steven Garcia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ##### END MIT LICENSE BLOCK #####
from .format import JMAAsset
from ..global_functions import mesh_processing, global_functions
def process_scene(context, version, generate_checksum, game_version, extension, custom_scale, biped_controller, fix_rotations):
    """Sample every armature node's transform for each frame of the Blender scene into a JMAAsset."""
    JMA = JMAAsset()
collections = []
layer_collections = list(context.view_layer.layer_collection.children)
while len(layer_collections) > 0:
collection_batch = layer_collections
layer_collections = []
for collection in collection_batch:
collections.append(collection)
for collection_child in collection.children:
layer_collections.append(collection_child)
object_properties = []
object_list = list(context.scene.objects)
node_list = []
armature = []
armature_count = 0
first_frame = context.scene.frame_start
last_frame = context.scene.frame_end + 1
total_frame_count = context.scene.frame_end - first_frame + 1
for obj in object_list:
object_properties.append([obj.hide_get(), obj.hide_viewport])
if obj.type == 'ARMATURE' and mesh_processing.set_ignore(collections, obj) == False:
mesh_processing.unhide_object(collections, obj)
armature_count += 1
armature = obj
mesh_processing.select_object(context, obj)
node_list = list(obj.data.bones)
JMA.transform_count = total_frame_count
JMA.node_count = len(node_list)
sorted_list = global_functions.sort_list(node_list, armature, game_version, version, False)
joined_list = sorted_list[0]
reversed_joined_list = sorted_list[1]
blend_scene = global_functions.BlendScene(0, armature_count, 0, 0, 0, 0, armature, node_list, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)
global_functions.validate_halo_jma_scene(game_version, version, blend_scene, object_list, extension)
JMA.node_checksum = 0
for node in joined_list:
name = node.name
find_child_node = global_functions.get_child(node, reversed_joined_list)
find_sibling_node = global_functions.get_sibling(armature, node, reversed_joined_list)
first_child_node = -1
first_sibling_node = -1
parent_node = -1
if not find_child_node == None:
first_child_node = joined_list.index(find_child_node)
if not find_sibling_node == None:
first_sibling_node = joined_list.index(find_sibling_node)
if not node.parent == None:
parent_node = joined_list.index(node.parent)
JMA.nodes.append(JMA.Node(name, parent_node, first_child_node, first_sibling_node))
if generate_checksum:
JMA.node_checksum = global_functions.node_hierarchy_checksum(JMA.nodes, JMA.nodes[0], JMA.node_checksum)
for frame in range(first_frame, last_frame):
transforms_for_frame = []
for node in joined_list:
context.scene.frame_set(frame)
is_bone = False
if armature:
is_bone = True
bone_matrix = global_functions.get_matrix(node, node, True, armature, joined_list, True, version, 'JMA', False, custom_scale, fix_rotations)
mesh_dimensions = global_functions.get_dimensions(bone_matrix, node, version, is_bone, 'JMA', custom_scale)
rotation = (mesh_dimensions.quaternion[0], mesh_dimensions.quaternion[1], mesh_dimensions.quaternion[2], mesh_dimensions.quaternion[3])
translation = (mesh_dimensions.position[0], mesh_dimensions.position[1], mesh_dimensions.position[2])
scale = (mesh_dimensions.scale[0])
transforms_for_frame.append(JMA.Transform(translation, rotation, scale))
JMA.transforms.append(transforms_for_frame)
if version > 16394 and biped_controller:
for frame in range(JMA.transform_count):
context.scene.frame_set(frame)
armature_matrix = global_functions.get_matrix(armature, armature, True, None, joined_list, False, version, 'JMA', False, custom_scale, fix_rotations)
mesh_dimensions = global_functions.get_dimensions(armature_matrix, armature, version, False, 'JMA', custom_scale)
rotation = (mesh_dimensions.quaternion[0], mesh_dimensions.quaternion[1], mesh_dimensions.quaternion[2], mesh_dimensions.quaternion[3])
translation = (mesh_dimensions.position[0], mesh_dimensions.position[1], mesh_dimensions.position[2])
scale = (mesh_dimensions.scale[0])
JMA.biped_controller_transforms.append(JMA.Transform(translation, rotation, scale))
context.scene.frame_set(1)
for idx, obj in enumerate(object_list):
property_value = object_properties[idx]
obj.hide_set(property_value[0])
obj.hide_viewport = property_value[1]
return JMA
|
#!/usr/bin/python3
# -*-coding:Utf-8 -*
import re
import sys
from PIL import Image
tmpPath = "./.tmp"
def getCentroid(line):
line = line.replace("(", "").replace(")", "").split(',')
r = int(line[0])
g = int(line[1])
b = int(line[2])
return (r, g, b)
def getCoor(line):
return (line.split(" ")[0])
def creatKey(i, j):
return "(" + str(i) + "," + str(j) + ")"
def getPointArray(points):
res = []
i = 0
j = 0
line = []
key = creatKey(i, j)
while (key in points):
while (key in points):
line.append(points[key])
j += 1
key = creatKey(i, j)
res.append(line)
line = []
j = 0
i += 1
key = creatKey(i, j)
return(res)
def treatContent(content):
file = content.split('\n')
points = {}
i = 0
while(file[i]):
if (file[i] == "--" and file[i + 1]):
centroid = getCentroid(file[i + 1])
elif(file[i] != "-"):
points[getCoor(file[i])] = centroid
i += 1
return getPointArray(points)
def main():
av = sys.argv
try:
f = open(tmpPath, "r")
content = treatContent(f.read())
        img = Image.new("RGB", (len(content[0]), len(content)))  # width = columns, height = rows
pix = img.load()
x = 0
y = 0
for line in content:
for point in line:
pix[y, x] = point
y += 1
y = 0
x += 1
img.save(av[1].split(".")[0] + "_compressed.jpg")
f.close()
except Exception as err:
print(err)
exit(84)
return(0)
if __name__ == '__main__':
main()
|
from ..utils import Object
class SendMessage(Object):
"""
Sends a message. Returns the sent message
Attributes:
ID (:obj:`str`): ``SendMessage``
Args:
chat_id (:obj:`int`):
Target chat
reply_to_message_id (:obj:`int`):
Identifier of the message to reply to or 0
options (:class:`telegram.api.types.sendMessageOptions`):
Options to be used to send the message
reply_markup (:class:`telegram.api.types.ReplyMarkup`):
Markup for replying to the message; for bots only
input_message_content (:class:`telegram.api.types.InputMessageContent`):
The content of the message to be sent
Returns:
Message
Raises:
:class:`telegram.Error`
"""
ID = "sendMessage"
def __init__(self, chat_id, reply_to_message_id, options, reply_markup, input_message_content, extra=None, **kwargs):
self.extra = extra
self.chat_id = chat_id # int
self.reply_to_message_id = reply_to_message_id # int
self.options = options # SendMessageOptions
self.reply_markup = reply_markup # ReplyMarkup
self.input_message_content = input_message_content # InputMessageContent
@staticmethod
def read(q: dict, *args) -> "SendMessage":
chat_id = q.get('chat_id')
reply_to_message_id = q.get('reply_to_message_id')
options = Object.read(q.get('options'))
reply_markup = Object.read(q.get('reply_markup'))
input_message_content = Object.read(q.get('input_message_content'))
return SendMessage(chat_id, reply_to_message_id, options, reply_markup, input_message_content)
|
class Demo:
a1 = 1
@classmethod
def mymethod1(cls):
cls.c1 = 20
del Demo.a1
print("Only static variable a1 is present")
print(Demo.__dict__)
print("---------------------------")
Demo.mymethod1()
print("static variable a1 is absent and only c1 is present")
print(Demo.__dict__)
|
import joblib
import pandas as pd
from hgtk import text, letter, checker
from .const import ALPHABET_LIST, CHOSUNG_LIST, JONGSUNG_LIST, JUNGSUNG_LIST, NUMBER_LIST, SPECIAL_CHARACTERS_LIST
CHOSUNG = 3
JUNGSUNG = 2
JONGSUNG = 1
class ModelByWord:
def __init__(self):
text.decompose = self.__decompose
self._model = joblib.load("./dataset/model_sgd.pkl")
self._word_list = [CHOSUNG_LIST, JUNGSUNG_LIST, JONGSUNG_LIST,
SPECIAL_CHARACTERS_LIST, NUMBER_LIST, ALPHABET_LIST]
@staticmethod
def __decompose(text, latin_filter=True, compose_code=u" "):
result = u""
for c in list(text):
if checker.is_hangul(c):
if checker.is_jamo(c):
result = result + c + compose_code
else:
result = result + "".join(letter.decompose(c)) + compose_code
else:
                if latin_filter:  # outside Hangul, keep only characters in the Latin-1 range (i.e. Korean + English)
if checker.is_latin1(c):
result = result + c + compose_code
else:
result = result + c + compose_code
return result
def _preprocess(self, comment):
comment_decompose = self.__decompose(comment)
removed_space_word = list(filter(lambda word: word != ' ', comment_decompose.split(' ')))
split_word = list(filter(lambda element: element != '', removed_space_word))
df_result = self._word_store_in_dataframe(split_word)
return df_result
def predict(self, comment):
data = self._preprocess(comment)
predict = self._model.predict(data)
return predict
def _word_store_in_dataframe(self, split_word):
df_list = ["cho", "jung", "jong", "special_characters", "number", "alphabet"]
temp_dict = {}
for key, word_type in zip(df_list, self._word_list):
temp_dict[key] = pd.DataFrame(0, columns=word_type, index=range(1), dtype=float)
total_letter_count = 0
for word in split_word:
temp_dict, letter_count = self._insert_dataframe(temp_dict, word)
total_letter_count += letter_count
result = pd.concat(temp_dict, axis=1) / total_letter_count
return result
def _insert_dataframe(self, temp_dict, word):
letter_count = 0
if checker.is_hangul(word):
length = len(word)
if length == CHOSUNG:
temp_dict['cho'][word[0]] += 1
temp_dict['jung'][word[1]] += 1
temp_dict['jong'][word[2]] += 1
letter_count += 3
elif length == JUNGSUNG:
temp_dict['cho'][word[0]] += 1
temp_dict['jung'][word[1]] += 1
temp_dict['jong'][' '] += 1
letter_count += 3
else:
if word in CHOSUNG_LIST:
temp_dict['cho'][word[0]] += 1
elif word in JUNGSUNG_LIST:
temp_dict['jung'][word[0]] += 1
else:
temp_dict['jong'][word[0]] += 1
letter_count += 1
else:
if word.lower() in ALPHABET_LIST:
word = word.lower()
temp_dict['alphabet'][word] += 1
elif word in NUMBER_LIST:
temp_dict['number'][word] += 1
else:
if word in SPECIAL_CHARACTERS_LIST:
temp_dict['special_characters'][word] += 1
else:
temp_dict['special_characters']['etc'] += 1
letter_count += 1
return temp_dict, letter_count
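# Hedged usage sketch (assumes ./dataset/model_sgd.pkl and the lists imported
# from .const are available in the running project):
#
#   model = ModelByWord()
#   prediction = model.predict("예시 댓글")  # returns the wrapped sklearn model's prediction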
|
no = {'零': 0, '一': 1, '二': 2, '三': 3, '四': 4, '五': 5, '六': 6, '七': 7, '八': 8, '九': 9, '十': 10}  # Chinese numeral characters
inputs = []  # input stream
var = []  # variable stream
data = []  # data stream
regexp = []  # regex stream
state = None  # current state of the state machine
def get():
    # read one token of user input onto the input stream
inputs.append(input().split()[-1])
print(inputs)
def get_key(v):
for k, val in no.items():
if v == val:
return k
while True:
    inputs.append(input().split()[-1])  # append the input token to the input stream
    # state machine
if not state:
if inputs[-1] == '整数':
state = '定义'
elif inputs[-1] in var:
state = '运算'
elif inputs[-1] == '看看':
state = '看看'
elif inputs[-1] == '如果':
state = '正则'
if state == '定义':
print('到达定义')
        get()  # variable name
var.append(inputs[-1])
get()
        if inputs[-1] == '等于':  # assignment
get()
data.append(no[inputs[-1]])
            state = None  # transaction finished
continue
if state == '运算':
print('到达运算')
get()
        if inputs[-1] == '减少':  # decrease
get()
data[-1] = get_key(data[-1]-no[inputs[-1]])
        elif inputs[-1] == '增加':  # increase
get()
data[-1] = get_key(data[-1]+no[inputs[-1]])
state = None
continue
if state == '看看':
print('到达看看')
get()
        if inputs[-1] in var:  # look up the value
print(data[-1])
state = None
continue
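# Example session inferred from the state machine above. Each line is one user
# input; only the last whitespace-separated token of each line is inspected.
#
#   整数        -> enters the "define" state
#   甲          -> variable name is recorded
#   等于        -> assignment follows
#   三          -> the value 3 is stored on the data stream
#   看看        -> enters the "show" state
#   甲          -> prints the most recent stored value (3)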
|
version = '0.19.0b1'
|
import os
from dotenv import load_dotenv
from users import app
load_dotenv()
DB_USER = os.getenv('DB_USER')
DB_PASSWORD = os.getenv('DB_PASSWORD')
DB_HOST = os.getenv('DB_HOST')
DB_PORT = os.getenv('DB_PORT')
DB_NAME = os.getenv('DB_NAME')
config = {
'app': {
'APP_NAME': os.getenv('APP_NAME', 'app'),
'APP_VERSION': os.getenv('APP_VERSION', '1.0.0'),
'SECRET_KEY': os.getenv('APP_SECRET_KEY', None),
'TIMEZONE': os.getenv('APP_TIMEZONE', 'UTC'),
},
'db': {
'SQLALCHEMY_DATABASE_URI': f'postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_NAME}',
'SQLALCHEMY_TRACK_MODIFICATIONS': False,
},
}
app.config.update(**config.get('app'))
app.config.update(**config.get('db'))
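# Example .env consumed by load_dotenv() above; every key shown is read via
# os.getenv in this module, and the values are placeholders only.
#
#   APP_NAME=users
#   APP_VERSION=1.0.0
#   APP_SECRET_KEY=change-me
#   APP_TIMEZONE=UTC
#   DB_USER=postgres
#   DB_PASSWORD=postgres
#   DB_HOST=localhost
#   DB_PORT=5432
#   DB_NAME=users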
|
#! /usr/bin/env python
# encoding: utf-8
version = (0, 3, 10)
from .client import VimeoClient # noqa: E402
from . import exceptions # noqa: E402
|
from django.urls import path, include
from . import views
urlpatterns = [
# path('', include('user.urls')),
path('', views.first, name='first'),
path('dashboard', views.home, name='home')
]
|
"""
Copyright © 2021 The Johns Hopkins University Applied Physics Laboratory LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import argparse
import json
import os
import traceback
from cmd import Cmd
from typing import Any, List
import inquirer
import numpy as np
from l2explorer.l2explorer_env import L2ExplorerTask
channels = ['Reset', 'Debug', 'State']
class L2ExplorerPrompt(Cmd):
prompt = "l2explorer > "
intro = "Welcome to the L2Explorer command line tool! Type ? to list commands"
def __init__(self, debug=False, editor_mode=False):
"""L2ExplorerPrompt initializing function."""
super().__init__()
# Initialize variables
self.channel = ''
self.map = None
# Initialize L2Explorer environment
self._env = L2ExplorerTask(debug, editor_mode)
self.init_env()
# Initialize the argument parser for sending messages
self.message_parser = argparse.ArgumentParser(prog="send_message")
self.action_parser = argparse.ArgumentParser(prog="action")
self.init_parsers()
def init_env(self, map=None):
# Load default map
if map is None:
mapname = 'default_map.json'
with open(mapname) as f:
map = json.load(f)
self._env.reset(map)
self.name = self._env._env.get_behavior_names()[0]
self.group_spec = self._env._env.get_behavior_spec(self.name)
self._n_agents = 1
# Step multiple times to update environment
for i in range(4):
self.do_step()
def init_parsers(self):
"""Initialize argument parsers for L2Explorer channel tester.
:return: Status.
"""
# Initialize message parser
self.message_parser.add_argument('-f', '--file',
help='file containing messages for testing')
self.message_parser.add_argument('-c', '--channel',
help='channel to publish message on')
self.message_parser.add_argument('-m', '--message', nargs='+',
help='message content')
# Initialize action parser
self.action_parser.add_argument('-c', '--count', type=int, default=1,
help='number of times to perform action')
self.action_parser.add_argument('-a', '--action', nargs='+', type=int,
help='action tuple')
return True
def do_exit(self, argv=None):
"""Exits L2ExplorerPrompt after cleanup.
:param argv: Ignored.
:return: Exit status.
"""
print("\nExiting L2Explorer CLI...\n")
return True
def help_exit(self):
"""Helper function for exit.
:return: None.
"""
print("\nExit the application. Shorthand: x, q, Ctrl-C.\n")
def do_channels(self, argv=None):
"""List the available channels for sending and receiving messages.
:param argv: Ignored.
:return: None.
"""
print("")
for channel in channels:
print(f" {channel}")
print("")
def help_channels(self):
"""Helper function for channels.
:return: None.
"""
print("\nList the available channels for sending and receiving messages.\n")
def do_select_channel(self, argv=None):
"""Display an interactive list for selecting a channel for sending and receiving messages.
:param argv: Ignored.
:return: None.
"""
question = [
inquirer.List('channel',
message="What channel would you like to communicate on?",
choices=channels,
carousel=True
),
]
print("")
self.channel = inquirer.prompt(question)["channel"]
def help_select_channel(self):
"""Helper function for select channel.
:return: None.
"""
print("\nSelect a channel for sending messages.\n")
def do_channel(self, argv=None):
"""Display the currently selected channel.
:param argv: Ignored.
:return: None.
"""
if self.channel:
print(f"\n Channel: {self.channel}\n")
else:
print("\nA channel has not been selected yet.\n")
def help_channel(self):
"""Helper function for channel.
:return: None.
"""
print("\nShow the currently selected channel.\n")
def do_reset(self, argv=None):
"""Perform a reset in the L2Explorer environment.
:param argv: Ignored.
:return: None.
"""
self.init_env(self.map)
def help_reset(self):
"""Helper function for reset.
:return: None.
"""
print("\nPerform a reset in the L2Explorer environment.\n")
def do_step(self, argv=None):
"""Perform a step in the L2Explorer environment.
:param argv: Ignored.
:return: None.
"""
self._env._env.step()
def help_step(self):
"""Helper function for step.
:return: None.
"""
print("\nPerform a step in the L2Explorer environment.\n")
def do_action(self, argv=None):
"""Perform an action in the L2Explorer environment.
:param argv: The action parameters.
:return: None.
"""
try:
# Parse the argument vector
args = self.action_parser.parse_args(argv.split())
if args.action:
for _ in range(0, args.count):
_, reward, _, _ = self._env.step(args.action)
print(f'Reward: {reward}')
except Exception as e:
print(e)
traceback.print_exc()
pass
except SystemExit:
pass
def help_action(self):
"""Helper function for action.
:return: None.
"""
self.action_parser.print_help()
print("\nPerform an action in the L2Explorer environment.\n")
def send_message(self, channel: str, message: dict):
if channel == 'Reset':
action = message.get('action', '')
if action == 'reset_environment':
self.map = message.get('payload', None)
self.do_reset()
elif action == 'object_create':
self._env._reset_channel.send_json(message)
self.do_step()
else:
print(f"Invalid reset action: {action}")
elif channel == 'Debug':
self._env._debug_channel.send_string(message)
self.do_step()
elif channel == 'State':
self._env._state_channel.request_keys(message)
self.do_step()
else:
print(f"Unimplemented channel: {channel}")
def do_send_message(self, argv=None):
"""Send/receive a message on the selected channel.
:param argv: The message parameters.
:return: None.
"""
try:
# Parse the argument vector
args = self.message_parser.parse_args(argv.split())
# Check if test file is specified
if args.file:
# Parse file
with open(args.file) as f:
data = json.load(f)
# Iterate over test messages and send over side-channel
for test in data["tests"]:
self.send_message(test["channel"], test["message"])
else:
# Get channel
self.get_channel(args.channel)
# Send message on proper channel
if args.message:
self.send_message(self.channel, json.loads(''.join(args.message)))
else:
print("Missing argument: -m --message")
except Exception as e:
print(e)
traceback.print_exc()
pass
except SystemExit:
pass
def help_send_message(self):
"""Helper function for send message.
:return: None.
"""
self.message_parser.print_help()
def get_channel(self, channel_arg):
"""Check if channel has been selected or prompt user to select one.
:param channel_arg: The channel argument given on the command line.
:return: Channel
"""
try:
if channel_arg in channels:
self.channel = channel_arg
elif not self.channel:
self.do_select_channel()
except:
pass
return self.channel
def default(self, argv):
"""Default handler if unknown command is entered on the command line.
:param argv: The line entered on the command line.
:return: Status.
"""
if argv == 'x' or argv == 'q':
# Check for shorthand exit commands
return self.do_exit(argv)
else:
print("Unknown command: {}".format(argv))
if __name__ == "__main__":
try:
# Initialize argument parser
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true',
help='Enable debug mode for printing messages received on L2Explorer channels')
parser.add_argument('--editor-mode', action='store_true',
help='Connect to existing Unity editor')
# Parse arguments
args = parser.parse_args()
# Change directory to directory of current file
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Start L2Explorer Prompt
L2ExplorerPrompt(args.debug, args.editor_mode).cmdloop()
except KeyboardInterrupt:
print("\nExiting L2Explorer CLI...")
# Close L2Explorer environment gracefully
L2ExplorerTask.close_env()
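# Example message file accepted by `send_message -f <file>`, with the structure
# inferred from do_send_message/send_message above ("tests" entries carrying a
# "channel" and a "message"); the payload contents are illustrative only.
#
# {
#   "tests": [
#     {"channel": "Reset", "message": {"action": "reset_environment", "payload": {}}},
#     {"channel": "Debug", "message": "some debug string"}
#   ]
# }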
|
'''
@author Tian Shi
Please contact tshi@vt.edu
'''
import json
import os
import numpy as np
from sklearn.metrics import classification_report
from tqdm import tqdm
from .utils import calculate_COH, calculate_PMI, read_docs, read_vocab
def eval_aspect_coherence(
work_dir='../cluster_results',
file_term_doc='doc_term_mat.txt',
file_vocab='vocab.txt',
n_keywords=10, weight=[],
file_aspect_weight='aspect_weight.txt'):
docs = read_docs(os.path.join(work_dir, file_term_doc))
vocab = read_vocab(os.path.join(work_dir, file_vocab))
n_docs = len(docs)
n_terms = len(vocab)
print('n_docs={}, n_terms={}'.format(n_docs, n_terms))
dt_mat = np.zeros([n_terms, n_terms])
dt_vec = np.zeros(n_terms)
for k, itm in tqdm(enumerate(docs)):
for kk in set(itm):
for jj in set(itm):
if kk != jj:
dt_mat[int(kk), int(jj)] += 1.0
dt_vec[int(kk)] += 1.0
print('co-occur done')
if weight == []:
weight = np.loadtxt(os.path.join(
work_dir, file_aspect_weight), dtype=float)
else:
weight = np.array(weight)
n_topic = weight.shape[1]
print('n_topic={}'.format(n_topic))
PMI_arr = []
COH_arr = []
for k in tqdm(range(n_topic)):
topKeywordsIndex = []
for wd in weight[:, k].argsort()[::-1]:
topKeywordsIndex.append(wd)
topKeywordsIndex = topKeywordsIndex[:n_keywords]
PMI_arr.append(calculate_PMI(dt_mat, topKeywordsIndex))
COH_arr.append(calculate_COH(dt_mat, dt_vec, topKeywordsIndex))
print('Average PMI={}'.format(np.average(np.array(PMI_arr))))
print('Average COH={}'.format(np.average(np.array(COH_arr))))
index = np.argsort(PMI_arr)
for k in index:
kw_idx = []
for wd in np.argsort(weight[:, k])[::-1]:
kw_idx.append(wd)
kw_idx = kw_idx[:n_keywords]
print('Topic {} [PMI={}, COH={}]: {}'.format(
k+1, np.around(PMI_arr[k], 4), np.around(COH_arr[k], 4),
' '.join([vocab[w] for w in kw_idx])))
def evaluate_sscl_classification(args):
'''
Evaluate SSCL Classification.
'''
aspect_label = []
fp = open('../nats_results/aspect_mapping.txt', 'r')
for line in fp:
aspect_label.append(line.split()[1])
fp.close()
ignore_type = ['nomap']
if not args.none_type:
ignore_type.append('none')
tmp = {wd: -1 for wd in aspect_label if not wd in ignore_type}
label = {}
for k, wd in enumerate(sorted(list(tmp))):
label[wd] = k
fp = open(os.path.join('../nats_results', args.file_output), 'r')
pred = []
gold = []
for line in fp:
itm = json.loads(line)
arr = np.argsort(itm['aspect_weight'])[::-1]
for k in arr:
if not aspect_label[k] in ignore_type:
pp = aspect_label[k]
break
pred.append(label[pp])
try:
gold.append(label[itm['label']])
except:
lb = itm['label'].split(',')
if pp in lb:
gold.append(label[pp])
else:
gold.append(label[lb[0]])
fp.close()
print(classification_report(
gold, pred, target_names=list(label), digits=3))
def evaluate_ts_classification(args):
'''
Evaluate Teacher-Student Model
'''
asp_labels = []
pred = []
gold = []
fp = open(os.path.join('../nats_results', args.file_output), 'r')
for line in fp:
itm = json.loads(line)
if itm['pred_label'] in itm['gold_label'].split(','):
pred.append(itm['pred_label'])
gold.append(itm['pred_label'])
else:
pred.append(itm['pred_label'])
gold.append(itm['gold_label'].split(',')[0])
for wd in itm['gold_label'].split(','):
asp_labels.append(wd)
fp.close()
asp_labels = sorted(list(set(asp_labels)))
asp_map = {wd: k for k, wd in enumerate(asp_labels)}
pred = [asp_map[wd] for wd in pred]
gold = [asp_map[wd] for wd in gold]
print(classification_report(
gold, pred, target_names=asp_labels, digits=3))
|
import torch
import torch.nn as nn
import numpy as np
def train(model, train_loader, val_loader, device, batch_size=50, epochs=5):
lr = 0.001
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
counter = 0
print_every = 100
clip = 5 # gradient clipping
model.train()
# train for some number of epochs
for e in range(epochs):
# initialize hidden state
h = model.init_hidden(batch_size)
# batch loop
for inputs, labels in train_loader:
            inputs = inputs.to(device)
            labels = labels.to(device)
counter += 1
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
h = tuple([each.data for each in h])
# zero accumulated gradients
model.zero_grad()
# get the output from the model
output, h = model(inputs, h)
# calculate the loss and perform backprop
loss = criterion(output.squeeze(), labels.float())
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
# loss stats
if counter % print_every == 0:
# Get validation loss
val_h = model.init_hidden(batch_size)
val_losses = []
model.eval()
for inputs, labels in val_loader:
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
val_h = tuple([each.data for each in val_h])
                inputs, labels = inputs.to(device), labels.to(device)
output, val_h = model(inputs, val_h)
val_loss = criterion(output.squeeze(), labels.float())
val_losses.append(val_loss.item())
model.train()
print("Epoch: {}/{}...".format(e + 1, epochs),
"Step: {}...".format(counter),
"Loss: {:.6f}...".format(loss.item()),
"Val Loss: {:.6f}".format(np.mean(val_losses)))
return val_losses
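# Hedged usage sketch: `train` expects a model exposing `init_hidden(batch_size)`
# (returning a tuple of hidden-state tensors) and `forward(inputs, hidden)`
# returning `(output, hidden)`, e.g. an LSTM sentiment classifier. The name
# `SentimentRNN` below is hypothetical; loaders yield (inputs, labels) batches
# of exactly `batch_size` items.
#
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   model = SentimentRNN(vocab_size, output_size=1, embedding_dim=400,
#                        hidden_dim=256, n_layers=2).to(device)
#   val_losses = train(model, train_loader, val_loader, device,
#                      batch_size=50, epochs=5)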
|
from aioviberbot.api.consts import BOT_API_ENDPOINT
class MessageSender:
def __init__(self, logger, request_sender):
self._logger = logger
self._request_sender = request_sender
async def send_message(self, to, sender_name, sender_avatar, message, chat_id=None):
if not message.validate():
self._logger.error('failed validating message: {0}'.format(message))
raise Exception('failed validating message: {0}'.format(message))
payload = self._prepare_payload(
message=message,
receiver=to,
sender_name=sender_name,
sender_avatar=sender_avatar,
chat_id=chat_id
)
self._logger.debug('going to send message: {0}'.format(payload))
result = await self._request_sender.post_request(
BOT_API_ENDPOINT.SEND_MESSAGE, payload,
)
return result['message_token']
async def post_to_public_account(self, sender, sender_name, sender_avatar, message):
if not message.validate():
self._logger.error('failed validating message: {0}'.format(message))
raise Exception('failed validating message: {0}'.format(message))
if sender is None:
raise Exception('missing parameter sender')
payload = self._prepare_payload(
message=message,
sender=sender,
sender_name=sender_name,
sender_avatar=sender_avatar
)
self._logger.debug('going to send message: {0}'.format(payload))
result = await self._request_sender.post_request(
BOT_API_ENDPOINT.POST, payload,
)
return result['message_token']
def _prepare_payload(self, message, sender_name, sender_avatar, sender=None, receiver=None, chat_id=None):
payload = message.to_dict()
payload.update({
'from': sender,
'receiver': receiver,
'sender': {
'name': sender_name,
'avatar': sender_avatar
},
'chat_id': chat_id
})
return self._remove_empty_fields(payload)
def _remove_empty_fields(self, message):
return {k: v for k, v in message.items() if v is not None}
|
### TODO: nbuckman Is this actually being used?
from sys import getsizeof, stderr
from itertools import chain
from collections import deque
try:
from reprlib import repr
except ImportError:
pass
class PoseHistory:
# This class is used to store the history of poses and times for each agent
def __init__(self):
self.poses = []
self.speeds = []
self.times = []
    def add_to_history(self, current_time, current_pose, current_speed):
self.times.append(current_time)
self.poses.append(current_pose)
self.speeds.append(current_speed)
def get_history_lists(self):
return self.times, self.poses, self.speeds
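# Minimal usage sketch: record a few (time, pose, speed) samples and read them
# back; poses can be any object, e.g. an (x, y, theta) tuple.
if __name__ == "__main__":
    history = PoseHistory()
    history.add_to_history(0.0, (0.0, 0.0, 0.0), 1.5)
    history.add_to_history(0.1, (0.15, 0.0, 0.0), 1.5)
    times, poses, speeds = history.get_history_lists()
    print(times, poses, speeds)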
|
# Importing the libraries
import pandas as pd
# show all columns when printing
pd.set_option('display.max_columns', None)
# Importing the dataset
dataset = pd.read_csv('HNSCC_Clinical_data.csv')
#patient information
patient_info = dataset[["Age", "Sex", "Smoking History", "Current Smoker", "HPV status"]]
patient_info = pd.get_dummies(patient_info, columns = ["Sex", "HPV status"])
#temporal Data (time between different analyses and follow-up)
time = dataset[[
"Time from preRT image to start RT (month)",
"Time from RT stop to follow up imaging (months)",
"Follow up duration (month)",
"Survival (months)",
"Disease-free interval (months)"
]]
treatment = dataset[[
"RT Total Dose (Gy)",
"Unplanned Additional Oncologic Treatment",
"Number of Fractions",
"Received Concurrent Chemoradiotherapy?",
"CCRT Chemotherapy Regimen"
]]
treatment = pd.get_dummies(treatment, columns = ["Unplanned Additional Oncologic Treatment",
"Received Concurrent Chemoradiotherapy?",
"CCRT Chemotherapy Regimen" ])
#clinical measurements
pre_treatment = dataset[[
"Pre-RT L3 Skeletal Muscle Cross Sectional Area (cm2)",
"Pre-RT L3 Adipose Tissue Cross Sectional Area (cm2)",
"Pre-RT L3 Skeletal Muscle Index (cm2/m2)",
"Pre-RT L3 Adiposity Index (cm2/m2)",
"Pre-RT CT-derived lean body mass (kg)",
"Pre-RT CT-derived fat body mass (kg)",
"BMI start treat (kg/m2)"
]]
post_treatment = dataset[[
"Post-RT L3 Skeletal Muscle Cross Sectional Area (cm2)",
"Post-RT L3 Adipose Tissue Cross Sectional Area (cm2)",
"Post-RT L3 Skeletal Muscle Index (cm2/m2)",
"Post-RT L3 Adiposity Index (cm2/m2)",
"Post-RT CT-derived lean body mass (kg)",
"Post-RT CT-derived fat body mass (kg)",
"BMI stop treat (kg/m2)",
"Site of recurrence (Distal/Local/ Locoregional)"
]]
post_treatment = pd.get_dummies(post_treatment, columns = ["Site of recurrence (Distal/Local/ Locoregional)"])
outcome = dataset[[
"Disease Specific Survival Censor"
]]
def print_info():
data = [patient_info, pre_treatment, treatment, post_treatment, outcome]
for d in data:
print(d.info())
print(d.shape)
print(d.head())
print()
def get_data():
data = [patient_info.reset_index(drop=True), treatment.reset_index(drop=True), pre_treatment.reset_index(drop=True), post_treatment.reset_index(drop=True), outcome.reset_index(drop=True)]
df = pd.concat(data, axis=1, join="inner")
return df
#print(dataset.info())
#print(outcome.info())
ann_data = get_data()
print(ann_data.info())
#ann_data.to_csv('ann_dataset_treatment1.csv', index=False)
|
# Single-line comment
# Multi-line comments
# need '#' at the start of every line
# FIRST INTERPRETER TEST
hello_full = "Hello World!"
#print(hello_full)
#CONCATENANDO
hello = "Hello "
world = "World! "
number1 = 1
number2 = 2
#print(hello + world)
#print(number1 + number2)
#print(hello + world + number1)
#print(hello + world + str(number1))
#print("{}{}{}".format(hello, world, number1))
#print(hello, world, number1)
|
"""Basic app config for jobs app."""
from django.apps import AppConfig
class JobsConfig(AppConfig):
"""Config object for the jobs app."""
name = 'jobs'
|
import requests
__all__ = ['requests_negotiate_sspi']
from .requests_negotiate_sspi import HttpNegotiateAuth # noqa
# Monkeypatch urllib3 to expose the peer certificate
HTTPResponse = requests.packages.urllib3.response.HTTPResponse
orig_HTTPResponse__init__ = HTTPResponse.__init__
HTTPAdapter = requests.adapters.HTTPAdapter
orig_HTTPAdapter_build_response = HTTPAdapter.build_response
def new_HTTPResponse__init__(self, *args, **kwargs):
orig_HTTPResponse__init__(self, *args, **kwargs)
try:
self.peercert = self._connection.sock.getpeercert(binary_form=True)
except AttributeError:
self.peercert = None
def new_HTTPAdapter_build_response(self, request, resp):
response = orig_HTTPAdapter_build_response(self, request, resp)
try:
response.peercert = resp.peercert
except AttributeError:
response.peercert = None
return response
HTTPResponse.__init__ = new_HTTPResponse__init__
HTTPAdapter.build_response = new_HTTPAdapter_build_response
|
import copy
import itertools
import re
import pymongo
from collections import defaultdict
from pdb import set_trace
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, InvalidPage
from django.http import HttpResponseForbidden, HttpResponseRedirect, HttpResponse, Http404
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext, loader
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST
from django.views.generic.create_update import apply_extra_context, redirect
from django_vcs_watch.models import Repository, Commit, Feed
from django_vcs_watch.forms import RepositoryForm
from django_vcs_watch.utils import get_user_feed_slug
_WRAPPERS = defaultdict(lambda: lambda x:x, {
'repositories': Repository,
'commits': Commit,
})
def uniq(lst):
_copy = copy.copy(lst)
_copy.sort()
return [i[0] for i in itertools.groupby(_copy)]
@login_required
def profile(request):
from django.views.generic.list_detail import object_list
return object_list(
request,
queryset=request.user.repository_set.all())
def autocomplete(request, **kwargs):
q = request.GET.get('q', '')
#db.repositories.ensure_index([('url', pymongo.ASCENDING)])
return object_list(
request,
cls = Repository,
query = {'url': re.compile(q)},
**kwargs)
# TODO move this to separate django app like django-annoing
from django.contrib.syndication.views import feed as contrib_feed
def feed(request, slug, param = '', feed_dict = None):
url = slug
if param:
url += '/' + param
return contrib_feed(request, url, feed_dict = feed_dict)
def create(request,
template_name,
form_class = RepositoryForm,
post_save_redirect = None,
extra_context = {},
template_loader = loader,
context_processors = None,
):
if request.method == 'POST':
form = form_class(request.POST, request.FILES)
if form.is_valid():
new_object = form.save()
return redirect(post_save_redirect, new_object)
else:
form = form_class()
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'form': form,
}, context_processors)
apply_extra_context(extra_context, c)
return HttpResponse(t.render(c))
def object_detail(request,
cls,
query = {},
extra_context = {},
template_name = None,
template_name_field = None,
template_object_name = 'object',
template_loader = loader,
context_processors = None,
**kwargs
):
if callable(extra_context):
extra_context = extra_context(request, **kwargs)
if callable(query):
query = query(request, **kwargs)
object = cls.objects.find_one(query)
if object is None:
        raise Http404('Object was not found in collection "%s"' % cls.collection.name())
if not template_name:
template_name = "%s_detail.html" % cls.objects.collection_name
if template_name_field:
template_name_list = [getattr(obj, template_name_field), template_name]
t = template_loader.select_template(template_name_list)
else:
t = template_loader.get_template(template_name)
c = RequestContext(request, {
template_object_name: object,
}, context_processors)
apply_extra_context(extra_context, c)
return HttpResponse(t.render(c))
def object_list(request,
cls,
query = {},
paginate_by = None,
page = None,
allow_empty = True,
template_name = None,
template_loader = loader,
extra_context = {},
context_processors = None,
template_object_name = 'object_list',
mimetype = None,
map_func = lambda x: x,
**kwargs):
"""
Generic list of objects.
Templates: ``<collection_name>_list.html``
Context:
object_list
list of objects
is_paginated
are the results paginated?
results_per_page
number of objects per page (if paginated)
has_next
is there a next page?
has_previous
is there a prev page?
page
the current page
next
the next page
previous
the previous page
pages
number of pages, total
hits
number of objects, total
last_on_page
the result number of the last of object in the
object_list (1-indexed)
first_on_page
the result number of the first object in the
object_list (1-indexed)
page_range:
A list of the page numbers (1-indexed).
"""
if callable(extra_context):
extra_context = extra_context(request, **kwargs)
if callable(query):
query = query(request, **kwargs)
cursor = cls.objects.find(query)
if paginate_by:
paginator = Paginator(cursor, paginate_by, allow_empty_first_page=allow_empty)
if not page:
page = request.GET.get('page', 1)
try:
page_number = int(page)
except ValueError:
if page == 'last':
page_number = paginator.num_pages
else:
# Page is not 'last', nor can it be converted to an int.
raise Http404
try:
page_obj = paginator.page(page_number)
except InvalidPage:
raise Http404
c = RequestContext(request, {
template_object_name: map(map_func, page_obj.object_list),
'paginator': paginator,
'page_obj': page_obj,
# Legacy template context stuff. New templates should use page_obj
# to access this instead.
'is_paginated': page_obj.has_other_pages(),
'results_per_page': paginator.per_page,
'has_next': page_obj.has_next(),
'has_previous': page_obj.has_previous(),
'page': page_obj.number,
'next': page_obj.next_page_number(),
'previous': page_obj.previous_page_number(),
'first_on_page': page_obj.start_index(),
'last_on_page': page_obj.end_index(),
'pages': paginator.num_pages,
'hits': paginator.count,
'page_range': paginator.page_range,
}, context_processors)
else:
c = RequestContext(request, {
template_object_name: map(map_func, cursor),
'paginator': None,
'page_obj': None,
'is_paginated': False,
}, context_processors)
if not allow_empty and len(cursor) == 0:
raise Http404
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
if not template_name:
template_name = "%s_list.html" % cls.objects.collection_name
t = template_loader.get_template(template_name)
return HttpResponse(t.render(c), mimetype=mimetype)
def commit(request, repository_slug, revision):
mode = request.GET.get('mode', 'full')
assert(mode in ('full', 'files'))
file = request.GET.get('file', None)
if file is not None:
commit = Commit.objects.find_one(dict(
slug = repository_slug,
revision = revision)
)
if not commit:
            raise Http404('Object was not found in collection "%s"' % Commit.collection.name())
obj = None
for f in commit.changes.changed:
if f.filename == file:
obj = f
break
if obj is None:
            raise Http404('File %s was not found in commit "%s@%s"' % (file, commit.slug, commit.revision))
return render_to_response(
'django_vcs_watch/diff.html',
dict(file = obj),
)
return object_detail(
request,
Commit,
query = dict(slug = repository_slug, revision = revision),
template_name = 'django_vcs_watch/commit_detail_%s.html' % mode,
)
def get_user_feed(request):
user = request.user
feed_slug = get_user_feed_slug(user)
return Feed.objects.find_one(dict(_id = feed_slug))
def refresh_feed(request, template_name = 'django_vcs_watch/refresh_feed.html'):
feed = get_user_feed(request)
feed.update()
next = request.POST.get('next', None)
if next is not None:
return HttpResponseRedirect(next)
return render_to_response(
template_name,
dict(feed = feed),
)
def get_rule_from_request(request):
tokens = request.POST.get('rule', '').split('&&')
tokens = (t.strip() for t in tokens)
tokens = (t for t in tokens if t)
rule = {}
for token in tokens:
if token[0] == '@':
rule['author'] = token[1:]
elif token[0] == '$':
rule['slug'] = token[1:]
return rule
@require_POST
def ignore(request):
feed = get_user_feed(request)
if feed.ignore is None:
feed.ignore = []
rule = get_rule_from_request(request)
feed.ignore.append(rule)
feed.ignore = uniq(feed.ignore)
feed.save()
if 'author' in rule:
if 'slug' in rule:
message = _('<div class="info">Now all commits from %(author)s in %(slug)s will be ignored. Boo-ga-ga!</div>')
else:
message = _('<div class="info">Now all commits from %(author)s will be ignored. Moo-ha-ha!</div>')
else:
message = _('<div class="error">Hm, it seems that we have no message for this case :).</div>')
request.user.message_set.create(message = message % rule)
next = request.POST.get('next', None)
if not next:
next = request.META.get('HTTP_REFERER', '/')
return HttpResponseRedirect(next)
@require_POST
def unignore(request):
feed = get_user_feed(request)
if feed.ignore is None:
feed.ignore = []
rule = get_rule_from_request(request)
if rule:
        # use a list comprehension here: the module-level view named `filter`
        # defined below shadows the builtin of the same name
        feed.ignore = [x for x in feed.ignore if x != rule]
else:
feed.ignore = []
feed.save()
if rule:
request.user.message_set.create(message = _('Rule was removed.'))
else:
request.user.message_set.create(message = _('All rules were removed.'))
next = request.POST.get('next', '/')
return HttpResponseRedirect(next)
@require_POST
def watch(request):
feed = get_user_feed(request)
if feed.watch is None:
feed.watch = []
rule = get_rule_from_request(request)
feed.watch.append(rule)
feed.watch = uniq(feed.watch)
feed.save()
if 'slug' in rule:
message = _('<div class="info">Now you watch on all commits to %(slug)s. Horay!</div>')
elif 'author' in rule:
message = _('<div class="info">Now you watch on all commits by %(author)s. Yee!</div>')
else:
message = _('<div class="error">Hm, it seems that we have no message for this case :).</div>')
request.user.message_set.create(message = message % rule)
next = request.POST.get('next', None)
if not next:
next = request.META.get('HTTP_REFERER', '/')
return HttpResponseRedirect(next)
@require_POST
def unwatch(request):
feed = get_user_feed(request)
if feed.watch is None:
feed.watch = []
rule = get_rule_from_request(request)
if rule:
        # list comprehension instead of the builtin `filter`, which is shadowed
        # by the view of the same name defined below
        feed.watch = [x for x in feed.watch if x != rule]
else:
feed.watch = []
feed.save()
if rule:
request.user.message_set.create(message = _('Rule was removed.'))
else:
request.user.message_set.create(message = _('All rules were removed.'))
next = request.POST.get('next', '/')
return HttpResponseRedirect(next)
def filter(request):
q = request.GET.get('q', '')
def create_rule_item(s):
""" Returns pair (name, value) for strings like '$django', '@art' or '#ft:py'"""
names = {'$': 'slug', '@': 'author', '#': 'tag'}
return (names[s[0]], s[1:])
q = map(create_rule_item, q.split())
if q:
q = {'$where': ' && '.join("this.%s == '%s'" % item for item in q)}
else:
q = {}
return object_list(
request,
cls = Commit,
query = q,
template_name = 'django_vcs_watch/filtered_commits.html',
template_object_name = 'commits_list',
paginate_by = 20,
)
|
import base64
import json
import logging
import zlib
from datetime import datetime
from django.conf import settings
from django.db import transaction
from django.http import JsonResponse
from django.utils.encoding import force_str
from django.utils.timezone import make_aware
from pytz import UTC
from gore.auth import validate_auth_header
from gore.excs import InvalidAuth
from gore.models import Event
from gore.signals import event_received
from gore.utils.event_grouper import group_event
logger = logging.getLogger(__name__)
def store_event(request, project):
try:
auth_header = validate_auth_header(request, project)
except InvalidAuth as ia:
return JsonResponse({'error': str(ia)}, status=401)
body = request.body
if request.META.get('HTTP_CONTENT_ENCODING') == 'deflate':
body = zlib.decompress(body)
elif auth_header.get('sentry_version') == '5': # Support older versions of Raven
body = zlib.decompress(base64.b64decode(body)).decode('utf8')
body = json.loads(force_str(body))
timestamp = make_aware(datetime.fromtimestamp(float(auth_header['sentry_timestamp'])), timezone=UTC)
with transaction.atomic():
event = Event.objects.create_from_raven(project_id=project, body=body, timestamp=timestamp)
try:
with transaction.atomic():
group = group_event(event.project, event)
group.archived = False
group.cache_values()
group.save()
except: # pragma: no cover
logger.warning('event with ID %s could not be grouped' % event.id, exc_info=True)
try:
event_received.send(sender=event)
except: # pragma: no cover
logger.warning('event_received signal handling failed', exc_info=True)
if settings.DEBUG:
raise
return JsonResponse({'id': event.id}, status=201)
|
import bblfsh_sonar_checks.utils as utils
import bblfsh
import re
def check(uast):
findings = []
format_calls = bblfsh.filter(uast, "//MethodInvocation/"
"Identifier[@roleCall and @roleReceiver and @Name='String']/parent::MethodInvocation/"
"Identifier[@roleCall and @roleCallee and @Name='format']/parent::MethodInvocation")
for fcall in format_calls:
args = list(bblfsh.filter(fcall, "//*[@internalRole='arguments']"))
if len(args) == 0:
continue
format_str = args[0]
if format_str.internal_type != 'String':
# Validating format strings assigned elsewhere on the same file is possible,
# but won't be doing it here for brevity sake
continue
# For the reason stated above, we only validate %d
str_val = format_str.properties["Value"]
re_res = re.findall(r'[^%]%d', str_val)
# Validate number of args
if len(re_res) != len(args[1:]):
findings.append({"msg": "Format string doesn't match number of args",
"pos": format_str.start_position})
# Validate type of args (for %d it should have the NumberLiteral role)
for arg in args[1:]:
froles = filter(lambda x: x == bblfsh.role_id('NUMBER'), arg.roles)
if len(list(froles)) == 0:
findings.append({"msg": "Format string argument is not numeric",
"pos": arg.start_position})
return findings
if __name__ == '__main__': utils.run_default_fixture(__file__, check)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 13 10:50:27 2021
@author: hemerson
"""
from setuptools import setup, find_packages
VERSION = '0.0.18'
DESCRIPTION = 'AgentRL'
LONG_DESCRIPTION = 'A package containing several lightweight reinforcement learning agents'
# Setting up
setup(
name="AgentRL",
version=VERSION,
author="Harry Emerson",
author_email="emersonharry8@gmail.com",
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
packages=find_packages(),
install_requires=[
'numpy>=1.21.4',
'setuptools>=58.0.4',
'torch>=1.10.0'
],
keywords=['reinforcement learning', 'agent'],
classifiers= [
"Development Status :: 3 - Alpha",
"Intended Audience :: Education",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
]
)
|
from django.apps import AppConfig
class TradesiteConfig(AppConfig):
name = 'tradesite'
|
from django.conf import settings
from hashids import Hashids
class ModelHashIdMixin(object):
""" Easy hashids for Django models.
    To use it in your model, inherit from this class in addition to models.Model.
    Then use the obj.hashid property or the cls.pk_from_hashid() classmethod.
"""
@classmethod
def get_hashids_object(cls):
salt = cls.__name__ + settings.SECRET_KEY[:20]
return Hashids(salt=salt, min_length=12)
@classmethod
def pk_from_hashid(cls, hash):
return cls.get_hashids_object().decode(hash)[0]
@property
def hashid(self):
return self.get_hashids_object().encode(self.id)
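# Hedged usage sketch (requires a configured Django project and the hashids
# package); "Article" below is a hypothetical model.
#
#   class Article(ModelHashIdMixin, models.Model):
#       title = models.CharField(max_length=100)
#
#   article = Article.objects.get(pk=42)
#   token = article.hashid                # 12+ character public identifier
#   pk = Article.pk_from_hashid(token)    # -> 42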
|
#!/usr/bin/env python3
from itertools import combinations
import os
from tqdm import tqdm
import re
characters = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F',
'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '!',
'"', '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=', '>', '?', '@',
'[', '\\', ']', '^', '_', '`', '{', '|', '}', '~', ' ', '\t']
def console():
isDone = False
draw()
while isDone == False:
result = int(input(
"Pick an option\n[1] Create a normal wordlist\n[2] Create a unique wordlist\n[3] Help\n[4] Quit program\n>>> "))
if result == 1:
isFinished = False
while isFinished == False:
limit = int(input("How many passwords would you like in this wordlist? (Do not exceed 10,000,000): "))
if limit > 10000000:
print('[-] Limit too high')
else:
default_wordlist(limit)
isFinished = True
elif result == 2:
patterns = []
            wifi_name = input("Wifi network name: ")
while True:
pattern = input("Input pattern of possible passwords, type 'done' when done: ")
if pattern == 'done':
break
else:
patterns.append(pattern)
continue
print('[+] Preparing wordlist...')
wordlistPrep(wifi_name, patterns)
print('[+] Thank you for using WordGen!')
isDone = True
elif result == 3:
help()
elif result == 4:
print('[+] Thank you for using WordGen!')
isDone = True
else:
print("[-] Invalid option")
def default_wordlist(limit):
with open(os.getcwd() + '/default-wordlist.txt', 'r') as file:
listing = file.readlines()
text = ''
counter = 0
for i in listing:
if counter < limit:
i = i.rstrip()
i = i[:-1]
i = i + '\n'
text += i
counter += 1
else:
break
cwd = os.getcwd()
dist = '/dist'
path = cwd + dist
try:
os.mkdir(path)
os.chdir(path)
file = open(path + '/new-wordlist.txt', 'w')
file.write(text)
print('[+] Wordlist saved in ' + path + ' as new-wordlist.txt')
except:
os.chdir(path)
file = open(path + '/new-wordlist.txt', 'w')
file.write(text)
print('[+] Wordlist saved in ' + path + ' as new-wordlist.txt')
def wordlistPrep(wifi_name, patterns):  # wifi_name is collected at the prompt but not yet used in generation
pass_counter = 0
strings = ''
text = ''
for w in tqdm(patterns):
pattern = re.compile(r'{}'.format(w))
for x in list(combinations(characters, 5)):
j = ''.join(x)
strings += j
pass_counter += 1
listing = pattern.findall(strings)
for i in listing:
text += i + '\n'
print("[+] Wordlist created")
saveFile(text)
def saveFile(text):
counter = 0
cwd = os.getcwd()
dist = '/dist'
path = cwd + dist
while True:
try:
os.mkdir(path)
os.chdir(path)
file = open(path + '/wordlist.txt', 'w')
file.write(text)
print('[+] Wordlist saved in ' + path + ' as wordlist.txt')
break
except:
counter += 1
            path = cwd + dist + str(counter)
def draw():
print("""
*************************************************
* *
* Welcome to *
* *
* WordGen *
* *
*************************************************
""")
def help():
print("""
**************
* *
* Help *
* *
**************
Limits:
- This will show the amount of passwords in the wordlist
- Ex. A limit of 100,000 would generate a wordlist with 100,000 passwords
Patterns:
- This gives the generated passwords structure, and you can give multiple patterns
- Patterns must be inputted with keywords and periods
- Ex. '....example...'
- This gives a bunch of random passwords with random characters replacing the dots
""")
console()
|
# -*- coding: utf-8 -*-
# (C) shan weijia, 2018
# All rights reserved
'''Description '''
__author__ = 'shan weijia <shanweijia@jiaaocap.com>'
__time__ = '2018/12/14 4:46 PM'
from flask import Blueprint
from base.base_api import BaseApi
from global_reference import app
user_buleprint = Blueprint("user", __name__)
user_api = BaseApi(user_buleprint)
from . import user
app.register_blueprint(user_buleprint, url_prefix="/user")
|
from unittest import TestCase
import pandas as pd
import pandas.testing as tm
from pytz import UTC
from exchange_calendars.weekday_calendar import WeekdayCalendar
from .test_exchange_calendar import ExchangeCalendarTestBase
class WeekdayCalendarTestCase(ExchangeCalendarTestBase, TestCase):
answer_key_filename = "24-5"
calendar_class = WeekdayCalendar
start_date = pd.Timestamp("2018-01-01", tz=UTC)
end_date = pd.Timestamp("2018-12-31", tz=UTC)
MAX_SESSION_HOURS = 24
GAPS_BETWEEN_SESSIONS = False
HAVE_EARLY_CLOSES = False
MINUTE_INDEX_TO_SESSION_LABELS_START = pd.Timestamp("2018-01-01", tz=UTC)
MINUTE_INDEX_TO_SESSION_LABELS_END = pd.Timestamp("2018-04-04", tz=UTC)
DAYLIGHT_SAVINGS_DATES = ["2018-04-05", "2018-11-01"]
def get_session_block(self):
# This range is chosen specifically because it is "enclosed" by
# adjacent days that are also sessions. This prevents any edge case
# issues when looking at market opens or closes.
return self.calendar.all_sessions[1:4]
def test_open_every_weekday(self):
calendar = self.calendar
dates = pd.date_range(self.start_date, self.end_date, tz=UTC)
tm.assert_index_equal(
calendar.sessions_in_range(dates[0], dates[-1]),
# The pandas weekday is defined as Monday=0 to Sunday=6.
dates[dates.weekday <= 4],
)
def test_open_every_weekday_minute(self):
calendar = self.calendar
minutes = pd.date_range(
self.start_date,
# Our calendar should include all the minutes of this last session.
self.end_date + pd.Timedelta("1 Day") - pd.Timedelta("1 Minute"),
freq="min",
tz=UTC,
)
tm.assert_index_equal(
calendar.minutes_for_sessions_in_range(
self.start_date,
self.end_date,
),
# The pandas weekday is defined as Monday=0 to Sunday=6.
minutes[minutes.weekday <= 4],
)
|
from flask import redirect, Blueprint
from werkzeug.contrib.cache import SimpleCache
import os
import sys
import random
import yaml
cache = SimpleCache()
appbp = Blueprint("appbp", __name__, url_prefix=os.environ.get("URL_PREFIX"))
def get_config():
"""Safely returns the config YAML as a Python dict"""
with open("config.yml", "r") as stream:
try:
return yaml.safe_load(stream)
except Exception as e:
sys.exit(e)
def get_dashboards():
"""Safely returns the dashboard urls YAML as a Python dict"""
with open("dashboards.yml", "r") as stream:
try:
return yaml.safe_load(stream)
except Exception as e:
sys.exit(e)
def get_dashboard_url():
"""Return the dashboard_url and manage the next url"""
config = get_config()
dashboards = get_dashboards()
dashboard_url = cache.get("dashboard_url")
next_dashboard_url = cache.get("next_dashboard_url")
if dashboard_url is None and next_dashboard_url is None:
cache.set("next_dashboard_url", dashboards["urls"][0])
next_dashboard_url = cache.get("next_dashboard_url")
if dashboard_url is None:
cache.set("dashboard_url", cache.get("next_dashboard_url"),
timeout=config["ttl"])
dashboard_url = cache.get("dashboard_url")
if config["rotation"] == "random":
next_url = random.choice(dashboards["urls"])
while next_dashboard_url == next_url:
next_url = random.choice(dashboards["urls"])
else:
url_list = dashboards["urls"]
curr_idx = url_list.index(dashboard_url)
next_url = url_list[(curr_idx + 1) % len(url_list)]
cache.set("next_dashboard_url", next_url)
return dashboard_url
@appbp.route("/")
def index():
"""returns a 302 redirect to a url from dashboards.yml"""
return redirect(get_dashboard_url(), code=302)
@appbp.route("/ping")
def ping():
"""ping endpoint for SD / aliveness checks"""
return "pong"
|
import os
import argparse
import random
import pickle
from shutil import copyfile
from typing import Optional, Callable, List
from torchvision.datasets import VisionDataset
from torchvision.datasets.folder import pil_loader
import numpy as np
from PIL import Image
import torchvision.transforms.functional as TVF
from torchvision import transforms
import torch
from torchvision.models import wide_resnet50_2
from torch.utils.data import DataLoader
from tqdm import tqdm
import torch.nn.functional as F
class ImagePathsDataset(VisionDataset):
def __init__(self, img_paths: List[os.PathLike], transform: Callable):
self.transform = transform
self.imgs_paths = img_paths
def __len__(self):
return len(self.imgs_paths) * 2
def __getitem__(self, idx: int):
image = pil_loader(self.imgs_paths[idx // 2])
image = self.transform(image)
w = image.shape[2]
if idx % 2 == 0:
half = image[:, :, :w//2]
y = 0
else:
half = image[:, :, w//2:]
y = 1
return {"img": half, "label": y}
@torch.no_grad()
def validate(model, dataloader):
model.eval()
accs = []
losses = []
for batch in dataloader:
img, label = batch['img'].to(device), batch['label'].to(device)
preds = model(img).squeeze(1)
loss = F.binary_cross_entropy_with_logits(preds, label.float())
acc = ((preds.sigmoid() > 0.5).long() == label).float().mean()
losses.append(loss.item())
accs.append(acc.item())
return np.mean(losses), np.mean(accs)
if __name__ == '__main__':
random.seed(42)
np.random.seed(42)
torch.manual_seed(42)
torch.cuda.manual_seed_all(42)
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', type=str, help='Path to the dataset directory')
parser.add_argument('--train_ratio', type=float, default=0.15, help='Amount of training images')
parser.add_argument('--val_ratio', type=float, default=0.05, help='Amount of training images')
parser.add_argument('--num_epochs', type=int, default=10, help='Number of training epochs')
parser.add_argument('--batch_size', type=int, default=64, help='Batch size for training/inference')
args = parser.parse_args()
data_dir = args.data_dir
    Image.init()  # ensure Image.EXTENSION is populated with the registered file extensions
    all_img_names = [f for f in os.listdir(data_dir) if os.path.splitext(f)[1].lower() in Image.EXTENSION]
random.shuffle(all_img_names)
NUM_TRAIN_IMGS = int(args.train_ratio * len(all_img_names))
NUM_VAL_IMGS = int(args.val_ratio * len(all_img_names))
img_paths_train = [os.path.join(data_dir, f) for f in all_img_names[:NUM_TRAIN_IMGS]]
img_paths_val = [os.path.join(data_dir, f) for f in all_img_names[NUM_TRAIN_IMGS:NUM_TRAIN_IMGS+NUM_VAL_IMGS]]
model = wide_resnet50_2(pretrained=True)
model.fc = torch.nn.Linear(2048, 1)
optim = torch.optim.Adam([
{'params': [p for n, p in model.named_parameters() if not n.startswith('fc.')], 'lr': 1e-5},
{'params': model.fc.parameters(), 'lr': 1e-4},
])
transform_train = transforms.Compose([
transforms.Resize(256, interpolation=Image.LANCZOS),
transforms.RandomHorizontalFlip(0.5),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
transform_val = transforms.Compose([
transforms.Resize(256, interpolation=Image.LANCZOS),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
dataset_train = ImagePathsDataset(img_paths_train, transform=transform_train)
dataset_val = ImagePathsDataset(img_paths_val, transform=transform_val)
batch_size = args.batch_size
dataloader_train = DataLoader(dataset_train, batch_size=batch_size, shuffle=True, num_workers=5)
dataloader_val = DataLoader(dataset_val, batch_size=batch_size, shuffle=False, num_workers=5)
device = 'cuda'
model = model.to(device)
total_num_epochs = args.num_epochs
for epoch in range(total_num_epochs):
pbar = tqdm(enumerate(dataloader_train), total=len(dataloader_train))
for i, batch in pbar:
model.train()
img, label = batch['img'].to(device), batch['label'].to(device)
preds = model(img).squeeze(1)
loss = F.binary_cross_entropy_with_logits(preds, label.float())
acc = ((preds.sigmoid() > 0.5).long() == label).float().mean()
optim.zero_grad()
loss.backward()
optim.step()
pbar.set_description(f'Epoch {epoch}. Loss: {loss.detach().item():.03f}. Acc: {acc.cpu().item():.03f}')
val_loss, val_acc = validate(model, dataloader_val)
print(f'Val loss: {val_loss:.03f}. Val acc: {val_acc: .03f}')
### Testing ###
img_paths_test = [os.path.join(data_dir, f) for f in all_img_names[NUM_TRAIN_IMGS+NUM_VAL_IMGS:]]
transform_test = transforms.Compose([
transforms.Resize(256, interpolation=Image.LANCZOS),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
dataset_test = ImagePathsDataset(img_paths_test, transform=transform_test)
dataloader_test = DataLoader(dataset_test, batch_size=args.batch_size, shuffle=False, num_workers=10)
scores = []
model.eval()
with torch.no_grad():
for batch in tqdm(dataloader_test):
img = batch['img'].to(device)
preds = model(img).sigmoid()
# We compute the scores as the maximum between left and right
# Because even if one side is easily predictable, then it will be very
# difficult to connect/continue it
curr_scores = preds.view(-1, 2).max(dim=1)[0]
scores.extend(curr_scores.cpu().tolist())
assert len(scores) == len(img_paths_test)
print(f'[{data_dir}] Average score on the test set:', np.mean(scores))
save_dir = f'{data_dir}_spatial_inv'
dataset_name = os.path.basename(data_dir)
os.makedirs(save_dir, exist_ok=True)
### Preprocessing data and saving ###
with open(f'{save_dir}/{dataset_name}_scores.pkl', 'wb') as f:
pickle.dump(scores, f)
for threshold in [0.5, 0.7, 0.95, 0.99]:
final_img_paths = np.array(img_paths_test)[np.array(scores) < threshold].tolist()
target_dir = f'{save_dir}/{dataset_name}_t_{threshold}'
os.makedirs(target_dir, exist_ok=True)
for src_img_path in tqdm(final_img_paths):
trg_img_path = os.path.join(target_dir, os.path.basename(src_img_path))
copyfile(src_img_path, trg_img_path)
|
from website import create_app
# This is the entry point for Lambda.
# The whole point is to expose a Flask app called 'app';
# don't call run()!
app = create_app()
|
import numpy as np
from typing import List
from src.traffic_world import TrafficWorld
def IDM_acceleration(bumper_distance: float,
lead_vehicle_velocity: float,
current_speed: float,
desired_speed: float,
idm_params: dict = None) -> float:
''' predict the trajectory of a vehicle based on Intelligent Driver Model
based on: Kesting, A., Treiber, M., & Helbing, D. (2010). Enhanced intelligent driver model to access the impact of driving strategies
on traffic capacity. Philosophical Transactions of the Royal Society A: Mathematical, Physical and Engineering Sciences
v_0: desired speed
delta: free acceleration exponent
T: desired time gap
S_0: jam distance
'''
default_idm_params = {
"free_acceleration_exponent": 4,
"desired_time_gap": 2.0,
"jam_distance": 2.0,
"maximum_acceleration": 1.4,
"desired_deceleration": 2.0,
"coolness_factor": 0.99,
}
if idm_params is not None:
for param in idm_params:
try:
default_idm_params[param] = idm_params[param]
except KeyError:
raise Exception("Invalid IDM Param: check if param key correct")
# set variable to match the paper for ease of reading
v_0 = desired_speed
v = current_speed
s = bumper_distance
delta = default_idm_params["free_acceleration_exponent"]
T = default_idm_params["desired_time_gap"]
s_0 = default_idm_params["jam_distance"]
a = default_idm_params["maximum_acceleration"]
b = default_idm_params["desired_deceleration"]
c = default_idm_params["coolness_factor"]
delta_v = v - lead_vehicle_velocity
    def s_star(v, delta_v, s_0=s_0, a=a, b=b, T=T):
        ''' Desired minimum gap s* (deceleration strategy) [eq 2.2]
        '''
        desired_gap = s_0 + v * T + v * delta_v / (2 * np.sqrt(a * b))
        return desired_gap
a_IDM = a * (1 - (v / v_0)**delta - (s_star(v, delta_v) / s)**2) #[eq 2.1]
return a_IDM
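# Quick illustrative check (values are made up): a vehicle travelling at 20 m/s,
# 30 m behind a leader doing 15 m/s while desiring 25 m/s, should receive a
# negative (braking) acceleration from IDM_acceleration with the default params:
#
#   a = IDM_acceleration(bumper_distance=30.0, lead_vehicle_velocity=15.0,
#                        current_speed=20.0, desired_speed=25.0)
#   assert a < 0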
def CAH_acceleration():
''' constant-acceleration heuristic '''
raise NotImplementedError()
a_CAH = None
return a_CAH
def IDM_trajectory_prediction(veh, X_0: np.array, X_lead=None, desired_speed=None, idm_params=None):
''' Compute an IDM trajectory for a vehicle based on a speed following vehicle
'''
if X_lead is None:
X_lead = X_0 + 99999
if desired_speed is None:
desired_speed = veh.max_v
    if idm_params is None:
        idm_params = {}
    idm_params["maximum_acceleration"] = veh.max_acceleration  # correct for previous multiple in dt
bumper_distance = X_lead[0] - X_0[0] - veh.L
current_speed = X_0[4] * np.cos(X_0[2])
lead_vehicle_velocity = X_lead[4] * np.cos(X_lead[2])
a_IDM = IDM_acceleration(bumper_distance, lead_vehicle_velocity, current_speed, desired_speed, idm_params)
U_ego = np.zeros((2, 1))
U_ego[0, 0] = 0 # assume no steering
U_ego[1, 0] = a_IDM * veh.dt # control is change in velocity (discrete)
x, x_des = veh.forward_simulate_all(X_0, U_ego)
return U_ego, x, x_des
def MOBIL_lanechange(driver_x0: np.array,
driver_veh,
all_other_x0: List[np.array],
all_other_veh,
world: TrafficWorld,
use_desired_lane=True,
MOBIL_params: dict = None,
IDM_params: dict = None):
''' MOBIL lane changing rules '''
default_MOBIL_params = {
"politeness_factor": 0.5,
"changing_threshold": 0.1,
"maximum_safe_deceleration": 4,
"bias_for_right_lane": 0.3
}
if MOBIL_params:
for param in MOBIL_params:
try:
default_MOBIL_params[param] = MOBIL_params[param]
except KeyError:
raise Exception("Key Error: Check if MOBIL Param is correct")
p = default_MOBIL_params["politeness_factor"]
a_thr = default_MOBIL_params["changing_threshold"]
b_safe = default_MOBIL_params["maximum_safe_deceleration"]
a_bias = default_MOBIL_params["bias_for_right_lane"]
driver_old_lane = world.get_lane_from_x0(driver_x0)
driver_new_lanes = [li for li in range(world.n_lanes) if li != driver_old_lane]
best_new_lane = None
for new_lane in driver_new_lanes:
if use_desired_lane:
new_follower_idx = get_prev_vehicle_from_desired_lane(driver_x0, all_other_x0, all_other_veh, new_lane)
new_leader_idx = get_next_vehicle_from_desired_lane(driver_x0, all_other_x0, all_other_veh, new_lane)
old_follower_idx = get_prev_vehicle_from_desired_lane(driver_x0, all_other_x0, all_other_veh,
driver_old_lane)
old_leader_idx = get_next_vehicle_from_desired_lane(driver_x0, all_other_x0, all_other_veh, driver_old_lane)
else:
new_follower_idx = get_prev_vehicle_lane(driver_x0, all_other_x0, new_lane, world)
new_leader_idx = get_next_vehicle_lane(driver_x0, all_other_x0, new_lane, world)
old_follower_idx = get_prev_vehicle_lane(driver_x0, all_other_x0, driver_old_lane, world)
old_leader_idx = get_next_vehicle_lane(driver_x0, all_other_x0, driver_old_lane, world)
# Calculate the new acceleration of the new follower
follower_lead_pairs = {
"newfollower_after": (new_follower_idx, -1),
"newfollower_before": (new_follower_idx, new_leader_idx),
"oldfollower_before": (old_follower_idx, -1),
"oldfollower_after": (old_follower_idx, old_leader_idx),
"driver_after": (-1, new_leader_idx),
"driver_before": (-1, old_leader_idx)
}
accel = {}
lane_gap = True
for key, (follower_idx, lead_idx) in follower_lead_pairs.items():
if follower_idx is None:
# no follower, just set acceleration = 0 for before & after
accel[key] = 0
continue
elif follower_idx == -1:
follower_x0 = driver_x0
follower_veh = driver_veh
else:
follower_x0 = all_other_x0[follower_idx]
follower_veh = all_other_veh[follower_idx]
current_speed = follower_x0[4] * np.cos(follower_x0[2])
desired_speed = follower_veh.max_v
if lead_idx is None: # no cars ahead
bumper_distance = 999999 - follower_x0[0] - follower_veh.L
lead_velocity = 999999
elif lead_idx == -1: # lead vehicle is the driver vehicle
bumper_distance = driver_x0[0] - follower_x0[0] - follower_veh.L
lead_velocity = driver_x0[4] * np.cos(driver_x0[2])
else:
bumper_distance = all_other_x0[lead_idx][0] - follower_x0[0] - follower_veh.L
lead_velocity = all_other_x0[lead_idx][4] * np.cos(all_other_x0[lead_idx][2])
if bumper_distance < 0:
lane_gap = False #checks if there is even a gap between lead vehicle in next lane
a_follower = IDM_acceleration(bumper_distance, lead_velocity, current_speed, desired_speed, IDM_params)
accel[key] = a_follower
safety_criteria = accel["newfollower_after"] >= -b_safe
driver_incentive = accel["driver_after"] - accel["driver_before"]
new_follower_incentive = accel["newfollower_after"] - accel["newfollower_before"]
old_follower_incentive = accel["oldfollower_after"] - accel["oldfollower_before"]
incentive_criteria = (driver_incentive + p * (new_follower_incentive + old_follower_incentive)) >= (a_thr)
if incentive_criteria and safety_criteria and lane_gap:
best_new_lane = new_lane
return best_new_lane, accel
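# A small self-contained illustration of the MOBIL safety and incentive checks
# applied above. All acceleration values below are made-up numbers for
# demonstration only, not outputs of the functions in this file.
def _mobil_criteria_example():
    p, a_thr, b_safe = 0.5, 0.1, 4.0  # politeness, changing threshold, max safe deceleration
    accel = {
        "driver_before": 0.2, "driver_after": 0.9,            # driver gains 0.7 m/s^2
        "newfollower_before": 0.5, "newfollower_after": 0.1,  # new follower loses 0.4 m/s^2
        "oldfollower_before": 0.0, "oldfollower_after": 0.3,  # old follower gains 0.3 m/s^2
    }
    safety = accel["newfollower_after"] >= -b_safe
    incentive = (accel["driver_after"] - accel["driver_before"]
                 + p * ((accel["newfollower_after"] - accel["newfollower_before"])
                        + (accel["oldfollower_after"] - accel["oldfollower_before"]))) >= a_thr
    return safety and incentive  # True here: the lane change would be accepted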
def get_lead_vehicle(x0: np.array, other_x0: List[np.array], world: TrafficWorld):
''' Get lead vehicle wrt a car at position x0
Input:
x0 (np.array) : current position of vehicle
other_x0 list[np.array]: list of positions of other vehicles on the road
        Output: (int) index of the lead vehicle, or None if no vehicle is ahead in the same lane
'''
n_other = len(other_x0)
ego_lane = world.get_lane_from_x0(x0)
veh_same_lane = [world.get_lane_from_x0(other_x0[i]) == ego_lane for i in range(n_other)]
veh_in_front = [(other_x0[i][0] - x0[0]) > 0 for i in range(n_other)]
vehicles_sorted = np.argsort([(other_x0[i][0] - x0[0])**2 for i in range(n_other)])
vehicles_sorted_valid = [idx for idx in vehicles_sorted if veh_same_lane[idx] and veh_in_front[idx]]
if len(vehicles_sorted_valid) > 0:
return vehicles_sorted_valid[0]
else:
return None
def get_next_vehicle_lane(x0: np.array, all_other_x0: List[np.array], lane: int, world: TrafficWorld) -> int:
''' Get lead vehicle wrt a car at position x0 that is in specific lane
Input:
x0 (np.array) : current position of vehicle
other_x0 list[np.array]: list of positions of other vehicles on the road
lane (int): desired lane within which vehicle should be returned
        Output: (int) index of the lead vehicle in that lane, or None if there is none
'''
idx_in_lane = [world.get_lane_from_x0(all_other_x0[idx]) == lane for idx in range(len(all_other_x0))]
idx_forward = [all_other_x0[idx][0] - x0[0] > 0 for idx in range(len(all_other_x0))]
idx_dist_sorted = np.argsort([np.abs(all_other_x0[idx][0] - x0[0]) for idx in range(len(all_other_x0))])
idx_dist_sorted_valid = [idx for idx in idx_dist_sorted if idx_in_lane[idx] and idx_forward[idx]]
if len(idx_dist_sorted_valid) > 0:
return idx_dist_sorted_valid[0]
else:
return None
def get_prev_vehicle_lane(x0: np.array, all_other_x0: List[np.array], lane: int, world: TrafficWorld) -> int:
''' Get previous vehicle wrt a car at position x0 that is in specific lane
Input:
x0 (np.array) : current position of vehicle
other_x0 list[np.array]: list of positions of other vehicles on the road
lane (int): desired lane within which vehicle should be returned
        Output: (int) index of the previous (trailing) vehicle in that lane, or None if there is none
'''
idx_in_lane = [world.get_lane_from_x0(all_other_x0[idx]) == lane for idx in range(len(all_other_x0))]
idx_forward = [all_other_x0[idx][0] - x0[0] <= 0 for idx in range(len(all_other_x0))]
idx_dist_sorted = np.argsort([np.abs(all_other_x0[idx][0] - x0[0]) for idx in range(len(all_other_x0))])
idx_dist_sorted_valid = [idx for idx in idx_dist_sorted if idx_in_lane[idx] and idx_forward[idx]]
if len(idx_dist_sorted_valid) > 0:
return idx_dist_sorted_valid[0]
else:
return None
def get_prev_vehicle_from_desired_lane(x0: np.array, all_other_x0: List[np.array], all_other_vehicles,
lane: int) -> int:
    ''' Find the previous (trailing) vehicle in the given lane, where lane membership is determined by each vehicle's advertised desired lane.
This should account for vehicles that are in the middle of a lane change
'''
idx_in_lane = [all_other_vehicles[idx].desired_lane == lane for idx in range(len(all_other_x0))]
idx_forward = [all_other_x0[idx][0] - x0[0] <= 0 for idx in range(len(all_other_x0))]
idx_dist_sorted = np.argsort([np.abs(all_other_x0[idx][0] - x0[0]) for idx in range(len(all_other_x0))])
idx_dist_sorted_valid = [idx for idx in idx_dist_sorted if idx_in_lane[idx] and idx_forward[idx]]
if len(idx_dist_sorted_valid) > 0:
return idx_dist_sorted_valid[0]
else:
return None
def get_next_vehicle_from_desired_lane(x0: np.array, all_other_x0: List[np.array], all_other_vehicles,
lane: int) -> int:
    ''' Find the next (lead) vehicle in the given lane, where lane membership is determined by each vehicle's advertised desired lane.
        This should account for vehicles that are in the middle of a lane change
'''
idx_in_lane = [all_other_vehicles[idx].desired_lane == lane for idx in range(len(all_other_x0))]
idx_forward = [all_other_x0[idx][0] - x0[0] > 0 for idx in range(len(all_other_x0))]
idx_dist_sorted = np.argsort([np.abs(all_other_x0[idx][0] - x0[0]) for idx in range(len(all_other_x0))])
idx_dist_sorted_valid = [idx for idx in idx_dist_sorted if idx_in_lane[idx] and idx_forward[idx]]
if len(idx_dist_sorted_valid) > 0:
return idx_dist_sorted_valid[0]
else:
return None
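# A toy, self-contained illustration of the nearest-vehicle search pattern shared
# by the helpers above: boolean masks for lane membership and longitudinal
# direction, combined with an argsort over distance. Positions and lanes below
# are made-up example values.
def _nearest_vehicle_search_example():
    ego_x, ego_lane = 10.0, 1
    other_x = np.array([5.0, 12.0, 30.0, 11.0])  # longitudinal positions
    other_lane = np.array([1, 1, 1, 0])          # lane indices
    in_lane = other_lane == ego_lane
    in_front = (other_x - ego_x) > 0
    order = np.argsort(np.abs(other_x - ego_x))  # closest first
    valid = [i for i in order if in_lane[i] and in_front[i]]
    return valid[0] if valid else None           # -> 1 (the vehicle at x=12 in lane 1)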
#!/usr/bin/env python
import base64
import gzip
from httplib import BadStatusLine
import os
import urllib2
import sys
import threading
from os.path import basename, splitext
from multiprocessing import Process
from pprint import pprint
sys.path = ["lib", "pytests", "pysystests"] + sys.path
if sys.hexversion < 0x02060000:
print "Testrunner requires version 2.6+ of python"
sys.exit()
import re
import time
import unittest
import logging.config
from threading import Thread, Event
from xunit import XUnitTestResult
from TestInput import TestInputParser, TestInputSingleton
from optparse import OptionParser, OptionGroup
from scripts.collect_server_info import cbcollectRunner, couch_dbinfo_Runner
from scripts.measure_sched_delays import SchedDelays
from scripts.getcoredumps import Getcoredumps, Clearcoredumps
import signal
import shutil
import glob
import xml.dom.minidom
import logging
log = logging.getLogger(__name__)
logging.info(__name__)
print("*** TestRunner ***")
def usage(err=None):
    if err:
        print err
    print """\
Syntax: testrunner [options]
Examples:
  ./testrunner -i tmp/local.ini -t performance.perf.DiskDrainRate
  ./testrunner -i tmp/local.ini -t performance.perf.DiskDrainRate.test_9M
"""
    sys.exit(1 if err else 0)
def parse_args(argv):
parser = OptionParser()
parser.add_option("-q", action="store_false", dest="verbose")
tgroup = OptionGroup(parser, "TestCase/Runlist Options")
tgroup.add_option("-i", "--ini", dest="ini",
help="Path to .ini file containing server information,e.g -i tmp/local.ini")
tgroup.add_option("-c", "--config", dest="conf",
help="Config file name (located in the conf subdirectory), "
"e.g -c py-view.conf")
tgroup.add_option("-t", "--test", dest="testcase",
help="Test name (multiple -t options add more tests) e.g -t "
"performance.perf.DiskDrainRate")
tgroup.add_option("-d", "--include_tests", dest="include_tests",
help="Value can be 'failed' (or) 'passed' (or) 'failed=<junit_xml_path (or) "
"jenkins_build_url>' (or) 'passed=<junit_xml_path or "
"jenkins_build_url>' (or) 'file=<filename>' (or) '<regular "
"expression>' to include tests in the run. Use -g option to search "
"entire conf files. e.g. -d 'failed' or -d 'failed=report.xml' or -d "
"'^2i.*nodes_init=2.*'")
tgroup.add_option("-e", "--exclude_tests", dest="exclude_tests",
help="Value can be 'failed' (or) 'passed' (or) 'failed=<junit_xml_path (or) "
"jenkins_build_url>' (or) 'passed=<junit_xml_path (or) "
"jenkins_build_url>' or 'file=<filename>' (or) '<regular expression>' "
"to exclude tests in the run. Use -g option to search entire conf "
"files. e.g. -e 'passed'")
tgroup.add_option("-r", "--rerun", dest="rerun",
help="Rerun fail or pass tests with given =count number of times maximum. "
"\ne.g. -r 'fail=3'")
tgroup.add_option("-g", "--globalsearch", dest="globalsearch",
help="Option to get tests from given conf file path pattern, "
"like conf/**/*.conf. Useful for include or exclude conf files to "
"filter tests. e.g. -g 'conf/**/.conf'",
default="")
tgroup.add_option("-m", "--merge", dest="merge",
help="Merge the report files path pattern, like logs/**/.xml. e.g. -m '["
"logs/**/*.xml]'",
default="")
parser.add_option_group(tgroup)
parser.add_option("-p", "--params", dest="params",
help="Optional key=value parameters, comma-separated -p k=v,k2=v2,...",
default="")
parser.add_option("-n", "--noop", action="store_true",
help="NO-OP - emit test names, but don't actually run them e.g -n true")
parser.add_option("-l", "--log-level", dest="loglevel", default="INFO",
help="e.g -l info,warning,error")
options, args = parser.parse_args()
tests = []
test_params = {}
setLogLevel(options.loglevel)
log.info("Checking arguments...")
if not options.ini:
parser.error("Please specify an .ini file (-i) option.")
parser.print_help()
else:
test_params['ini'] = options.ini
if not os.path.exists(options.ini):
sys.exit("ini file {0} was not found".format(options.ini))
test_params['cluster_name'] = splitext(os.path.basename(options.ini))[0]
if not options.testcase and not options.conf and not options.globalsearch and not options.include_tests and not options.exclude_tests:
parser.error("Please specify a configuration file (-c) or a test case (-t) or a globalsearch (-g) option.")
parser.print_help()
if options.conf and not options.globalsearch:
parse_conf_file(options.conf, tests, test_params)
if options.globalsearch:
parse_global_conf_file(options.globalsearch, tests, test_params)
if options.include_tests:
tests = process_include_or_filter_exclude_tests("include", options.include_tests, tests,
options)
if options.exclude_tests:
tests = process_include_or_filter_exclude_tests("exclude", options.exclude_tests, tests, options)
if options.testcase:
tests.append(options.testcase)
if options.noop:
print("---\n"+"\n".join(tests)+"\n---\nTotal="+str(len(tests)))
sys.exit(0)
return tests, test_params, options.ini, options.params, options
def setLogLevel(log_level):
if log_level and log_level.lower() == 'info':
log.setLevel(logging.INFO)
elif log_level and log_level.lower() == 'warning':
log.setLevel(logging.WARNING)
elif log_level and log_level.lower() == 'debug':
log.setLevel(logging.DEBUG)
elif log_level and log_level.lower() == 'critical':
log.setLevel(logging.CRITICAL)
elif log_level and log_level.lower() == 'fatal':
log.setLevel(logging.FATAL)
else:
log.setLevel(logging.NOTSET)
def process_include_or_filter_exclude_tests(filtertype, option, tests, options):
if filtertype == 'include' or filtertype == 'exclude':
if option.startswith('failed') or option.startswith('passed') or option.startswith("http://") or option.startswith("https://"):
passfail = option.split("=")
tests_list = []
if len(passfail) == 2:
if passfail[1].startswith("http://") or passfail[1].startswith("https://"):
tp, tf = parse_testreport_result_xml(passfail[1])
else:
tp, tf = parse_junit_result_xml(passfail[1])
elif option.startswith("http://") or option.startswith("https://"):
tp, tf = parse_testreport_result_xml(option)
tests_list=tp+tf
else:
tp, tf = parse_junit_result_xml()
if option.startswith('failed') and tf:
tests_list = tf
elif option.startswith('passed') and tp:
tests_list = tp
if filtertype == 'include':
tests = tests_list
else:
for line in tests_list:
isexisted, t = check_if_exists_with_params(tests, line, options.params)
if isexisted:
tests.remove(t)
elif option.startswith("file="):
filterfile = locate_conf_file(option.split("=")[1])
if filtertype == 'include':
tests_list = []
if filterfile:
for line in filterfile:
tests_list.append(line.strip())
tests = tests_list
else:
for line in filterfile:
isexisted, t = check_if_exists_with_params(tests, line.strip(), options.params)
if isexisted:
tests.remove(t)
else: # pattern
if filtertype == 'include':
tests = [i for i in tests if re.search(option, i)]
else:
tests = [i for i in tests if not re.search(option, i)]
else:
log.warning("Warning: unknown filtertype given (only include/exclude supported)!")
return tests
def create_log_file(log_config_file_name, log_file_name, level):
tmpl_log_file = open("logging.conf.sample")
log_file = open(log_config_file_name, "w")
log_file.truncate()
for line in tmpl_log_file:
newline = line.replace("@@LEVEL@@", level)
newline = newline.replace("@@FILENAME@@", log_file_name.replace('\\', '/'))
log_file.write(newline)
log_file.close()
tmpl_log_file.close()
def append_test(tests, name):
    # Expand wildcard test names via unittest's loader; a few tests whose names
    # legitimately contain special chars are appended as-is.
    prefix = ".".join(name.split(".")[0:-1])
if "test_restore_with_filter_regex" not in name and \
"test_restore_with_rbac" not in name and \
"test_backup_with_rbac" not in name and \
name.find('*') > 0:
for t in unittest.TestLoader().loadTestsFromName(name.rstrip('.*')):
tests.append(prefix + '.' + t._testMethodName)
else:
tests.append(name)
def locate_conf_file(filename):
log.info("Conf filename: %s" % filename)
if filename:
if os.path.exists(filename):
return file(filename)
if os.path.exists("conf{0}{1}".format(os.sep, filename)):
return file("conf{0}{1}".format(os.sep, filename))
return None
def parse_conf_file(filename, tests, params):
"""Parse a configuration file.
Configuration files contain information and parameters about test execution.
    The file should follow this order:
Part1: Tests to execute.
Part2: Parameters to override the defaults.
@e.x:
TestModuleName1:
TestName1
TestName2
....
TestModuleName2.TestName3
TestModuleName2.TestName4
...
params:
items=4000000
num_creates=400000
....
"""
f = locate_conf_file(filename)
if not f:
usage("unable to locate configuration file: " + filename)
prefix = None
for line in f:
stripped = line.strip()
if stripped.startswith("#") or len(stripped) <= 0:
continue
if stripped.endswith(":"):
prefix = stripped.split(":")[0]
log.info("Test prefix: {0}".format(prefix))
continue
name = stripped
if prefix and prefix.lower() == "params":
args = stripped.split("=", 1)
if len(args) == 2:
params[args[0]] = args[1]
continue
elif line.startswith(" ") and prefix:
name = prefix + "." + name
prefix = ".".join(name.split(",")[0].split('.')[0:-1])
append_test(tests, name)
# If spec parameter isn't defined, testrunner uses the *.conf filename for
# the spec value
if 'spec' not in params:
params['spec'] = splitext(basename(filename))[0]
params['conf_file'] = filename
def parse_global_conf_file(dirpath, tests, params):
log.info("dirpath="+dirpath)
if os.path.isdir(dirpath):
dirpath=dirpath+os.sep+"**"+os.sep+"*.conf"
log.info("Global filespath=" + dirpath)
conf_files = glob.glob(dirpath)
for file in conf_files:
parse_conf_file(file, tests, params)
def check_if_exists(test_list, test_line):
new_test_line = ''.join(sorted(test_line))
for t in test_list:
t1 = ''.join(sorted(t))
if t1 == new_test_line:
return True, t
return False, ""
def check_if_exists_with_params(test_list, test_line, test_params):
new_test_line = ''.join(sorted(test_line))
for t in test_list:
if test_params:
t1 = ''.join(sorted(t+","+test_params.strip()))
else:
t1 = ''.join(sorted(t))
if t1 == new_test_line:
return True, t
return False, ""
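# Illustration of the order-insensitive comparison used above: a test line is
# reduced to a sorted character string, so the same test with its parameters
# written in a different order still matches. Example values are made up.
# Note this is a heuristic: any anagram of the same characters would also match.
def _check_if_exists_example():
    existing = ["mod.Class.test_a,items=10,nodes=2"]
    line = ''.join(sorted("mod.Class.test_a,nodes=2,items=10"))  # same test, reordered params
    for t in existing:
        if ''.join(sorted(t)) == line:
            return True, t
    return False, ""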
def transform_and_write_to_file(tests_list, filename):
new_test_list = []
for test in tests_list:
line = filter_fields(test)
line = line.rstrip(",")
isexisted, _ = check_if_exists(new_test_list, line)
if not isexisted:
new_test_list.append(line)
file = open(filename, "w+")
for line in new_test_list:
file.writelines((line) + "\n")
file.close()
return new_test_list
def getNodeText(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def parse_testreport_result_xml(filepath=""):
if filepath.startswith("http://") or filepath.startswith("https://"):
url_path = filepath+"/testReport/api/xml?pretty=true"
jobnamebuild = filepath.split('/')
if not os.path.exists('logs'):
os.mkdir('logs')
        newfilepath = 'logs' + os.sep + '_'.join(jobnamebuild[-3:]) + "_testresult.xml"
log.info("Downloading " + url_path +" to "+newfilepath)
try:
filedata = urllib2.urlopen(url_path)
datatowrite = filedata.read()
filepath = newfilepath
with open(filepath, 'wb') as f:
f.write(datatowrite)
except Exception as ex:
log.error("Error:: "+str(ex)+"! Please check if " + url_path + " URL is accessible!! "
"Exiting...")
sys.exit(1)
if filepath == "":
filepath = "logs/**/*.xml"
log.info("Loading result data from "+filepath)
xml_files = glob.glob(filepath)
passed_tests=[]
failed_tests=[]
for xml_file in xml_files:
log.info("-- "+xml_file+" --")
doc = xml.dom.minidom.parse(xml_file)
testresultelem = doc.getElementsByTagName("testResult")
testsuitelem = testresultelem[0].getElementsByTagName("suite")
for ts in testsuitelem:
testcaseelem = ts.getElementsByTagName("case")
for tc in testcaseelem:
tcname = getNodeText((tc.getElementsByTagName("name")[0]).childNodes)
tcstatus = getNodeText((tc.getElementsByTagName("status")[0]).childNodes)
                if tcstatus == 'PASSED':
                    passed_tests.append(tcname)
                else:
                    failed_tests.append(tcname)
if failed_tests:
failed_tests = transform_and_write_to_file(failed_tests,"failed_tests.conf")
if passed_tests:
passed_tests = transform_and_write_to_file(passed_tests, "passed_tests.conf")
return passed_tests, failed_tests
def parse_junit_result_xml(filepath=""):
if filepath.startswith("http://") or filepath.startswith("https://"):
        return parse_testreport_result_xml(filepath)
if filepath == "":
filepath = "logs/**/*.xml"
log.info("Loading result data from "+filepath)
xml_files = glob.glob(filepath)
passed_tests=[]
failed_tests=[]
for xml_file in xml_files:
log.info("-- "+xml_file+" --")
doc = xml.dom.minidom.parse(xml_file)
testsuitelem = doc.getElementsByTagName("testsuite")
for ts in testsuitelem:
tsname = ts.getAttribute("name")
testcaseelem = ts.getElementsByTagName("testcase")
failed=False
for tc in testcaseelem:
tcname = tc.getAttribute("name")
tcerror = tc.getElementsByTagName("error")
for tce in tcerror:
failed_tests.append(tcname)
failed = True
if not failed:
passed_tests.append(tcname)
if failed_tests:
failed_tests = transform_and_write_to_file(failed_tests,"failed_tests.conf")
if passed_tests:
passed_tests = transform_and_write_to_file(passed_tests, "passed_tests.conf")
return passed_tests, failed_tests
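# For reference, the minimal junit XML shape parse_junit_result_xml expects:
# a <testsuite> element containing <testcase> children, where a nested <error>
# element marks a failed case. This sample is illustrative only.
_SAMPLE_JUNIT_XML = """\
<testsuite name="example.suite" tests="2" errors="1" failures="0" skips="0" time="3.2">
  <testcase name="test_passes" time="1.1"/>
  <testcase name="test_fails" time="2.1">
    <error>AssertionError: something went wrong</error>
  </testcase>
</testsuite>
"""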
def create_headers(username, password):
authorization = base64.encodestring('%s:%s' % (username, password))
return {'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Basic %s' % authorization,
'Accept': '*/*'}
def get_server_logs(input, path):
for server in input.servers:
log.info("grabbing diags from ".format(server.ip))
diag_url = "http://{0}:{1}/diag".format(server.ip, server.port)
log.info(diag_url)
try:
req = urllib2.Request(diag_url)
req.headers = create_headers(input.membase_settings.rest_username,
input.membase_settings.rest_password)
filename = "{0}/{1}-diag.txt".format(path, server.ip)
page = urllib2.urlopen(req)
with open(filename, 'wb') as output:
os.write(1, "downloading {0} ...".format(server.ip))
while True:
buffer = page.read(65536)
if not buffer:
break
output.write(buffer)
os.write(1, ".")
file_input = open('{0}'.format(filename), 'rb')
zipped = gzip.open("{0}.gz".format(filename), 'wb')
zipped.writelines(file_input)
file_input.close()
zipped.close()
os.remove(filename)
log.info("downloaded and zipped diags @ : {0}".format("{0}.gz".format(filename)))
except urllib2.URLError:
log.error("unable to obtain diags from %s" % diag_url)
except BadStatusLine:
log.error("unable to obtain diags from %s" % diag_url)
except Exception as e:
log.error("unable to obtain diags from %s %s" % (diag_url, e))
def get_logs_cluster_run(input, path, ns_server_path):
print "grabbing logs (cluster-run)"
path = path or "."
logs_path = ns_server_path + os.sep + "logs"
try:
shutil.make_archive(path + os.sep + "logs", 'zip', logs_path)
except Exception as e:
log.error("NOT POSSIBLE TO GRAB LOGS (CLUSTER_RUN)")
def get_cbcollect_info(input, path):
for server in input.servers:
print "grabbing cbcollect from {0}".format(server.ip)
path = path or "."
try:
cbcollectRunner(server, path).run()
except Exception as e:
log.error("NOT POSSIBLE TO GRAB CBCOLLECT FROM {0}: {1}".format(server.ip, e))
def get_couch_dbinfo(input, path):
for server in input.servers:
print "grabbing dbinfo from {0}".format(server.ip)
path = path or "."
try:
couch_dbinfo_Runner(server, path).run()
except Exception as e:
log.error("NOT POSSIBLE TO GRAB dbinfo FROM {0}: {1}".format(server.ip, e))
def clear_old_core_dumps(_input, path):
for server in _input.servers:
path = path or "."
try:
Clearcoredumps(server, path).run()
except Exception as e:
log.error("Unable to clear core dumps on {0} : {1}".format(server.ip, e))
def get_core_dumps(_input, path):
ret = False
for server in _input.servers:
print "grabbing core dumps files from {0}".format(server.ip)
path = path or "."
try:
if Getcoredumps(server, path).run():
ret = True
except Exception as e:
log.error("NOT POSSIBLE TO GRAB CORE DUMPS FROM {0} : {1}".\
format(server.ip, e))
return ret
class StoppableThreadWithResult(Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, verbose=None):
super(StoppableThreadWithResult, self).__init__(group=group, target=target,
name=name, args=args, kwargs=kwargs, verbose=verbose)
        self._stop = Event()
        self._return = None
def stop(self):
self._stop.set()
self._Thread__stop()
def stopped(self):
return self._stop.isSet()
def run(self):
if self._Thread__target is not None:
self._return = self._Thread__target(*self._Thread__args,
**self._Thread__kwargs)
def join(self, timeout=None):
        Thread.join(self, timeout=timeout)
return self._return
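# Hypothetical usage sketch for StoppableThreadWithResult (names below are made
# up for illustration): run a callable with a timeout and collect its return value.
def _stoppable_thread_example():
    def _slow_double(x):
        time.sleep(1)
        return 2 * x
    t = StoppableThreadWithResult(target=_slow_double, name="example_thread", args=(21,))
    t.start()
    # Returns 42 if _slow_double finished within the timeout, otherwise None.
    return t.join(timeout=5)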
def runtests(names, options, arg_i, arg_p, runtime_test_params):
log.info("\nNumber of tests to be executed: " + str(len(names)))
BEFORE_SUITE = "suite_setUp"
AFTER_SUITE = "suite_tearDown"
xunit = XUnitTestResult()
# Create root logs directory
abs_path = os.path.dirname(os.path.abspath(sys.argv[0]))
# Create testrunner logs subdirectory
str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
root_log_dir = os.path.join(abs_path, "logs{0}testrunner-{1}".format(os.sep, str_time))
if not os.path.exists(root_log_dir):
os.makedirs(root_log_dir)
results = []
case_number = 1
if "GROUP" in runtime_test_params:
print "Only cases in GROUPs '{0}' will be executed".format(runtime_test_params["GROUP"])
if "EXCLUDE_GROUP" in runtime_test_params:
print "Cases from GROUPs '{0}' will be excluded".format(runtime_test_params["EXCLUDE_GROUP"])
if TestInputSingleton.input.param("get-delays", False):
# start measure_sched_delays on all servers
sd = SchedDelays(TestInputSingleton.input.servers)
sd.start_measure_sched_delays()
for name in names:
start_time = time.time()
argument_split = [a.strip() for a in re.split("[,]?([^,=]+)=", name)[1:]]
params = dict(zip(argument_split[::2], argument_split[1::2]))
# Note that if ALL is specified at runtime then tests which have no groups are still run - just being
# explicit on this
if "GROUP" in runtime_test_params and "ALL" not in runtime_test_params["GROUP"].split(";"):
if 'GROUP' not in params: # params is the .conf file parameters.
# this test is not in any groups so we do not run it
print "test '{0}' skipped, a group was requested and this is not any groups".format(name)
continue
# there is a group for this test case, if that group is not specified at run time then do not run it
elif len( set(runtime_test_params["GROUP"].split(";")) & set(params["GROUP"].split(";")) ) == 0:
print "test '{0}' skipped, is not in the requested group".format(name)
continue
else:
pass # the test was in requested group, will run it
elif "EXCLUDE_GROUP" in runtime_test_params:
if 'GROUP' in params and \
len(set(runtime_test_params["EXCLUDE_GROUP"].split(";")) & set(params["GROUP"].split(";"))) > 0:
print "test '{0}' skipped, is in an excluded group".format(name)
continue
# Create Log Directory
logs_folder = os.path.join(root_log_dir, "test_%s" % case_number)
os.mkdir(logs_folder)
test_log_file = os.path.join(logs_folder, "test.log")
log_config_filename = r'{0}'.format(os.path.join(logs_folder, "test.logging.conf"))
create_log_file(log_config_filename, test_log_file, options.loglevel)
logging.config.fileConfig(log_config_filename)
print "Logs will be stored at {0}".format(logs_folder)
print "\n.{3}testrunner -i {0} -p {1} -t {2}\n"\
.format(arg_i or "", arg_p or "", name, os.sep)
name = name.split(",")[0]
# Update the test params for each test
TestInputSingleton.input.test_params = params
TestInputSingleton.input.test_params.update(runtime_test_params)
TestInputSingleton.input.test_params["case_number"] = case_number
TestInputSingleton.input.test_params["logs_folder"] = logs_folder
print "Test Input params:"
print(TestInputSingleton.input.test_params)
if "get-coredumps" in TestInputSingleton.input.test_params:
if TestInputSingleton.input.param("get-coredumps", True):
clear_old_core_dumps(TestInputSingleton.input, logs_folder)
if case_number == 1:
before_suite_name = "%s.%s" % (name[:name.rfind('.')], BEFORE_SUITE)
try:
print "Run before suite setup for %s" % name
suite = unittest.TestLoader().loadTestsFromName(before_suite_name)
result = unittest.TextTestRunner(verbosity=2).run(suite)
if "get-coredumps" in TestInputSingleton.input.test_params:
if TestInputSingleton.input.param("get-coredumps", True):
if get_core_dumps(TestInputSingleton.input, logs_folder):
result = unittest.TextTestRunner(verbosity=2)._makeResult()
result.errors = [(name, "Failing test : new core dump(s) "
"were found and collected."
" Check testrunner logs folder.")]
log.info("FAIL: New core dump(s) was found and collected")
except AttributeError as ex:
pass
try:
suite = unittest.TestLoader().loadTestsFromName(name)
except AttributeError, e:
print "Test {0} was not found: {1}".format(name, e)
result = unittest.TextTestRunner(verbosity=2)._makeResult()
result.errors = [(name, e.message)]
except SyntaxError, e:
print "SyntaxError in {0}: {1}".format(name, e)
result = unittest.TextTestRunner(verbosity=2)._makeResult()
result.errors = [(name, e.message)]
else:
test_timeout = TestInputSingleton.input.param("test_timeout", None)
            t = StoppableThreadWithResult(target=unittest.TextTestRunner(verbosity=2).run,
                                          name="test_thread",
                                          args=(suite,))
t.start()
result = t.join(timeout=test_timeout)
if "get-coredumps" in TestInputSingleton.input.test_params:
if TestInputSingleton.input.param("get-coredumps", True):
if get_core_dumps(TestInputSingleton.input, logs_folder):
result = unittest.TextTestRunner(verbosity=2)._makeResult()
result.errors = [(name, "Failing test : new core dump(s) "
"were found and collected."
" Check testrunner logs folder.")]
log.info("FAIL: New core dump(s) was found and collected")
if not result:
for t in threading.enumerate():
if t != threading.current_thread():
t._Thread__stop()
result = unittest.TextTestRunner(verbosity=2)._makeResult()
case_number += 1000
print ("========TEST WAS STOPPED DUE TO TIMEOUT=========")
result.errors = [(name, "Test was stopped due to timeout")]
time_taken = time.time() - start_time
# Concat params to test name
# To make tests more readable
params = ''
if TestInputSingleton.input.test_params:
for key, value in TestInputSingleton.input.test_params.items():
if key and value:
params += "," + str(key) + ":" + str(value)
if result.failures or result.errors:
# Immediately get the server logs, if
# the test has failed or has errors
if "get-logs" in TestInputSingleton.input.test_params:
get_server_logs(TestInputSingleton.input, logs_folder)
if "get-logs-cluster-run" in TestInputSingleton.input.test_params:
if TestInputSingleton.input.param("get-logs-cluster-run", True):
# Generate path to ns_server directory
ns_server_path = os.path.normpath(abs_path + os.sep + os.pardir + os.sep + "ns_server")
get_logs_cluster_run(TestInputSingleton.input, logs_folder, ns_server_path)
if "get-cbcollect-info" in TestInputSingleton.input.test_params:
if TestInputSingleton.input.param("get-cbcollect-info", True):
get_cbcollect_info(TestInputSingleton.input, logs_folder)
if "get-couch-dbinfo" in TestInputSingleton.input.test_params and \
TestInputSingleton.input.param("get-couch-dbinfo", True):
get_couch_dbinfo(TestInputSingleton.input, logs_folder)
errors = []
for failure in result.failures:
test_case, failure_string = failure
errors.append(failure_string)
break
for error in result.errors:
test_case, error_string = error
errors.append(error_string)
break
xunit.add_test(name=name, status='fail', time=time_taken,
errorType='membase.error', errorMessage=str(errors),
params=params)
results.append({"result": "fail", "name": name})
else:
xunit.add_test(name=name, time=time_taken, params=params)
results.append({"result": "pass", "name": name, "time": time_taken})
xunit.write("{0}{2}report-{1}".format(os.path.dirname(logs_folder), str_time, os.sep))
xunit.print_summary()
print "testrunner logs, diags and results are available under {0}".format(logs_folder)
case_number += 1
if (result.failures or result.errors) and \
TestInputSingleton.input.param("stop-on-failure", False):
print "test fails, all of the following tests will be skipped!!!"
break
after_suite_name = "%s.%s" % (name[:name.rfind('.')], AFTER_SUITE)
try:
print "Run after suite setup for %s" % name
suite = unittest.TestLoader().loadTestsFromName(after_suite_name)
result = unittest.TextTestRunner(verbosity=2).run(suite)
except AttributeError as ex:
pass
if "makefile" in TestInputSingleton.input.test_params:
# print out fail for those tests which failed and do sys.exit() error code
fail_count = 0
for result in results:
if result["result"] == "fail":
print result["name"], " fail "
fail_count += 1
else:
print result["name"], " pass"
if fail_count > 0:
sys.exit(1)
if TestInputSingleton.input.param("get-delays", False):
sd.stop_measure_sched_delay()
sd.fetch_logs()
# terminate any non main thread - these were causing hangs
for t in threading.enumerate():
if t.name != 'MainThread':
print 'Thread', t.name, 'was not properly terminated, will be terminated now.'
if hasattr(t, 'shutdown'):
t.shutdown(True)
else:
t._Thread__stop()
return results, xunit, "{0}{2}report-{1}".format(os.path.dirname(logs_folder), str_time, os.sep)
def filter_fields(testname):
testwords = testname.split(",")
line = ""
for fw in testwords:
if not fw.startswith("logs_folder") and not fw.startswith("conf_file") \
and not fw.startswith("cluster_name:") \
and not fw.startswith("ini:") \
and not fw.startswith("case_number:") \
and not fw.startswith("num_nodes:") \
and not fw.startswith("spec:"):
line = line + fw.replace(":", "=", 1)
if fw != testwords[-1]:
line = line + ","
return line
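# Illustrative example of filter_fields with a made-up test name: bookkeeping
# fields such as case_number and logs_folder are dropped, and the remaining
# key:value pairs are rewritten as key=value:
#   filter_fields("mod.Class.test_a,items:1000,case_number:3,logs_folder:/tmp/x")
#   -> "mod.Class.test_a,items=1000" after the caller's rstrip(",").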
def compare_with_sort(dict, key):
for k in dict.keys():
if "".join(sorted(k)) == "".join(sorted(key)):
return True
return False
def merge_reports(filespath):
log.info("Merging of report files from "+str(filespath))
testsuites = {}
if not isinstance(filespath, list):
filespaths = filespath.split(",")
else:
filespaths = filespath
for filepath in filespaths:
xml_files = glob.glob(filepath)
        if not isinstance(filespath, list) and filespath.find("*") != -1:
xml_files.sort(key=os.path.getmtime)
for xml_file in xml_files:
log.info("-- " + xml_file + " --")
doc = xml.dom.minidom.parse(xml_file)
testsuitelem = doc.getElementsByTagName("testsuite")
for ts in testsuitelem:
tsname = ts.getAttribute("name")
tserros = ts.getAttribute("errors")
tsfailures = ts.getAttribute("failures")
tsskips = ts.getAttribute("skips")
tstime = ts.getAttribute("time")
tstests = ts.getAttribute("tests")
issuite_existed = False
tests = {}
testsuite = {}
# fill testsuite details
if tsname in testsuites.keys():
testsuite = testsuites[tsname]
tests = testsuite['tests']
else:
testsuite['name'] = tsname
testsuite['errors'] = tserros
testsuite['failures'] = tsfailures
testsuite['skips'] = tsskips
testsuite['time'] = tstime
testsuite['testcount'] = tstests
issuite_existed = False
testcaseelem = ts.getElementsByTagName("testcase")
# fill test case details
for tc in testcaseelem:
testcase = {}
tcname = tc.getAttribute("name")
tctime = tc.getAttribute("time")
tcerror = tc.getElementsByTagName("error")
tcname_filtered = filter_fields(tcname)
if compare_with_sort(tests, tcname_filtered):
testcase = tests[tcname_filtered]
testcase['name'] = tcname
else:
testcase['name'] = tcname
testcase['time'] = tctime
testcase['error'] = ""
if tcerror:
testcase['error'] = str(tcerror[0].firstChild.nodeValue)
tests[tcname_filtered] = testcase
testsuite['tests'] = tests
testsuites[tsname] = testsuite
log.info("\nNumber of TestSuites="+str(len(testsuites)))
tsindex = 0
for tskey in testsuites.keys():
tsindex = tsindex+1
log.info("\nTestSuite#"+str(tsindex)+") "+str(tskey)+", Number of Tests="+str(len(testsuites[tskey]['tests'])))
pass_count = 0
fail_count = 0
tests = testsuites[tskey]['tests']
xunit = XUnitTestResult()
for testname in tests.keys():
testcase = tests[testname]
tname = testcase['name']
ttime = testcase['time']
inttime = float(ttime)
terrors = testcase['error']
tparams = ""
if "," in tname:
tparams = tname[tname.find(","):]
tname = tname[:tname.find(",")]
            if terrors:
                fail_count += 1
                xunit.add_test(name=tname, status='fail', time=inttime,
                               errorType='membase.error', errorMessage=str(terrors),
                               params=tparams)
            else:
                pass_count += 1
                xunit.add_test(name=tname, time=inttime, params=tparams)
str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
abs_path = os.path.dirname(os.path.abspath(sys.argv[0]))
root_log_dir = os.path.join(abs_path, "logs{0}testrunner-{1}".format(os.sep, str_time))
if not os.path.exists(root_log_dir):
os.makedirs(root_log_dir)
logs_folder = os.path.join(root_log_dir, "merged_summary")
try:
os.mkdir(logs_folder)
except:
pass
output_filepath="{0}{2}mergedreport-{1}".format(logs_folder, str_time, os.sep).strip()
xunit.write(output_filepath)
xunit.print_summary()
log.info("Summary file is at " + output_filepath+"-"+tsname+".xml")
return testsuites
def reruntests(rerun, names, options, arg_i, arg_p, runtime_test_params):
    if "=" in rerun:
        reruns = rerun.split("=")
        rerun_type = reruns[0]
        rerun_count = int(reruns[1])
    else:
        sys.exit("invalid --rerun value '{0}': expected e.g. fail=3 or pass=2".format(rerun))
all_results = {}
log.info("NOTE: Running " + rerun_type + " tests for " + str(rerun_count) + " times maximum.")
report_files = []
for testc in range(rerun_count+1):
if testc == 0:
log.info("\n*** FIRST run of the tests ***")
else:
log.info("\n*** "+rerun_type.upper()+" Tests Rerun#" + str(testc) + "/" + str(rerun_count) + " ***")
results, xunit, report_file = runtests(names, options, arg_i, arg_p, runtime_test_params)
all_results[(testc + 1)] = results
all_results[str(testc+1)+"_report"] = report_file+"*.xml"
report_files.append(report_file+"*.xml")
tobe_rerun = False
for result in results:
if result["result"] == rerun_type:
tobe_rerun = True
if not tobe_rerun:
break
tp, tf = parse_junit_result_xml(report_file+"*.xml")
if "fail" == rerun_type:
names = tf
elif "pass" == rerun_type:
names = tp
log.info("\nSummary:\n" + str(all_results))
log.info("Final result: merging...")
merge_reports(report_files)
return all_results
def main():
log.info("TestRunner: parsing args...")
names, runtime_test_params, arg_i, arg_p, options = parse_args(sys.argv)
log.info("TestRunner: start...")
# get params from command line
TestInputSingleton.input = TestInputParser.get_test_input(sys.argv)
# ensure command line params get higher priority
runtime_test_params.update(TestInputSingleton.input.test_params)
TestInputSingleton.input.test_params = runtime_test_params
log.info("Global Test input params:")
pprint(TestInputSingleton.input.test_params)
if names:
if options.merge:
merge_reports(options.merge)
elif options.rerun:
results = reruntests(options.rerun, names, options, arg_i, arg_p, runtime_test_params)
else:
results, _, _ = runtests(names, options, arg_i, arg_p,runtime_test_params)
else:
log.warning("Warning: No tests got selected. Please double check the .conf file and other "
"options!")
log.info("TestRunner: end...")
def watcher():
"""This little code snippet is from
http://greenteapress.com/semaphores/threading_cleanup.py (2012-07-31)
It's now possible to interrupt the testrunner via ctrl-c at any time
in a platform neutral way."""
if sys.platform == 'win32':
p = Process(target=main, name="MainProcess")
p.start()
try:
p.join()
rc = p.exitcode
if rc > 0:
sys.exit(rc)
except KeyboardInterrupt:
log.error('KeyBoardInterrupt')
p.terminate()
else:
child = os.fork()
if child == 0:
main() # child runs test
try:
rc = os.waitpid(child, 0)[1] /256 # exit status is the high order byte of second member of the tuple
if rc > 0:
sys.exit( rc )
except KeyboardInterrupt:
log.error('KeyBoardInterrupt')
try:
os.kill(child, signal.SIGKILL)
except OSError:
pass
except OSError:
pass
sys.exit()
if __name__ == "__main__":
watcher()