| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
setup.py | UMCollab/ODM | 2 | 12784451 |
<filename>setup.py
import os
from setuptools import setup
from subprocess import check_output
version = check_output(['bash', os.path.join(os.path.dirname(__file__), 'version.sh')]).decode('utf-8')
setup(
version=version,
)
| 1.609375 | 2 |
pyrate/algorithms/muondet/AverageBinEnergy.py | fscutti/pyrate | 0 | 12784452 |
<filename>pyrate/algorithms/muondet/AverageBinEnergy.py
""" Compute average energy in input bins.
"""
import sys
from copy import copy
from pyrate.core.Algorithm import Algorithm
class AverageBinEnergy(Algorithm):
__slots__ = ()
def __init__(self, name, store, logger):
super().__init__(name, store, logger)
def execute(self, config):
e = self.store.get("EVENT:nT:edepScint")
self.store.put(config["name"], e)
# EOF
| 2.515625 | 3 |
src/inference_dataset.py | cmstroe/LM-BFF | 0 | 12784453 |
<filename>src/inference_dataset.py
import csv
import pandas as pd
from ast import literal_eval
import random
initial_df = pd.read_csv("data/original/company_sentences_euro.csv")
inference_df = pd.DataFrame(columns = ['sentence'])
uplimit = 11000
local_uplimit = 100
for index, row in initial_df.iterrows():
if len(inference_df) < uplimit:
rand = random.randint(0, 1)
j = 0
if rand == 1:
            sentences = literal_eval(row.sentences)
            for sent in sentences:
                if j < local_uplimit:
inference_df = inference_df.append({'sentence' : sent}, ignore_index = True)
j+=1
else:
break
else:
break
inference_df.to_csv("inference_data.csv", index = False)
| 2.921875 | 3 |
seekr2/modules/common_analyze.py | astokely/seekr2 | 0 | 12784454 |
<reponame>astokely/seekr2<filename>seekr2/modules/common_analyze.py
"""
common_analyze.py
Classes, functions and other objects used for analyzing SEEKR2
simulation outputs common to MMVT and Elber calculation types.
"""
import os
import glob
import xml.etree.ElementTree as ET
import subprocess
import random
from collections import defaultdict
import numpy as np
from scipy import linalg as la
from scipy.stats import gamma
import seekr2.modules.common_base as base
# The power to raise a matrix to when computing stationary probabilities
MATRIX_EXPONENTIAL = 9999999
GAS_CONSTANT = 0.0019872 # in kcal/mol*K
DEFAULT_IMAGE_DIR = "images_and_plots/"
class MissingStatisticsError(Exception):
"""Catch a very specific type of error in analysis stage."""
pass
def solve_rate_matrix(Q_hat, max_iter=100, ETOL=1e-10):
"""
Use an infinite series combined with a divide-and-conquer method
to solve the Q-matrix since the numpy/scipy solver tends to have
numerical instabilities.
"""
K_hat = Q_to_K(Q_hat)
sum_of_series = np.identity(K_hat.shape[0], dtype=np.longdouble)
for i in range(max_iter):
n = 2**i
K_pow_n = np.linalg.matrix_power(K_hat, n)
section = K_pow_n @ sum_of_series
sum_of_series += section
error = np.linalg.norm(section)
if error < ETOL:
break
T = np.zeros(Q_hat.shape, dtype=np.longdouble)
t_vector = np.zeros((Q_hat.shape[0], 1), dtype=np.longdouble)
for i in range(Q_hat.shape[0]):
T[i,i] = -1.0 / Q_hat[i,i]
t_vector[i,0] = T[i,i]
one_vector = np.ones((Q_hat.shape[0]), dtype=np.longdouble)
times = sum_of_series @ T @ one_vector
return times
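# Illustrative sketch, not part of the original SEEKR2 source: how
# solve_rate_matrix() might be called on a small, hypothetical 2x2 rate
# matrix with a sink (row sums are negative, so the series converges).
#
#     Q_example = np.array([[-3.0,  2.0],
#                           [ 1.0, -2.0]], dtype=np.longdouble)
#     times_example = solve_rate_matrix(Q_example)
#     # times_example[i] is the mean time to absorption starting from state i,
#     # obtained from the truncated geometric series of K = Q_to_K(Q_example).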
def Q_to_K(Q):
"""Given a rate matrix Q, compute probability transition matrix K."""
K = np.zeros(Q.shape, dtype=np.longdouble)
for i in range(Q.shape[0]):
for j in range(Q.shape[0]):
if i == j:
K[i,j] = 0.0
else:
K[i,j] = -Q[i,j] / Q[i,i]
return K
#def make_elber_K_matrix(oldK):
# """Make a K matrix that is compatible with Elber milestoning."""
# old_shape = oldK.shape
# print("oldK:", oldK)
# exit()
# K_hat
def quadriture(err1, err2):
"""Add two errors in quadriture."""
return float(np.sqrt(err1**2 + err2**2))
def minor1d(array1d, i):
"""Accept 1D array and return a new array with element i removed."""
range1 = list(range(i)) + list(range(i+1, array1d.shape[0]))
return array1d[np.array(range1)]
def minor2d(array2d, i, j):
"""
Accept matrix array2d and return a new matrix with row i and
column j removed.
"""
range1 = list(range(i)) + list(range(i+1, array2d.shape[0]))
range2 = list(range(j)) + list(range(j+1,array2d.shape[1]))
return array2d[np.array(range1)[:,np.newaxis], np.array(range2)]
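# Illustrative sketch, not part of the original source: what the helpers above
# do to tiny example arrays.
#
#     minor1d(np.array([10, 20, 30]), 1)      # -> array([10, 30])
#     a = np.arange(9).reshape(3, 3)
#     minor2d(a, 0, 2)                        # drops row 0 and column 2:
#                                             # array([[3, 4],
#                                             #        [6, 7]])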
def pretty_string_value_error(value, error, error_digits=2, use_unicode=True):
"""
Returns a value/error combination of numbers in a scientifically
'pretty' format.
Scientific quantities often come as a *value* (the actual
quantity) and the *error* (the uncertainty in the value).
Given two floats, value and error, return the two in a
'pretty' formatted string: where the value and error are truncated
at the correct precision.
Parameters
----------
value : float
The quantity in question
error : float
The uncertainty of the quantity
error_digits : int, default 2
How many significant figures the error has. Scientific
convention holds that errors have 1 or (at most) 2 significant
figures. The larger number of digits is chosen here by default.
Returns
-------
new_string : str
        The value and its error combined into a single formatted string.
Examples
--------
>>> pretty_string_value_error(1.23456789e8, 4.5678e5,
error_digits=2)
"1.2346 +/- 0.0046 * 10^+08"
>>> pretty_string_value_error(5.6e-2, 2.0e-3, error_digits=1)
"5.6 +/- 0.2 * 10^-02"
"""
if value is None:
return "None"
if error is None or not np.isfinite(error):
if use_unicode:
new_string = "{:.6E} \u00B1 UNKNOWN ERROR MARGIN".format(value)
else:
new_string = "{:.6E} +/- UNKNOWN ERROR MARGIN".format(value)
else:
if not np.isfinite(value):
return str(value)
assert "e" in "{:e}".format(value), "Cannot convert into scientific "\
"notation: {1}".format(value)
value_mantissa_str, value_exponent_str = \
"{:e}".format(value).strip().split('e')
value_mantissa = float(value_mantissa_str)
value_exponent = int(value_exponent_str)
error_mantissa_str, error_exponent_str = \
"{:e}".format(error).strip().split('e')
error_mantissa = float(error_mantissa_str)
error_exponent = int(error_exponent_str)
padding = value_exponent - error_exponent + error_digits - 1
if padding < 1: padding = 1
exp_diff = error_exponent - value_exponent
string_for_formatting = "{:.%df}" % padding
new_value_mantissa = string_for_formatting.format(value_mantissa)
new_error_mantissa = string_for_formatting.format(
error_mantissa*10**exp_diff)
if use_unicode:
new_string = "%s \u00B1 %s * 10^%s" % (
new_value_mantissa, new_error_mantissa, value_exponent_str)
else:
new_string = "%s +/- %s * 10^%s" % (
new_value_mantissa, new_error_mantissa, value_exponent_str)
return new_string
def browndye_run_compute_rate_constant(compute_rate_constant_program,
results_filename_list,
sample_error_from_normal=False):
"""
run the BrownDye program compute_rate_constant to find k_ons
and the value k(b).
Parameters:
-----------
compute_rate_constant_program : str
The exact command to use to run the Browndye
compute_rate_constant program.
results_filename_list : list
A list of string paths to the results XML file, which is an
output from running the BrownDye program.
sample_error_from_normal : bool, default False
Add a fluctuation to the k-on value(s) sampled from a
normal distribution with a standard deviation equal to the
k-on's error. This is used by the error estimators to
incorporate the error in the k_b value obtained from BrownDye.
Results:
--------
k_ons : defaultdict
dictionary whose keys are various milestoning states and
whose values are the k-ons to those states.
k_on_errors : defaultdict
dictionary whose keys are various milestoning states and
whose values are the errors in the k-ons to those states.
reaction_probabilities : defaultdict
dictionary whose keys are various milestoning states and
whose values are the probabilities of reaching those states
from the b-surface.
reaction_probability_errors : defaultdict
dictionary whose keys are various milestoning states and
whose values are the errors in the probabilities of
reaching those states from the b-surface.
transition_counts : defaultdict
dictionary whose keys are the various milestone states and
also the "escaped" and "stuck" states. The values are the
counts of crossings of these various states.
"""
k_ons = defaultdict(float)
k_on_errors = defaultdict(float)
reaction_probabilities = defaultdict(float)
reaction_probability_errors = defaultdict(float)
transition_counts = defaultdict(int)
k_b = None
total_n_trajectories = 0
for results_filename in results_filename_list:
cmd = "%s < %s" % (compute_rate_constant_program,
results_filename)
output_string = subprocess.check_output(cmd, shell=True)
root = ET.fromstring(output_string)
# first, obtain counts directly from the results file
results_root = ET.parse(results_filename)
reactions = results_root.find("reactions")
n_trajectories = int(reactions.find("n_trajectories").text.strip())
total_n_trajectories += n_trajectories
stuck = int(reactions.find("stuck").text.strip())
escaped = int(reactions.find("escaped").text.strip())
for completed in reactions.iter("completed"):
name = int(completed.find("name").text.strip())
n = int(completed.find("n").text.strip())
transition_counts[name] += n
transition_counts["escaped"] += escaped
transition_counts["stuck"] += stuck
# now obtain probabilities and rates
for rate in root.iter("rate"):
name = int(rate.find("name").text.strip())
rate_constant = rate.find("rate_constant")
k_on_tag = rate_constant.find("mean")
k_on_tag_high = rate_constant.find("high")
assert k_on_tag is not None
assert k_on_tag_high is not None
k_on = float(k_on_tag.text)
k_on_error = 0.5*(float(k_on_tag_high.text) - k_on)
reaction_probability = rate.find("reaction_probability")
beta_tag = reaction_probability.find("mean")
beta_tag_high = reaction_probability.find("high")
assert beta_tag is not None
assert beta_tag_high is not None
beta = float(beta_tag.text)
beta_error = 0.5*(float(beta_tag_high.text) - beta)
if beta != 0.0:
k_b = k_on / beta
if sample_error_from_normal:
k_on = np.random.normal(loc=k_on, scale=k_on_error)
assert k_b is not None, "No BD reactions from the b-surface " \
"successfully reached any of the milestone surfaces."
for key in transition_counts:
reaction_probabilities[key] \
= transition_counts[key] / total_n_trajectories
k_ons[key] = k_b * reaction_probabilities[key]
if transition_counts[key] > 1:
reaction_probability_errors[key] = reaction_probabilities[key] \
/ np.sqrt(transition_counts[key]-1)
k_on_errors[key] = k_b * reaction_probability_errors[key]
else:
reaction_probability_errors[key] = 1e99
k_on_errors[key] = 1e99
if sample_error_from_normal:
k_ons[key] = np.random.normal(loc=k_ons[key],
scale=k_on_errors[key])
assert k_ons.keys() == transition_counts.keys()
return k_ons, k_on_errors, reaction_probabilities, \
reaction_probability_errors, transition_counts
def browndye_parse_bd_milestone_results(results_filename_list,
sample_error_from_normal=False):
# common
"""
Read and extract transition probabilities for a BD milestone.
Parameters:
-----------
results_filename_list : list
A list of paths to the results XML files for a BD milestone,
which is an output from running the BrownDye program.
sample_error_from_normal : bool, default False
Add a fluctuation to the probabilities sampled from a
normal distribution with a standard deviation equal to
1/sqrt(n-1). This is used by the error estimators to
incorporate the error in the probabilities.
Results:
--------
transition_probabilities : defaultdict
The probabilities of transitions.
transition_counts : defaultdict
The counts of transitions to all visited states.
"""
transition_counts = defaultdict(int)
transition_probabilities = defaultdict(float)
completed_prob = 0.0
completed_count = 0
for results_filename in results_filename_list:
assert os.path.exists(results_filename), "You must perform successful "\
"extractions and runs of all BD milestones if k-on settings are "\
"provided in the model XML. Missing file: " \
+ results_filename
root = ET.parse(results_filename)
reactions = root.find("reactions")
n_trajectories = int(reactions.find("n_trajectories").text.strip())
stuck = int(reactions.find("stuck").text.strip())
escaped = int(reactions.find("escaped").text.strip())
for completed in reactions.iter("completed"):
name = int(completed.find("name").text.strip())
n = int(completed.find("n").text.strip())
transition_counts[name] += n
completed_count += n
transition_counts["escaped"] += escaped
transition_counts["stuck"] += stuck
for key in transition_counts:
n = transition_counts[key]
avg = n / (completed_count + escaped)
if sample_error_from_normal:
assert n > 1, "Too few transitions to compute error."
std_dev = avg / np.sqrt(n-1)
transition_probabilities[key] = np.random.normal(
loc=avg, scale=std_dev)
else:
transition_probabilities[key] = avg
completed_prob += transition_probabilities[key]
transition_probabilities["escaped"] = 1.0 - completed_prob
return transition_probabilities, transition_counts
def combine_fhpd_results(bd_milestone, fhpd_directories,
combined_results_filename):
"""
Read all results files in the first-hitting-point-distribution
(FHPD) directories and combine them into a single results file
to be placed in the bd_milestone directory.
"""
reaction_dict = defaultdict(float)
number_escaped = 0
number_stuck = 0
number_total = 0
number_total_check = 0
results_filename_list = []
if len(fhpd_directories) == 0:
return
for fhpd_directory in fhpd_directories:
results_glob = os.path.join(fhpd_directory,
"results*.xml")
results_filename_list += glob.glob(results_glob)
if len(results_filename_list) == 0:
print("No BD output files found.")
return
for results_filename in results_filename_list:
tree = ET.parse(results_filename)
root = tree.getroot()
reactions_XML = root.find("reactions")
number_total += int(reactions_XML.find("n_trajectories").text.strip())
number_stuck += int(reactions_XML.find("stuck").text.strip())
number_escaped += int(reactions_XML.find("escaped").text.strip())
for completed_XML in reactions_XML.iter("completed"):
name = completed_XML.find("name").text.strip()
n = int(completed_XML.find("n").text.strip())
reaction_dict[name] += n
number_total_check += n
assert number_total == number_total_check + number_stuck + number_escaped
for completed_XML in reactions_XML.iter("completed"):
reactions_XML.remove(completed_XML)
reactions_XML.find("n_trajectories").text = str(number_total)
reactions_XML.find("stuck").text = str(number_stuck)
reactions_XML.find("escaped").text = str(number_escaped)
for key in reaction_dict:
completed_XML = ET.SubElement(reactions_XML, "completed")
completed_XML.text = "\n "
completed_XML.tail = "\n "
name_XML = ET.SubElement(completed_XML, "name")
name_XML.text = key
name_XML.tail = "\n "
n_XML = ET.SubElement(completed_XML, "n")
n_XML.text = str(int(reaction_dict[key]))
n_XML.tail = "\n "
xmlstr = ET.tostring(root).decode("utf-8")
with open(combined_results_filename, 'w') as f:
f.write(xmlstr)
return
class Data_sample():
"""
Represent a set of data needed to compute kinetic or thermodynamic
quantities of interest using the SEEKR method. The Data_sample may
be constructed directly from average times and rates, or may be
generated from a random distribution.
Attributes:
-----------
model : Model()
The model contains all the settings, parameters, directories,
and file names used to perform a SEEKR2 simulation.
N_ij : numpy.array()
2-dimensional array of transition counts from milestone i to
milestone j.
R_i : numpy.array()
1-dimensional array of incubation times for each milestone i.
Q : numpy.array()
The rate matrix constructed from transition counts and times.
No sink state is defined - probability is conserved for
thermodynamics calculations.
Q_hat : numpy.array()
Same as attribute Q, but one or more sink states are defined -
probability is not conserved, allowing kinetics calculations.
K : numpy.array()
The probability transition matrix made only from transition
counts - it is equivalent to a Markov transition matrix.
Probability is conserved for thermodynamics calculations.
K_hat : numpy.array()
        Same as attribute K, except one or more sink states are defined
        and probability is not conserved, allowing kinetics
        calculations.
p_i : numpy.array()
Stationary probability distribution along the milestones,
which can be used to compute other thermodynamic quantities,
such as the attribute free_energy_profile.
free_energy_profile : numpy.array()
The stationary free energy profile along the milestones.
MFPTs : dict
A dictionary whose keys are 2-tuples of milestone indices
(source milestone, destination milestone) and whose values
are mean first passage times (MFPTs) between these states.
States can be added to this dictionary by being either end
states or the bulk state.
k_off : float
The off-rate constant from a stationary distribution across
all the milestones to the bulk state.
k_on : dict
A dictionary whose keys are milestone indices (which are end
states) and whose values represent the 2nd-order on-rate
constant.
bd_transition_counts : dict
Keep track of transition counts for each BD state. This is a
dictionary of dictionaries, with keys of "b_surface" or
integers representing BD milestones. The values are
dictionaries whose keys are states such as milestone indices
or the string "escaped", "stuck", and whose values are counts
of transitions. This attribute is primarily used for
convergence estimates.
"""
def __init__(self, model):
self.model = model
self.N_ij = None
self.R_i = None
self.Q = None
self.Q_hat = None
self.K = None
self.K_hat = None
self.p_i = None
#self.p_i_hat = None
self.free_energy_profile = None
self.MFPTs = None
self.k_off = None
self.k_ons = {}
self.bd_transition_counts = {}
def compute_rate_matrix(self):
"""
Compute Q and K from N_ij and R_i.
"""
self.Q = np.zeros((self.model.num_milestones,
self.model.num_milestones), dtype=np.longdouble)
for i in range(self.model.num_milestones):
for j in range(self.model.num_milestones):
if self.R_i[i] == 0.0:
self.Q[i,j] = 0.0
else:
self.Q[i,j] = self.N_ij[i,j] / self.R_i[i]
assert self.Q[i,j] >= 0.0, "self.Q[i,j]: {}".format(self.Q[i,j])
for i in range(self.model.num_milestones):
row_sum = np.sum(self.Q[i])
if row_sum == 0:
new_row_sum = 0.0
for j in range(self.model.num_milestones):
self.Q[i,j] = self.Q[j,i]
new_row_sum += self.Q[j,i]
self.Q[i,i] = -new_row_sum
else:
self.Q[i,i] = -row_sum
self.K = Q_to_K(self.Q)
return
def calculate_thermodynamics(self):
"""
Use this data sample's statistics to construct the
thermodynamic quantities.
"""
eigvals, eigvecs = la.eig(self.K.T)
closest_eigval = -1e9
closest_eigvec = None
for i, eigval, in enumerate(eigvals):
if (eigval - 1.0)**2 < (closest_eigval - 1.0)**2:
closest_eigval = eigval
closest_eigvec = eigvecs[:,i]
K = abs(self.K)
q_i = np.real(closest_eigvec / np.sum(closest_eigvec))
q_i = q_i @ np.linalg.matrix_power(K, MATRIX_EXPONENTIAL)
q_i = q_i / np.sum(q_i)
incubation_times = np.zeros(self.model.num_milestones)
p_i = np.zeros(self.model.num_milestones)
for i in range(self.model.num_milestones):
N_i = 0
for j in range(self.model.num_milestones):
N_i += self.N_ij[i,j]
if N_i == 0:
incubation_times[i] = -1.0/self.Q[i,i]
else:
incubation_times[i] = self.R_i[i] / N_i
for i, q_i_val in enumerate(q_i):
p_i[i] = abs(q_i_val * incubation_times[i])
sum_p_i = np.sum(p_i)
for i in range(self.model.num_milestones):
p_i[i] /= sum_p_i
self.q_i = q_i
self.p_i = p_i
self.free_energy_profile = np.zeros(self.p_i.shape)
highest_p_i = max(self.p_i)
for i, p_i_val in enumerate(self.p_i):
free_energy = -GAS_CONSTANT*self.model.temperature*np.log(
p_i_val / highest_p_i)
self.free_energy_profile[i] = free_energy
return
def calculate_kinetics(self, pre_equilibrium_approx=False,
bd_sample_from_normal=False):
"""
Once the rate matrix Q is computed, determine the timescales
and probabilities of transfers between different states. Fill
out all kinetics quantities.
Parameters:
-----------
pre_equilibrium_approx : bool, default False
Whether to use the pre-equilibrium approximation for
computing kinetics.
bd_sample_from_normal : bool, default False
If set to True, then k-on quantities will have a random
fluctuation introduced in a magnitude proportional to k-on
errors. This is used only for error estimations.
"""
end_milestones = []
bulk_milestones = []
MFPTs = {}
k_off = 0.0
k_ons = {}
for alpha, anchor in enumerate(self.model.anchors):
if anchor.endstate:
for milestone_id in anchor.get_ids():
if self.model.get_type() == "elber":
if anchor.alias_from_id(milestone_id) == 3:
# TODO: hacky
continue
end_milestones.append(milestone_id)
if anchor.bulkstate:
for milestone_id in anchor.get_ids():
bulk_milestones.append(milestone_id)
# first, make the bulk state the sink state to compute k_offs
Q_hat = self.Q[:,:]
p_i_hat = self.p_i[:]
#if self.model.k_on_info:
# K_hat = self.K[:,:]
n = len(self.Q)
for bulk_milestone in sorted(bulk_milestones, reverse=True):
Q_hat = minor2d(Q_hat, bulk_milestone, bulk_milestone)
p_i_hat = minor1d(p_i_hat, bulk_milestone)
Q_hat = Q_hat.astype(dtype=np.longdouble)
if pre_equilibrium_approx:
lowest_p_i = np.min(self.p_i)
lowest_i = np.argmin(self.p_i)
assert lowest_p_i >= 0.0, \
"Negative stationary probability detected."
if lowest_i == n-1:
k_off = lowest_p_i * Q_hat[lowest_i-1,lowest_i]
else:
k_off = lowest_p_i * Q_hat[lowest_i,lowest_i+1]
bulk_times = np.ones(p_i_hat.shape) / k_off
else:
#negative_unity = np.zeros((len(Q_hat)), dtype=np.longdouble)
#negative_unity[:] = -1.0
#bulk_times = la.solve(Q_hat, negative_unity)
bulk_times = solve_rate_matrix(Q_hat)
for end_milestone in end_milestones:
if end_milestone in bulk_milestones:
continue
# must account for the removal of bulk state to matrix indices
no_bulk_index = end_milestone
for bulk_milestone in bulk_milestones:
if end_milestone > bulk_milestone:
no_bulk_index -= 1
mfpt = bulk_times[no_bulk_index]
MFPTs[(end_milestone, "bulk")] = mfpt
MFPT_to_bulk = 0
assert bulk_times.shape == p_i_hat.shape
for i, bulk_time in enumerate(bulk_times):
MFPT_to_bulk += bulk_time * p_i_hat[i]
# convert to 1/s
k_off = 1.0e12 / MFPT_to_bulk
# Next, compute the MFPTs between different states
for end_milestone_dest in end_milestones:
if end_milestone_dest in bulk_milestones:
continue
Q_hat = minor2d(self.Q[:], end_milestone_dest, end_milestone_dest)
#I = np.zeros((len(Q_hat)), dtype = float)
#I[:] = 1.0
#end_state_times = la.solve(Q_hat, -I)
end_state_times = solve_rate_matrix(Q_hat)
for end_milestone_src in end_milestones:
if end_milestone_dest == end_milestone_src:
# don't get the MFPT from a milestone to itself
continue
if end_milestone_src in bulk_milestones:
# a bulk milestone will never be a source
continue
mfpt = end_state_times[end_milestone_src]
MFPTs[(end_milestone_src, end_milestone_dest)] = mfpt
if self.model.k_on_info:
#if self.model.get_type() == "elber":
# K_hat = make_elber_K_matrix(self.K)
# for end_milestone in end_milestones:
# K_hat[end_milestone, :] = 0.0
# K_hat[end_milestone, end_milestone] = 1.0
#else:
# K_hat = self.K[:,:]
# for end_milestone in end_milestones:
# K_hat[end_milestone, :] = 0.0
# K_hat[end_milestone, end_milestone] = 1.0
K_hat = self.K[:,:]
for end_milestone in end_milestones:
K_hat[end_milestone, :] = 0.0
K_hat[end_milestone, end_milestone] = 1.0
p_i_hat = self.p_i[:]
n = K_hat.shape[0]
source_vec = np.zeros((n,1))
output_file_glob = os.path.join(
self.model.anchor_rootdir,
self.model.k_on_info.b_surface_directory,
self.model.k_on_info.bd_output_glob)
output_file_list = glob.glob(output_file_glob)
output_file_list = base.order_files_numerically(output_file_list)
if len(output_file_list) > 0:
if self.model.browndye_settings is not None:
k_ons_src, k_on_errors_src, reaction_probabilities, \
reaction_probability_errors, transition_counts = \
browndye_run_compute_rate_constant(os.path.join(
self.model.browndye_settings.browndye_bin_dir,
"compute_rate_constant"), output_file_list,
sample_error_from_normal=bd_sample_from_normal)
self.bd_transition_counts["b_surface"] = transition_counts
else:
raise Exception("No valid BD program settings provided.")
if len(bulk_milestones) > 0:
bulk_milestone = bulk_milestones[0]
for bd_milestone in self.model.k_on_info.bd_milestones:
bd_results_file = os.path.join(
self.model.anchor_rootdir, bd_milestone.directory,
"results.xml")
if not os.path.exists(bd_results_file):
bd_directory_list_glob = os.path.join(
self.model.anchor_rootdir,
bd_milestone.directory,
"first_hitting_point_distribution", "lig*/")
bd_directory_list = glob.glob(
bd_directory_list_glob)
if len(bd_directory_list) == 0:
continue
combine_fhpd_results(bd_milestone,
bd_directory_list,
bd_results_file)
source_index = bd_milestone.outer_milestone.index
source_vec[source_index] = k_ons_src[source_index]
results_filename_list = [bd_results_file]
transition_probabilities, transition_counts = \
browndye_parse_bd_milestone_results(
results_filename_list)
self.bd_transition_counts[bd_milestone.index] \
= transition_counts
#src_index = bd_milestone.outer_milestone.index
K_hat[source_index, :] = 0.0
for key in transition_probabilities:
value = transition_probabilities[key]
if key in ["escaped", "stuck"]:
pass
else:
K_hat[source_index, key] = value
K_hat_inf = np.linalg.matrix_power(K_hat, MATRIX_EXPONENTIAL)
end_k_ons = np.dot(K_hat_inf.T, source_vec)
for end_milestone in end_milestones:
k_ons[end_milestone] = end_k_ons[end_milestone]
self.K_hat = K_hat
self.k_ons = k_ons
self.Q_hat = Q_hat
#self.p_i_hat = p_i_hat # TODO: remove after successful CI test
self.MFPTs = MFPTs
self.k_off = k_off
return
def monte_carlo_milestoning_error(self, num=1000, skip=100, stride=1,
verbose=False,
pre_equilibrium_approx=False):
"""
        Calculate an error estimate by sampling a distribution of rate
        matrices, assuming a Poisson (gamma) distribution with
        parameters N_ij and R_i, using Markov chain Monte Carlo.
        Detailed balance is enforced using a modified version of
        Algorithm 4 from Noe 2008 for rate matrices.
        The sampled distribution is: p(Q|N) = p(Q) p(N|Q) / p(N) =
        p(Q) prod_ij( q_ij^N_ij * exp(-q_ij * R_i) )
Parameters
----------
num : int, default 1000
number of rate matrix (Q) samples to be generated
skip : int, default 100
            number of initial rate matrix samples to skip for "burn in"
        stride : int, default 1
            frequency at which rate matrix samples are recorded; a larger
            stride reduces correlation between samples
verbose : bool, default False
allow additional verbosity/printing
pre_equilibrium_approx : bool, default False
Whether to use the pre-equilibrium approximation for
computing kinetics.
"""
N = self.N_ij
R = self.R_i
Q = self.Q[:]
m = Q.shape[0] #get size of count matrix
Q_mats = []
MFPTs_list = defaultdict(list)
MFPTs_error = defaultdict(float)
k_off_list = []
k_ons_list = defaultdict(list)
k_ons_error = defaultdict(float)
p_i_list = []
free_energy_profile_list = []
Qnew = Q[:,:]
if verbose: print("collecting ", num, " MCMC samples from ",
num*(stride) + skip, " total moves")
for counter in range(num * (stride) + skip):
#if verbose: print("MCMC stepnum: ", counter)
Qnew = Q[:,:]
for i in range(m): # rows
for j in range(m): # columns
if i == j: continue
if Qnew[i,j] == 0.0: continue
if Qnew[j,j] == 0.0: continue
if N[i,j] == 0: continue
if R[i] == 0: continue
Q_gamma = 0
delta = Qnew[i,j]
while ((delta) >= (Qnew[i,j])):
Q_gamma = gamma.rvs(a=N[i,j], scale = 1/R[i],)
delta = Qnew[i,j] - Q_gamma
log_p_Q_old = N[i,j] * np.log(Qnew[i,j]) - Qnew[i,j] * R[i]
log_p_Q_new = N[i,j] * np.log(Qnew[i,j] - delta) - \
(Qnew[i,j] - delta) * R[i]
if verbose: print("log P(Q_new)", log_p_Q_new)
if verbose: print("log P(Q_old)", log_p_Q_old)
r2 = random.random()
p_acc = log_p_Q_new - log_p_Q_old
if verbose: print("p_acc", p_acc, "r", np.log(r2))
if np.log(r2) <= p_acc:
#log(r) can be directly compared to
# log-likelihood acceptance, p_acc
if verbose: print("performing non-reversible element "\
"shift...")
Qnew[i,i] = (Qnew[i,i]) + delta
Qnew[i,j] = Qnew[i,j] - delta
if verbose: print(Qnew)
if counter > skip and counter % stride == 0:
self.Q = Qnew
self.K = Q_to_K(self.Q)
self.calculate_thermodynamics()
self.calculate_kinetics(pre_equilibrium_approx,
bd_sample_from_normal=True)
p_i_list.append(self.p_i)
free_energy_profile_list.append(self.free_energy_profile)
for key in self.MFPTs:
MFPTs_list[key].append(self.MFPTs[key])
k_off_list.append(self.k_off)
if self.model.k_on_info:
for key in self.k_ons:
k_ons_list[key].append(self.k_ons[key])
Q = Qnew
if verbose: print("final MCMC matrix", Q)
p_i_error = np.zeros(self.p_i.shape)
free_energy_profile_err = np.zeros(self.free_energy_profile.shape)
for i in range(p_i_error.shape[0]):
p_i_val_list = []
for j in range(len(p_i_list)):
p_i_val_list.append(p_i_list[j][i])
p_i_error[i] = np.std(p_i_val_list)
for i in range(free_energy_profile_err.shape[0]):
free_energy_profile_val_list = []
for j in range(len(free_energy_profile_list)):
free_energy_profile_val_list.append(free_energy_profile_list[j][i])
free_energy_profile_err[i] = np.std(free_energy_profile_val_list)
for key in self.MFPTs:
MFPTs_error[key] = np.std(MFPTs_list[key])
k_off_error = np.std(k_off_list)
if self.model.k_on_info:
for key in self.k_ons:
k_ons_error[key] = np.std(k_ons_list[key])
return p_i_error, free_energy_profile_err, MFPTs_error, k_off_error, \
k_ons_error
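# Illustrative sketch, not part of the original module: the intended call
# sequence for a Data_sample, assuming `model`, `N_ij` and `R_i` were prepared
# elsewhere (those names are placeholders here).
#
#     sample = Data_sample(model)
#     sample.N_ij = N_ij                    # milestone-to-milestone counts
#     sample.R_i = R_i                      # per-milestone incubation times
#     sample.compute_rate_matrix()          # fills Q and K
#     sample.calculate_thermodynamics()     # fills p_i and free_energy_profile
#     sample.calculate_kinetics()           # fills MFPTs, k_off (and k_ons with BD info)
#     errors = sample.monte_carlo_milestoning_error(num=1000, skip=100)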
| 2.234375 | 2 |
authors/apps/stats/tests/base_test.py | deferral/ah-django | 1 | 12784455 |
from authors.apps.reactions.tests.base_test import BaseTestCase
class ReaderStatsBaseTestCase(BaseTestCase):
"""
Holds mock data and test helpers for the reader stats
"""
def get_article_slug(self, data={}, rel_path='/read', created=False):
"""
Returns a url path to post an articles reaction
"""
self.create_article(data=self.article_mock)
if created:
return self.articles_path + \
'funny-things-on-the-keyboard_GoodCow' + rel_path
| 2.46875 | 2 |
tests/unittests/durable_functions/orchestration_trigger/main.py | vrdmr/azure-functions-python-worker | 0 | 12784456 |
# import azure.durable_functions as df
def generator_function(context):
final_result = yield context.call_activity('activity_trigger', 'foobar')
return final_result
def main(context):
# orchestrate = df.Orchestrator.create(generator_function)
# result = orchestrate(context)
# return result
return f'{context} :)'
| 2.21875 | 2 |
data/check_donors.py | jacobdeasy/flexible-ehr | 12 | 12784457 |
"""Check the maximum length of stay of donors in MIMIC-III.
If it is <48 it does not affect our study."""
import os
import pandas as pd
df = pd.read_csv('ADMISSIONS.csv')
df = df.loc[(df['DIAGNOSIS'].str.lower() == 'organ donor') | (df['DIAGNOSIS'].str.lower() == 'organ donor account')]
files = os.listdir('root')
ods = list(df['SUBJECT_ID'])
los_list = []
for od in ods:
try:
df_tmp = pd.read_csv(os.path.join('root', str(od), 'stays.csv'))
los_list += list(df_tmp['LOS'].values)
except:
pass
print(max(los_list))
"""
Result: 37.2832
"""
| 2.578125 | 3 |
CraftMasterWeb/emails/views.py | Athelios/CraftMaster | 8 | 12784458 |
from django.shortcuts import render, HttpResponse
from datetime import date
from .models import Email
import json
# Create your views here.
def getEmail(request):
if request.method=="POST":
email = request.POST.get('email')
Email.objects.create(
email = email,
register_date = date.today(),
)
res = {'ret':0, 'msg':'ok'}
return HttpResponse(json.dumps(res),content_type='application/json')
return render(request,'index.html')
| 2.109375 | 2 |
algo/util/data_processor.py | HHansi/TED-S | 0 | 12784459 |
# Created by Hansi at 12/22/2021
import re
import demoji
from nltk import TweetTokenizer
from sklearn.model_selection import StratifiedShuffleSplit
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', '#', '*',
'+', '\\', '•', '~', '@', '£',
'·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█',
'½', 'à', '…',
'“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥',
'▓', '—', '‹', '─',
'▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾',
'Ã', '⋅', '‘', '∞',
'∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹',
'≤', '‡', '√', '..', '...', '…']
def remove_links(sentence, substitute=''):
"""
Method to remove links in the given text
parameters
-----------
:param sentence: str
:param substitute: str
which to replace link
:return: str
String without links
"""
sentence = re.sub('https?:\/\/\S+', substitute, sentence, flags=re.MULTILINE)
return sentence.strip()
def remove_repeating_characters(sentence):
"""
    Replace a non-alphanumeric character repeated four or more times with a single occurrence (e.g. '-----' becomes '-')
:param sentence:
:return:
"""
sentence = re.sub('(\W)\\1{3,}', '\\1', sentence)
return sentence.strip()
def remove_retweet_notations(sentence):
"""
Method to remove retweet notations in the given text
parameters
-----------
:param sentence: str
:return: str
String without retweet notations
"""
updated_sentence = re.sub(r'RT @[a-zA-Z0-9_/-]*:', '', sentence)
return updated_sentence.strip()
def add_emoji_text(x):
"""
    Convert emoji to text
:param x: str
:return: str
String where emojis are replaced by text
"""
emoji_text = demoji.findall(x)
for em in emoji_text.keys():
x = x.replace(em, ' ' + emoji_text[em] + ' ')
x = ' '.join(x.split())
return x
def preprocess_data(text, preserve_case=False, emoji_to_text=False):
"""
A Pipeline to preprocess data
:param text: str
:param preserve_case: boolean, optional
:param emoji_to_text: boolean, optional
:return: str
"""
text = text.replace("\n", " ")
text = remove_links(text, substitute='')
text = remove_retweet_notations(text)
text = remove_repeating_characters(text)
if emoji_to_text:
text = add_emoji_text(text)
# tokenize and lower case
tknzr = TweetTokenizer(preserve_case=preserve_case, reduce_len=True, strip_handles=False)
tokens = tknzr.tokenize(text)
text = " ".join(tokens)
# text.replace(symbol, "#") # remove # in hash tags
# remove white spaces at the beginning and end of the text
text = text.strip()
# remove extra whitespace, newline, tab
text = ' '.join(text.split())
return text
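# Illustrative example, not part of the original module: roughly what the
# pipeline does to a raw tweet. The exact output depends on the demoji and
# TweetTokenizer versions, so only the effects are described.
#
#     raw = "RT @user: Sooo     happy!!!! https://t.co/abc123"
#     preprocess_data(raw, preserve_case=False, emoji_to_text=True)
#     # -> link and retweet notation removed, the repeated '!' collapsed,
#     #    tokens lower-cased and re-joined with single spaces.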
def split_data(df, seed, label_column='label', test_size=0.1):
"""
StratifiedShuffleSplit the given DataFrame
:param df: DataFrame
:param seed: int
:param label_column: str
:param test_size: float
:return: DataFrame, DataFrame
train and test
"""
y = df[label_column]
sss = StratifiedShuffleSplit(n_splits=1, test_size=test_size, random_state=seed)
train_index, test_index = next(sss.split(df, y))
train = df.iloc[train_index]
test = df.iloc[test_index]
return train, test
| 2.625 | 3 |
servicebox/services/tests/test_models.py | FlxPeters/servicebox | 0 | 12784460 |
from django.test import TestCase
from services.models import (
Service,
ServiceStatusChoices,
ServiceRelation,
ServiceRealtionChoice,
Link,
LinkTypeChoice,
)
from tenancy.models import Tenant
from platforms.models import Platform
class ServiceModelTest(TestCase):
def setUp(self):
self.tenant_owner = Tenant.objects.create(name="Acme Corp.")
self.tenant_operator = Tenant.objects.create(name="Operator Incl.")
self.platform = Platform.objects.create(
name="Road Runner Cloud", tenant=self.tenant_owner
)
def test_slug_is_generated_on_save(self):
service = Service(
name="Prometheus",
operator=self.tenant_operator,
owner=self.tenant_owner,
platform=self.platform,
)
self.assertEquals("", service.slug)
service.save()
self.assertEquals("prometheus", service.slug)
def test_service_is_active_by_default(self):
service = Service(
name="Prometheus",
operator=self.tenant_operator,
owner=self.tenant_owner,
platform=self.platform,
)
self.assertEquals(ServiceStatusChoices.ACTIVE, service.status)
def test_service_has_related_services(self):
source = Service.objects.create(
name="Source",
operator=self.tenant_operator,
owner=self.tenant_owner,
platform=self.platform,
)
dest = Service.objects.create(
name="Dest",
operator=self.tenant_operator,
owner=self.tenant_owner,
platform=self.platform,
)
ServiceRelation.objects.create(
source=source,
relation=ServiceRealtionChoice.RELATED,
dest=dest,
comment="test",
)
inbound_list = dest.get_inbound_relations()
self.assertEqual(1, len(inbound_list))
self.assertEqual("test", inbound_list.first().comment)
self.assertEqual("Source", inbound_list.first().source.name)
outbound_list = source.get_outbound_relations()
self.assertEqual(1, len(outbound_list))
self.assertEqual("test", outbound_list.first().comment)
self.assertEqual("Dest", outbound_list.first().dest.name)
def test_service_has_link(self):
svc = Service.objects.create(
name="Service",
operator=self.tenant_operator,
owner=self.tenant_owner,
platform=self.platform,
)
link = Link.objects.create(
link_type=LinkTypeChoice.WEBSITE,
url="http://example.com",
description="My fancy Website",
service=svc,
)
self.assertEqual(svc.links.first().url, "http://example.com")
self.assertEqual(svc.links.first().link_type, LinkTypeChoice.WEBSITE)
self.assertEqual(svc.links.first().description, "My fancy Website")
| 2.28125 | 2 |
utils/error_log.py | Farazist/farazist-raspberrypi-app | 10 | 12784461 |
import os.path
import datetime
class ErrorLog():
@staticmethod
def writeToFile(log):
time = datetime.datetime.now()
        with open('error_log.txt', 'a', encoding='utf8') as f:
            f.write(time.strftime("%Y-%m-%d %H:%M:%S ") + log + '\n')
#test = LogFile.checkExistsFile()
#LogFile.writeToFile('test1')
| 2.953125 | 3 |
common/utils/dir_utils.py | jiahaoLjh/HDNet | 18 | 12784462 |
<reponame>jiahaoLjh/HDNet<filename>common/utils/dir_utils.py
import os
import sys
def make_folder(folder_name):
if not os.path.isdir(folder_name):
os.makedirs(folder_name)
def add_pypath(path):
if path not in sys.path:
sys.path.insert(0, path)
| 2.46875 | 2 |
fast_trainer/concepts.py | MITIBMxGraph/SALIENT_artifact | 6 | 12784463 |
from typing import Any, Optional, Callable, List
import torch
from .samplers import PreparedBatch
from .transferers import DeviceIterator
TrainCore = Callable[[torch.nn.Module, PreparedBatch], Any]
TrainCallback = Callable[[List[PreparedBatch], List[Any]], None]
TrainImpl = Callable[[torch.nn.Module, TrainCore, DeviceIterator,
torch.optim.Optimizer, Optional[TrainCallback]], None]
TestCallback = Callable[[PreparedBatch], None] # should not return anything
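# Illustrative sketch, not part of the original file: a function with the
# TrainCore shape, Callable[[torch.nn.Module, PreparedBatch], Any]. The
# `.x` and `.y` attributes on PreparedBatch are assumed here purely for
# demonstration; see samplers.PreparedBatch for the real interface.
#
#     def example_train_core(model: torch.nn.Module, batch: PreparedBatch):
#         out = model(batch.x)                              # hypothetical field
#         return torch.nn.functional.cross_entropy(out, batch.y)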
| 1.960938 | 2 |
util/tweet_download.py | jayelm/TwitterSA | 0 | 12784464 |
"""
NOTE: This is no longer used in the actual web app, since I grab automatically
labeled tweets from Sentiment140. But, I keep it around for convenience.
A script to download tweets in a .tsv of the form
Status ID User ID Sentiment
====================================
used mainly to get tweets from the SemEval Twitter sentiment corpus.
We use plain old HTML parsing to avoid API rate limits.
Inspired from aritter/twitter_download
<NAME>
"""
import requests
from bs4 import BeautifulSoup
import csv
import pickle # Standard pickle for unicode support
import sys
def scrape_tweets(files, quiet=False):
"""
Scrape tweets from a list of .tsv files with the aforementioned format.
Returns an association list of tweets and their sentiments.
Only grabs tweets labeled as "positive" or "negative" for usage in a
binary classifier.
"""
tweets = []
total_tweets = 0
for filename in files:
with open(filename, 'r') as f:
reader = csv.reader(f, delimiter='\t')
rows = list(reader)
if not quiet:
print "{}: {} tweets".format(filename, len(rows))
for sid, uid, sentiment in rows:
total_tweets += 1
print total_tweets
if not (sentiment == 'positive' or sentiment == 'negative'):
if not quiet:
print (
"Skipping neutral tweet with uid {} and sid {}".format(
uid, sid
)
)
continue
url = 'https://twitter.com/{}/status/{}'.format(uid, sid)
r = requests.get(url)
soup = BeautifulSoup(r.text)
tweet_text_paragraphs = soup.find_all('p', {'class': 'tweet-text'})
if len(tweet_text_paragraphs) > 1:
original_tweet_div = soup.find(
'div', {'class': 'js-original-tweet'}
)
tweet_text_paragraphs = original_tweet_div.find_all(
'p', {'class': 'tweet-text'}
)
if not tweet_text_paragraphs:
if not quiet:
print "Can't find tweet with uid {} and sid {}".format(
uid, sid
)
continue
tweet_text = tweet_text_paragraphs[0].text
tweets.append([tweet_text, sentiment])
print "Got {} tweets out of {}".format(len(tweets), total_tweets)
return tweets
def serialize(filename, tweets, quiet=False):
"""Output tweet lists to a file with pickle."""
if filename == '-' or not filename:
if not quiet: # Not sure why someone would specify verbose for stdout
print "Writing to stdout"
pickle.dump(tweets, sys.stdout)
else:
if not quiet:
print "Writing to {}".format(filename)
with open(filename, 'w') as fout:
pickle.dump(tweets, fout)
def handle_filename(filename):
"""Prepare the filename - if directory specified, add default name"""
if filename[-1] == '/':
filename += 'noslang.pickle'
return filename
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument(
'files', nargs='+',
help=".tsv files containing status id, user id, and sentiment"
)
parser.add_argument(
'-o', '--output', nargs="?", const='lib/tweets.pickle', default='-',
help="specify output file (defaults to lib/tweets.pickle "
"if specified without file or stdout if not specified)"
)
parser.add_argument(
# Quiet is used here because default behavior should be to output
# information about missed tweets
'-q', '--quiet', action='store_true',
help='be quiet'
)
args = parser.parse_args()
filename = handle_filename(args.output)
tweets = scrape_tweets(args.files, args.quiet)
serialize(filename, tweets, args.quiet)
| 3.15625 | 3 |
src/ghaudit/config.py | DistantThunder/ghaudit | 1 | 12784465 |
from __future__ import annotations
from functools import reduce
from os import environ
from pathlib import Path
from typing import Collection, List, Optional, Set
from typing_extensions import TypedDict
Team = TypedDict(
"Team", {"name": str, "members": List[str], "children": List[str]}
)
Organisation = TypedDict(
"Organisation", {"name": str, "owners": List[str], "teams": List[Team]}
)
Config = TypedDict("Config", {"organisation": Organisation})
def get_teams(config: Config) -> Collection[Team]:
return config["organisation"]["teams"]
def get_team(config: Config, name: str) -> Optional[Team]:
elems = [x for x in get_teams(config) if x["name"] == name]
assert len(elems) <= 1 # nosec: testing only
if elems:
return elems[0]
return None
def _get_team_exists(config: Config, name: str) -> Team:
team = get_team(config, name)
if not team:
raise RuntimeError("team {} not found".format(name))
return team
def team_name(team: Team) -> str:
return team["name"]
# direct members of a team, not taking members of descendants teams into
# account
def team_direct_members(team: Team) -> Collection[str]:
if team["members"]:
return team["members"]
return []
# effective members of a team (direct members + members of descendants teams)
def team_effective_members(config: Config, team: Team) -> Set[str]:
return reduce(
lambda acc, child: acc | set(team_direct_members(child)),
[_get_team_exists(config, x) for x in team_descendants(config, team)],
set(team_direct_members(team)),
)
def team_children(team: Team) -> Collection[str]:
if "children" in team:
return team["children"]
return []
def team_descendants(config: Config, team: Team) -> Set[str]:
def reduce_function(acc: Set[str], child_name: str) -> Set[str]:
child_team = _get_team_exists(config, child_name)
return acc | team_descendants(config, child_team) | {child_name}
if "children" in team:
return reduce(reduce_function, set(team_children(team)), set())
return set()
def team_parents(config: Config, team: Team) -> Collection[Team]:
def is_parent(entry: Team, team: Team) -> bool:
return team_name(team) in team_children(entry)
return [x for x in get_teams(config) if is_parent(x, team)]
def team_parent(config: Config, team: Team) -> Optional[Team]:
elems = team_parents(config, team)
assert len(elems) <= 1 # nosec: testing only
if elems:
return next(iter(elems))
return None
def team_ancestors(config: Config, team: Team) -> Set[str]:
ancestors = set()
parents = team_parents(config, team)
for parent in parents:
ancestors.update(team_ancestors(config, parent))
ancestors.update(map(team_name, parents))
return ancestors
def user_teams(config: Config, email: str) -> Collection[Team]:
return [x for x in get_teams(config) if email in team_direct_members(x)]
def is_owner(config: Config, email: str) -> bool:
return email in config["organisation"]["owners"]
def default_dir() -> Path:
def parent_dir() -> Path:
xdg_home = environ.get("XDG_CONFIG_HOME")
if xdg_home:
return Path(xdg_home)
home = environ.get("HOME")
if home:
return Path(home) / ".config"
return Path("/")
return parent_dir() / "ghaudit"
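# Illustrative sketch, not part of the original module: a minimal Config value
# and a few calls, with made-up team and member names.
#
#     config: Config = {
#         "organisation": {
#             "name": "example-org",
#             "owners": ["root@example.com"],
#             "teams": [
#                 {"name": "platform", "members": ["a@example.com"], "children": ["sre"]},
#                 {"name": "sre", "members": ["b@example.com"], "children": []},
#             ],
#         }
#     }
#     team_descendants(config, _get_team_exists(config, "platform"))        # {"sre"}
#     team_effective_members(config, _get_team_exists(config, "platform"))  # both addresses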
| 2.640625 | 3 |
cnn_model.py | kennethyu2017/vin_tf | 0 | 12784466 |
"""
implement a CNN network as mentioned in VIN paper.
Author: <NAME>
"""
import tensorflow as tf
from tensorflow.contrib.layers import conv2d, fully_connected, max_pool2d, dropout
'''
normal structure for each conv layer:conv -> elu -> bn -> pooling.
conv-1: 3x3 s:1, 50 channels .no padding .
max pooling: 2x2
conv-2: 3x3,s:1, 50 channels. no padding .
conv-3: 3x3,s:1. 100 channels. no padding .
max pooling: 2x2
conv-4: 3x3 s:1, 100 channels. no padding.
conv-5: 3x3 s:1, 100 channels. no padding.
fc-1:200-units. followed by elu.
fc-2: 4-units. output is logits.
output: unscaled logits of each actions:
up
left
right
down
=== state space:
s_image of grid map
s_goal
s_curr_pos of current state
'''
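# Illustrative sketch, not taken from the original repo: cnn_model_cfg is
# assumed to be a config object whose attributes are per-layer lists that get
# zipped together in create_net(). Hypothetical values matching the layout
# described above could look like:
#
#     cnn_model_cfg.conv_n_feature_maps  = [50, 50, 100, 100, 100]
#     cnn_model_cfg.conv_kernel_sizes    = [3, 3, 3, 3, 3]
#     cnn_model_cfg.conv_strides         = [1, 1, 1, 1, 1]
#     cnn_model_cfg.conv_paddings        = ['VALID'] * 5
#     cnn_model_cfg.pooling_kernel_sizes = [2, None, 2, None, None]
#     cnn_model_cfg.n_fc_units           = [200, 4]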
TRAINING_CFG = tf.app.flags.FLAGS # alias
class CNNModel:
def __init__(self, cnn_model_cfg,optimizer,is_training, scope="cnn_model"):
self.cnn_model_cfg = cnn_model_cfg
self.optimizer = optimizer
self.scope = scope
self.is_training = is_training
def create_net(self, state_inputs, labels, global_step_tensor):
"""
:param labels:
:param global_step_tensor:
:param state_inputs:
:return:
"""
prev_layer = state_inputs
conv_layers = []
fc_layers = []
with tf.variable_scope(self.scope):
# conv layers
# TODO add batch_norm to input process.
for (n_maps, kernel_size, stride, padding, activation, initializer, normalizer, norm_param,
regularizer, pooling_kernel_size, pooling_stride, keep_prob) in \
zip(
self.cnn_model_cfg.conv_n_feature_maps, self.cnn_model_cfg.conv_kernel_sizes,
self.cnn_model_cfg.conv_strides, self.cnn_model_cfg.conv_paddings, self.cnn_model_cfg.conv_activations,
self.cnn_model_cfg.conv_initializers, self.cnn_model_cfg.conv_normalizers, self.cnn_model_cfg.conv_norm_params,
self.cnn_model_cfg.conv_regularizers, self.cnn_model_cfg.pooling_kernel_sizes, self.cnn_model_cfg.pooling_strides,
self.cnn_model_cfg.conv_dropout_keep_probs):
prev_layer = conv2d(prev_layer, num_outputs=n_maps, kernel_size=kernel_size,
stride=stride, padding=padding,
activation_fn=activation,
data_format='NHWC',
normalizer_fn=normalizer,
normalizer_params=norm_param,
weights_initializer=initializer,
weights_regularizer=regularizer,
trainable=True)
if pooling_kernel_size:
# max pooling only
prev_layer = max_pool2d(prev_layer, pooling_kernel_size, pooling_stride,
padding='VALID', data_format='NHWC')
if keep_prob < 1:
prev_layer = dropout(prev_layer, keep_prob,is_training=self.is_training)
conv_layers.append(prev_layer)
##fc layers.inc output layer.
# flatten the output of last conv layer to (batch_size, n_fc_in)
prev_layer = tf.reshape(conv_layers[-1], shape=[-1,conv_layers[-1].shape[1] * conv_layers[-1].shape[2] * conv_layers[-1].shape[3]])
for n_unit, activation, initializer, normalizer, norm_param, regularizer,keep_prob \
in zip(
self.cnn_model_cfg.n_fc_units, self.cnn_model_cfg.fc_activations, self.cnn_model_cfg.fc_initializers,
self.cnn_model_cfg.fc_normalizers, self.cnn_model_cfg.fc_norm_params, self.cnn_model_cfg.fc_regularizers,
self.cnn_model_cfg.fc_dropout_keep_probs):
prev_layer = fully_connected(prev_layer, num_outputs=n_unit,
activation_fn=activation,
weights_initializer=initializer,
normalizer_fn=normalizer,
normalizer_params=norm_param,
weights_regularizer=regularizer,
trainable=True)
if keep_prob < 1:
prev_layer = dropout(prev_layer, keep_prob, is_training=self.is_training)
fc_layers.append(prev_layer)
# logits should be [batch_size, num_action]
logits = prev_layer
reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
cross_entropy_loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
predicted_classes = tf.argmax(logits, axis=1)
total_loss = tf.add_n(reg_loss + [cross_entropy_loss], name='total_loss')
train_op = self.optimizer.minimize(total_loss, global_step_tensor)
total_loss_mean = tf.reduce_mean(total_loss)
with tf.name_scope('loss'):
tf.summary.scalar(name='total_loss', tensor=total_loss_mean,
collections=[TRAINING_CFG.summary_keys])
# with tf.name_scope('d_policy_loss_da_grads'):
# d_policy_loss_da_grads=tf.gradients(ys=policy_loss,xs=actor.action_bounded_tensors)
# for i in range(len(dq_da_grads)):
# tf.summary.scalar(name='d_policy_loss_da_grads_'+str(i)+'norm',tensor=tf.norm(d_policy_loss_da_grads[i]),
# collections=[self.cnn_model_cfg.actor_summary_keys])
# == end with variable_scope() ==
return train_op, total_loss, predicted_classes
| 3.1875 | 3 |
ryu/ryu/app/Ryuretic/Ryuretic_Intf.py | Ryuretic/RAP | 2 | 12784467 |
<reponame>Ryuretic/RAP<gh_stars>1-10
#####################################################################
# Ryuretic: A Modular Framework for RYU #
# !/ryu/ryu/app/Ryuretic/Ryuretic_Intf.py #
# Authors: #
# <NAME> (<EMAIL>) #
# <NAME> (<EMAIL>) #
# Ryuretic_Intf.py #
# date 28 April 2016 #
#####################################################################
# Copyright (C) 2016 <NAME> - All Rights Reserved #
# You may use, distribute and modify this code under the #
# terms of the Ryuretic license, provided this work is cited #
# in the work for which it is used. #
# For latest updates, please visit: #
# https://github.com/Ryuretic/RAP #
#####################################################################
"""How To Run This Program
1) Ensure you have Ryu installed.
2) Save the following files to /home/ubuntu/ryu/ryu/app/Ryuretic directory
a) Ryuretic_Intf.py
b) Ryuretic.py
c) Pkt_Parse13.py
d) switch_mod13.py
3) In your controller terminal type: cd ryu
4) Enter PYTHONPATH=. ./bin/ryu-manager ryu/app/Ryuretic/Ryuretic_Intf.py
"""
#########################################################################
from Ryuretic import coupler
#################1 Import Needed Libraries 1######################
#[1] Import needed libraries here #
#########################################################################
import string, random
class Ryuretic_coupler(coupler):
def __init__(self, *args, **kwargs):
super(Ryuretic_coupler, self).__init__(*args, **kwargs)
############## 2 Add User Variables 2 ###################
#[2] Add new global variables here. #
# Ex. ICMP_ECHO_REQUEST = 8, self.netView = {} #
#################################################################
self.validNAT = 'aa:aa:aa:aa:aa:aa'
self.port_mac_map = {}
self.port_AV = {}
self.tta = {}
self.ttaAV = {}
self.check = False
self.count = 0
self.tcpConnCount = 0
self.stage = 0
self.cntrl ={'mac': 'ca:ca:ca:ad:ad:ad','ip':'192.168.0.40',
'port':None}
self.policyTbl = {}
self.keyID = 101
self.t_agent = {}
ICMP_ECHO_REPLY = 0
ICMP_ECHO_REQUEST = 8
################ 3 Proactive Rule Sets 3 ###################
#[3] Insert proactive rules defined below. Follow format below #
# Options include drop or redirect, fwd is the default. #
#####################################################################
def get_proactive_rules(self, dp, parser, ofproto):
return None, None
#fields, ops = self.honeypot(dp, parser, ofproto)
#return fields, ops
################# 4 Reactive Rule Sets 4 #####################
#[4] use below handles to direct packets to reactive user modules #
# defined in location #[5]. If no rule is added, then #
# the default self.default_Fields_Ops(pkt) must be used #
#####################################################################
# Determine highest priority fields and ops pair, if needed #
# xfields = [fields0, fields1, fields2] #
# xops = [ops0, ops1, ops2] #
# fields,ops = self._build_FldOps(xfields,xops) #
#####################################################################
def handle_eth(self,pkt):
#print "handle eth"
fields, ops = self.default_Field_Ops(pkt)
self.install_field_ops(pkt,fields,ops)
def handle_arp(self,pkt):
print "handle ARP"
#fields, ops = self.default_Field_Ops(pkt)
#fields, ops = self.respond_to_arp(pkt)
fields, ops = self.Arp_Poison(pkt) if pkt['srcip']=='1172.16.17.32' \
else self.respond_to_arp(pkt)
#fields, ops = self.arp_persist(pkt)
self.install_field_ops(pkt,fields,ops)
def handle_ip(self,pkt):
print "handle IP"
#fields, ops = self.TTL_Check(pkt) #Lab 9
fields, ops = self.default_Field_Ops(pkt)
self.install_field_ops(pkt,fields,ops)
def handle_icmp(self,pkt):
print "Handle ICMP"
#fields, ops = self.TTL_Check(pkt)
#fields, ops = self.default_Field_Ops(pkt)
#fields, ops = self.respond_to_ping(pkt)
fields, ops = self.Icmp_Redirect(pkt) if pkt['srcip']=='192.168.0.22' \
else self.respond_to_ping(pkt)
self.install_field_ops(pkt, fields, ops)
def handle_tcp(self,pkt):
#print "handle TCP"
## fields, ops = self.TTL_Check(pkt)
## if ops['op'] == 'fwd':
## fields, ops = self.Multi_MAC_Checker(pkt)
#fields, ops = self.default_Field_Ops(pkt)
#fields, ops = self.displayTCPFields(pkt)
#fields, ops = self.displayTCP(pkt)
fields, ops = self.Tcp_Redirect(pkt) if pkt['srcip']=='192.168.0.22'\
else self.displayTCP(pkt)
self.install_field_ops(pkt, fields, ops)
def handle_udp(self,pkt):
#fields, ops = self.TTL_Check(pkt)
fields, ops = self.default_Field_Ops(pkt)
self.install_field_ops(pkt, fields, ops)
# All packets not defined above are handled here.
def handle_unk(self,pkt):
fields, ops = self.default_Field_Ops(pkt)
self.install_field_ops(pkt, fields, ops)
#####################################################################
# The following are from the old NFG file.
def default_Field_Ops(self,pkt):
def _loadFields(pkt):
#keys specifies match fields for action. Default is
#inport and srcmac. ptype used for craft icmp, udp, etc.
fields = {'keys':['inport','srcmac'],'ptype':[], 'dp':pkt['dp'],
'ofproto':pkt['ofproto'], 'msg':pkt['msg'],
'inport':pkt['inport'], 'srcmac':pkt['srcmac'],
'ethtype':pkt['ethtype'], 'dstmac':None, 'srcip':None,
'proto':None, 'dstip':None, 'srcport':None, 'dstport':None,
'com':None, 'id':0}
return fields
def _loadOps():
#print "Loading ops"
#Specifies the timeouts, priority, operation and outport
#options for op: 'fwd','drop', 'mir', 'redir', 'craft'
ops = {'hard_t':None, 'idle_t':None, 'priority':10, \
'op':'fwd', 'newport':None}
return ops
#print "default Field_Ops called"
fields = _loadFields(pkt)
ops = _loadOps()
return fields, ops
#####################################################################
############ 5 Ryuretic Network Application Modules 5 ##############
#[5] Add user created methods below. Examples are provided to assist #
# the user with basic python, dictionary, list, and function calls #
######################################################################
# Confirm mac has been seen before and no issues are recorded
def TTL_Check(self, pkt):
#initialize fields and ops with default settings
fields, ops = self.default_Field_Ops(pkt)
if pkt['srcmac'] != self.validNAT:
if pkt['ttl']==63 or pkt['ttl']==127:
print 'TTL Decrement Detected on ', pkt['srcmac'], ' TTL is :', pkt['ttl']
fields, ops = self.add_drop_params(pkt,fields,ops)
else:
ops['idle_t'] = 5
print "Packet TTL: ", pkt['ttl'], ' ', pkt['srcip'],' ', \
pkt['inport'],' ', pkt['srcmac']
else:
ops['idle_t'] = 20
priority = 10
return fields, ops
def Multi_MAC_Checker(self, pkt):
fields, ops = self.default_Field_Ops(pkt)
print "*** Checking MAC ***"
#self.port_mac_map = {}
if self.port_mac_map.has_key(pkt['inport']):
if pkt['srcmac'] != self.port_mac_map[pkt['inport']]:
print " Multi-mac port detected "
fields, ops = self.add_drop_params(pkt,fields,ops)
else:
fields, ops = self.fwd_persist(pkt,fields,ops)
else:
self.port_mac_map[pkt['inport']] = pkt['srcmac']
return fields, ops
#change name to monitor_TCP for RAP
def displayTCP(self,pkt):
fields, ops = self.default_Field_Ops(pkt)
bits = pkt['bits']
dst, dstip, dstport = pkt['dstmac'], pkt['dstip'], pkt['dstport']
src, srcip, srcport = pkt['srcmac'], pkt['srcip'], pkt['srcport']
inport = pkt['inport']
send = (src,srcip,srcport,dstip)
arrive = (dst,dstip,dstport,srcip)
t_in = pkt['t_in']
#print "******\n"+self.tta+"/n********/n"+self.port_AV+"/n*********"
if bits == 20:
if self.tta.has_key(send):
self.tta[send]['stage'] = 0
else:
self.tta[arrive]['stage'] = 0
return fields, ops
if bits == 2:
if self.tta.has_key(send):
self.tta[send].update({'inport':inport,'stage':1})
else:
self.tta.update({send:{'inport':inport,'stage':1}})
return fields, ops
if bits == 18:
if self.tta.has_key(arrive):
if self.tta[arrive]['stage']==1:
self.tta[arrive].update({'syn':t_in,'stage':2})
return fields,ops
if bits == 16:
if self.tta.has_key(send):
if self.tta[send]['stage']==2:
tta = t_in - self.tta[send]['syn']
self.tta[send].update({'stage':3, 'ack':t_in, 'tta':tta})
print '** Calc TTA :', tta
if self.port_AV.has_key(self.tta[send]['inport']):
portAV = ((self.port_AV[self.tta[send]['inport']] * \
9) + tta)/10
self.port_AV[self.tta[send]['inport']] = portAV
else:
portAV = ((0.001*9)+tta)/10
self.port_AV.update({self.tta[send]['inport']:portAV})
print "****"
print "Port and TTA: ", inport, self.tta[send]['tta']
print '\nPort Averages: ', self.port_AV
print "****"
del self.tta[send]
return fields, ops
print "Persist"
fields, ops = self.tcp_persist(pkt,fields,ops)
return fields, ops
if bits == 24:
print "HTTP Push"
return fields, ops
if bits == 17:
print 'Port Averages: ', self.port_AV
if self.tta.has_key(send):
del self.tta[send]
elif self.tta.has_key(arrive):
del self.tta[arrive]
return fields, ops
print "Packet not addressed", bits, inport, src, dstip
return fields, ops
# Call to temporarily install drop parameter for a packet to switch
def add_drop_params(self, pkt, fields, ops):
#may need to include priority
fields['keys'] = ['inport']
fields['inport'] = pkt['inport']
ops['priority'] = 100
ops['idle_t'] = 60
ops['op']='drop'
return fields, ops
# Call to temporarily install TCP flow connection on switch
def tcp_persist(self, pkt,fields,ops):
fields['keys'] = ['inport', 'srcmac', 'srcip', 'ethtype', 'srcport']
fields['srcport'] = pkt['srcport']
fields['srcip'] = pkt['srcip']
ops['idle_t'] = 5
ops['priority'] = 10
return fields, ops
def fwd_persist(self, pkt,fields,ops):
ops['idle_t'] = 3
ops['priority'] = 10
return fields, ops
def arp_persist(self, pkt):
fields, ops = self.default_Field_Ops(pkt)
fields['keys'] = ['inport','srcmac','ethtype']
ops['idle_t'] = 10
ops['priority'] = 2
return fields, ops
#####################################################################
"""
The following code is implemented to allow our trusted agent to comm
with the controller and vice versa.
"""
#####################################################################
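    # Overview of the exchange implemented below: the trusted agent contacts
    # the controller by pinging self.cntrl['ip'] with an ICMP payload of the
    # form "<action>,<keyID>", where action is 'i' (register the agent),
    # 'd' (delete a flagged-host policy), 'u' (update, not implemented) or
    # 'a' (acknowledge). The controller answers with a crafted ICMP reply
    # whose payload is "a," plus the received data, while notify_TA() and
    # update_TA() push "srcmac,inport,passkey,violation,keyID" to the agent.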
#Receive and respond to arp
def respond_to_arp(self,pkt):
#print 'Respond to Arp Called'
fields, ops = self.default_Field_Ops(pkt)
if pkt['dstip'] == self.cntrl['ip']:
print "Message to Controller"
fields['keys']=['srcmac', 'srcip', 'ethtype', 'inport']
fields['ptype'] = 'arp'
fields['dstip'] = pkt['srcip']
fields['srcip'] = self.cntrl['ip']
fields['dstmac'] = pkt['srcmac']
fields['srcmac'] = self.cntrl['mac']
fields['ethtype'] = 0x0806
ops['op'] = 'craft'
ops['newport'] = pkt['inport']
#print "INPORT: ", pkt['inport']
return fields, ops
#Builds notification information for trusted agent and sends if via
# self.update_TA (may want to combine these two definitions
def notify_TA(self, pkt,status):
keyID = self.keyID
self.keyID += 1
print "Adding Violation, passkey, and updating keyID"
violation = status # 's' or 't'
#create passkey
passkey =''.join(random.choice(string.ascii_letters) for x in range(8))
#update policy table
self.policyTbl[keyID]={'inport':pkt['inport'], 'srcmac':pkt['srcmac'],
'passkey':passkey, 'violation':violation}
#Notify trusted agent of newly flagged client
self.update_TA(pkt, keyID, status)
return keyID
#Crafts tailored ICMP message for trusted agent
def update_TA(self,pkt, keyID, status):
table = self.policyTbl[keyID]
#print "Updating Trusted Agent"
fields, ops = {},{}
fields['keys'] = ['inport', 'srcip']
fields['dstip'] = self.t_agent['ip']
fields['srcip'] = self.cntrl['ip']
fields['dstmac'] = self.t_agent['mac']
fields['srcmac'] = self.cntrl['mac']
fields['dp'] = self.t_agent['dp']
fields['msg'] = self.t_agent['msg']
fields['inport'] = self.t_agent['port']
fields['ofproto']=self.t_agent['ofproto']
fields['ptype'] = 'icmp'
fields['ethtype'] = 0x0800
fields['proto'] = 1
fields['id'] = 0
fields['com'] = table['srcmac']+','+str(table['inport'])+\
','+str(table['passkey'])+','+table['violation']+\
','+str(keyID)
ops = {'hard_t':None, 'idle_t':None, 'priority':0, \
'op':'fwd', 'newport':None}
ops['op'] = 'craft'
ops['newport'] = self.t_agent['port']
self.install_field_ops(pkt, fields, ops)
#Respond to ping. Forward or respond if to cntrl from trusted agent.
def respond_to_ping(self,pkt):
fields, ops = self.default_Field_Ops(pkt)
#print "\n\nRespond to Ping"
print pkt['dstip'], self.cntrl['ip'], pkt['srcip']
if pkt['dstip'] == self.cntrl['ip'] and pkt['srcip'] == '192.168.0.1':
#print'respond to ping'
rcvData = pkt['data'].data
#Possible actions {i-init, d-delete, v-verify,
action, keyID = rcvData.split(',')
keyID = keyID.rstrip(' \t\r\n\0')
print len(keyID)
keyID = int(keyID)
print "Action is ", action
print "KeyID is ", keyID, ', ', type(keyID)
print "\n\n\n*********"
########################################
if action == 'i':
self.t_agent = {'ip':pkt['srcip'],'mac':pkt['srcmac'],
'port':pkt['inport'],'msg':pkt['msg'],
'ofproto':pkt['ofproto'], 'dp':pkt['dp']}
elif action == 'd':
#Deleting flagged host policy
print "Deleting Policy Table"
print self.policyTbl.has_key(keyID)
print self.policyTbl.keys()
if self.policyTbl.has_key(keyID):
srcmac = self.policyTbl[keyID]['srcmac']
inport = self.policyTbl[keyID]['inport']
print srcmac, ', ', inport
if self.net_MacTbl.has_key(srcmac):
print "Found MAC"
self.net_MacTbl.pop(srcmac)
if self.net_PortTbl.has_key(inport):
print "Found Port"
self.net_PortTbl.pop(inport)
self.policyTbl.pop(keyID)
            elif action == 'u':
#This is more complicated it requires data not being stored
#may need to add fields to policyTable. Maybe not.
pass
            elif action == 'a':
#Acknowledge receipt
pass
else:
print "No match"
fields['dstip'] = pkt['srcip']
fields['srcip'] = self.cntrl['ip']
fields['dstmac'] = pkt['srcmac']
fields['srcmac'] = self.cntrl['mac']
fields['ptype'] = 'icmp'
fields['ethtype'] = 0x0800
fields['proto'] = 1
fields['com'] = 'a,'+rcvData
ops['op'] = 'craft'
ops['newport'] = pkt['inport']
return fields, ops
#########################################################################
"""
The following code controls the redirection of packets from their intended
destination to our trusted agent. This occurs when a port is flagged.
"""
#########################################################################
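    # Overview of the redirection machinery below: check_net_tbl() looks up a
    # host's status by srcmac (macTbl) or inport (portTbl); addToNetView()
    # registers hosts that have no entry yet; flagInNetView() marks a host
    # (e.g. status 'test') and notifies the trusted agent; arp_tester() and
    # tcp_tester() then choose Arp_Poison() or Tcp_Redirect() to steer the
    # flagged host's traffic toward the trusted agent's port, or install a
    # drop rule instead.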
#Create a method to inject a redirect anytime the sta4 IP address is
#Check status of port and mac.
def check_net_tbl(self,mac,port=0):
if mac in self.macTbl.keys():
print mac, " found in table."
return self.macTbl[mac]['stat']
elif port in self.portTbl.keys():
print "Port ", port, " found in table."
return self.portTbl[port]['stat']
else:
return None
def addToNetView(self,pkt,status='noStat',passkey='noPass'):
keyID = self.keyID
self.netView[keyID]={'srcmac':pkt['srcmac'], 'inport':pkt['inport'],
'srcip':pkt['srcip'], 'stat':status, 'passkey':passkey}
self.macTbl[pkt['srcmac']]= keyID
        if self.portTbl.has_key(pkt['inport']):
print "Port already assigned"
else:
self.portTbl[pkt['inport']]= keyID
        self.keyID += 1
def flagInNetView(self,pkt,status):
if self.macTbl.has_key(pkt['srcmac']):
            keyID = self.macTbl[pkt['srcmac']]
if self.netView.has_key(keyID):
tbl = self.netView[keyID]
tbl['stat']=status
tbl['passkey']= ''.join(random.choice(string.ascii_letters) for x in range(8))
self.notify_TA(pkt,status)
else:
print "This MAC has yet to be added to the netView Table"
def arp_tester(self,pkt):
        #Determine whether the mac or the port already has a recorded status
pkt_status = self.check_net_tbl(pkt['srcmac'],pkt['inport'])
if pkt_status =='test':
fields,ops = self.Arp_Poison(pkt)
elif pkt_status == 'drop':
            fields, ops = self.drop_ARP(pkt)
elif pkt_status == None:
            self.addToNetView(pkt)
fields, ops = self.respond_to_arp(pkt)
else:
fields, ops = self.respond_to_arp(pkt)
return fields, ops
def Arp_Poison(self,pkt):
print "Building Arp poison"
fields, ops = self.default_Field_Ops(pkt)
if pkt['opcode'] != 2:
fields['keys']=['srcmac', 'srcip', 'ethtype', 'inport']
fields['ptype'] = 'arp'
fields['ethtype'] = 0x0806 #pkt['ethtype']
print "Ethernet Type is : ", pkt['ethtype'], type(pkt['ethtype'])
fields['srcmac'] = self.t_agent['mac']
fields['dstmac'] = pkt['srcmac']
fields['srcip'] = pkt['dstip'] #self.t_agent['ip']
fields['dstip'] = pkt['srcip']
ops = {'hard_t':None, 'idle_t':None, 'priority':100, \
'op':'craft', 'newport':pkt['inport']}
return fields,ops
def drop_ARP(self, pkt):
if pkt['dstip'] != self.t_agent['ip']:
fields, ops = self.default_Field_Ops(pkt)
fields['keys'] = ['inport', 'ethtype', 'proto']
fields['inport'] = pkt['inport']
fields['ethtype'] = pkt['ethtype']
fields['proto'] = pkt['proto']
ops['priority'] = 100
ops['op']='drop'
ops['idle_t'] = 120
print "(319) Droping ARP. Fields are: ", fields
return fields, ops
#Use this to flag during a TCP connection.
def flag_Sta4(self,pkt):
policyFlag = False
if pkt['srcip']=='192.168.0.22':
#change mac or port status in netView
self.flagInNetView(pkt,'test')
policyFlag = True
return policyFlag
def tcp_tester(self,pkt):
pkt_status = self.check_net_tbl(pkt['srcmac'], pkt['inport'])
if pkt_status =='test':
fields,ops = self.Tcp_Redirect(pkt)
elif pkt_status == 'noStat':
fields, ops = self.default_Field_Ops(pkt)
flag = self.flag_Sta4(pkt)
if flag:
self.flagInNetView(pkt,'test')
#field,ops = self.drop_TCP(pkt)
fields['keys'] = ['inport']
fields['inport'] = pkt['inport']
ops['priority'] = 100
#ops['idle_t'] = 5
ops['op']='drop'
else:
fields, ops = self.default_Field_Ops(pkt)
return fields, ops
#Redirect ICMP packets to trusted agent
def Icmp_Redirect(self,pkt):
print "Redirecting ICMP"
fields, ops = self.default_Field_Ops(pkt)
fields['keys'] = ['inport', 'ethtype']
fields['dstmac'] = self.t_agent['mac']
fields['dstip'] = self.t_agent['ip']
fields['ethtype'] = pkt['ethtype']
ops['op'] = 'redir'
ops['newport'] = self.t_agent['port']
ops['priority'] = 100
ops['idle_t'] = 180
#ops['hard_t'] = 180
return fields, ops
def Tcp_Redirect(self,pkt):
print "*\n*\nRedirecting TCP"
print pkt
fields, ops = self.default_Field_Ops(pkt)
fields['keys'] = ['inport', 'ethtype']
fields['dstmac'] = self.t_agent['mac']
fields['dstip'] = pkt['dstip'] #self.t_agent['ip']
fields['ethtype'] = pkt['ethtype']
ops['op'] = 'redir'
ops['newport'] = self.t_agent['port']
ops['priority'] = 100
ops['idle_t'] = 180
#ops['hard_t'] = 180
return fields, ops
#############################################################################
#############################################################################
def Simple_FW(self,pkt):
fields, ops = self.default_Field_Ops(pkt)
#blocking w3cschools and facebook
if pkt['dstip'] in ['192.168.127.12', '172.16.31.10']:
print "W3Cschools or Facebook is not allowed"
#tell controller to drop pkts destined for dstip
fields['keys'],fields['dstip'] = ['dstip'],pkt['dstip']
ops['priority'] = 100
ops['op']= 'drop'
ops['idle_t']=60
return fields, ops
def Stateful_FW(self,pkt):
fields, ops = self.default_Field_Ops(pkt)
        if pkt['inport'] in [1,2,3,4,5,6,7,8]:
if self.stat_Fw_tbl.has_key(pkt['srcip']):
if len(self.stat_Fw_tbl[pkt['srcip']]['dstip']) > 4:
self.stat_Fw_tbl[pkt['srcip']]['dstip'].pop(3)
                self.stat_Fw_tbl[pkt['srcip']]['dstip'].append(pkt['dstip'])
else:
self.stat_Fw_tbl[pkt['srcip']]={'dstip':[pkt['dstip']]}
return fields, ops
else:
if self.stat_Fw_tbl.has_key(pkt['dstip']):
                if pkt['srcip'] in self.stat_Fw_tbl[pkt['dstip']]['dstip']:
return fields, ops
else:
fields['keys'] = ['srcip','dstip']
fields['srcip'] = pkt['srcip']
fields['dstip'] = pkt['dstip']
ops['priority'] = 100
ops['op']='drop'
#ops['hard_t'] = 20
ops['idle_t'] = 4
return fields, ops
def honeypot(self, dp, parser, ofproto):
# This should install proactive rules that mirrors data from a
# honeypot system
fields, ops = {}, {}
fields['ethtype'] = 0x0800
fields['keys'] = ['srcip']
fields['srcip'] = '10.0.0.42'
ops['priority'] = 100
ops['op'] = 'mir'
ops['newport'] = 2
#could make this multicast as well [1,2,3]
return fields, ops
def displayTCP2(self,pkt):
fields, ops = self.default_Field_Ops(pkt)
bits = pkt['bits']
dst = pkt['dstmac']
src = pkt['srcmac']
inport = pkt['inport']
print '*******', inport, src, bits, self.stage #, self.check
if bits in [2,16,18]:
print '**SEQ: ', pkt['seq'], '\tACK ', pkt['ack'], ' **'
if bits == 2:
self.tta[src]= {}
self.tta[src]['inport'] = pkt['inport']
#self.check=True
self.stage=1
        #Somehow this is not always sent (need to resolve error that occurs here).
#So far, AP stands out, but not the NAT (comparable to NAT)
elif bits == 18 and self.stage==1:
self.tta[dst]['syn'] = pkt['t_in']
self.stage = 2
elif bits == 16 and self.stage == 2: #self.check==True:
self.stage=3
self.tta[src]['ack'] = pkt['t_in']
tta = pkt['t_in'] - self.tta[src]['syn']
if self.ttaAV.has_key(inport):
self.ttaAV[inport]= (self.ttaAV[inport] + tta)/2
else:
self.ttaAV[inport]= tta
print self.ttaAV[inport]
print '\n**** Port: ',inport,' TTA = ', tta, ' ********\n'
self.count = self.count + 1
fields, ops = self.fwd_persist(pkt,fields,ops)
#self.check = False
else:
self.stage=0
fields, ops = self.fwd_persist(pkt,fields,ops)
else:
fields, ops = self.fwd_persist(pkt,fields,ops)
print self.ttaAV
return fields, ops
| 1.898438
| 2
|
factory/tools/lib/gWftLogParser.py
|
ddbox/glideinwms
| 0
|
12784468
|
# SPDX-FileCopyrightText: 2009 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# factory/tool specific condorLogs helper
#
import binascii
import gzip
import io
import mmap
import os.path
import re
import time
from glideinwms.factory import glideFactoryLogParser
from glideinwms.lib import condorLogParser
from glideinwms.lib.defaults import BINARY_ENCODING, force_bytes
# get the list of jobs that were active at a certain time
def get_glideins(log_dir_name, date_arr, time_arr):
glidein_list = []
cldata = glideFactoryLogParser.dirSummaryTimingsOutFull(log_dir_name, cache_dir=None)
cldata.load(active_only=False)
glidein_data = cldata.data["Completed"] # I am interested only in the completed ones
ref_ctime = time.mktime(date_arr + time_arr + (0, 0, -1))
for glidein_el in glidein_data:
        glidein_id, firstTimeStr, runningStartTimeStr, lastTimeStr = glidein_el
runningStartTime = condorLogParser.rawTime2cTimeLastYear(runningStartTimeStr)
if runningStartTime > ref_ctime:
continue # not one of them, started after
lastTime = condorLogParser.rawTime2cTimeLastYear(lastTimeStr)
if lastTime < ref_ctime:
continue # not one of them, ended before
glidein_list.append(glidein_id)
return glidein_list
# get the list of log files for an entry that were active at a certain time
def get_glidein_logs_entry(factory_dir, entry, date_arr, time_arr, ext="err"):
log_list = []
log_dir_name = os.path.join(factory_dir, "entry_%s/log" % entry)
glidein_list = get_glideins(log_dir_name, date_arr, time_arr)
for glidein_id in glidein_list:
glidein_log_file = "job.%i.%i." % condorLogParser.rawJobId2Nr(glidein_id)
glidein_log_file += ext
glidein_log_filepath = os.path.join(log_dir_name, glidein_log_file)
if os.path.exists(glidein_log_filepath):
log_list.append(glidein_log_filepath)
return log_list
# get the list of log files for an entry that were active at a certain time
def get_glidein_logs(factory_dir, entries, date_arr, time_arr, ext="err"):
log_list = []
for entry in entries:
entry_log_list = get_glidein_logs_entry(factory_dir, entry, date_arr, time_arr, ext)
log_list += entry_log_list
return log_list
# extract the blob from a glidein log file starting from position
def get_Compressed_raw(log_fname, start_str, start_pos=0):
SL_START_RE = re.compile(b"%s\nbegin-base64 644 -\n" % force_bytes(start_str, BINARY_ENCODING), re.M | re.DOTALL)
size = os.path.getsize(log_fname)
if size == 0:
return "" # mmap would fail... and I know I will not find anything anyhow
with open(log_fname) as fd:
buf = mmap.mmap(fd.fileno(), size, access=mmap.ACCESS_READ)
try:
# first find the header that delimits the log in the file
start_re = SL_START_RE.search(buf, 0)
if start_re is None:
return "" # no StartLog section
log_start_idx = start_re.end()
# find where it ends
log_end_idx = buf.find(b"\n====", log_start_idx)
if log_end_idx < 0: # up to the end of the file
return buf[log_start_idx:].decode(BINARY_ENCODING)
else:
return buf[log_start_idx:log_end_idx].decode(BINARY_ENCODING)
finally:
buf.close()
# extract the blob from a glidein log file
def get_Compressed(log_fname, start_str):
raw_data = get_Compressed_raw(log_fname, start_str)
if raw_data != "":
gzip_data = binascii.a2b_base64(raw_data)
del raw_data
data_fd = gzip.GzipFile(fileobj=io.BytesIO(gzip_data))
data = data_fd.read().decode(BINARY_ENCODING)
else:
data = raw_data
return data
# extract the blob from a glidein log file
def get_Simple(log_fname, start_str, end_str):
SL_START_RE = re.compile(force_bytes(start_str, BINARY_ENCODING) + b"\n", re.M | re.DOTALL)
    SL_END_RE = re.compile(force_bytes(end_str, BINARY_ENCODING), re.M | re.DOTALL)
size = os.path.getsize(log_fname)
if size == 0:
return "" # mmap would fail... and I know I will not find anything anyhow
with open(log_fname) as fd:
buf = mmap.mmap(fd.fileno(), size, access=mmap.ACCESS_READ)
try:
# first find the header that delimits the log in the file
start_re = SL_START_RE.search(buf, 0)
if start_re is None:
return "" # no StartLog section
log_start_idx = start_re.end()
# find where it ends
log_end_idx = SL_END_RE.search(buf, log_start_idx)
if log_end_idx is None: # up to the end of the file
return buf[log_start_idx:].decode(BINARY_ENCODING)
else:
return buf[log_start_idx : log_end_idx.start()].decode(BINARY_ENCODING)
finally:
buf.close()
# extract the Condor Log from a glidein log file
# condor_log_id should be something like "StartdLog"
def get_CondorLog(log_fname, condor_log_id):
start_str = "^%s\n======== gzip . uuencode =============" % condor_log_id
return get_Compressed(log_fname, start_str)
# extract the XML Result from a glidein log file
def get_XMLResult(log_fname):
start_str = "^=== Encoded XML description of glidein activity ==="
s = get_Compressed(log_fname, start_str)
if s != "":
return s
# not found, try the uncompressed version
start_str = "^=== XML description of glidein activity ==="
end_str = "^=== End XML description of glidein activity ==="
return get_Simple(log_fname, start_str, end_str)
# extract slot names
def get_StarterSlotNames(log_fname, condor_log_id="(StarterLog.slot[0-9]*[_]*[0-9]*)"):
start_str = "^%s\n======== gzip . uuencode =============" % condor_log_id
SL_START_RE = re.compile(b"%s\nbegin-base64 644 -\n" % force_bytes(start_str, BINARY_ENCODING), re.M | re.DOTALL)
size = os.path.getsize(log_fname)
if size == 0:
return "" # mmap would fail... and I know I will not find anything anyhow
with open(log_fname) as fd:
buf = mmap.mmap(fd.fileno(), size, access=mmap.ACCESS_READ)
try:
strings = [s.decode(BINARY_ENCODING) for s in SL_START_RE.findall(buf, 0)]
return strings
finally:
buf.close()
| 2.21875
| 2
|
sim2net/speed/normal.py
|
harikuts/dsr_optimization
| 12
|
12784469
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2012 <NAME> <mkalewski at cs.put.poznan.pl>
#
# This file is a part of the Simple Network Simulator (sim2net) project.
# USE, MODIFICATION, COPYING AND DISTRIBUTION OF THIS SOFTWARE IS SUBJECT TO
# THE TERMS AND CONDITIONS OF THE MIT LICENSE. YOU SHOULD HAVE RECEIVED A COPY
# OF THE MIT LICENSE ALONG WITH THIS SOFTWARE; IF NOT, YOU CAN DOWNLOAD A COPY
# FROM HTTP://WWW.OPENSOURCE.ORG/.
#
# For bug reports, feature and support requests please visit
# <https://github.com/mkalewski/sim2net/issues>.
"""
Provides an implementation of the normal speed distribution. In this case a
speed of a node is assigned at random with the normal, i.e. Gaussian,
probability distribution.
"""
from sim2net.speed._speed import Speed
from sim2net.utility.validation import check_argument_type
__docformat__ = 'reStructuredText'
class Normal(Speed):
"""
This class implements the normal speed distribution that assigns node's
speeds with the Gaussian probability distribution.
"""
def __init__(self, mean=0.0, standard_deviation=0.2):
"""
(Defaults to **standard normal distribution**.)
*Parameters*:
- **mean** (`float`): a value of the expectation (default: `0.0`);
- **standard_deviation** (`float`): a value of the standard
deviation (default: `0.2`).
"""
super(Normal, self).__init__(Normal.__name__)
check_argument_type(Normal.__name__, 'mean', float, mean, self.logger)
self.__mean = float(mean)
check_argument_type(Normal.__name__, 'standard_deviation', float,
standard_deviation, self.logger)
self.__standard_deviation = float(standard_deviation)
self.__current_speed = None
self.get_new()
@property
def mean(self):
"""
(*Property*) A value of the expectation of type `float`.
"""
return self.__mean
@property
def current(self):
"""
(*Property*) A value of the current speed of type `float` (or `None`
        if the value has not yet been assigned).
"""
return self.__current_speed
def get_new(self):
"""
Assigns a new speed value.
.. warning::
Depending on distribution parameters, negative values may be
randomly selected.
*Returns*:
(`float`) the absolute value of a new speed.
"""
self.__current_speed = \
self.random_generator.normal(self.__mean,
self.__standard_deviation)
return self.__current_speed
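# Minimal usage sketch (illustrative only; assumes the sim2net package is
# importable and that the Speed base class provides the `random_generator`
# and `logger` attributes used above):
#
#     speed = Normal(mean=1.5, standard_deviation=0.3)
#     print(speed.current)    # value drawn in __init__ via get_new()
#     print(speed.get_new())  # draw a fresh speed value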
| 3.671875
| 4
|
pysaintcoinach/xiv/shop_listing_item.py
|
icykoneko/saintcoinach-py
| 7
|
12784470
|
<reponame>icykoneko/saintcoinach-py<filename>pysaintcoinach/xiv/shop_listing_item.py
from .interfaces import IShopListing, IShopListingItem
class ShopListingItem(IShopListingItem):
"""
General-purpose class for items to use in IShopListing.
"""
def __init__(self,
shop_item: IShopListing,
item: 'Item',
count: int,
is_hq: bool,
collectability_rating: int):
"""
Initializes a new instance of the ShopListingItem class.
:param shop_item: The IShopListing the entry is for.
:param item: The item of the entry.
:param count: The count for the entry.
:param is_hq: A value indicating whether the `item` is high-quality.
:param collectability_rating: The collectability rating of the entry.
"""
self.__shop_item = shop_item
self.__item = item
self.__count = count
self.__is_hq = is_hq
self.__collectability_rating = collectability_rating
@property
def shop_item(self) -> IShopListing:
return self.__shop_item
@property
def item(self) -> 'Item':
return self.__item
@property
def count(self) -> int:
return self.__count
@property
def is_hq(self) -> bool:
return self.__is_hq
@property
def collectability_rating(self) -> int:
return self.__collectability_rating
def __str__(self):
result = ''
if self.count > 1:
result += '{0} '.format(self.count)
result += str(self.item)
if self.is_hq:
result += ' (HQ)'
return result
def __repr__(self):
return str(self)
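# Illustrative only: for an entry with count=3, is_hq=True and an item whose
# str() is "Iron Ingot" (a made-up name, not game data), __str__ above would
# return "3 Iron Ingot (HQ)".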
| 2.59375
| 3
|
setup.py
|
cumason123/eve_echoes
| 0
|
12784471
|
<reponame>cumason123/eve_echoes<filename>setup.py
from setuptools import setup, find_packages
import pathlib
here = pathlib.Path(__file__).parent.resolve()
long_description = (here / "README.md").read_text(encoding="utf-8")
setup(
name="eve_echoes",
summary="Eve Echoes Utility Functions",
long_description_content_type="text/markdown",
long_description=long_description,
author="<NAME>",
author_email="<EMAIL>",
classifiers=[
"Intended Audience :: Information Technology",
"Intended Audience :: System Administrators",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.8",
],
packages=find_packages(exclude=["eve_echoes.tests"]),
version="0.1.0",
install_requires=["numpy>1.19,<2.0"],
)
| 1.382813
| 1
|
armstrong/core/arm_content/mixins/__init__.py
|
cirlabs/armstrong.core.arm_content
| 0
|
12784472
|
import pkg_resources
pkg_resources.declare_namespace(__name__)
from .authors import AuthorsMixin
from .images import *
from .publication import PublicationMixin
from .video import EmbeddedVideoMixin
| 1.039063
| 1
|
tests/conftest.py
|
daveisadork/PyDevNS
| 7
|
12784473
|
import functools
import logging
import os
import pytest
import tempfile
import devns
import devns.cli
from mock import MagicMock
@pytest.fixture
def config():
return devns.Config()
@pytest.yield_fixture
def resolver_dir(config):
resolvers = []
config.resolver_dir = os.path.join(
tempfile.gettempdir(),
"{0}-{1}".format(tempfile.gettempprefix(), "resolver")
)
resolvers.append(config.resolver_dir)
yield config.resolver_dir
resolvers.append(config.resolver_dir)
for resolver in filter(None, set(resolvers)):
if os.path.isdir(resolver):
os.rmdir(resolver)
@pytest.fixture
def logger(request):
return logging.getLogger(request.node.nodeid)
@pytest.fixture
def parse_args(config):
return functools.partial(devns.cli.parse_args, config=config)
@pytest.yield_fixture
def server(config, resolver_dir):
yield devns.server.DevNS(config)
@pytest.fixture
def Connection():
class Connection(object):
settimeout = MagicMock()
bind = MagicMock()
sendto = MagicMock()
def __init__(self, responses, expected):
self.responses = responses
self.expected = expected
def getsockname(self):
return "0.0.0.0", 53535
def recvfrom(self, length):
response = self.responses.pop()
if isinstance(response, tuple):
return response
raise response()
return Connection
| 2.046875
| 2
|
tests/test_all.py
|
awensaunders/Proxy-Tools
| 3
|
12784474
|
<reponame>awensaunders/Proxy-Tools
#!/usr/bin/env python3
import pytest
import subprocess
import sys
from modules import ssh
from modules import socks
from modules import configurator
import ProxyWidget
class TestSOCKS:
@pytest.fixture
def SOCKS_setup(self):
return socks.ProxyTools('Wi-Fi')
def test_SOCKS_exists(self, SOCKS_setup):
assert hasattr(SOCKS_setup, 'is_proxy_on')
def test_SOCKS_is_proxy_on(self, SOCKS_setup):
pass
#not implemented
class TestSSH:
@pytest.fixture
def ssh_setup(self):
return ssh.Tunnel()
def test_ssh_exists(self):
assert subprocess.call(['ssh'], stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL) == 255
def test_ssh_arguments(self, ssh_setup):
with pytest.raises(TypeError):
ssh_setup.start_tunnel("asdfljkl")
class TestYAML:
def test_out_equals_in(self, tmpdir):
f = str(tmpdir.join('config.yml'))
c = configurator.ConfigFile(f)
c.write_config({'name': '<NAME>', 'race': 'Human'})
print(c.read_config())
assert c.read_config() == {'name': '<NAME>', 'race': 'Human'}
def test_append_works(self, tmpdir):
f = str(tmpdir.join('config.yml'))
c = configurator.ConfigFile(f)
c.write_config({'name': '<NAME>', 'race': 'Human'})
c.append_config({'color': 'blue'})
assert c.read_config() == {'name': '<NAME>', 'race': 'Human', 'color': 'blue'} , "append_config failed to write the file"
c.write_config({'name': '<NAME>', 'race': 'Human'})
assert c.read_config() == {'name': '<NAME>', 'race': 'Human'} , "write_config failed to overwrite the appended file"
| 2.15625
| 2
|
src/ostorlab/agent/message/proto/v2/report/status_pb2.py
|
bbhunter/ostorlab
| 113
|
12784475
|
<reponame>bbhunter/ostorlab<gh_stars>100-1000
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v2/report/status.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='v2/report/status.proto',
package='',
serialized_pb=_b(
'\n\x16v2/report/status.proto\"i\n\x06status\x12\x0f\n\x07scan_id\x18\x01 \x02(\x05\x12%\n\nattributes\x18\x02 \x03(\x0b\x32\x11.status.attribute\x1a\'\n\tattribute\x12\x0b\n\x03key\x18\x01 \x02(\t\x12\r\n\x05value\x18\x02 \x02(\t')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_STATUS_ATTRIBUTE = _descriptor.Descriptor(
name='attribute',
full_name='status.attribute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='status.attribute.key', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='status.attribute.value', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=92,
serialized_end=131,
)
_STATUS = _descriptor.Descriptor(
name='status',
full_name='status',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='scan_id', full_name='status.scan_id', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attributes', full_name='status.attributes', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_STATUS_ATTRIBUTE, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=26,
serialized_end=131,
)
_STATUS_ATTRIBUTE.containing_type = _STATUS
_STATUS.fields_by_name['attributes'].message_type = _STATUS_ATTRIBUTE
DESCRIPTOR.message_types_by_name['status'] = _STATUS
status = _reflection.GeneratedProtocolMessageType('status', (_message.Message,), dict(
attribute=_reflection.GeneratedProtocolMessageType('attribute', (_message.Message,), dict(
DESCRIPTOR=_STATUS_ATTRIBUTE,
__module__='v2.report.status_pb2'
# @@protoc_insertion_point(class_scope:status.attribute)
))
,
DESCRIPTOR=_STATUS,
__module__='v2.report.status_pb2'
# @@protoc_insertion_point(class_scope:status)
))
_sym_db.RegisterMessage(status)
_sym_db.RegisterMessage(status.attribute)
# @@protoc_insertion_point(module_scope)
| 1.304688
| 1
|
01_bayes_filter/ex2_1.py
|
sanket-pixel/robotics
| 0
|
12784476
|
<reponame>sanket-pixel/robotics<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
def plot_belief(belief):
plt.figure()
ax = plt.subplot(2,1,1)
ax.matshow(belief.reshape(1, belief.shape[0]))
ax.set_xticks(np.arange(0, belief.shape[0],1))
ax.xaxis.set_ticks_position("bottom")
ax.set_yticks([])
ax.title.set_text("Grid")
ax = plt.subplot(2, 1, 2)
ax.bar(np.arange(0, belief.shape[0]), belief)
ax.set_xticks(np.arange(0, belief.shape[0], 1))
ax.set_ylim([0, 1.05])
ax.title.set_text("Histogram")
def motion_model(action, belief):
    motion_probability = np.array([[0.7, 0.2, 0.1]])
    shifted_belief = np.stack([np.roll(np.pad(belief,1), 1)[1:16], belief, np.roll(np.pad(belief,1), -1)[1:16]])
    if action == 1:
        return np.dot(motion_probability, shifted_belief)[0]
    elif action == -1:
        return np.dot(np.fliplr(motion_probability), shifted_belief)[0]
def sensor_model(observation, belief, world):
white_probability = np.array([0.1, 0.7])
black_probability = np.array([0.9, 0.3])
if observation == 0:
map_probability = np.where(world == 0, black_probability[0], black_probability[1])
elif observation == 1:
map_probability = np.where(world == 1, white_probability[1], white_probability[0])
corrected_belief = map_probability*belief
return corrected_belief/np.sum(corrected_belief)
def recursive_bayes_filter(actions, observations, belief, world):
for index in range(len(actions)):
current_observation = observations[index+1]
current_action = actions[index]
predicted_belief = motion_model(current_action, belief)
belief = sensor_model(current_observation, predicted_belief, world)
plot_belief(belief)
return belief
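# Minimal usage sketch (illustrative; assumes a 15-cell binary world, a
# uniform prior belief, and len(observations) == len(actions) + 1 as expected
# by recursive_bayes_filter above):
if __name__ == "__main__":
    world = np.array([0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0])
    belief = np.ones(15) / 15.0         # uniform prior over the 15 cells
    actions = [1, 1, 1]                 # move right three times
    observations = [1, 0, 1, 0]         # initial observation plus one per action
    belief = recursive_bayes_filter(actions, observations, belief, world)
    plt.show()                          # display the per-step belief plots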
| 2.734375
| 3
|
PhenDisco/ReleaseV1.0/information-retrieval/Query_All_HTMLVer1.py
|
DBMI/iSeeDELVE
| 0
|
12784477
|
<filename>PhenDisco/ReleaseV1.0/information-retrieval/Query_All_HTMLVer1.py
# $Id$
# Query module for information retrieval
# This is a part of the PFINDR project implemented at DBMI, UCSD
# This program writes output in formats such as HTML and JSON for displaying and exporting data
# Written by <NAME>, Aug 2012
import os,sys
import getopt
import html2text
import json
from whoosh.index import create_in
from whoosh import index, store, fields
from whoosh.fields import *
from whoosh.qparser import QueryParser
from whoosh.filedb.filestore import FileStorage
from whoosh.qparser import MultifieldParser
from whoosh import highlight
from whoosh import scoring
from whoosh import sorting
from whoosh import analysis
from whoosh.query import FuzzyTerm
import MySQLdb
from DB import Table
from TextMap import *
import textwrap
import re
##################
# Replace MySQL username and password below
MySQLname = ""
MySQLpassword = ""
# Path of indexing storage
StoragePATH = "./PhD"
##################
def checkBool(str1):
if str1.lower()=='(and' or str1.lower() =='and)' or str1.lower()=='and':
return True
if str1.lower()=='(or' or str1.lower()=='or)' or str1.lower()=='or':
return True
if str1.lower()=='(not' or str1.lower() =='not)' or str1.lower()=='not':
return True
return False
def HighlightX(terms1,docText):
docText=docText.decode('utf-8')
for value in terms1:
# Method 2
docText = re.sub(re.escape(value), ur'<strong class="match term0">\g<0></strong>', docText, flags=re.IGNORECASE | re.UNICODE)
return docText
def AddQuote(Query):
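    # Worked example (illustrative): AddQuote('heart attack AND lung cancer')
    # returns '" heart attack" AND " lung cancer"', i.e. each run of
    # non-boolean tokens is wrapped in double quotes (the leading space inside
    # each quoted phrase comes from the concatenation below).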
Temp1 = Query.strip('"').split()
Temp2 = '' + '"'
for item in Temp1:
if checkBool(item)==False:
Temp2 = Temp2 + ' ' + item.strip('"')
else:
Temp2 = Temp2 + '" ' + item.strip('"') + ' "'
Temp2 = Temp2 + '"'
return Temp2
def QueryAnalys(query,concept):
    ''' This function returns the analyzed query string, with or without
    concept-based expansion.
    query: the input query (free text by default).
    concept: whether to apply concept expansion (default False).
'''
# Process query
# uppercase in case matched 'and','or','not'
if query.find(' and ')>0:
query = query.replace(' and ', ' AND ')
if query.find(' or ')>0:
query = query.replace(' or ', ' OR ')
if query.find(' not ')>0:
query = query.replace(' not ', ' NOT ')
if query.find(' And ')>0:
query = query.replace(' And ', ' AND ')
if query.find(' Or ')>0:
query = query.replace(' Or ', ' OR ')
if query.find(' Not ')>0:
query = query.replace(' Not ', ' NOT ')
if query.find('\%93')>=0:
query = query.replace('\%93', '"')
if query.find('\%94')>=0:
query = query.replace('\%94', '"')
# Add double quotes into keyword search
# Query reformulation
substudies1 = ["phs000568.v1.p1",
"phs000487.v1.p1",
"phs000544.v1.p6",
"phs000534.v1.p1",
"phs000533.v1.p1",
"phs000532.v1.p1",
"phs000531.v1.p1",
"phs000530.v1.p1",
"phs000529.v1.p1",
"phs000528.v1.p1",
"phs000527.v1.p1",
"phs000515.v2.p1",
"phs000503.v2.p2",
"phs000499.v1.p1",
"phs000498.v1.p1",
"phs000489.v2.p6",
"phs000471.v3.p1",
"phs000470.v3.p1",
"phs000469.v3.p1",
"phs000468.v3.p1",
"phs000467.v3.p1",
"phs000466.v3.p1",
"phs000465.v3.p1",
"phs000464.v3.p1",
"phs000463.v3.p1",
"phs000441.V2.P6",
"phs000429.v1.p1",
"phs000420.v3.p2",
"phs000386.v4.p2",
"phs000377.v1.p1",
"phs000363.v5.p7",
"phs000342.v6.p7",
"phs000315.v4.p2",
"phs000307.v3.p7",
"phs000301.v1.p1",
"phs000296.v1.p2",
"phs000283.v4.p2",
"phs000282.v7.p7",
"phs000281.v4.p2",
"phs000246.v1.p1",
"phs000227.v1.p2",
"phs000184.v1.p1",
"phs000123.v1.p1"]
substudies = ["phs000568",
"phs000487",
"phs000544",
"phs000534",
"phs000533",
"phs000532",
"phs000531",
"phs000530",
"phs000529",
"phs000528",
"phs000527",
"phs000515",
"phs000503",
"phs000499",
"phs000498",
"phs000489",
"phs000471",
"phs000470",
"phs000469",
"phs000468",
"phs000467",
"phs000466",
"phs000465",
"phs000464",
"phs000463",
"phs000441",
"phs000429",
"phs000420",
"phs000386",
"phs000377",
"phs000363",
"phs000342",
"phs000315",
"phs000307",
"phs000301",
"phs000296",
"phs000283",
"phs000282",
"phs000281",
"phs000246",
"phs000227",
"phs000184",
"phs000272",
"phs000123"]
q1=''
for sub in substudies:
q1 += ' NOT ' + sub
# Browse top-level studies
if query=='top':
q2 = 'phs* OR egas* ' + q1
return unicode(q2)
if concept==True:
if query.find('phs')>=0 or query.find('egas')>=0:
return unicode(query)
if query !='top':
if query.lower().find(' and ')>=0 or query.lower().find(' or ')>=0 or query.lower().find(' not ')>=0:
# Add double quotes into query before running MetaMap
# query like this
# s = "heart attack" and ("lung cancer" or copd)
# Must add quotes before parsing
queryQuote = AddQuote(query)
#print queryQuote
#QueryParse1 = QueryParse(query)
QueryParse1 = QueryParse(queryQuote)
QueryParseStr = QueryParse1.Parse()
return unicode(QueryParseStr)
else:
ExtendedQ = QueryMap(query)
MetaMap = ExtendedQ.wrapper()
# Show the query extension
try:
MetaMapOut = ExtendedQ.getMapping(MetaMap)
except:
MetaMapOut = query
if len(MetaMapOut.strip())>0:
return unicode(MetaMapOut)
else:
return unicode(query)
else:
return unicode(query)
'''
Reformat text so that it fits the cell
'''
def ReformatTitle(TextInput):
    # Wrap the title at a maximum of 34 characters per line
Text1 = textwrap.wrap(TextInput,34)
TextInput1 = '<br>'.join(Text1[0:])
return TextInput1
'''
Reformat text so that it fits the cell
'''
def ReformatOther(TextInput):
TextInput1 = TextInput.replace(',',' <br> ')
return TextInput1
'''
Reformat text so that it fits the cell
'''
def ReformatOther1(TextInput):
TextInput1 = TextInput.replace(';',' <br> ')
return TextInput1
'''
Reformat text so that it fits the cell
'''
def ReformatWidth(TextInput,size):
Text1 = textwrap.wrap(TextInput,size)
TextInput1 = '<br>'.join(Text1[0:])
return TextInput1
def usage():
print '''Usage:
This program is for retrieval of relevant studies from dbGaP
python Query_All_HTML.py -i <input query> -p <page number> [-f] [-c] [-d] [-l]
-h, --help: This function
-i, --input <Query> : Input query
-p, --page <Page> : Page number
-f, --format: Output format, defaul as HTML, otherwise is JSON format
-c, --concept: Use concept extension or not. Default False
-d, --debug: True or False. Default False
-l, --limit: Limitation search, can be Study, StudyDesc, StudyTitle, StudyID, Variable, VariableID, VariableDesc
Examples:
python Query_All.py -i "lung cancer" -p 1 -c
'''
def AddSubstudy(StudyIDx):
# Add mother and sub-studies information on Apr 2013.
Substudies = {}
Substudies['phs000185'] = ['phs000123;Genome-Wide Association Study of Serum YKL-40 Levels','phs000184;Genome-Wide Association Study of Plasma Lp(a) Levels Identifies Multiple Genes on Chromosome 6q']
Substudies['phs000200'] = ['phs000227;PAGE: Women\'s Health Initiative (WHI)','phs000281;NHLBI GO-ESP: Women\'s Health Initiative Exome Sequencing Project (WHI) - WHISP','phs000315;WHI GARNET','phs000386;NHLBI WHI SHARe','phs000503;Women\'s Health Initiative Sight Examination (WHISE)']
Substudies['phs000182'] = ['phs000246;Fuchs\' Corneal Dystrophy: A Secondary Genome Wide Association Study']
Substudies['phs000007'] = ['phs000282;NHLBI Framingham Candidate Gene Association Resource (CARe)','phs000307;NHLBI Framingham Heart Study Allelic Spectrum Project','phs000342;NHLBI Framingham SNP Health Association Resource (SHARe)','phs000363;NHLBI Framingham SABRe CVD','phs000401;NHLBI GO-ESP: Heart Cohorts Exome Sequencing Project (FHS)']
Substudies['phs000209'] = ['phs000283;NHLBI MESA Candidate Gene Association Resource (CARe)','phs000420;NHLBI MESA SHARe','phs000403;NHLBI GO-ESP: Heart Cohorts Component of the Exome Sequencing Project (MESA)']
Substudies['phs000179'] = ['phs000296;NHLBI GO-ESP: Lung Cohorts Exome Sequencing Project (COPDGene)']
Substudies['phs000287'] = ['phs000301;PAGE: CALiCo: Cardiovascular Health Study (CHS)','phs000377;NHLBI Cardiovascular Health Study (CHS) Candidate Gene Association Resource (CARe)','phs000226;STAMPEED: Cardiovascular Health Study (CHS)','phs000400;NHLBI GO-ESP: Heart Cohorts Exome Sequencing Project (CHS)']
Substudies['phs000001'] = ['phs000429;NEI Age-Related Eye Disease Study (AREDS)']
Substudies['phs000178'] = ['phs000441;Integrated Genomic Analyses of Ovarian Carcinoma (OV)','phs000489;Comprehensive Genomic Characterization Defines Human Glioblastoma Genes and Core Pathways','phs000544;Molecular Characterization of Human Colorectal Cancer (CRC)','phs000570;Comprehensive Genomic Characterization of Squamous Cell Lung Cancers (LUSC)','phs000569;Comprehensive Molecular Portraits of Human Breast Tumors (BRCA)']
Substudies['phs000218'] = ['phs000463;TARGET: Acute Lymphoblastic Leukemia (ALL) Pilot Phase 1','phs000464;TARGET: Acute Lymphoblastic Leukemia (ALL) Expansion Phase 2','phs000465;TARGET: Acute Myeloid Leukemia (AML)','phs000466;TARGET: Kidney, Clear Cell Sarcoma of the Kidney (CCSK)','phs000467;TARGET: Neuroblastoma (NBL)','phs000468;TARGET: Osteosarcoma (OS)','phs000469;TARGET: Cell Lines and Xenografts (PPTP)','phs000470;TARGET: Kidney, Rhabdoid Tumor (RT)','phs000471;TARGET: Kidney, Wilms Tumor (WT)','phs000515;TARGET: Acute Myeloid Leukemia (AML), Induction Failure Subproject']
Substudies['phs000286'] = ['phs000498;Jackson Heart Study Allelic Spectrum Project','phs000499;NHLBI Jackson Heart Study Candidate Gene Association Resource (CARe)']
Substudies['phs000235'] = ['phs000527;CGCI: Burkitt Lymphoma Genome Sequencing Project (BLGSP)','phs000528;CGCI: HIV+ Tumor Molecular Characterization Project - Cervical Cancer (HTMCP - CC)','phs000529;CGCI: HIV+ Tumor Molecular Characterization Project - Diffuse Large B-Cell Lymphoma (HTMCP - DLBCL)','phs000530;CGCI: HIV+ Tumor Molecular Characterization Project - Lung Cancer (HTMCP - LC)','phs000531;CGCI: Medulloblastoma','phs000532;CGCI: Non-Hodgkin Lymphoma - Diffuse Large B-Cell Lymphoma (NHL - DLBCL)','phs000533;CGCI: Non-Hodgkin Lymphoma - Follicular Lymphoma (NHL - FL)','phs000534;CGCI: Office of Cancer Genomics (OCG) Grants - RC1 Human Lung Carcinogenesis']
if Substudies.has_key(StudyIDx):
print """
<div class='node mother'>
<div class="node-info" style="DISPLAY:none">"""
print ';'.join(Substudies[StudyIDx][0:])
print """
</div>
</div>
"""
else:
pass
## ======= End added
def Retrieval(Qinput,Page,concept,debug,format1,limit,PhenTab,pagePhen):
page = int(Page)
# Retrieve the storage index
schema = Schema(IDName=TEXT(stored=True),path=ID(stored=True), title=TEXT(stored=True), desc=TEXT(stored=True), Type=TEXT(stored=True), cohort=NUMERIC(stored=True), inexclude=TEXT(stored=True), platform=TEXT(stored=True), MESHterm=TEXT(stored=True), history=TEXT(stored=True), attributes=TEXT(stored=True), topic=TEXT(stored=True),disease=TEXT(stored=True),measurement=TEXT(stored=True),demographics=TEXT(stored=True),geography=TEXT(stored=True),age=TEXT(stored=True),gender=TEXT(stored=True),category=TEXT(stored=True),IRB=TEXT(stored=True),ConsentType=TEXT(stored=True),phen=TEXT(stored=True),phenID=TEXT(stored=True),phenDesc=TEXT(stored=True),phenName=TEXT(stored=True),phenCUI=TEXT(stored=True),phenMap=TEXT(stored=True), AgeMin=NUMERIC(stored=True), AgeMax=NUMERIC(stored=True), MaleNum=NUMERIC(stored=True), FemaleNum=NUMERIC(stored=True), OtherGenderNum=NUMERIC(stored=True), UnknownGenderNum=NUMERIC(stored=True), Demographics=TEXT(stored=True), phenType=TEXT(stored=True))
storage = FileStorage(StoragePATH)
ix = storage.open_index()
# limit is Default as all fields
mparser = MultifieldParser(["IDName","path","title","desc","Type","cohort","platform","topic","disease","measurement","demographics","geography","age","gender","category","phenID","phenName","phenDesc","phenCUI","phenMap","Demographics","phenType"], schema=ix.schema)
# ============= SPECIAL CASES for query =============
# If type nothing then Qinput will show the top studies
if len(Qinput.strip())==0 or Qinput.find('*')==0:
Qinput='top'
# set up if Qinput=='top'
if Qinput=='top':
limit='StudyID'
# set up if Qinput=='Search e.g. ...'
if Qinput.find('Search e.g.')==0:
try:
Qinput = Qinput.split('Search e.g.')[1].strip()
except:
Qinput = ''
# set up if Qinput=='Search ...'
if Qinput.find('Search ')==0:
try:
Qinput = Qinput.split('Search ')[1].strip()
except:
Qinput = ''
# set up if Qinput=='search ...'
if Qinput.find('search ')==0:
try:
Qinput = Qinput.split('search ')[1].strip()
except:
Qinput = ''
if debug==True:
print Qinput
# ===================================================
# Limitation search
if limit == 'Attribution':
mparser = MultifieldParser(["attributes"], schema=ix.schema)
elif limit == 'ConsentType':
mparser = MultifieldParser(["ConsentType"], schema=ix.schema)
elif limit == 'DataSet':
mparser = MultifieldParser(["phen"], schema=ix.schema)
elif limit == 'DataSetID':
mparser = MultifieldParser(["phen"], schema=ix.schema)
elif limit == 'DataSetName':
mparser = MultifieldParser(["phen"], schema=ix.schema)
elif limit == 'TopicDisease':
mparser = MultifieldParser(["disease","topic"], schema=ix.schema)
elif limit == 'Platform':
mparser = MultifieldParser(["platform"], schema=ix.schema)
elif limit == 'Geography':
mparser = MultifieldParser(["demographics"], schema=ix.schema)
elif limit == 'IRB':
mparser = MultifieldParser(["IRB"], schema=ix.schema)
elif limit == 'TopicDisease':
mparser = MultifieldParser(["topic","disease"],schema=ix.schema)
elif limit == 'Study':
mparser = MultifieldParser(["title","IDName","desc"], schema=ix.schema)
elif limit == 'Study Name':
mparser = MultifieldParser(["title"], schema=ix.schema)
elif limit == 'Study ID':
mparser = MultifieldParser(["IDName"], schema=ix.schema)
elif limit == 'Variable':
mparser = MultifieldParser(["phen"], schema=ix.schema)
elif limit == 'Variable ID':
mparser = MultifieldParser(["phenID"], schema=ix.schema)
elif limit == 'Variable Name':
mparser = MultifieldParser(["phenName"], schema=ix.schema)
elif limit == 'Variable Description':
mparser = MultifieldParser(["phenDesc"], schema=ix.schema)
# ------- SORT BY RELEVANCE BMF25 algorithm ---
with ix.searcher() as searcher:
# ====================================================================
# Retrieve the first page to show in the interface
if Qinput.strip()[0]=='(' and Qinput.strip()[-1]==')':
query_text = QueryAnalys(Qinput, False)
elif Qinput[0]=='"' and Qinput[1]=='(' and Qinput[len(Qinput)-2]==')' and Qinput[len(Qinput)-1]=='"':
query_text = QueryAnalys(Qinput, False)
elif Qinput.find(':')>=0:
query_text = QueryAnalys(Qinput, False)
else:
query_text = QueryAnalys(Qinput, concept)
if Qinput!='top':
if query_text.find(':')==-1 and query_text.find('"')==-1:
query_text = '"' + query_text.strip() + '"'
query = mparser.parse(query_text)
if debug==True:
print query
results = searcher.search_page(query,page,pagelen=25,terms=True)
        # Added on May 2013; note that this works only for concept-based search
if concept==True:
hl1=query_text.split('"')
hl2 = [hl1[item].strip() for item in xrange(1,len(hl1),2)]
if len(hl1)==1:
hl2=hl1
else:
hl_temp = query_text.replace('AND',':::')
hl_temp = hl_temp.replace('OR',':::')
hl2 = [item.strip() for item in hl_temp.split(':::')]
if query_text.find(':')>0:
hl_temp = query_text.replace('AND',':::')
hl_temp = hl_temp.replace('OR',':::')
hl2 = [item.strip() for item in hl_temp.split(':::')]
# =====================================================================
#### CHECK CONCEPT-BASED SEARCH IS OK, IF NOT THEN SKIP
# Check if MetaMap are errors, default is concept-based
if results.total==0:
query_text = QueryAnalys(Qinput, False)
query = mparser.parse(query_text)
results = searcher.search_page(query,page,pagelen=25,terms=True)
# ======================================================================
# Get all Study ID
AllStudy={}
resultALL = searcher.search(query,limit=None)
for hit in resultALL:
AllStudy[hit['path']]=1
# ---------- SET UP RESULTS FRAGMENTS -----------------
results.fragmenter = highlight.SentenceFragmenter(charlimit=100000000)
pagecount = results.pagecount
if len(results) >= 0:
if format1 =='html':
print """<html>
<head>
<title>PhenDisco - Search Results</title>
<link rel="icon" type="image/ico" href="./images/favicon.ico" />
<link href="./css/styles.css" rel="stylesheet" type="text/css">
<script src="./js/jquery-1.8.2.min.js" language="JavaScript" type="text/javascript"></script>
<script src="./js/utility.js" language="JavaScript" type="text/javascript"></script>
<script src="./js/variableDefinitions.js" language="JavaScript" type="text/javascript"></script>
<script type="text/javascript" src="./lib/jquery.autocomplete.js"></script>
<script type="text/javascript" src="./lib/localdata.js"></script>
<meta name="robots" content="index, folow"/>
<link rel="stylesheet" type="text/css" href="./lib/jquery.autocomplete.css" />
<script type="text/javascript">
$().ready(function() {
$("#search-bar").focus().autocomplete(keywords);
});
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-15356450-11', 'ucsd.edu');
ga('send', 'pageview');
</script>
</head>
<body>
<!-- Front page main container -->
<div id='main-container'>
<div id="help-tooltip" class='rounded shadow border'></div>
<!-- main page header -->
<div id='main-header' class='rounded shadow border'>
<div class='top'>
<a href='./index.php'><h1 class='alignleft'>Phenotype Discoverer</h1></a>
<div style="clear: both;"></div>
</div>
<div class='body'>
<a href='./'><img src="PhenDisco_Logo.jpg" alt="logo" height="87" width="80" style='float:left;'></a>
<div>
<form method="get" action="./query.php" autocomplete="off">"""
if Qinput!='top':
print '<input id="search-bar" type="text" size="50" name="Fname" value=\'%s\'>' % query_text
else:
print '<input id="search-bar" type="text" size="50" name="Fname" value=\'%s\'>' % Qinput
print """<input type="hidden" name="Page" value="1">
<input type="hidden" name="PhenTab" value="0">
<input type="hidden" name="phenPage" value="1">
<button type="submit" class="search">Search</button>
<input type="checkbox" name="search-checkbox"
"""
if concept==True:
print 'checked'
print """
>Concept-based</br>
</div>
<div>
<a href='./AdvanceSearchPage.html'><button type='button' id='advanced-button' >Advanced Search</button></a><button type='button' id='limit-button' >Limits</button>
</div>
</div>
<div class='bottom'>
<div class='container'>
<img id='search-bar-help' class='alignright' src="./images/help.png" alt="help" height="24" width="24" border="0">
</div>
<div style="clear: both;"></div>
</div>
</div>
<div id='limit-container' class='hide rounded shadow border'>
<div class='top'>
<h3 class ='alignleft'>Limits</h3>
</div>
<div class="body">
<div class='container'>
<label for="LimitsField">Field:</label>
<select name="LimitField" size="1">
<option selected="selected" value="AllFields">All Fields</option>
<option value="Attribution">Attribution</option>
<option value="ConsentType">Consent Type</option>
<option value="Dataset">Dataset</option>
<option value="DatasetID">Dataset ID</option>
<option value="DatasetName">Dataset Name</option>
<option value="TopicDisease">Disease</option>
<option value="Platform">Genotype Platform</option>
<option value="Geography">Geography</option>
<option value="IRB">IRB</option>
<option value="Study">Study</option>
<option value="Study ID">Study ID</option>
<option value="Study Name">Study Name</option>
<option value="Variable">Variable</option>
<option value="Variable Description">Variable Description</option>
<option value="Variable ID">Variable ID</option>
<option value="Variable Name">Variable Name</option> </select>
</div>
</div>
<div class='bottom'> """
print """
<button type='button' class='alignright' id='limit-reset-button'>Reset</button>
<div style="clear: both;"></div>
</div>
</form>
</div>
<!-- Results Body content -->
<div id='results-body' class='rounded shadow border'>
<!-- Meta Data Options -->
<!-- Navigation -->
<div id='results-body-right-container'>
<!-- Navigation START -->
<!-- Getting Started -->
<div class='container'>
<div class='top'>
<h2>Getting Started</h2>
</div>
<div class='body'>
<ul>
<li><a href="./">PhenDisco</a></li>
<li><a href="./query.php?Fname=top&Page=1&PhenTab=1&phenPage=1&LimitField=StudyID&search-checkbox=on">Browse Top Level Studies</a></li>
<li><a href="http://pfindr-data.ucsd.edu/_PhDVer1/Manual.pdf">Download Manual</a></li>
<li><a href="http://pfindr-data.ucsd.edu/_PhDVer1/LDAM.png">Download PhenDisco domain model</a></li>
<li><a href="http://pfindr-data.ucsd.edu/_PhDVer1/sdGapMR_3.25.13.owl">Download PhenDisco ontology</a></li>
</ul>
</div>
</div>
<hr>
<!-- Important Links -->
<div class='container'>
</div>
<!-- Navigation ENDS -->
</div>
<!-- Results -->
<div id='results-body-left-container'>
<div class='top'>
<h2>Results</h2>
</div>
<div class='body'>"""
startNum = 25*(page-1)+1
if results.total > startNum + 25:
endNum = startNum + 24
else:
endNum = results.total
if Qinput.find('"')>=0:
Qinput = Qinput.replace('"','%22')
Qinput = Qinput.replace(' ','+')
if concept==True:
check1 = 'on'
else:
check1='off'
print """
<div class='container alignright spaced '>
<button id="resultsDisplayButton">Display All</button>
<button id="resultsCheckButton">Check All</button>
<form style="display:inline" action="check-form.php" method="post">
<button type="submit">Export Selections</button>
</div> """
print """
</div>
<div style="clear: both;"></div>
<div class='body'>
<div id="tooltip" class='rounded shadow border'></div>
<div id="nodeToolTip" class="rounded shadow border"></div>
<div>
<ul id="tabs">"""
if PhenTab==0:
print """
<li class="target" id="tabs-1">Studies</li>
<li id="tabs-2">Variables</li>"""
else:
print """
<li id="tabs-1">Studies</li>
<li class="target" id="tabs-2">Variables</li>"""
print """ </ul>
</div>
<div class="wrapper1">
<div class="div1">
</div>
</div>"""
if PhenTab==0:
print """
<div id="tabs-1" class="wrapper2">
"""
else:
print """
<div id="tabs-1" class="wrapper2 hide">
"""
if startNum <= endNum:
print 'Displaying: ' + str(startNum) + ' - '+str(endNum) +' of '+str(results.total) + ' studies.'
if page<=1:
print '<p class="alignright spaced"><a href="query.php?Fname=%s&Page=%s&PhenTab=0&phenPage=1&LimitField=%s&search-checkbox=%s"><img src="./images/arrow_stop_left.png" alt="help" height="16" width="16" border="0"></a> <a href="query.php?Fname=%s&Page=%s&PhenTab=0&phenPage=1&LimitField=%s&search-checkbox=%s"><img src="./images/arrow_left.png" alt="help" height="16" width="16" border="0"></a>' %(Qinput,1,limit,check1,Qinput,1,limit,check1)
else:
print '<p class="alignright spaced"><a href="query.php?Fname=%s&Page=%s&PhenTab=0&phenPage=1&LimitField=%s&search-checkbox=%s"><img src="./images/arrow_stop_left.png" alt="help" height="16" width="16" border="0"></a> <a href="query.php?Fname=%s&Page=%s&PhenTab=0&phenPage=1&LimitField=%s&search-checkbox=%s"><img src="./images/arrow_left.png" alt="help" height="16" width="16" border="0"></a>' %(Qinput,1,limit,check1,Qinput,page-1,limit,check1)
print " Page " + str(Page) +" of " + str(pagecount)
if page<=pagecount-1:
print '<a href="query.php?Fname=%s&submit=Search&Page=%s&PhenTab=0&phenPage=1&LimitField=%s&search-checkbox=%s"><img src="./images/arrow_right.png" alt="help" height="16" width="16" border="0"></a> <a href="query.php?Fname=%s&submit=Search&Page=%s&PhenTab=0&phenPage=1&LimitField=%s&search-checkbox=%s"><img src="./images/arrow_stop_right.png" alt="help" height="16" width="16" border="0"></a></p>' %(Qinput,page+1,limit,check1,Qinput,pagecount,limit,check1)
else:
print '<a href="query.php?Fname=%s&submit=Search&Page=%s&PhenTab=0&phenPage=1&LimitField=%s&search-checkbox=%s"><img src="./images/arrow_right.png" alt="help" height="16" width="16" border="0"></a> <a href="query.php?Fname=%s&submit=Search&Page=%s&PhenTab=0&phenPage=1&LimitField=%s&search-checkbox=%s"><img src="./images/arrow_stop_right.png" alt="help" height="16" width="16" border="0"></a></p>' %(Qinput,pagecount,limit,check1,Qinput,pagecount,limit,check1)
# Added on June 1, 2013
print """
<!-- Meta Data Options -->
<div class='container alignleft'>
<div class='top'>
<h2>Study Display Options</h2>
</div>
<div class='body'>
<div id="display-options-help-tooltip" class='rounded shadow border'></div>
<table id='filters'>
<tr>
<td><input type="checkbox" checked="true" class='filter' id="title" >Title</td>
<td><input type="checkbox" checked="true" class='filter' id="embargoRelease" >Embargo Release</td>
<td><input type="checkbox" class='filter' id="links" >Links</td>
<td><input type="checkbox" class='filter' id="geography" >Geography</td>
</tr>
<tr>
<td><input type="checkbox" checked="true" class='filter' id="studyType" >Study Type</td>
<td><input type="checkbox" checked="true" class='filter' id="platform" >Platform</td>
<td><input type="checkbox" class='filter' id="consentType" >Consent Type</td>
<td><input type="checkbox" class='filter' id="irb" >IRB</td>
</tr>
<tr>
<td><input type="checkbox" checked="true" class='filter' id="sampleSize" >Sample Size</td>
<td><input type="checkbox" checked="true" class='filter' id="details" >Details</td>
<td><input type="checkbox" class='filter' id="topicDisease" >Topic Disease</td>
</tr>
</table>
</div>
</div>
<div style="clear: both;"></div>
<div class='bottom'>
<div class='container alignleft'>
<button type=button id='filter_apply' >Apply</button>
<button type=reset id='filter_defaults' >Restore Defaults</button>
</div>
<div class='container alignright'>
<img src="./images/help.png" id='display-options-help' alt="help" height="24" width="24" border="0">
</div>
</div>
<div style="clear: both;"></div>
<hr>
"""
# SHOWING THE RESULTS OF STUDIES
if results.total==0:
print "Your search returned 0 studies"
else:
# Print header table first
print """ <table id='results_table' class='results' >
<thead>
<th></th>
<th class='title'>Title</th>
<th class='embargoRelease'>Embargo Release</th>
<th class='details'>Details</th>
<th class='sampleSize'>Participants</th>
<th class='studyType'>Type of Study</th>
<th class='platform'>Platform</th>
<th class='links'>Links</th>
<th class='geography'>Geography</th>
<th class='irb'>IRB</th>
<th class='consentType'>Consent Type</th>
<th class='topicDisease'>Topic Disease</th>
</thead>
<tbody>
"""
## DISPLAYING THE RESULTS
# ===========================================================
# Display studies
# ===========================================================
PhenOut = {}
# Connect to the database
# Replace MySQL username and password below
db = MySQLdb.connect("localhost", MySQLusername, MySQLpassword, "<PASSWORD>")
#records = Table(db, "Phenotypes")
records = Table(db, "Phenotypes")
Release1 = Table(db, "Release1")
Study1 = Table(db, "Study")
Abstraction1 = Table(db, "AbstractionNew")
URL = 'http://www.ncbi.nlm.nih.gov/projects/gap/cgi-bin/study.cgi?study_id='
rank = 0
num = 25*(page-1)
for hit in results:
# DEBUGGING
if debug==True:
print hit.matched_terms()
MAXRANK = resultALL.score(0)
scoreRank = results.score(rank)
Rate = int(5*scoreRank/MAXRANK)
if Rate == 0:
Rate = 1
rank = rank + 1
PhenOut[num]=[]
# ----- Print rank score ------
URL1 = URL + hit["path"].split('.')[0]
# If the title does not contain the search keywords
if len(hit.highlights("title"))==0:
if format1=='html':
try:
print "<tr id='results_tablerow_" + str(num) + "'>"
print "<td><input id='results_tablerow_" + str(num) + "'" + ' type="checkbox"' + ' name="tags[]"' + ' value="' + hit["path"] +'"/>'
# Added on Apr 29, 2013
id_temp1=hit["path"].strip().split('.')[0]
AddSubstudy(id_temp1)
print '</td>'
# End Added
print '<td class="title"> <a href="'+ URL1 +'" target="_blank">'+ hit["path"] +'<br>'+ html2text.html2text(hit["title"]) + '</a>'
if Qinput!='top':
print ' </td>'
temp1 = html2text.html2text(hit.highlights("desc")).lower().replace('*','')
found =any(item in temp1 for item in hl2)
if len(hit.highlights("desc"))>0 and found==True:
print '<td class="highlight1" style="DISPLAY:none">' + HighlightX(hl2,temp1) + '</td>'
else:
print '<td class="highlight1" style="DISPLAY:none">' + ' '.join(hit["desc"].split()[:30]) + '</td>'
except:
print "<tr id='results_tablerow_" + str(num) + "'>"
print "<td><input id='results_tablerow_" + str(num) + "'" + ' type="checkbox"' + ' name="tags[]"' + ' value="' + hit["path"] +'"/>'
# Added on Apr 29, 2013
id_temp1=hit["path"].strip().split('.')[0]
AddSubstudy(id_temp1)
print '</td>'
# End added
print '<td class="title"> <a href="' + URL1 + '" target="_blank">'+ hit["path"] +'<br>' + hit["title"] + '</a>'
if Qinput!='top':
print ' </td>'
temp1 = html2text.html2text(hit.highlights("desc")).lower().replace('*','')
found =any(item in temp1 for item in hl2)
if len(hit.highlights("desc"))>0 and found==True:
print '<td class="highlight1" style="DISPLAY:none">' + HighlightX(hl2,temp1) + '</td>'
else:
print '<td class="highlight1" style="DISPLAY:none">' + ' '.join(hit["desc"].split()[:30]) + '</td>'
pass
else:
# If the title contains search terms, highlight them
terms = [text for fieldname, text in query.existing_terms(ix.reader(), phrases=True, expand=True) if fieldname == "title"]
if format1=='html':
#pass
try:
print "<tr id='results_tablerow_" + str(num) + "'>"
print "<td><input id='results_tablerow_" + str(num) + "'" + ' type="checkbox"' + ' name="tags[]"' + ' value="' + hit["path"] +'"/>'
# Added on Apr 29, 2013
id_temp1=hit["path"].strip().split('.')[0]
AddSubstudy(id_temp1)
print '</td>'
# End added
# Note: Convert HTML into text then match into terms
temp1 = html2text.html2text(hit.highlights("title")).lower().replace('*','')
found =any(item in temp1 for item in hl2)
if found==True:
print '<td class="title"> <a href="' + URL1 + '" target="_blank">' + hit["path"] +'<br>'+ HighlightX(hl2,hit["title"]) +'</a>'
else:
print '<td class="title"> <a href="' + URL1 + '" target="_blank">' + hit["path"] +'<br>'+ hit["title"] +'</a>'
if Qinput!='top':
print ' </td>'
temp1 = html2text.html2text(hit.highlights("desc")).lower().replace('*','')
found =any(item in temp1 for item in hl2)
if len(hit.highlights("desc"))>0 and found==True:
print '<td class="highlight1" style="DISPLAY:none">' + HighlightX(hl2,temp1) + '</td>'
else:
print '<td class="highlight1" style="DISPLAY:none">' + ' '.join(hit["desc"].split()[:30]) + '</td>'
except:
print "<tr id='results_tablerow_" + str(num) + "'>"
print "<td><input id='results_tablerow_" + str(num) + "'" + ' type="checkbox"' + ' name="tags[]"' + ' value="' + hit["path"] +'"/>'
# Added on Apr 29, 2013
id_temp1=hit["path"].strip().split('.')[0]
AddSubstudy(id_temp1)
print '</td>'
# End added
print '<td class="title"> <a href="' + URL1 + '" target="_blank">'+ hit["path"] +'<br>' + hit["title"] + '</a>'
if Qinput!='top':
#print '<div class="rating"> <div class="rank' + str(Rate) + '"> </div></div>'
print ' </td>'
#print len(hit.highlights("desc"))
temp1 = html2text.html2text(hit.highlights("desc")).lower().replace('*','')
found =any(item in temp1 for item in hl2)
if len(hit.highlights("desc"))>0 and found==True:
#print '<td class="highlight1" style="DISPLAY:none">' + hit.highlights("desc") + '</td>'
print '<td class="highlight1" style="DISPLAY:none">' + HighlightX(hl2,temp1) + '</td>'
else:
print '<td class="highlight1" style="DISPLAY:none">' + ' '.join(hit["desc"].split()[:30]) + '</td>'
pass
if format1=='html':
# Print HTML Table here
# Print Embargo Release
studyid1 = hit["path"].split('.')[0]
print "<td class=\"embargoRelease\">"
print ReformatOther(Release1.getitem(studyid1)[0][1])
print "</td>"
# Print VDAS
print """<td class='details'>
<div class='detailsImg v on alignleft'>
</div>
<div class='detailsImg d off alignleft'>
</div>
<div class='detailsImg a off alignleft'>
</div>
<div class='detailsImg s off alignleft'>
</div>
<div style="clear: both;"></div>"""
print '<td class="PhenNum" style="DISPLAY:none">' + str(records.phencount(hit["path"].split('.')[0])) + '</td>'
print""" </td>"""
# Print sample size
print "<td class=\"sampleSize\">"
print hit["cohort"]
print "</td>"
# Print Study type
print "<td class=\"studyType\">"
print ReformatOther(hit["Type"])
print "</td>"
# Print Platform
platform = Study1.getPlatform(studyid1)
print "<td class=\"platform\">"
print platform
print "</td>"
# Print Links
print "<td class=\"links\">"
print "n/a"
print "</td>"
# Print Geography
print "<td class=\"geography\">"
studyid1 = hit["path"].split('.')[0]
geo = Abstraction1.getGeoNew(studyid1)
print geo.upper()
print "</td>"
# Print IRB
print " <td class='irb'>"
IRBStr = Abstraction1.getIRBNew(studyid1)
print IRBStr
print "</td>"
# Print Consent Type
print " <td class='consentType'>"
ConsentText = Abstraction1.getConsentNew(studyid1)
print ConsentText
print "</td>"
# Print Topic Disease
print " <td class='topicDisease'>"
Disease1 = Abstraction1.getDiseaseNew(studyid1)
print ReformatOther1(Disease1)
print "</td>"
print "</tr>"
PhenOut[num].append((hit["path"],hit["title"],URL1,hit["desc"],records.phencount(hit["path"].split('.')[0]),records.datasetcount(hit["path"]), hit["Type"],hit["cohort"]))
num = num + 1
# ========================================================
# ============= PRINT PHENOTYPE VARIABLES ================
# ========================================================
# Print header
print """
</tbody>
</table>
</div>"""
if PhenTab==0:
print """
<div id="tabs-2" class='wrapper2 hide'>
"""
else:
print """
<div id="tabs-2" class='wrapper2'>
"""
print """
<div id="definitionTooltip" class='rounded shadow border'></div>"""
# ========================================================
# Find all ID variables
#print AllStudy
IDListQuery = ' '
for key in AllStudy:
IDListQuery+=key.split('.')[0] + '|'
IDListQuery = IDListQuery.strip('|').strip('"').strip()
PhenQuery="'" + '|'.join(hl2[0:]).replace('(','').replace(')','').replace('"','') + "'"
records = Table(db, "Phenotypes1")
try:
# Work with extended search - concept search
PhenNum1 = records.countPhenLimit(PhenQuery,IDListQuery)
PhenOutS1 = records.getPhenLimit(PhenQuery,IDListQuery,pagePhen)
except:
PhenNum1=0
PhenOutS1 = ''
if Qinput=='top':
PhenNum1 = records.countPhenAll()
PhenOutS1 = records.getPhenAll(pagePhen)
# ===============================================================================
# SHOWING THE PHENOTYPE RESULTS
# ===============================================================================
if PhenNum1==0:
print "Your search return 0 variables"
else:
print """ <table id='variables_table' class='variables' >
<thead>
<th></th>
<th class='study'>Study</th>
<th class='name'>Variable ID/Name</th>
<th class='description'>Description</th>
<th class='category'>Category</th>
</thead>
<tbody> """
pagePhenMax = PhenNum1/25 + 1
startNum1 = 25*(pagePhen-1)+1
if PhenNum1 > startNum1 + 25:
endNum1 = startNum1 + 24
else:
endNum1 = PhenNum1
if PhenNum1>0:
print 'Displaying: ' + str(startNum1) + ' - ' +str(endNum1) +' of '+str(PhenNum1) + ' variables.'
if pagePhen<=1:
print '<p class="alignright spaced"><a href="query.php?Fname=%s&Page=%s&PhenTab=1&phenPage=%s&LimitField=%s&search-checkbox=%s"><img src="./images/arrow_stop_left.png" alt="help" height="16" width="16" border="0"></a> <a href="query.php?Fname=%s&Page=%s&PhenTab=1&phenPage=%s&LimitField=%s&search-checkbox=%s&PhenTab=1"><img src="./images/arrow_left.png" alt="help" height="16" width="16" border="0"></a>' %(Qinput,1,1,limit,check1,Qinput,1,1,limit,check1)
else:
print '<p class="alignright spaced"><a href="query.php?Fname=%s&Page=%s&PhenTab=1&phenPage=%s&LimitField=%s&search-checkbox=%s"><img src="./images/arrow_stop_left.png" alt="help" height="16" width="16" border="0"></a> <a href="query.php?Fname=%s&Page=%s&PhenTab=1&phenPage=%s&LimitField=%s&search-checkbox=%s"><img src="./images/arrow_left.png" alt="help" height="16" width="16" border="0"></a>' %(Qinput,1,1,limit,check1,Qinput,1,pagePhen-1,limit,check1)
print " Page " + str(pagePhen) +" of " + str(pagePhenMax)
if pagePhen<=pagePhenMax-1:
print '<a href="query.php?Fname=%s&submit=Search&Page=%s&PhenTab=1&phenPage=%s&LimitField=%s&search-checkbox=%s"><img src="./images/arrow_right.png" alt="help" height="16" width="16" border="0"></a> <a href="query.php?Fname=%s&submit=Search&Page=%s&PhenTab=1&phenPage=%s&LimitField=%s&search-checkbox=%s"><img src="./images/arrow_stop_right.png" alt="help" height="16" width="16" border="0"></a></p>' %(Qinput,1,pagePhen+1,limit,check1,Qinput,1,pagePhenMax,limit,check1)
else:
print '<a href="query.php?Fname=%s&submit=Search&Page=%s&PhenTab=1&phenPage=%d&LimitField=%s&search-checkbox=%s"><img src="./images/arrow_right.png" alt="help" height="16" width="16" border="0"></a> <a href="query.php?Fname=%s&submit=Search&Page=%s&PhenTab=1&phenPage=%s&LimitField=%s&search-checkbox=%s"><img src="./images/arrow_stop_right.png" alt="help" height="16" width="16" border="0"></a></p>' %(Qinput,1,pagePhenMax,limit,check1,Qinput,1,pagePhenMax,limit,check1)
# ===============================================================================
numPhen = 0
num1 = 0
# Only print the table if PhenNum1 > 0
if PhenNum1>0:
# Print variables
while num1<=min(24,len(PhenOutS1)-1):
print "<tr id='variables_tablerow_" + str(num1) + "'>"
StudyID1 = PhenOutS1[num1][1]
# Print out for checked form
temp1p = '","'.join(PhenOutS1[num1][0:]).replace('>',' ')
print '<td><input id="variables_tablerow_' + str(num1) + '" type="checkbox" name="tags[]" value=\'"' + temp1p + '"\'/></td>'
URL1 = 'http://www.ncbi.nlm.nih.gov/projects/gap/cgi-bin/study.cgi?study_id='
# Print study name and title
print '<td class="study"> <a href="' + URL1 + StudyID1.split('.')[0] + '" target="_blank" >'
print StudyID1
print '<br>'
print Study1.getTitle(StudyID1)
print "</a></td>"
# Print Variable ID
print "<td class='name'> <div class='id'>"
VarLink='http://www.ncbi.nlm.nih.gov/projects/gap/cgi-bin/variable.cgi?study_id=' + StudyID1 + '&phv=' + str(int(PhenOutS1[num1][0].split('.')[0].split('phv')[1]))
print '<a href="' + VarLink + '">' + PhenOutS1[num1][0] + '</a>'
print "</div>"
# Print Variable Name
print "<div class='var_name'>"
print PhenOutS1[num1][4]
print "</div>"
PhenInfo=''
for PhenX in PhenOutS1[num1]:
PhenX = PhenX.replace(';',' | ') + ';'
PhenInfo+=PhenX
print '<td class="highlight2" style="DISPLAY:none">' + PhenInfo + '</td>'
# Print Description
print "<td class='description'>"
print HighlightX(hl2,unicode(PhenOutS1[num1][5]))
print "</td>"
# Print Category
print "<td class='category'>"
phenCat1 = PhenOutS1[num1][-1].split(';')
if len(phenCat1)>0:
for phen1 in phenCat1:
print '<span>' + phen1 + '</span><br><br>'
print "</td></tr>"
num1+=1
# End form for checkbox
print '<noscript><input type="submit"></noscript> '
print '</form>'
# ===============================================
# Print Footer
# ===============================================
print """
</tbody>
</table>
</div>
</div>
</div>
<div style="clear: both;"></div>
</div>
<div id='main-footer' class='rounded shadow border'>
<h4 id="dbv">Database Version: Last update July 1, 2013. Next update September 1, 2013 </h4>
<br>
<h5 style="font-size:x-small;font-weight:normal"> PhenDisco is based on a mix of computer- and human-curated data. For this reason, it is not 100% accurate. There may be cases in which relevant studies are missed, as well as cases in which irrelevant studies are ranked highly. We encourage users to contact us at email <EMAIL> when they encounter these kinds of problems. This work was funded by grant UH2HL108785 from NHLBI, NIH. </h5>
<div style="clear: both;"></div>
</div>
</div>
</body>
</html>
"""
# If JSON output was requested
if format1=='json':
ToJSON = json.dumps(PhenOut)
print ToJSON
# Disconnect database
db.close()
def main():
version = '1.0'
debug = False
Qinput = ''
Page = 1
concept = False
format1 = 'html'
limit = ''
# PhenTab: False-0, True-1. True then display Phen Tab
PhenTab = 0
pagePhen = 1
try:
options, remainder = getopt.getopt(sys.argv[1:], 'i:p:z:l:t:fhdcv', ['input=','page=','pagePhen=','limit=','tab=','format','help','debug','concept','version'])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in options:
if opt in ('-h', '--help'):
usage()
sys.exit()
elif opt in ('-i', '--input'):
Qinput = arg
elif opt in ('-p', '--page'):
Page = arg
elif opt in ('-z', '--pagePhen'):
pagePhen = int(arg)
elif opt in ('-c', '--concept'):
#print "Search with concept extension"
concept = True
elif opt in ('-f', '--format'):
format1 = 'json'
elif opt in ('-d', '--debug'):
debug = True
elif opt in ('-v', '--version'):
version = '1.0'
elif opt in ('-l', '--limit'):
limit = arg
elif opt in ('-t', '--tab'):
PhenTab = int(arg)
Retrieval(Qinput,Page,concept,debug,format1,limit,PhenTab,pagePhen)
if __name__=="__main__":
main()
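# Usage sketch (illustrative; the script filename and query values below are assumptions,
# only the getopt flags come from main() above):
#
#   python retrieval.py -i diabetes -p 1 -t 0 -z 1
#   python retrieval.py --input=asthma --page=2 --tab=1 --format
#
# -i/--input is the query string, -p/--page the study results page, -z/--pagePhen the
# variable results page, -t/--tab selects the Studies (0) or Variables (1) tab, and
# -f/--format switches the output from HTML to JSON.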
| 2.578125
| 3
|
GamesRL/pirate-passage/grid.py
|
Zhuravld/Portfolio
| 0
|
12784478
|
<filename>GamesRL/pirate-passage/grid.py<gh_stars>0
from utils import PointIndexed, Field
Point = tuple([int, int])
class Pirate:
"""Enemy. Occupies a single Field, travels along a route.
at: current location (field ref or point coords)
id: identifier
route: list of waypoints
"""
_count = 0
def __init__(self, route_waypoints: list, id=None):
self.route = [tuple(wp) for wp in route_waypoints]
self.i = 0
Pirate._count += 1
if id is None:
id = Pirate._count
self.id = id
def execute_move(self):
self.i = (self.i + 1) % len(self.route)
def _next(self):
return self.route[(self.i + 1) % len(self.route)]
@property
def at(self):
return self.route[self.i]
class Grid:
"""Gridworld. Handles interaction of pirate and player objects
on a `PointIndexed` collection of `Field`s.
Agents on the grid communicate an intended move
by passing the `Grid` obj as parameter.
"""
# TODO: Test Grid class
def __init__(self, spec: dict):
shape, start, goal, inaccessible, pirate_routes = map(
lambda k: spec[k],
["shape", "start", "goal", "inaccessible", "pirate_routes"],
)
self.fields = self._initialize_fields(shape)
self._mark_inaccessible_fields(inaccessible)
self._set_start_and_goal_fields(start, goal)
self.pirates = self._initialize_pirates(pirate_routes)
@property
def shape(self) -> tuple([int, int]):
return self.fields.shape
def _initialize_fields(self, shape):
"""Generate grid of Fields from `shape` (2-tuple)"""
nrows, ncols = shape
return PointIndexed(
[ # 2D collection of Fields, indexed by (r,c)
[Field((row_idx, col_idx)) for row_idx in range(nrows)]
for col_idx in range(ncols)
]
)
def _mark_inaccessible_fields(self, inaccessible: list(Point)):
"""Modify `self.grid` inplace to mark inaccessible fields."""
self.inaccessible = inaccessible
for point in inaccessible:
self.fields[point].player_can_access = False
def _set_start_and_goal_fields(self, start_point: Point, goal_point: Point):
"""Init references to start and goal fields."""
self.start_field = self.fields[start_point]
self.goal_field = self.fields[goal_point]
def _initialize_pirates(self, pirate_routes: dict) -> list([Pirate]):
pirates = [Pirate(route, id=i) for i, route in pirate_routes.items()]
return pirates
def step(self, player_action: tuple([Point, Point])) -> list:
"""Increment grid state given `player_action`.
Return IDs of all collided pirates"""
in_transit_collisions = self.check_in_transit_collisions(player_action)
endpoint_collisions = self.check_endpoint_collisions(player_action)
if in_transit_collisions:
self._message(
f"Collided with pirates in transit: {', '.join(in_transit_collisions)}"
)
elif endpoint_collisions:
self._message(
f"Collided with pirates at endpoint: {', '.join(endpoint_collisions)}"
)
self._execute_movements()
if in_transit_collisions:
return in_transit_collisions
else:
return endpoint_collisions
def get_pirate_moves(self) -> dict:
return {p.id: (p.at, p._next()) for p in self.pirates}
def _execute_movements(self):
# self._message("Sending all movements commands")
for p in self.pirates:
p.execute_move()
def _message(self, msg):
"""Placeholder for sending data to the UI"""
print(msg)
def check_in_transit_collisions(self, player_action: tuple([Point, Point])) -> list:
player_from, player_to = player_action
pirates_collided = []
for pirate_id, from_to in self.get_pirate_moves().items():
pirate_from, pirate_to = from_to
if (pirate_from == player_to) and (pirate_to == player_from):
pirates_collided.append(pirate_id)
return pirates_collided
def check_endpoint_collisions(self, player_action: tuple([Point, Point])) -> list:
_, player_to = player_action
pirates_collided = []
for pirate_id, (pirate_from, pirate_to) in self.get_pirate_moves().items():
if pirate_to == player_to:
pirates_collided.append(pirate_id)
return pirates_collided
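# Minimal usage sketch (not part of the original module): the spec keys mirror the ones
# unpacked in Grid.__init__; the concrete board layout and pirate route are made up.
if __name__ == "__main__":
    demo_spec = {
        "shape": (3, 3),
        "start": (0, 0),
        "goal": (2, 2),
        "inaccessible": [(1, 1)],
        "pirate_routes": {0: [(0, 2), (1, 2), (2, 2), (1, 2)]},
    }
    demo_grid = Grid(demo_spec)
    # The player proposes a move as a (from, to) pair of points; step() returns the IDs
    # of any pirates the move collides with and advances every pirate along its route.
    print(demo_grid.step(((0, 0), (0, 1))))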
| 3.078125
| 3
|
LeetCode/Medium/partition_labels.py
|
shrey199325/LeetCodeSolution
| 0
|
12784479
|
"""
A string S of lowercase English letters is given. We want to partition this string into as many parts as possible so
that each letter appears in at most one part, and return a list of integers representing the size of these parts.
Example 1:
Input: S = "ababcbacadefegdehijhklij"
Output: [9,7,8]
Explanation:
The partition is "ababcbaca", "defegde", "hijhklij".
This is a partition so that each letter appears in at most one part.
A partition like "ababcbacadefegde", "hijhklij" is incorrect, because it splits S into less parts.
Note:
S will have length in range [1, 500].
S will consist of lowercase English letters ('a' to 'z') only.
"""
from typing import List
class Solution:
def partitionLabels(self, S: str) -> List[int]:
last_occurrence = {c: i for i, c in enumerate(S)}
j = anchor = 0
ans = []
for index, c in enumerate(S):
j = max(j, last_occurrence[c])
if index == j:
ans.append(index - anchor + 1)
anchor = index + 1
return ans
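# Quick check against the example from the docstring above (illustrative usage):
if __name__ == "__main__":
    print(Solution().partitionLabels("ababcbacadefegdehijhklij"))  # expected: [9, 7, 8]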
| 4.25
| 4
|
src/Nodes/Graph.py
|
CelineDknp/SemiParsingCFG
| 0
|
12784480
|
from .Node import Node
from .FusedNode import FusedNode
from .SimpleBranchConditionNode import SimpleBranchConditionNode
from .MultipleBranchConditionNode import MultipleBranchConditionNode
from .ControlLoopNode import ControlLoopNode
from .LabelLoopNode import LabelLoopNode
from .LoopNode import LoopNode
from .MultipleLabelLoopNode import MultipleLabelLoopNode
from Utils.config import *
from Utils.utils import clean_regex
import graphviz
class Graph:
def __init__(self, start_node):
self.start_node = start_node
self.last_node = start_node
self.open_ifs = []
self.open_loops = {}
self.open_control_loops = []
self.all_labels = {}
self.all_nodes = [start_node]
def get_last_node(self):
return self.all_nodes[-1]
def match_if(self, branch_node):
res = None
# print(f"Searching corr node for: {branch_node}")
for cond_node in self.open_ifs:
# print(f"Looking at {cond_node} at depth {cond_node.get_depth()}")
# print(f"{cond_node.get_regex()} VS {branch_node.get_regex()}")
if cond_node.get_depth() == branch_node.get_depth() and (
cond_node.get_regex() == branch_node.get_regex() or branch_node.get_type() == NODE_COND_END_ANY):
# print("match !")
res = cond_node
return res
def match_loops(self, node):
to_remove = []
# print(f"IN MATCH LOOPS {node}")
# print(self.open_loops)
if node.get_label() in self.open_loops.keys():
for loop in self.open_loops[node.get_label()]:
# print("MATCH")
loop.add_child(node)
if loop.is_complete():
to_remove.append(loop)
for n in to_remove:
self.open_loops[node.get_label()].remove(n)
def match_control_node(self, control_node, child):
to_remove = []
found = False
for n in self.open_control_loops:
if isinstance(n, ControlLoopNode) and control_node.get_type() == n.get_control():
n.add_child(child, match=True)
found = True
to_remove.append(n)
for n in to_remove:
self.open_control_loops.remove(n)
return found
def match_labels(self, loop_node):
if isinstance(loop_node, ControlLoopNode):
return True
# print(f"IN MATCH LABELS {loop_node}")
# print(self.all_labels)
if isinstance(loop_node, MultipleLabelLoopNode):
for l in loop_node.get_label():
if l in self.all_labels.keys():
# print("MATCH")
loop_node.add_child(self.all_labels[l])
if len(loop_node.get_childs()) == len(loop_node.get_label())+1:
return False
elif isinstance(loop_node, LabelLoopNode):
if loop_node.get_label() in self.all_labels.keys():
# print("MATCH")
loop_node.add_child(self.all_labels[loop_node.get_label()])
return False
return True
def update_open_loops(self, node):
if node.is_control():
self.open_control_loops.append(node)
elif self.match_labels(node):
if node.is_multiple_labels():
for l in node.get_label():
if l in self.open_loops.keys():
self.open_loops[l].append(node)
else:
self.open_loops[l] = [node]
else:
if node.get_label() in self.open_loops.keys():
self.open_loops[node.get_label()].append(node)
else:
self.open_loops[node.get_label()] = [node]
def add_single_node(self, node):
if self.last_node is not None:
self.last_node.add_child(node)
self.last_node = node
self.all_nodes.append(node)
def add_node(self, node):
# print(f"Adding node {node}")
if node.get_type() == NODE_COND_START:
# print(">>> In IF")
self.add_single_node(node)
self.open_ifs.append(node)
elif node.get_type() == NODE_SQL:
self.add_single_node(node)
elif node.get_type() == NODE_LABEL:
# print(">>> In EXEC")
self.add_single_node(node)
self.all_labels[node.get_label()] = node
self.match_loops(node)
elif node.get_type() == NODE_LOOP:
self.add_single_node(node)
#print(f"Loop node {node}")
if not node.is_goback_node(): # If control never comes back to this node (e.g. a GOTO), don't keep it as the fall-through parent
print("Node was a GOTO, cancel last node")
self.last_node = None
self.update_open_loops(node)
elif node.get_type() == NODE_COND_BRANCH:
# print(">>> In ELSE")
corr_if = self.match_if(node)
if corr_if is None:
print(f"Was looking for node to match {node} in array {self.open_ifs}, but found none")
raise Exception('Mismatched condition branch')
temp = Node(corr_if.get_depth(), NODE_CONTROL)
if isinstance(node, MultipleBranchConditionNode): # We have a multiple condition, grab the condition
corr_if.add_branch_condition(node, temp)
corr_if.close_branch() # If we found an else, we should close the previous branch
corr_if.add_child(temp)
self.all_nodes.append(temp)
self.last_node = temp
elif node.get_type() == NODE_COND_END or node.get_type() == NODE_COND_END_ANY:
# print(">>> In END-IF")
corr_if = self.match_if(node)
temp = Node(0, NODE_CONTROL) if corr_if is None else Node(corr_if.get_depth(), NODE_CONTROL)
f = False
if node.get_type() == NODE_COND_END_ANY:
f = self.match_control_node(node, temp)
if corr_if is None and not f:
print(f"Was looking for node to match {node} in array {self.open_ifs} or {self.open_control_loops}, but found none")
raise Exception('Mismatched condition end')
# print(f"Found matching if: {corr_if}", flush=True)
if corr_if is not None:
corr_if.close(temp)
self.open_ifs.remove(corr_if)
elif self.last_node is not None:
self.last_node.add_child(temp)
self.last_node = temp
self.all_nodes.append(temp)
elif node.get_type() == "END": # Adding the end_node
self.add_single_node(node)
# print("Added the last_node")
else:
print(f"Issue during adding node {node}")
def get_start_node(self):
return self.start_node
def get_size(self):
return len(self.all_nodes)
def get_all_nodes(self):
return self.all_nodes
def replace_child(self, target, old_child, new_child):
if isinstance(target, SimpleBranchConditionNode):
if target.true_child == old_child:
target.remove_child(old_child)
target.add_child(new_child, end=True, branch=True)
if target.false_child == old_child:
target.remove_child(old_child)
target.add_child(new_child, end=True, branch=False)
elif isinstance(target, LoopNode):
target.remove_child(old_child)
target.add_child(new_child, match=True)
else:
target.remove_child(old_child)
target.add_child(new_child)
for grand_child in old_child.get_childs():
grand_child.remove_parent(old_child)
grand_child.add_parent(target)
if old_child in self.all_nodes:
self.all_nodes.remove(old_child)
def cleanup_triangle(self, current_node, new_child):
self.replace_child(current_node.get_parent()[0], current_node, new_child)
def one_parent(self, node):
if len(node.get_parent()) == 0:
return False
if len(node.get_parent()) == 1: # Easy case, only one parent
return True
first = node.get_parent()[0]
for elem in node.get_parent():
if elem != node: # Dirty
if elem != first:
return False
return True
def cleanup(self, label_clean=False):
#for o in self.open_loops:
# if self.open_loops[o] != []:
# print(f"Found open loop: {o}, {self.open_loops[o]}")
cleaned = True
while cleaned == True:
visited = []
cleaned = False
start_node = self.all_nodes.copy()
while len(start_node) != 0:
current_node = start_node[0]
visited.append(current_node)
if current_node.get_type() == NODE_CONTROL:
children = current_node.get_childs()
# print(f">>> Found control node ! len children: {len(children)} len grand_children: {len(children[0].get_childs())}")
if len(children) == 2:
if children[0] == children[1]: # Two links pointing the same direction
current_node.remove_child(children[0])
cleaned = True
# We are dealing with a triangle (V1)
elif children[1] == children[0].get_childs()[0]:
self.cleanup_triangle(current_node, children[1])
cleaned = True
# We are dealing with a triangle (V2)
elif children[0] == children[1].get_childs()[0]:
self.cleanup_triangle(current_node, children[0])
cleaned = True
elif len(children) == 1:
# We are in a control node having a single child of a control node
parent_node = current_node.get_parent().copy()
for p in parent_node:
self.replace_child(p, current_node, children[0])
cleaned = True
elif current_node.get_type() == NODE_LABEL and label_clean:
parents = current_node.get_parent()
if len(parents) == 1 and parents[0].get_type() != NODE_LOOP: # A single parent that is not a GOTO
child_node = current_node.get_childs().copy()
for c in child_node:
self.replace_child(parents[0], current_node, c)
cleaned = True
for child in current_node.get_childs().copy(): # Look at a node's children
if child.get_type() == NODE_CONTROL: # When we find a control node
if len(child.get_childs()) == 1 and self.one_parent(child): # Only one parent and one child
self.replace_child(current_node, child, child.get_childs()[0])
cleaned = True
start_node.remove(current_node)
def fuse(self, node_up, node_down):
if node_down.get_type() == NODE_FUSED:
node_down.fuse_node(node_up, up=True)
self.all_nodes.remove(node_up)
return node_down
elif node_up.get_type() == NODE_FUSED:
node_up.fuse_node(node_down, down=True)
self.all_nodes.remove(node_down)
else:
node = FusedNode(node_up.get_depth(), NODE_FUSED)
self.all_nodes.append(node)
node.fuse_node(node_up, up=True)
self.all_nodes.remove(node_up)
node.fuse_node(node_down, down=True)
self.all_nodes.remove(node_down)
return node
def squish(self):
squished = True
while squished:
squished = False
start_node = self.all_nodes.copy()
all_nodes = []
while len(start_node) != 0:
current_node = start_node[0]
all_nodes.append(current_node)
if (
current_node.get_type() == NODE_COND_START or current_node.get_type() == NODE_FUSED) and current_node.point_to_one():
# We are in a if node that points to a single node
# print(f"Found a node that points to one: {current_node} with childs: {current_node.get_childs()}")
child = current_node.get_childs()[0]
if child.get_type() != "END" and child.get_type() != NODE_SQL:
merge = True
for c in child.get_childs():
if c.get_type() == NODE_SQL:
merge = False
if merge:
# print("Decided to merge !")
res = self.fuse(current_node, child)
squished = True
break
elif current_node.get_type() == NODE_FUSED and any(
child.get_type() == NODE_FUSED for child in current_node.get_childs()):
# print("Found a fused node having fused child")
# We found a fused node having a FUSED child
to_fuse = None
for child in current_node.get_childs():
if child.get_type() == NODE_FUSED:
to_fuse = child
if to_fuse != None:
# print("Decided to fuse the fuse")
res = self.fuse(current_node, to_fuse)
squished = True
start_node.remove(current_node)
def save_as_file(self, filename, output_dir='doctest-output'):
dot = graphviz.Digraph(filename)
for current_node in self.all_nodes:
if current_node.get_type() == NODE_COND_START:
dot.attr('node', shape='ellipse')
dot.node(str(current_node.id), clean_regex(current_node) + " " + current_node.condition)
elif current_node.get_type() == NODE_LOOP:
dot.attr('node', shape='ellipse')
if isinstance(current_node, ControlLoopNode):
dot.node(str(current_node.id), clean_regex(current_node))
elif isinstance(current_node, MultipleLabelLoopNode):
dot.node(str(current_node.id), "PERFORM " + current_node.get_label()[0]+" THRU "+ current_node.get_label()[1])
elif isinstance(current_node, LabelLoopNode):
dot.node(str(current_node.id), clean_regex(current_node)+" " + current_node.get_label())
elif current_node.get_type() == NODE_SQL:
dot.attr('node', shape='box')
dot.node(str(current_node.id), current_node.parsable)
elif current_node.get_type() == NODE_LABEL:
dot.attr('node', shape='note')
dot.node(str(current_node.id), current_node.label)
elif current_node.get_type() == NODE_CONTROL:
dot.node(str(current_node.id), str(current_node.id))
elif current_node.get_type() == "START":
dot.attr('node', shape='diamond')
dot.node(str(current_node.id), 'START')
elif current_node.get_type() == "END":
dot.attr('node', shape='diamond')
dot.node(str(current_node.id), 'END')
elif current_node.get_type() == NODE_FUSED:
dot.attr('node', shape='circle')
dot.node(str(current_node.id), str(current_node.amount_contained()))
for n in self.all_nodes:
if n.get_type() == NODE_COND_START and isinstance(n, SimpleBranchConditionNode):
dot.edge(str(n.id), str(n.true_child.id), label='True')
dot.edge(str(n.id), str(n.false_child.id), label='False')
elif n.get_type() == NODE_COND_START and isinstance(n, MultipleBranchConditionNode):
for condition in n.get_branch_childs().keys():
# print(condition)
dot.edge(str(n.id), str(n.get_branch_child(condition).id), label=condition)
elif n.get_type() == NODE_LOOP and n.is_goback_node():
# print(f"Doing node {n}")
for link in n.get_childs():
dot.edge(str(n.id), str(link.id))
if link.get_type() == NODE_LABEL and link.get_label() == n.go_back_label():
dot.edge(str(link.id), str(n.id), label="Go back")
else:
for link in n.get_childs():
dot.edge(str(n.id), str(link.id))
dot.render(directory=output_dir, view=False)
def add_node_to_list(self, node):
self.all_nodes.append(node)
def __str__(self):
result = "Nodes in graph:\n"
for n in self.all_nodes:
# print(n.id)
result += n.__repr__() + "\n"
return result
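# Rough usage sketch, written as comments because it is an assumption based on how the
# class is driven above rather than code taken from the original repository: a Graph is
# seeded with a START node, fed parsed nodes via add_node(), then simplified and rendered.
#
#   start = Node(0, "START")
#   graph = Graph(start)
#   # ... graph.add_node(parsed_node) for every node produced by the parser ...
#   graph.add_node(Node(0, "END"))
#   graph.cleanup(label_clean=True)
#   graph.squish()
#   graph.save_as_file("control_flow", output_dir="doctest-output")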
| 2.296875
| 2
|
abc/abc048/abc048c.py
|
c-yan/atcoder
| 1
|
12784481
|
<reponame>c-yan/atcoder
N, x = map(int, input().split())
a = list(map(int, input().split()))
result = 0
# Unless a[0] is at most x, the condition cannot be satisfied even if a[1] is emptied
t = a[0] - x
if t > 0:
result += t
a[0] -= t
for i in range(1, N):
# The number of candies already in the box is the upper limit on how many can be eaten
t = min(a[i - 1] + a[i] - x, a[i])
if t > 0:
result += t
a[i] -= t
print(result)
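# Worked example (sketch): with input "3 3" and "2 2 2" (N=3, x=3, a=[2, 2, 2]),
# a[0] <= x so nothing is eaten from the first box; at i=1 we eat
# min(2+2-3, 2) = 1 candy, leaving adjacent sums 2+1 = 3 and 1+2 = 3, and the
# program prints 1.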
| 2.453125
| 2
|
handlers/storing_passwords_handl.py
|
bbt-t/bot-pet-project
| 0
|
12784482
|
<reponame>bbt-t/bot-pet-project
from datetime import timedelta
from hashlib import scrypt as hashlib_scrypt
from hmac import compare_digest as hmac_compare_digest
from pickle import dumps as pickle_dumps
from pickletools import optimize as pickletools_optimize
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters.builtin import Command
from aiogram.types import Message, CallbackQuery, ChatActions
from aiogram.utils.markdown import hspoiler
from sqlalchemy.exc import IntegrityError, NoResultFound
from sqlalchemy.exc import SQLAlchemyError
from pgpy import PGPMessage
from config import bot_config
from handlers.states_in_handlers import PasswordStates
from loader import dp, logger_guru, scheduler
from middlewares.throttling import rate_limit
from utils.database_manage.sql.sql_commands import DB_USERS
from utils.keyboards.pass_settings_bk import pass_choice_kb
from utils.misc.other_funcs import delete_marked_message, get_time_now
@logger_guru.catch()
def convert_password_to_enc_object(user_id: int, name_pass: str, password: str) -> bytes:
"""
Encrypts password and serializes for storage
:param user_id: ID who wrote
:param name_pass: given password name
:param password: password
:return: pickle object
"""
very_useful_thing: str = hashlib_scrypt(
name_pass.encode(),
salt=f'{user_id}'.encode(),
n=8, r=512, p=4, dklen=16
).hex()
encrypt_password = PGPMessage.new(password.encode()).encrypt(very_useful_thing)
serialized_object: bytes = pickletools_optimize(pickle_dumps(encrypt_password))
return serialized_object
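# Round-trip sketch (illustration only, not part of the original handlers): decryption
# re-derives the same scrypt key from the password name and user id, exactly as done in
# get_name_of_the_requested_password below; the values here are made up and pickle.loads
# is not imported in this module.
#
#   enc = convert_password_to_enc_object(42, "mailbox", "s3cret")
#   key = hashlib_scrypt("mailbox".encode(), salt=b"42", n=8, r=512, p=4, dklen=16).hex()
#   plain = pickle.loads(enc).decrypt(key).message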
@rate_limit(2, key='pass')
@dp.message_handler(Command('pass'))
async def accept_settings_for_remembering_password(message: Message, state: FSMContext) -> None:
match lang := await DB_USERS.select_bot_language(telegram_id=(user_id := message.from_user.id)):
case 'ru':
text_msg: str = 'Привет, я могу запомнить 🔐 твои пароли, для этого мне нужно знать твоё кодовое слово...'
case _:
text_msg: str = 'Hello, I can remember 🔐 your passwords, for this I need to know your codeword...'
await message.delete()
await message.answer(text_msg)
await PasswordStates.first()
async with state.proxy() as data:
data['user_id'], data['lang'] = user_id, lang
@dp.message_handler(state=PasswordStates.check_personal_code)
async def verify_personal_code(message: Message, state: FSMContext) -> None:
async with state.proxy() as data:
user_id, lang = data.values()
skin = await DB_USERS.select_skin(telegram_id=user_id)
msg: str = hashlib_scrypt(message.text.encode(), salt=f'{user_id}'.encode(), n=8, r=512, p=4, dklen=32).hex()
try:
if check_pass := await DB_USERS.check_personal_pass(telegram_id=user_id):
if hmac_compare_digest(check_pass, msg):
await message.answer_sticker(skin.order_accepted.value, disable_notification=True)
await message.answer('ПРИНЯТО!' if lang == 'ru' else 'ACCEPTED!')
await PasswordStates.next()
async with state.proxy() as data:
data['lang'] = lang
tex_msg: str = 'Что ты конкретно хочешь?' if lang == 'ru' else 'What do you specifically want?'
await message.answer(tex_msg, reply_markup=pass_choice_kb)
else:
match lang:
case 'ru':
await message.answer('НЕВЕРНО! попробуй ещё раз :С\n\n'
'п.с.: если твой пароль потерялся, то напиши в саппорт!\n\n'
'<i>подсказка: /support</i>')
case _:
await message.answer('WRONG! try again :(\n\n'
'P.S.: if your codeword is lost, write to support!\n\n'
'<i>hint: /support</i>')
await state.finish()
else:
await DB_USERS.update_personal_pass(telegram_id=user_id, personal_pass=msg)
await message.answer(
'Добавила :)\nнапиши его ещё раз.' if lang == 'ru' else
"Didn't find it on the list, added it:)\nwrite it again."
)
await message.delete()
except SQLAlchemyError as err:
logger_guru.exception(f'{repr(err)} : in check_personal_code handler!')
await state.finish()
@dp.callback_query_handler(text='new_pass', state=PasswordStates.successful_auth_for_pass)
async def accept_personal_key(call: CallbackQuery, state: FSMContext) -> None:
async with state.proxy() as data:
lang: str = data.get('lang')
await call.message.answer(
'Задай имя сохраняемого пароля ...\n (можешь сразу и сам пароль)' if lang == 'ru' else
'Set a name for the password to be saved ...\n (you can also include the password itself right away)'
)
await call.message.delete_reply_markup()
@dp.message_handler(state=PasswordStates.successful_auth_for_pass)
async def set_name_and_write_pass(message: Message, state: FSMContext) -> None:
async with state.proxy() as data:
user_id, lang, name_pass = data.get('user_id'), data.get('lang'), data.get('name')
msg: str = message.text
match msg.replace(',', ' ').split():
case name_pass, password:
await message.delete()
enc_pass: bytes = convert_password_to_enc_object(user_id, name_pass, password)
try:
await DB_USERS.add_other_info(telegram_id=user_id, name=name_pass, info_for_save=enc_pass)
except IntegrityError:
await DB_USERS.update_pass(telegram_id=user_id, name_pass=name_pass, info_for_save=enc_pass)
await message.answer('Отлично! Записано.' if lang == 'ru' else 'Fine!')
await state.finish()
case _:
if not name_pass:
async with state.proxy() as data:
data['name'] = msg
await message.delete()
await message.answer('А теперь пароль :)' if lang == 'ru' else 'And now the password :)')
else:
enc_pass: bytes = convert_password_to_enc_object(user_id, name_pass, msg)
try:
await DB_USERS.add_other_info(telegram_id=user_id, name=name_pass, info_for_save=enc_pass)
except IntegrityError:
await DB_USERS.update_pass(telegram_id=user_id, name_pass=<PASSWORD>, info_for_save=enc_pass)
await message.delete()
await message.answer('Пoлучено, записано!' if lang == 'ru' else 'Received and recorded!')
await state.finish()
@dp.callback_query_handler(text='receive_pass', state=PasswordStates.successful_auth_for_pass)
async def get_existing_pass(call: CallbackQuery, state: FSMContext) -> None:
async with state.proxy() as data:
lang: str = data['lang']
await call.message.answer('Какое "имя" пароля?' if lang == 'ru' else 'What is the "name" of the password?')
await call.message.delete_reply_markup()
await PasswordStates.last()
@dp.message_handler(state=PasswordStates.set_name_pass)
async def get_name_of_the_requested_password(message: Message, state: FSMContext) -> None:
async with state.proxy() as data:
user_id, lang = data.values()
msg: str = message.text.replace(' ', '')
try:
if decrypt_password := await DB_USERS.select_pass(name=msg, telegram_id=user_id):
very_useful_thing: str = hashlib_scrypt(msg.encode(), salt=f'{user_id}'.encode(),
n=8, r=512, p=4, dklen=16).hex()
password: str = decrypt_password.decrypt(very_useful_thing).message
text_msg: str = (
f'НАШЛА! вот пароль с именем <b><code>{msg}</code></b> :\n\n'
f'{hspoiler(password)}\<PASSWORD>'
f'у тебя 10 секунд чтобы его скопировать !' if lang == 'ru' else
f'FOUND! here is the password with the name <b><code>{msg}</code></b> :\n\n'
f'{hspoiler(password)}\<PASSWORD>'
f'after 10 seconds it will be deleted !'
)
removing_msg: Message = await message.answer(text_msg)
delete_time: str = (get_time_now(bot_config.time_zone) + timedelta(seconds=10)).strftime('%Y-%m-%d %H:%M:%S')
scheduler.add_job(
delete_marked_message, id=f'del_msg_{user_id}',
args=(removing_msg.message_id, message.chat.id), trigger='date',
replace_existing=True, run_date=delete_time, timezone="Europe/Moscow"
)
await message.delete()
except NoResultFound:
logger_guru.warning(f'{user_id=} entering an invalid password name!')
await message.delete()
await message.answer_chat_action(ChatActions.TYPING)
await message.answer(
f'Не найден пароль с именем {hspoiler(message.text)} 😕' if lang == 'ru' else
f"Couldn't find a password with {hspoiler(message.text)} 😕"
)
finally:
await state.finish()
| 1.945313
| 2
|
werecool/main.py
|
Zsailer/interns2020demo
| 0
|
12784483
|
def main():
print("Hello, World from Jess!")
print("Isn't this great?")
if __name__ == "__main__":
main()
| 2.3125
| 2
|
soccerpy/modules/Fundamentals/links/fixture_links.py
|
SlapBot/soccerpy
| 0
|
12784484
|
from soccerpy.modules.Fundamentals.links.base_links import BaseLinks
class FixtureLinks(BaseLinks):
def __init__(self, request):
super(FixtureLinks, self).__init__(request)
pass
| 2.046875
| 2
|
fbz_filer.py
|
jfarrimo/lol-logwatcher
| 1
|
12784485
|
<gh_stars>1-10
"""
Copyright (c) 2012 Lolapps, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
THIS SOFTWARE IS PROVIDED BY LOLAPPS, INC. ''AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LOLAPPS, INC. OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the
authors and should not be interpreted as representing official policies, either expressed
or implied, of Lolapps, Inc..
--------------------------------------------------------------------------------------------
Wraps all our Fogbugz functionality. Stubs out if Fogbugz is not enabled.
The API docs are not the greatest, but there are some available at:
http://support.fogcreek.com/help/topics/advanced/API.html
This relies on Fogbugz's python library. Luckily this is in pypi:
$ pip install fogbugz==0.9.4
"""
import sys
import util
from settings import *
if ENABLE_FBZ:
import fogbugz
class FbzDisabled(Exception):
pass
def ensure_enabled(fn):
def wrapped(*args, **kwargs):
# forward self and any other arguments to the wrapped method
if not ENABLE_FBZ:
raise FbzDisabled()
return fn(*args, **kwargs)
return wrapped
class FBZ(object):
def __init__ (self, user=FBZ_USER, passwd=<PASSWORD>, url=FBZ_URL):
if ENABLE_FBZ:
self.fb = fogbugz.FogBugz(url)
self.fb.logon(user, passwd)
else:
self.fb = None
def close_connection(self):
if self.fb:
self.fb.logoff()
def file_case(self, product, bug_title, bug_text):
if not ENABLE_FBZ:
return FBZ_FAKE_CASE, FBZ_FAKE_PRIORITY
bug_desc = bug_title
bug_msg = bug_title
tags = []
priority = 6
projectid = PROJECT_IDS.get(product, PROJECT_DEFAULT)
sarea = PROJECT_AREAS.get(product, PROJECT_AREA_DEFAULT)
# we've gotten bugs where the number of occurrences is in
# the thousands. When that is the case, we don't really
# need additional text added to the bug, just that the bug
# has occurred again. This next section attempts to
# see if we've encountered this bug before. If so, if we've
# gotten more than 3 occurrences, reset the bug_text
# as it's most likely redundant
existing_case = self._search_by_scout_desc(bug_desc)
if existing_case != 0:
util.write_log('Found existing bug : %s' % existing_case)
case_dict = self.get_case_info(existing_case)
priority = give_priority(case_dict['ixpriority'], case_dict['c'])
projectid = case_dict['ixproject']
ixarea = case_dict['ixarea']
else:
ixarea = None
# The cols section is around getting certain status info
# ixBug is the bug number
# fOpen tells us if the bug is either open or closed
# ixStatus is to tell us if the bug is still open, then
# what state of open is it (resolved is still considered "open")
if ixarea:
resp = self.fb.new(sTags=','.join(tags or []),
sTitle=bug_title,
ixProject=projectid,
ixArea=ixarea,
ixPriority=priority,
sScoutDescription=bug_desc,
sScoutMessage=bug_msg,
sEvent=bug_text,
cols='ixBug,fOpen,ixStatus')
else:
resp = self.fb.new(sTags=','.join(tags or []),
sTitle=bug_title,
ixProject=projectid,
sArea=sarea,
ixPriority=priority,
sScoutDescription=bug_desc,
sScoutMessage=bug_msg,
sEvent=bug_text,
cols='ixBug,fOpen,ixStatus')
if not resp.ixbug or not resp.fopen or not resp.ixstatus:
raise fogbugz.FogBugzAPIError("Response is missing ixbug, fopen, or ixstatus: %r" % resp)
case = int(resp.ixbug.string)
case_open = resp.fopen.string
case_status = int(resp.ixstatus.string)
if case_open == 'false' or case_status not in STATUS_IDS.values():
# check if the bug is closed or else if the status is
# anything except open and active
self._reopen_case(case)
return case, priority
def _reopen_case(self, case):
case_dict = self.get_case_info(case)
ixpersonresolvedby = case_dict['ixpersonresolvedby']
try:
resp = self.fb.reopen(ixBug=case)
except:
resp = self.fb.reactivate(ixBug=case)
self.fb.edit(ixBug=case,
ixPersonAssignedTo=ixpersonresolvedby)
return resp
def _search_by_scout_desc(self, title):
""" attempt to search fogbugz for a certain
scoutdescription. This, in theory, is how we can
see if a bug has been automatically filed before.
If the bug has been filed before, return the bug number.
Otherwise, return 0
"""
resp = self.fb.listScoutCase(sScoutDescription=title)
try:
ixbug = int(resp.ixbug.string)
except AttributeError:
ixbug = 0
return ixbug
@ensure_enabled
def resolve_case(self, case):
self.fb.resolve(ixBug=case)
@ensure_enabled
def close_case(self, case):
self.fb.close(ixBug=case)
@ensure_enabled
def check_case_status(self, case):
resp = self.fb.search(q = case,
cols = 'fOpen,ixStatus,c')
status = resp.fopen.string
return status
@ensure_enabled
def get_case_info(self, case):
resp = self.fb.search(q = case,
cols = 'fOpen,ixStatus,c,ixPersonAssignedTo,ixProject,ixArea,ixPersonResolvedBy,ixPriority,sStatus,sPersonAssignedTo')
case_dict = { 'fopen' : resp.fopen.string,
'ixarea' : int(resp.ixarea.string),
'ixstatus' : int(resp.ixstatus.string),
'c' : int(resp.c.string),
'ixpersonassignedto' : int(resp.ixpersonassignedto.string),
'ixproject' : int(resp.ixproject.string),
'ixpersonresolvedby' : int(resp.ixpersonresolvedby.string),
'ixpriority' : int(resp.ixpriority.string),
'sstatus' : resp.sstatus.string,
'spersonassignedto' : resp.spersonassignedto.string
}
return case_dict
@ensure_enabled
def get_case_list(self, search_criteria, max_count=None):
if max_count is not None:
resp = self.fb.search(q = search_criteria,
cols = 'ixBug,sTitle',
max = max_count)
else:
resp = self.fb.search(q = search_criteria,
cols = 'ixBug,sTitle')
resp = [{'ixBug': case.ixbug.string,
'sTitle': case.stitle.string}
for case in resp.cases]
return resp
@ensure_enabled
def get_occurrence_count(self, case):
""" give it a case number, and this should tell
you how many times that bug has been hit.
"""
resp = self.fb.search(q = case,
cols = 'c')
count = int(resp.c.string)
return count
def give_priority(currentpriority, occurrences=0):
if occurrences >= 25:
newpriority = 2
elif occurrences >= 10:
newpriority = 3
elif occurrences >= 5:
newpriority = 4
elif occurrences >= 3:
newpriority = 5
else:
newpriority = 6
if newpriority < currentpriority:
util.write_log('increasing priority to %s' % newpriority)
return newpriority
else:
return currentpriority
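# Usage sketch (assumes settings.py provides ENABLE_FBZ, the FBZ_* credentials and the
# PROJECT_*/STATUS_IDS maps imported above; the product name and bug text are made up):
#
#   fbz = FBZ()
#   case, priority = fbz.file_case("myproduct",
#                                  "NullPointerError in checkout",
#                                  "full traceback text goes here")
#   util.write_log("filed case %s at priority %s" % (case, priority))
#   fbz.resolve_case(case)
#   fbz.close_connection()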
| 1.070313
| 1
|
test2.py
|
mahendra1904/pythod-programs
| 0
|
12784486
|
#"ereht era ynam ysad"
x = "there are many days"
length=len(x)
l1=x.split()
str=''
for x in l1:
str=str+x[::-1]+ ' '
print(str)
| 3.546875
| 4
|
loss/__init__.py
|
Axrid/cv_template
| 69
|
12784487
|
import torch.nn as nn
from loss.gradient import grad_loss
from loss.tvloss import tv_loss
from loss.vggloss import vgg_loss
from loss.ssim import ssim_loss as criterionSSIM
from options import opt
criterionCAE = nn.L1Loss()
criterionL1 = criterionCAE
criterionBCE = nn.BCELoss()
criterionMSE = nn.MSELoss()
def get_default_loss(recovered, y, avg_meters):
ssim = - criterionSSIM(recovered, y)
ssim_loss = ssim * opt.weight_ssim
# Compute L1 loss (scaled by opt.weight_l1 and added to the total loss below)
l1_loss = criterionL1(recovered, y)
l1_loss = l1_loss * opt.weight_l1
loss = ssim_loss + l1_loss
# record losses
avg_meters.update({'ssim': -ssim.item(), 'L1': l1_loss.item()})
if opt.weight_grad:
loss_grad = grad_loss(recovered, y) * opt.weight_grad
loss += loss_grad
avg_meters.update({'gradient': loss_grad.item()})
if opt.weight_vgg:
content_loss = vgg_loss(recovered, y) * opt.weight_vgg
loss += content_loss
avg_meters.update({'vgg': content_loss.item()})
return loss
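# Minimal sketch of how the combined loss is typically consumed in a training step
# (assumption: `model`, `batch`, `optimizer` and `avg_meters` are placeholders, and
# `avg_meters` only needs the update(dict) method used above):
#
#   recovered = model(batch['input'])
#   loss = get_default_loss(recovered, batch['target'], avg_meters)
#   optimizer.zero_grad()
#   loss.backward()
#   optimizer.step()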
| 2.1875
| 2
|
xmlmapping/log.py
|
YAmikep/django-xmlmapping
| 2
|
12784488
|
# Internal
from .settings import (
LOGGER_NAME, LOG_FILE, LOG_SIZE, LOGGER_FORMAT, LOG_LEVEL
)
from .utils.loggers import DefaultLogger
default_logger = DefaultLogger(
logger_name=LOGGER_NAME,
level=LOG_LEVEL,
log_file=LOG_FILE,
log_size=LOG_SIZE,
logger_format=LOGGER_FORMAT
)
| 1.578125
| 2
|
tests/test_gpsampler.py
|
tupui/batman
| 0
|
12784489
|
<filename>tests/test_gpsampler.py
# coding: utf8
import os
from mock import patch
import pytest
import numpy as np
import numpy.testing as npt
from batman.space.gp_sampler import GpSampler
import sys
# a simple class with a write method
class WritableObject:
def __init__(self):
self.content = []
def write(self, string):
self.content.append(string)
@patch("matplotlib.pyplot.show")
def test_GpSampler1D(mock_show, tmp):
n_nodes = 100
reference = {'indices': [[x / float(n_nodes)] for x in range(n_nodes)],
'values': [0 for _ in range(n_nodes)]}
sampler = GpSampler(reference)
print(sampler)
# Plot of the modes of the Karhunen Loeve Decomposition
sampler.plot_modes(os.path.join(tmp, 'gp_modes.pdf'))
# Sample of the Gp and plot the instances
size = 5
Y = sampler(size)
sampler.plot_sample(Y, os.path.join(tmp, 'gp_instances.pdf'))
# Build a Gp instance and plot the instances
coeff = [[0.2, 0.7, -0.4, 1.6, 0.2, 0.8]]
Y = sampler(coeff=coeff)
sampler.plot_sample(Y)
@patch("matplotlib.pyplot.show")
def test_GpSampler2D(mock_show, tmp):
n_nodes_by_dim = 10
n_nodes = n_nodes_by_dim**2
reference = {'indices': [[x / float(n_nodes_by_dim),
y / float(n_nodes_by_dim)]
for x in range(n_nodes_by_dim)
for y in range(n_nodes_by_dim)],
'values': [0. for x in range(n_nodes)]}
sampler = GpSampler(reference, "Matern([0.5, 0.5], nu=0.5)")
print(sampler)
# Plot of the modes of the Karhunen Loeve Decomposition
sampler.plot_modes(os.path.join(tmp, 'gp_modes.pdf'))
# Sample of the Gp and plot the instances
size = 5
Y = sampler(size)
sampler.plot_sample(Y, os.path.join(tmp, 'gp_instance.pdf'))
# Build a Gp instance and plot the instances
coeff = [[0.2, 0.7, -0.4, 1.6, 0.2, 0.8]]
Y = sampler(coeff=coeff)
sampler.plot_sample(Y, os.path.join(tmp, 'gp_instance.pdf'))
@patch("matplotlib.pyplot.show")
def test_GpSampler3D(mock_show, tmp):
n_nodes_by_dim = 10
n_nodes = n_nodes_by_dim**3
reference = {'indices': [[x / float(n_nodes_by_dim),
y / float(n_nodes_by_dim),
z / float(n_nodes_by_dim)]
for x in range(n_nodes_by_dim)
for y in range(n_nodes_by_dim)
for z in range(n_nodes_by_dim)],
'values': [0. for x in range(n_nodes)]}
sampler = GpSampler(reference, "Matern([0.5, 0.5, 0.5], nu=0.5)")
print(sampler)
# Sample of the Gp and plot the instances
size = 5
Y = sampler(size)
# Build a Gp instance and plot the instances
coeff = [[0.2, 0.7, -0.4, 1.6, 0.2, 0.8]]
Y = sampler(coeff=coeff)
def sampler1D_from_file(tmp):
n_nodes = 3
reference = {'indices': [[x / float(n_nodes)] for x in range(n_nodes)],
'values': [0. for _ in range(n_nodes)]}
reference_filename = os.path.join(tmp, 'reference_file.npy')
np.save(reference_filename, reference)
sampler = GpSampler(reference_filename)
printOutput = WritableObject()
old = sys.stdout
sys.stdout = printOutput
print(sampler)
sys.stdout = old
sol = ['Gp sampler summary:\n'
'- Dimension = 1\n'
'- Kernel = Matern(0.5, nu=0.5)\n'
'- Standard deviation = 1.0\n'
'- Mesh size = 3\n'
'- Threshold for the KLd = 0.01\n'
'- Number of modes = 3', '\n']
npt.assert_array_equal(printOutput.content, sol)
@pytest.fixture(scope="function", params=[1, 2, 3])
def sampler(request, seed):
if request.param == 1:
n_nodes = 3
reference = {'indices': [[x / float(n_nodes)] for x in range(n_nodes)],
'values': [0. for _ in range(n_nodes)]}
gpsampler = GpSampler(reference)
elif request.param == 2:
n_nodes_by_dim = 2
n_nodes = n_nodes_by_dim**2
reference = {'indices': [[x / float(n_nodes_by_dim),
y / float(n_nodes_by_dim)]
for x in range(n_nodes_by_dim)
for y in range(n_nodes_by_dim)],
'values': [0. for x in range(n_nodes)]}
gpsampler = GpSampler(reference, "Matern([0.5, 0.5], nu=0.5)")
elif request.param == 3:
n_nodes_by_dim = 2
n_nodes = n_nodes_by_dim**3
reference = {'indices': [[x / float(n_nodes_by_dim),
y / float(n_nodes_by_dim),
z / float(n_nodes_by_dim)]
for x in range(n_nodes_by_dim)
for y in range(n_nodes_by_dim)
for z in range(n_nodes_by_dim)],
'values': [0. for x in range(n_nodes)]}
gpsampler = GpSampler(reference, "Matern([0.5, 0.5, 0.5], nu=0.5)")
return gpsampler
def test_GpSampler_print(sampler):
printOutput = WritableObject()
old = sys.stdout
sys.stdout = printOutput
print(sampler)
sys.stdout = old
if sampler.n_dim == 1:
sol = ['Gp sampler summary:\n'
'- Dimension = 1\n'
'- Kernel = Matern(0.5, nu=0.5)\n'
'- Standard deviation = 1.0\n'
'- Mesh size = 3\n'
'- Threshold for the KLd = 0.01\n'
'- Number of modes = 3', '\n']
elif sampler.n_dim == 2:
sol = ['Gp sampler summary:\n'
'- Dimension = 2\n'
'- Kernel = Matern([0.5, 0.5], nu=0.5)\n'
'- Standard deviation = 1.0\n'
'- Mesh size = 4\n'
'- Threshold for the KLd = 0.01\n'
'- Number of modes = 4', '\n']
elif sampler.n_dim == 3:
sol = ['Gp sampler summary:\n'
'- Dimension = 3\n'
'- Kernel = Matern([0.5, 0.5, 0.5], nu=0.5)\n'
'- Standard deviation = 1.0\n'
'- Mesh size = 8\n'
'- Threshold for the KLd = 0.01\n'
'- Number of modes = 8', '\n']
npt.assert_array_equal(printOutput.content, sol)
def test_GpSampler_std(sampler):
if sampler.n_dim == 1:
sol = np.array([0.788, 0.497, 0.363])
elif sampler.n_dim == 2:
sol = np.array([0.699, 0.439, 0.433, 0.356])
elif sampler.n_dim == 3:
sol = np.array([0.609, 0.35, 0.344, 0.339, 0.266, 0.266, 0.264, 0.237])
npt.assert_almost_equal(sampler.standard_deviation, sol, decimal=2)
def test_GpSampler_modes(sampler):
if sampler.n_dim == 1:
sol = np.array([[-0.548, -0.639, -0.54],
[0.708, -0.011, -0.706],
[-0.445, 0.769, -0.459]])
elif sampler.n_dim == 2:
sol = np.array([[-0.505, -0.496, -0.496, -0.503],
[-0.037, 0.705, -0.707, 0.038],
[-0.723, -0.018, 0.057, 0.688],
[0.47, -0.506, -0.501, 0.522]])
elif sampler.n_dim == 3:
sol = np.array([[-0.35, -0.365, -0.359, -0.361, -0.354, -0.353, -0.336, -0.349],
[0.253, 0.516, 0.111, 0.392, -0.386, -0.169, -0.508, -0.261],
[0.544, -0.097, 0.306, -0.342, 0.317, -0.295, 0.107, -0.534],
[-0.134, -0.334, 0.505, 0.351, -0.32, -0.519, 0.313, 0.148],
[-0.527, 0.458, -0.047, 0.104, 0.117, -0.069, 0.469, -0.511],
[-0.294, -0.208, 0.613, -0.073, -0.091, 0.577, -0.255, -0.288],
[-0.173, -0.294, -0.096, 0.539, 0.644, -0.148, -0.362, -0.132],
[0.325, -0.374, -0.35, 0.412, -0.294, 0.362, 0.321, -0.376]])
npt.assert_almost_equal(sampler.modes, sol, decimal=2)
def test_GpSampler_sample_values_sobol(sampler):
size = 2
Y = sampler(size, kind="sobol")
if sampler.n_dim == 1:
sol = np.array([[0., 0., 0.],
[-0.638, -0.148, -0.163]])
elif sampler.n_dim == 2:
sol = np.array([[0., 0., 0., 0.],
[-0.551, -0.327, 0.112, -0.173]])
elif sampler.n_dim == 3:
sol = np.array([[0., 0., 0., 0., 0., 0., 0., 0.],
[-0.171, -0.09, -0.298, -0.339, 0.292, -0.255, -0.05, -0.243]])
npt.assert_almost_equal(Y['Values'], sol, decimal=2)
def test_GpSampler_sample_coeff_sobol(sampler):
size = 2
Y = sampler(size, kind="sobol")
if sampler.n_dim == 1:
sol = np.array([[0., 0., 0.],
[0.674, -0.674, 0.674]])
elif sampler.n_dim == 2:
sol = np.array([[0., 0., 0., 0.],
[0.674, -0.674, 0.674, -0.674]])
elif sampler.n_dim == 3:
sol = np.array([[0., 0., 0., 0., 0., 0., 0., 0.],
[0.674, -0.674, 0.674, -0.674, 0.674, -0.674, 0.674, -0.674]])
npt.assert_almost_equal(Y['Coefficients'], sol, decimal=2)
def test_GpSampler_sample_values(sampler, seed):
size = 2
Y = sampler(size)
if sampler.n_dim == 1:
sol = np.array([[-0.48, 0.16, 0.59],
[0.29, -0.31, -0.43]])
elif sampler.n_dim == 2:
sol = np.array([[-0.51, -0.09, -0.05, -0.21],
[0.13, 0.65, 0.89, 0.25]])
elif sampler.n_dim == 3:
sol = np.array([[-0.37, -0.62, -0.36, -0.54, -0.19, 0.2, 0.17, 0.22],
[-0.2, 0.34, 0.76, 0.42, 0.21, 0.01, 0.15, 0.54]])
npt.assert_almost_equal(Y['Values'], sol, decimal=2)
def test_GpSampler_sample_coeff(sampler, seed):
size = 2
Y = sampler(size)
if sampler.n_dim == 1:
sol = np.array([[-0.2, -1.53, 0.18],
[0.34, 1.03, -0.47]])
elif sampler.n_dim == 2:
sol = np.array([[0.62, -0.05, 0.51, -0.77],
[-1.37, -0.38, 0.28, -1.65]])
elif sampler.n_dim == 3:
sol = np.array([[0.89, -2.21, -0.83, -0.2, -0.72, 0.33, -0.97, 0.48],
[-1.3, 0.2, -0.76, 1.48, 0.32, 0.8, 0.35, -2.09]])
npt.assert_almost_equal(Y['Coefficients'], sol, decimal=2)
def test_GpSampler_build_values(sampler):
coeff = [[0.2, 0.7, -0.4, 1.6, 0.2, 0.8]]
Y = sampler(coeff=coeff)
if sampler.n_dim == 1:
sol = np.array([[0.225, -0.216, -0.264]])
elif sampler.n_dim == 2:
sol = np.array([[0.311, -0.138, -0.582, 0.119]])
elif sampler.n_dim == 3:
sol = np.array([[-0.22, -0.11, 0.34, 0.28, -0.37, -0.21, -0.04, -0.04]])
npt.assert_almost_equal(Y['Values'], sol, decimal=2)
def test_GpSampler_build_coeff(sampler):
coeff = [[0.2, 0.7, -0.4, 1.6, 0.2, 0.8]]
Y = sampler(coeff=coeff)
if sampler.n_dim == 1:
sol = np.array([[0.2, 0.7, -0.4]])
elif sampler.n_dim == 2:
sol = np.array([[0.2, 0.7, -0.4, 1.6]])
elif sampler.n_dim == 3:
sol = np.array([[0.2, 0.7, -0.4, 1.6, 0.2, 0.8, 0., 0.]])
npt.assert_almost_equal(Y['Coefficients'], sol, decimal=2)
| 2.40625
| 2
|
setup.py
|
muirawachanga/network_billing_management
| 1
|
12784490
|
from __future__ import unicode_literals
from setuptools import setup, find_packages
with open("requirements.txt") as f:
install_requires = f.read().strip().split("\n")
# get version from __version__ variable in network_billing_system/__init__.py
from network_billing_system import __version__ as version
setup(
name="network_billing_system",
version=version,
description="A complete billing system integrated with pfsense",
author="stephen",
author_email="<EMAIL>",
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires,
)
| 1.601563
| 2
|
Dataset/Leetcode/train/6/163.py
|
kkcookies99/UAST
| 0
|
12784491
|
<filename>Dataset/Leetcode/train/6/163.py<gh_stars>0
class Solution:
def XXX(self, s, numRows) :
if numRows==1:return s
l=[""]*numRows
        z=numRows*2-2 # cycle length: 2 rows -> groups of 2, 3 rows -> groups of 4, 4 rows -> groups of 6
for j in range(len(s)):
            y=j%z # remainder within the current zigzag cycle
if y<numRows:
l[y]=l[y]+s[j]
else:
l[z-y]=l[z-y]+s[j]
return "".join(l)
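    # Worked example (added for clarity): with s = "PAYPALISHIRING" and numRows = 3,
    # z = 4, so positions j with j % 4 in {0, 1, 2} land on rows 0, 1, 2, while
    # j % 4 == 3 folds back onto row z - 3 = 1; joining the rows gives "PAHNAPLSIIGYIR".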
| 2.921875
| 3
|
apps/books/views.py
|
RGero215/Demo
| 1
|
12784492
|
from django.shortcuts import render, HttpResponse, redirect
from django.contrib import messages
from .models import Review, Author, Book
from ..login_registration.models import User
# Create your views here.
def books(request):
if 'login' not in request.session or request.session['login'] == False:
return redirect('/')
else:
context = {
'recent': Review.objects.recent_and_not()[0],
'more': Review.objects.recent_and_not()[1]
}
return render(request, 'books/index.html', context)
def add(request):
if 'login' not in request.session or request.session['login'] == False:
return redirect('/')
else:
context = {
'authors': Author.objects.all()
}
return render(request, 'books/edit.html', context)
def create(request):
if 'login' not in request.session or request.session['login'] == False:
return redirect('/')
else:
errs = Review.objects.validate_review(request.POST)
if errs:
for e in errs:
messages.error(request, e)
else:
book_id = Review.objects.create_review(request.POST, request.session['user_id']).book.id
return redirect('/books/{}'.format(book_id))
def create_additional(request, book_id):
if 'login' not in request.session or request.session['login'] == False:
return redirect('/')
else:
the_book = Book.objects.get(id=book_id)
new_book_data = {
'title': the_book.title,
'author': the_book.author.id,
'rating': request.POST['rating'],
'review': request.POST['review'],
'new_author': ''
}
errs = Review.objects.validate_review(new_book_data)
if errs:
for e in errs:
messages.error(request, e)
else:
Review.objects.create_review(new_book_data, request.session['user_id'])
return redirect('/books/' + book_id)
def show(request, book_id):
if 'login' not in request.session or request.session['login'] == False:
return redirect('/')
else:
context = {
'book': Book.objects.get(id=book_id)
}
return render(request, 'books/review.html', context)
def profile(request, user_id):
if 'login' not in request.session or request.session['login'] == False:
return redirect('/')
else:
user = User.objects.get(id=user_id)
unique_ids = user.reviews_left.all().values('book').distinct()
unique_books = []
for book in unique_ids:
unique_books.append(Book.objects.get(id=book['book']))
context = {
'user': user,
'unique_book_reviews': unique_books
}
return render(request, 'books/profile.html', context)
def delete(request, review_id):
if 'login' not in request.session or request.session['login'] == False:
return redirect('/')
else:
book = Book.objects.get(reviews= review_id).id
Review.objects.get(id=review_id).delete()
return redirect('/books/'+ str(book))
| 2.125
| 2
|
test.py
|
racerxdl/PyMAX30100
| 0
|
12784493
|
#!/usr/bin/env python
import time
from max30100.oxymeter import Oxymeter
from max30100.constants import *
x = Oxymeter(1)
lastReport = time.time() * 1000
if not x.begin():
    print("Error initializing")
exit(1)
x.setIRLedCurrent(MAX30100_LED_CURR_7_6MA) # Works best with me
print("Running loop")
while True:
x.update()
if time.time() * 1000 - lastReport > 1000:
print("Heart Rate: %s bpm - SpO2: %s%%" %(x.getHeartRate(), x.getSpO2()))
lastReport = time.time() * 1000
| 2.5
| 2
|
radynpy/utils/RadynMovie.py
|
grahamkerr/radynpy
| 1
|
12784494
|
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
def rmovie_basicvar(cdf,
var = 'tg1',
Mm = False,
km = False,
savefig = False,
figname = 'radynvar.html',
color = 'steelblue'):
'''
A function to produce an animated figure of RADYN variables.
This version is pre-constructed and lets you just input the
variable you want to plot. Other variables (such as populations)
will require more input, and are separate functions.
Turns the output into a pandas dataframe, which is then passed to
plotly express to create the animated figure
Parameters
__________
cdf : The radyn cdf object
var : str
The variable to plot (default = 'tg1')
Mm : Boolean
Plot height in Mm (default = False)
km : Boolean
Plot height in km (default = False)
savefig : Boolean
Save the figure (html file)
figname : str
Filename, if saving the output
NOTES :
So far, allowed variables are
tg1 - temperature
ne1 - electron density
bheat1 - beam heating rate
d1 - mass density
vz1 - velocity
np - proton density
<NAME>, March 2021
'''
########################################################################
# Some preliminary set up
########################################################################
if Mm == True:
xtitle = 'Height [Mm]'
height = cdf.z1/1e8
elif km == True:
xtitle = 'Height [km]'
height = cdf.z1/1e5
else:
xtitle = 'Height [cm]'
height = cdf.z1
if var == 'tg1':
rvar = cdf.tg1
ytitle = 'Temperature [K]'
ylog = True
xlog = False
elif var == 'ne1':
rvar = cdf.ne1
ytitle = 'Electron Density [cm<sup>-3</sup>]'
ylog = True
xlog = False
elif var == 'bheat1':
rvar = cdf.bheat1
ytitle = 'Q<sub>beam</sub> [erg cm<sup>-3</sup> s<sup>-1</sup>]'
ylog = False
xlog = False
elif var == 'd1':
rvar = cdf.d1
ytitle = 'Mass Density [g cm<sup>-3</sup>]'
ylog = True
xlog = False
elif var == 'vz1':
rvar = cdf.vz1/1e5
ytitle = 'Velocity [km s<sup>-1</sup>]'
ylog = False
xlog = False
elif var == 'np':
rvar = cdf.n1[:,:,5,0]
ytitle = 'Proton Density [cm<sup>-3</sup>]'
ylog = True
xlog = False
template = dict(
layout = go.Layout(font = dict(family = "Rockwell", size = 16),
title_font = dict(family = "Rockwell", size = 20),
plot_bgcolor = 'white',
paper_bgcolor = 'white',
xaxis = dict(
showexponent = 'all',
exponentformat = 'e',
tickangle = 0,
linewidth = 3,
showgrid = True,
),
yaxis = dict(
showexponent = 'all',
exponentformat = 'e',
linewidth = 3,
showgrid = True,
anchor = 'free',
position = 0,
domain = [0.0,1]
),
coloraxis_colorbar = dict(
thickness = 15,
tickformat = '0.2f',
ticks = 'outside',
titleside = 'right'
)
))
########################################################################
# Build the dataframe
########################################################################
col1 = ytitle
col2 = xtitle
time = 'Time [s]'
timeind = 'Time index'
df_list = []
for i in range(len(cdf.time)):
data = {col1:rvar[i,:],
col2:height[i,:],
time: cdf.time[i],
timeind: i
}
df_list.append(pd.DataFrame(data))
df = pd.concat(df_list)
########################################################################
# Plot the variable
########################################################################
h1 = 700
w1 = 700
fig1 = px.line(df,
x = df.columns[1], y = df.columns[0],
# animation_group = 'Time [s]',
animation_frame = 'Time [s]',
log_x = xlog,
log_y = ylog,
template = template,
color_discrete_sequence = [color])
fig1.show()
if savefig == True:
fig1.write_html(figname)
return df
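# Illustrative usage sketch (not from the original source): `cdf` is any loaded RADYN
# cdf object exposing z1, time and the variables listed above; the loader shown here
# (radynpy.cdf.LazyRadynData) is an assumption about how such an object is created.
#
#   from radynpy.cdf import LazyRadynData
#   cdf = LazyRadynData('radyn_out.cdf')
#   df = rmovie_basicvar(cdf, var='tg1', Mm=True, savefig=True, figname='tg1_movie.html')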
def rmovie(var1, var2,
time = [-10.0],
savefig = False,
figname = 'radynvar.html',
xtitle = 'Var 1',
ytitle = 'Var 2',
title = ' ',
color = 'steelblue',
xlog = False, ylog = False):
'''
A function to produce an animated figure of RADYN variables.
This version is 'dumb' and just plots col1 vs col2 without any
    axes labels, unless they are passed through the function call.
Variables must be input as [time, dim1]
Turns the output into a pandas dataframe, which is then passed to
plotly express to create the animated figure
Parameters
__________
var1 : float
The variable to plot on the x-axis [time, dim1]
var2 : float
The variable to plot on the y-axis [time, dim1]
xtitle : str
The xaxis label (default "Var 1")
ytitle : str
The xaxis label (default "Var 2")
title : str
A plot title (default " ")
savefig : Boolean
Save the figure (html file)
figname : str
Filename, if saving the output
xlog : boolean
Default is false. Set to True to have log x-axis
ylog : boolean
Default is false. Set to True to have log y-axis
NOTES :
<NAME>, March 2021
'''
########################################################################
# Some preliminary set up
########################################################################
if time[0] == -10:
time = np.arange(0,var1.shape[0])
col3 = 'Time [index]'
else:
col3 = 'Time [s]'
template = dict(
layout = go.Layout(font = dict(family = "Rockwell", size = 16),
title_font = dict(family = "Rockwell", size = 20),
plot_bgcolor = 'white',
paper_bgcolor = 'white',
xaxis = dict(
showexponent = 'all',
exponentformat = 'e',
tickangle = 0,
linewidth = 3,
showgrid = True,
),
yaxis = dict(
showexponent = 'all',
exponentformat = 'e',
linewidth = 3,
showgrid = True,
anchor = 'free',
position = 0,
domain = [0.0,1]
),
coloraxis_colorbar = dict(
thickness = 15,
tickformat = '0.2f',
ticks = 'outside',
titleside = 'right'
)
))
########################################################################
# Build the dataframe
########################################################################
col1 = xtitle
col2 = ytitle
df_list = []
for i in range(len(time)):
data = {col1:var1[i,:],
col2:var2[i,:],
col3: time[i],
}
df_list.append(pd.DataFrame(data))
df = pd.concat(df_list)
########################################################################
# Plot the variable
########################################################################
h1 = 700
w1 = 700
fig1 = px.line(df,
x = df.columns[0], y = df.columns[1],
# animation_group = 'Time [s]',
animation_frame = df.columns[2],
log_x = xlog,
log_y = ylog,
title = title,
color_discrete_sequence = [color],
template = template)
fig1.show()
if savefig == True:
fig1.write_html(figname)
return df
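# Illustrative usage sketch for the generic plotter (not from the original source):
# var1/var2 can be any [time, height] arrays taken from the same cdf object, e.g.
#
#   df = rmovie(cdf.tg1, cdf.vz1 / 1e5, time=cdf.time,
#               xtitle='Temperature [K]', ytitle='Velocity [km s-1]', xlog=True)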
| 3.53125
| 4
|
HLTrigger/Configuration/python/HLT_75e33/modules/particleFlowRecHitHBHE_cfi.py
|
PKUfudawei/cmssw
| 1
|
12784495
|
import FWCore.ParameterSet.Config as cms
particleFlowRecHitHBHE = cms.EDProducer("PFRecHitProducer",
navigator = cms.PSet(
hcalEnums = cms.vint32(1, 2),
name = cms.string('PFRecHitHCALDenseIdNavigator')
),
producers = cms.VPSet(cms.PSet(
name = cms.string('PFHBHERecHitCreator'),
qualityTests = cms.VPSet(
cms.PSet(
cuts = cms.VPSet(
cms.PSet(
depth = cms.vint32(1, 2, 3, 4),
detectorEnum = cms.int32(1),
threshold = cms.vdouble(0.8, 1.2, 1.2, 1.2)
),
cms.PSet(
depth = cms.vint32(
1, 2, 3, 4, 5,
6, 7
),
detectorEnum = cms.int32(2),
threshold = cms.vdouble(
0.1, 0.2, 0.2, 0.2, 0.2,
0.2, 0.2
)
)
),
name = cms.string('PFRecHitQTestHCALThresholdVsDepth')
),
cms.PSet(
cleaningThresholds = cms.vdouble(0.0),
flags = cms.vstring('Standard'),
maxSeverities = cms.vint32(11),
name = cms.string('PFRecHitQTestHCALChannel')
)
),
src = cms.InputTag("hbhereco")
))
)
| 1.304688
| 1
|
tests/test_backend.py
|
seeq12/seeq-udf-ui
| 0
|
12784496
|
from seeq import spy
import pytest
import time
@pytest.mark.system
class TestCreate:
def test_create_package_and_function(self, instantiate_ui_create_function_and_package):
ui = instantiate_ui_create_function_and_package('testPackage', 'testFunction')
assert 'testPackage' in ui.backend.fetch_udf_packages()
assert 'testPackage' in ui.app.search_display.package_list
assert 'testFunction' in [func_name.name for func_name in ui.backend.selected_package.package.functions]
assert 'testFunction' + '($Signal, $Signal)' in ui.app.search_display.function_list
def test_create_function_same_name(self, instantiate_ui_create_function_and_package):
ui = instantiate_ui_create_function_and_package('testPackage', 'testFunction')
ui.app.function_parameters_display.params_and_types = [{'name': 'a', 'type': 'Signal'},
{'name': 'b', 'type': 'Signal'},
{'name': 'c', 'type': 'Scalar'}]
ui.app.function_parameters_display.formula = '$a + $b * $c'
ui.app.function_documentation.func_description = '<p>Test function with the same name</p>'
ui.app.function_documentation.examples_and_descriptions = [
{'description': 'Example 1', 'formula': '$a + $b * $c'},
{'description': 'Example 2', 'formula': '$c + $d * $e'}]
ui.app.summary_page.vue_on_review(data='')
ui.app.summary_page.vue_on_submit(data='')
assert 'testFunction' + '($Signal, $Signal)' in ui.app.search_display.function_list
assert 'testFunction' + '($Signal, $Signal, $Scalar)' in ui.app.search_display.function_list
@pytest.mark.system
class TestModify:
def test_modify_formula(self, instantiate_ui_create_function_and_package):
ui = instantiate_ui_create_function_and_package('testPackage', 'testFunction')
ui.app.function_parameters_display.params_and_types = [{'name': 'a', 'type': 'Signal'},
{'name': 'newParam', 'type': 'Scalar'}]
ui.app.function_parameters_display.formula = '$a * $newParam'
ui.app.summary_page.vue_on_review(data='')
ui.app.summary_page.vue_on_submit(data='')
ui.app.search_display.vue_update_package_object(data='testPackage')
ui.app.search_display.vue_update_function(data='testFunction' + '($Signal, $Scalar)')
assert '$newParam' in ui.app.function_parameters_display.formula
assert '$newParam' in ui.backend.selected_function.formula
def test_func_description(self, instantiate_ui_create_function_and_package):
ui = instantiate_ui_create_function_and_package('testPackage', 'testFunction')
ui.app.function_documentation.func_description_markdown = '## Test Function Description'
ui.app.function_documentation.vue_update_func_desc_html(data='')
# The markdown-to-html converter has a delay
time.sleep(0.5)
assert '<h2>Test Function Description</h2>' in ui.app.function_documentation.func_description_html
ui.app.summary_page.vue_on_review(data='')
ui.app.summary_page.vue_on_submit(data='')
ui.app.search_display.vue_update_package_object(data='testPackage')
ui.app.search_display.vue_update_function(data='testFunction($Signal, $Signal)')
assert '<h2>Test Function Description</h2>' in ui.backend.selected_function.description
def test_package_description(self, instantiate_ui_create_function_and_package):
ui = instantiate_ui_create_function_and_package('testPackage', 'testFunction')
ui.app.function_documentation.package_description_markdown = '## Test Package Description'
ui.app.function_documentation.vue_update_package_desc_html(data='')
# The markdown-to-html converter has a delay
time.sleep(0.5)
assert '<h2>Test Package Description</h2>' in ui.app.function_documentation.package_description_html
ui.app.summary_page.vue_on_review(data='')
ui.app.summary_page.vue_on_submit(data='')
ui.app.search_display.vue_update_package_object(data='testPackage')
ui.app.search_display.vue_update_function(data='testFunction($Signal, $Signal)')
assert '<h2>Test Package Description</h2>' in ui.backend.selected_package.description
def test_add_examples(self, instantiate_ui_create_function_and_package):
ui = instantiate_ui_create_function_and_package('testPackage', 'testFunction')
examples = [{'description': 'Example 1', 'formula': '$a + $b'},
{'description': 'Example 2', 'formula': '$c + $d'}]
ui.app.function_documentation.examples_and_descriptions = examples
ui.app.summary_page.vue_on_review(data='')
ui.app.summary_page.vue_on_submit(data='')
ui.app.search_display.vue_update_package_object(data='testPackage')
ui.app.search_display.vue_update_function(data='testFunction($Signal, $Signal)')
assert ui.backend.selected_function.examples_and_descriptions == examples
assert ui.app.function_documentation.examples_and_descriptions == examples
def test_access_control(self, instantiate_ui_create_function_and_package):
ui = instantiate_ui_create_function_and_package('testPackageAccessControl', 'testFunction')
ui.app.function_parameters_display.params_and_types = [{'name': 'a', 'type': 'Signal'},
{'name': 'b', 'type': 'Signal'}]
ui.app.function_parameters_display.formula = '$a + $b'
ui.app.function_documentation.func_description = '<p>Test function</p>'
access_input = [{'name': spy.user.name,
'username': spy.user.username,
'type': 'User',
'read': True,
'write': True,
'manage': True},
{'name': 'Everyone',
'username': None,
'type': 'UserGroup',
'read': True,
'write': True,
'manage': True}
]
ui.app.access_management.selected_users_dict = access_input
ui.app.summary_page.vue_on_review(data='')
ui.app.summary_page.vue_on_submit(data='')
ui.app.search_display.vue_update_package_object(data='testPackageAccessControl')
assert access_input[0] in ui.backend.selected_package.permissions
assert access_input[1] in ui.backend.selected_package.permissions
@pytest.mark.system
class TestDelete:
def test_archive_function(self, instantiate_ui_create_function_and_package):
ui = instantiate_ui_create_function_and_package('testPackage', 'testFunction')
ui.app.search_display.vue_update_package_object(data='testPackage')
ui.app.search_display.vue_update_function(data='testFunction($Signal, $Signal)')
ui.app.summary_page.selected_for_delete = 'Function: testFunction'
ui.app.summary_page.vue_on_delete(data='')
ui.app.search_display.vue_update_package_object(data='testPackage')
assert 'testFunction' + '($Signal, $Signal)' not in ui.app.search_display.function_list
assert 'testFunction' not in ui.app.search_display.function_list
def test_archive_package(self, instantiate_ui_create_function_and_package):
ui = instantiate_ui_create_function_and_package('testPackage', 'testFunction')
ui.app.summary_page.selected_for_delete = 'Package: testPackage'
ui.app.summary_page.vue_on_delete(data='')
ui.app.search_display.vue_update_package_object(data='testPackage')
assert 'testPackage' not in ui.app.search_display.package_list
| 2.203125
| 2
|
moai/nn/residual/bottleneck.py
|
tzole1155/moai
| 10
|
12784497
|
<reponame>tzole1155/moai
import moai.nn.convolution as mic
import moai.nn.activation as mia
import torch
__all__ = [
"Bottleneck",
"PreResBottleneck",
"PreActivBottleneck",
]
'''
Bottleneck versions with 3 convolutions (2 projections, 1 bottleneck)
'''
class Bottleneck(torch.nn.Module):
def __init__(self,
convolution_type: str,
activation_type: str,
in_features: int,
out_features: int,
bottleneck_features: int,
convolution_params: dict,
activation_params: dict,
strided: bool,
):
super(Bottleneck, self).__init__()
self.W1 = mic.make_conv_1x1(
convolution_type=convolution_type,
in_channels=in_features,
out_channels=bottleneck_features,
stride=2 if strided else 1,
**convolution_params
)
self.A1 = mia.make_activation(
features=bottleneck_features,
activation_type=activation_type,
**activation_params
)
self.W2 = mic.make_conv_3x3(
convolution_type=convolution_type,
in_channels=bottleneck_features,
out_channels=bottleneck_features,
**convolution_params
)
self.A2 = mia.make_activation(
features=bottleneck_features,
activation_type=activation_type,
**activation_params
)
self.W3 = mic.make_conv_1x1(
convolution_type=convolution_type,
in_channels=bottleneck_features,
out_channels=out_features,
**convolution_params
)
self.A3 = mia.make_activation(
features=out_features,
activation_type=activation_type,
**activation_params
)
self.S = torch.nn.Identity() if in_features == out_features\
else mic.make_conv_1x1(
convolution_type=convolution_type,
in_channels=in_features,
out_channels=out_features,
**convolution_params,
# using a 3x3 conv for shortcut downscaling instead of a 1x1 (used in detectron2 for example)
) if not strided else mic.make_conv_3x3(
convolution_type=convolution_type,
in_channels=in_features,
out_channels=out_features,
stride=2,
**convolution_params,
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
y = self.W3(self.A2(self.W2(self.A1(self.W1(x))))) # y = W3 * A2(W2 * A1(W1 * x))
return self.A3(self.S(x) + y) # out = A3(S(x) + y)
class PreResBottleneck(Bottleneck):
def __init__(self,
convolution_type: str,
activation_type: str,
in_features: int,
out_features: int,
bottleneck_features: int,
convolution_params: dict,
activation_params: dict,
strided: bool,
):
super(PreResBottleneck, self).__init__(
convolution_type=convolution_type,
activation_type=activation_type,
in_features=in_features,
out_features=out_features,
bottleneck_features=bottleneck_features,
convolution_params=convolution_params,
activation_params=activation_params,
strided=strided
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
y = self.A3(self.W3(self.A2(self.W2(self.A1(self.W1(x)))))) # y = A3(W3 * A2(W2 * A1(W1 * x)))
return self.S(x) + y # out = S(x) + y
class PreActivBottleneck(torch.nn.Module):
def __init__(self,
convolution_type: str,
activation_type: str,
in_features: int,
out_features: int,
bottleneck_features: int,
convolution_params: dict,
activation_params: dict,
strided: bool,
):
super(PreActivBottleneck, self).__init__()
self.A1 = mia.make_activation(
features=in_features,
activation_type=activation_type,
**activation_params
)
self.W1 = mic.make_conv_1x1(
convolution_type=convolution_type,
in_channels=in_features,
out_channels=bottleneck_features,
stride=2 if strided else 1,
**convolution_params
)
self.A2 = mia.make_activation(
features=bottleneck_features,
activation_type=activation_type,
**activation_params
)
self.W2 = mic.make_conv_3x3(
convolution_type=convolution_type,
in_channels=bottleneck_features,
out_channels=bottleneck_features,
**convolution_params
)
self.A3 = mia.make_activation(
features=bottleneck_features,
activation_type=activation_type,
**activation_params
)
self.W3 = mic.make_conv_1x1(
convolution_type=convolution_type,
in_channels=bottleneck_features,
out_channels=out_features,
**convolution_params
)
self.S = torch.nn.Identity() if in_features == out_features\
else mic.make_conv_1x1(
convolution_type=convolution_type,
in_channels=in_features,
out_channels=out_features,
**convolution_params,
# using a 3x3 conv for shortcut downscaling instead of a 1x1 (used in detectron2 for example)
) if not strided else mic.make_conv_3x3(
convolution_type=convolution_type,
in_channels=in_features,
out_channels=out_features,
stride=2,
**convolution_params,
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
y = self.W3(self.A3(self.W2(self.A2(self.W1(self.A1(x)))))) # y = W3 * A3(W2 * A2(W1 * A1(x)))
        return self.S(x) + y # out = S(x) + y
| 2.265625
| 2
|
test.py
|
Pokoai/Andrew-NG-Meachine-Learning
| 1
|
12784498
|
x1, x2 = find_decision_boundary(svc, 0, 5, 1.5, 5, 2 * 10**-3)
fig, ax = plt.subplots(figsize=(10, 8))
ax.scatter(x1, x2, s=10, c='r', label='Boundary')
plot_init_pic(data, fig, ax)
ax.set_title('SVM(C=1) Decision Boundary')
ax.set_xlabel('X1')
ax.set_ylabel('X2')
ax.legend()
plt.show()
| 2.890625
| 3
|
anton/lights/audio/audioprocess.py
|
sunnstix/dancyPi-audio-reactive-led
| 0
|
12784499
|
from asyncio import streams
import time
import numpy as np
import pyaudio
import anton.lights.config as config
class AudioProcess():
def __init__(self):
self.audio = pyaudio.PyAudio()
self.frames_per_buffer = int(config.MIC_RATE / config.FPS)
self.overflows = 0
self.prev_ovf_time = time.time()
self.running = False
def start_stream(self,callback):
self.stream = self.audio.open(format=pyaudio.paInt16,
channels=1,
rate=config.MIC_RATE,
input=True,
frames_per_buffer=self.frames_per_buffer)
self.running = True
self.overflows = 0
while self.running:
try:
                y = np.frombuffer(self.stream.read(self.frames_per_buffer, exception_on_overflow=False), dtype=np.int16)
y = y.astype(np.float32)
self.stream.read(self.stream.get_read_available(), exception_on_overflow=False)
callback(y)
except IOError:
self.overflows += 1
if time.time() > self.prev_ovf_time + 1:
self.prev_ovf_time = time.time()
print('Audio buffer has overflowed {} times'.format(self.overflows))
def kill_stream(self):
self.running = False
def stop_stream(self):
self.stream.stop_stream()
self.stream.close()
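# Minimal usage sketch (illustrative only; assumes a working microphone and that
# config.MIC_RATE / config.FPS are set as in anton.lights.config):
#
#   def on_frame(samples):
#       print(len(samples), samples.mean())
#
#   processor = AudioProcess()
#   processor.start_stream(on_frame)   # blocks until kill_stream() is called elsewhere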
| 2.8125
| 3
|
main.py
|
mario21ic/swarm-status
| 0
|
12784500
|
<gh_stars>0
#!/usr/bin/env python3
# Usage: python main.py action
# Example: python main.py nodes|services|tasks
import time
import platform
import logging
import os
import sys
import boto3
import docker
docker_client = docker.from_env()
docker_api = docker.APIClient(base_url='unix://var/run/docker.sock')
logging.basicConfig(level=logging.INFO, format="%(asctime)s " + platform.node() + ": %(message)s")
def main(action=""):
logging.info("========================")
if action == "nodes":
logging.info("### Nodes ###")
nodes = docker_client.nodes.list(filters={'role': 'manager'})
logging.info("#### Managers: %s" % (len(nodes)))
logging.info("ID\t\t\t\tHOSTNAME\tIP\tSTATUS\tAVAILABILITY\tMANAGER\tREACHABILITY\tCREATED")
for node in nodes:
# logging.info("attrs: " + str(node.attrs))
logging.info("%s\t%s\t%s\t%s\t%s\t\t%s\t%s\t%s" % (
node.attrs['ID'],
node.attrs['Description']['Hostname'],
node.attrs['Status']['Addr'],
node.attrs['Status']['State'],
node.attrs['Spec']['Availability'],
node.attrs['ManagerStatus']['Leader'],
node.attrs['ManagerStatus']['Reachability'],
node.attrs['CreatedAt']
))
nodes = docker_client.nodes.list(filters={'role': 'worker'})
        logging.info("#### Workers: %s" % (len(nodes)))
logging.info("ID\t\t\t\tHOSTNAME\tIP\tSTATUS\tAVAILABILITY\tCREATED")
for node in nodes:
# logging.info("attrs: " + str(node.attrs))
logging.info("%s\t%s\t%s\t%s\t%s\t\t%s" % (
node.attrs['ID'],
node.attrs['Description']['Hostname'],
node.attrs['Status']['Addr'],
node.attrs['Status']['State'],
node.attrs['Spec']['Availability'],
node.attrs['CreatedAt']
))
return
if action == "services":
logging.info("###### Services ######")
services = docker_client.services.list()
logging.info("ID\t\t\t\tNAME\t\tMODE\tREPLICAS-TASKS\tIMAGE\tPORTS\tCREATED")
for service in services:
# logging.info("Service attrs: " + str(service.attrs))
mode = ""
for mod in service.attrs['Spec']['Mode'].keys():
mode = mod
replicas = 0
if mode == "Replicated":
replicas = service.attrs['Spec']['Mode']['Replicated']['Replicas']
ports = []
if "Ports" in service.attrs['Endpoint']:
for ingress in service.attrs['Endpoint']['Ports']:
ports.append(
ingress['Protocol'] + "/" + str(ingress['PublishedPort']) + ":" + str(ingress['TargetPort']))
logging.info("%s\t%s\t\t%s\t%s-%s\t%s\t%s\t%s" % (
service.attrs['ID'],
service.name[:7],
mode[:6],
replicas,
len(service.tasks()),
service.attrs['Spec']['TaskTemplate']['ContainerSpec']['Image'].split("@")[0],
",".join(ports),
service.attrs['CreatedAt']
))
return
if action == "tasks":
logging.info("###### Tasks ######")
services = docker_client.services.list()
logging.info("ID\t\t\t\tNAME\t\tIMAGE\t\t\tNODE\tDESIRED\tCURRENT\tERROR\tPORTS\tCREATED")
for service in services:
for task in service.tasks():
# logging.info("Tasks attrs: " + str(task))
node = docker_api.inspect_node(task['NodeID'])
logging.info("%s\t%s\t\t%s\t\t%s\t%s\t%s\t%s\t%s\t%s" % (
task['ID'],
"",
task['Spec']['ContainerSpec']['Image'].split("@")[0],
node['Description']['Hostname'],
task['DesiredState'],
task['Status']['State'],
"",
"",
task['CreatedAt']
))
return
if __name__ == "__main__":
main(sys.argv[1])
| 2.125
| 2
|
graphapi/schema.py
|
johnseekins/openstates.org
| 51
|
12784501
|
import graphene
from .legislative import LegislativeQuery
from .core import CoreQuery
class Query(LegislativeQuery, CoreQuery, graphene.ObjectType):
pass
schema = graphene.Schema(query=Query)
| 1.46875
| 1
|
objects/enemy.py
|
elgrandt/ShooterInc
| 0
|
12784502
|
import OGL
import pywavefront
from OpenGL.GL import *
class BasicEnemy(OGL.Cube):
def __init__(self,x,y,z):
OGL.Cube.__init__(self,3,8,2,x,y,z)
self.model = pywavefront.Wavefront("Handgun_obj.obj","models/")
self.model.onload = self.onload
#self.model = OGL.OBJ("Man.obj",Fse,10)
#self.width = self.model.widthal
#self.height = self.model.height
#self.depth = self.model.depth
def onload(self):
trigger = self.model.get_mesh("Cube.005_Cube.000")
trigger.pos[0] = -.3
def blit(self):
self.model.draw()
| 2.875
| 3
|
playbooks/files/autoscaling/cpu_load.py
|
opentelekomcloud-infra/csm-sandbox
| 2
|
12784503
|
<gh_stars>1-10
""" A tool for generating a set of subsequent CPU utilization levels."""
import logging
import subprocess
import sys
import time
from argparse import ArgumentParser
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
def process(interval, utilization_list, ncpus):
for utilization in utilization_list:
print(f"\nSwitching to {utilization}%")
proc = subprocess.Popen(["lookbusy",
"--ncpus", str(ncpus),
"--cpu-util", utilization])
time.sleep(interval)
proc.terminate()
def _parse_args():
agp = ArgumentParser(
prog="cpu load generator ",
description="Generates a set of subsequent CPU utilization levels read from a file.")
agp.add_argument("--interval",
help="interval between subsequent CPU utilization levels in seconds",
default=60,
type=int)
agp.add_argument("--source",
                     help="source file containing a new line separated list of CPU "
"utilization levels specified as numbers in the [0, 100] range")
agp.add_argument("--ncpus",
help="number of CPU cores to utilize [default: 1]", default=1)
args, _ = agp.parse_known_args()
try:
int(args.interval)
except ValueError:
LOGGER.error("interval must be an integer >= 0")
sys.exit(1)
if args.interval <= 0:
        LOGGER.error("interval must be > 0")
sys.exit(1)
return args
def _parse_config(source: str):
utilization = []
with open(source, 'r') as file:
levels = file.read().splitlines()
for line in levels:
if not line.isdigit():
continue
if 0 <= int(line) <= 100:
utilization.append(line)
else:
            LOGGER.error("the source file must only contain new line separated "
"numbers in the [0, 100] range")
return utilization
def main():
args = _parse_args()
utilization = _parse_config(args.source)
while True:
try:
process(args.interval, utilization, args.ncpus)
except KeyboardInterrupt:
LOGGER.info("Script Stopped")
sys.exit(0)
if __name__ == '__main__':
main()
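# Example invocation (illustrative; `levels.txt` is a hypothetical file name):
# the source file holds one utilization level per line, e.g.
#   20
#   50
#   90
# and the script is run as
#   python cpu_load.py --source levels.txt --interval 30 --ncpus 2
# (the `lookbusy` binary must be installed and on PATH).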
| 3.125
| 3
|
memory/memory_object.py
|
nbanmp/seninja
| 109
|
12784504
|
<gh_stars>100-1000
from ..expr import BV, BVArray, Bool, ITE
class MemoryObj(object):
def __init__(self, name, bits=64, bvarray=None):
self.bvarray = BVArray(
"MEMOBJ_" + name, bits, 8
) if bvarray is None else bvarray
self.name = name
self.bits = bits
def __str__(self):
return "<MemoryObj{bits} {name}>".format(
bits=self.bits,
name=self.name
)
def __repr__(self):
return self.__str__()
def load(self, index: BV):
return self.bvarray.Select(index)
def store(self, index: BV, value: BV, condition: Bool = None):
if condition is None:
self.bvarray.Store(index, value)
else:
# this can be inefficient
self.bvarray.ConditionalStore(index, value, condition)
def copy(self):
return MemoryObj(self.name, self.bits, self.bvarray.copy())
def merge(self, other, merge_condition: Bool):
self.bvarray = self.bvarray.merge(other.bvarray, merge_condition)
| 2.78125
| 3
|
models/dino.py
|
guilhermesurek/3D-ResNets-PyTorch
| 0
|
12784505
|
<reponame>guilhermesurek/3D-ResNets-PyTorch<filename>models/dino.py<gh_stars>0
import torch
import torch.nn as nn
import torch.nn.functional as F
class Head(nn.Module):
def __init__(self, n_classes, n_inputs=None, dropout=None):
super().__init__()
self.n_inputs = n_inputs
self.n_classes = n_classes
self.n_layers = 2
# Consolidate temporal features
# self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=(7,256), stride=1)
# self.bn1 = nn.BatchNorm2d(self.conv1.out_channels)
# self.conv2 = nn.Conv2d(in_channels=self.conv1.out_channels, out_channels=32, kernel_size=(6,512), stride=1)
# self.bn2 = nn.BatchNorm2d(self.conv2.out_channels)
# self.conv3 = nn.Conv2d(in_channels=self.conv2.out_channels, out_channels=32, kernel_size=(5,1024), stride=1)
# self.bn3 = nn.BatchNorm2d(self.conv3.out_channels)
# self.relu = nn.ReLU(inplace=True)
# #self.fc1 = nn.Linear(self.conv3.out_channels,self.conv3.out_channels)
# self.fc = nn.Linear(self.conv3.out_channels,n_classes)
# self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
# conf2
# self.conv1 = nn.Conv2d(in_channels=1, out_channels=2, kernel_size=(7,7), stride=1)
# self.bn1 = nn.BatchNorm2d(self.conv1.out_channels)
# self.conv2 = nn.Conv2d(in_channels=self.conv1.out_channels, out_channels=2, kernel_size=(6,7), stride=1)
# self.bn2 = nn.BatchNorm2d(self.conv2.out_channels)
# self.conv3 = nn.Conv2d(in_channels=self.conv2.out_channels, out_channels=4, kernel_size=(5,5), stride=1)
# self.bn3 = nn.BatchNorm2d(self.conv3.out_channels)
# self.relu = nn.ReLU(inplace=True)
# #self.fc1 = nn.Linear(8128,8128) #conv3.out_channels,conv3.out_channels)
# self.fc = nn.Linear(8128,n_classes)
# conf3
# self.conv1 = nn.Conv2d(in_channels=1, out_channels=4, kernel_size=(9,75), stride=(1,3))
# self.bn1 = nn.BatchNorm2d(self.conv1.out_channels)
# self.conv2 = nn.Conv2d(in_channels=self.conv1.out_channels, out_channels=8, kernel_size=(8,75), stride=(1,3))
# self.bn2 = nn.BatchNorm2d(self.conv2.out_channels)
# self.relu = nn.ReLU(inplace=True)
# #self.fc1 = nn.Linear(1560,1560)
# self.fc = nn.Linear(1560,n_classes)
# conf4
# self.conv1 = nn.Conv2d(in_channels=1, out_channels=4, kernel_size=(16,155), stride=(1,3))
# self.bn1 = nn.BatchNorm2d(self.conv1.out_channels)
# self.relu = nn.ReLU(inplace=True)
# #self.fc1 = nn.Linear(2528,2528)
# self.fc = nn.Linear(2528,n_classes)
# conf5
# self.fc1 = nn.Linear(16*2048,2048)
# self.fc = nn.Linear(2048,n_classes)
# conf6
#self.fc = nn.Linear(16*2048,n_classes)
# ViT #1
#self.fc = nn.Linear(n_inputs, n_classes)
# ViT #2
self.lstm = nn.LSTM(input_size=self.n_inputs, hidden_size=self.n_inputs, num_layers=self.n_layers, dropout=dropout, batch_first=True) #lstm
self.fc = nn.Linear(self.n_inputs, self.n_classes)
def forward(self, x):
## Conf1
# x = self.conv1(x)
# x = self.bn1(x)
# x = self.relu(x)
# x = self.conv2(x)
# x = self.bn2(x)
# x = self.relu(x)
# x = self.conv3(x)
# x = self.bn3(x)
# x = self.relu(x)
# x = self.avgpool(x)
# x = torch.flatten(x, 1)
# #x = self.fc1(x)
# x = self.fc(x)
## Conf2
# x = self.conv1(x)
# x = self.bn1(x)
# x = self.relu(x)
# x = self.conv2(x)
# x = self.bn2(x)
# x = self.relu(x)
# x = self.conv3(x)
# x = self.bn3(x)
# x = self.relu(x)
# x = torch.flatten(x, 1)
# #x = self.fc1(x)
# x = self.fc(x)
## Conf3
# x = self.conv1(x)
# x = self.bn1(x)
# x = self.relu(x)
# x = self.conv2(x)
# x = self.bn2(x)
# x = self.relu(x)
# x = torch.flatten(x, 1)
# #x = self.fc1(x)
# x = self.fc(x)
## Conf4
# x = self.conv1(x)
# x = self.bn1(x)
# x = self.relu(x)
# x = torch.flatten(x, 1)
# #x = self.fc1(x)
# x = self.fc(x)
## Conf5
# x = torch.flatten(x, 1)
# x = self.fc1(x)
# x = self.fc(x)
## ViT #1
#x = torch.flatten(x, 1)
#x = self.fc(x)
## ViT #2
# Don't need to initialize hidden state because the states are not connected between iterations
x, _ = self.lstm(x)
x = x[:, -1, :]
x = self.fc(x)
return x
class Dino_ResNet(nn.Module):
def __init__(self,
#block,
#layers,
#block_inplanes,
#n_input_channels=3,
#conv1_t_size=7,
#conv1_t_stride=1,
#no_max_pool=False,
#shortcut_type='B',
#widen_factor=1.0,
n_classes=400):#,
#dropout_factor=0.5):
super().__init__()
# Extract Features
self.dino = torch.hub.load('facebookresearch/dino:main', 'dino_resnet50')
for param in self.dino.parameters():
param.requires_grad = False
self.head = Head(n_classes=n_classes)
# Consolidate temporal features
# self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=(7,256), stride=1)
# self.bn1 = nn.BatchNorm2d(self.conv1.out_channels)
# self.conv2 = nn.Conv2d(in_channels=self.conv1.out_channels, out_channels=128, kernel_size=(6,512), stride=1)
# self.bn2 = nn.BatchNorm2d(self.conv2.out_channels)
# self.conv3 = nn.Conv2d(in_channels=self.conv2.out_channels, out_channels=256, kernel_size=(5,1024), stride=1)
# self.bn3 = nn.BatchNorm2d(self.conv3.out_channels)
# self.relu = nn.ReLU(inplace=True)
# #self.fc1 = nn.Linear(self.conv3.out_channels,self.conv3.out_channels)
# self.fc = nn.Linear(self.conv3.out_channels,n_classes)
# self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
def forward(self, x):
#import ipdb; ipdb.set_trace()
x = x.permute(0, 2, 1, 3, 4)
#x1 = [self.dino(x_in) for x_in in x]
#x1 = []
c=0
for x_in in x:
out = self.dino(x_in)
out = torch.stack((out,), 0)
if c==0:
x1 = out
                c += 1
else:
x1 = torch.cat((x1,out),0)
#x1 = torch.stack(x1, 0)
        # For LSTM, comment out the line below
x = torch.stack((x1,), 0).permute(1, 0, 2, 3)
#x = x1
x = self.head(x)
# x = self.conv1(x)
# x = self.bn1(x)
# x = self.relu(x)
# x = self.conv2(x)
# x = self.bn2(x)
# x = self.relu(x)
# x = self.conv3(x)
# x = self.bn3(x)
# x = self.relu(x)
# x = self.avgpool(x)
# x = torch.flatten(x, 1)
# #x = self.fc1(x)
# x = self.fc(x)
return x
class Dino_ViT(nn.Module):
def __init__(self,
#block,
#layers,
#block_inplanes,
#n_input_channels=3,
#conv1_t_size=7,
#conv1_t_stride=1,
#no_max_pool=False,
#shortcut_type='B',
#widen_factor=1.0,
n_classes=400,
dropout_factor=0.5):
super().__init__()
# Extract Features
self.dino = torch.hub.load('facebookresearch/dino:main', 'dino_vits16')
for param in self.dino.parameters():
param.requires_grad = False
self.head = Head(n_classes=n_classes, n_inputs=384, dropout=dropout_factor)
def forward(self, x):
#import ipdb; ipdb.set_trace()
x = x.permute(0, 2, 1, 3, 4)
c=0
for x_in in x:
out = self.dino(x_in)
out = torch.stack((out,), 0)
if c==0:
x1 = out
                c += 1
else:
x1 = torch.cat((x1,out),0)
        # For LSTM, comment out the line below
#x = torch.stack((x1,), 0).permute(1, 0, 2, 3)
x = x1
x = self.head(x)
return x
def generate_model(model_arch, **kwargs):
if model_arch == 'dino_resnet':
model = Dino_ResNet(**kwargs)
elif model_arch == 'dino_vit':
model = Dino_ViT(**kwargs)
return model
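# Illustrative usage sketch (not part of the original file): both wrappers download
# DINO weights via torch.hub on first use, so this needs network access. The input
# layout (batch, channels, frames, height, width) is inferred from the permute() calls.
#
#   model = generate_model('dino_vit', n_classes=10, dropout_factor=0.5)
#   clip = torch.randn(2, 3, 16, 224, 224)   # 2 clips of 16 RGB 224x224 frames
#   logits = model(clip)                      # -> tensor of shape (2, 10)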
| 2.109375
| 2
|
python/planck/settings_planck_2018.py
|
sfu-cosmo/MagCosmoMC
| 0
|
12784506
|
<reponame>sfu-cosmo/MagCosmoMC
import copy
import re
from paramgrid import batchjob
# plik foregrounds have to be calculated a posteriori as before (before zre6p5 filter).
ini_dir = 'batch3/'
cov_dir = 'planck_covmats/'
defaults = ['common.ini']
getdist_options = {'ignore_rows': 0.3, 'marker[nrun]': 0,
'marker[w]': -1}
importanceDefaults = ['importance_sampling.ini']
# ranges for parameters when they are varied
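# (Each entry below appears to follow the CosmoMC convention:
#  start value, lower bound, upper bound, start spread, proposal width.)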
params = dict()
params['w'] = '-0.99 -3. 1 0.02 0.02'
params['wa'] = '0 -3 2 0.05 0.05'
params['mnu'] = '0.02 0 5 0.1 0.03'
params['omegak'] = '-0.0008 -0.3 0.3 0.001 0.001' # starting exactly on flat seems to confuse minimizer
params['nnu'] = '3.046 0.05 10 0.05 0.05'
params['nrun'] = '0 -1 1 0.005 0.001'
params['nrunrun'] = '0 -1 1 0.002 0.001'
params['r'] = '0 0 3 0.03 0.03'
params['Alens'] = '1 0 10 0.05 0.05'
params['yhe'] = '0.245 0.1 0.5 0.006 0.006'
params['alpha1'] = '0 -1 1 0.0003 0.0003'
params['meffsterile'] = '0.1 0 3 0.1 0.03'
params['Aphiphi'] = '1 0 10 0.02 0.02'
params['Alensf'] = '1 0 10 0.03 0.03'
# extra parameters that are set only when specific parameters are varied.
param_extra_opts = {
'mnu': {'num_massive_neutrinos': 3, 'neutrino_hierarchy': 'degenerate'},
'meffsterile': {'param[mnu]': '0.06', 'param[nnu]': '3.1 3.046 10 0.05 0.05', 'num_massive_neutrinos': 1,
'accuracy_level': 1.2},
'yhe': {'bbn_consistency': False},
'r': {'compute_tensors': True},
'nt': {'inflation_consistency': False, 'lmax_tensor': 1000}
}
# dataset names
lowl = 'lowl'
lensing = 'lensing'
lensonly = 'lensonly'
lowE = 'lowE'
WMAP = 'WMAP'
BAO = 'BAO'
HST = 'Riess18'
JLA = 'JLA'
Pantheon = 'Pantheon'
BAORSD = 'BAORSD'
lowEdata = 'simall_EE.ini'
BAOdata = 'BAO.ini'
RSDdata = 'BAODR12_RSD.ini'
HSTdata = 'HST_Riess2018.ini'
theta_prior = {'prior[theta]': '1.0409 0.0006'}
# dataset names
tauprior = {'prior[tau]': '0.055 0.009'}
tauname = 'tau055'
variant_tag = ['TT', 'TTTEEE']
variant_pol_tag = ['TE', 'EE']
variants = variant_tag
planck_highL_sets = []
planck_pol_sets = []
planck_vars = ['plikHM', 'CamSpecHM']
planck_ini = ['plik_rd12_HM_v22_%s.ini', 'nonclik_v10_7_%s.ini']
clean_ini = ['nonclik_v10_7_TT_clean.ini']
# planck_ini = ['plik_rd12_HM_v22_%s.ini', 'CAMspec_%s_clik14.ini']
planck_base = [[], []]
for planck, ini, base in zip(planck_vars, planck_ini, planck_base):
for name, var in zip(variant_tag, variants):
planck_highL_sets.append(batchjob.dataSet([planck, name], base + [ini % var]))
for var in variant_pol_tag:
planck_pol_sets.append(batchjob.dataSet([planck, var], base + [ini % var]))
baseTT = planck_highL_sets[0]
baseTTTEEE = planck_highL_sets[1]
WMAP9 = [[WMAP], ['WMAP.ini']]
likechecks = []
newCovmats = False
# Importance sampling settings
class importanceFilterLensing:
def wantImportance(self, jobItem):
return [planck for planck in planck_vars if planck in jobItem.data_set.names] and (
not 'omegak' in jobItem.param_set or (len(jobItem.param_set) == 1))
class importanceFilterSN:
def wantImportance(self, jobItem):
return 'JLA' not in jobItem.data_set.names and 'Pantheon' not in jobItem.data_set.names
class reion_importance(batchjob.importanceSetting):
def wantImportance(self, jobItem):
return [planck for planck in planck_vars if
planck in jobItem.data_set.names] and not 'reion' in jobItem.data_set.names
class zre_importance(batchjob.importanceFilter):
def wantImportance(self, jobItem):
return [planck for planck in planck_vars if
planck in jobItem.data_set.names] and not 'reion' in jobItem.data_set.names
def filter(self, batch, jobItem):
samples = jobItem.parent.getMCSamples(settings=getdist_options)
pars = samples.getParams()
samples.filter(pars.zrei >= 6.5)
samples.ranges.setRange('zrei', [6.5, None])
samples.saveChainsAsText(jobItem.chainRoot, properties={'burn_removed': True})
class importanceFilterNotOmegak:
def wantImportance(self, jobItem):
return not ('omegak' in jobItem.param_set)
class importanceFilterBAO:
def wantImportance(self, jobItem):
return not ('omegak' in jobItem.param_set) and jobItem.data_set.hasName('BAO')
class importanceFilterNnu:
def wantImportance(self, jobItem):
return 'nnu' in jobItem.param_set
post_lensing = [[lensing], ['lensing.ini'], importanceFilterLensing()]
post_lensingBAO = [[BAO, lensing], [BAOdata, 'lensing.ini'], importanceFilterNotOmegak()]
post_lensingPantheon = [[lensing, Pantheon], ['lensing.ini', 'Pantheon.ini'], importanceFilterSN()]
post_lensingJLA = [[lensing, JLA], ['lensing.ini', 'JLA_marge.ini'], importanceFilterNotOmegak()]
post_BAO = [[BAO], [BAOdata], importanceFilterNotOmegak()]
post_HST = [[HST], [HSTdata], importanceFilterNotOmegak()]
post_BAOJLA = [[BAO, JLA], [BAOdata, 'JLA_marge.ini'], importanceFilterNotOmegak()]
post_BAOPantheon = [[BAO, Pantheon], [BAOdata, 'Pantheon.ini'], importanceFilterNotOmegak()]
post_BAOHST = [[BAO, HST], [BAOdata, HSTdata], importanceFilterNotOmegak()]
post_BAOHSTJLA = [[BAO, JLA, HST], [BAOdata, 'JLA_marge.ini', HSTdata], importanceFilterNotOmegak()]
post_BAOHSTPantheon = [[BAO, Pantheon, HST], [BAOdata, 'Pantheon.ini', HSTdata], importanceFilterNotOmegak()]
post_BAOlensingPantheon = [[BAO, lensing, Pantheon], [BAOdata, 'lensing.ini', 'Pantheon.ini'],
importanceFilterNotOmegak()]
post_Pantheon = [[Pantheon], ['Pantheon.ini'], importanceFilterNotOmegak()]
post_CookeBBN = ['Cooke17']
post_Aver15 = [['Aver15'], ['Aver15BBN.ini'], importanceFilterNnu()]
post_BBN = [['Cooke17', 'Aver15'], ['Aver15BBN.ini', 'Cooke17BBN.ini'], importanceFilterNnu()]
# set up groups of parameters and data sets
groups = []
g = batchjob.jobGroup('main')
g.datasets = copy.deepcopy(planck_highL_sets)
for d in g.datasets:
d.add(lowl)
d.add(lowE, lowEdata)
g.params = [[], ['omegak'], ['mnu'], ['r'], ['nrun', 'r'], ['nnu'], ['nrun'], ['Alens'], ['yhe'], ['w'], ['alpha1']]
g.importanceRuns = [post_BAO, post_lensing, post_lensingBAO, post_HST, post_BBN]
groups.append(g)
gpol = batchjob.jobGroup('mainpol')
gpol.datasets = copy.deepcopy(planck_pol_sets)
for d in gpol.datasets:
d.add(lowE, lowEdata)
gpol.params = [[], ['mnu'], ['nnu'], ['nrun'], ['Alens'], ['yhe'], ['r']]
gpol.importanceRuns = [post_BAO]
groups.append(gpol)
gpol = batchjob.jobGroup('polbao')
gpol.datasets = copy.deepcopy(planck_pol_sets)
for d in gpol.datasets:
d.add(lowE, lowEdata)
d.add(BAO, BAOdata)
gpol.params = [[], ['mnu'], ['nnu']]
gpol.importanceRuns = [post_lensing]
groups.append(gpol)
gpol = batchjob.jobGroup('pollensing')
gpol.datasets = copy.deepcopy(planck_pol_sets)
for d in gpol.datasets:
d.add(lowE, lowEdata)
d.add(lensing)
for d in list(gpol.datasets):
d = d.copy().add(BAO, BAOdata).add('CookeDH', 'baryon_density.ini')
gpol.datasets.append(d)
for d in copy.deepcopy(planck_pol_sets):
d.add(lowE, lowEdata)
d.add(lensing)
d.add('CookeDH', 'baryon_density.ini')
gpol.datasets.append(d)
gpol.params = [[]]
gpol.importanceRuns = []
groups.append(gpol)
gnotau = batchjob.jobGroup('nopoltau')
gnotau.params = [[]]
gnotau.datasets = copy.deepcopy(planck_highL_sets)
for d in gnotau.datasets:
d.add(lowl)
for d in copy.deepcopy(planck_highL_sets):
d.add(lowl)
d.add(lensing)
gnotau.datasets.append(d)
for d in copy.deepcopy(planck_highL_sets):
d.add(lowl)
d.add('reion', 'reion_tau.ini')
gnotau.datasets.append(d)
gnotau.importanceRuns = [post_BAO]
groups.append(gnotau)
gnotau = batchjob.jobGroup('nopoltaumnu')
gnotau.params = [['mnu'], ['Alens']]
gnotau.datasets = []
for d in copy.deepcopy(planck_highL_sets):
d.add(lowl)
d.add(lensing)
gnotau.datasets.append(d)
gnotau.importanceRuns = [post_BAO]
groups.append(gnotau)
g2 = batchjob.jobGroup('ext')
g2.datasets = copy.deepcopy(g.datasets)
g2.params = [['nnu', 'meffsterile'], ['nnu', 'mnu'], ['nnu', 'yhe']]
g2.importanceRuns = [post_BAO, post_HST, post_lensing, post_lensingBAO]
groups.append(g2)
g3 = batchjob.jobGroup('geom')
g3.params = [['omegak'], ['w'], ['w', 'wa']]
g3.datasets = []
for d in copy.deepcopy(g.datasets):
d.add(BAO, BAOdata)
g3.datasets.append(d)
for d in copy.deepcopy(g.datasets):
d.add(BAO, BAOdata)
d.add(HST, HSTdata)
d.add(JLA)
g3.datasets.append(d)
g3.importanceRuns = [post_lensing, post_lensingPantheon]
groups.append(g3)
g3 = batchjob.jobGroup('de')
g3.params = [['w'], ['w', 'wa']]
g3.datasets = []
for d in copy.deepcopy(g.datasets):
d.add(BAO, BAOdata)
d.add(Pantheon)
g3.datasets.append(d)
for d in copy.deepcopy(g.datasets):
d.add(BAO, BAOdata)
d.add(HST, HSTdata)
d.add(Pantheon)
g3.datasets.append(d)
g3.importanceRuns = [post_lensing]
groups.append(g3)
g6 = batchjob.jobGroup('lensing')
g6.datasets = copy.deepcopy(g.datasets)
for d in g6.datasets:
d.add(lensing)
g6.params = [['omegak'], ['mnu'], ['Alens']]
g6.importanceRuns = [post_BAO]
groups.append(g6)
inflat = batchjob.jobGroup('inflat')
inflat.datasets = copy.deepcopy(g.datasets)
for d in inflat.datasets:
d.add(lensing)
inflat.params = [['r'], ['nrun', 'r']]
inflat.importanceRuns = [post_BAO]
groups.append(inflat)
gbest = batchjob.jobGroup('basebest')
gbest.datasets = copy.deepcopy(g.datasets)
for d in gbest.datasets:
d.add(lensing)
gbest.params = [[]]
gbest.importanceRuns = [post_BAO, post_HST, post_BAOHST, post_Pantheon, post_BAOHSTJLA, post_BAOPantheon,
post_BAOHSTPantheon]
groups.append(gbest)
g7 = batchjob.jobGroup('mnu')
g7.datasets = []
for d in copy.deepcopy(g.datasets):
d.add(BAO, BAOdata)
g7.datasets.append(d)
for d in copy.deepcopy(g.datasets):
d.add(lensing)
d.add(BAO, BAOdata)
g7.datasets.append(d)
g7.params = [['mnu'], ['nnu', 'meffsterile'], ['nnu', 'mnu']]
g7.importanceRuns = [post_Pantheon, post_Aver15, post_BBN]
groups.append(g7)
gnonbbn = batchjob.jobGroup('nonbbn')
gnonbbn.datasets = copy.deepcopy(g.datasets)
for d in gnonbbn.datasets:
d.add('Aver15', 'Aver15BBN')
gnonbbn.params = [['yhe'], ['nnu', 'yhe']]
gnonbbn.importanceRuns = [post_BAO, post_lensing, post_lensingBAO]
groups.append(gnonbbn)
gnnu = batchjob.jobGroup('nnu')
gnnu.datasets = []
for d in copy.deepcopy(g.datasets):
d.add(BAO, BAOdata)
gnnu.datasets.append(d)
gnnu.params = [['nnu']]
gnnu.importanceRuns = [post_lensingJLA, post_lensingPantheon, post_lensing, post_Aver15, post_BBN]
groups.append(gnnu)
gHST = batchjob.jobGroup('HST')
gHST.datasets = []
for d in copy.deepcopy(g.datasets):
d.add(HST, HSTdata)
gHST.datasets.append(d)
gHST.params = [['nnu']]
gHST.importanceRuns = [post_BAO, post_BAOPantheon, post_lensing, post_lensingBAO, post_BAOlensingPantheon]
groups.append(gHST)
gclean = batchjob.jobGroup('cleaned')
d = batchjob.dataSet(['CleanedCamSpecHM', 'TT'], [clean_ini])
d.add(lowl)
d.add(lowE, lowEdata)
gclean.datasets = [d]
gclean.params = [[], ['mnu'], ['nnu'], ['yhe'], ['Alens'], ['omegak'], ['nrun'], ['r'], ['w']]
groups.append(gclean)
# Things mainly for the lensing paper
glens = batchjob.jobGroup('lensonly')
# get this well converged so importance sampling might work
lensdata = batchjob.dataSet(['lensing', 'lenspriors'], [lensonly, 'lensonly_priors', {'MPI_Converge_Stop': 0.002}])
glens.datasets = [lensdata]
glens.datasets += [lensdata.copy().add('theta', theta_prior)]
lensdata = lensdata.copy().add(BAO, BAOdata)
glens.datasets.append(lensdata)
lensdata = lensdata.copy().add('theta', theta_prior)
glens.datasets.append(lensdata)
glens.params = [[], ['mnu']]
glens.importanceRuns = [post_Pantheon]
varnames = ['agr2', 'conslmin40', 'agrlmax425', 'ptt', 'pttagr2', 'bfcl', 'agr2bfcl', 'linear', 'acc', 'agr2acc',
'takahashi', 'agr2takahashi', 'Apr6']
if True:
# Consistency checks mainly for the lensing paper
base = 'smicadx12_Dec5_ftl_mv2_n'
vars = [('dclpp_p_teb_agr2_CMBmarged', {}),
('dclpp_p_teb_consext8_CMBmarged', {'cmb_dataset[lensing,use_min]': 2}),
('dclpp_p_teb_agr2_CMBmarged', {'cmb_dataset[lensing,use_max]': 9}),
('dclpttptt_p_teb_consext8_CMBmarged', {}),
('dclpttptt_p_teb_agr2_CMBmarged', {}),
('dclpp_p_teb_consext8_lensonly', {}),
('dclpp_p_teb_agr2_lensonly', {}),
# ('dclpp_p_teb_consext8_lensonly', {
# 'cmb_dataset[lensing,linear_correction_bin_window_fix_cl_file]': '../base_omegak_plikHM_TTTEEE_lowl_lowE.minimum.theory_cl'}),
('dclpp_p_teb_consext8_CMBmarged', {'redo_theory': True, 'redo_cls': True, 'use_nonlinear_lensing': False}),
('dclpp_p_teb_consext8_CMBmarged',
{'redo_theory': True, 'redo_cls': True, 'accuracy_level': 1.5, 'k_eta_max_scalar': 50000}),
('dclpp_p_teb_agr2_CMBmarged',
{'redo_theory': True, 'redo_cls': True, 'accuracy_level': 1.5, 'k_eta_max_scalar': 50000}),
('dclpp_p_teb_consext8_CMBmarged',
{'redo_theory': True, 'redo_cls': True, 'halofit_version': 4}),
('dclpp_p_teb_agr2_CMBmarged',
{'redo_theory': True, 'redo_cls': True, 'halofit_version': 4}),
('smicadx12_Apr6_ndclpp_p_teb_consext8_CMBmarged', {}),
]
for name, var in zip(varnames, vars):
tag, opt = var
dic = {'redo_likelihood': True, 'redo_add': False,
'cmb_dataset[lensing]': '%%DATASETDIR%%planck_lensing_2017/%s%s.dataset' % (
base if 'smica' not in tag else '', tag)}
dic.update(opt)
if 'ptt' in name and not 'CMBmarged' in name:
dic['cmb_dataset[lensing,linear_correction_bin_window_fix_cl]'] = 'TT'
glens.importanceRuns.append([[name], [dic]])
glens.extra_opts = {'sampling_method': 1} # no fast params
groups.append(glens)
# Things mainly for the lensing paper
glensTT = batchjob.jobGroup('lensTT')
base = 'smicadx12_Dec5_ftl_mv2_n'
tag = 'dclpttptt_p_teb_agr2_CMBmarged'
lensdata = batchjob.dataSet(['lensing', 'lenspriors', 'pttagr2'],
[lensonly, 'lensonly_priors',
{'cmb_dataset[lensing]': '%%DATASETDIR%%planck_lensing_2017/%s%s.dataset' % (base, tag)}])
glensTT.datasets = [lensdata]
glensTT.datasets += [lensdata.copy().add('theta', theta_prior)]
lensdata = lensdata.copy().add(BAO, BAOdata)
glensTT.datasets.append(lensdata)
lensdata = lensdata.copy().add('theta', theta_prior)
glensTT.datasets.append(lensdata)
glensTT.params = [[], ['mnu']]
groups.append(glensTT)
glens = batchjob.jobGroup('lensonlyastro')
lensdata = batchjob.dataSet(['lensing', 'DESpriors'], [lensonly, 'DES_astro_priors'])
glens.datasets = [lensdata]
glens.datasets += [lensdata.copy().add(BAO, BAOdata)]
lensdata = lensdata.copy().add('CookeDH', 'baryon_density.ini')
glens.datasets += [lensdata]
lensdata = lensdata.copy().add(BAO, BAOdata)
glens.datasets += [lensdata]
glens.param_extra_opts = {'mnu': {'param[mnu]': '0.07 0.05 1 0.1 0.03'}}
glens.params = [[], ['mnu']]
glens.extra_opts = {'sampling_method': 1} # no fast params
groups.append(glens)
gphi = batchjob.jobGroup('Aphiphi')
gphi.params = [['Aphiphi']]
gphi.datasets = []
for d in copy.deepcopy(g.datasets):
d.add(lensing)
gphi.datasets.append(d)
gphi.importanceRuns = []
groups.append(gphi)
gphi = batchjob.jobGroup('Alens')
gphi.params = [[], ['Alens']]
gphi.datasets = []
for d in copy.deepcopy(planck_highL_sets):
gphi.datasets.append(d)
dlow = d.copy()
dlow.add(lowl)
gphi.datasets.append(dlow)
dtau = d.copy()
dtau.add(lowE, lowEdata)
gphi.datasets.append(dtau)
gphi.importanceRuns = [post_BAO]
groups.append(gphi)
gWMAP = batchjob.jobGroup('WMAP')
gWMAP.params = [[]]
gWMAP.datasets = [WMAP9]
gWMAP.importanceRuns = [post_BAO]
groups.append(gWMAP)
for bk in ['BK14']:
gbkp = batchjob.jobGroup(bk)
gbkp.datasets = []
for d in copy.deepcopy(planck_highL_sets):
d.add(lowl)
d.add(lowE, lowEdata)
d.add(bk)
gbkp.datasets.append(d)
for d in [copy.deepcopy(baseTTTEEE)]:
d.add(lowl)
d.add(lowE, lowEdata)
d.add(bk)
d.add(lensing)
gbkp.datasets.append(d)
gbkp.params = [['r'], ['nrun', 'r']]
gbkp.importanceRuns = [post_BAO, post_lensing, post_lensingBAO]
groups.append(gbkp)
DESdatapriors = [batchjob.dataSet(['DES', 'lenspriors'], ['DES', 'lensonly_priors']),
batchjob.dataSet(['DESlens', 'lenspriors'], ['DES_lensing', 'lensonly_priors'])]
gWL = batchjob.jobGroup('DES')
gWL.datasets = copy.deepcopy(DESdatapriors)
for d in copy.deepcopy(DESdatapriors):
d.add('lensing', lensonly)
gWL.datasets.append(d)
for d in copy.deepcopy(DESdatapriors):
d.add(BAO, BAOdata)
gWL.datasets.append(d)
for d in copy.deepcopy(DESdatapriors):
d.add('lensing', lensonly)
d.add(BAO, BAOdata)
gWL.datasets.append(d)
gWL.params = [[], ['mnu']]
gWL.importanceRuns = []
groups.append(gWL)
DESdatapriors = [batchjob.dataSet(['DES', 'DESpriors'], ['DES', 'DES_astro_priors']),
batchjob.dataSet(['DESlens', 'DESpriors'], ['DES_lensing', 'DES_astro_priors']),
batchjob.dataSet(['DESwt', 'DESpriors'], ['DES_wt', 'DES_astro_priors'])
]
gWL = batchjob.jobGroup('DESastro')
gWL.datasets = copy.deepcopy(DESdatapriors)
for d in copy.deepcopy(DESdatapriors):
d.add('lensing', 'lensonly')
gWL.datasets.append(d)
for d in copy.deepcopy(DESdatapriors):
d.add(BAO, BAOdata)
d.add('CookeDH', 'baryon_density.ini')
gWL.datasets.append(d)
for d in copy.deepcopy(DESdatapriors):
d.add('lensing', lensonly)
d.add(BAO, BAOdata)
d.add('CookeDH', 'baryon_density.ini')
gWL.datasets.append(d)
gWL.param_extra_opts = {'mnu': {'param[mnu]': '0.07 0.05 1 0.1 0.03'}}
gWL.params = [[], ['mnu']]
gWL.importanceRuns = []
groups.append(gWL)
gDESPlanck = batchjob.jobGroup('DESPlanck')
gDESPlanck.datasets = []
for d in [copy.deepcopy(baseTTTEEE)]:
d.add(lowl)
d.add(lowE, lowEdata)
d.add('DES')
gDESPlanck.datasets.append(d)
for d in [copy.deepcopy(baseTTTEEE)]:
d.add(lowl)
d.add(lowE, lowEdata)
d.add('DESlens', 'DES_lensing')
gDESPlanck.datasets.append(d)
# for d in copy.deepcopy(gDESPlanck.datasets):
# d.add(lensing)
# gDESPlanck.datasets.append(d)
gDESPlanck.params = [[], ['mnu']]
gDESPlanck.importanceRuns = [post_BAO, post_lensing, post_lensingBAO]
groups.append(gDESPlanck)
gext = batchjob.jobGroup('twos')
d = copy.deepcopy(baseTTTEEE)
d.add(lowl)
d.add(lowE, lowEdata)
gext.datasets = [d]
gext.params = [['nrun', 'nrunrun'], ['nnu', 'nrun']]
gext.importanceRuns = [post_BAO, post_lensing, post_lensingBAO]
groups.append(gext)
gext = batchjob.jobGroup('big')
d = copy.deepcopy(baseTTTEEE)
d.add(lowl)
d.add(lowE, lowEdata)
d.add(BAO, BAOdata)
d.add(HST, HSTdata)
d.add(Pantheon)
d.add(lensing)
gext.datasets = [d]
gext.params = [['nrun', 'nnu', 'w', 'mnu']]
gext.importanceRuns = []
groups.append(gext)
gBBN = batchjob.jobGroup('lensingBBN')
BBN = batchjob.dataSet(['lensing', 'lenspriors', BAO, 'Cooke17', 'Aver15'],
[lensonly, 'Aver15BBN', 'Cooke17BBN', BAOdata, 'lensonly_priors',
{'prior[omegabh2]': ''}])
gBBN.datasets = [BBN]
gBBN.datasets += [BBN.copy().add('theta', theta_prior)]
gBBN.params = [['nnu'], ['nnu', 'mnu']]
gBBN.extra_opts = {'sampling_method': 1}
gBBN.importanceRuns = [post_Pantheon]
groups.append(gBBN)
noCMB = {'lmin_store_all_cmb': 0, 'lmin_computed_cl': 0, 'param[ns]': 0.96, 'param[logA]': 3, 'param[tau]': 0.055,
'get_sigma8': False}
gBBN = batchjob.jobGroup('BBN')
BBN = batchjob.dataSet([BAO, 'Cooke17'], [BAOdata, 'Cooke17BBN', noCMB])
gBBN.datasets = [BBN]
gBBN.datasets += [BBN.copy().add('Pantheon')]
gBBN.datasets += [BBN.copy().add('JLA')]
gBBN.datasets += [BBN.copy().add('Pantheon').add('theta', theta_prior)]
gBBN.datasets += [BBN.copy().add('theta', theta_prior)]
gBBN.params = [[], ['mnu']]
gBBN.extra_opts = {'sampling_method': 1}
groups.append(gBBN)
gBBN = batchjob.jobGroup('BBNnnu')
BBN1 = batchjob.dataSet([BAO, 'Cooke17', 'Aver15'], ['Aver15BBN', 'Cooke17BBN', BAOdata, noCMB])
BBN2 = batchjob.dataSet([BAO, 'Cooke17Marc', 'Aver15'],
['Aver15BBN', {'abundance_dataset[Cooke17Marc]': '%DATASETDIR%D_Cooke2017_marcucci.dataset'},
BAOdata, noCMB])
BBN3 = batchjob.dataSet([BAO, 'Cooke17Adel', 'Aver15'],
['Aver15BBN', {'abundance_dataset[Cooke17Adel]': '%DATASETDIR%D_Cooke2017_adelberger.dataset'},
BAOdata, noCMB])
gBBN.datasets = []
for BBN in [BBN1, BBN2, BBN3]:
gBBN.datasets += [BBN]
gBBN.datasets += [BBN.copy().add('Pantheon')]
gBBN.datasets += [BBN.copy().add('theta', theta_prior)]
gBBN.datasets += [BBN.copy().add('Pantheon').add('theta', theta_prior)]
gBBN.params = [['nnu'], ['nnu', 'mnu']]
gBBN.extra_opts = {'sampling_method': 1}
groups.append(gBBN)
# add zre prior for all runs
importance_filters = [zre_importance(['zre6p5'])]
skip = []
covWithoutNameOrder = ['lenspriors', 'CookeDH', 'pttagr2', HST, 'JLA', Pantheon, BAORSD, 'lensing', 'DESpriors', 'DES',
'DESlens', 'BAO', 'reion', 'abundances', 'theta', 'Aver15'] + varnames
covNameMappings = {HST: 'HST', 'CleanedCamSpecHM': 'CamSpecHM', 'Cooke17Adel': 'Cooke17', 'Cooke17Marc': 'Cooke17'}
# try to match run to existing covmat
covrenames = []
for planck in planck_vars:
covrenames.append([planck, 'planck'])
covrenames.append(['CamSpecHM', 'plikHM'])
covrenames.append(['lensing_lenspriors', 'lensonly'])
covrenames.append(['lensing', 'lensonly'])
covrenames.append(['Alensf', 'Alens'])
covrenames.append(['_Aphiphi', ''])
covrenames.append(['Pantheon', 'JLA'])
covrenames.append(['_Aver15', ''])
covrenames.append(['_r', ''])
covrenames.append(['_w', ''])
covrenames.append(['_alpha1', ''])
covrenames.append(['DES_BAO', 'BAO_lensonly'])
covrenames.append(['DESlens_BAO', 'BAO_lensonly'])
covrenames.append(['DES_lensonly', 'lensonly'])
covrenames.append(['DES', 'lensonly'])
covrenames.append(['DESwt', 'DES'])
covrenames.append(['mnu_DES', 'DES'])
covrenames.append(['Riess18', 'HST'])
covrenames.append(['DESwt_DESpriors_lensing', 'DES_DESpriors_lensonly'])
covrenames.append(['DESwt_DESpriors_BAO_CookeDH', 'DES_DESpriors_BAO'])
covrenames.append(['DESwt_DESpriors_lensing_BAO_CookeDH', 'DES_DESpriors_BAO'])
covrenames.append(['DESpriors_BAO', 'DESpriors_CookeDH_BAO'])
covrenames.append(['base_mnu_plikHM_TTTEEE_lowl_lowE_DES', 'base_mnu_plikHM_TTTEEE_lowl_lowE'])
covrenames.append(['base_mnu_plikHM_TTTEEE_lowl_lowE_DESlens', 'base_mnu_plikHM_TTTEEE_lowl_lowE'])
covrenames.append(['lensing_lenspriors_theta', 'lensing_lenspriors_theta_BAO'])
covrenames.append(['lowl_lensing', 'lowl_lowE'])
covrenames.append(['lowl', 'lowl_lowE'])
covrenames.append(['TT', 'TT_lowl_lowE'])
covrenames.append(['TTTEEE', 'TTTEEE_lowl_lowE'])
covrenames.append(['TTTEEE_lowE', 'TTTEEE_lowl_lowE'])
covrenames.append(['TT_lowE', 'TT_lowl_lowE'])
covrenames.append(
['nrun_nnu_w_mnu_plikHM_TTTEEE_lowl_lowE_BAO_Riess18_Pantheon_lensing', 'w_plikHM_TTTEEE_lowl_lowE_BAO_Pantheon'])
| 1.734375
| 2
|
tests/test_operator/test_parse_comparison.py
|
gruebel/pycep
| 2
|
12784507
|
import json
from pathlib import Path
from assertpy import assert_that
from pycep import BicepParser
EXAMPLES_DIR = Path(__file__).parent / "examples/comparison"
BICEP_PARSER = BicepParser()
def test_parse_greater_than_or_equals() -> None:
# given
sub_dir_path = EXAMPLES_DIR / "greater_than_or_equals"
file_path = sub_dir_path / "main.bicep"
expected_result = json.loads((sub_dir_path / "result.json").read_text())
# when
result = BICEP_PARSER.parse(file_path=file_path)
# then
assert_that(result).is_equal_to(expected_result)
def test_parse_greater_than() -> None:
# given
sub_dir_path = EXAMPLES_DIR / "greater_than"
file_path = sub_dir_path / "main.bicep"
expected_result = json.loads((sub_dir_path / "result.json").read_text())
# when
result = BICEP_PARSER.parse(file_path=file_path)
# then
assert_that(result).is_equal_to(expected_result)
def test_parse_less_than_or_equals() -> None:
# given
sub_dir_path = EXAMPLES_DIR / "less_than_or_equals"
file_path = sub_dir_path / "main.bicep"
expected_result = json.loads((sub_dir_path / "result.json").read_text())
# when
result = BICEP_PARSER.parse(file_path=file_path)
# then
assert_that(result).is_equal_to(expected_result)
def test_parse_less_than() -> None:
# given
sub_dir_path = EXAMPLES_DIR / "less_than"
file_path = sub_dir_path / "main.bicep"
expected_result = json.loads((sub_dir_path / "result.json").read_text())
# when
result = BICEP_PARSER.parse(file_path=file_path)
# then
assert_that(result).is_equal_to(expected_result)
def test_parse_equals() -> None:
# given
sub_dir_path = EXAMPLES_DIR / "equals"
file_path = sub_dir_path / "main.bicep"
expected_result = json.loads((sub_dir_path / "result.json").read_text())
# when
result = BICEP_PARSER.parse(file_path=file_path)
# then
assert_that(result).is_equal_to(expected_result)
def test_parse_not_equals() -> None:
# given
sub_dir_path = EXAMPLES_DIR / "not_equals"
file_path = sub_dir_path / "main.bicep"
expected_result = json.loads((sub_dir_path / "result.json").read_text())
# when
result = BICEP_PARSER.parse(file_path=file_path)
# then
assert_that(result).is_equal_to(expected_result)
def test_parse_equals_case_insensitive() -> None:
# given
sub_dir_path = EXAMPLES_DIR / "equals_case_insensitive"
file_path = sub_dir_path / "main.bicep"
expected_result = json.loads((sub_dir_path / "result.json").read_text())
# when
result = BICEP_PARSER.parse(file_path=file_path)
# then
assert_that(result).is_equal_to(expected_result)
def test_parse_not_equals_case_insensitive() -> None:
# given
sub_dir_path = EXAMPLES_DIR / "not_equals_case_insensitive"
file_path = sub_dir_path / "main.bicep"
expected_result = json.loads((sub_dir_path / "result.json").read_text())
# when
result = BICEP_PARSER.parse(file_path=file_path)
# then
assert_that(result).is_equal_to(expected_result)
| 2.9375
| 3
|
py/tests/slice.py
|
DoDaek/gpython
| 0
|
12784508
|
# Copyright 2019 The go-python Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
doc="slice"
a = slice(10)
assert a.start == None
assert a.stop == 10
assert a.step == None
a = slice(0, 10, 1)
assert a.start == 0
assert a.stop == 10
assert a.step == 1
doc="finished"
| 2.3125
| 2
|
azure-iot-device/tests/common/test_async_adapter.py
|
necoh/azure-iot-sdk-python-preview
| 35
|
12784509
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import inspect
import asyncio
import logging
import azure.iot.device.common.async_adapter as async_adapter
logging.basicConfig(level=logging.INFO)
pytestmark = pytest.mark.asyncio
@pytest.fixture
def dummy_value():
return 123
@pytest.fixture
def mock_function(mocker, dummy_value):
mock_fn = mocker.MagicMock(return_value=dummy_value)
mock_fn.__doc__ = "docstring"
return mock_fn
@pytest.mark.describe("emulate_async()")
class TestEmulateAsync(object):
@pytest.mark.it("Returns a coroutine function when given a function")
async def test_returns_coroutine(self, mock_function):
async_fn = async_adapter.emulate_async(mock_function)
assert inspect.iscoroutinefunction(async_fn)
@pytest.mark.it(
"Returns a coroutine function that returns the result of the input function when called"
)
async def test_coroutine_returns_input_function_result(
self, mocker, mock_function, dummy_value
):
async_fn = async_adapter.emulate_async(mock_function)
result = await async_fn(dummy_value)
assert mock_function.call_count == 1
assert mock_function.call_args == mocker.call(dummy_value)
assert result == mock_function.return_value
@pytest.mark.it("Copies the input function docstring to resulting coroutine function")
async def test_coroutine_has_input_function_docstring(self, mock_function):
async_fn = async_adapter.emulate_async(mock_function)
assert async_fn.__doc__ == mock_function.__doc__
@pytest.mark.it("Can be applied as a decorator")
async def test_applied_as_decorator(self):
# Define a function with emulate_async applied as a decorator
@async_adapter.emulate_async
def some_function():
return "foo"
# Call the function as a coroutine
result = await some_function()
assert result == "foo"
@pytest.mark.describe("AwaitableCallback")
class TestAwaitableCallback(object):
@pytest.mark.it("Instantiates from a provided callback function")
async def test_instantiates(self, mock_function):
callback = async_adapter.AwaitableCallback(mock_function)
assert isinstance(callback, async_adapter.AwaitableCallback)
@pytest.mark.it(
"Invokes the callback function associated with an instance and returns its result when a call is invoked the instance"
)
async def test_calling_object_calls_input_function_and_returns_result(
self, mocker, mock_function
):
callback = async_adapter.AwaitableCallback(mock_function)
result = callback()
assert mock_function.call_count == 1
assert mock_function.call_args == mocker.call()
assert result == mock_function.return_value
@pytest.mark.it("Completes the instance Future when a call is invoked on the instance")
async def test_calling_object_completes_future(self, mock_function):
callback = async_adapter.AwaitableCallback(mock_function)
assert not callback.future.done()
callback()
await asyncio.sleep(0.1) # wait to give time to complete the callback
assert callback.future.done()
@pytest.mark.it("Can be called using positional arguments")
async def test_can_be_called_using_positional_args(self, mocker, mock_function):
callback = async_adapter.AwaitableCallback(mock_function)
result = callback(1, 2, 3)
assert mock_function.call_count == 1
assert mock_function.call_args == mocker.call(1, 2, 3)
assert result == mock_function.return_value
@pytest.mark.it("Can be called using explicit keyword arguments")
async def test_can_be_called_using_explicit_kwargs(self, mocker, mock_function):
callback = async_adapter.AwaitableCallback(mock_function)
result = callback(a=1, b=2, c=3)
assert mock_function.call_count == 1
assert mock_function.call_args == mocker.call(a=1, b=2, c=3)
assert result == mock_function.return_value
@pytest.mark.it("Can have its callback completion awaited upon")
async def test_awaiting_completion_of_callback_returns_result(self, mock_function):
callback = async_adapter.AwaitableCallback(mock_function)
callback()
assert await callback.completion() == mock_function.return_value
assert callback.future.done()
| 2.25
| 2
|
source/Controller.py
|
Baumwollboebele/autonnomous_selfie_drone
| 0
|
12784510
|
from Constants import Constants
from djitellopy import tello
class Controller():
def __init__(self) -> None:
"""
        Initialization of class variables.
"""
self.const = Constants()
self.drone = tello.Tello()
self.up_down_velocity = 0
self.right_left_velocity = 0
self.forward_backward_velocity = 0
self.turn_velocity = 0
def start(self):
"""
        Function starts the drone and its video recording.
"""
self.drone.connect()
self.drone.takeoff()
self.drone.streamon()
def battery(self):
"""
Function prints the current battery level of the drone in percent.
"""
print(f"Battery at {self.drone.get_battery()}%")
def set_velocity(self, vel):
"""
        Function sets the velocity of the drone in cm/s.
Args:
vel (int): speed of the drone in cm/s
"""
self.drone.set_speed(vel)
def move(self, x, y):
"""
Function utilizes the vector from the center of the camera
to the detected nose.
        Depending on the sign of the x value, the drone is
        either turned left or right.
        Depending on the sign of the y value, the drone is
        either moved up or down.
Args:
x (int): x component of the vector
(Centre of camera image to nose)
            y (int): y component of the vector
(centre of camera image to nose)
"""
self.reset()
# TURN
if x < - self.const.TOLERANCE_X:
self.turn_velocity = - self.const.DRONE_SPEED_TURN
elif x > self.const.TOLERANCE_X:
self.turn_velocity = self.const.DRONE_SPEED_TURN
else:
pass
# UP DOWN
if y < - self.const.TOLERANCE_Y:
self.up_down_velocity = self.const.DRONE_SPEED_Y
elif y > self.const.TOLERANCE_Y:
self.up_down_velocity = - self.const.DRONE_SPEED_Y
else:
pass
self.drone.send_rc_control(self.right_left_velocity,
self.forward_backward_velocity,
self.up_down_velocity, self.turn_velocity)
def move_pose(self, pose):
"""
Function moves the drone if a pose is detected:
- Right arm up
- Left arm up
- Arms crossed
Args:
pose (string): String identifier of the pose.
"""
self.reset()
if pose == "left":
self.right_left_velocity = -self.const.DRONE_SPEED_X
elif pose == "right":
self.right_left_velocity = +self.const.DRONE_SPEED_X
else:
self.right_left_velocity = 0
self.drone.send_rc_control(self.right_left_velocity, 0, 0, 0)
def stop(self):
"""
Function stops the drones movement.
"""
self.drone.send_rc_control(0, 0, 0, 0)
def start_height(self):
"""
Function sets the starting height of the drone.
"""
self.drone.send_rc_control(0, 0, self.const.START_HEIGHT, 0)
def reset(self):
"""
Function resets all velocity values of the drone.
"""
self.up_down_velocity = 0
self.right_left_velocity = 0
self.forward_backward_velocity = 0
self.turn_velocity = 0
def get_stream(self):
"""
        Function returns the current frame of the video stream.
Returns:
(frame): Current Frame of the video stream.
"""
return self.drone.get_frame_read().frame
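# Illustrative usage sketch, assuming a hypothetical detector() callback that
# returns the (x, y) offset of the tracked nose and an optional pose label;
# only methods defined on Controller above are used.
def _example_flight(detector, n_frames=100):
    controller = Controller()
    controller.start()
    controller.battery()
    for _ in range(n_frames):
        frame = controller.get_stream()   # current camera frame
        x, y, pose = detector(frame)      # hypothetical detection step
        if pose is not None:
            controller.move_pose(pose)    # "left" / "right" arm poses
        else:
            controller.move(x, y)         # turn and climb toward the nose
    controller.stop()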
| 3.375
| 3
|
tests/test_clitool.py
|
daryl-scott/clitool2
| 0
|
12784511
|
<filename>tests/test_clitool.py
"""Test Case for the clitool module"""
from __future__ import absolute_import
import inspect
import os
from unittest import TestCase
from clitool2 import CLITool, parse_docstr
def _test1(param1, param2, *args, **kwargs):
"""Sample function for TestCase.
Returns the supplied values.
Args:
param1: parameter 1
param2: parameter 2
args: var-positional
kwargs: JSON-encoded string
Returns:
        tuple: (param1, param2, args, kwargs)
"""
return (param1, param2, args, kwargs)
def _test2(num1, num2):
"""Sample function for TestCase.
Returns the supplied values.
Args:
num1: number 1
num2: number 2
Returns:
float: sum of number 1 and 2
"""
return float(num1) + float(num2)
class CLIToolTestCase(TestCase):
"""Test Case for the clitool module"""
def test_parse_docstr(self):
"""Test for the parse_docstr function"""
expected = ["param1: parameter 1", "param2: parameter 2",
"args: var-positional", "kwargs: JSON-encoded string"]
info = parse_docstr(inspect.getdoc(_test1))
self.assertEqual(info.summary, "Sample function for TestCase.")
self.assertEqual(info.description, "Returns the supplied values.")
self.assertEqual(info.args, os.linesep.join(expected))
        self.assertEqual(info.returns, "tuple: (param1, param2, args, kwargs)")
def test_clitool_normal(self):
"""Test the CLITool class with normal exit"""
tool = CLITool(_test1, parse_doc=False)
args = ("A", "B", "C", "D", '--kwargs={"E": 5}')
expected = ("A", "B", ("C", "D"), {"E": 5})
result = tool(*args)
self.assertEqual(result.output, expected)
self.assertEqual(result.status, 0)
def test_clitool_error(self):
"""Test the CLITool class with exception in wrapped function"""
tool = CLITool(_test2, parse_doc=True)
args = ("1", "b")
result = tool(*args)
self.assertEqual(result.error[0], ValueError)
self.assertEqual(result.status, 1)
| 3.15625
| 3
|
write_rotation_to_principal_axes.py
|
LBJ-Wade/gadget4-tools
| 1
|
12784512
|
import numpy as np
from numba import njit
from snapshot_functions import read_particles_filter
from scipy.linalg import eigh
def run(argv):
if len(argv) < 5:
print('python script.py <IC-file> <preIC-file> <ID> <radius>')
return 1
ID = int(argv[3])
r = float(argv[4])
print('getting IDs of nearby particles')
pos, header = read_particles_filter(argv[2],ID_list=[ID],opts={'pos':True})
IDs, header = read_particles_filter(argv[2],center=pos[0],radius=r,opts={'ID':True})
print('reading positions of %d particles'%len(IDs))
pos0, ID0, header = read_particles_filter(argv[2],ID_list=IDs,opts={'pos':True,'ID':True})
sort0 = np.argsort(ID0)
ID0 = ID0[sort0]
pos0 = pos0[sort0] - pos
pos1, ID1, header = read_particles_filter(argv[1],ID_list=IDs,opts={'pos':True,'ID':True})
sort1 = np.argsort(ID1)
ID1 = ID1[sort1]
pos1 = pos1[sort1] - pos
if not np.array_equal(ID0,ID1):
print('Error')
print(np.stack((ID0,ID1)).T.tolist())
return
rot = np.diag((1,1,1))
for i in range(2):
if i > 0:
eigval, eigvec = eigh(e)
rot1 = eigvec.T
print('rotate by %.0f degrees'%(np.arccos((np.trace(rot1)-1)/2)*180./np.pi))
pos = (rot1 @ (pos.T)).T
pos0 = (rot1 @ (pos0.T)).T
pos1 = (rot1 @ (pos1.T)).T
rot = rot1 @ rot
disp = pos1 - pos0
e = np.zeros((3,3))
for c in range(3):
dist2 = np.zeros(pos0.shape[0])
for d in range(3):
if d != c: dist2 += pos0[:,d]**2
idx = np.argsort(dist2)[:32]
for d in range(3):
e[c,d] = np.polyfit(pos0[idx,c],disp[idx,d],1)[0]
e = .5*(e + e.T)
with np.printoptions(precision=5, suppress=True):
print('Tidal tensor:')
print(e)
with np.printoptions(precision=5, suppress=True):
print('rotation matrix (%.0f degrees)'%(np.arccos((np.trace(rot)-1)/2)*180./np.pi))
print(rot)
np.savetxt('rotation_%d.txt'%ID,rot)
if __name__ == '__main__':
from sys import argv
run(argv)
| 2.203125
| 2
|
books/views.py
|
lazar-mikov/ManezCo
| 0
|
12784513
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import *
from .forms import *
from django.contrib.auth.decorators import login_required
from accounts.decorators import adult_user
# Create your views here.
@login_required(login_url='login')
def bookIndex(request):
books = Book.objects.all()
form = BookForm()
if request.method =='POST':
form = BookForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('/books')
context = {'books': books, 'form':form}
return render(request, '../templates/list.html', context)
@login_required(login_url='login')
def editBook(request, pk):
book = Book.objects.get(id=pk)
form = BookForm(instance=book)
if request.method == 'POST':
form = BookForm(request.POST, instance=book)
if form.is_valid():
form.save()
return redirect('../../')
context = {'form':form}
return render(request, '../templates/edit_book.html', context)
@login_required(login_url='login')
def deleteBook(request, pk):
item = Book.objects.get(id=pk)
if request.method == 'POST':
item.delete()
return redirect('../../')
context = {'item':item}
return render(request, '../templates/delete.html', context)
| 2.21875
| 2
|
human_services/locations/migrations/0003_serviceatlocation.py
|
DarwishMenna/pathways-backend
| 12
|
12784514
|
<reponame>DarwishMenna/pathways-backend
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-21 00:46
from __future__ import unicode_literals
import common.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('locations', '0002_auto_20171214_1957'),
]
operations = [
migrations.CreateModel(
name='ServiceAtLocation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='locations.Location')),
],
bases=(common.models.ValidateOnSaveMixin, models.Model),
),
]
| 1.539063
| 2
|
resources/benchmark.py
|
WIPACrepo/file_catalog
| 0
|
12784515
|
#!/usr/bin/env python
from __future__ import print_function
import hashlib
import string
import random
import time
from json import dumps as json_encode, loads as json_decode
import requests
class FileCatalogLowLevel(object):
"""
Low level file catalog interface. Use like a dict::
fc = FileCatalog('http://file_catalog.com')
fc['my_new_file'] = {'locations':['/this/is/a/path']}
Args:
url (str): url of the file catalog server
timeout (float): (optional) seconds to wait for a query to finish
"""
def __init__(self, url, timeout=60):
self.url = url
self.timeout = timeout
self.session = requests.Session()
def _getfileurl(self, uid):
for _ in range(5):
try:
r = self.session.get(self.url+'/api/files',
params={'query':json_encode({'uid':uid})},
timeout=self.timeout)
except requests.exceptions.Timeout:
continue
if r.status_code == 429:
continue
r.raise_for_status()
files = json_decode(r.text)['files']
break
else:
raise Exception('server is too busy')
if len(files) != 1:
raise KeyError()
return self.url+files[0]
def __getitem__(self, uid):
url = self._getfileurl(uid)
for _ in range(5):
try:
r = self.session.get(url, timeout=self.timeout)
except requests.exceptions.Timeout:
continue
if r.status_code == 429:
continue
r.raise_for_status()
return json_decode(r.text)
raise Exception('server is too busy')
def __setitem__(self, uid, value):
meta = value.copy()
meta['uid'] = uid
data = json_encode(meta)
try:
url = self._getfileurl(uid)
except KeyError:
# does not exist
method = self.session.post
url = self.url+'/api/files'
else:
# exists, so update
method = self.session.put
for _ in range(5):
try:
r = method(url, data=data,
timeout=self.timeout)
except requests.exceptions.Timeout:
continue
if r.status_code == 429:
continue
r.raise_for_status()
return
raise Exception('server is too busy')
def __delitem__(self, uid):
url = self._getfileurl(uid)
for _ in range(5):
try:
r = self.session.delete(url, timeout=self.timeout)
except requests.exceptions.Timeout:
continue
if r.status_code == 429:
continue
r.raise_for_status()
return
raise Exception('server is too busy')
def sha512sum(data):
m = hashlib.sha512()
m.update(data)
return m.hexdigest()
def make_data():
return ''.join(random.choice(string.ascii_letters) for _ in range(random.randint(1,1000)))
def benchmark(address,num):
start = time.time()
fc = FileCatalogLowLevel(address)
for i in range(num):
data = make_data()
cksm = sha512sum(data)
fc[str(i)] = {'data':data,'checksum':cksm,'locations':[make_data()]}
for i in range(num):
meta = fc[str(i)]
for i in range(num):
del fc[str(i)]
return time.time()-start
def main():
import argparse
parser = argparse.ArgumentParser(description='benchmark file_catalog server')
parser.add_argument('--address',type=str,default='http://localhost:8888',help='server address')
parser.add_argument('-n','--num',type=int,default=10000,help='number of test values')
args = parser.parse_args()
print('starting benchmark')
t = benchmark(args.address, args.num)
print('finished. took',t,'seconds')
if __name__ == '__main__':
main()
| 2.78125
| 3
|
standalone_src/agent.py
|
lazykyama/atari_trtis_demo
| 0
|
12784516
|
from collections import deque
import numpy as np
import cv2
import chainer
from chainer import links as L
import chainerrl
from chainerrl import agents
from chainerrl.action_value import DiscreteActionValue
from chainerrl import explorers
from chainerrl import links
from chainerrl import replay_buffer
def infer(agent, state):
gray_state = [cv2.cvtColor(s, cv2.COLOR_RGB2GRAY) for s in state]
dqn_state = [cv2.resize(s, (84, 84), interpolation=cv2.INTER_AREA) \
for s in gray_state]
input_tensor = np.array(dqn_state).astype(np.float32)
return agent.act(input_tensor)
class Agent(object):
def __init__(self,
modelpath,
n_actions=4,
n_stack_frames=4):
# Predefined parameters.
replay_start_size = 5 * 10 ** 4
# Load the model.
q_func = links.Sequence(
links.NatureDQNHead(),
L.Linear(512, n_actions),
DiscreteActionValue)
opt = chainer.optimizers.RMSpropGraves(
lr=2.5e-4, alpha=0.95, momentum=0.0, eps=1e-2)
opt.setup(q_func)
rbuf = replay_buffer.ReplayBuffer(10 ** 6)
explorer = explorers.LinearDecayEpsilonGreedy(
start_epsilon=1.0, end_epsilon=0.1,
decay_steps=10 ** 6,
random_action_func=lambda: np.random.randint(n_actions))
def phi(x):
# Feature extractor
return np.asarray(x, dtype=np.float32) / 255
Agent = agents.DQN
self._agent = Agent(q_func, opt, rbuf, gpu=-1, gamma=0.99,
explorer=explorer, replay_start_size=replay_start_size,
target_update_interval=10 ** 4,
clip_delta=True,
update_interval=4,
batch_accumulator='sum',
phi=phi)
self._agent.load(modelpath)
self._state = deque(
[], maxlen=n_stack_frames)
self._action = 0
def get_action(self):
return self._action
def put_state(self, state):
        # Note: should divide this code into 2 parts:
        #     a state-buffering part and an inference part...
self._state.append(state)
if len(self._state) < self._state.maxlen:
# Need to wait.
return
state = list(self._state)
self._action = infer(self._agent, state)
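# Illustrative usage sketch, assuming a hypothetical frame source get_frame()
# that yields raw RGB frames and a pre-trained model directory "dqn_model";
# Agent stacks n_stack_frames frames internally before acting (see put_state).
def _example_loop(get_frame, modelpath="dqn_model", n_steps=1000):
    agent = Agent(modelpath, n_actions=4, n_stack_frames=4)
    actions = []
    for _ in range(n_steps):
        agent.put_state(get_frame())    # push the newest frame into the deque
        actions.append(agent.get_action())
    return actions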
| 2.234375
| 2
|
data_code/prepare_data.py
|
yum-ume/Chainer_Image_caption
| 0
|
12784517
|
<reponame>yum-ume/Chainer_Image_caption
from pycocotools.coco import COCO
import numpy as np
import skimage.io as io
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "."))
from PIL import Image
ImageDir=sys.argv[1]
ResizeImageDir=sys.argv[2]
dataDir ='..'
dataType='val2014'
annFile='%s/annotations/instances_%s.json'%(dataDir,dataType)
# initialize COCO api for instance annotations
coco=COCO(annFile)
# display COCO categories and supercategories
cats = coco.loadCats(coco.getCatIds())
names=[cat['name'] for cat in cats]
ids=[cat['id'] for cat in cats]
name_ids = {}
for i in range(len(names)):
    if names[i] not in name_ids:
name_ids.update({names[i]:ids[i]})
# get all images containing given categories, select one at random
img_dict = {}
for name in names:
catIds = coco.getCatIds(catNms=[name]);
imgIds = coco.getImgIds(catIds=catIds );
for i in range(len(imgIds)):
img = coco.loadImgs(imgIds[i])[0]
if img["file_name"] not in img_dict:
img_dict.update({img["file_name"]: name})
for k,v in sorted(img_dict.items(), key=lambda x: x[0]):
ImageFile = '%s/%s'%(ImageDir,k)
pil_im = Image.open(ImageFile)
out = pil_im.resize((255, 255))
save_image = '%s/%s'%(ResizeImageDir,k)
out.save(save_image)
print(save_image + " " + str(name_ids[v]))
| 2.109375
| 2
|
tests/frequentist/test_bounds.py
|
danielsaaf/confidence
| 107
|
12784518
|
import pytest
import time
import numpy as np
from spotify_confidence.analysis.frequentist.confidence_computers.z_test_computer import sequential_bounds
@pytest.mark.skip(reason="Skipping because this test is very slow")
def test_many_days():
"""
This input (based on a real experiment) is very long, which can cause slow calculation
"""
t = [
0.0016169976338740648,
0.0057857955498163615,
0.012200379088315757,
0.020199591701142824,
0.02956441064038571,
0.04047102718841871,
0.052929825413405296,
0.06580092295219643,
0.07878439818310792,
0.09148496950057272,
0.1028893343050959,
0.1128434997940756,
0.12298934256730025,
0.13280979910049193,
0.14267997977787195,
0.15281963941289514,
0.16293176212095561,
0.17198778455162406,
0.17996747917082068,
0.18786110540725684,
0.1955669737257397,
0.20335013690301407,
0.21277055903588274,
0.22148328777708232,
0.2295912740670489,
0.23640586948077766,
0.2431234831038822,
0.24987292468428604,
0.2568336065927525,
0.2649271880853427,
0.27282722271091664,
0.2799894816822785,
0.2862801096305317,
0.2925685639072496,
0.2988294699944579,
0.3051314956400879,
0.3118994077972684,
0.31887303037202536,
0.32523581745772245,
0.3307398353487736,
0.33616198578702633,
0.34151324975562525,
0.3478405485563082,
0.3546238566149848,
0.36130761502236336,
0.36751189302418574,
0.3730571543616735,
0.37865278180851814,
0.38428987795273567,
0.3900127609160433,
0.3964718089893684,
0.40306122104207753,
0.40914555292031984,
0.41449831480764515,
0.4198849769608837,
0.4256404199470336,
0.4315384355133149,
0.43801594290086987,
0.4444516211895538,
0.45034373518130405,
0.4556807858158224,
0.4610488197166289,
0.46633036852044285,
0.4717294082126311,
0.47769497653470894,
0.48369759863580825,
0.4892945325380834,
0.49431792124380325,
0.49935417177798586,
0.5043009639028166,
0.5093262559789482,
0.5149098888134348,
0.5205835093969735,
0.5261172491490695,
0.5310141031413226,
0.5359027242118537,
0.540068909216935,
0.5451620919252675,
0.5506752550043325,
0.5562355968920056,
0.5614758121490083,
0.5660462437469214,
0.5706616804819072,
0.5750453002157994,
0.5795939049979849,
0.5861802311128667,
0.5913273051077091,
0.5958976691303413,
0.6001503392324151,
0.6042404457337608,
0.6082963816680697,
0.6124734913435614,
0.6174918231657613,
0.6223867287374153,
0.6268875352709179,
0.6308341907134806,
0.6348490070893678,
0.6388763812049537,
0.6430405276890614,
0.6476616520101889,
0.6525750168960728,
0.6570689758011117,
0.6610427627189518,
0.6649727383296814,
0.6689671694958335,
0.673019050913289,
0.6776959248411508,
0.6825336054124376,
0.6869984168463193,
0.6908780826604262,
0.6949984065748767,
0.6991746490342636,
0.7033415661048878,
0.7082721626873987,
0.7131064081819068,
0.7176506656210218,
0.7216193168175142,
0.7256178250256133,
0.7296113326629264,
0.733677461202103,
0.7383860054116087,
0.7431864069529378,
0.7475115177561259,
0.7513220765829758,
0.7551652404828552,
0.7591154774153049,
0.7635879699061145,
0.76888963361854,
0.7740750002725536,
0.7788235152607059,
0.7829338267710377,
0.7870690059847372,
0.7912444713283939,
0.7954864645360872,
0.8002680350991415,
0.8051864906561857,
0.8097254772233912,
0.8137210008565843,
0.8175460095309978,
0.8214444612731922,
0.8256005212486867,
0.8302889054993935,
0.8351108860804202,
0.839542135124793,
0.8433705788759852,
0.8472835029908369,
0.8513248314019267,
0.8556693700983707,
0.8606610209471658,
0.865499591259651,
0.8699232042972833,
0.8737653545679493,
0.8776996212090155,
0.8816179062961511,
0.8856027192473231,
0.8900849425785808,
0.8947120585746139,
0.8993599427069738,
0.9035026227768521,
0.9075820073336299,
0.9115699850604569,
0.9158137239629064,
0.9207252417911126,
0.925749689176233,
0.9303560370359392,
0.9343408161994707,
0.9384800274049299,
0.9426168396879175,
0.9475247422385961,
0.9523909621035122,
0.9573336433987555,
0.9618665256655873,
0.9657568345864344,
0.9697355995499667,
0.973736889607129,
0.9778353641807583,
0.9828378833872299,
0.987703190985854,
0.9921586319807856,
0.9960384779956415,
1.0,
]
start_time = time.time()
results = sequential_bounds(np.array(t), alpha=0.003333333, sides=2)
my_bounds = results.bounds
expected = np.array(
[
5.75400023,
8.0,
5.14701605,
4.91478643,
4.80691346,
4.69004328,
4.57921075,
4.49683943,
4.44452939,
4.38899083,
4.35683792,
4.33289847,
4.301461,
4.27383028,
4.24513591,
4.21444005,
4.18809224,
4.17037988,
4.15702106,
4.13796352,
4.12345883,
4.10808648,
4.07898394,
4.06169498,
4.04985422,
4.04453139,
4.03288177,
4.02205301,
4.00664024,
3.98770613,
3.97358123,
3.96589571,
3.95946059,
3.94995533,
3.94128534,
3.93114789,
3.91870273,
3.90749163,
3.90064315,
3.8958719,
3.88847126,
3.88184277,
3.86841705,
3.85642932,
3.84721152,
3.84099201,
3.83689676,
3.8295672,
3.82234648,
3.81501541,
3.80286989,
3.79370807,
3.78728177,
3.78449351,
3.77865864,
3.76988501,
3.76230126,
3.75251025,
3.74474277,
3.73953663,
3.73534961,
3.72974059,
3.72466752,
3.71785112,
3.70903202,
3.70176221,
3.6976847,
3.6944938,
3.68996741,
3.68449851,
3.67888767,
3.67142884,
3.66522708,
3.65968721,
3.65649679,
3.65207508,
3.65156885,
3.643952,
3.63644572,
3.63029181,
3.62665696,
3.62527741,
3.62117738,
3.61789837,
3.6128686,
3.59904477,
3.5976517,
3.59678297,
3.59434356,
3.59116304,
3.58814574,
3.5835558,
3.57659985,
3.5726481,
3.56990393,
3.56879169,
3.56501955,
3.56127173,
3.55720436,
3.55194666,
3.54597713,
3.5436994,
3.54287161,
3.53974477,
3.53649679,
3.53314876,
3.52700997,
3.52175088,
3.51873367,
3.51846468,
3.51401711,
3.5106822,
3.50742162,
3.50113309,
3.49658758,
3.49376264,
3.49238249,
3.48979047,
3.48725107,
3.48341163,
3.47810608,
3.47381485,
3.47184685,
3.47110719,
3.46801712,
3.46472076,
3.45913659,
3.45209404,
3.4484684,
3.44587153,
3.44472549,
3.44242755,
3.43895355,
3.43549018,
3.43080058,
3.42621252,
3.42437516,
3.42371762,
3.42122891,
3.41861765,
3.41451447,
3.40936002,
3.4051931,
3.40307035,
3.40295986,
3.40052495,
3.39688763,
3.39279348,
3.38725208,
3.38421998,
3.38214471,
3.38133324,
3.37908335,
3.37689107,
3.37364203,
3.36937673,
3.36593888,
3.36250238,
3.36109704,
3.35878324,
3.35666501,
3.35305866,
3.34754255,
3.34364255,
3.34157534,
3.34085629,
3.33864193,
3.33563376,
3.33016843,
3.32687574,
3.32338656,
3.32166421,
3.32107266,
3.31861916,
3.31615129,
3.31334059,
3.30792367,
3.30479742,
3.30339238,
3.30296421,
3.30041534,
]
)
assert np.allclose(my_bounds, expected)
    # if the calculation with max_nints takes longer than 15 seconds, something is most likely broken
assert (time.time() - start_time) < 15
# Run a second time but with initial state from last run.
start_time = time.time()
results = sequential_bounds(np.array(t), alpha=0.003333333, sides=2, state=results.state)
my_bounds = results.bounds
assert np.allclose(my_bounds, expected)
    # if the calculation with the cached state takes longer than 0.01 seconds, something is most likely broken
print(f"Time passed second round: {time.time() - start_time}")
assert (time.time() - start_time) < 0.01
@pytest.mark.skip(reason="Skipping because this test is very slow")
def test_many_days_fast_and_no_crash():
"""
This is based on experiment 1735 on 26.11.2020. The calculation of the corresponding bounds takes many minutes
without performance tweak. Therefore, this test only checks for absence of crashs and time constraints, but
does not compare against the baseline without performance tweak. There is a Jupyter notebook making that comparison.
"""
t = [
0.011404679673257933,
0.02292450819418779,
0.0356455988484443,
0.04835740420885424,
0.05971666577058213,
0.06976017458481187,
0.07984165086754545,
0.09002459314412276,
0.10026356929804565,
0.11129746744100509,
0.1222487922920801,
0.13250332796555583,
0.1418309168157694,
0.15072692856918676,
0.15940425274581055,
0.16819162796171988,
0.17766544268380677,
0.18725283769713902,
0.19600162922594835,
0.20386600701959812,
0.21159934032678884,
0.21916233120704773,
0.22688560894714668,
0.23509036348536208,
0.24366994698965522,
0.2515994198750076,
0.25875219123481424,
0.2659624389836802,
0.2731790169781248,
0.28051081384508175,
0.28822790138928306,
0.2962915558739476,
0.3037246366701631,
0.31063411372423433,
0.31767205835063517,
0.32464032826076655,
0.3318100596369355,
0.3397812253123048,
0.3476375502493003,
0.3550356746451523,
0.3616457394863339,
0.3683042335071859,
0.375005792804928,
0.38175551518794676,
0.3891222824602354,
0.39652683513644266,
0.40347332732118724,
0.4098512458112366,
0.4163205187081655,
0.42263992444151655,
0.42899148558161226,
0.43464157988476515,
0.43858871208254674,
0.44192382717460427,
0.44482627278235426,
0.4474605932759375,
0.44957511937869815,
0.4509048070694502,
0.45222422911858906,
0.45333747002744257,
0.45426598540713137,
0.4551955091445229,
0.45605329943533507,
0.456895460181754,
0.4578387508027823,
0.45881449093488524,
0.45965707183034693,
0.4603621239391219,
0.4610501740166303,
0.46173166976907054,
0.4624475477181825,
0.4632872155802805,
0.4641010162663083,
0.46481571779810027,
0.4654194019478082,
0.4660207332628762,
0.4666458170038323,
0.4672646265190821,
0.46791675385342846,
0.4685898046101078,
0.46918687841487516,
0.46969451649339183,
0.47019581032136176,
0.4706811945055765,
0.47116992587716583,
0.47170379526092326,
0.47227291514937425,
0.4727852448922026,
0.47322669549150526,
0.4736554715946826,
0.47408022827201673,
0.47450655350577753,
0.4749737592414058,
0.47545756086422586,
0.4759381553493523,
0.47630259262910407,
0.4766609657576709,
0.47699441004302984,
0.4773518028238301,
0.477775327063972,
0.4781977729215707,
0.47856485714029223,
0.47888037506649034,
0.47919262983512245,
0.47949520717080135,
0.47980748994936967,
0.4801789017032324,
0.4805627078538587,
0.48090167009664675,
0.4811904245288165,
0.48149113920373887,
0.4817901452725537,
0.4820966860142033,
0.48243977972257923,
0.4827841618880198,
0.48309197708176604,
0.4833586316742829,
0.4836129058750043,
0.4838654994795544,
0.4841171547512422,
0.48439948090305657,
0.48470691796266424,
0.4849764575786085,
0.4852081697757299,
0.48545255646897667,
0.4856974893559792,
0.48595208567096676,
0.48624575584693763,
0.4865416528128355,
0.4867930840050338,
0.4870117575768593,
0.4872274340855126,
0.4874240218226533,
0.4876215198827202,
0.4878617751103791,
0.488108108494191,
0.48831807097586183,
0.4884937072807334,
0.48866595438332605,
0.488852192449045,
0.48903411698459087,
0.4892522303576926,
0.4894829201921431,
0.4896802221826566,
0.4898457609055321,
0.49001188783706756,
0.4901847091433521,
0.4903469286887892,
0.4905345812562857,
0.49073597269748276,
0.49091467609036693,
0.4910691508884479,
0.4912115954189357,
0.49135658885361677,
0.49150574176382184,
0.49167835299558493,
0.49186735004001847,
0.49203167033066975,
0.49216849886895175,
0.4923075682021289,
0.4924506289512129,
0.49259525825672346,
0.49276396210238826,
0.49294465420074185,
0.4931019580023778,
0.49330306934421303,
0.4935200763248353,
0.49373208353184794,
0.4939721566949216,
0.4942334053697541,
0.4944958444668745,
0.4947262121870588,
0.49492469059489225,
0.4951192336066912,
0.495294323717807,
0.4954780829041733,
0.4956838158854796,
0.49592192835302007,
0.49614550366367866,
0.49633301618149417,
0.49652995404283723,
0.4967104500716375,
0.4969174855149766,
0.49712443692850716,
0.4973541744251272,
0.49756258235533957,
0.49772464784612763,
0.4978989396740621,
0.4980669292663541,
0.4982378038820735,
0.49843929335804726,
0.4986487236509305,
0.49883442952786183,
0.49899118713574214,
0.49915640374435144,
0.49932506557511197,
]
alpha = 0.0033333333333333335
sides = 2
start_time = time.time()
my_bounds = sequential_bounds(np.array(t), alpha=alpha, sides=sides).bounds
expected = np.array(
[
5.0536015,
4.819334,
4.70702194,
4.60970036,
4.55329219,
4.5118919,
4.465161,
4.42168832,
4.37932413,
4.33343066,
4.29780246,
4.26550766,
4.2476601,
4.22343408,
4.20455427,
4.1834642,
4.15580542,
4.13352266,
4.1170148,
4.10326736,
4.08845795,
4.07496919,
4.05959646,
4.0417501,
4.02262887,
4.01056674,
4.00192679,
3.98996708,
3.97709149,
3.96442225,
3.95010566,
3.93456306,
3.92603865,
3.91801377,
3.90630556,
3.8975012,
3.88641115,
3.87143326,
3.85966246,
3.85112482,
3.84569926,
3.83714224,
3.82719647,
3.81910741,
3.80682977,
3.79652758,
3.78889289,
3.78428912,
3.77646938,
3.76966463,
3.76150223,
3.75820905,
3.76088934,
3.76171382,
3.76141619,
3.76079216,
3.76237742,
3.76725034,
3.76769877,
3.7690107,
3.7710916,
3.77168583,
3.76813708,
3.7705804,
3.76669411,
3.76711572,
3.76808636,
3.76962133,
3.76680748,
3.76844159,
3.76552364,
3.76210975,
3.76321355,
3.76471956,
3.76227721,
3.76424368,
3.76172169,
3.75923,
3.76099518,
3.75829319,
3.76028082,
3.75824824,
3.7562443,
3.76013739,
3.75818674,
3.7560594,
3.75379557,
3.75757852,
3.75582548,
3.75412511,
3.75244297,
3.75075688,
3.74891172,
3.75280489,
3.75090966,
3.7494744,
3.74806463,
3.75254602,
3.75114099,
3.74947802,
3.74782149,
3.74638383,
3.75092969,
3.74970739,
3.7485241,
3.74730404,
3.74585452,
3.74435839,
3.74303855,
3.74191532,
3.74074663,
3.73958567,
3.74415751,
3.74282592,
3.74149075,
3.74029857,
3.73926672,
3.73828357,
3.73730769,
3.7363362,
3.7352472,
3.73406243,
3.74020438,
3.7393112,
3.73836986,
3.73742713,
3.73644796,
3.73531947,
3.73418345,
3.73321896,
3.73238074,
3.73155456,
3.73080198,
3.73004637,
3.7291278,
3.72818669,
3.7273851,
3.72671496,
3.72605809,
3.72534827,
3.72465527,
3.72382494,
3.72294733,
3.73077145,
3.73014101,
3.72950865,
3.72885115,
3.7282343,
3.72752112,
3.72675617,
3.7260778,
3.7254917,
3.72495149,
3.72440186,
3.72383671,
3.723183,
3.72246763,
3.72184599,
3.7213286,
3.72080295,
3.72026245,
3.71971626,
3.71907946,
3.71839777,
3.71780463,
3.71704671,
3.7162294,
3.71543144,
3.71452847,
3.72065881,
3.71967136,
3.71880523,
3.71805949,
3.71732896,
3.71667185,
3.71598258,
3.71521135,
3.71431933,
3.71348235,
3.71278081,
3.71204444,
3.71136994,
3.7105967,
3.70982427,
3.70896735,
3.71527887,
3.71467395,
3.71402372,
3.71339733,
3.71276051,
3.71201001,
3.71123041,
3.71053954,
3.70995666,
3.70934263,
3.70871611,
]
)
assert np.allclose(my_bounds, expected)
# if the calculation with max_nints takes longer than 30 seconds, something is most likely broken
assert (time.time() - start_time) < 30
| 2.265625
| 2
|
Database_Setup.py
|
TannerWilcoxson/UnitAnalysis
| 0
|
12784519
|
<reponame>TannerWilcoxson/UnitAnalysis
import sqlite3 as sql
import sys
import os
class myVars():
'''
    A class for using persistent variables through an SQLite database.
    Change self.path below to an absolute path on your computer.
'''
def __init__(self):
path = os.path.realpath(__file__)
path = path.split('/')
path[-1] = "units.db"
self.path = ""
for i in path:
self.path += '/'+i
try:
file = open(self.path)
file.close()
except IOError:
print("No Database Found. Generating New Database")
self.__makeNewDatabase__()
def __makeNewDatabase__(self):
database = sql.connect(self.path)
self.cursor = database.cursor()
self.cursor.execute("create table UnitVariables (UnitName text Primary Key,\
UnitType text not null,\
UnitValue real not null);")
database.commit()
database.close()
def loadUnitVariables(self):
database = sql.connect(self.path)
self.cursor = database.cursor()
self.cursor.execute("SELECT * FROM UnitVariables")
unitList = self.cursor.fetchall();
database.close()
return unitList
def add(self, UnitName, UnitType, UnitValue, OverrideExisting = False):
database = sql.connect(self.path)
varHolder = "(?,?,?)"
VarData = (UnitName, UnitType, UnitValue)
try:
cursor = database.cursor()
cursor.execute(f"INSERT INTO UnitVariables VALUES {varHolder}", VarData)
database.commit()
database.close()
return
except sql.IntegrityError:
if OverrideExisting:
try:
                    self.remove(UnitName)
except:
database.close()
return
cursor = database.cursor()
cursor.execute(f"INSERT INTO UnitVariables VALUES {varHolder}", VarData)
database.commit()
database.close()
return
else:
raise Error("Error: Does Unit type already exist? Try OverrideExisting Flag")
return
def remove(self, varName, database = None):
opened = False
if not database:
opened = True
database = sql.connect(self.path)
try:
cursor = database.cursor()
cursor.execute(f"DELETE FROM UnitVariables WHERE VarName = ?", (varName,))
database.commit()
if opened:
database.close()
except:
if opened:
database.close()
print(f"{varName} could not be found")
| 3.171875
| 3
|
main.py
|
Violet64/Tkinter-Wordle
| 0
|
12784520
|
<reponame>Violet64/Tkinter-Wordle
import tkinter as tk
class Wordle:
def __init__(self, word):
root.bind("<KeyPress>", self.__key_press)
self.__word = word
self.__row = 0
self.__col = 0
self.__labels = []
self.__current_row = ""
for i in range(6):
temp = []
for j in range(5):
label_border = tk.Frame(root, highlightbackground="black", highlightthickness=2)
label = tk.Label(label_border, bg="#404040", font=("Arial", 25, "bold"), height=2, width=4, fg="white")
label_border.grid(row=i, column=j)
label.grid(row=i, column=j)
temp.append(label)
self.__labels.append(temp)
def __key_press(self, event):
if event.keysym == "BackSpace":
if self.__col != 0:
self.__labels[self.__row][self.__col - 1]["text"] = ""
self.__col -= 1
self.__current_row = self.__current_row[:-1]
elif event.char.isalpha():
self.__labels[self.__row][self.__col]["text"] = event.char.upper()
self.__col += 1
self.__current_row += event.char.lower()
if self.__col == 5:
if self.__row == 4:
self.__end_screen(False)
return
self.__check_row()
self.__row += 1
self.__col = 0
self.__current_row = ""
def __check_row(self):
word: str = self.__word
if self.__current_row == self.__word:
for i in range(5):
self.__labels[self.__row][i]["bg"] = "#50C878"
self.__end_screen(True)
return
for i, char in enumerate(self.__current_row):
if char == self.__word[i]:
self.__labels[self.__row][i]["bg"] = "#50C878"
elif char in word:
self.__labels[self.__row][i]["bg"] = "#FADA5E"
word = word.replace(char, "-", 1)
def __end_screen(self, win: bool):
if win is True:
text = "YOUWIN"
index = 0
for i in range(2, 4):
for j in range(1, 4):
self.__labels[i][j]["bg"] = "#40E0D0"
self.__labels[i][j]["text"] = text[index]
index += 1
else:
root.bind("<KeyPress>", self.__do_nothing)
text = "SUXLOL"
index = 0
for i in range(2, 4):
for j in range(1, 4):
self.__labels[i][j]["bg"] = "#CA3433"
self.__labels[i][j]["text"] = text[index]
index += 1
for i in range(5):
self.__labels[5][i]["bg"] = "#50C878"
self.__labels[5][i]["text"] = self.__word[i].upper()
def __do_nothing(self, event):
pass
if __name__ == "__main__":
root = tk.Tk()
root.title("Wordle")
Wordle("sussy")
root.mainloop()
| 3.796875
| 4
|
layers/equivariant_linear.py
|
JoshuaMitton/InvariantGraphNetworks
| 0
|
12784521
|
<filename>layers/equivariant_linear.py
# import tensorflow as tf
import torch
import numpy as np
class equi_2_to_2(torch.nn.Module):
"""equivariant nn layer."""
def __init__(self, input_depth, output_depth, device):
super(equi_2_to_2, self).__init__()
self.basis_dimension = 15
self.device = device
# self.coeffs_values = torch.matmul(torch.randn(size=(input_depth, output_depth, self.basis_dimension), dtype=torch.float32), torch.sqrt(2. / (input_depth + output_depth).type(torch.FloatTensor)))
self.coeffs_values = torch.mul(torch.randn(size=(input_depth, output_depth, self.basis_dimension), dtype=torch.float32), torch.sqrt(torch.tensor([2.]) / (input_depth + output_depth)))#.cuda()
self.coeffs = torch.nn.Parameter(self.coeffs_values, requires_grad=False)
self.diag_bias = torch.nn.Parameter(torch.zeros((1, output_depth, 1, 1), dtype=torch.float32), requires_grad=False)
self.all_bias = torch.nn.Parameter(torch.zeros((1, output_depth, 1, 1), dtype=torch.float32), requires_grad=False)
def ops_2_to_2(self, inputs, dim, normalization='inf', normalization_val=1.0): # N x D x m x m
# print(f'input shape : {inputs.shape}')
diag_part = torch.diagonal(inputs, dim1=-2, dim2=-1) # N x D x m
# print(f'diag_part shape : {diag_part.shape}')
sum_diag_part = torch.sum(diag_part, dim=2, keepdim=True) # N x D x 1
# print(f'sum_diag_part shape : {sum_diag_part.shape}')
sum_of_rows = torch.sum(inputs, dim=3) # N x D x m
# print(f'sum_of_rows shape : {sum_of_rows.shape}')
sum_of_cols = torch.sum(inputs, dim=2) # N x D x m
# print(f'sum_of_cols shape : {sum_of_cols.shape}')
sum_all = torch.sum(sum_of_rows, dim=2) # N x D
# print(f'sum_all shape : {sum_all.shape}')
# op1 - (1234) - extract diag
op1 = torch.diag_embed(diag_part) # N x D x m x m
# op2 - (1234) + (12)(34) - place sum of diag on diag
op2 = torch.diag_embed(sum_diag_part.repeat(1, 1, dim)) # N x D x m x m
# op3 - (1234) + (123)(4) - place sum of row i on diag ii
op3 = torch.diag_embed(sum_of_rows) # N x D x m x m
# op4 - (1234) + (124)(3) - place sum of col i on diag ii
op4 = torch.diag_embed(sum_of_cols) # N x D x m x m
# op5 - (1234) + (124)(3) + (123)(4) + (12)(34) + (12)(3)(4) - place sum of all entries on diag
op5 = torch.diag_embed(torch.unsqueeze(sum_all, dim=2).repeat(1, 1, dim)) # N x D x m x m
# op6 - (14)(23) + (13)(24) + (24)(1)(3) + (124)(3) + (1234) - place sum of col i on row i
op6 = torch.unsqueeze(sum_of_cols, dim=3).repeat(1, 1, 1, dim) # N x D x m x m
# op7 - (14)(23) + (23)(1)(4) + (234)(1) + (123)(4) + (1234) - place sum of row i on row i
op7 = torch.unsqueeze(sum_of_rows, dim=3).repeat(1, 1, 1, dim) # N x D x m x m
# op8 - (14)(2)(3) + (134)(2) + (14)(23) + (124)(3) + (1234) - place sum of col i on col i
op8 = torch.unsqueeze(sum_of_cols, dim=2).repeat(1, 1, dim, 1) # N x D x m x m
# op9 - (13)(24) + (13)(2)(4) + (134)(2) + (123)(4) + (1234) - place sum of row i on col i
op9 = torch.unsqueeze(sum_of_rows, dim=2).repeat(1, 1, dim, 1) # N x D x m x m
# op10 - (1234) + (14)(23) - identity
op10 = inputs # N x D x m x m
# op11 - (1234) + (13)(24) - transpose
op11 = inputs.permute(0, 1, 3, 2) # N x D x m x m
# op12 - (1234) + (234)(1) - place ii element in row i
op12 = torch.unsqueeze(diag_part, dim=3).repeat(1, 1, 1, dim) # N x D x m x m
# op13 - (1234) + (134)(2) - place ii element in col i
op13 = torch.unsqueeze(diag_part, dim=2).repeat(1, 1, dim, 1) # N x D x m x m
# op14 - (34)(1)(2) + (234)(1) + (134)(2) + (1234) + (12)(34) - place sum of diag in all entries
op14 = torch.unsqueeze(sum_diag_part, dim=3).repeat(1, 1, dim, dim) # N x D x m x m
# op15 - sum of all ops - place sum of all entries in all entries
op15 = torch.unsqueeze(torch.unsqueeze(sum_all, dim=2), dim=3).repeat(1, 1, dim, dim) # N x D x m x m
if normalization is not None:
float_dim = dim.type(torch.FloatTensor)
            if normalization == 'inf':
op2 = torch.div(op2, float_dim)
op3 = torch.div(op3, float_dim)
op4 = torch.div(op4, float_dim)
op5 = torch.div(op5, float_dim**2)
op6 = torch.div(op6, float_dim)
op7 = torch.div(op7, float_dim)
op8 = torch.div(op8, float_dim)
op9 = torch.div(op9, float_dim)
op14 = torch.div(op14, float_dim)
op15 = torch.div(op15, float_dim**2)
return [op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15]
def forward(self, inputs, normalization='inf'):
m = torch.tensor(inputs.shape[3], dtype=torch.int32, device=self.device) # extract dimension
# print(f'inputs device : {inputs.device}')
ops_out = self.ops_2_to_2(inputs=inputs, dim=m, normalization=normalization)
# for idx, op in enumerate(ops_out):
# print(f'ops_out{idx} : {op.shape}')
ops_out = torch.stack(ops_out, dim=2)
# print(f'self.coeffs device : {self.coeffs.device}')
# print(f'ops_out device : {ops_out.device}')
output = torch.einsum('dsb,ndbij->nsij', self.coeffs.double(), ops_out) # N x S x m x m
# bias
# print(f'diag_bias shape : {self.diag_bias.shape}')
# print(f'eye shape : {torch.eye(torch.tensor(inputs.shape[3], dtype=torch.int32, device=self.device), device=self.device).shape}')
# mat_diag_bias = torch.mul(torch.unsqueeze(torch.unsqueeze(torch.eye(torch.tensor(inputs.shape[3], dtype=torch.int32, device=self.device), device=self.device), 0), 0), self.diag_bias)
mat_diag_bias = self.diag_bias.expand(-1,-1,inputs.shape[3],inputs.shape[3])
mat_diag_bias = torch.mul(mat_diag_bias, torch.eye(inputs.shape[3], device=self.device))
output = output + self.all_bias + mat_diag_bias
# print(f'mat_diag_bias shape : {mat_diag_bias.shape}')
return output
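# Illustrative usage sketch for the layer above, assuming CPU execution; shapes
# follow the comments in ops_2_to_2 (inputs are N x D x m x m). Double-precision
# inputs are used because forward() casts the coefficients to double.
def _example_forward():
    layer = equi_2_to_2(input_depth=3, output_depth=8, device=torch.device("cpu"))
    x = torch.randn(4, 3, 10, 10, dtype=torch.float64)   # N=4, D=3, m=10
    out = layer(x)                                        # -> 4 x 8 x 10 x 10
    return out.shape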
# def equi_2_to_2(name, input_depth, output_depth, inputs, normalization='inf', normalization_val=1.0):
# '''
# :param name: name of layer
# :param input_depth: D
# :param output_depth: S
# :param inputs: N x D x m x m tensor
# :return: output: N x S x m x m tensor
# '''
# basis_dimension = 15
# # initialization values for variables
# coeffs_values = torch.matmul(torch.randn(size=(input_depth, output_depth, basis_dimension), dtype=torch.float32), torch.sqrt(2. / (input_depth + output_depth).type(torch.FloatTensor)))
# # coeffs_values = tf.multiply(tf.random_normal([input_depth, output_depth, basis_dimension], dtype=tf.float32), tf.sqrt(2. / tf.to_float(input_depth + output_depth)))
# #coeffs_values = tf.random_normal([input_depth, output_depth, basis_dimension], dtype=tf.float32)
# # define variables
# coeffs = torch.autograd.Variable(coeffs_values, requires_grad=True)
# # coeffs = tf.get_variable('coeffs', initializer=coeffs_values)
# m = inputs.shape[3].type(torch.IntTensor) # extract dimension
# # m = tf.to_int32(tf.shape(inputs)[3]) # extract dimension
# ops_out = ops_2_to_2(inputs, m, normalization=normalization)
# ops_out = torch.stack(ops_out, dim=2)
# # ops_out = tf.stack(ops_out, axis=2)
# output = torch.einsum('dsb,ndbij->nsij', coeffs, ops_out) # N x S x m x m
# # output = tf.einsum('dsb,ndbij->nsij', coeffs, ops_out) # N x S x m x m
# # bias
# diag_bias = torch.autograd.Variable(torch.zeros((1, output_depth, 1, 1), dtype=torch.float32), requires_grad=True)
# # diag_bias = tf.get_variable('diag_bias', initializer=tf.zeros([1, output_depth, 1, 1], dtype=tf.float32))
# all_bias = torch.autograd.Variable(torch.zeros((1, output_depth, 1, 1), dtype=torch.float32), requires_grad=True)
# # all_bias = tf.get_variable('all_bias', initializer=tf.zeros([1, output_depth, 1, 1], dtype=tf.float32))
# mat_diag_bias = torch.matmul(torch.unsqueeze(torch.unsqueeze(torch.eye(inputs.shape[3].type(torch.IntTensor)), 0), 0), diag_bias)
# # mat_diag_bias = tf.multiply(tf.expand_dims(tf.expand_dims(tf.eye(tf.to_int32(tf.shape(inputs)[3])), 0), 0), diag_bias)
# output = output + all_bias + mat_diag_bias
# return output
def equi_2_to_1(name, input_depth, output_depth, inputs, normalization='inf', normalization_val=1.0):
'''
:param name: name of layer
:param input_depth: D
:param output_depth: S
:param inputs: N x D x m x m tensor
:return: output: N x S x m tensor
'''
basis_dimension = 5
# with tf.variable_scope(name, reuse=tf.AUTO_REUSE) as scope:
# initialization values for variables
    coeffs_values = torch.mul(torch.randn(size=(input_depth, output_depth, basis_dimension), dtype=torch.float32), torch.sqrt(torch.tensor([2.]) / (input_depth + output_depth)))
# coeffs_values = tf.multiply(tf.random_normal([input_depth, output_depth, basis_dimension], dtype=tf.float32), tf.sqrt(2. / tf.to_float(input_depth + output_depth)))
#coeffs_values = tf.random_normal([input_depth, output_depth, basis_dimension], dtype=tf.float32)
# define variables
coeffs = torch.autograd.Variable(coeffs_values, requires_grad=True)
# coeffs = tf.get_variable('coeffs', initializer=coeffs_values)
    m = inputs.shape[3]  # extract dimension
# m = tf.to_int32(tf.shape(inputs)[3]) # extract dimension
ops_out = ops_2_to_1(inputs, m, normalization=normalization)
ops_out = torch.stack(ops_out, dim=2)
# ops_out = tf.stack(ops_out, axis=2) # N x D x B x m
output = torch.einsum('dsb,ndbi->nsi', coeffs, ops_out) # N x S x m x m
# output = tf.einsum('dsb,ndbi->nsi', coeffs, ops_out) # N x S x m
# bias
bias = torch.autograd.Variable(torch.zeros((1, output_depth, 1), dtype=torch.float32), requires_grad=True)
# bias = tf.get_variable('bias', initializer=tf.zeros([1, output_depth, 1], dtype=tf.float32))
output = output + bias
return output
def equi_1_to_2(name, input_depth, output_depth, inputs, normalization='inf', normalization_val=1.0):
'''
:param name: name of layer
:param input_depth: D
:param output_depth: S
:param inputs: N x D x m tensor
:return: output: N x S x m x m tensor
'''
basis_dimension = 5
# with tf.variable_scope(name, reuse=tf.AUTO_REUSE) as scope:
# initialization values for variables
    coeffs_values = torch.randn(input_depth, output_depth, basis_dimension, dtype=torch.float32) * (2. / (input_depth + output_depth)) ** 0.5
# coeffs_values = tf.multiply(tf.random_normal([input_depth, output_depth, basis_dimension], dtype=tf.float32), tf.sqrt(2. / tf.to_float(input_depth + output_depth)))
#coeffs_values = tf.random_normal([input_depth, output_depth, basis_dimension], dtype=tf.float32)
# define variables
coeffs = torch.autograd.Variable(coeffs_values, requires_grad=True)
# coeffs = tf.get_variable('coeffs', initializer=coeffs_values)
    m = inputs.shape[2]  # extract dimension (a Python int)
# m = tf.to_int32(tf.shape(inputs)[2]) # extract dimension
ops_out = ops_1_to_2(inputs, m, normalization=normalization)
ops_out = torch.stack(ops_out, dim=2)
# ops_out = tf.stack(ops_out, axis=2) # N x D x B x m x m
output = torch.einsum('dsb,ndbij->nsij', coeffs, ops_out) # N x S x m x m
# output = tf.einsum('dsb,ndbij->nsij', coeffs, ops_out) # N x S x m x m
# bias
bias = torch.autograd.Variable(torch.zeros((1, output_depth, 1, 1), dtype=torch.float32), requires_grad=True)
# bias = tf.get_variable('bias', initializer=tf.zeros([1, output_depth, 1, 1], dtype=tf.float32))
output = output + bias
return output
def equi_1_to_1(name, input_depth, output_depth, inputs, normalization='inf', normalization_val=1.0):
'''
:param name: name of layer
:param input_depth: D
:param output_depth: S
:param inputs: N x D x m tensor
:return: output: N x S x m tensor
'''
basis_dimension = 2
# with tf.variable_scope(name, reuse=tf.AUTO_REUSE) as scope:
# initialization values for variables
    coeffs_values = torch.randn(input_depth, output_depth, basis_dimension, dtype=torch.float32) * (2. / (input_depth + output_depth)) ** 0.5
# coeffs_values = tf.multiply(tf.random_normal([input_depth, output_depth, basis_dimension], dtype=tf.float32), tf.sqrt(2. / tf.to_float(input_depth + output_depth)))
#coeffs_values = tf.random_normal([input_depth, output_depth, basis_dimension], dtype=tf.float32)
# define variables
coeffs = torch.autograd.Variable(coeffs_values, requires_grad=True)
# coeffs = tf.get_variable('coeffs', initializer=coeffs_values)
    m = inputs.shape[2]  # extract dimension (a Python int)
# m = tf.to_int32(tf.shape(inputs)[2]) # extract dimension
ops_out = ops_1_to_1(inputs, m, normalization=normalization)
ops_out = torch.stack(ops_out, dim=2)
# ops_out = tf.stack(ops_out, axis=2) # N x D x B x m
    output = torch.einsum('dsb,ndbi->nsi', coeffs, ops_out)  # N x S x m
# output = tf.einsum('dsb,ndbi->nsi', coeffs, ops_out) # N x S x m
# bias
bias = torch.autograd.Variable(torch.zeros((1, output_depth, 1), dtype=torch.float32), requires_grad=True)
# bias = tf.get_variable('bias', initializer=tf.zeros([1, output_depth, 1], dtype=tf.float32))
output = output + bias
return output
def equi_basic(name, input_depth, output_depth, inputs):
'''
:param name: name of layer
:param input_depth: D
:param output_depth: S
:param inputs: N x D x m x m tensor
:return: output: N x S x m x m tensor
'''
basis_dimension = 4
# with tf.variable_scope(name, reuse=tf.AUTO_REUSE) as scope:
# initialization values for variables
    coeffs_values = torch.randn(input_depth, output_depth, basis_dimension, dtype=torch.float32) * (2. / (input_depth + output_depth)) ** 0.5
# coeffs_values = tf.multiply(tf.random_normal([input_depth, output_depth, basis_dimension], dtype=tf.float32), tf.sqrt(2. / tf.to_float(input_depth + output_depth)))
#coeffs_values = tf.random_normal([input_depth, output_depth, basis_dimension], dtype=tf.float32)
# define variables
coeffs = torch.autograd.Variable(coeffs_values, requires_grad=True)
# coeffs = tf.get_variable('coeffs', initializer=coeffs_values)
    m = inputs.shape[3]  # extract dimension (a Python int)
    # m = tf.to_int32(tf.shape(inputs)[3])  # extract dimension
    float_dim = float(m)
# float_dim = tf.to_float(m)
# apply ops
ops_out = []
# w1 - identity
ops_out.append(inputs)
# w2 - sum cols
sum_of_cols = torch.divide(torch.sum(inputs, dim=2), float_dim) # N x D x m
# sum_of_cols = tf.divide(tf.reduce_sum(inputs, axis=2), float_dim) # N x D x m
ops_out.append(torch.unsqueeze(sum_of_cols, dim=2).repeat(1, 1, m, 1)) # N x D x m x m
# ops_out.append(tf.tile(tf.expand_dims(sum_of_cols, axis=2), [1, 1, m, 1])) # N x D x m x m
# w3 - sum rows
sum_of_rows = torch.divide(torch.sum(inputs, dim=3), float_dim) # N x D x m
# sum_of_rows = tf.divide(tf.reduce_sum(inputs, axis=3), float_dim) # N x D x m
ops_out.append(torch.unsqueeze(sum_of_rows, dim=3).repeat(1, 1, 1, m)) # N x D x m x m
# ops_out.append(tf.tile(tf.expand_dims(sum_of_rows, axis=3), [1, 1, 1, m])) # N x D x m x m
# w4 - sum all
    sum_all = torch.divide(torch.sum(sum_of_rows, dim=2), float_dim ** 2)  # N x D
# sum_all = tf.divide(tf.reduce_sum(sum_of_rows, axis=2), tf.square(float_dim)) # N x D
ops_out.append(torch.unsqueeze(torch.unsqueeze(sum_all, dim=2), dim=3).repeat(1, 1, m, m)) # N x D x m x m
# ops_out.append(tf.tile(tf.expand_dims(tf.expand_dims(sum_all, axis=2), axis=3), [1, 1, m, m])) # N x D x m x m
ops_out = torch.stack(ops_out, dim=2)
# ops_out = tf.stack(ops_out, axis=2)
output = torch.einsum('dsb,ndbij->nsij', coeffs, ops_out) # N x S x m x m
# output = tf.einsum('dsb,ndbij->nsij', coeffs, ops_out) # N x S x m x m
# bias
bias = torch.autograd.Variable(torch.zeros((1, output_depth, 1, 1), dtype=torch.float32), requires_grad=True)
# bias = tf.get_variable('bias', initializer=tf.zeros([1, output_depth, 1, 1], dtype=tf.float32))
output = output + bias
return output
# def ops_2_to_2(inputs, dim, normalization='inf', normalization_val=1.0): # N x D x m x m
# diag_part = torch.diagonal(inputs) # N x D x m
# sum_diag_part = torch.sum(diag_part, dim=2, keepdim=True) # N x D x 1
# sum_of_rows = torch.sum(inputs, dim=3) # N x D x m
# sum_of_cols = torch.sum(inputs, dim=2) # N x D x m
# sum_all = torch.sum(sum_of_rows, dim=2) # N x D
# # op1 - (1234) - extract diag
# op1 = torch.diagonal(diag_part) # N x D x m x m
# # op2 - (1234) + (12)(34) - place sum of diag on diag
# op2 = torch.diagonal(sum_diag_part.repeat(1, 1, dim)) # N x D x m x m
# # op3 - (1234) + (123)(4) - place sum of row i on diag ii
# op3 = torch.diagonal(sum_of_rows) # N x D x m x m
# # op4 - (1234) + (124)(3) - place sum of col i on diag ii
# op4 = torch.diagonal(sum_of_cols) # N x D x m x m
# # op5 - (1234) + (124)(3) + (123)(4) + (12)(34) + (12)(3)(4) - place sum of all entries on diag
# op5 = torch.diagonal(torch.unsqueeze(sum_all, dim=2).repeat(1, 1, dim)) # N x D x m x m
# # op6 - (14)(23) + (13)(24) + (24)(1)(3) + (124)(3) + (1234) - place sum of col i on row i
# op6 = torch.unsqueeze(sum_of_cols, dim=3).repeat(1, 1, 1, dim) # N x D x m x m
# # op7 - (14)(23) + (23)(1)(4) + (234)(1) + (123)(4) + (1234) - place sum of row i on row i
# op7 = torch.unsqueeze(sum_of_rows, dim=3).repeat(1, 1, 1, dim) # N x D x m x m
# # op8 - (14)(2)(3) + (134)(2) + (14)(23) + (124)(3) + (1234) - place sum of col i on col i
# op8 = torch.unsqueeze(sum_of_cols, dim=2).repeat(1, 1, dim, 1) # N x D x m x m
# # op9 - (13)(24) + (13)(2)(4) + (134)(2) + (123)(4) + (1234) - place sum of row i on col i
# op9 = torch.unsqueeze(sum_of_rows, dim=2).repeat(1, 1, dim, 1) # N x D x m x m
# # op10 - (1234) + (14)(23) - identity
# op10 = inputs # N x D x m x m
# # op11 - (1234) + (13)(24) - transpose
# op11 = inputs.permute(0, 1, 3, 2) # N x D x m x m
# # op12 - (1234) + (234)(1) - place ii element in row i
# op12 = torch.unsqueeze(diag_part, dim=3).repeat(1, 1, 1, dim) # N x D x m x m
# # op13 - (1234) + (134)(2) - place ii element in col i
# op13 = torch.unsqueeze(diag_part, dim=2).repeat(1, 1, dim, 1) # N x D x m x m
# # op14 - (34)(1)(2) + (234)(1) + (134)(2) + (1234) + (12)(34) - place sum of diag in all entries
# op14 = torch.unsqueeze(sum_diag_part, dim=3).repeat(1, 1, dim, dim) # N x D x m x m
# # op15 - sum of all ops - place sum of all entries in all entries
# op15 = torch.unsqueeze(torch.unsqueeze(sum_all, dim=2), dim=3).repeat(1, 1, dim, dim) # N x D x m x m
# if normalization is not None:
# float_dim = dim.type(torch.FloatTensor)
# if normalization is 'inf':
# op2 = torch.div(op2, float_dim)
# op3 = torch.div(op3, float_dim)
# op4 = torch.div(op4, float_dim)
# op5 = torch.div(op5, float_dim**2)
# op6 = torch.div(op6, float_dim)
# op7 = torch.div(op7, float_dim)
# op8 = torch.div(op8, float_dim)
# op9 = torch.div(op9, float_dim)
# op14 = torch.div(op14, float_dim)
# op15 = torch.div(op15, float_dim**2)
# return [op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13, op14, op15]
def ops_2_to_1(inputs, dim, normalization='inf', normalization_val=1.0):  # N x D x m x m
    # converted from the original TensorFlow implementation to PyTorch, to match the layers above
    diag_part = torch.diagonal(inputs, dim1=2, dim2=3)  # N x D x m
    sum_diag_part = torch.sum(diag_part, dim=2, keepdim=True)  # N x D x 1
    sum_of_rows = torch.sum(inputs, dim=3)  # N x D x m
    sum_of_cols = torch.sum(inputs, dim=2)  # N x D x m
    sum_all = torch.sum(inputs, dim=(2, 3))  # N x D
    # op1 - (123) - extract diag
    op1 = diag_part  # N x D x m
    # op2 - (123) + (12)(3) - tile sum of diag part
    op2 = sum_diag_part.repeat(1, 1, dim)  # N x D x m
    # op3 - (123) + (13)(2) - place sum of row i in element i
    op3 = sum_of_rows  # N x D x m
    # op4 - (123) + (23)(1) - place sum of col i in element i
    op4 = sum_of_cols  # N x D x m
    # op5 - (1)(2)(3) + (123) + (12)(3) + (13)(2) + (23)(1) - tile sum of all entries
    op5 = torch.unsqueeze(sum_all, dim=2).repeat(1, 1, dim)  # N x D x m
    if normalization is not None:
        float_dim = float(dim)
        if normalization == 'inf':
            op2 = torch.div(op2, float_dim)
            op3 = torch.div(op3, float_dim)
            op4 = torch.div(op4, float_dim)
            op5 = torch.div(op5, float_dim ** 2)
    return [op1, op2, op3, op4, op5]
def ops_1_to_2(inputs, dim, normalization='inf', normalization_val=1.0):  # N x D x m
    # converted from the original TensorFlow implementation to PyTorch, to match the layers above
    sum_all = torch.sum(inputs, dim=2, keepdim=True)  # N x D x 1
    # op1 - (123) - place on diag
    op1 = torch.diag_embed(inputs)  # N x D x m x m
    # op2 - (123) + (12)(3) - tile sum on diag
    op2 = torch.diag_embed(sum_all.repeat(1, 1, dim))  # N x D x m x m
    # op3 - (123) + (13)(2) - tile element i in row i
    op3 = torch.unsqueeze(inputs, dim=2).repeat(1, 1, dim, 1)  # N x D x m x m
    # op4 - (123) + (23)(1) - tile element i in col i
    op4 = torch.unsqueeze(inputs, dim=3).repeat(1, 1, 1, dim)  # N x D x m x m
    # op5 - (1)(2)(3) + (123) + (12)(3) + (13)(2) + (23)(1) - tile sum of all entries
    op5 = torch.unsqueeze(sum_all, dim=3).repeat(1, 1, dim, dim)  # N x D x m x m
    if normalization is not None:
        float_dim = float(dim)
        if normalization == 'inf':
            op2 = torch.div(op2, float_dim)
            op5 = torch.div(op5, float_dim)
    return [op1, op2, op3, op4, op5]
def ops_1_to_1(inputs, dim, normalization='inf', normalization_val=1.0):  # N x D x m
    # converted from the original TensorFlow implementation to PyTorch, to match the layers above
    sum_all = torch.sum(inputs, dim=2, keepdim=True)  # N x D x 1
    # op1 - (12) - identity
    op1 = inputs  # N x D x m
    # op2 - (1)(2) - tile sum of all
    op2 = sum_all.repeat(1, 1, dim)  # N x D x m
    if normalization is not None:
        float_dim = float(dim)
        if normalization == 'inf':
            op2 = torch.div(op2, float_dim)
    return [op1, op2]
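# A minimal usage sketch (not part of the original module): it assumes that
# `torch` is imported at the top of this file, and simply checks the output
# shapes of the equivariant layers defined above on random data.
if __name__ == '__main__':
    N, D, S, m = 4, 3, 7, 5
    x2 = torch.randn(N, D, m, m)   # a batch of D-channel m x m "matrices"
    x1 = torch.randn(N, D, m)      # a batch of D-channel m-vectors
    print(equi_2_to_1('l21', D, S, x2).shape)   # expected: (N, S, m)
    print(equi_1_to_2('l12', D, S, x1).shape)   # expected: (N, S, m, m)
    print(equi_1_to_1('l11', D, S, x1).shape)   # expected: (N, S, m)
    print(equi_basic('lb', D, S, x2).shape)     # expected: (N, S, m, m)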
| 2.75
| 3
|
standard/single_map_plot.py
|
ElthaTeng/multiline-ngc3351
| 3
|
12784522
|
<reponame>ElthaTeng/multiline-ngc3351
import numpy as np
import matplotlib.pyplot as plt
from astropy.wcs import WCS
from astropy.io import fits
from matplotlib.colors import LogNorm
mom0 = np.load('data_image/NGC3351_CO10_mom0.npy')
fits_map = fits.open('data_image/NGC3351_CO10_mom0_broad_nyq.fits')
wcs = WCS(fits_map[0].header)
map = fits.open('data_image/NGC3351_CO21_mom1_broad_nyq.fits')[0].data
mask = np.load('mask_whole_recovered.npy')
map_masked = map * mask
map_masked[mask == 0] = np.nan
fig = plt.figure()
ax = fig.add_subplot(111, projection=wcs)
ra = ax.coords[0]
ra.set_major_formatter('hh:mm:ss.s')
plt.imshow(map_masked, cmap='coolwarm', origin='lower')
plt.tick_params(axis="y", labelsize=14, labelleft=True)
plt.tick_params(axis="x", labelsize=14, labelbottom=True)
cb = plt.colorbar()
cb.ax.tick_params(labelsize=14)
#cb.ax.plot(-0.25, 0.65, 'k.')
#plt.contour(mom0,origin='lower',levels=(20,50,100,150,200,250), colors='dimgray', linewidths=1)
plt.title(r'(c) Moment 1 of CO 2-1', fontsize=16)
plt.xlim(15,60)
plt.ylim(15,60)
plt.xlabel('R.A. (J2000)', fontsize=14)
plt.ylabel('Decl. (J2000)', fontsize=14)
plt.savefig('formal_plots/mom1_co21.pdf', bbox_inches='tight', pad_inches=0.1)
plt.show()
| 2.125
| 2
|
Decoder.py
|
zouguojian/Improved-RLSTM
| 4
|
12784523
|
<gh_stars>1-10
import Rlstm as lstm
import tensorflow as tf
class decoder(object):
def __init__(self,h_state,batch_size,predict_time,layer_num=1,nodes=128,is_training=True):
        '''
        :param batch_size:
        :param layer_num:
        :param nodes:
        :param is_training:
        We define the decoder of the encoder-decoder model; the detailed
        parameters are handled inside the Rlstm module.
        '''
self.h_state=h_state
self.predict_time=predict_time
self.nodes=nodes
self.out_num=1
with tf.variable_scope('decoder',reuse=tf.AUTO_REUSE):
self.encoder_Lstm=lstm.rlstm(batch_size,layer_num,nodes,is_training)
def decoding(self):
'''
we always use c_state as the input to decoder
'''
# (self.c_state, self.h_state) = self.encoder_Lstm.calculate(self.h_state)
h_state=self.h_state
h=[]
for i in range(self.predict_time):
h_state=tf.reshape(h_state,shape=(-1,1,self.nodes))
(c_state, h_state)=self.encoder_Lstm.calculate(h_state)
            # we use the list h to record the output of the decoder at each time step
h.append(h_state)
h=tf.convert_to_tensor(h,dtype=tf.float32)
        # the final output of the LSTM
        h_state = tf.reshape(h, [-1, self.nodes])
        # the fully connected layer that produces the final results
with tf.variable_scope('Layer', reuse=tf.AUTO_REUSE):
w = tf.get_variable("wight", [self.nodes, 1],
initializer=tf.truncated_normal_initializer(stddev=0.1))
bias = tf.get_variable("biases", [1],
initializer=tf.constant_initializer(0))
results = tf.matmul(h_state, w) + bias
return tf.reshape(results, [-1, self.out_num])
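# A minimal usage sketch (not part of the original module). It assumes a
# TensorFlow 1.x graph-style session, and that the Rlstm module imported above
# provides rlstm(batch_size, layer_num, nodes, is_training) with a calculate()
# method, exactly as used in decoding() above.
if __name__ == '__main__':
    batch_size, nodes, predict_time = 32, 128, 6
    # hidden state handed over from an encoder (a placeholder here)
    encoder_h_state = tf.placeholder(tf.float32, shape=(None, nodes))
    dec = decoder(encoder_h_state, batch_size, predict_time, layer_num=1, nodes=nodes)
    # under these assumptions, predictions has shape [predict_time * batch_size, 1]
    predictions = dec.decoding()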
| 2.875
| 3
|
code/actions/compile.py
|
michaelbrockus/meson-ui-
| 0
|
12784524
|
<gh_stars>0
#!/usr/bin/env python3
#
# file: compile.py
# author: <NAME>
# gmail: <<EMAIL>>
#
class MesonCompile:
pass
| 1.34375
| 1
|
quasimodo/fact_combinor.py
|
Aunsiels/CSK
| 16
|
12784525
|
from quasimodo.parts_of_facts import PartsOfFacts
from quasimodo.data_structures.submodule_interface import SubmoduleInterface
class FactCombinor(SubmoduleInterface):
def __init__(self, module_reference):
super().__init__()
self._module_reference = module_reference
self._name = "Fact Combinor"
def process(self, input_interface):
parts_of_facts = PartsOfFacts.from_generated_facts(input_interface.get_generated_facts())
return input_interface.replace_generated_facts(parts_of_facts.merge_into_generated_facts())
| 2.328125
| 2
|
utils.py
|
scanlon-dev/wallhaven-switcher
| 0
|
12784526
|
<reponame>scanlon-dev/wallhaven-switcher
import string, urllib, random
from pathlib import Path
import os, subprocess, configparser, sys
from threading import Timer
from common import *
def get_config_file(create=False):
home = str(Path.home())
config_dir = os.path.join(home, '.config/wallhaven/')
config_file = os.path.join(home, '.config/wallhaven/configs.ini')
if os.path.isfile(config_file):
return config_file
elif create:
image_folder = os.path.join(home, 'WallhavenImgs/')
store_image_folder = os.path.join(home, 'Pictures/Wallpapers')
subprocess.run(['mkdir', '-p', image_folder])
subprocess.run(['mkdir', '-p', store_image_folder])
subprocess.run(['mkdir', '-p', config_dir])
config_str = DEFAULT_CONFIG_STRING.format(wallpaper_path=image_folder,
store_wallpaper_path=store_image_folder)
with open(config_file, 'w') as f:
f.write(config_str)
return config_file
return None
def read_config_file(config_file, section_name):
configs = configparser.ConfigParser()
try:
configs.read(config_file)
# myconfigs.read(CONFIG_FILE)
t = configs[section_name]
return configs, t
except Exception as e:
print("Config file format error or there is no section " + section_name)
def update_configs(configs, file_path):
print("updating configs...")
config_file = get_config_file()
tconfigs, tsection = read_config_file(config_file, SECTION_NAME)
tsection['page'] = configs[SECTION_NAME]['page']
tsection['current wallpaper'] = configs[SECTION_NAME]['current wallpaper']
with open(file_path, 'w') as f:
tconfigs.write(f)
def clear_folder(folder_path):
print("Are you sure you want to clear " + folder_path + '? Y/N')
ok = input()
if ok != 'y' and ok != 'Y':
return
try:
(_, _, filenames) = next(os.walk(folder_path))
for file in filenames:
os.remove(os.path.join(folder_path, file))
except Exception as e:
print("Error occurred. Check the path and its contents.")
else:
print("Clear done.")
def check_progress(maxval, results, myconfigs, rm=False):
img_folder_path = myconfigs['wallpaper path']
i = 1
j = 0
for img, error in results:
print_progress(maxval, i)
i = i + 1
if error:
print(f" !{img['id']} error: {error}")
if rm:
try:
os.remove(os.path.join(img_folder_path, img['id']))
except:
pass
else:
j = j + 1
return j
def print_progress(maxval, i):
sys.stdout.write('\r')
# the exact output you're looking for:
sys.stdout.write("[%-24s] %d/%d" % ('=' * i, i, maxval))
sys.stdout.flush()
def fetch_img(img, configs):
def timer_handler(response):
response.close()
img_folder_path = configs['wallpaper path']
try:
filename = f'{img["id"]}.{img["file_type"].split("/")[1]}'
with open(os.path.join(img_folder_path, filename), 'wb') as f:
req = urllib.request.Request(img['path'], headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req, timeout=TIME_OUT) as response:
timer = Timer(TIME_OUT, timer_handler, [response])
timer.start()
try:
f.write(response.read())
except:
timer.cancel()
f.close()
return img, 'Time out!'
timer.cancel()
# print(f"{img['name']} done.")
return img, None
except Exception as e:
return img, e
def set_system_wallpaper(myconfigs, sys_name, wallpaper):
# if myconfigs['hsetroot']=='1':
# wallpaper=hsetroot_process(wallpaper_path)
    # TODO: add wpgtk support here (run wpg on the wallpaper if it is installed,
    # e.g. check for $HOME/.local/bin/wpg before falling back to the backends below)
if sys_name == 'xfce':
xfcesetwallpaper = 'xfconf-query -c xfce4-desktop -p /backdrop/screen0/monitoreDP-1/workspace0/last-image -s'.split()
xfcesetwallpaper.append(wallpaper)
# print(xfcesetwallpaper)
subprocess.run(xfcesetwallpaper)
elif sys_name == 'gnome':
gnomesetwallpaper = f'gsettings set org.gnome.desktop.background picture-uri "file://{wallpaper}"'
subprocess.run(gnomesetwallpaper.split())
elif sys_name == 'feh':
feh_options = myconfigs['feh options']
feh_set_wallpaper = f'feh {feh_options} {wallpaper}'
print(feh_set_wallpaper)
subprocess.run(feh_set_wallpaper.split())
elif sys_name == 'hsetroot':
wallpaper=hsetroot_process(wallpaper,myconfigs)
command=f'hsetroot {myconfigs["hsetroot image option"]} {wallpaper}'
print(command)
subprocess.run(command.split())
# image tweak for xfce lock screen
# xfcesetwallpaper = 'xfconf-query -c xfce4-desktop -p /backdrop/screen0/monitoreDP-1/workspace0/last-image -s'.split()
# xfcesetwallpaper.append(wallpaper)
# print(xfcesetwallpaper)
# subprocess.run(xfcesetwallpaper)
print('successfully set wallpaper ' + wallpaper)
def random_string(string_length):
"""Generate a random string with the combination of lowercase and uppercase letters """
letters = string.ascii_letters + string.digits
return ''.join(random.choice(letters) for _ in range(string_length))
def hsetroot_process(wallpaper_path,myconfigs):
cur_path=os.path.join(myconfigs['wallpaper path'],CUR_WALLPAPER_NAME)
#subprocess.run(['cp', wallpaper_path, cur_path])
command=f'hsetroot {myconfigs["hsetroot image option"]} {wallpaper_path} {myconfigs["hsetroot tweak options"]} -write {cur_path}'
print(command)
subprocess.run(command.split())
return cur_path
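# Typical flow (illustration only, based on the functions above):
#   config_file = get_config_file(create=True)   # ensures ~/.config/wallhaven/configs.ini exists
#   configs, section = read_config_file(config_file, SECTION_NAME)
#   set_system_wallpaper(section, 'feh', '/path/to/wallpaper.jpg')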
| 2.28125
| 2
|
Factorial_Recursive.py
|
parth2608/NPTEL-Joy-of-computing-with-Python
| 0
|
12784527
|
def factorial(n):
if(n==0):
return 1
else:
return n*factorial(n-1)
n=int(input("Enter a positive number: "))
if(n<0):
print("Factorial is not defined on negative numbers.")
else:
f=factorial(n)
print("Factorial of",n,"is",f)
| 4.21875
| 4
|
sc2/constants.py
|
Olaf-G/Bachelor-s-Degree
| 7
|
12784528
|
<reponame>Olaf-G/Bachelor-s-Degree<filename>sc2/constants.py
from .ids.ability_id import *
from .ids.buff_id import *
from .ids.effect_id import *
from .ids.unit_typeid import *
from .ids.upgrade_id import *
| 1.179688
| 1
|
app/main/forms.py
|
HenriqueLR/hangman-game
| 0
|
12784529
|
#coding: utf-8
import re
from django import forms
from django.utils.translation import ugettext_lazy as _
from main.utils import parse_csv_file as read_file
def _validation_word(word):
if len(word) > 46:
        raise forms.ValidationError(_('Maximum length allowed is 46 characters.'))
if ' ' in word:
        raise forms.ValidationError(_('Words containing whitespace are not allowed.'))
    if not re.match(r'^\w+$', word):
        raise forms.ValidationError(_('Words containing special characters are not allowed.'))
class FilesAdminForm(forms.ModelForm):
def clean_file(self):
file = self.cleaned_data['file']
parse_file = read_file(file)
        if 'word' not in parse_file.fieldnames:
            raise forms.ValidationError(_('Incorrect file structure; check the example in the documentation.'))
for row in parse_file:
_validation_word(row['word'])
return file
class Meta:
fields = '__all__'
class WordsAdminForm(forms.ModelForm):
def clean_word(self):
word = self.cleaned_data['word']
_validation_word(word)
return word
class Meta:
fields = '__all__'
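# Illustration of the validation rules defined above (not part of the original module):
#   _validation_word('python')      passes
#   _validation_word('two words')   raises ValidationError (whitespace)
#   _validation_word('a-b')         raises ValidationError (special character)
#   _validation_word('x' * 47)      raises ValidationError (longer than 46 characters)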
| 2.6875
| 3
|
je_editor/ui/ui_utils/keyword/__init__.py
|
JE-Chen/je_editor
| 1
|
12784530
|
<reponame>JE-Chen/je_editor
from je_editor.ui.ui_utils.keyword import *
| 1.007813
| 1
|
tests/test_transports.py
|
freedge/fake-switches
| 42
|
12784531
|
<reponame>freedge/fake-switches<gh_stars>10-100
import unittest
from hamcrest import assert_that, equal_to
from fake_switches.transports import SwitchSshService, SwitchTelnetService, SwitchHttpService
class TransportsTests(unittest.TestCase):
def test_http_service_has_default_port(self):
http_service = SwitchHttpService()
assert_that(http_service.port, equal_to(80))
def test_ssh_service_has_default_port(self):
ssh_service = SwitchSshService()
assert_that(ssh_service.port, equal_to(22))
def test_telnet_service_has_default_port(self):
telnet_service = SwitchTelnetService()
assert_that(telnet_service.port, equal_to(23))
| 2.828125
| 3
|
dftd3/parameters/r2r4.py
|
bast/pyDFTD3
| 2
|
12784532
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# pyDFTD3 -- Python implementation of Grimme's D3 dispersion correction.
# Copyright (C) 2020 <NAME> and contributors.
#
# This file is part of pyDFTD3.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# For information on the complete list of contributors to the
# pyDFTD3, see: <http://github.com/bobbypaton/pyDFTD3/>
#
from math import sqrt
"""PBE0/def2-QZVP atomic values for multipole coefficients."""
_R2R4 = [
8.0589,
3.4698,
29.0974,
14.8517,
11.8799,
7.8715,
5.5588,
4.7566,
3.8025,
3.1036,
26.1552,
17.2304,
17.7210,
12.7442,
9.5361,
8.1652,
6.7463,
5.6004,
29.2012,
22.3934,
19.0598,
16.8590,
15.4023,
12.5589,
13.4788,
12.2309,
11.2809,
10.5569,
10.1428,
9.4907,
13.4606,
10.8544,
8.9386,
8.1350,
7.1251,
6.1971,
30.0162,
24.4103,
20.3537,
17.4780,
13.5528,
11.8451,
11.0355,
10.1997,
9.5414,
9.0061,
8.6417,
8.9975,
14.0834,
11.8333,
10.0179,
9.3844,
8.4110,
7.5152,
32.7622,
27.5708,
23.1671,
21.6003,
20.9615,
20.4562,
20.1010,
19.7475,
19.4828,
15.6013,
19.2362,
17.4717,
17.8321,
17.4237,
17.1954,
17.1631,
14.5716,
15.8758,
13.8989,
12.4834,
11.4421,
10.2671,
8.3549,
7.8496,
7.3278,
7.4820,
13.5124,
11.6554,
10.0959,
9.7340,
8.8584,
8.0125,
29.8135,
26.3157,
19.1885,
15.8542,
16.1305,
15.6161,
15.1226,
16.1576,
]
R2R4 = [sqrt(0.5 * x * sqrt(i + 1)) for i, x in enumerate(_R2R4)]
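# Illustration (not part of the original module): R2R4 is indexed by atomic number
# minus one, so the value for carbon (Z = 6) is R2R4[5], i.e.
# sqrt(0.5 * 7.8715 * sqrt(6)) computed from the raw coefficient above.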
| 1.929688
| 2
|
src/dowml/lib.py
|
IBMDecisionOptimization/dowml
| 3
|
12784533
|
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright <NAME>, 2021
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
import ast
import base64
import csv
import glob
import io
import logging
import os
import re
import sys
import tempfile
import time
import urllib.parse
from collections import namedtuple
from contextlib import contextmanager
from datetime import datetime
from functools import lru_cache
from operator import attrgetter
import requests
from ibm_watson_machine_learning.wml_client_error import WMLClientError, ApiRequestFailure
from packaging import version
from ibm_watson_machine_learning import APIClient
# WML Python API version with a fixed Assets.download function
WML_HAS_FIXED_DOWNLOAD = "1000.0.0"
LOGNAME = 'log.txt'
class Error(Exception):
"""Base class for all errors in this script"""
pass
class InvalidCredentials(Error):
"""The WML credentials were not found, or incorrect"""
pass
class SimilarNamesInJob(Error):
"""A job can't have two input files with the same name, irrespective of path"""
pass
class NoCredentialsToCreateSpace(Error):
"""Need to create a space, but credentials are incomplete to allow that"""
pass
#
# The WML API offers a way to choose which entities are returned when asking
# for job details. Unfortunately, that parameter is not surfaced in the WML
# Python API, and we have to patch the code in order to send the value we want.
#
# APIClient._params() is the function that creates the parameters for the REST
# call. We replace it with our own function '_new_params' that (1) calls the
# original function and adds the filter we want, if we want one.
#
# The function will not be called with any parameter. So the filter we want to
# use, if any, must be set in a global variable.
_the_filter = None
# Backup of the original function, so that we can call it and restore it later.
_the_old_params = None
def _new_params():
"""Our new function to build a parameter list for the REST call.
Called by the instance of APIClient itself."""
global _the_old_params
global _the_filter
# Use the original code and get its output
# noinspection PyCallingNonCallable
result = _the_old_params()
# Add the filter, if one is required
if _the_filter:
result['include'] = _the_filter
return result
def _get_file_spec(path):
force = False
if path[0] == '+':
force = True
path = path[1:]
basename = os.path.basename(path)
return path, basename, force
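# For example (illustrative paths): _get_file_spec('+models/diet.lp') returns
# ('models/diet.lp', 'diet.lp', True), while _get_file_spec('models/diet.lp')
# returns ('models/diet.lp', 'diet.lp', False).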
class _CredentialsProvider:
""""Reads credentials for a DOWMLLib instance. Stores them as a
'credentials' attribute."""
ENVIRONMENT_VARIABLE_NAME = 'DOWML_CREDENTIALS'
ENVIRONMENT_VARIABLE_NAME_FILE = 'DOWML_CREDENTIALS_FILE'
# The keys in the credentials
APIKEY = 'apikey'
TOKEN = 'token'
SPACE_ID = 'space_id'
SPACE_NAME = 'space_name'
URL = 'url'
REGION = 'region'
COS_CRN = 'cos_resource_crn'
ML_CRN = 'ml_instance_crn'
REGION_TO_URL = {
'us-south': 'https://us-south.ml.cloud.ibm.com',
'eu-de': 'https://eu-de.ml.cloud.ibm.com',
'eu-gb': 'https://eu-gb.ml.cloud.ibm.com',
'jp-tok': 'https://jp-tok.ml.cloud.ibm.com',
}
def __init__(self, wml_credentials_file=None, api_key=None, wml_credentials_str=None, url=None, region=None):
self._logger = logging.getLogger(self.__class__.__name__)
if wml_credentials_str is None:
if wml_credentials_file is not None:
wml_credentials_str = self._read_wml_credentials_from_file(wml_credentials_file)
else:
wml_credentials_str = self._read_wml_credentials_from_env()
if wml_credentials_str:
self._logger.debug('Found credential string.')
self.credentials = self.check_credentials(wml_credentials_str, api_key=api_key, url=url, region=region)
def usage(self):
print(f'${self.ENVIRONMENT_VARIABLE_NAME} should contain credentials as a Python dict of the form:')
print(f' {{\'{self.APIKEY}\': \'<apikey>\', \'{self.URL}\': \'https://us-south.ml.cloud.ibm.com\'}}')
print(f'Or set ${self.ENVIRONMENT_VARIABLE_NAME_FILE} to the path to a file containing the same information.')
def check_credentials(self, wml_cred_str, api_key, url, region):
wml_credentials = None
if wml_cred_str is not None:
assert type(wml_cred_str) is str
if not wml_cred_str:
raise InvalidCredentials('WML credentials must not be an empty string.')
wml_credentials = ast.literal_eval(wml_cred_str)
assert type(wml_credentials) is dict
if not wml_credentials:
# If we don't find a credentials string through the environment, we will
# assume that the parameters are enough to build one.
wml_credentials = {}
if api_key:
wml_credentials[self.APIKEY] = api_key
if url:
wml_credentials[self.URL] = url
if region:
wml_credentials[self.REGION] = region
if self.APIKEY not in wml_credentials and self.TOKEN not in wml_credentials:
raise InvalidCredentials('API key (or token) must be specified.')
if self.URL not in wml_credentials and self.REGION not in wml_credentials:
raise InvalidCredentials('URL or region must be specified (but not both).')
if self.APIKEY in wml_credentials:
assert type(wml_credentials[self.APIKEY]) is str
else:
assert type(wml_credentials[self.TOKEN]) is str
if region:
if url:
raise InvalidCredentials(f"You must not specify both '{self.URL}' and '{self.REGION}'.")
wml_credentials[self.REGION] = region
# Setting a region must clear the URL, otherwise there will be an
# ambiguity (and therefore an error) just below
wml_credentials.pop(self.URL, None)
if self.REGION in wml_credentials:
region = wml_credentials[self.REGION]
if self.URL in wml_credentials:
raise InvalidCredentials(f"WML credentials must not have both '{self.URL}' and '{self.REGION}'.")
try:
wml_credentials[self.URL] = self.REGION_TO_URL[region]
except KeyError:
raise InvalidCredentials(f"Unknown region '{region}'.")
wml_credentials.pop(self.REGION)
if url:
# The url specified takes priority over the one in the credentials, if any.
wml_credentials[self.URL] = url
assert self.URL in wml_credentials
assert type(wml_credentials[self.URL]) is str
url = wml_credentials[self.URL]
if not url:
raise InvalidCredentials('URL must not be an empty string.')
if url[-1] == '/':
self._logger.warning('URL should not have a \'/\' at the end.')
wml_credentials[self.URL] = url[:-1]
self._logger.debug('Credentials have the expected structure.')
return wml_credentials
def _read_wml_credentials_from_env(self):
"""Return a string of credentials suitable for WML from the environment
Raises InvalidCredentials if anything is wrong."""
var_name = self.ENVIRONMENT_VARIABLE_NAME
var_file_name = self.ENVIRONMENT_VARIABLE_NAME_FILE
self._logger.debug(f'Looking for credentials in environment variable {var_name}...')
wml_cred_str = None
if var_name in os.environ:
wml_cred_str = os.environ[var_name]
elif var_file_name in os.environ:
self._logger.debug(f'Looking for credentials file name in environment variable {var_file_name}...')
wml_cred_str = self._read_wml_credentials_from_file(os.environ[var_file_name])
return wml_cred_str
def _read_wml_credentials_from_file(self, file):
"""Return the content of the file, assumed to be WML credentials"""
self._logger.debug(f'Looking for credentials in file \'{file}\'...')
with open(file) as f:
wml_cred_str = f.read()
return wml_cred_str
def version_is_greater(current, minimum):
"""Compare two 'vv.nn.pp' versions.
    Return True if the current version string is greater than or equal to the
minimum string. Assumes that each string is of type vv.nn.pp, with vv, nn and
pp being integers."""
return version.parse(current) >= version.parse(minimum)
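# For example (illustration only): version_is_greater('1.0.200', '1.0.154') is True,
# while version_is_greater('1.0.99', '1.0.154') is False, because packaging.version
# compares numeric components rather than raw strings.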
@contextmanager
def suppress_stdout():
""""Sometimes it's nicer to not get printed output from APIClient"""
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout
class DOWMLLib:
"""A Python client to run DO models on WML"""
DOWML_PREFIX = 'dowml'
space_name = f'{DOWML_PREFIX}-space'
MODEL_NAME = f'{DOWML_PREFIX}-model'
MODEL_TYPES = ['cplex', 'cpo', 'opl', 'docplex']
DO_VERSION = '20.1'
TSHIRT_SIZES = ['S', 'M', 'XL']
DEPLOYMENT_NAME = f'{DOWML_PREFIX}-deployment'
def __init__(self, wml_credentials_file=None,
api_key=None,
space_id=None,
url=None,
region=None,
tz=datetime.utcnow().astimezone().tzinfo):
f"""Read and validate the WML credentials
Args:
wml_credentials_file: path to the file that contains the WML credentials.
If None, they are read from the environment.
space_id: the id of the space that should be used. If specified, this
replaces the one in the credentials.
url: the URL for the Machine Learning service to use. If specified, this
replaces the one in the credentials. url and region can't be both specified.
region: An alternative way to specify the URL for the Machine Learning service
to use. If specified, the URL will be deduced from the region. This
replaces the one in the credentials. url and region can't be both specified.
Possible values for the region are {list(_CredentialsProvider.REGION_TO_URL.keys())}
tz: timezone to use to display time, defaults to Python's default timezone."""
self.model_type = self.MODEL_TYPES[0]
"""Type of model to solve"""
self.tshirt_size = self.TSHIRT_SIZES[0]
"""Tee-shirt size for the hardware to solve the model"""
self.do_version = self.DO_VERSION
"""DO engines version to use"""
self.timelimit = None
"""Time limit for the solve, in seconds"""
self.inputs = 'assets'
"""Type of inputs that the created jobs should use"""
self.outputs = 'inline'
"""Type of outputs that the created jobs should use"""
self._logger = logging.getLogger(self.__class__.__name__)
cred_provider = _CredentialsProvider(wml_credentials_file, api_key=api_key, url=url, region=region)
wml_credentials = cred_provider.credentials
# A space name in the credentials changes the default
if cred_provider.SPACE_NAME in wml_credentials:
self._logger.debug('They contain a space name.')
self.space_name = wml_credentials[cred_provider.SPACE_NAME]
if cred_provider.SPACE_ID in wml_credentials:
self._logger.debug('And they contain a space id.')
# The space_id specified here takes precedence
# over the one, if any, defined in the credentials
if space_id:
wml_credentials[cred_provider.SPACE_ID] = space_id
self._wml_credentials = wml_credentials
# We don't initialize the client at this time, because this is an
# expensive operation.
self._client = None
self._space_id = None
self._data_connection = None
self.tz = tz
@property
def url(self):
"""The URL for the WML service instance that the library is connected to."""
return self._wml_credentials[_CredentialsProvider.URL]
@property
def space_id(self):
"""The id for the deployment space that the library is connected to."""
return self._space_id
@property
def inline(self):
self._logger.warning('Attribute \'inline\' is deprecated: use \'inputs\' instead.')
return self.inputs == 'inline'
@inline.setter
def inline(self, value):
self._logger.warning('Attribute \'inline\' is deprecated: use \'inputs\' instead.')
self.inputs = 'inline' if value else 'assets'
def _create_client(self):
"""Create the Python APIClient instance"""
assert self._client is None
self._logger.debug('Creating the WML client...')
# http://ibm-wml-api-pyclient.mybluemix.net/#api-for-ibm-cloud
client = APIClient(self._wml_credentials)
self._logger.info(f'Creating the client succeeded. Client version is {client.version}.')
self._logger.info(f'Client uses URL {self._wml_credentials[_CredentialsProvider.URL]}.')
return client
def _set_default_space(self):
space_id_key = _CredentialsProvider.SPACE_ID
if space_id_key in self._wml_credentials:
space_id = self._wml_credentials[space_id_key]
self._logger.debug(f'Using specified space \'{space_id}\'.')
else:
space_id = self._find_or_create_space()
self._logger.debug('Setting default space...')
self._client.set.default_space(space_id)
self._space_id = space_id
self._logger.debug('Done.')
def _get_or_make_client(self):
if self._client is None:
self._client = self._create_client()
# The client is pretty much useless when it doesn't yet have a
# default space. So let's set it immediately.
self._set_default_space()
# It would seem natural to assert that self._space_id is not None.
# But this fails when we are in unit-tests and we just set _client to
# a mock object from outside, without also setting the _space_id.
return self._client
def solve(self, paths):
"""Solve the model, return the job id
The model is sent as online data to WML (if 'inline yes') or is uploaded as a data asset
to be reused later (default).
:param paths: one or more pathname to the files to send, as a single
string, separated by space
:return: The id of the submitted job
"""
self._get_or_make_client()
# As _get_deployment_id caches its results, it may happen that what it returns is
# invalid. For example if the user deleted the deployment after the last solve.
# So if we get an error about the deployment not existing, we clear the cache
# and retry once.
first_try = True
while True:
deployment_id = self._get_deployment_id()
self._logger.info(f'Deployment id: {deployment_id}')
try:
job_id = self.create_job(paths, deployment_id)
self._logger.info(f'Job id: {job_id}')
return job_id
except ApiRequestFailure as e:
if first_try and b'deployment_does_not_exist' in e.args[1].content:
self._logger.warning('Deployment was not found. Clearing the cache and retrying...')
self._get_deployment_id_with_params.cache_clear()
first_try = False
else:
if not first_try:
self._logger.warning('Clearing the cache didn\'t help...')
raise
def client_data_asset_download(self, asset_id, filename):
self._logger.debug(f'Downloading asset {asset_id} in {filename}...')
with suppress_stdout():
name = self._client.data_assets.download(asset_id, filename)
if version_is_greater(self._client.version, WML_HAS_FIXED_DOWNLOAD):
filename = name
# else the return value is useless when filename is an absolute path
self._logger.debug(f'Done saving {filename}.')
def get_log(self, job_id):
"""Extract the engine log from the job.
:param job_id: The id of the job to get the log from
:return: The decoded log, or None
"""
def _get_asset_content(asset_id):
if version_is_greater(self._client.version, WML_HAS_FIXED_DOWNLOAD):
return self._client.data_assets.download(asset_id).decode('ascii')
else:
with tempfile.TemporaryDirectory() as temp_dir_name:
filename = os.path.join(temp_dir_name, f'{asset_id}-log.txt')
self.client_data_asset_download(asset_id, filename)
with open(filename) as f:
content = f.read()
return content
def _get_log_from_output_references(references):
self._logger.debug(f'Looking for {LOGNAME} in output_data_references...')
for ref in references:
if ref.get('type') != 'data_asset':
continue
if 'id' not in ref:
self._logger.warning('Ignoring data asset with no id.')
continue
if ref['id'] == LOGNAME:
self._logger.debug('Found it.')
try:
asset_id = ref['location']['id']
except KeyError:
self._logger.error('Log data asset has no location/id information.')
break
self._logger.debug(f'This is asset {asset_id}.')
return _get_asset_content(asset_id)
return None
def _get_log_from_outputs(outputs):
self._logger.debug(f'Looking for {LOGNAME} in output_data...')
for output_data in outputs:
if output_data['id'] == LOGNAME:
if 'content' not in output_data:
self._logger.error(f'Log without content for job {job_id}.')
continue
self._logger.debug('Found it. Decoding it...')
output = output_data['content']
output = self.decode_log(output)
output = self.remove_empty_lines(output)
self._logger.debug('Decoded the log.')
return output
return None
job_details = self.get_job_details(job_id, with_contents='log')
try:
do = job_details['entity']['decision_optimization']
except KeyError:
self._logger.warning('No decision_optimization structure available for this job.')
return None
# When we have references in the job, the 'output_data' may be an empty list
if 'output_data' in do and do['output_data']:
return _get_log_from_outputs(do['output_data'])
elif 'output_data_references' in do:
return _get_log_from_output_references(do['output_data_references'])
else:
self._logger.warning('No output_data or output_data_references structure available for this job.')
return None
def _parse_asset_references(self, details, key):
def find_id_in_href(loc):
href = loc.get('href') if loc else None
if not href:
return None
path = urllib.parse.urlparse(href).path
s = re.search('/v2/assets/(.*)', path)
if s:
return s.group(1)
self._logger.warning(f'Could not decode href for asset \'{name}\'.')
return None
def find_id_in_id(loc):
return loc.get('id') if loc else None
try:
refs = details['entity']['decision_optimization'][key]
except KeyError:
self._logger.debug(f'No \'{key}\' structure available for this job.')
return {}
result = {}
for ref in refs:
asset_type = ref.get('type')
if asset_type != 'data_asset':
self._logger.debug(f'Ignoring asset of unknown type \'{asset_type}\'.')
continue
name = ref.get('id')
if not name:
self._logger.warning('Found a data asset with no name.')
continue
self._logger.debug(f'Found a data asset named {name}.')
location = ref.get('location')
asset_id = find_id_in_id(location) or find_id_in_href(location)
if asset_id:
result[name] = asset_id
else:
self._logger.warning(f'Could not find asset id for asset \'{name}\'.')
return result
def get_output_asset_ids(self, details):
""""Extract the output data asset ids from the job details.
:param details: The details of the job to get the output from
:return: A dict of outputs. Keys are the names of the outputs,
and the corresponding value for each key is the id of the asset.
"""
return self._parse_asset_references(details, 'output_data_references')
def get_input_asset_ids(self, details):
""""Extract the input data asset ids from the job details.
:param details: The details of the job to get the output from
:return: A dict of inputs. Keys are the names of the inputs,
and the corresponding value for each key is the id of the asset.
"""
return self._parse_asset_references(details, 'input_data_references')
def get_output(self, details, csv_as_dataframe=None, tabular_as_csv=False):
"""Deprecated. Use get_outputs instead"""
return self.get_outputs(details, csv_as_dataframe, tabular_as_csv)
def _extract_inline_files_from_details(self, details, key, tabular_as_csv):
try:
files = details['entity']['decision_optimization'][key]
except KeyError:
self._logger.debug(f'No \'{key}\' structure available for this job.')
return {}
result = {}
for output_data in files:
name = output_data['id']
if 'content' in output_data:
# What we have here is a regular file, encoded
self._logger.debug(f'Found a regular file named {name}.')
content = self._extract_regular_file(output_data)
result[name] = content
elif ('values' in output_data and
'fields' in output_data and
name.lower().endswith('.csv')):
self._logger.debug(f'Found a CSV file named {name}.')
content = self._extract_csv_file(output_data, tabular_as_csv)
result[name] = content
else:
self._logger.warning(f'Found an unknown file named {name}.')
content = output_data
result[name] = content
return result
def get_outputs(self, details, csv_as_dataframe=None, tabular_as_csv=False):
""""Extract the inline outputs from the job details.
:param details: The details of the job to get the outputs from
:param csv_as_dataframe: Whether the content of a CSV file should be
returned as a Pandas DataFrame or not. Deprecated: use tabular_as_csv
instead
:param tabular_as_csv: Whether tabular outputs should be returned as
CSV file content instead of Pandas dataframe
:return: A dict of outputs, with the names of the assets as keys, and
the content as value, as bytes. We don't assume that the content is
actually text.
"""
if csv_as_dataframe is not None:
# We have a non-default value for this deprecated parameter
# Let's first check that the replacement parameter is at its default
# value
assert(tabular_as_csv is False)
# Now we can replace it with the correct value
tabular_as_csv = not csv_as_dataframe
return self._extract_inline_files_from_details(details, 'output_data', tabular_as_csv)
def get_inputs(self, details, tabular_as_csv=False):
""""Extract the inline inputs from the job details.
:param details: The details of the job to get the inputs from
:param tabular_as_csv: Whether tabular inputs should be returned as
CSV file content instead of Pandas dataframe
:return: A dict of inputs, with the names of the assets as keys, and
the content as value, as bytes. We don't assume that the content is
actually text.
"""
return self._extract_inline_files_from_details(details, 'input_data', tabular_as_csv)
@staticmethod
def _extract_csv_file(output_data, tabular_as_csv):
if tabular_as_csv:
content = io.StringIO()
writer = csv.writer(content)
writer.writerow(output_data['fields'])
for r in output_data['values']:
writer.writerow(r)
content = content.getvalue().encode()
else:
import pandas
content = pandas.DataFrame(output_data['values'],
columns=output_data['fields'])
return content
@staticmethod
def _extract_regular_file(output_data):
content = output_data['content']
content = content.encode('UTF-8')
content = base64.b64decode(content)
return content
def get_job_details(self, job_id, with_contents=None):
""" Get the job details for the given job
:param job_id: The id of the job to look for
:param with_contents: if 'names', the details returned include
the input and output files names. If 'full', the content of these files
is included as well. If 'log', the content only includes the output files
:return: The job details
"""
client = self._get_or_make_client()
self._logger.debug('Fetching output...')
output_filter = None
if not with_contents:
output_filter = 'solve_parameters,solve_state,status'
elif with_contents == 'log':
output_filter = 'output_data,output_data_references'
job_details = self.client_get_job_details(client, job_id, output_filter)
self._logger.debug('Done.')
if with_contents != 'full' and with_contents != 'log':
self.filter_large_chunks_from_details(job_details)
return job_details
@staticmethod
def client_get_job_details(client, job_id, with_filter=None):
# The filter doesn't work correctly if it contains spaces
assert(not with_filter or (with_filter.find(' ') == -1))
if version_is_greater(client.version, "1.0.154"):
result = client.deployments.get_job_details(job_id, with_filter)
else:
global _the_filter
global _the_old_params
# Save the filter in a global variable for our new function to find it
_the_filter = with_filter
# Save the pointer to the original code
# noinspection PyProtectedMember
_the_old_params = client._params
# and replace it with our new function
client._params = _new_params
try:
result = client.deployments.get_job_details(job_id)
finally:
# Put back the original code
client._params = _the_old_params
return result
@staticmethod
def filter_large_chunks_from_details(job_details):
"""Remove the large blobs (input/output) from the given job_details."""
try:
do = job_details['entity']['decision_optimization']
for data in do.get('output_data', []):
if 'content' in data:
# This is the case for regular files, such as the log
data['content'] = '[not shown]'
elif 'values' in data:
# This is the case for CSV files
data['values'] = ['[not shown]']
for data in do.get('input_data', []):
if 'content' in data:
data['content'] = '[not shown]'
if 'solve_state' in do and 'latest_engine_activity' in do['solve_state']:
do['solve_state']['latest_engine_activity'] = ['[not shown]']
except KeyError:
# GH-1: This happens when the job failed
pass
def _delete_data_assets(self, job_details):
job_id = job_details['metadata']['id']
try:
odr = job_details['entity']['decision_optimization']['output_data_references']
except KeyError:
odr = []
for output in odr:
if output.get('type') != 'data_asset':
continue
if 'location' not in output:
self._logger.error(f'Missing \'location\' in details for job {job_id}.')
elif 'id' not in output['location']:
self._logger.error(f'Missing \'location.id\' in details for job {job_id}.')
else:
data_asset_id = output['location']['id']
self._logger.debug(f'Deleting data asset {data_asset_id}...')
try:
self._client.data_assets.delete(data_asset_id)
self._logger.debug('Done.')
except WMLClientError:
self._logger.error('Exception raised while trying to delete the asset.', exc_info=True)
def _client_deployments_delete_job(self, job_id, hard, job_details):
"""Deletes the platform run, so that the deployment job is deleted as well.
If only calling
client.deployments.delete_job(job_id, hard)
the 'run' of the 'platform job' on the Watson Studio side is left,
and it will never be deleted.
On the other hand, deleting the run on the WS side also deletes the
deployment job on the WML side. So let's do that.
"""
client = self._get_or_make_client()
wml_url = self._wml_credentials['url']
# We don't want to (try to) delete the WS run if we only cancel the job
# Instead, we skip everything until calling deployments.delete_job(True)
everything_ok_so_far = hard
ws_url = client.PLATFORM_URLS_MAP.get(wml_url)
if everything_ok_so_far and not ws_url:
self._logger.error(f'Unknown Watson Studio URL for WML URL {wml_url}.')
everything_ok_so_far = False
if everything_ok_so_far:
try:
platform_job_id = job_details['entity']['platform_job']['job_id']
platform_run_id = job_details['entity']['platform_job']['run_id']
url = f'{ws_url}/v2/jobs/{platform_job_id}/runs/{platform_run_id}?space_id={self.space_id}'
except KeyError:
self._logger.error('Watson Studio job id or run id not found in WML job details.')
everything_ok_so_far = False
if everything_ok_so_far:
# noinspection PyUnboundLocalVariable
self._logger.debug(f'Trying to delete run {platform_run_id} of Watson Studio job {platform_job_id}...')
# noinspection PyProtectedMember
# noinspection PyUnboundLocalVariable
r = requests.delete(url, headers={'Authorization': f'Bearer {client.service_instance._get_token()}',
'Content-Type': 'application/json',
'cache-control': 'no-cache'})
if r.status_code != 204:
self._logger.error(f'Error when trying to delete the Watson Studio run. {r.text}')
everything_ok_so_far = False
if not everything_ok_so_far:
if hard:
self._logger.error('Could not delete the Watson Studio run. Deleting the WML job deployment instead...')
# else:
# we just wanted to cancel the job, so there's nothing to warn against
client.deployments.delete_job(job_id, hard)
def delete_job(self, job_id, hard=True):
"""Delete the given job.
:param job_id: the job to be deleted
:param hard: deprecated. Use cancel_job instead of passing False
"""
if not hard:
self.cancel_job(job_id)
return
self._logger.debug(f'Deleting data assets (if any) for job {job_id}...')
job_details = self.get_job_details(job_id, with_contents='names')
self._delete_data_assets(job_details)
self._logger.debug(f'Done. Deleting job {job_id}...')
self._client_deployments_delete_job(job_id, True, job_details)
self._logger.debug('Done.')
def cancel_job(self, job_id):
"""Cancel the given job.
:param job_id: the job to be canceled
"""
self._logger.debug(f'Canceling job {job_id}...')
self._client_deployments_delete_job(job_id, False, None)
self._logger.debug('Done.')
def decode_log(self, output):
"""Decode the engine log coming from DO4WML.
:param output: A base-64 encoded text with empty lines
:return: The decoded text, without empty lines
"""
output = output.encode('UTF-8')
output = base64.b64decode(output)
output = output.decode('UTF-8')
output = self.remove_empty_lines(output)
return output
@staticmethod
def remove_empty_lines(output):
"""Remove empty lines from the log.
:param output: The text to process
:return: The text, with no empty lines
"""
output = '\n'.join([s for s in output.splitlines() if s])
return output
@staticmethod
def _get_job_status_from_details(job_details):
return job_details['entity']['decision_optimization']['status']['state']
@staticmethod
def _get_job_id_from_details(job_details):
return job_details['metadata']['id']
@staticmethod
def _get_creation_time_from_details(job_details, tz):
created = job_details['metadata']['created_at']
if created[-1] == 'Z':
# A suffix of Z is not understood by isoformat. Let's replace
# it with one that's understood
created = created[:-1] + '+00:00'
dt = datetime.fromisoformat(created)
# Transform to local time
dt = dt.astimezone(tz)
# Remove timezone information so that ...
dt = dt.replace(tzinfo=None)
# ... just naively prints local time
created = dt.isoformat(sep=' ', timespec='seconds')
else:
# Let's not mess with what we don't know
pass
return created
@staticmethod
def _get_input_names_from_details(job_details):
do = job_details['entity']['decision_optimization']
inputs = do.get('input_data', [])
names = [i['id'] for i in inputs]
inputs = do.get('input_data_references', [])
for i in inputs:
if 'id' in i:
names.append('*' + i['id'])
else:
names.append('Unknown')
return names
def wait_for_job_end(self, job_id, print_activity=False):
"""Wait for the job to finish, return its status and details as a tuple.
If print_activity is True, some information is printed in the console."""
class ProgressiveDelay:
def __init__(self):
self.delays = [2, 2, 2, 2, 2, 2, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 30]
def wait(self):
delay = self.delays[0]
if len(self.delays) > 1:
self.delays.pop(0)
assert (2 <= delay <= 30)
time.sleep(delay)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class StatusLogger:
def __init__(self, initial_state):
self.last_state = initial_state
print(initial_state, end='', flush=True)
def log_state(self, state):
if state == self.last_state:
print('.', end='', flush=True)
else:
if self.last_state != '':
print('')
# else: if state was empty, no need to end the line
print(state, end='', flush=True)
self.last_state = state
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
client = self._get_or_make_client()
with StatusLogger('') as status_logger, \
ProgressiveDelay() as delayer:
while True:
job_details = self.client_get_job_details(client, job_id, with_filter='solve_state,status')
do = job_details['entity']['decision_optimization']
status = self._get_job_status_from_details(job_details)
self._logger.info(f'Job status: {status}')
if print_activity:
status_logger.log_state(f'Job is {status}.')
if status in ['completed', 'failed', 'canceled']:
break
if print_activity:
# There may be a bit of log to look at
try:
activity = do['solve_state']['latest_engine_activity']
if activity:
# Because of the StatusLogger, we're not at the beginning of a line
print('')
# We are joining the lines in the activity with a CR,
# only to remove them if they were already included...
act = '\n'.join(activity)
act = self.remove_empty_lines(act)
print(act)
except KeyError:
# This must mean that no activity is available yet
pass
delayer.wait()
if print_activity:
# The status_logger printed something, but didn't end the line yet
print('')
return status, job_details
@staticmethod
def get_file_as_data(path):
"""Return the base-64 encoded content of a file."""
with open(path, 'rb') as f:
data = f.read()
data = base64.b64encode(data)
data = data.decode('UTF-8')
return data
def _get_type_from_details(self, job):
try:
deployment_id = job['entity']['deployment']['id']
deployment = self._get_deployment_from_id(deployment_id)
model_id = deployment['entity']['asset']['id']
model = self._get_model_definition_from_id(model_id)
deployment_type = model['entity']['wml_model']['type']
match = re.fullmatch(r"do-(....*)_[0-9.]*", deployment_type)
if match:
deployment_type = match.group(1)
return deployment_type
except KeyError:
# Something changed. But let's not fail just for that
self._logger.warning('Error while fetching type of a job!')
return '?????'
def _get_version_from_details(self, job):
try:
deployment_id = job['entity']['deployment']['id']
deployment = self._get_deployment_from_id(deployment_id)
model_id = deployment['entity']['asset']['id']
model = self._get_model_definition_from_id(model_id)
deployment_type = model['entity']['wml_model']['type']
match = re.fullmatch(r"do-....*_([0-9.]*)", deployment_type)
engine_version = '?????'
if match:
engine_version = match.group(1)
return engine_version
except KeyError:
# Something changed. But let's not fail just for that
self._logger.warning('Error while fetching version of a job!')
return '?????'
@lru_cache
def _get_model_definition_from_id(self, model_id):
client = self._get_or_make_client()
model = client.model_definitions.get_details(model_id)
return model
@lru_cache
def _get_deployment_from_id(self, deployment_id):
client = self._get_or_make_client()
deployment = client.deployments.get_details(deployment_id)
return deployment
def _get_size_from_details(self, job):
try:
deployment_id = job['entity']['deployment']['id']
deployment = self._get_deployment_from_id(deployment_id)
size = deployment['entity']['hardware_spec']['name']
return size
except KeyError:
# Something changed. But let's not fail just for that
self._logger.warning('Error while fetching size of a job!')
return '?'
def get_jobs(self):
"""Return the list of tuples (status, id, ...) for all jobs in the deployment."""
client = self._get_or_make_client()
self._logger.debug('Getting job details...')
job_details = client.deployments.get_job_details()
self._logger.debug('Done.')
self._logger.debug('Getting information about deployments and models...')
result = []
for job in job_details['resources']:
status = self._get_job_status_from_details(job)
job_id = self._get_job_id_from_details(job)
created = self._get_creation_time_from_details(job, self.tz)
names = self._get_input_names_from_details(job)
deployment_type = self._get_type_from_details(job)
engine_version = self._get_version_from_details(job)
size = self._get_size_from_details(job)
JobTuple = namedtuple('Job', ['status', 'id', 'created', 'names', 'type', 'version', 'size'])
j = JobTuple(status=status, id=job_id, created=created, names=names,
type=deployment_type, version=engine_version, size=size)
result.append(j)
result.sort(key=attrgetter('created'))
self._logger.debug('Done.')
return result
def parse_paths(self, paths):
"""Expand wildcards that may appear in the input assets list."""
self._logger.debug(f'Parsing input list: {paths}')
globbed = []
for path in paths.split():
# Let's first get rid of the 'force' flag that glob
# would not understand
path, _, force = _get_file_spec(path)
files = glob.glob(path)
if not files:
# If the path doesn't actually match an existing file, this is
# not necessarily an error: this name can refer to a data
# asset that exists already. So let's keep it.
files = [path]
if force:
# Put back the '+' in front
files = [f'+{file}' for file in files]
globbed += files
self._logger.debug(f'Actual input list: {globbed}')
return globbed
def create_inputs(self, paths, cdd_inputdata, solve_payload):
# First deal with wildcards
globbed = self.parse_paths(paths)
# And let's now create the inputs from these files
names = []
for path in globbed:
path, basename, force = _get_file_spec(path)
if basename in names:
raise SimilarNamesInJob(basename)
names.append(basename)
if self.inputs == 'inline':
input_data = {
'id': basename,
'content': self.get_file_as_data(path)
}
else:
data_asset_id = self._create_data_asset_if_necessary(path, basename, force)
input_data = {
'id': basename,
"type": "data_asset",
"location": {
"href": "/v2/assets/" + data_asset_id + "?space_id=" + self.space_id
}
}
solve_payload[cdd_inputdata].append(input_data)
def create_job(self, paths, deployment_id):
"""Create a deployment job (aka a run) and return its id."""
client = self._get_or_make_client()
cdd = client.deployments.DecisionOptimizationMetaNames
assert(self.outputs == 'inline' or self.outputs == 'assets')
cdd_outputdata = cdd.OUTPUT_DATA
if self.outputs == 'assets':
cdd_outputdata = cdd.OUTPUT_DATA_REFERENCES
# Assume we use inline data (i.e. content in the job request)
cdd_inputdata = cdd.INPUT_DATA
if self.inputs == 'assets':
# But if we don't want inline data, we have to submit
# input references instead
cdd_inputdata = cdd.INPUT_DATA_REFERENCES
solve_payload = {
cdd.SOLVE_PARAMETERS: {
'oaas.logAttachmentName': 'log.txt',
'oaas.logTailEnabled': 'true',
'oaas.resultsFormat': 'JSON'
},
cdd_inputdata: [],
cdd_outputdata: [
{'id': '.*'}
]
}
if self.outputs == 'assets':
out = solve_payload[cdd_outputdata][0]
out['type'] = 'data_asset'
            # PyCharm assumes that, because we added a string to the dict on
            # the previous line, we should only add strings to the same dict.
            # But that is not how the WML payload is structured...
# noinspection PyTypeChecker
out['connection'] = {}
# noinspection PyTypeChecker
out['location'] = {'name': '${job_id}/${attachment_name}'}
if self.timelimit:
params = solve_payload[cdd.SOLVE_PARAMETERS]
params['oaas.timeLimit'] = 1000 * self.timelimit
self.create_inputs(paths, cdd_inputdata, solve_payload)
self._logger.debug('Creating the job...')
if self.inputs == 'inline':
self._logger.debug('Data is inline. Let\'s not print the payload...')
else:
self._logger.debug(repr(solve_payload))
dt = datetime.now()
job_details = client.deployments.create_job(deployment_id, solve_payload)
submit_time = (datetime.now() - dt).total_seconds()
self._logger.debug(f'Done in {submit_time}. Getting its id...')
job_id = client.deployments.get_job_uid(job_details)
return job_id
def _get_deployment_id(self):
# Which deployment we want depends on a number of configuration values
# in the library. In order for the cache to work correctly, and not always
# return the same deployment id, the cached function must be given these
# values as parameters.
return self._get_deployment_id_with_params(self.DEPLOYMENT_NAME,
self.model_type,
self.do_version,
self.tshirt_size)
@lru_cache
def _get_deployment_id_with_params(self, deployment_name_prefix, model_type, do_version, tshirt_size):
# The point of this forwarding function is to allow testing.
# Specifically, counting the number of calls to the _cached
# function, and deciding what it returns.
# Mocking this function would remove the lru_cache...
return self._get_deployment_id_with_params_cached(deployment_name_prefix, model_type, do_version, tshirt_size)
def _get_deployment_id_with_params_cached(self, deployment_name_prefix, model_type, do_version, tshirt_size):
"""Create the deployment if doesn't exist already, return its id."""
self._logger.debug('Getting deployments...')
client = self._get_or_make_client()
deployment_details = client.deployments.get_details()
self._logger.debug('Done.')
resources = deployment_details['resources']
deployment_name = f'{deployment_name_prefix}-{model_type}-{do_version}-{tshirt_size}'
self._logger.debug(f'Got the list. Looking for deployment named \'{deployment_name}\'')
deployment_id = None
for r in resources:
if r['entity']['name'] == deployment_name:
deployment_id = r['metadata']['id']
self._logger.debug('Found it.')
break
if deployment_id is not None:
return deployment_id
self._logger.debug('This deployment doesn\'t exist yet. Creating it...')
deployment_id = self._create_deployment(deployment_name)
return deployment_id
def _create_deployment(self, deployment_name):
# We need a model to create a deployment
model_id = self._get_model_id()
# Create the deployment
self._logger.debug('Creating the deployment itself...')
client = self._get_or_make_client()
cdc = client.deployments.ConfigurationMetaNames
meta_props = {
cdc.NAME: deployment_name,
cdc.DESCRIPTION: "Deployment for the Solve on WML Python script",
cdc.BATCH: {},
cdc.HARDWARE_SPEC: {'name': self.tshirt_size, 'num_nodes': 2}
}
deployment = client.deployments.create(artifact_uid=model_id, meta_props=meta_props)
self._logger.debug('Deployment created.')
deployment_id = client.deployments.get_id(deployment)
return deployment_id
def _get_model_id(self):
"""Create an empty model if one doesn't exist, return its id."""
self._logger.debug('Getting models...')
client = self._get_or_make_client()
details = client.repository.get_details()
self._logger.debug('Done.')
resources = details['models']['resources']
model_name = f'{self.MODEL_NAME}-{self.model_type}-{self.do_version}'
self._logger.debug(f'Got the list. Looking for model named \'{model_name}\'...')
model_id = None
for r in resources:
if r['metadata']['name'] == model_name:
model_id = r['metadata']['id']
self._logger.debug('Found it.')
self._logger.debug(f'Model id: {model_id}')
break
if model_id is None:
self._logger.debug('This model doesn\'t exist yet. Creating it...')
model_id = self._create_model(model_name)
return model_id
def get_available_versions(self):
"""Return the list of available DO versions on the platform."""
client = self._get_or_make_client()
target_version = "1.0.92"
if not version_is_greater(client.version, target_version):
return [f'Error: need WML client version {target_version} or better to retrieve available versions']
available_versions = []
for s in client.software_specifications.get_details()['resources']:
name = s['metadata']['name']
match = re.fullmatch(r"do_([0-9.]*)", name)
if match:
available_versions.append(match.group(1))
return available_versions
def _create_model(self, model_name):
client = self._get_or_make_client()
cr = client.repository
crm = cr.ModelMetaNames
model_metadata = {
crm.NAME: model_name,
crm.DESCRIPTION: "Model for the solve-on-wml script",
crm.TYPE: f'do-{self.model_type}_{self.do_version}',
crm.SOFTWARE_SPEC_UID:
client.software_specifications.get_id_by_name(f'do_{self.do_version}')
}
# We need an empty.zip file, because APIClient doesn't know better
handle, path = tempfile.mkstemp(suffix='.zip', text=False)
try:
# This string is the result of converting the file
# empty.zip in the repository using
# openssl base64 < empty.zip
file_content = base64.b64decode('UEsFBgAAAAAAAAAAAAAAAAAAAAAAAA==')
os.write(handle, file_content)
finally:
os.close(handle)
try:
model_details = cr.store_model(model=path,
meta_props=model_metadata)
finally:
os.remove(path)
self._logger.debug('Model created.')
model_id = client.repository.get_model_id(model_details)
self._logger.debug(f'Model id: {model_id}')
return model_id
def _find_or_create_space(self):
"""Find the Space to use from its name, create it if it doesn't exist."""
assert self._client
client = self._client
self._logger.debug('Fetching existing spaces...')
space_details = client.spaces.get_details()
resources = space_details['resources']
self._logger.debug(f'Got the list. Looking for space named \'{self.space_name}\'...')
space_id = None
for r in resources:
if r['entity']['name'] == self.space_name:
space_id = r['metadata']['id']
self._logger.debug('Found it.')
break
if space_id is None:
self._logger.debug('This space doesn\'t exist yet. Creating it...')
# Prepare necessary information
wml_credentials = self._wml_credentials
cos_crn = _CredentialsProvider.COS_CRN
ml_crn = _CredentialsProvider.ML_CRN
if cos_crn not in wml_credentials or ml_crn not in wml_credentials:
raise NoCredentialsToCreateSpace(f'WML credentials do not contain the information necessary '
f'to create a deployment space. \nMissing \'{cos_crn}\' '
f'and/or \'{ml_crn}\'.')
assert type(wml_credentials[cos_crn]) is str
assert type(wml_credentials[ml_crn]) is str
csc = client.spaces.ConfigurationMetaNames
metadata = {
csc.NAME: self.space_name,
csc.DESCRIPTION: self.space_name + ' description',
csc.STORAGE: {
"type": "bmcos_object_storage",
"resource_crn": self._wml_credentials[cos_crn]
},
csc.COMPUTE: {
"name": "existing_instance_id",
"crn": self._wml_credentials[ml_crn]
}
}
# Create the space
# We want the space to be ready as soon as the code returns from
# spaces.store(), so we use background_mode=False. In addition, this
# gives us error checking which doesn't happen in default mode.
space_details = client.spaces.store(meta_props=metadata, background_mode=False)
state = space_details['entity']['status'].get('state')
self._logger.debug(f'Space created, with state={state}.')
space_id = client.spaces.get_uid(space_details)
self._logger.info(f'Space id: {space_id}')
return space_id
def _get_asset_details(self):
"""Return the list of all the data assets in the space."""
client = self._get_or_make_client()
# This is the first version where data_assets.get_details() works
assert(version_is_greater(client.version, "1.0.95.1"))
results = client.data_assets.get_details()['resources']
return results
def _find_asset_id_by_name(self, name):
"""Looks for a data asset with the given name, returns its id, or None"""
assets = self._get_asset_details()
for asset in assets:
metadata = asset['metadata']
if metadata['name'] == name:
return metadata['asset_id']
return None
def create_asset(self, path, basename):
"""Create a data asset with the given name.
        A Watson Studio data asset is an entity that mimics a file."""
client = self._get_or_make_client()
asset_details = client.data_assets.create(basename, path)
return asset_details['metadata']['guid']
def delete_asset(self, uid):
"""Delete an existing asset. Return True if ok, False if not."""
client = self._get_or_make_client()
status = client.data_assets.delete(uid)
return status == "SUCCESS"
def _create_data_asset_if_necessary(self, path, basename, force):
"""Create a data asset (and upload file) if it doesn't exist already (or force is True)."""
asset_to_delete = None
self._logger.info(f'Checking whether a data asset named \'{basename}\' already exists.')
data_asset_id = self._find_asset_id_by_name(basename)
if data_asset_id:
self._logger.debug(f'Yes, with id {data_asset_id}.')
if not force:
return data_asset_id
self._logger.debug('Creating new asset with local content.')
asset_to_delete = data_asset_id
else:
self._logger.debug('No, creating the data asset.')
data_asset_id = self.create_asset(path, basename)
self._logger.debug('Done.')
if asset_to_delete:
self._logger.debug('Deleting the old data asset.')
if self.delete_asset(asset_to_delete):
self._logger.debug('Done.')
else:
self._logger.warning('Could not delete pre-existing asset.')
return data_asset_id
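# --- Illustrative sketch (not part of the original module) ----------------
# The comment in _create_model() explains that the hard-coded base64 string
# is simply an empty.zip converted with `openssl base64 < empty.zip`. As a
# rough, hedged cross-check, the guarded snippet below regenerates such a
# payload with only the standard library; it performs no WML calls and the
# names used here are purely illustrative.
if __name__ == '__main__':
    import base64 as _b64
    import io
    import zipfile
    _buf = io.BytesIO()
    # Opening and immediately closing an empty archive writes only the
    # 22-byte "end of central directory" record, i.e. a valid empty ZIP file.
    with zipfile.ZipFile(_buf, 'w'):
        pass
    print(_b64.b64encode(_buf.getvalue()).decode('UTF-8'))
    # The output should correspond to the constant used in _create_model().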
| 2.25
| 2
|
lecture_transcriber/utils.py
|
Harrison88/lecture_transcriber
| 0
|
12784534
|
<gh_stars>0
import deepspeech
import numpy as np
class RewindableChunker:
def __init__(self, audiosegment, size=50):
self.audiosegment = audiosegment
self.size = size
self.lower_bounds = range(0, len(audiosegment), size)
self.upper_bounds = range(size, len(audiosegment) + size, size)
self.current_index = 0
def __iter__(self):
while self.current_index < len(self.lower_bounds):
lower_bound = self.lower_bounds[self.current_index]
upper_bound = self.upper_bounds[self.current_index]
yield self.audiosegment[lower_bound:upper_bound]
self.current_index += 1
def rewind(self, ms):
iterations = (ms // self.size) + 1
self.current_index = self.current_index - iterations
def __len__(self):
return len(self.lower_bounds)
@property
def current_time(self):
return self.upper_bounds[self.current_index]
def words_from_metadata(metadata):
word = ""
word_list = []
word_start_time = 0
for index, item in enumerate(metadata.items):
if item.character != " ":
word = word + item.character
if item.character == " " or index == metadata.num_items - 1:
word_duration = item.start_time - word_start_time
if word_duration < 0:
word_duration = 0
each_word = {
"word": word,
"start_time": word_start_time,
"duration": word_duration,
}
word_list.append(each_word)
word = ""
word_start_time = 0
elif len(word) == 1:
word_start_time = item.start_time
return word_list
def create_deepspeech_model(
model, *, beam_width=500, trie=None, lm=None, lm_alpha=0.75, lm_beta=1.85
):
deepspeech_model = deepspeech.Model(model, beam_width)
if trie and lm:
deepspeech_model.enableDecoderWithLM(lm, trie, lm_alpha, lm_beta)
return deepspeech_model
def audiosegment_to_np(audiosegment):
return np.frombuffer(audiosegment._data, np.int16)
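# --- Illustrative usage sketch (not part of the original module) ----------
# RewindableChunker only relies on len() and slicing, so a plain list can
# stand in for a pydub AudioSegment in this rough, guarded example of the
# iterate-then-rewind flow. All numbers below are arbitrary.
if __name__ == "__main__":
    fake_audio = list(range(200))  # pretend this is 200 ms of audio
    chunker = RewindableChunker(fake_audio, size=50)
    for i, chunk in enumerate(chunker):
        print("chunk", i, "has", len(chunk), "samples, ends at", chunker.current_time, "ms")
        if i == 2:
            chunker.rewind(60)  # step back roughly 60 ms worth of chunks
            break
    print("iteration would resume at index", chunker.current_index)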
| 2.59375
| 3
|
ryu/app/sdnhub_apps/stateless_lb.py
|
bpalamol28/TP3SDN
| 0
|
12784535
|
# Copyright (C) 2014 SDN Hub
#
# Licensed under the GNU GENERAL PUBLIC LICENSE, Version 3.
# You may not use this file except in compliance with this License.
# You may obtain a copy of the License at
#
# http://www.gnu.org/licenses/gpl-3.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
import logging
import json
import random
from ryu.lib import mac as mac_lib
from ryu.lib import ip as ip_lib
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4
from ryu.lib.packet import tcp
from ryu.lib.packet import arp
from ryu.ofproto import ether, inet
from ryu.ofproto import ofproto_v1_0, ofproto_v1_3
from ryu.lib import dpid as dpid_lib
from ryu.app.sdnhub_apps import learning_switch
UINT32_MAX = 0xffffffff
################ Main ###################
# The stateless server load balancer picks a different server for each
# request. For making the assignment, it only uses the servers it
# already knows the location of. The clients or the gateway sends along
# a request for the Virtual IP of the load-balancer. The first switch
# intercepting the request will rewrite the headers to match the actual
# server picked. So all other switches will only have to do simple
# L2 forwarding. It is possible to avoid IP header writing if alias IP
# is set on the servers. The call set_rewrite_ip_flag() will handle
# the appropriate flag setting.
class StatelessLB(app_manager.RyuApp):
def __init__(self, *args, **kwargs):
super(StatelessLB, self).__init__(*args, **kwargs)
self.rewrite_ip_header = True
self.server_index = 0
self.servers = []
self.virtual_ip = None
#self.virtual_ip = "10.0.0.5"
self.virtual_mac = "A6:63:DD:D7:C0:C8" # Pick something dummy and
#self.servers.append({'ip':"10.0.0.2", 'mac':"00:00:00:00:00:02"})
#self.servers.append({'ip':"10.0.0.3", 'mac':"00:00:00:00:00:03"})
#self.servers.append({'ip':"10.0.0.4", 'mac':"00:00:00:00:00:04"})
#self.learning_switch = kwargs['learning_switch']
#self.learning_switch.add_exemption({'dl_type': ether.ETH_TYPE_LLDP})
#self.learning_switch.add_exemption({'dl_dst': self.virtual_mac})
def set_learning_switch(self, learning_switch):
self.learning_switch = learning_switch
self.learning_switch.clear_exemption()
self.learning_switch.add_exemption({'dl_dst': self.virtual_mac})
# Users can skip doing header rewriting by setting the virtual IP
    # as an alias IP on all the servers. This works well in a single subnet.
def set_rewrite_ip_flag(self, rewrite_ip):
if rewrite_ip == 1:
self.rewrite_ip_header = True
else:
self.rewrite_ip_header = False
def set_virtual_ip(self, virtual_ip=None):
self.virtual_ip = virtual_ip
def set_server_pool(self, servers=None):
self.servers = servers
def formulate_arp_reply(self, dst_mac, dst_ip):
if self.virtual_ip == None:
return
src_mac = self.virtual_mac
src_ip = self.virtual_ip
arp_opcode = arp.ARP_REPLY
arp_target_mac = dst_mac
ether_proto = ether.ETH_TYPE_ARP
hwtype = 1
arp_proto = ether.ETH_TYPE_IP
hlen = 6
plen = 4
pkt = packet.Packet()
e = ethernet.ethernet(dst_mac, src_mac, ether_proto)
a = arp.arp(hwtype, arp_proto, hlen, plen, arp_opcode,
src_mac, src_ip, arp_target_mac, dst_ip)
pkt.add_protocol(e)
pkt.add_protocol(a)
pkt.serialize()
return pkt
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packet_in_handler(self, ev):
if self.virtual_ip == None or self.servers == None:
return
msg = ev.msg
datapath = msg.datapath
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
in_port = msg.match['in_port']
dpid = datapath.id
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
if eth.ethertype == ether.ETH_TYPE_ARP:
arp_hdr = pkt.get_protocols(arp.arp)[0]
if arp_hdr.dst_ip == self.virtual_ip and arp_hdr.opcode == arp.ARP_REQUEST:
reply_pkt = self.formulate_arp_reply(arp_hdr.src_mac,
arp_hdr.src_ip)
actions = [ofp_parser.OFPActionOutput(in_port)]
out = ofp_parser.OFPPacketOut(datapath=datapath,
in_port=ofp.OFPP_ANY, data=reply_pkt.data,
actions=actions, buffer_id = UINT32_MAX)
datapath.send_msg(out)
return
# Only handle IPv4 traffic going forward
elif eth.ethertype != ether.ETH_TYPE_IP:
return
iphdr = pkt.get_protocols(ipv4.ipv4)[0]
# Only handle traffic destined to virtual IP
if (iphdr.dst != self.virtual_ip):
return
# Only handle TCP traffic
if iphdr.proto != inet.IPPROTO_TCP:
return
tcphdr = pkt.get_protocols(tcp.tcp)[0]
valid_servers = []
for server in self.servers:
outport = self.learning_switch.get_attachment_port(dpid, server['mac'])
if outport != None:
server['outport'] = outport
valid_servers.append(server)
total_servers = len(valid_servers)
        # If there are no servers with a known location, then skip
if total_servers == 0:
return
# Round robin selection of servers
index = self.server_index % total_servers
selected_server_ip = valid_servers[index]['ip']
selected_server_mac = valid_servers[index]['mac']
selected_server_outport = valid_servers[index]['outport']
self.server_index += 1
print "Selected server", selected_server_ip
########### Setup route to server
match = ofp_parser.OFPMatch(in_port=in_port,
eth_type=eth.ethertype, eth_src=eth.src, eth_dst=eth.dst,
ip_proto=iphdr.proto, ipv4_src=iphdr.src, ipv4_dst=iphdr.dst,
tcp_src=tcphdr.src_port, tcp_dst=tcphdr.dst_port)
if self.rewrite_ip_header:
actions = [ofp_parser.OFPActionSetField(eth_dst=selected_server_mac),
ofp_parser.OFPActionSetField(ipv4_dst=selected_server_ip),
ofp_parser.OFPActionOutput(selected_server_outport) ]
else:
actions = [ofp_parser.OFPActionSetField(eth_dst=selected_server_mac),
ofp_parser.OFPActionOutput(selected_server_outport) ]
inst = [ofp_parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]
cookie = random.randint(0, 0xffffffffffffffff)
mod = ofp_parser.OFPFlowMod(datapath=datapath, match=match, idle_timeout=10,
instructions=inst, buffer_id = msg.buffer_id, cookie=cookie)
datapath.send_msg(mod)
########### Setup reverse route from server
match = ofp_parser.OFPMatch(in_port=selected_server_outport,
eth_type=eth.ethertype, eth_src=selected_server_mac, eth_dst=eth.src,
ip_proto=iphdr.proto, ipv4_src=selected_server_ip, ipv4_dst=iphdr.src,
tcp_src=tcphdr.dst_port, tcp_dst=tcphdr.src_port)
if self.rewrite_ip_header:
actions = ([ofp_parser.OFPActionSetField(eth_src=self.virtual_mac),
ofp_parser.OFPActionSetField(ipv4_src=self.virtual_ip),
ofp_parser.OFPActionOutput(in_port) ])
else:
actions = ([ofp_parser.OFPActionSetField(eth_src=self.virtual_mac),
ofp_parser.OFPActionOutput(in_port) ])
inst = [ofp_parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]
cookie = random.randint(0, 0xffffffffffffffff)
mod = ofp_parser.OFPFlowMod(datapath=datapath, match=match, idle_timeout=10,
instructions=inst, cookie=cookie)
datapath.send_msg(mod)
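# --- Illustrative sketch (not part of the original Ryu app) ----------------
# The header comment describes a round-robin assignment over the servers
# whose attachment point is already known. The guarded toy loop below is a
# rough sketch of just that index arithmetic, with made-up entries and no
# Ryu, OpenFlow or packet handling involved.
if __name__ == '__main__':
    demo_servers = [{'ip': '10.0.0.2'}, {'ip': '10.0.0.3'}, {'ip': '10.0.0.4'}]
    demo_index = 0
    for request_number in range(5):
        picked = demo_servers[demo_index % len(demo_servers)]
        print('request %d -> %s' % (request_number, picked['ip']))
        demo_index += 1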
| 1.632813
| 2
|
sample/tool/__init__.py
|
Jeanhwea/python-project-template
| 0
|
12784536
|
# -*- coding: utf-8 -*-
__all__ = ()
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
| 1.3125
| 1
|
thresholding-traceback.py
|
ANDROID564/pc_cyber_lab
| 0
|
12784537
|
<filename>thresholding-traceback.py
import numpy as np
import cv2
def nothing(x):
pass
img = cv2.imread("b4.jpg", cv2.IMREAD_GRAYSCALE)
cv2.namedWindow("Image")
cv2.createTrackbar("Threshold value", "Image", 128, 255, nothing)
#cv2.resizeWindow('image', 10,10)
while True:
value_threshold = cv2.getTrackbarPos("Threshold value", "Image")
_, threshold_binary = cv2.threshold(img, value_threshold, 255, cv2.THRESH_BINARY)
_, threshold_binary_inv = cv2.threshold(img, value_threshold, 255, cv2.THRESH_BINARY_INV)
_, threshold_trunc = cv2.threshold(img, value_threshold, 255, cv2.THRESH_TRUNC)
_, threshold_to_zero = cv2.threshold(img, value_threshold, 255, cv2.THRESH_TOZERO)
_, threshold_to_zero_inv = cv2.threshold(img, value_threshold, 255, cv2.THRESH_TOZERO_INV)
cv2.imshow("Image", img)
cv2.imshow("th binary", threshold_binary)
cv2.imshow("th binary inv", threshold_binary_inv)
cv2.imshow("th trunc", threshold_trunc)
cv2.imshow("th to zero", threshold_to_zero)
cv2.imshow("th to zero inv", threshold_to_zero_inv)
key = cv2.waitKey(100)
if key == 27:
break
cv2.destroyAllWindows()
| 3.5625
| 4
|
m2-modified/ims/common/agentless-system-crawler/tests/functional/test_functional_plugins.py
|
CCI-MOC/ABMI
| 108
|
12784538
|
<reponame>CCI-MOC/ABMI
import shutil
import tempfile
import unittest
import docker
import requests.exceptions
from plugins.systems.cpu_container_crawler import CpuContainerCrawler
from plugins.systems.cpu_host_crawler import CpuHostCrawler
from plugins.systems.memory_container_crawler import MemoryContainerCrawler
from plugins.systems.memory_host_crawler import MemoryHostCrawler
from plugins.systems.os_container_crawler import OSContainerCrawler
from plugins.systems.process_container_crawler import ProcessContainerCrawler
# Tests the FeaturesCrawler class
# Throws an AssertionError if any test fails
# Tests conducted with a single container running.
class HostAndContainerPluginsFunctionalTests(unittest.TestCase):
image_name = 'alpine:latest'
def setUp(self):
self.docker = docker.Client(
base_url='unix://var/run/docker.sock', version='auto')
try:
if len(self.docker.containers()) != 0:
raise Exception(
"Sorry, this test requires a machine with no docker"
"containers running.")
except requests.exceptions.ConnectionError:
print ("Error connecting to docker daemon, are you in the docker"
"group? You need to be in the docker group.")
self.docker.pull(repository='alpine', tag='latest')
self.container = self.docker.create_container(
image=self.image_name, command='/bin/sleep 60')
self.tempd = tempfile.mkdtemp(prefix='crawlertest.')
self.docker.start(container=self.container['Id'])
def tearDown(self):
self.docker.stop(container=self.container['Id'])
self.docker.remove_container(container=self.container['Id'])
shutil.rmtree(self.tempd)
def test_crawl_invm_cpu(self):
fc = CpuHostCrawler()
cores = len(list(fc.crawl()))
assert cores > 0
def test_crawl_invm_mem(self):
fc = MemoryHostCrawler()
cores = len(list(fc.crawl()))
assert cores > 0
def test_crawl_outcontainer_cpu(self):
fc = CpuContainerCrawler()
for key, feature, t in fc.crawl(self.container['Id']):
print key, feature
cores = len(list(fc.crawl(self.container['Id'])))
assert cores > 0
def test_crawl_outcontainer_os(self):
fc = OSContainerCrawler()
assert len(list(fc.crawl(self.container['Id']))) == 1
def test_crawl_outcontainer_processes(self):
fc = ProcessContainerCrawler()
# sleep + crawler
assert len(list(fc.crawl(self.container['Id']))) == 2
def test_crawl_outcontainer_processes_mmapfiles(self):
fc = ProcessContainerCrawler()
output = "%s" % list(fc.crawl(self.container['Id'], get_mmap_files='True'))
assert '/bin/busybox' in output
def test_crawl_outcontainer_mem(self):
fc = MemoryContainerCrawler()
output = "%s" % list(fc.crawl(self.container['Id']))
assert 'memory_used' in output
if __name__ == '__main__':
unittest.main()
| 2.09375
| 2
|
Pygame/SimplePygame/SimplePygame.py
|
kasztp/python-lessons
| 35
|
12784539
|
import pygame
from pygame.locals import *
pygame.init()
screen = pygame.display.set_mode((600, 600), 0)
pygame.display.set_caption('Simple Pygame Game')
bee = pygame.image.load('bee1.png').convert_alpha()
beeX = 0
beeY = 0
clock = pygame.time.Clock()
loop = True
while loop:
for event in pygame.event.get():
if event.type == QUIT \
or (event.type == KEYDOWN and event.key == K_ESCAPE):
loop = False
keystate = pygame.key.get_pressed()
if keystate[K_RIGHT]:
beeX += 5
screen.fill((0,120,0))
screen.blit(bee, (beeX, beeY))
pygame.display.flip()
clock.tick(60)
pygame.quit()
| 3.328125
| 3
|
ansible-devel/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/fakemodule.py
|
satishcarya/ansible
| 0
|
12784540
|
<gh_stars>0
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
module: fakemodule
short_desciption: fake module
description:
- this is a fake module
options:
_notreal:
description: really not a real option
author:
- me
"""
import json
def main():
print(json.dumps(dict(changed=False, source='testns.testcol.fakemodule')))
if __name__ == '__main__':
main()
| 1.765625
| 2
|
radcontrold.py
|
davea/radcontrold
| 1
|
12784541
|
#!/usr/bin/env python
import sys
import logging
from configparser import ConfigParser
from os.path import expanduser
from socket import gethostname
from time import sleep
from eq3bt import Thermostat, Mode
from bluepy.btle import BTLEException
from mqttwrapper import run_script
log = logging.getLogger("radcontrold")
def callback(topic, payload, config):
log.debug("%s %s", topic, payload)
room = topic.split("/")[2]
mode = {
b'0': Mode.Closed,
b'1': Mode.Open,
}.get(payload)
if mode is None:
log.warning("Ignoring invalid payload on %s", topic)
return
addresses = config['radiators'].get(room, "")
if not addresses:
# Control message is for a radiator we're not responsible for.
log.debug("No EQ3 addresses in config for %s", room)
return
success = True
for address in addresses.split(","):
for attempt in range(10):
try:
Thermostat(address).mode = mode
log.info("Set %s in %s to %s", address, room, mode)
break
except BTLEException:
log.warning("Couldn't set mode %s for %s in %s", mode, address, room)
sleep(1)
else:
success = False
# Only post acknowledgment to MQTT topic if all thermostats were controlled.
if success:
return [
("{}/ack".format(topic), payload)
]
def main():
formatter = "[%(asctime)s] %(name)s %(levelname)s - %(message)s"
logging.basicConfig(level=logging.DEBUG, format=formatter)
logging.getLogger('eq3bt').setLevel(logging.ERROR)
hostname = gethostname().split(".")[0]
config = ConfigParser()
config.read(expanduser("~/.config/radcontrold/{}.ini".format(hostname)))
if not config.has_section('radiators') or len(config['radiators']) == 0:
log.warning("No config for {}, exiting.".format(hostname))
sys.exit(0)
run_script(callback, broker=config['mqtt']['broker'], topics=['control/radiator/+/active'], config=config)
if __name__ == '__main__':
main()
| 2.109375
| 2
|
src/model.py
|
rish-16/Lightning-Transformer
| 2
|
12784542
|
<gh_stars>1-10
import torch
import numpy as np
from torch import nn
import pytorch_lightning as pl
class Attention(pl.LightningModule):
def __init__(self, d_model, num_heads, p, d_input=None):
super().__init__()
self.num_heads = num_heads
self.d_model = d_model
if d_input is None:
d_xq = d_xk = d_xv = d_model
else:
d_xq, d_xk, d_xv = d_input
assert d_model % self.num_heads == 0
self.d_k = d_model // self.num_heads
self.W_q = nn.Linear(d_xq, d_model, bias=False)
self.W_k = nn.Linear(d_xk, d_model, bias=False)
self.W_v = nn.Linear(d_xv, d_model, bias=False)
self.W_h = nn.Linear(d_model, d_model)
def scaled_dot_product_attention(self, Q, K, V):
batch_size = Q.size(0)
k_length = K.size(2)
Q = Q / np.sqrt(self.d_k)
scores = torch.matmul(Q, K.transpose(2, 3))
A = nn.Softmax(dim=-1)(scores)
H = torch.matmul(A, V)
return H, A
def split_heads(self, x, batch_size):
return x.view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)
def group_heads(self, x, batch_size):
return x.transpose(1, 2).contiguous().view(batch_size, -1, self.num_heads * self.d_k)
def forward(self, X_q, X_k, X_v):
batch_size, seq_length, dim = X_q.size()
Q = self.split_heads(self.W_q(X_q), batch_size)
K = self.split_heads(self.W_k(X_k), batch_size)
V = self.split_heads(self.W_v(X_v), batch_size)
H_cat, A = self.scaled_dot_product_attention(Q, K, V)
H_cat = self.group_heads(H_cat, batch_size)
        H = self.W_h(H_cat)
return H, A
class Encoder(pl.LightningModule):
    def __init__(self, d_model, num_heads, conv_hidden_dim, p=0.1):
        super().__init__()
        self.mha = Attention(d_model, num_heads, p)
        # Position-wise feed-forward block; forward() refers to it as self.cnn.
        self.cnn = nn.Sequential(nn.Linear(d_model, conv_hidden_dim), nn.ReLU(), nn.Linear(conv_hidden_dim, d_model))
        self.layernorm1 = nn.LayerNorm(normalized_shape=d_model, eps=1e-6)
        self.layernorm2 = nn.LayerNorm(normalized_shape=d_model, eps=1e-6)
def forward(self, x):
attn_output, _ = self.mha(x, x, x)
out1 = self.layernorm1(x + attn_output)
cnn_output = self.cnn(out1)
out2 = self.layernorm2(out1 + cnn_output)
return out2
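# --- Illustrative shape check (not part of the original module) -----------
# A rough, guarded sanity check of the Attention block defined above: random
# inputs of shape (batch, seq_len, d_model) keep that shape on output, while
# the attention map has shape (batch, num_heads, seq_len, seq_len). The sizes
# below are arbitrary and chosen only for illustration.
if __name__ == "__main__":
    batch, seq_len, d_model, heads = 2, 10, 64, 8
    attn = Attention(d_model=d_model, num_heads=heads, p=0.1)
    x = torch.rand(batch, seq_len, d_model)
    out, attn_map = attn(x, x, x)
    print(out.shape)       # expected: torch.Size([2, 10, 64])
    print(attn_map.shape)  # expected: torch.Size([2, 8, 10, 10])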
| 2.40625
| 2
|
tests/fixture_classes/db.py
|
em-2/em2
| 0
|
12784543
|
from em2.core import Database
class FakeConn:
async def execute(self, *args, **kwargs):
pass
class DummyAcquireContext:
def __init__(self, conn):
self.conn = conn
async def __aenter__(self):
return self.conn
async def __aexit__(self, exc_type, exc_val, exc_tb):
pass
class TestDatabase(Database):
def __init__(self, loop, settings):
super().__init__(loop, settings)
self.conn = getattr(settings, '_test_conn', FakeConn())
async def startup(self):
pass
def acquire(self, *, timeout=None):
return DummyAcquireContext(self.conn)
async def close(self):
pass
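# --- Illustrative sketch (not part of the original fixtures) ---------------
# A rough, guarded demonstration of how DummyAcquireContext hands back the
# fake connection inside an `async with` block. It only uses asyncio and the
# classes above; no em2 settings object or real database is involved.
if __name__ == '__main__':
    import asyncio
    async def _demo():
        async with DummyAcquireContext(FakeConn()) as conn:
            await conn.execute('SELECT 1')
            print('executed against', type(conn).__name__)
    asyncio.run(_demo())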
| 2.53125
| 3
|
dedalus/Tag.py
|
xzoert/dedalus
| 0
|
12784544
|
class Tag:
ASSIGNED=1
NOT_ASSIGNED=0
INHERITED=2
def __init__(self,name):
self.name=name.strip()
self.key=name
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return self.name == other.name
def __ne__(self, other):
return not(self == other)
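# --- Illustrative sketch (not part of the original module) -----------------
# Tag hashes and compares on the stripped name, so names differing only in
# surrounding whitespace collapse to a single entry in a set. A short,
# guarded demonstration of that behaviour:
if __name__ == '__main__':
    tags = {Tag('music'), Tag(' music '), Tag('books')}
    print(len(tags))                      # 2: 'music' and 'books'
    print(Tag('music') == Tag(' music'))  # True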
| 3.03125
| 3
|
admin/management/commands/deploy.py
|
adelsonllima/djangoplus
| 21
|
12784545
|
# -*- coding: utf-8 -*-
import os
import sys
import json
import time
import datetime
from django.core.management.base import BaseCommand
from fabric.api import *
from fabric.contrib.files import exists, append, contains
from django.conf import settings
username = 'root'
project_dir = os.getcwd()
project_name = project_dir.split('/')[-1]
remote_project_dir = '/var/opt/{}'.format(project_name)
env.user = username
env.connection_attempts = 10
# centos 7 - wkhtmltopdf
# yum -y install xorg-x11-server-Xvfb xorg-x11-fonts-Type1 xorg-x11-fonts-75dpi
# curl -O -L https://downloads.wkhtmltopdf.org/0.12/0.12.5/wkhtmltox-0.12.5-1.centos7.x86_64.rpm
# yum -y localinstall wkhtmltox-0.12.5-1.centos7.x86_64.rpm
# export QT_XKB_CONFIG_ROOT=/usr/share/X11/xkb
# ln -s /usr/local/bin/wkhtmltopdf /bin
# pip install pdfkit
# python
# import pdfkit
# pdfkit.from_string('Hello!', 'out.pdf')
class Command(BaseCommand):
VERBOSE = False
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
        parser.add_argument('--push', action='store_true', dest='push', default=False, help='Synchronize files only')
        parser.add_argument('--update', action='store_true', dest='update', default=False, help='Synchronize files and update the requirements')
parser.add_argument('--deploy', action='store_true', dest='deploy', default=False, help='Deploy the application.')
parser.add_argument('--create', action='store_true', dest='create', default=False, help='Creates a new droplet and deploy the application.')
parser.add_argument('--verbose', action='store_true', dest='verbose', default=False, help='Verbose the output')
def handle(self, *args, **options):
if 'help' not in options:
if settings.DIGITAL_OCEAN_TOKEN:
execute_task = deploy
Command.VERBOSE = options.get('verbose', False)
output['running'] = Command.VERBOSE
output['warnings'] = Command.VERBOSE
output['stdout'] = Command.VERBOSE
output['stderr'] = Command.VERBOSE
if options.get('push'):
execute_task = push
elif options.get('update'):
execute_task = update
if options.get('create') or settings.DIGITAL_OCEAN_SERVER:
if options.get('create'):
host = _create_droplet()
if host:
env.hosts = [host]
execute(execute_task, host=host)
else:
print('Sorry! The droplet could not be created.')
else:
host = _check_droplet()
if host:
env.hosts = [host]
execute(execute_task, host=host)
else:
print('Sorry! The droplet {} could not be found'.format(settings.DIGITAL_OCEAN_SERVER))
else:
print('Please, set the DIGITAL_OCEAN_SERVER variable in settings.py or execute the'
' command with --create parameter to create a new droplet.')
else:
print('Please, set the DIGITAL_OCEAN_TOKEN variable in settings.py')
GIT_INGORE_FILE_CONTENT = '''*˜
*.pyc
.svn
.DS_Store
.DS_Store?
._*
.idea/
djangoplus/.idea/*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
.project
.pydevproject
.settings/*
sqlite.db
mail/*
media/*
dist/
djangoplus.egg-info/
*/dist/
*/*egg-info/
geckodriver.log
videos/*
'''
NGINEX_FILE_CONTENT = '''server {{
client_max_body_size 100M;
listen {port};
server_name {server_name};
access_log /var/opt/{project_name}/logs/nginx_access.log;
error_log /var/opt/{project_name}/logs/nginx_error.log;
location /static {{
alias /var/opt/{project_name}/static;
}}
location /media {{
alias /var/opt/{project_name}/media;
}}
location / {{
proxy_pass_header Server;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_set_header X-Real_IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_connect_timeout 600s;
proxy_send_timeout 600s;
proxy_read_timeout 600s;
proxy_pass http://localhost:{local_port}/;
}}
}}
'''
SUPERVISOR_FILE_CONTENT = '''[program:{project_name}]
directory = /var/opt/{project_name}
user = www-data
command = /var/opt/{project_name}/gunicorn_start.sh
stdout_logfile = /var/opt/{project_name}/logs/supervisor_out.log
stderr_logfile = /var/opt/{project_name}/logs/supervisor_err.log
'''
GUNICORN_FILE_CONTENT = '''#!/bin/bash
set -e
source /var/opt/.virtualenvs/{project_name}/bin/activate
mkdir -p /var/opt/{project_name}/logs
cd /var/opt/{project_name}
export QT_QPA_PLATFORM='offscreen'
export QT_QPA_FONTDIR='/usr/share/fonts/truetype/dejavu/'
exec gunicorn {project_name}.wsgi:application -w 1 -b 127.0.0.1:{port} --timeout=600 --user=www-data --group=www-data --log-level=debug --log-file=/var/opt/{project_name}/logs/gunicorn.log 2>>/var/opt/{project_name}/logs/gunicorn.log
'''
LIMITS_FILE_CONTENT = '''
* soft nofile 65536
* hard nofile 65536
root soft nofile 65536
root hard nofile 65536
'''
BASHRC_FILE_CONTENT = '''
export WORKON_HOME=/var/opt/.virtualenvs
mkdir -p $WORKON_HOME
source /usr/local/bin/virtualenvwrapper.sh
'''
def _debug(s):
if Command.VERBOSE:
print('[{}] {}\n'.format(datetime.datetime.now(), s))
def _available_port():
nginex_dir = '/etc/nginx/sites-enabled'
port = 8000
with cd(nginex_dir):
files = run('ls').split()
files.remove('default')
if project_name in files:
files = [project_name]
if files:
command = "grep localhost {} | grep -o '[0-9]*'".format(' '.join(files))
            # Sort numerically rather than lexicographically so '10000' beats '9000'.
            ports = sorted(int(p) for p in run(command).split())
            port = ports[-1]
if project_name not in files:
port = int(port) + 1
_debug('Returning port {}!'.format(port))
return int(port)
def _check_local_keys():
local_home_dir = local('echo $HOME', capture=True)
local_ssh_dir = os.path.join(local_home_dir, '.ssh')
local_public_key_path = os.path.join(local_ssh_dir, 'id_rsa.pub')
if not os.path.exists(local_ssh_dir):
_debug('Creating dir {}...'.format(local_ssh_dir))
local('mkdir {}'.format(local_ssh_dir))
if not os.path.exists(local_public_key_path):
local("ssh-keygen -f {}/id_rsa -t rsa -N ''".format(local_ssh_dir))
key = open(local_public_key_path, 'r').read().strip()
_debug('Checking if private key was uploaded to digital ocean...')
url = 'https://api.digitalocean.com/v2/account/keys'
command = '''curl -X GET -H 'Content-Type: application/json' -H 'Authorization: Bearer {}' "{}"'''.format(settings.DIGITAL_OCEAN_TOKEN, url)
response = local(command, capture=True)
# print response
if key not in response:
_debug('Uploading private key to digital ocean...')
command = '''curl -X POST -H 'Content-Type: application/json' -H 'Authorization: Bearer {}' -d '{{"name":"{}","public_key":"{}"}}' "{}"'''.format(settings.DIGITAL_OCEAN_TOKEN, 'Default', key, url)
response = local(command, capture=True)
# print response
def _check_remote_keys():
local_home_dir = local('echo $HOME', capture=True)
local_ssh_dir = os.path.join(local_home_dir, '.ssh')
local_public_key_path = os.path.join(local_ssh_dir, 'id_rsa.pub')
local_private_key_path = os.path.join(local_ssh_dir, 'id_rsa')
remote_home_dir = run('echo $HOME')
remote_ssh_dir = os.path.join(remote_home_dir, '.ssh')
remote_public_key_path = os.path.join(remote_ssh_dir, 'id_rsa.pub')
remote_private_key_path = os.path.join(remote_ssh_dir, 'id_rsa')
remote_private_known_hosts_path = os.path.join(remote_ssh_dir, 'known_hosts')
if not exists(remote_ssh_dir):
        _debug('Creating remote dir {}...'.format(remote_ssh_dir))
run('mkdir -p {}'.format(remote_ssh_dir))
_debug('Creating empty file {}...'.format(remote_private_known_hosts_path))
run('touch {}'.format(remote_private_known_hosts_path))
with cd(remote_ssh_dir):
public_key = open(local_public_key_path, 'r').read()
private_key = open(local_private_key_path, 'r').read()
_debug('Checking if public key is in file {}...'.format(remote_public_key_path))
if not contains(remote_public_key_path, public_key):
_debug('Appending public key in file {}...'.format(remote_public_key_path))
append(remote_public_key_path, public_key)
_debug('Checking if private key is in file {}...'.format(remote_private_key_path))
if not contains(remote_private_key_path, private_key):
_debug('Appending private key in file {}...'.format(remote_private_key_path))
append(remote_private_key_path, private_key)
run('chmod 644 {}'.format(remote_public_key_path))
run('chmod 600 {}'.format(remote_private_key_path))
_debug('Checking if {} is in file {}...'.format(env.hosts[0], remote_private_known_hosts_path))
if not contains(remote_private_known_hosts_path, env.hosts[0]):
_debug('Appending {} in file {}...'.format(env.hosts[0], remote_private_known_hosts_path))
run('ssh-keyscan {} >> {}'.format(env.hosts[0], remote_private_known_hosts_path))
def _check_repository():
with cd('/home'):
git_dir = '/home/git'
if not exists(git_dir):
run('adduser --disabled-password --gecos "" git')
run('mkdir /home/git/.ssh && chmod 700 /home/git/.ssh')
run('touch /home/git/.ssh/authorized_keys && chmod 600 /home/git/.ssh/authorized_keys')
run('cat /root/.ssh/authorized_keys >> /home/git/.ssh/authorized_keys')
run('chown -R git.git /home/git/.ssh/')
project_git_dir = '/home/git/{}.git'.format(project_name)
if not exists(project_git_dir):
run('mkdir {}'.format(project_git_dir))
run('cd {} && git init --bare'.format(project_git_dir))
run('chown -R git.git {}'.format(project_git_dir))
return 'git@{}:{}.git'.format(env.hosts[0], project_name)
def _setup_local_repository():
_debug('Checking if local project is a git project...')
if not os.path.exists(os.path.join(project_dir, '.git')):
with cd(project_dir):
_debug('Making local project a git project...')
repository_url = _check_repository()
local('git init')
local('git remote add origin "{}"'.format(repository_url))
local('echo "..." > README.md')
local('echo "{}" > .gitignore'.format(GIT_INGORE_FILE_CONTENT))
local('git config --global user.email "<EMAIL>"')
local('git config --global user.name "user"')
def _setup_remote_repository():
_debug('Checking if the project was cloned in remote server...')
if not exists(remote_project_dir):
with cd('/var/opt'):
_debug('Cloning project in remote server...')
repository_url = _check_repository()
run('git clone {} {}'.format(repository_url, project_name))
run('chown -R www-data.www-data {}'.format(project_name))
_debug('Updating project in remote server...')
with cd(remote_project_dir):
run('git pull origin master')
def _push_local_changes():
_debug('Checking if project has local changes...')
now = datetime.datetime.now().strftime("%Y%m%d %H:%M:%S")
with cd(project_dir):
if 'nothing to commit' not in local('git status', capture=True):
_debug('Comminting local changes...')
files = []
for file_name in local('ls', capture=True).split():
if file_name not in GIT_INGORE_FILE_CONTENT or file_name == 'fabfile.py':
files.append(file_name)
files.append('.gitignore')
for pattern in NGINEX_FILE_CONTENT.split():
if pattern in files:
files.remove(pattern)
local('git add {}'.format(' '.join(files)))
local("git commit -m '{}'".format(now))
_debug('Uploading local changes...')
local('git push origin master')
def _setup_remote_env():
_debug('Checking if the virtualenv dir was created in remote server...')
virtual_env_dir = '/var/opt/.virtualenvs'
if not exists(virtual_env_dir):
_debug('Creating dir {}'.format(virtual_env_dir))
run('mkdir -p {}'.format(virtual_env_dir))
project_env_dir = os.path.join(virtual_env_dir, project_name)
_debug('Checking if virtualenv for the project was created...')
if not exists(project_env_dir):
with shell_env(WORKON_HOME=virtual_env_dir):
_debug('Creating virtual env {}'.format(project_name))
run('source /usr/local/bin/virtualenvwrapper.sh && mkvirtualenv --python=/usr/bin/python3 {}'.format(project_name))
def _setup_remote_project():
with cd(remote_project_dir):
_debug('Checking project requirements..')
if exists('requirements.txt'):
virtual_env_dir = '/var/opt/.virtualenvs'
with shell_env(WORKON_HOME=virtual_env_dir):
_debug('Installing/Updating project requirements...')
run('source /usr/local/bin/virtualenvwrapper.sh && workon {} && pip3 install --upgrade pip'.format(project_name))
run('source /usr/local/bin/virtualenvwrapper.sh && workon {} && pip3 install -U -r requirements.txt'.format(project_name))
_debug('Checking if necessary dirs (logs, media and static) were created...')
run('mkdir -p logs')
run('mkdir -p static')
run('mkdir -p media')
_debug('Granting access to www-data...')
run('chown -R www-data.www-data .')
def _check_domain():
if settings.DIGITAL_OCEAN_DOMAIN:
url = 'https://api.digitalocean.com/v2/domains'
command = '''curl -X GET -H 'Content-Type: application/json' -H 'Authorization: Bearer {}' "{}/{}"'''.format(settings.DIGITAL_OCEAN_TOKEN, url, settings.DIGITAL_OCEAN_DOMAIN)
_debug('Checking if domain {} was created...'.format(settings.DIGITAL_OCEAN_DOMAIN))
data = json.loads(local(command, capture=True))
if data.get('id', None) == 'not_found':
_debug('Creating domain {}...'.format(settings.DIGITAL_OCEAN_DOMAIN))
ip_address = env.hosts[0]
command = '''curl -X POST -H 'Content-Type: application/json' -H 'Authorization: Bearer {}' -d '{{"name":"{}","ip_address":"{}"}}' "{}"'''.format(settings.DIGITAL_OCEAN_TOKEN, settings.DIGITAL_OCEAN_DOMAIN, ip_address, url)
data = json.loads(local(command, capture=True))
ip_address = None
try:
ip_address = local('dig {} a +short'.format(settings.DIGITAL_OCEAN_DOMAIN), capture=True).strip()
except Exception as e:
print(e)
if ip_address != env.hosts[0]:
_debug('The domain is not activated yet. The ip {} is going to be used for the deploy.'.format(env.hosts[0]))
return None
return settings.DIGITAL_OCEAN_DOMAIN
def _print_remote_url():
file_path = '/etc/nginx/sites-enabled/{}'.format(project_name)
local_file_path = '/tmp/nginx.tmp'
get(file_path, local_file_path)
file_content = open(local_file_path).read()
server_name = None
port = None
for line in file_content.split('\n'):
if 'server_name ' in line:
server_name = line.strip().split()[1].replace(';', '')
elif 'listen ' in line:
port = line.strip().split()[1].replace(';', '')
url = 'http://{}'.format(server_name)
if int(port) != 80:
url = '{}:{}'.format(url, port)
print(('\n\n\nURL: {}\n\n'.format(url)))
def _setup_nginx_file():
file_path = '/etc/nginx/sites-enabled/{}'.format(project_name)
_debug('Checking nginx file {}...'.format(file_path))
checked_domain = _check_domain()
if exists(file_path):
local_file_path = '/tmp/nginx.tmp'
get(file_path, local_file_path)
file_content = open(local_file_path, 'r').read()
if checked_domain and checked_domain not in file_content:
content = []
for line in file_content.split('\n'):
if 'server_name ' in line:
line = line.replace('server_name', 'server_name {}'.format(checked_domain))
elif 'listen ' in line:
line = ' listen 80;'
content.append(line)
            with open('/tmp/nginx.tmp', 'w') as file_descriptor:
                file_descriptor.write('\n'.join(content))
            put('/tmp/nginx.tmp', file_path)
_debug('Restarting nginx...')
run('/etc/init.d/nginx restart')
else:
_debug('Creating nginx file {}...'.format(file_path))
local_port = _available_port()
if checked_domain:
port = 80
server_name = checked_domain
else:
port = local_port + 1000
server_name = env.hosts[0]
text = NGINEX_FILE_CONTENT.format(project_name=project_name, server_name=server_name, port=port, local_port=local_port)
append(file_path, text)
_debug('Nginx configured with {}:{}'.format(server_name, port))
_debug('Restarting nginx...')
run('/etc/init.d/nginx restart')
def _setup_supervisor_file():
    file_path = '/etc/supervisor/conf.d/{}.conf'.format(project_name)
_debug('Checking supervisor file {}...'.format(file_path))
if not exists(file_path):
_debug('Creating supervisor file {}...'.format(file_path))
text = SUPERVISOR_FILE_CONTENT.format(project_name=project_name)
append(file_path, text)
_debug('Reloading supervisorctl...')
run('supervisorctl reload')
def _setup_gunicorn_file():
    file_path = '/var/opt/{}/gunicorn_start.sh'.format(project_name)
_debug('Checking gunicorn file {}...'.format(file_path))
if not exists(file_path):
_debug('Creating gunicorn file {}'.format(file_path))
port = _available_port()
text = GUNICORN_FILE_CONTENT.format(project_name=project_name, port=port)
append(file_path, text)
run('chmod a+x {}'.format(file_path))
def _setup_postgres():
    file_path = '/etc/postgresql/9.6/main/pg_hba.conf'
if not exists(file_path):
run('apt-get -y install postgresql postgresql-contrib')
run('cp {} /tmp'.format(file_path))
run('echo "local all postgres trust\\nhost all '
'postgres 127.0.0.1/32 trust\\nhost all postgres ::1/128 '
' trust" > {}'.format(file_path))
run('/etc/init.d/postgresql restart')
def _setup_remote_webserver():
_setup_nginx_file()
_setup_supervisor_file()
_setup_gunicorn_file()
def _reload_remote_application():
_debug('Updating project in remote server...')
with cd(remote_project_dir):
virtual_env_dir = '/var/opt/.virtualenvs'
with shell_env(WORKON_HOME=virtual_env_dir):
run('source /usr/local/bin/virtualenvwrapper.sh && workon {} && python manage.py sync'.format(project_name))
run('chown -R www-data.www-data .')
run('chmod a+w *.db')
run('ls -l')
_debug('Restarting supervisorctl...')
run('supervisorctl restart {}'.format(project_name))
def _delete_remote_project():
    _debug('Deleting remote project...')
if exists(remote_project_dir):
run('rm -r {}'.format(remote_project_dir))
def _delete_remote_env():
_debug('Deleting remote env...')
run('source /usr/local/bin/virtualenvwrapper.sh && rmvirtualenv {}'.format(project_name))
def _delete_domain():
url = 'https://api.digitalocean.com/v2/domains'
if settings.DIGITAL_OCEAN_DOMAIN:
_debug('Deleting domain {}...'.format(settings.DIGITAL_OCEAN_DOMAIN))
command = '''curl -X DELETE -H 'Content-Type: application/json' -H 'Authorization: Bearer {}' "{}/{}"'''.format(settings.DIGITAL_OCEAN_TOKEN, url, settings.DIGITAL_OCEAN_DOMAIN)
local(command)
def _delete_repository():
project_git_dir = '/home/git/{}.git'.format(project_name)
if exists(project_git_dir):
run('rm -r {}'.format(project_git_dir))
def _delete_local_repository():
_debug('Deleting local repository...')
with cd(project_dir):
local('rm -rf .git')
def _delete_nginx_file():
_debug('Deleting nginx file...')
    file_path = '/etc/nginx/sites-enabled/{}'.format(project_name)
if exists(file_path):
run('rm {}'.format(file_path))
def _delete_supervisor_file():
_debug('Deleting supervisor file..')
file_path = '/etc/supervisor/conf.d/{}.conf'.format(project_name)
if exists(file_path):
run('rm {}'.format(file_path))
def _reload_remote_webserver():
_debug('Reloading supervisorctl...')
run('supervisorctl reload')
_debug('Reloading nginx...')
run('/etc/init.d/nginx restart')
_debug('Starting supervisor...')
run('service supervisor start')
def _configure_crontab():
_debug('Configuring crontab...')
output = run("crontab -l")
line = '0 * * * * /var/opt/.virtualenvs/{}/bin/python /var/opt/{}/manage.py backup >/tmp/cron.log 2>&1'.format(
project_name, project_name)
if line not in output:
        run('crontab -l | {{ cat; echo "{}"; }} | crontab -'.format(line))
def _check_droplet():
_check_local_keys()
url = 'https://api.digitalocean.com/v2/droplets/'
command = '''curl -X GET -H 'Content-Type: application/json' -H 'Authorization: Bearer {}' "{}"'''.format(settings.DIGITAL_OCEAN_TOKEN, url)
_debug('Checking if droplet exists...')
response = json.loads(local(command, capture=True))
if 'droplets' in response:
for droplet in response['droplets']:
ip_address = droplet['networks']['v4'][0]['ip_address']
if droplet['name'] == project_name or ip_address == settings.DIGITAL_OCEAN_SERVER:
_debug('Droplet found with IP {}'.format(ip_address))
local_home_dir = local('echo $HOME', capture=True)
local_known_hosts_path = os.path.join(local_home_dir, '.ssh/known_hosts')
_debug('Checking if file {} exists...'.format(local_known_hosts_path))
if not os.path.exists(local_known_hosts_path):
_debug('Creating empty file {}...'.format(local_known_hosts_path))
local('touch {}'.format(local_known_hosts_path))
local_known_hosts_file_content = open(local_known_hosts_path, 'r').read()
if ip_address not in local_known_hosts_file_content:
_debug('Registering {} as known host...'.format(ip_address))
time.sleep(5)
local('ssh-keyscan -T 15 {} >> {}'.format(ip_address, local_known_hosts_path))
if settings.DIGITAL_OCEAN_SERVER not in local_known_hosts_file_content:
_debug('Registering {} as known host...'.format(settings.DIGITAL_OCEAN_SERVER))
local('ssh-keyscan {} >> {}'.format(settings.DIGITAL_OCEAN_SERVER, local_known_hosts_path))
return ip_address
        _debug('No droplet could be found for the project')
else:
raise Exception(response)
def _create_droplet():
# curl -X GET --silent "https://api.digitalocean.com/v2/images?per_page=999" -H "Authorization: Bearer XXXXXXX"
_check_local_keys()
if settings.DIGITAL_OCEAN_TOKEN:
url = 'https://api.digitalocean.com/v2/account/keys'
_debug('Getting installed keys at digital ocean...')
command = '''curl -X GET -H 'Content-Type: application/json' -H 'Authorization: Bearer {}' "{}"'''.format(settings.DIGITAL_OCEAN_TOKEN, url)
response = json.loads(local(command, capture=True))
# print response
ssh_keys = []
for ssh_key in response['ssh_keys']:
ssh_keys.append(ssh_key['id'])
_debug('Creating droplet...')
url = 'https://api.digitalocean.com/v2/droplets/'
command = '''curl -X POST -H 'Content-Type: application/json' -H 'Authorization: Bearer {}' -d '{{"name":"{}","region":"{}","size":"{}","image":"{}", "ssh_keys":{}}}' "{}"'''.format(settings.DIGITAL_OCEAN_TOKEN, project_name, 'nyc3', '512mb', 'debian-9-x64', ssh_keys, url)
response = json.loads(local(command, capture=True))
droplet_id = response['droplet']['id']
time.sleep(15)
url = 'https://api.digitalocean.com/v2/droplets/{}/'.format(droplet_id)
command = '''curl -X GET -H 'Content-Type: application/json' -H 'Authorization: Bearer {}' "{}"'''.format(settings.DIGITAL_OCEAN_TOKEN, url)
response = json.loads(local(command, capture=True))
ip_address = response['droplet']['networks']['v4'][0]['ip_address']
_debug('Droplet created with IP {}!'.format(ip_address))
_update_settings_file(ip_address)
return _check_droplet()
_debug('Please, set the DIGITAL_OCEAN_TOKEN value in settings.py file')
sys.exit()
def _execute_aptget():
with cd('/'):
if not exists('/swap.img'):
run('apt-get update')
run('apt-get -y install python-pip')
run('pip install virtualenv virtualenvwrapper')
run('apt-get -y install python3 python3-pip build-essential python3-dev git nginx supervisor libncurses5-dev')
run('apt-get -y install vim')
run('apt-get -y install libjpeg62-turbo-dev libfreetype6-dev libtiff5-dev liblcms2-dev libwebp-dev tk8.6-dev libjpeg-dev')
run('apt-get -y install wkhtmltopdf xvfb')
run('apt-get -y install htop')
if not contains('/etc/security/limits.conf', '65536'):
# print LIMITS_FILE_CONTENT
append('/etc/security/limits.conf', LIMITS_FILE_CONTENT)
run('pip3 install --upgrade pip')
if not contains('/root/.bashrc', 'WORKON_HOME'):
# print BASHRC_FILE_CONTENT
append('/root/.bashrc', BASHRC_FILE_CONTENT)
if not exists('/swap.img'):
run('lsb_release -a')
run('dd if=/dev/zero of=/swap.img bs=1024k count=2000')
run('mkswap /swap.img')
run('swapon /swap.img')
run('echo "/swap.img none swap sw 0 0" >> /etc/fstab')
def _update_settings_file(ip):
_debug('Updating settings.py file with {} for DIGITAL_OCEAN_SERVER'.format(ip))
settings_file_path = os.path.join(settings.BASE_DIR, '{}/settings.py'.format(project_name))
content = []
settings_file = open(settings_file_path)
lines = settings_file.read().split('\n')
settings_file.close()
for line in lines:
if 'DIGITAL_OCEAN_SERVER' in line:
line = 'DIGITAL_OCEAN_SERVER = \'{}\''.format(ip)
content.append(line)
content_str = '\n'.join(content)
print(content_str)
settings_file = open(settings_file_path, 'w')
settings_file.write(content_str)
settings_file.close()
def backupdb():
local_home_dir = local('echo $HOME', capture=True)
backup_dir = os.path.join(local_home_dir, 'backup')
if not os.path.exists(backup_dir):
local('mkdir -p {}'.format(backup_dir))
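    # Copy each project's SQLite database from the remote server into the local backup directory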
with cd('/var/opt'):
for entry in run('ls').split():
file_name = '/var/opt/{}/sqlite.db'.format(entry)
            backup_file_name = os.path.join(backup_dir, '{}.db'.format(entry))
            if exists(file_name):
                command = 'scp {}@{}:{} {}'.format(username, env.hosts[0], file_name, backup_file_name)
local(command)
def install_docker():
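    # Install Docker CE from the official Docker apt repository on Debian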
run('apt update')
run('apt install -y apt-transport-https ca-certificates curl gnupg2 software-properties-common')
run('curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add -')
run('add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable"')
run('apt update')
run('apt-cache policy docker-ce')
run('apt install -y docker-ce')
def deploy():
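    # Full first-time deployment: provision packages, push the code, and configure the services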
_execute_aptget()
_setup_postgres()
_check_remote_keys()
_setup_local_repository()
_push_local_changes()
_setup_remote_env()
_setup_remote_repository()
_setup_remote_project()
_setup_remote_webserver()
_reload_remote_application()
_print_remote_url()
def update():
_push_local_changes()
_setup_remote_repository()
_setup_remote_project()
_reload_remote_application()
_setup_nginx_file()
_print_remote_url()
def push():
_push_local_changes()
_setup_remote_repository()
_reload_remote_application()
_print_remote_url()
def undeploy():
_delete_remote_project()
_delete_domain()
_delete_repository()
_delete_local_repository()
_delete_nginx_file()
_delete_supervisor_file()
_reload_remote_webserver()
_delete_remote_env()
| 1.953125
| 2
|
main.py
|
bywakko/owoautofarm
| 0
|
12784546
|
<gh_stars>0
import discord
from discord.ext import commands
import colorama
from colorama import Fore
import asyncio
import os
#-----SETUP-----#
prefix = "!!"
#use the .env feature to hide your token
token = os.getenv("TOKEN")
#---------------#
bot = commands.Bot(command_prefix=prefix,
help_command=None,
case_insensitive=True,
self_bot=True)
@bot.event
async def on_ready():
activity = discord.Game(name="fullu owo ban bypass made by Ari.#6435", type=4)
await bot.change_presence(status=discord.Status.idle, activity=activity)
print(f'''{Fore.RED}
██╗░░██╗███████╗██████╗░██╗
██║░░██║██╔════╝██╔══██╗██║
███████║█████╗░░██████╔╝██║
██╔══██║██╔══╝░░██╔═══╝░██║
██║░░██║███████╗██║░░░░░██║
╚═╝░░╚═╝╚══════╝╚═╝░░░░░╚═╝{Fore.RED}
▒ ░░ ░░ ▒░ ░░ ▒▓ ░▒▓░░ ▒░
░ ░ ░ ░ ░▒ ░ ▒░░
░ ░ ░░ ░ ░
░ ░ ░
{Fore.GREEN}
________ ________ ___
|\ __ \|\ __ \|\ \
\ \ \|\ \ \ \|\ \ \ \
\ \ __ \ \ _ _\ \ \
\ \ \ \ \ \ \\ \\ \ \ ___
\ \__\ \__\ \__\\ _\\ \__\\__\
\|__|\|__|\|__|\|__|\|__\|__|
___ ________ ___ ___ _______ ________ ________ ________ ________ _____ ______ _____ ______ ___ ___ ________ ___ _________ ___ ___ _______ ________
|\ \ |\ __ \|\ \ / /|\ ___ \ |\ __ \|\ ____\ |\ ____\|\ __ \|\ _ \ _ \|\ _ \ _ \|\ \|\ \|\ ___ \|\ \|\___ ___\ |\ \ / /| / ___ \ |\ __ \
\ \ \ \ \ \|\ \ \ \ / / | \ __/|\ \ \|\ \ \ \___|_ \ \ \___|\ \ \|\ \ \ \\\__\ \ \ \ \\\__\ \ \ \ \\\ \ \ \\ \ \ \ \|___ \ \_| \ \ \/ / / /__/|_/ /| \ \ \|\ \
\ \ \ \ \ \\\ \ \ \/ / / \ \ \_|/_\ \ _ _\ \_____ \ \ \ \ \ \ \\\ \ \ \\|__| \ \ \ \\|__| \ \ \ \\\ \ \ \\ \ \ \ \ \ \ \ \ \ / / |__|// / / \ \ \\\ \
\ \ \____\ \ \\\ \ \ / / \ \ \_|\ \ \ \\ \\|____|\ \ \ \ \____\ \ \\\ \ \ \ \ \ \ \ \ \ \ \ \ \\\ \ \ \\ \ \ \ \ \ \ \ \/ / / / /_/__ __\ \ \\\ \
\ \_______\ \_______\ \__/ / \ \_______\ \__\\ _\ ____\_\ \ \ \_______\ \_______\ \__\ \ \__\ \__\ \ \__\ \_______\ \__\\ \__\ \__\ \ \__\__/ / / |\________\\__\ \_______\
\|_______|\|_______|\|__|/ \|_______|\|__|\|__|\_________\ \|_______|\|_______|\|__| \|__|\|__| \|__|\|_______|\|__| \|__|\|__| \|__|\___/ / \|_______\|__|\|_______|
selfbot is ready!
''')
@bot.command()
async def help(ctx):
embed = discord.Embed(
title="<:bot_tick:870672785322676224>Help AutoOwO<:bot_tick:870672785322676224>",
color=420699,
        description=
        f":arrow_right: **{prefix}autoOwO**\nSends owoh, owo sell all, owo flip 500 and owo cash roughly every 50 seconds.\n\n**:arrow_right: {prefix}stopautoOwO**\nStops autoOwO.\n\n**:arrow_right: {prefix}Owobanbypass**\nHelps you avoid an OwO ban by taking breaks at appropriate times.\nExample: `the bot takes a break after running; 1st break = 5 min, 2nd break = 10 min, 3rd break = 15 min`\n\nMade by Ari.6435"
)
embed.set_thumbnail(
url=
"https://cdn.discordapp.com/icons/856358420385759293/a_c9bb8d2dd31eae42a2a9d6efc010e1bc.png"
)
await ctx.send(embed=embed)
@bot.command(pass_context=True)
async def autoOwO(ctx):
await ctx.message.delete()
await ctx.send('auto OwO is now **enabled**!')
global dmcs
dmcs = True
while dmcs:
async with ctx.typing():
await asyncio.sleep(5)
            await ctx.send('owoh')
            print(f"{Fore.GREEN}successfully owoh")
            await asyncio.sleep(15)
            await ctx.send('owo sell all')
            print(f"{Fore.GREEN}successfully sell")
            await ctx.send('owo flip 500')
            print(f"{Fore.GREEN}successfully owo flip 500")
            await asyncio.sleep(10)
            await ctx.send('owo cash')
            print(f"{Fore.GREEN}successfully cash")
await asyncio.sleep(13)
@bot.command()
async def stopautoOwO(ctx):
await ctx.message.delete()
await ctx.send('auto OwO Magi is now **disabled**!')
global dmcs
dmcs = False
@bot.command(pass_context=True)
async def Owobanbypass(ctx):
await ctx.message.delete()
await ctx.send('owobanbypass is now **enabled**!')
global dmcs
dmcs = True
    # Each tuple gives the pause pattern for one command cycle, in seconds:
    # (extra pause before 'owoh', pause after 'owoh', pause after 'owo flip 500', pause after 'owo cash')
    schedule = [
        (0, 15, 8, 13), (0, 15, 10, 13), (5, 15, 10, 11), (0, 14, 18, 12),
        (0, 15, 9, 13), (0, 15, 10, 5), (0, 17, 12, 15), (0, 15, 9, 13),
        (0, 14, 14, 300), (0, 15, 8, 14), (0, 15, 10, 13), (5, 15, 10, 11),
        (0, 14, 18, 12), (0, 15, 9, 13), (0, 15, 18, 4), (0, 12, 16, 14),
        (0, 15, 4, 11), (0, 15, 11, 900), (0, 15, 8, 14), (0, 15, 10, 13),
        (5, 15, 10, 11), (0, 14, 18, 12), (0, 15, 9, 13), (0, 15, 18, 4),
        (0, 12, 16, 14), (0, 15, 4, 11), (0, 15, 11, 900), (0, 15, 8, 13),
        (0, 15, 10, 13), (5, 15, 10, 11), (0, 14, 18, 12), (0, 15, 9, 13),
        (0, 15, 10, 5), (0, 17, 12, 15), (0, 15, 9, 13), (0, 14, 14, 1200),
    ]
    while dmcs:
        async with ctx.typing():
            for pre_pause, after_owoh, after_flip, after_cash in schedule:
                if pre_pause:
                    await asyncio.sleep(pre_pause)
                await ctx.send('owoh')
                print(f"{Fore.GREEN}successfully owoh")
                await asyncio.sleep(after_owoh)
                await ctx.send('owo sell all')
                print(f"{Fore.GREEN}successfully sell")
                await ctx.send('owo flip 500')
                print(f"{Fore.GREEN}successfully owo flip 500")
                await asyncio.sleep(after_flip)
                await ctx.send('owo cash')
                print(f"{Fore.GREEN}successfully cash")
                await asyncio.sleep(after_cash)
# @bot.command()
# async def stopautoOwO(ctx):
# await ctx.message.delete()
# await ctx.send('auto OwO Magi is now **disabled**!')
# global dmcs
# dmcs = False
bot.run(token, bot=False)
| 2.46875
| 2
|
customers/errors/__init__.py
|
hnb2/flask-customers
| 1
|
12784547
|
<gh_stars>1-10
'''
This module contains the error blueprint to handle all the
common HTTP errors.
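Register the blueprint on the application with ``app.register_blueprint(bp)``
so these handlers apply app-wide.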
'''
from flask import Blueprint, jsonify
bp = Blueprint('errors', __name__)
def _generic_error(error, message, code):
'''
Generic error handler
:param error:
A python error, is None for a normal HTTP error
:param message:
A custom error message to return
:param code:
The HTTP error code to use
'''
response = jsonify(error=message)
response.status_code = code
return response
@bp.app_errorhandler(400)
def bad_request(error):
'''
Error handler for 400
:param error:
A python error, is None for a normal HTTP error
'''
return _generic_error(error, 'Bad request', 400)
@bp.app_errorhandler(401)
def unauthorized(error):
'''
Error handler for 401
:param error:
A python error, is None for a normal HTTP error
'''
return _generic_error(
error,
'Please login with proper credentials',
401
)
@bp.app_errorhandler(404)
def page_not_found(error):
'''
Error handler for 404
:param error:
A python error, is None for a normal HTTP error
'''
return _generic_error(error, 'Page not found', 404)
| 2.703125
| 3
|
activelearning/aev_cluster.py
|
plin1112/ANI-Tools
| 8
|
12784548
|
<reponame>plin1112/ANI-Tools
import pyNeuroChem as pync
import hdnntools as hdn
import pyanitools as pyt
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
wkdir = '/home/jujuman/Research/DataReductionMethods/models/train_c08f/'
cnstfile = wkdir + 'rHCNO-4.6A_16-3.1A_a4-8.params'
saefile = wkdir + 'sae_6-31gd.dat'
nnfdir = wkdir + 'networks/'
# Construct pyNeuroChem classes
nc = pync.conformers(cnstfile, saefile, nnfdir, 0, True)
h5file = '/home/jujuman/Research/ANI-DATASET/h5data/ani-gdb-c01.h5'
# Declare loader
adl = pyt.anidataloader(h5file)
aevs = []
aevi = []
tspc = []
for i,data in enumerate(adl):
    # Extract molecule data from the HDF5 record
erg = data['energies']
spc = data['species']
xyz = data['coordinates'].reshape(erg.shape[0], len(spc), 3)
Nm = erg.shape[0] # Number of molecules
Na = len(spc) # number of atoms
# Set the conformers
nc.setConformers(confs=xyz, types=list(spc))
    # Compute energies of the conformations; this also builds the AEVs, which we can read out next
    Ec = nc.energy().copy()
    # Load the AEVs for all heavy atoms and store them in aevs
for m in range(Nm):
for a in range(Na):
if spc[a] != 'H':
aevi.append((i,Nm,Na))
tspc.append(spc[a])
aevs.append(nc.atomicenvironments(atom_idx=a,molec_idx=m).copy())
X = np.vstack(aevs)
print(X.shape)
from sklearn.cluster import KMeans
from sklearn.preprocessing import normalize
from matplotlib.pyplot import cm
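# Cluster the heavy-atom AEVs with k-means and inspect how tightly each cluster is packed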
Nc = 10
k_means = KMeans(n_clusters=Nc, random_state=0).fit(X)
labels = k_means.labels_
center = k_means.cluster_centers_
T = [[] for _ in range(Nc)]
D = [[] for _ in range(Nc)]
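# For each AEV, record its dot product with and Euclidean distance to its assigned cluster center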
for i,(l,x) in enumerate(zip(labels,X)):
d = np.linalg.norm(center[l] - x)
Lc = np.linalg.norm(center[l])
Lx = np.linalg.norm(x)
t = np.dot(center[l],x)
T[l].append(t)
D[l].append(d)
color=cm.rainbow(np.linspace(0,1,Nc))
for t,d,c in zip(T,D,color):
plt.scatter(t,d,color=c)
plt.show()
| 2
| 2
|
logs/test2.py
|
MLD1024/pythonDemo
| 1
|
12784549
|
# -*- coding: utf-8 -*-
import os
import os.path
# rootdir = 'D:\loganalyze'
# list = os.listdir(rootdir) # 列出文件夹下所有的目录与文件
# for i in range(0 ,len(list)):
# path = os.path.join(rootdir ,list[i])
# if os.path.isfile(path):
# print path
# else:
# print path
#
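# Recursively walk the given directory and print the path of every file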
def getFile(path):
    list = os.listdir(path)  # list all the directories and files under the folder
for i in range(0, len(list)):
filepath = os.path.join(path, list[i])
if os.path.isfile(filepath):
print filepath
else:
getFile(filepath)
if __name__ == '__main__':
getFile('D:\loganalyze')
| 3.078125
| 3
|
src/testproject/sdk/internal/reporter/__init__.py
|
bbornhau/python-opensdk
| 38
|
12784550
|
<filename>src/testproject/sdk/internal/reporter/__init__.py
from .reporter import Reporter
__all__ = ["Reporter"]
| 1.085938
| 1
|