class Clause(object):
def __init__(self, clause):
"""Initializes a clause. Here, the input clause is either a list or set
of integers, or is an instance of Clause; in the latter case, a shallow
copy is made, so that one can modify this clause without modifying the
original clause.
Store the list of literals as a frozenset."""
if isinstance(clause, Clause):
# We use frozenset here, so that clauses are not modifiable.
# This ensures that two equal clauses always have the same hash,
# due also to our definition of the __hash__ method.
self.literals = frozenset(clause.literals)
else:
for i in clause:
# Sanity check.
assert isinstance(i, int), "Not an integer: %r" % i
self.literals = frozenset(clause)
def __repr__(self):
return repr(self.literals)
def __eq__(self, other):
return self.literals == other.literals
def __hash__(self):
"""This will be used to be able to have sets of clauses,
with clause equality defined on the equality of their literal sets."""
return hash(self.literals)
def __len__(self):
return len(self.literals)
@property
def istrue(self):
"""A clause is true if it contains both a predicate and its complement."""
return has_pos_and_neg(self.literals)
@property
def isfalse(self):
"""A clause is false if and only if it is empty."""
return len(self.literals) == 0
def simplify(self, i):
"""Computes the result simplify the clause according to the
truth assignment i."""
if i in self.literals:
return True
if -i in self.literals:
return Clause(self.literals - {-i})
return self
def has_pos_and_neg(l):
return len(set(l)) > len({abs(x) for x in l})
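# A minimal usage sketch (added for illustration; not part of the original
# module). The literals below are arbitrary example values.
if __name__ == "__main__":
    c = Clause([1, -2, 3])
    assert len(c) == 3 and not c.istrue and not c.isfalse
    assert c.simplify(1) is True                          # literal 1 satisfies the clause
    assert c.simplify(2).literals == frozenset({1, 3})    # -2 is falsified and removed
    assert c.simplify(4) is c                             # variable 4 does not occur
    assert Clause([1, -1]).istrue                         # a literal and its complement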
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from importlib import import_module
from thumbor.utils import logger
def import_class(name, get_module=False):
module_name = name if get_module else ".".join(name.split(".")[:-1])
klass = name.split(".")[-1]
module = import_module(module_name)
return module if get_module else getattr(module, klass)
# Unfortunately this is a very useful class and rearchitecting it is going to be hard
# So while we agree that it has too many attributes, it will remain like that for now
class Importer: # pylint: disable=too-many-instance-attributes
def __init__(self, config):
self.config = config
self.engine = None
self.gif_engine = None
self.loader = None
self.url_signer = None
self.upload_photo_storage = None
self.storage = None
self.metrics = None
self.result_storage = None
self.detectors = []
self.filters = []
self.optimizers = []
self.handler_lists = []
self.error_handler_module = None
self.error_handler_class = None
@staticmethod
def import_class(name, get_module=False, validate_fn=None):
kls = import_class(name, get_module)
if validate_fn is not None:
validate_fn(kls)
return kls
def import_modules(self):
self.config.validates_presence_of(
"ENGINE",
"GIF_ENGINE",
"LOADER",
"STORAGE",
"DETECTORS",
"FILTERS",
"URL_SIGNER",
"METRICS",
"HANDLER_LISTS",
)
self.import_item("ENGINE", "Engine")
self.import_item("GIF_ENGINE", "Engine")
self.import_item("LOADER")
self.import_item("STORAGE", "Storage")
self.import_item("METRICS", "Metrics")
self.import_item("DETECTORS", "Detector", is_multiple=True)
self.import_item("FILTERS", "Filter", is_multiple=True, ignore_errors=True)
self.import_item("HANDLER_LISTS", is_multiple=True)
self.import_item("OPTIMIZERS", "Optimizer", is_multiple=True)
self.import_item("URL_SIGNER", "UrlSigner")
if self.config.RESULT_STORAGE:
self.import_item("RESULT_STORAGE", "Storage")
if self.config.UPLOAD_PHOTO_STORAGE:
self.import_item("UPLOAD_PHOTO_STORAGE", "Storage")
if self.config.USE_CUSTOM_ERROR_HANDLING:
self.import_item("ERROR_HANDLER_MODULE", "ErrorHandler")
self.error_handler_class = self.error_handler_module
def import_item(
self,
config_key=None,
class_name=None,
is_multiple=False,
item_value=None,
ignore_errors=False,
validate_fn=None,
):
if item_value is None:
conf_value = getattr(self.config, config_key)
else:
conf_value = item_value
if is_multiple:
self.load_multiple_item(
config_key,
conf_value,
class_name,
item_value,
ignore_errors,
validate_fn,
)
else:
if class_name is not None:
module = self.import_class("%s.%s" % (conf_value, class_name))
else:
module = self.import_class(conf_value, get_module=True)
setattr(self, config_key.lower(), module)
def load_multiple_item(
self,
config_key,
conf_value,
class_name,
item_value,
ignore_errors,
validate_fn,
):
modules = []
if conf_value:
for module_name in conf_value:
try:
if class_name is not None:
if class_name == "*":
module = self.import_class(
module_name, validate_fn=validate_fn
)
else:
module = self.import_class(
"%s.%s" % (module_name, class_name),
validate_fn=validate_fn,
)
else:
module = self.import_class(
module_name, get_module=True, validate_fn=validate_fn
)
modules.append(module)
except ImportError as error:
if ignore_errors:
logger.warning(
"Module %s could not be imported: %s", module_name, error,
)
else:
raise
setattr(self, config_key.lower(), tuple(modules))
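# A small, hedged usage sketch (added for illustration, not part of thumbor):
# it exercises the module-level import_class() helper with standard-library
# names only, so it runs without a thumbor configuration.
if __name__ == "__main__":
    decoder_cls = import_class("json.JSONDecoder")        # import a class by dotted path
    json_module = import_class("json", get_module=True)   # or import the module itself
    print(decoder_cls.__name__, json_module.__name__)     # -> JSONDecoder json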
|
from typing import List
class Solution:
def missingNumber(self, nums: List[int]) -> int:
n = len(nums)
return (set(range(n+1)) - set(nums)).pop()
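# A hedged alternative sketch (added for illustration, not the original
# solution): the missing number can also be found with O(1) extra space via
# the arithmetic series sum n*(n+1)/2 minus the sum of the given numbers.
class SolutionSum:
    def missingNumber(self, nums: List[int]) -> int:
        n = len(nums)
        return n * (n + 1) // 2 - sum(nums)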
|
import sys
import os
import re
import time
import datetime
import subprocess
currentTime = time.time()
# CONVERT UNIX DATE INTO NORMAL HUMAN READABLE DATE
humanDate = datetime.datetime.fromtimestamp(currentTime).strftime('%Y_%m_%d_%H_%M_%S')
# DIRECTORY WE ARE GOING TO STORE THE DATABASE BACKUPS IN
directory = 'C:\\Users\\trasb\\Desktop\\DB_BACKUP_FOLDER\\'
# NAME OF THE SOURCE FILE TO USE FOR LOOPING THROUGH
myfile = 'db_list.txt'
# SQL QUERY TO GET THE INITIAL LIST TO MAKE THE BACKUP FROM
getDB = os.popen('C:\\wamp\\bin\\mysql\\mysql5.7.10\\bin\\mysql -u root "information_schema" -e "SELECT SCHEMA_NAME AS \'\' FROM SCHEMATA WHERE SCHEMA_NAME NOT REGEXP \'information|sys|backup|mysql|test|performance_schema\'"').read()
# REMOVE BLANK LINES FROM LIST
getDB = re.sub(r'^\s*$', '\n', getDB.strip())
# CHECK IF THE FOLDER EXISTS IF IT DOES NOT THEN CREATE THE FOLDER
if not os.path.exists(directory):
os.makedirs(directory)
# WRITE OUR LIST TO THE DIRECTORY CREATED
result = open(directory + myfile, 'w')
result.write(getDB)
# CLOSE THE FILE HERE; IF IT'S NOT DONE YOU WON'T GET ANY DATA RETURNED
result.close()
# THIS PART BEGINS THE BACKUP PROCESS OF ALL THE DATABASES
with open('C:\\Users\\trasb\\Desktop\\DB_BACKUP_FOLDER\\db_list.txt', 'r') as file:
for line in file:
output = ""+directory+""+humanDate+"_"+line+ "_" + humanDate +""
output = re.sub(r'\n', '.sql', output)
line = re.sub(r'\n', '', line)
shit = "C:\\wamp\\bin\\mysql\\mysql5.7.10\\bin\\mysqldump -u root " + line + " > " + output + ".sql"
zipit = 'gzip '+output+'.sql'
with open(output, 'w') as final:
os.popen(shit)
os.popen(zipit)
# GET LIST OF ALL FILES IN THE DIRECTORY AND REMOVE POSSIBLE HIDDEN FILES
list_files = [x for x in os.listdir(directory) if x[0]!='.']
# NOW LOOP THROUGH THE FILES AND REMOVE EMPTY ONES
for each_file in list_files:
file_path = '%s/%s' % (directory, each_file)
# CHECK SIZE AND DELETE IF 0
if os.path.getsize(file_path)==0:
os.remove(file_path)
else:
name, ext = os.path.splitext(file_path)
if ext == '.sql':
os.remove(file_path)
|
import torch
import copy
from typing import Dict, Any
_supported_types = {torch.nn.Conv2d, torch.nn.Linear}
def max_over_ndim(input, axis_list, keepdim=False):
''' Applies 'torch.max' over the given axes
'''
axis_list.sort(reverse=True)
for axis in axis_list:
input, _ = input.max(axis, keepdim)
return input
def min_over_ndim(input, axis_list, keepdim=False):
''' Applies 'torch.min' over the given axes
'''
axis_list.sort(reverse=True)
for axis in axis_list:
input, _ = input.min(axis, keepdim)
return input
def channel_range(input, axis=0):
''' finds the range of weights associated with a specific channel
'''
size_of_tensor_dim = input.ndim
axis_list = list(range(size_of_tensor_dim))
axis_list.remove(axis)
mins = min_over_ndim(input, axis_list)
maxs = max_over_ndim(input, axis_list)
assert mins.size(0) == input.size(axis), "Dimensions of resultant channel range do not match size of requested axis"
return maxs - mins
def cross_layer_equalization(module1, module2, output_axis=0, input_axis=1):
''' Given two adjacent modules, their weights are scaled such that
the ranges of the first module's output channels are equal to the
ranges of the second module's input channels
'''
if type(module1) not in _supported_types or type(module2) not in _supported_types:
raise ValueError("module type not supported:", type(module1), " ", type(module2))
if module1.weight.size(output_axis) != module2.weight.size(input_axis):
raise TypeError("Number of output channels of first arg do not match \
number input channels of second arg")
weight1 = module1.weight
weight2 = module2.weight
bias = module1.bias
weight1_range = channel_range(weight1, output_axis)
weight2_range = channel_range(weight2, input_axis)
# produce the scaling factors to be applied
weight2_range += 1e-9
scaling_factors = torch.sqrt(weight1_range / weight2_range)
inverse_scaling_factors = torch.reciprocal(scaling_factors)
bias = bias * inverse_scaling_factors
# formatting the scaling (1D) tensors to be applied on the given argument tensors
# pads axis to (1D) tensors to then be broadcasted
size1 = [1] * weight1.ndim
size1[output_axis] = weight1.size(output_axis)
size2 = [1] * weight2.ndim
size2[input_axis] = weight2.size(input_axis)
scaling_factors = torch.reshape(scaling_factors, size2)
inverse_scaling_factors = torch.reshape(inverse_scaling_factors, size1)
weight1 = weight1 * inverse_scaling_factors
weight2 = weight2 * scaling_factors
module1.weight = torch.nn.Parameter(weight1)
module1.bias = torch.nn.Parameter(bias)
module2.weight = torch.nn.Parameter(weight2)
def equalize(model, paired_modules_list, threshold=1e-4, inplace=True):
''' Given a list of adjacent modules within a model, equalization will
be applied between each pair; this is repeated until convergence is achieved.
Keeps a copy of the changing modules from the previous iteration: if the copies
are not that different from the current modules (as determined by converged()),
then the modules have converged enough that further equalizing is not necessary.
The implementation references section 4.1 of this paper: https://arxiv.org/pdf/1906.04721.pdf
Args:
model: a model (nn.module) that equalization is to be applied on
paired_modules_list: a list of lists where each sublist is a pair of two
submodules found in the model, for each pair the two submodules generally
have to be adjacent in the model to get expected/reasonable results
threshold: a number used by the converged function to determine what degree
similarity between models is necessary for them to be called equivalent
inplace: determines if function is inplace or not
'''
if not inplace:
model = copy.deepcopy(model)
name_to_module : Dict[str, torch.nn.Module] = {}
previous_name_to_module: Dict[str, Any] = {}
name_set = {name for pair in paired_modules_list for name in pair}
for name, module in model.named_modules():
if name in name_set:
name_to_module[name] = module
previous_name_to_module[name] = None
while not converged(name_to_module, previous_name_to_module, threshold):
for pair in paired_modules_list:
previous_name_to_module[pair[0]] = copy.deepcopy(name_to_module[pair[0]])
previous_name_to_module[pair[1]] = copy.deepcopy(name_to_module[pair[1]])
cross_layer_equalization(name_to_module[pair[0]], name_to_module[pair[1]])
return model
def converged(curr_modules, prev_modules, threshold=1e-4):
''' Tests whether the summed norm of the differences between each pair of modules
is less than the given threshold.
Takes two dictionaries mapping names to modules; the set of names in each dictionary
should be the same. Looping over the names, for each name take the difference
between the weights of the associated modules in each dictionary.
'''
if curr_modules.keys() != prev_modules.keys():
raise ValueError("The keys to the given mappings must have the same set of names of modules")
summed_norms = 0
if None in prev_modules.values():
return False
for name in curr_modules.keys():
difference = curr_modules[name].weight.sub(prev_modules[name].weight)
summed_norms += torch.norm(difference)
return summed_norms < threshold
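# A minimal, hedged usage sketch (added for illustration): two adjacent
# nn.Linear layers wrapped in nn.Sequential and equalized in place. The layer
# sizes and threshold are arbitrary; nn.Sequential names the layers "0" and "1".
if __name__ == "__main__":
    torch.manual_seed(0)
    model = torch.nn.Sequential(torch.nn.Linear(8, 16), torch.nn.Linear(16, 4))
    equalize(model, [["0", "1"]], threshold=1e-4, inplace=True)
    # After equalization, the per-channel range of layer "0"'s output channels
    # should roughly match that of layer "1"'s input channels.
    r1 = channel_range(model[0].weight, axis=0)
    r2 = channel_range(model[1].weight, axis=1)
    print(torch.allclose(r1, r2, atol=1e-3))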
|
# PIC Exceptions
class HedronMatchError(Exception):
"""Cannot find hedron in residue for given key."""
pass
class MissingAtomError(Exception):
"""Missing atom coordinates for hedron or dihedron."""
pass
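# A brief, hypothetical illustration (added; not part of the module) of how
# calling code might raise and handle these exceptions. lookup_hedron and the
# dict-based residue are assumptions made only for this example.
def lookup_hedron(residue, key):
    hedron = residue.get(key)
    if hedron is None:
        raise HedronMatchError("no hedron in residue for key %r" % (key,))
    return hedron
if __name__ == "__main__":
    try:
        lookup_hedron({}, ("N", "CA", "C"))
    except HedronMatchError as err:
        print("skipping residue:", err)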
|
#!/usr/bin/env python3
import csv
import random
from collections import defaultdict
import numpy
def poem(mapping):
MAX_LENGTH = 8
summa = sum(mapping[''].values())
i = 0
p = []
word = ''
while word != "." and i < MAX_LENGTH:
i += 1
res = [
(y, x)
for y, x in mapping[word].items()
if x != 0
]
words = [word for word, _ in res]
weights = [weight for _, weight in res]
word = random.choices(words, weights)[0]
p.append(word)
return " ".join(p[:-1])
if __name__ == "__main__":
mapping = defaultdict(lambda: defaultdict(int))
total = 0.0
counts = defaultdict(float)
with open("poems.csv", "r") as fi:
reader = csv.DictReader(fi)
for row in reader:
words = row["poem"].split(" ")
for i in range(len(words)-1):
mapping[words[i]][words[i+1]] += 1
mapping[""][words[0]] += 1
mapping[words[-1]]["."] += 1
for word in row["poem"].split(" "):
counts[word] += 1
total += 1
mapping["."] = defaultdict(int)
words = sorted(mapping.keys())
m = numpy.zeros((len(mapping), len(mapping)))
for i, prev_word in enumerate(words):
for j, next_word in enumerate(words):
w = mapping[prev_word][next_word]
m[i][j] = w
# print(m)
print(poem(mapping))
# for word in sorted(counts.keys()):
# print(word, counts[word]/total)
|
# -*- encoding: utf8 -*-
# Some code used in this module was taken from the python documentation and
# can be found in http://docs.python.org/2/library/csv.html
# Modified by Caktus.
from __future__ import unicode_literals, absolute_import
__all__ = ["DictReader", "DictWriter"]
from csv import DictReader as BaseDictReader, DictWriter as BaseDictWriter
class DictReader(BaseDictReader):
def __init__(self, f, fieldnames=None, restkey=None, restval=None,
dialect="excel",
encoding=None,
*args, **kwds):
BaseDictReader.__init__(self, f=f, fieldnames=fieldnames,
restkey=restkey, restval=restval,
dialect=dialect,
*args, **kwds)
from .csv import reader
self.reader = reader(f, dialect=dialect,
encoding=encoding,
**kwds)
class DictWriter(BaseDictWriter):
def __init__(self, f, fieldnames, restval="", extrasaction="raise",
dialect="excel",
encoding=None,
*args, **kwds):
from .csv import Writer
BaseDictWriter.__init__(self, f=f, fieldnames=fieldnames,
restval=restval, extrasaction=extrasaction,
dialect=dialect, *args, **kwds)
self.writer = Writer(csvfile=f, dialect=dialect, encoding=encoding,
**kwds)
self.encoding = encoding
|
# -*- coding: utf-8 -*-
# Author: Icy(enderman1024@foxmail.com)
# OS: ALL
# Name: Log Writer
# Description: This function is used for recording logs
# Used Libraries: time, sys
# Version: 1.3
import time
import sys
class Logger: # Logger
def __init__(self, filename=None, line_end="lf",
date_format="%Y-%m-%d %H:%M:%S", level="DEBUG", echo=True):
self.level = 1
self.echo = echo
if level == "DEBUG":
self.level = 0
elif level == "INFO":
self.level = 1
elif level == "WARNING":
self.level = 2
elif level == "ERROR":
self.level = 3
elif level == "CRITICAL":
self.level = 4
else:
raise Exception("logger level: DEBUG, INFO, WARNING, ERROR, CRITICAL")
try:
temp = time.strftime(date_format)
del temp
except Exception as err:
raise Exception("Failed to set date formant, result: " + str(err))
self.date_format = date_format
if line_end == "lf":
self.line_end = "\n"
elif line_end == "crlf":
self.line_end = "\r\n"
else:
raise Exception("Unknow line end character(s): \"" + line_end + "\"")
self.filename = filename
if filename == None:
return
try:
log_file = open(filename,"w")
log_file.close()
except Exception as err:
raise Exception("Can't open file: \"" + filename + "\", result: " + str(err))
def DEBUG(self,msg):
if self.level > 0:
return
infos = "["+ time.strftime(self.date_format) +"] [DBUG] " + msg + self.line_end
if self.echo:
sys.stdout.write(infos)
sys.stdout.flush()
if self.filename == None:
return infos
log_file = open(self.filename,"a")
log_file.write(infos)
log_file.close()
return infos
def INFO(self,msg):
if self.level > 1:
return
infos = "["+ time.strftime(self.date_format) +"] [INFO] " + msg + self.line_end
if self.echo:
sys.stdout.write(infos)
sys.stdout.flush()
if self.filename == None:
return infos
log_file = open(self.filename,"a")
log_file.write(infos)
log_file.close()
return infos
def WARNING(self,msg):
if self.level > 2:
return
infos = "["+ time.strftime(self.date_format) +"] [WARN] " + msg + self.line_end
if self.echo:
sys.stdout.write(infos)
sys.stdout.flush()
if self.filename == None:
return infos
log_file = open(self.filename,"a")
log_file.write(infos)
log_file.close()
return infos
def ERROR(self,msg):
if self.level > 3:
return
infos = "["+ time.strftime(self.date_format) +"] [EROR] " + msg + self.line_end
if self.echo:
sys.stdout.write(infos)
sys.stdout.flush()
if self.filename == None:
return infos
log_file = open(self.filename,"a")
log_file.write(infos)
log_file.close()
return infos
def CRITICAL(self,msg):
infos = "["+ time.strftime(self.date_format) +"] [CRIT] " + msg + self.line_end
if self.echo:
sys.stdout.write(infos)
sys.stdout.flush()
if self.filename == None:
return infos
log_file = open(self.filename,"a")
log_file.write(infos)
log_file.close()
return infos
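# A short, hedged usage sketch (added for illustration; the messages are
# placeholders). With filename=None the Logger writes to stdout only, and the
# INFO level filters out DEBUG messages.
if __name__ == "__main__":
    log = Logger(filename=None, level="INFO")
    log.DEBUG("this message is filtered out by the INFO level")
    log.INFO("service started")
    log.WARNING("low disk space")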
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 3 10:33:41 2020
@author: Alden Porter
"""
# =============================================================================
# Import Libraries
# =============================================================================
import numpy as np
from numba import jit
import matplotlib.pyplot as plt
# =============================================================================
# Define Functions
# =============================================================================
def ar_1(mu, a, sigma, T, x_0):
"""
This function computes a simulated ar1 process assuming x_t = mu + a*x_{t-1} + e_t
"""
x_path = np.zeros(T)
x_path[0] = x_0
shocks = np.random.normal(0,sigma,T) # The first term isn't used and will be ignored for sake of code readability
# iteratively construct the AR1 according to x_t = mu + a*x_{t-1} + e_t
for t in range(1,T):
x_path[t] = mu + a*x_path[t-1] + shocks[t]
return x_path # Return the path of the AR1
def censored_ar_1(mu, a, sigma, T, x_0):
"""
This function computes a simulated ar1 process assuming x_t = max(mu + a*x_{t-1} + e_t,0)
"""
x_path = np.zeros(T)
x_path[0] = x_0
shocks = np.random.normal(0,sigma,T) # The first term isn't used and will be ignored for sake of code readability
# iteratively construct the AR1 according to x_t = mu + a*x_{t-1} + e_t
for t in range(1,T):
x_path[t] = max(mu + a*x_path[t-1] + shocks[t], 0)
return x_path # Return the path of the AR1
def compound_interest_rates(interest_path):
'''
This function takes in a path of portfolio returns (a Tx1 numpy array) and it returns a T+1xT+1
numpy array. The returned array can be seen as a lower triangular matrix with ones on the diagonal and
whose lower diagonal entries correspond to the product of the returns up to that index.
'''
T = len(interest_path) # The number of periods - 1 (because we excluded the initial period)
CI = np.zeros([T+1,T+1]) # Initialize the matrix of compound interest paths in each period.
# Loop over rows and columns and sub in the corresponding compound interest rates for the matrix multiplication
for i in range(T+1):
for j in range(T+1):
if j < i:
CI[i, j] = np.prod(interest_path[j:i])
elif j == i:
CI[i, j] = 1
elif j > i:
continue
return CI
def asset_path(income_path, consumption_path, initial_savings, interest_path):
"""
This function computes the total amount you would have saved given a time series of interest rates
given by interest_path and a time series of savings amounts given by savings path with the first index
corresponding to the first time period. It computes the value of the asset at time T-1, the final index.
Inputs:
All inputs need to be Tx1 Numpy Arrays
"""
T = len(income_path) # How many time periods?
S = np.subtract(income_path, consumption_path) # Compute per period savings as per period income minus consumption
S = np.insert(arr = S, obj = 0, values = initial_savings) # Technical trick: from a mathematical perspective we can consider initial assets to simply be the savings in period 0.
CI = compound_interest_rates(interest_path) # Convert the per period interest path to a compounded interest matrix per period
A = np.dot(CI,S) #Final asset time series is just this dot product
return A
#@jit(nopython = True) # This skips the python interpreter in favor of a more low level interpreter, helps speed up large scale simulations
def asset_monte_carlo(N, T, percentiles, initial_savings, inc_fn, cons_fn, int_fn):
'''
This function runs a monte-carlo simulation on the interest rate, income and consumption stochastic processes to obtain quantiles on asset values (Q_t) for each time period and the expected return
'''
sim_A = np.empty([N,T+1]) # Simulated assets form an N x (T+1) matrix which will be collapsed into (T+1)x1 vectors corresponding to the percentiles
#Randomly simulate asset paths according to the inc_fun, cons_fn and int_fn functions,
# then compile the simulated paths into a matrix
for n in range(N):
income_path = inc_fn()
consumption_path = cons_fn()
interest_path = int_fn()
A_n = asset_path(income_path, consumption_path, initial_savings, interest_path)
sim_A[n, :] = np.transpose(A_n) # Replace the row in the simulations matrix with the asset path
E_A = np.empty([T+1,1]) # The expected return
Q_A = np.empty([T+1, len(percentiles)]) # The desired percentiles
#This loop actually estimates the statistics based on the above simulation
for t in range(T+1):
E_A[t] = np.mean(sim_A[:, t]) #Take the mean portfolio value in time t
# This little loops gets the percentiles at each time period
for k, q in enumerate(percentiles):
Q_A[t, k] = np.percentile(sim_A[:, t] , q)
return Q_A, E_A
def main():
'''
This function runs the program
'''
# Prompt for the simple setup
simple_setup_bool = input('Do you want easy setup? [y/n] \n')
# Run simple setup
if simple_setup_bool.lower().strip() != 'n':
T = int(input('How many periods? i.e. T = '))
C_bar = float(input('How much is your consumption budget in each period? i.e. C_bar = '))
Y_bar = float(input('How much are you earning each period in outside income? i.e. Y_bar = '))
r_bar = float(input("What's your average return? i.e. r_bar = "))
a_0 = float(input('What are your initial assets? i.e. a_0 = '))
sigma_bar = input("What's the variance on your per period return? (press enter if you don't know, or enter 0) i.e. sigma_bar = ")
if sigma_bar.strip() == '':
sigma_bar = r_bar/((2*3.14159))
else:
sigma_bar = float(sigma_bar)
N = 2000
percentiles = [1, 10, 25, 75, 90, 99]
ci_color = 'mediumturquoise'
# ========================= Run the simulation
inc_sim = lambda : ar_1(0, 1, 0 , T, Y_bar)
con_sim = lambda : ar_1(0, 1, 0, T, C_bar)
int_sim = lambda : ar_1(0, 1, sigma_bar, T, r_bar)
Q, E = asset_monte_carlo(N, T, percentiles, a_0, inc_sim, con_sim, int_sim)
# ========================= Plot the pretty figure
fig = plt.figure()
ax1 = fig.add_subplot()
ax1.plot(E, color = 'black', linewidth = 1, linestyle = 'dotted')
# Shade confidence bands
ax1.fill_between(list(range(T+1)), Q[:, 0], Q[:, 5], facecolor = ci_color, alpha = .1) # 1-99, thinnest
ax1.fill_between(list(range(T+1)), Q[:, 1], Q[:, 4], facecolor = ci_color, alpha = .3) # 10-90
ax1.fill_between(list(range(T+1)), Q[:, 2], Q[:, 3], facecolor = ci_color, alpha = .5) # 25-75, thickest
ax1.set_ylabel('Total Savings')
ax1.set_xlabel('Time')
ax1.set_title('Possible Savings Paths, Expected Savings and Confidence Intervals')
plt.show()
# =============================================================================
# Run Code
# =============================================================================
main()
|
#!/usr/bin/env python3
# -*-coding:utf-8-*-
# @Time : 2017/11/1 ~ 2019/9/1
# @Author : Allen Woo
import os
try:
shcmd = """ps -ef | grep uwsgi_run.ini | awk '{print $2}' | xargs kill -9"""
r = os.system(shcmd)
print("Kill uwsgi.")
except Exception as e:
print(e)
try:
shcmd = """ps -ef | grep celery_worker.celery | grep -v color=auto | awk '{print $2}' | xargs kill -9"""
r = os.system(shcmd)
print("Kill celery_worker.celery.")
except Exception as e:
print(e)
# Unrelated shell command (not Python) for removing old kernel packages;
# commented out so the script still parses. Run it separately in a terminal if needed:
# sudo apt-get remove \
#     linux-headers-4.15.0-28 \
#     linux-headers-4.15.0-28-generic \
#     linux-image-4.15.0-28-generic \
#     linux-modules-4.15.0-28-generic \
#     linux-modules-extra-4.15.0-28-generic
|
from django.conf import settings
from django.core import mail
from users.services import send_mail_notification
def test_mail_notification() -> None:
"""Тестирует отправку письма о новом пользователе"""
send_mail_notification(first_name='Тест', total_users=10)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == settings.EMAIL_SUBJECT
assert 'Пользователь Тест активировал бота' in mail.outbox[0].body
assert 'Всего пользователей: 10' in mail.outbox[0].body
|
import utils
# exec(open("test_dash.py").read())
import importlib
importlib.reload(utils)
from utils import *
server = Flask(__name__)
external_scripts =['https://proteinpaint.stjude.org/bin/proteinpaint.js']
app = dash.Dash(
__name__,
meta_tags=[{"name": "viewport", "content": "width=800px, initial-scale=1"}],
server=server,
serve_locally=True,
external_scripts=external_scripts,
)
# server = app.server
app.config.suppress_callback_exceptions = True
# app.scripts.append_script({"external_url": ['https://code.jquery.com/jquery-3.2.1.min.js',]})
@server.route("/results/<path:path>")
def download(path):
"""Serve a file from the upload directory."""
return send_from_directory("results", path, as_attachment=True)
# app.renderer = '''
# runproteinpaint({
# host:'https://proteinpaint.stjude.org',
# holder:document.getElementById('PE_vis'),
# parseurl:true,
# block:true,
# nobox:1,
# noheader:1,
# genome:'hg19',
# position:'chr12:25334648-25426944',
# nativetracks:'RefGene',
# tracks:[
# {
# "type":"bigwig",
# "url":"http://hgdownload.soe.ucsc.edu/goldenPath/hg19/phyloP100way/hg19.100way.phyloP100way.bw",
# // alternatively, load from a local file on the ProteinPaint server
# //"file":"hg19/hg19.100way.phastCons.bw",
# "name":"UCSC phyloP 100ways",
# "height":100
# },
# ]
# })
# '''
app.layout = html.Div(
id="app-container",
children=[
# Banner
html.Div(
id="banner",
className="banner",
children=[html.Img(src=app.get_asset_url("logo.png")),html.H4("Easy-Prime %s"%(get_version()))],
),
# Left column
html.Div(
id="left-column",
className="four columns",
children=[
# welcome
welcome(),
html.Div([
dcc.Input(
id = 'jid',
value = '',
)
], style= {'display': 'none'} # <-- This is the line that will be changed by the dropdown callback
),
# input
# variant_input(),
# parameter
html.Div(id="parameters", style={"background-color":"white"},
children=[variant_input(),buttons(),RTT_slider(app),PBS_slider(app),ngRNA_slider(app),genome_input(app)])
],
),
# Right column
html.Div(
id="right-column",
className="eight columns",
children=[
# vis pe design
html.Div(
id="vis_top_pegRNA",
style={"background-color":"#FFFFFF","margin-top":"20px",'font-weight':'bold','font-size':'20px'},
children=[
html.Div(children=[
'PE design visualization for:',
dcc.Input(
id = 'select_pegRNA_id',
value = '',
style = {"width":"500px"}
)
]),
html.Div(id="PE_vis",children=[html.Img(id='PE_design_figure')]),
],
),
# vis_PE_design(app,pegRNA_list=pegRNA_list),
# PE table title
html.Div(style={"margin-top":"20px","margin-bottom":"10px",'font-weight':'bold','font-size':'20px'},children=['Sequences of top pegRNA and ngRNA combinations are shown below.']),
# show PE sequence table
dash_table.DataTable(
id='pegRNA-table',
columns=[
{'name': i, 'id': i, 'deletable': False} for i in ['sample_ID','type','seq','chr','start','end','strand','predicted efficiency']
],
style_cell={
'minWidth': '0px', 'maxWidth': '20px',
# 'width':'12%',
'whiteSpace': 'normal',
'height': 'auto',
# 'whiteSpace': 'normal',
# 'text-overflow': 'ellipsis',
},
style_data={
'whiteSpace': 'normal',
'height': 'auto',
},
style_cell_conditional=[
{'if': {'column_id': 'type'},
'width': '7%'},
{'if': {'column_id': 'strand'},
'width': '7%'},
{'if': {'column_id': 'start'},
'width': '10%'},
{'if': {'column_id': 'end'},
'width': '10%'},
{'if': {'column_id': 'chr'},
'width': '10%'},
{'if': {'column_id': 'predicted efficiency'},
'width': '10%'},
],
style_header={
'text-align':'center',
'font-weight': 'bold',
},
editable=True,
filter_action="native",
page_action="native",
page_current= 0,
page_size= 13,
),
html.Div(id='download_data'),
],
),
],
)
## main easy prime
@app.callback(
[
Output("jid", "value"),
Output("pegRNA-table", "data"),
],
[
Input('start_search', 'n_clicks'),
Input('slider-pbs', 'value'), # PBS
Input('slider-rtt', 'value'), # RTT
Input('slider-ngRNA', 'value'), # ngRNA
Input('genome', 'value'), # genome
Input('variants', 'value'), # variants
]
)
def search_pegRNA(n_clicks,pbs_value,rtt_value,ngRNA_value,genome,input_variants):
"""search pegRNA, show table, and vis the top one
users can change visualization by searching different
"""
## get parameters
# print ("search_pegRNA")
if n_clicks == None:
return [None,None]
parameters = get_parameters("config.yaml")
# print_parameters(parameters)
jid=str(uuid.uuid4()).split("-")[-1]
# input_variants = StringIO(input_variants)
parameters['min_PBS_length'] = pbs_value[0]
parameters['max_PBS_length'] = pbs_value[1]
parameters['min_RTT_length'] = rtt_value[0]
parameters['max_RTT_length'] = rtt_value[1]
parameters['max_ngRNA_distance'] = ngRNA_value
summary,df_top,df_all,X_p = easy_prime_main(input_variants,jid,parameters)
df_top['predicted_efficiency'] = ["{0:.2f}%".format(x * 100) for x in df_top['predicted_efficiency']]
df_top=df_top.rename(columns = {'predicted_efficiency':'predicted efficiency'})
## initiate data table
return [jid,df_top.to_dict('records')]
@app.callback(
Output("PE_design_figure", "src"),
[
Input('jid', 'value'),
Input('select_pegRNA_id', 'value'),
]
)
def update_vis_pegRNA(jid,select_pegRNA_id):
"""search pegRNA, show table, and vis the top one
users can change visualization by searching different
"""
# print ("my values")
# print ("jid is ",jid)
# print ("select_pegRNA_id is ",select_pegRNA_id)
if jid == None:
# print ("jid is ",jid)
return init_fig()
# if select_pegRNA_id == "":
# print ("select_pegRNA_id is ",select_pegRNA_id)
# return init_fig()
parameters = get_parameters("config.yaml")
df_all = pd.read_csv("results/%s_rawX_pegRNAs.csv.gz"%(jid),index_col=0)
df_top = pd.read_csv("results/%s_topX_pegRNAs.csv"%(jid),index_col=0)
# print (df_all)
# print (df_top)
if select_pegRNA_id == "":
select_pegRNA_id = df_top.index.tolist()[0]
tmp_df_name = jid+".vis.df"
df_all.loc[select_pegRNA_id].to_csv("results/%s"%(tmp_df_name))
# img = vis_pegRNA(,parameters['genome_fasta'])
file_name = "results/%s.fa"%(jid)
if os.path.isfile(file_name):
genome_fasta = file_name
else:
genome_fasta = parameters['genome_fasta']
img = vis_pegRNA("results/%s"%(tmp_df_name),genome_fasta=genome_fasta,out_file_name=tmp_df_name)
return img
## download data table (need jid)
def file_download_link(filename):
"""Create a Plotly Dash 'A' element that downloads a file from the app."""
location = "results/{}".format(urlquote(filename))
return html.A(filename, href=location,target="_blank")
@app.callback(Output('download_data', 'children'), [Input('jid', 'value')])
def download_predict_data(jid):
if jid == None:
return None
return [
"Download files:",
html.Ul([
html.Li(file_download_link("%s_rawX_pegRNAs.csv.gz"%(jid))),
html.Li(file_download_link("%s_topX_pegRNAs.csv"%(jid))),
html.Li(file_download_link("%s_X_p_pegRNAs.csv.gz"%(jid)))
]),
]
'''
@app.callback(Output('topX', 'href'),
[Input('jid', 'value')]
)
def download_topX(jid):
df = pd.read_csv("results/%s_topX_pegRNAs.csv"%(jid),index_col=0)
csv_string = df.to_csv(index=False, encoding='utf-8')
# print (df.head())
# print ("#### printing topX ####")
# csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
# csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
csv_string = "data:text/csv;charset=utf-8," + urllib.parse.quote(csv_string)
# print (csv_string)
return csv_string
'''
# if __name__ == '__main__':
app.run_server(debug=False,host='0.0.0.0',port=9866)
|
from tkinter import *
# Root window configuration
root = Tk()
root.title("Hola mundo")
root.resizable(1,1)
root.iconbitmap('hola.ico')
frame = Frame(root, width=480, height=320)
frame.pack(fill='both', expand=1)
frame.config(cursor="pirate")
frame.config(bg="lightblue")
frame.config(bd=25)
frame.config(relief="sunken")
root.config(cursor="arrow")
root.config(bg="blue")
root.config(bd=15)
root.config(relief="ridge")
# Finally, the application main loop
root.mainloop()
|
from asgiref.sync import async_to_sync
from channels.generic.websocket import JsonWebsocketConsumer
class UserConsumer(JsonWebsocketConsumer):
def connect(self):
user = self.scope['user']
if user.is_authenticated:
self.accept()
group = 'user-{}'.format(user.id)
async_to_sync(self.channel_layer.group_add)(group, self.channel_name)
else:
self.close()
def disconnect(self, close_code):
user = self.scope['user']
group = 'user-{}'.format(user.id)
async_to_sync(self.channel_layer.group_discard)(group, self.channel_name)
def event_change(self, event):
self.send_json(content=event['name'])
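# A hedged sketch (added for illustration) of how server-side code might push
# an event to this consumer: Channels dispatches group messages whose "type"
# is "event.change" to the event_change() handler above. notify_user and its
# arguments are placeholders for this example.
from channels.layers import get_channel_layer
def notify_user(user_id, name):
    channel_layer = get_channel_layer()
    async_to_sync(channel_layer.group_send)(
        'user-{}'.format(user_id),
        {'type': 'event.change', 'name': name},
    )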
|
#URL: https://www.hackerrank.com/challenges/equal-stacks/problem
def equalStacks(h1, h2, h3):
# Write your code here
n1 = 0
n2 = 0
n3 = 0
s1 = sum(h1)
s2 = sum(h2)
s3 = sum(h3)
while True:
# print(s1, s2, s3)  # debug trace, disabled
if s1>s2 or s1>s3:
s1 = s1 - h1[n1]
n1+=1
elif s2>s1 or s2>s3:
s2 = s2 - h2[n2]
n2+=1
elif s3>s1 or s3>s2:
s3 = s3 - h3[n3]
n3+=1
else: break
ans = min(s1, s2, s3)
if ans>0:
return ans
return 0
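# A quick, hedged check (added for illustration) on a small test case; tracing
# the algorithm above, the tallest equal height here is 5.
if __name__ == "__main__":
    print(equalStacks([3, 2, 1, 1, 1], [4, 3, 2], [1, 1, 4, 1]))  # -> 5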
|
from .h import *
trackingVectorId = "b732b3fe" # hashlib.md5("tracking-vector").hexdigest()[0:8], to minimize chance of collision
def addTrackingVector(doc):
if doc.md.trackingVectorClass is None:
return
els = findAll("[tracking-vector]", doc)
if len(els) == 0:
return
if doc.md.trackingVectorImage is None:
# Generate an SVG and <use> it in all the individual spots
appendChild(
doc.body,
E.svg(
{"viewBox": "0 0 46 64", "style": "display:none"},
E.defs(
{},
E.path(
{
"id": trackingVectorId,
"stroke": "black",
"stroke-linecap": "round",
"stroke-linejoin": "round",
"stroke-dasharray": "3,2,35,2,20,2",
"fill": "none",
"d": "M2 23Q17 -16 40 12M1 35Q17 -20 43 20M2 40Q18 -19 44 25M3 43Q19 -16 45 29M5 46Q20 -12 45 32M5 49Q11 40 15 27T27 16T45 37M5 49Q15 38 19 25T34 27T44 41M6 52Q17 40 21 28T32 29T43 44M6 52Q21 42 23 31T30 32T42 47M7 54Q23 47 24 36T28 34T41 50M8 56Q26 50 26 35Q28 48 40 53M10 58Q24 54 27 45Q30 52 38 55M27 50Q28 53 36 57M25 52Q28 56 31 57M22 55L26 57M10 58L37 57M13 60L32 60M16 62L28 63", # pylint: disable=line-too-long
}
),
),
),
)
for el in els:
prependChild(el, " ") # The space is to separate from the following text.
prependChild(
el,
E.a(
{
"class": doc.md.trackingVectorClass,
"href": "https://infra.spec.whatwg.org/#tracking-vector",
},
trackingVectorImage(
doc.md.trackingVectorImage,
doc.md.trackingVectorImageWidth,
doc.md.trackingVectorImageHeight,
doc.md.trackingVectorAltText,
doc.md.trackingVectorTitle,
),
),
)
removeAttr(el, "tracking-vector")
def trackingVectorImage(imageURL, imageWidth, imageHeight, altText, title):
if imageURL is None:
return E.svg(
{"width": "46", "height": "64", "role": "img", "aria-label": altText},
E.title({}, title),
E.use({"href": "#" + trackingVectorId}),
)
return E.img(
{
"title": title,
"alt": altText,
"src": imageURL,
"width": imageWidth,
"height": imageHeight,
}
)
|
# -*- coding: utf-8 -*-
########## THIS FITTING PROGRAM IS MEANT TO FIT sinusoids to 'mitral responses to sinusoids'!
## USAGE: python2.6 fit_odor_morphs.py ../results/odor_morphs/2011-01-13_odormorph_SINGLES_JOINTS_PGS.pickle
from scipy import optimize
from scipy.special import * # has error function erf() and inverse erfinv()
from pylab import *
import pickle
import sys
import math
sys.path.extend(["..","../networks","../generators","../simulations"])
from stimuliConstants import * # has SETTLETIME, inputList and pulseList, GLOMS_ODOR, GLOMS_NIL
from networkConstants import * # has central_glom
from sim_utils import * # has rebin() to alter binsize
from analysis_utils import * # has read_morphfile() and NUM_REBINS, etc.
iterationnum = 0
## amplitude[sinnum], phase[sinnum] and DC offset are the params
NUMPARAMS = 2*num_sins+1
## I don't use the NUMBINS in simset_odor.py, rather I rebin()
bindt = 5e-3 #s
NUM_REBINS = int(SIN_RUNTIME/bindt)
### numbers of mitral to be fitted.
fitted_mitral_list = [2*central_glom+0, 2*central_glom+1]
FIT_LAT_SINS = True
## those sims for which const rate at central glom and sinusoids at lateral glom
if FIT_LAT_SINS:
filelist = [
#(5.0,'../results/odor_sins/2012_02_14_15_36_sins_SINGLES_JOINTS_NOPGS_numgloms2.pickle') # 20 to 60 Hz
#(5.0,'../results/odor_sins/2012_02_14_17_36_sins_SINGLES_JOINTS_NOPGS_numgloms2.pickle') # 1 to 15 Hz # 5 Hz central
#(5.0,'../results/odor_sins/2012_02_14_19_55_sins_SINGLES_JOINTS_NOPGS_numgloms2.pickle') # 1 to 15 Hz # 9 Hz central
#(5.0,'../results/odor_sins/2012_02_15_08_20_sins_SINGLES_JOINTS_NOPGS_numgloms2.pickle') # 1 to 15 Hz # 3 Hz central
(3.0,'../results/odor_sins/2012_02_15_21_54_sins_SINGLES_JOINTS_NOPGS_numgloms2.pickle') # 1 to 15 Hz # 3 Hz central
]
##
else:
## 30 trials, only mitrals
#filelist = [
#(1.0,'../results/odor_sins/2012_02_02_15_32_sins_NOSINGLES_NOJOINTS_NOPGS_NOLAT_numgloms2.pickle'),
#(2.0,'../results/odor_sins/2012_02_02_17_10_sins_NOSINGLES_NOJOINTS_NOPGS_NOLAT_numgloms2.pickle'),
#(3.0,'../results/odor_sins/2012_02_02_19_18_sins_NOSINGLES_NOJOINTS_NOPGS_NOLAT_numgloms2.pickle')
#]
## 40 trials, higher frequencies, only mitrals
filelist = [
(1.0,'../results/odor_sins/2012_02_04_13_47_sins_NOSINGLES_NOJOINTS_NOPGS_NOLAT_numgloms2.pickle')
]
## 40 trials, higher frequencies, only mitrals, 10x longer time
filelist = [
(1.0,'../results/odor_sins/2012_02_04_17_51_sins_NOSINGLES_NOJOINTS_NOPGS_NOLAT_numgloms2.pickle')
]
## 30 trials, mitrals + spines + singles + PGs
#filelist = [
#(1.0,'../results/odor_sins/2012_02_02_16_17_sins_SINGLES_NOJOINTS_PGS_NOLAT_numgloms2.pickle'),
#(2.0,'../results/odor_sins/2012_02_02_17_54_sins_SINGLES_NOJOINTS_PGS_NOLAT_numgloms2.pickle'),
#(3.0,'../results/odor_sins/2012_02_02_20_08_sins_SINGLES_NOJOINTS_PGS_NOLAT_numgloms2.pickle')
#]
## 40 trials, higher frequencies, mitrals + spines + singles + PGs:
#filelist = [
#(1.0,'../results/odor_sins/2012_02_04_14_00_sins_SINGLES_NOJOINTS_PGS_NOLAT_numgloms2.pickle')
#]
def chisqfunc(params, mitnum, ydata, errdata):
ampl = params[0:num_sins]
phase = params[num_sins:2*num_sins]
DC = params[-1]
global iterationnum
if iterationnum%100==0: print 'iteration number =',iterationnum
chisqarray = [0.0]
for sinnum,f in enumerate(sine_frequencies):
## Leave the first cycle of lowest frequency out for transient settling
## Take the first cycle after leaving above time out
startcyclenum = 1
startbin = int(startcyclenum/float(f)/bindt)
## ydata[sinnum][binnum], similar for errdata
data = ydata[sinnum]
error = errdata[sinnum]
omegabindt = 2*pi*f*bindt
for binnum in range(startbin,NUM_REBINS):
## ampl must be positive, sign appears via phase; phase modulo 2pi
Rmodel = DC + abs(ampl[sinnum]) * sin( omegabindt*binnum + (phase[sinnum]%(2*pi)) )
if Rmodel<0.0: Rmodel=0.0 # threshold if below zero
## divide by error to do chi-square fit
chisqarray.append( (data[binnum] - Rmodel)/error[binnum] )
## not yet squared, so normalized 'chi' to sqrt of number of dof
## ydata[sinnum][binnum]
chisqarray = array(chisqarray) / sqrt(ydata.size-NUMPARAMS)
iterationnum += 1
return chisqarray
def fit_sins(filename, fitted_mitral):
f = open(filename,'r')
mitral_responses_list = pickle.load(f)
f.close()
## mitral_responses_list[avgnum][sinnum][mitnum][spikenum]
mitral_responses_binned_list = \
rebin_pulses(mitral_responses_list, NUM_REBINS, SIN_RUNTIME, 0.0)
numavgs = len(mitral_responses_list)
mitral_responses_mean = mean(mitral_responses_binned_list, axis=0)
mitral_responses_std = std(mitral_responses_binned_list, axis=0)
## take only the responses of the mitral to be fitted
firingbinsmeanList = mitral_responses_mean[:,fitted_mitral,:]
firingbinserrList = mitral_responses_std[:,fitted_mitral,:]/sqrt(numavgs)
## amplitude of sine wave, phase shift and DC offset
params0 = [0.0]*num_sins+[0.0]*num_sins+[0.0]
## put in a minimum error, else divide by zero problems, or NaN value params and fits
## find the minimum error >= errcut
largeerrors = firingbinserrList[where(firingbinserrList>errcut)]
if len(largeerrors) > 0: errmin = largeerrors.min()
else: errmin = errcut
## numpy where(), replace by errmin,
## all those elements in firingbinsList which are less than errmin
firingbinserrList = where(firingbinserrList>errcut, firingbinserrList, errmin)
###################################### Fitting
params = optimize.leastsq( chisqfunc, params0,
args=(fitted_mitral, firingbinsmeanList, firingbinserrList),
full_output=1, maxfev=10000)
print params[3]
params = params[0] # leastsq returns a whole tuple of stuff - errmsg etc.
print "ampl[sinnum]+phase[sinnum]+DC =",params
## Calculate sum of squares of the chisqarray
chisqarraysq = [i**2 for i in
chisqfunc(params, fitted_mitral, firingbinsmeanList, firingbinserrList)]
chisq = reduce(lambda x, y: x+y, chisqarraysq)
############################## Calculate fitted responses and return them
DC_fit = params[-1]
ampl_fit = abs(params[0:num_sins])
phase_fit = params[num_sins:2*num_sins] % (2*pi)
fitted_responses = [ [ \
DC_fit + ampl_fit[sinnum] * sin( 2*pi*t*f + phase_fit[sinnum] ) \
for t in arange(0.0, SIN_RUNTIME, bindt) ] \
for sinnum,f in enumerate(sine_frequencies) ]
return (params,chisq,fitted_responses,firingbinsmeanList,firingbinserrList)
if __name__ == "__main__":
#if len(sys.argv) > 3:
#filename = sys.argv[1]
#ampl = float(sys.argv[2])
#DC = float(sys.argv[3])
#else:
#print "Specify responses data filename, sine amplitude, DC."
#sys.exit(1)
for fitted_mitral in fitted_mitral_list:
mainfig = figure(facecolor='w')
mainax = mainfig.add_subplot(111)
title('Mitral '+str(fitted_mitral)+' frequency response',fontsize=24)
mainfig2 = figure(facecolor='w')
mainax2 = mainfig2.add_subplot(111)
title('Mitral '+str(fitted_mitral)+' phase response',fontsize=24)
paramsList = []
for ampl,filename in filelist:
params,chisq,fitted_responses,firingbinsmeanList,firingbinserrList\
= fit_sins(filename, fitted_mitral)
print "Mit",fitted_mitral,"normalized chisq =",chisq
paramsList.append((ampl,params))
################# Plot simulated and fitted responses
if fitted_mitral != 0: continue
for sinnum in range(num_sins):
fig = figure(facecolor='w')
ax = fig.add_subplot(3,1,2)
sincolor = (sinnum+1) / float(num_sins)
## mean + error (lighter/whiter shade than mean below)
ax.plot(range(NUM_REBINS),\
firingbinsmeanList[sinnum]+firingbinserrList[sinnum],\
color=(0,(1-sincolor)*0.25+0.75,sincolor*0.25+0.75),\
marker='+',linestyle='solid', linewidth=2)
## mean
ax.plot(range(NUM_REBINS),firingbinsmeanList[sinnum],\
color=(0,1-sincolor,sincolor),\
marker='+',linestyle='solid', linewidth=2)
## fitted
ax.plot(range(NUM_REBINS),fitted_responses[sinnum],\
color=(1,1-sincolor,sincolor),\
marker='x',linestyle='solid', linewidth=2)
titlestr = 'Mitral %d response & sinusoid f=%f fit'\
%(fitted_mitral,sine_frequencies[sinnum])
title(titlestr, fontsize=24)
axes_labels(ax,'respiratory phase bin','firing rate (Hz)',adjustpos=True)
################# Plot frequency and phase responses
mainax.plot(sine_frequencies,abs(params[0:num_sins])/float(ampl),label=str(ampl)+'Hz ORN')
mainax2.plot(sine_frequencies,(params[0:num_sins]%(2*pi))/pi*180,label=str(ampl)+'Hz ORN')
axes_labels(mainax,'input frequency (Hz)','stimulus normalized output',adjustpos=True)
mainax.legend()
axes_labels(mainax2,'input frequency (Hz)','output phase (degrees)',adjustpos=True)
mainax2.legend()
show()
|
#!/usr/bin/env python3
from hdwallet import BIP44HDWallet
from hdwallet.cryptocurrencies import EthereumMainnet
from hdwallet.derivations import BIP44Derivation
from hdwallet.utils import generate_mnemonic
from hdwallet import HDWallet
from typing import Optional
import random, requests
from hdwallet.symbols import ETH as SYMBOL
def data_eth():
for address_index in range(divs):
bip44_derivation: BIP44Derivation = BIP44Derivation(
cryptocurrency=EthereumMainnet, account=0, change=False, address=address_index
)
bip44_hdwallet.from_path(path=bip44_derivation)
data.append({
'path': bip44_hdwallet.path(),
'address': bip44_hdwallet.address(),
'privatekey': bip44_hdwallet.private_key(),
'privatedec': int(bip44_hdwallet.private_key(), 16),
})
bip44_hdwallet.clean_derivation()
filename ='eth.txt'
with open(filename) as f:
line_count = 0
for line in f:
line != "\n"
line_count += 1
eth_list = [line.split()[0].lower() for line in open(filename,'r')]
eth_list = set(eth_list)
prompt= '''
************************ Main Menu Mizogg's ETH Tools *******************
* Single Check Tools *
* Option 1.ETH Address with TXS Check [Internet required]= 1 *
* Option 2.Hexadecimal to Decimal (HEX 2 DEC) [Internet required]= 2 *
* Option 3.Decimal to Hexadecimal (DEC 2 HEX) [Internet required]= 3 *
* Option 4.Mnemonic Words to dec and hex [Internet required]= 4 *
* Generators & Multi Check Tools *
* *
* Option 5.Mnemonic Words Generator Random Choice [Offline] = 5 *
* Option 6.Mnemonic Words Generator Random Choice [ONLINE] = 6 *
* *
************** Main Menu Mizogg's ETH Tools made in Python **************
Type Your Choice Here, Enter 1-6 :
'''
mylistapi = []
while True:
api1="?apiKey=freekey"
api2="?apiKey=freekey"
api3="?apiKey=freekey"
api4="?apiKey=freekey"
mylistapi=[str(api1), str(api2), str(api3), str(api4)]
apikeys=random.choice(mylistapi)
data = []
count=0
total= 0
start=int(input(prompt))
if start == 1:
print ('Ethereum Address Balance and Info Check Tool')
ethadd = str(input('Enter Your ETH Address Here : '))
blocs=requests.get("https://api.ethplorer.io/getAddressInfo/" + ethadd +apikeys)
ress = blocs.json()
address = dict(ress)['address']
countTxs = dict(ress)['countTxs']
ETHbalance = dict(ress)['ETH']['balance']
tokens = dict(ress)['tokens']
print(f'''
|==============================================|=======|=====================|
| Ethereum (ETH) Address |No. TXS|Balance |
|==============================================|=======|=====================|
| ''', address, ''' | ''', countTxs, ''' | ''', ETHbalance, ''' |
|==============================================|============|=======|=================================|
| Ethereum Token Address |HoldersCount|Symbol |Name of Token |
|==============================================|============|=======|=================================|''')
for row in tokens:
tokenInfo= row['tokenInfo']
taddress = tokenInfo['address']
symbol = tokenInfo['symbol']
holdersCount= tokenInfo['holdersCount']
name =tokenInfo['name']
print (' | ', taddress, ' | ', holdersCount, ' | ', symbol, '|', name, '|')
elif start == 2:
print('Hexadecimal to Decimal Tool')
HEX = str(input('Enter Your Hexadecimal HEX Here : '))
dec = int(HEX, 16)
length = len(bin(dec))
length -=2
PRIVATE_KEY = "%064x" % dec
hdwallet: HDWallet = HDWallet(symbol=SYMBOL)
hdwallet.from_private_key(private_key=PRIVATE_KEY)
ethadd = hdwallet.p2pkh_address()
print('\nHexadecimal = ',HEX, '\nTo Decimal = ', dec, ' bits ', length)
print("Cryptocurrency:", hdwallet.cryptocurrency())
print("Symbol:", hdwallet.symbol())
print("Network:", hdwallet.network())
print("Uncompressed:", hdwallet.uncompressed())
print("Compressed:", hdwallet.compressed())
print("Private Key:", hdwallet.private_key())
print("Public Key:", hdwallet.public_key())
print("Finger Print:", hdwallet.finger_print())
print("Hash:", hdwallet.hash())
blocs=requests.get("https://api.ethplorer.io/getAddressInfo/" + ethadd +apikeys)
ress = blocs.json()
address = dict(ress)['address']
countTxs = dict(ress)['countTxs']
ETHbalance = dict(ress)['ETH']['balance']
print(f'''
|==============================================|=======|=========|
| Ethereum (ETH) Address |No. TXS|Balance |
|==============================================|=======|=========|
| ''', address, ''' | ''', countTxs, ''' | ''', ETHbalance, ''' | ''')
elif start == 3:
print('Decimal to Hexadecimal Tool')
dec = int(input('Enter Your Decimal DEC Here : '))
HEX = "%064x" % dec
length = len(bin(dec))
length -=2
hdwallet: HDWallet = HDWallet(symbol=SYMBOL)
hdwallet.from_private_key(private_key=HEX)
ethadd = hdwallet.p2pkh_address()
print('\nDecimal = ', dec, ' bits ', length, '\nTo Hexadecimal = ', HEX)
print("Cryptocurrency:", hdwallet.cryptocurrency())
print("Symbol:", hdwallet.symbol())
print("Network:", hdwallet.network())
print("Uncompressed:", hdwallet.uncompressed())
print("Compressed:", hdwallet.compressed())
print("Private Key:", hdwallet.private_key())
print("Public Key:", hdwallet.public_key())
print("Finger Print:", hdwallet.finger_print())
print("Hash:", hdwallet.hash())
blocs=requests.get("https://api.ethplorer.io/getAddressInfo/" + ethadd +apikeys)
ress = blocs.json()
address = dict(ress)['address']
countTxs = dict(ress)['countTxs']
ETHbalance = dict(ress)['ETH']['balance']
print(f'''
|==============================================|=======|=========|
| Ethereum (ETH) Address |No. TXS|Balance |
|==============================================|=======|=========|
| ''', address, ''' | ''', countTxs, ''' | ''', ETHbalance, ''' | ''')
elif start ==4:
promptword= '''
************************* Mnemonic Words 12/15/18/21/24 tool *************************
* *
* 1-OWN WORDS to DEC & HEX with TX Check [Internet required] *
* 2-Generated WORDS to DEC & HEX with TX Check [Internet required] *
* Type 1-2 to Start *
* *
************************* Mnemonic Words 12/15/18/21/24 tool *************************
'''
startwords=int(input(promptword))
if startwords == 1:
MNEMONIC: str = input(' Type your Own Words Here = ')
Lang = int(input(' Choose language 1.english, 2.french, 3.italian, 4.spanish, 5.chinese_simplified, 6.chinese_traditional, 7.japanese or 8.korean '))
if Lang == 1:
Lang1 = "english"
elif Lang == 2:
Lang1 = "french"
elif Lang == 3:
Lang1 = "italian"
elif Lang == 4:
Lang1 = "spanish"
elif Lang == 5:
Lang1 = "chinese_simplified"
elif Lang == 6:
Lang1 = "chinese_traditional"
elif Lang == 7:
Lang1 = "japanese"
elif Lang == 8:
Lang1 = "korean"
else:
print("WRONG NUMBER!!! Starting with english")
Lang1 = "english"
PASSPHRASE: Optional[str] = None
bip44_hdwallet: BIP44HDWallet = BIP44HDWallet(cryptocurrency=EthereumMainnet)
bip44_hdwallet.from_mnemonic(
mnemonic=MNEMONIC, language=Lang1, passphrase=PASSPHRASE
)
bip44_hdwallet.clean_derivation()
mnemonic_words = bip44_hdwallet.mnemonic()
ethadd = bip44_hdwallet.address()
HEX = bip44_hdwallet.private_key()
dec = int(bip44_hdwallet.private_key(), 16)
length = len(bin(dec))
length -=2
print('\nmnemonic_words : ', mnemonic_words)
print('\nPrivatekey (dec): ', dec, ' bits ', length, '\nPrivatekey (hex): ', HEX)
blocs=requests.get("https://api.ethplorer.io/getAddressInfo/" + ethadd +apikeys)
ress = blocs.json()
address = dict(ress)['address']
countTxs = dict(ress)['countTxs']
ETHbalance = dict(ress)['ETH']['balance']
print(f'''
|==============================================|=======|=========|
| Ethereum (ETH) Address |No. TXS|Balance |
|==============================================|=======|=========|
| ''', address, ''' | ''', countTxs, ''' | ''', ETHbalance, ''' | ''')
if startwords == 2:
print('Mnemonic 12/15/18/21/24 Words to ETH Address Tool')
R = int(input('Enter Amount of Mnemonic Words 12/15/18/21/24 : '))
if R == 12:
s1 = 128
elif R == 15:
s1 = 160
elif R == 18:
s1 = 192
elif R == 21:
s1 = 224
elif R == 24:
s1 = 256
else:
print("WRONG NUMBER!!! Starting with 24 Words")
s1 = 256
Lang = int(input(' Choose language 1.english, 2.french, 3.italian, 4.spanish, 5.chinese_simplified, 6.chinese_traditional, 7.japanese or 8.korean '))
if Lang == 1:
Lang1 = "english"
elif Lang == 2:
Lang1 = "french"
elif Lang == 3:
Lang1 = "italian"
elif Lang == 4:
Lang1 = "spanish"
elif Lang == 5:
Lang1 = "chinese_simplified"
elif Lang == 6:
Lang1 = "chinese_traditional"
elif Lang == 7:
Lang1 = "japanese"
elif Lang == 8:
Lang1 = "korean"
else:
print("WRONG NUMBER!!! Starting with english")
Lang1 = "english"
MNEMONIC: str = generate_mnemonic(language=Lang1, strength=s1)
PASSPHRASE: Optional[str] = None
bip44_hdwallet: BIP44HDWallet = BIP44HDWallet(cryptocurrency=EthereumMainnet)
bip44_hdwallet.from_mnemonic(
mnemonic=MNEMONIC, language=Lang1, passphrase=PASSPHRASE
)
bip44_hdwallet.clean_derivation()
mnemonic_words = bip44_hdwallet.mnemonic()
ethadd = bip44_hdwallet.address()
HEX = bip44_hdwallet.private_key()
dec = int(bip44_hdwallet.private_key(), 16)
length = len(bin(dec))
length -=2
print('\nmnemonic_words : ', mnemonic_words)
print('\nPrivatekey (dec): ', dec, ' bits ', length, '\nPrivatekey (hex): ', HEX)
blocs=requests.get("https://api.ethplorer.io/getAddressInfo/" + ethadd +apikeys)
ress = blocs.json()
address = dict(ress)['address']
countTxs = dict(ress)['countTxs']
ETHbalance = dict(ress)['ETH']['balance']
print(f'''
|==============================================|=======|=========|
| Ethereum (ETH) Address |No. TXS|Balance |
|==============================================|=======|=========|
| ''', address, ''' | ''', countTxs, ''' | ''', ETHbalance, ''' | ''')
elif start ==5:
print('Mnemonic 12/15/18/21/24 Words to ETH Address Tool')
R = int(input('Enter Amount of Mnemonic Words 12/15/18/21/24 : '))
if R == 12:
s1 = 128
elif R == 15:
s1 = 160
elif R == 18:
s1 = 192
elif R == 21:
s1 = 224
elif R == 24:
s1 = 256
else:
print("WRONG NUMBER!!! Starting with 24 Words")
s1 = 256
divs = int(input("How Many Derivation Paths? m/44'/60'/0'/0/0/ to m/44'/60'/0'/0/???? -> "))
Lang = int(input(' Choose language 1.english, 2.french, 3.italian, 4.spanish, 5.chinese_simplified, 6.chinese_traditional, 7.japanese or 8.korean '))
if Lang == 1:
Lang1 = "english"
elif Lang == 2:
Lang1 = "french"
elif Lang == 3:
Lang1 = "italian"
elif Lang == 4:
Lang1 = "spanish"
elif Lang == 5:
Lang1 = "chinese_simplified"
elif Lang == 6:
Lang1 = "chinese_traditional"
elif Lang == 7:
Lang1 = "japanese"
elif Lang == 8:
Lang1 = "korean"
else:
print("WRONG NUMBER!!! Starting with english")
Lang1 = "english"
display = int(input('1=Full Display (Slower) 2=Silent Mode (Faster) : '))
while True:
data=[]
count += 1
total += divs
MNEMONIC: str = generate_mnemonic(language=Lang1, strength=s1)
PASSPHRASE: Optional[str] = None
bip44_hdwallet: BIP44HDWallet = BIP44HDWallet(cryptocurrency=EthereumMainnet)
bip44_hdwallet.from_mnemonic(
mnemonic=MNEMONIC, language=Lang1, passphrase=PASSPHRASE
)
bip44_hdwallet.clean_derivation()
mnemonic_words = bip44_hdwallet.mnemonic()
data_eth()
for target_wallet in data:
address = target_wallet['address'].lower()
if address in eth_list:
print('\nMatch Found')
print('\nmnemonic_words : ', mnemonic_words)
print('Derivation Path : ', target_wallet['path'], ' : ETH Address : ', target_wallet['address'])
print('Privatekey : ', target_wallet['privatekey'])
print('Privatekey DEC : ', target_wallet['privatedec'])
with open("winner.txt", "a") as f:
f.write(f"""\nMnemonic_words: {mnemonic_words}
Derivation Path: {target_wallet['path']}
Privatekey : {target_wallet['privatekey']}
Public Address ETH: {target_wallet['address']}
=====Made by mizogg.co.uk Donations 3GCypcW8LWzNfJEsTvcFwUny3ygPzpTfL4 =====""")
else:
if display == 1:
print(' [' + str(count) + '] ------------------------')
print('Total Checked [' + str(total) + '] ')
print('\nmnemonic_words : ', mnemonic_words)
for bad_wallet in data:
print('Derivation Path : ', bad_wallet['path'], ' : ETH Address : ', bad_wallet['address'])
print('Privatekey : ', bad_wallet['privatekey'])
print('Privatekey DEC : ', bad_wallet['privatedec'])
if display == 2:
print(' [' + str(count) + '] ------', 'Total Checked [' + str(total) + '] ', end='\r')
elif start ==6:
print('Mnemonic 12/15/18/21/24 Words to ETH Address Tool')
R = int(input('Enter Amount of Mnemonic Words 12/15/18/21/24 : '))
if R == 12:
s1 = 128
elif R == 15:
s1 = 160
elif R == 18:
s1 = 192
elif R == 21:
s1 = 224
elif R == 24:
s1 = 256
else:
print("WRONG NUMBER!!! Starting with 24 Words")
s1 = 256
divs = int(input("How Many Derivation Paths? m/44'/60'/0'/0/0/ to m/44'/60'/0'/0/???? -> "))
Lang = int(input(' Choose language 1.english, 2.french, 3.italian, 4.spanish, 5.chinese_simplified, 6.chinese_traditional, 7.japanese or 8.korean '))
if Lang == 1:
Lang1 = "english"
elif Lang == 2:
Lang1 = "french"
elif Lang == 3:
Lang1 = "italian"
elif Lang == 4:
Lang1 = "spanish"
elif Lang == 5:
Lang1 = "chinese_simplified"
elif Lang == 6:
Lang1 = "chinese_traditional"
elif Lang == 7:
Lang1 = "japanese"
elif Lang == 8:
Lang1 = "korean"
else:
print("WRONG NUMBER!!! Starting with english")
Lang1 = "english"
display = int(input('1=Full Display (Slower) 2=Silent Mode (Faster) : '))
while True:
data=[]
count += 1
total += divs
MNEMONIC: str = generate_mnemonic(language=Lang1, strength=s1)
PASSPHRASE: Optional[str] = None
bip44_hdwallet: BIP44HDWallet = BIP44HDWallet(cryptocurrency=EthereumMainnet)
bip44_hdwallet.from_mnemonic(
mnemonic=MNEMONIC, language=Lang1, passphrase=PASSPHRASE
)
bip44_hdwallet.clean_derivation()
mnemonic_words = bip44_hdwallet.mnemonic()
data_eth()
for target_wallet in data:
ethadd = target_wallet['address']
blocs=requests.get("https://api.ethplorer.io/getAddressInfo/" + ethadd +apikeys)
ress = blocs.json()
address = dict(ress)['address']
countTxs = dict(ress)['countTxs']
ETHbalance = dict(ress)['ETH']['balance']
print(f'''
|==============================================|=======|=========|=============|
| Ethereum (ETH) Address |No. TXS|Balance | Scan Number |
|==============================================|=======|=========|=============|
| {address} | {countTxs} | {ETHbalance} | {count} |''')
if countTxs > 0:
with open("winner.txt", "a") as f:
f.write(f"""\nMnemonic_words: {mnemonic_words}
Derivation Path: {target_wallet['path']}
Privatekey : {target_wallet['privatekey']}
Public Address ETH: {target_wallet['address']}""")
else:
print("WRONG NUMBER!!! MUST CHOSE 1 - 6 ")
|
from rest_framework import serializers
from users.models import CustomUser
class CustomUserSerializer(serializers.ModelSerializer):
"""
Serializer Class for CustomUser Model
"""
email = serializers.EmailField(required=True)
username = serializers.CharField(required=True)
password = serializers.CharField(min_length=8, write_only=True, style={'input_type': 'password'})
avatar = serializers.ImageField(required=False, use_url=True)
class Meta:
model = CustomUser
fields = ('id', 'email', 'username', 'password', 'avatar', 'first_name', 'last_name')
extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
# extract password from the request data
password = validated_data.pop('password', None)
instance = self.Meta.model(**validated_data)
# set Hashed password to the user
if password is not None:
instance.set_password(password)
instance.save()
return instance
def update(self, instance, validated_data):
# extract password from the request data
password = validated_data.pop('password', None)
instance = super(CustomUserSerializer, self).update(instance, validated_data)
# set Hashed password to the user
if password is not None:
instance.set_password(password)
instance.save()
return instance
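# --- Minimal usage sketch (illustrative only; assumes a configured Django project and request context) ---
# serializer = CustomUserSerializer(data={
#     "email": "user@example.com",
#     "username": "user",
#     "password": "s3cretpass",
# })
# if serializer.is_valid():
#     user = serializer.save()  # create() hashes the password via set_password()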
|
#
# PySNMP MIB module IANA-MAU-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/IANA-MAU-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:49:53 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibIdentifier, iso, mib_2, Counter64, ModuleIdentity, Unsigned32, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Bits, Counter32, IpAddress, ObjectIdentity, Integer32, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "iso", "mib-2", "Counter64", "ModuleIdentity", "Unsigned32", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Bits", "Counter32", "IpAddress", "ObjectIdentity", "Integer32", "Gauge32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ianaMauMIB = ModuleIdentity((1, 3, 6, 1, 2, 1, 154))
ianaMauMIB.setRevisions(('2014-08-01 00:00', '2014-05-22 00:00', '2011-08-12 00:00', '2010-02-23 00:00', '2007-04-21 00:00',))
if mibBuilder.loadTexts: ianaMauMIB.setLastUpdated('201408010000Z')
if mibBuilder.loadTexts: ianaMauMIB.setOrganization('IANA')
class IANAifMauTypeListBits(TextualConvention, Bits):
reference = '[IEEE802.3], Section 30.5.1.1.2'
status = 'current'
namedValues = NamedValues(("bOther", 0), ("bAUI", 1), ("b10base5", 2), ("bFoirl", 3), ("b10base2", 4), ("b10baseT", 5), ("b10baseFP", 6), ("b10baseFB", 7), ("b10baseFL", 8), ("b10broad36", 9), ("b10baseTHD", 10), ("b10baseTFD", 11), ("b10baseFLHD", 12), ("b10baseFLFD", 13), ("b100baseT4", 14), ("b100baseTXHD", 15), ("b100baseTXFD", 16), ("b100baseFXHD", 17), ("b100baseFXFD", 18), ("b100baseT2HD", 19), ("b100baseT2FD", 20), ("b1000baseXHD", 21), ("b1000baseXFD", 22), ("b1000baseLXHD", 23), ("b1000baseLXFD", 24), ("b1000baseSXHD", 25), ("b1000baseSXFD", 26), ("b1000baseCXHD", 27), ("b1000baseCXFD", 28), ("b1000baseTHD", 29), ("b1000baseTFD", 30), ("b10GbaseX", 31), ("b10GbaseLX4", 32), ("b10GbaseR", 33), ("b10GbaseER", 34), ("b10GbaseLR", 35), ("b10GbaseSR", 36), ("b10GbaseW", 37), ("b10GbaseEW", 38), ("b10GbaseLW", 39), ("b10GbaseSW", 40), ("b10GbaseCX4", 41), ("b2BaseTL", 42), ("b10PassTS", 43), ("b100BaseBX10D", 44), ("b100BaseBX10U", 45), ("b100BaseLX10", 46), ("b1000BaseBX10D", 47), ("b1000BaseBX10U", 48), ("b1000BaseLX10", 49), ("b1000BasePX10D", 50), ("b1000BasePX10U", 51), ("b1000BasePX20D", 52), ("b1000BasePX20U", 53), ("b10GbaseT", 54), ("b10GbaseLRM", 55), ("b1000baseKX", 56), ("b10GbaseKX4", 57), ("b10GbaseKR", 58), ("b10G1GbasePRXD1", 59), ("b10G1GbasePRXD2", 60), ("b10G1GbasePRXD3", 61), ("b10G1GbasePRXU1", 62), ("b10G1GbasePRXU2", 63), ("b10G1GbasePRXU3", 64), ("b10GbasePRD1", 65), ("b10GbasePRD2", 66), ("b10GbasePRD3", 67), ("b10GbasePRU1", 68), ("b10GbasePRU3", 69), ("b40GbaseKR4", 70), ("b40GbaseCR4", 71), ("b40GbaseSR4", 72), ("b40GbaseFR", 73), ("b40GbaseLR4", 74), ("b100GbaseCR10", 75), ("b100GbaseSR10", 76), ("b100GbaseLR4", 77), ("b100GbaseER4", 78))
class IANAifMauMediaAvailable(TextualConvention, Integer32):
reference = '[IEEE802.3], Section 30.5.1.1.4'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20))
namedValues = NamedValues(("other", 1), ("unknown", 2), ("available", 3), ("notAvailable", 4), ("remoteFault", 5), ("invalidSignal", 6), ("remoteJabber", 7), ("remoteLinkLoss", 8), ("remoteTest", 9), ("offline", 10), ("autoNegError", 11), ("pmdLinkFault", 12), ("wisFrameLoss", 13), ("wisSignalLoss", 14), ("pcsLinkFault", 15), ("excessiveBER", 16), ("dxsLinkFault", 17), ("pxsLinkFault", 18), ("availableReduced", 19), ("ready", 20))
class IANAifMauAutoNegCapBits(TextualConvention, Bits):
reference = '[IEEE802.3], Section 30.6.1.1.5'
status = 'current'
namedValues = NamedValues(("bOther", 0), ("b10baseT", 1), ("b10baseTFD", 2), ("b100baseT4", 3), ("b100baseTX", 4), ("b100baseTXFD", 5), ("b100baseT2", 6), ("b100baseT2FD", 7), ("bFdxPause", 8), ("bFdxAPause", 9), ("bFdxSPause", 10), ("bFdxBPause", 11), ("b1000baseX", 12), ("b1000baseXFD", 13), ("b1000baseT", 14), ("b1000baseTFD", 15), ("b10GbaseT", 16), ("b1000baseKX", 17), ("b10GbaseKX4", 18), ("b10GbaseKR", 19), ("b40GbaseKR4", 20), ("b40GbaseCR4", 21), ("b100GbaseCR10", 22))
class IANAifJackType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16))
namedValues = NamedValues(("other", 1), ("rj45", 2), ("rj45S", 3), ("db9", 4), ("bnc", 5), ("fAUI", 6), ("mAUI", 7), ("fiberSC", 8), ("fiberMIC", 9), ("fiberST", 10), ("telco", 11), ("mtrj", 12), ("hssdc", 13), ("fiberLC", 14), ("cx4", 15), ("sfpPlusDA", 16))
dot3MauType = MibIdentifier((1, 3, 6, 1, 2, 1, 26, 4))
dot3MauTypeAUI = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 1))
if mibBuilder.loadTexts: dot3MauTypeAUI.setStatus('current')
dot3MauType10Base5 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 2))
if mibBuilder.loadTexts: dot3MauType10Base5.setStatus('current')
dot3MauTypeFoirl = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 3))
if mibBuilder.loadTexts: dot3MauTypeFoirl.setStatus('current')
dot3MauType10Base2 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 4))
if mibBuilder.loadTexts: dot3MauType10Base2.setStatus('current')
dot3MauType10BaseT = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 5))
if mibBuilder.loadTexts: dot3MauType10BaseT.setStatus('current')
dot3MauType10BaseFP = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 6))
if mibBuilder.loadTexts: dot3MauType10BaseFP.setStatus('current')
dot3MauType10BaseFB = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 7))
if mibBuilder.loadTexts: dot3MauType10BaseFB.setStatus('current')
dot3MauType10BaseFL = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 8))
if mibBuilder.loadTexts: dot3MauType10BaseFL.setStatus('current')
dot3MauType10Broad36 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 9))
if mibBuilder.loadTexts: dot3MauType10Broad36.setStatus('current')
dot3MauType10BaseTHD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 10))
if mibBuilder.loadTexts: dot3MauType10BaseTHD.setStatus('current')
dot3MauType10BaseTFD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 11))
if mibBuilder.loadTexts: dot3MauType10BaseTFD.setStatus('current')
dot3MauType10BaseFLHD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 12))
if mibBuilder.loadTexts: dot3MauType10BaseFLHD.setStatus('current')
dot3MauType10BaseFLFD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 13))
if mibBuilder.loadTexts: dot3MauType10BaseFLFD.setStatus('current')
dot3MauType100BaseT4 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 14))
if mibBuilder.loadTexts: dot3MauType100BaseT4.setStatus('current')
dot3MauType100BaseTXHD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 15))
if mibBuilder.loadTexts: dot3MauType100BaseTXHD.setStatus('current')
dot3MauType100BaseTXFD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 16))
if mibBuilder.loadTexts: dot3MauType100BaseTXFD.setStatus('current')
dot3MauType100BaseFXHD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 17))
if mibBuilder.loadTexts: dot3MauType100BaseFXHD.setStatus('current')
dot3MauType100BaseFXFD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 18))
if mibBuilder.loadTexts: dot3MauType100BaseFXFD.setStatus('current')
dot3MauType100BaseT2HD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 19))
if mibBuilder.loadTexts: dot3MauType100BaseT2HD.setStatus('current')
dot3MauType100BaseT2FD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 20))
if mibBuilder.loadTexts: dot3MauType100BaseT2FD.setStatus('current')
dot3MauType1000BaseXHD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 21))
if mibBuilder.loadTexts: dot3MauType1000BaseXHD.setStatus('current')
dot3MauType1000BaseXFD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 22))
if mibBuilder.loadTexts: dot3MauType1000BaseXFD.setStatus('current')
dot3MauType1000BaseLXHD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 23))
if mibBuilder.loadTexts: dot3MauType1000BaseLXHD.setStatus('current')
dot3MauType1000BaseLXFD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 24))
if mibBuilder.loadTexts: dot3MauType1000BaseLXFD.setStatus('current')
dot3MauType1000BaseSXHD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 25))
if mibBuilder.loadTexts: dot3MauType1000BaseSXHD.setStatus('current')
dot3MauType1000BaseSXFD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 26))
if mibBuilder.loadTexts: dot3MauType1000BaseSXFD.setStatus('current')
dot3MauType1000BaseCXHD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 27))
if mibBuilder.loadTexts: dot3MauType1000BaseCXHD.setStatus('current')
dot3MauType1000BaseCXFD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 28))
if mibBuilder.loadTexts: dot3MauType1000BaseCXFD.setStatus('current')
dot3MauType1000BaseTHD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 29))
if mibBuilder.loadTexts: dot3MauType1000BaseTHD.setStatus('current')
dot3MauType1000BaseTFD = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 30))
if mibBuilder.loadTexts: dot3MauType1000BaseTFD.setStatus('current')
dot3MauType10GigBaseX = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 31))
if mibBuilder.loadTexts: dot3MauType10GigBaseX.setStatus('current')
dot3MauType10GigBaseLX4 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 32))
if mibBuilder.loadTexts: dot3MauType10GigBaseLX4.setStatus('current')
dot3MauType10GigBaseR = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 33))
if mibBuilder.loadTexts: dot3MauType10GigBaseR.setStatus('current')
dot3MauType10GigBaseER = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 34))
if mibBuilder.loadTexts: dot3MauType10GigBaseER.setStatus('current')
dot3MauType10GigBaseLR = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 35))
if mibBuilder.loadTexts: dot3MauType10GigBaseLR.setStatus('current')
dot3MauType10GigBaseSR = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 36))
if mibBuilder.loadTexts: dot3MauType10GigBaseSR.setStatus('current')
dot3MauType10GigBaseW = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 37))
if mibBuilder.loadTexts: dot3MauType10GigBaseW.setStatus('current')
dot3MauType10GigBaseEW = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 38))
if mibBuilder.loadTexts: dot3MauType10GigBaseEW.setStatus('current')
dot3MauType10GigBaseLW = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 39))
if mibBuilder.loadTexts: dot3MauType10GigBaseLW.setStatus('current')
dot3MauType10GigBaseSW = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 40))
if mibBuilder.loadTexts: dot3MauType10GigBaseSW.setStatus('current')
dot3MauType10GigBaseCX4 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 41))
if mibBuilder.loadTexts: dot3MauType10GigBaseCX4.setStatus('current')
dot3MauType2BaseTL = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 42))
if mibBuilder.loadTexts: dot3MauType2BaseTL.setStatus('current')
dot3MauType10PassTS = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 43))
if mibBuilder.loadTexts: dot3MauType10PassTS.setStatus('current')
dot3MauType100BaseBX10D = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 44))
if mibBuilder.loadTexts: dot3MauType100BaseBX10D.setStatus('current')
dot3MauType100BaseBX10U = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 45))
if mibBuilder.loadTexts: dot3MauType100BaseBX10U.setStatus('current')
dot3MauType100BaseLX10 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 46))
if mibBuilder.loadTexts: dot3MauType100BaseLX10.setStatus('current')
dot3MauType1000BaseBX10D = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 47))
if mibBuilder.loadTexts: dot3MauType1000BaseBX10D.setStatus('current')
dot3MauType1000BaseBX10U = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 48))
if mibBuilder.loadTexts: dot3MauType1000BaseBX10U.setStatus('current')
dot3MauType1000BaseLX10 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 49))
if mibBuilder.loadTexts: dot3MauType1000BaseLX10.setStatus('current')
dot3MauType1000BasePX10D = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 50))
if mibBuilder.loadTexts: dot3MauType1000BasePX10D.setStatus('current')
dot3MauType1000BasePX10U = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 51))
if mibBuilder.loadTexts: dot3MauType1000BasePX10U.setStatus('current')
dot3MauType1000BasePX20D = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 52))
if mibBuilder.loadTexts: dot3MauType1000BasePX20D.setStatus('current')
dot3MauType1000BasePX20U = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 53))
if mibBuilder.loadTexts: dot3MauType1000BasePX20U.setStatus('current')
dot3MauType10GbaseT = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 54))
if mibBuilder.loadTexts: dot3MauType10GbaseT.setStatus('current')
dot3MauType10GbaseLRM = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 55))
if mibBuilder.loadTexts: dot3MauType10GbaseLRM.setStatus('current')
dot3MauType1000baseKX = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 56))
if mibBuilder.loadTexts: dot3MauType1000baseKX.setStatus('current')
dot3MauType10GbaseKX4 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 57))
if mibBuilder.loadTexts: dot3MauType10GbaseKX4.setStatus('current')
dot3MauType10GbaseKR = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 58))
if mibBuilder.loadTexts: dot3MauType10GbaseKR.setStatus('current')
dot3MauType10G1GbasePRXD1 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 59))
if mibBuilder.loadTexts: dot3MauType10G1GbasePRXD1.setStatus('current')
dot3MauType10G1GbasePRXD2 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 60))
if mibBuilder.loadTexts: dot3MauType10G1GbasePRXD2.setStatus('current')
dot3MauType10G1GbasePRXD3 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 61))
if mibBuilder.loadTexts: dot3MauType10G1GbasePRXD3.setStatus('current')
dot3MauType10G1GbasePRXU1 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 62))
if mibBuilder.loadTexts: dot3MauType10G1GbasePRXU1.setStatus('current')
dot3MauType10G1GbasePRXU2 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 63))
if mibBuilder.loadTexts: dot3MauType10G1GbasePRXU2.setStatus('current')
dot3MauType10G1GbasePRXU3 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 64))
if mibBuilder.loadTexts: dot3MauType10G1GbasePRXU3.setStatus('current')
dot3MauType10GbasePRD1 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 65))
if mibBuilder.loadTexts: dot3MauType10GbasePRD1.setStatus('current')
dot3MauType10GbasePRD2 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 66))
if mibBuilder.loadTexts: dot3MauType10GbasePRD2.setStatus('current')
dot3MauType10GbasePRD3 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 67))
if mibBuilder.loadTexts: dot3MauType10GbasePRD3.setStatus('current')
dot3MauType10GbasePRU1 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 68))
if mibBuilder.loadTexts: dot3MauType10GbasePRU1.setStatus('current')
dot3MauType10GbasePRU3 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 69))
if mibBuilder.loadTexts: dot3MauType10GbasePRU3.setStatus('current')
dot3MauType40GbaseKR4 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 70))
if mibBuilder.loadTexts: dot3MauType40GbaseKR4.setStatus('current')
dot3MauType40GbaseCR4 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 71))
if mibBuilder.loadTexts: dot3MauType40GbaseCR4.setStatus('current')
dot3MauType40GbaseSR4 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 72))
if mibBuilder.loadTexts: dot3MauType40GbaseSR4.setStatus('current')
dot3MauType40GbaseFR = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 73))
if mibBuilder.loadTexts: dot3MauType40GbaseFR.setStatus('current')
dot3MauType40GbaseLR4 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 74))
if mibBuilder.loadTexts: dot3MauType40GbaseLR4.setStatus('current')
dot3MauType100GbaseCR10 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 75))
if mibBuilder.loadTexts: dot3MauType100GbaseCR10.setStatus('current')
dot3MauType100GbaseSR10 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 76))
if mibBuilder.loadTexts: dot3MauType100GbaseSR10.setStatus('current')
dot3MauType100GbaseLR4 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 77))
if mibBuilder.loadTexts: dot3MauType100GbaseLR4.setStatus('current')
dot3MauType100GbaseER4 = ObjectIdentity((1, 3, 6, 1, 2, 1, 26, 4, 78))
if mibBuilder.loadTexts: dot3MauType100GbaseER4.setStatus('current')
mibBuilder.exportSymbols("IANA-MAU-MIB", dot3MauType=dot3MauType, dot3MauType10GbasePRD2=dot3MauType10GbasePRD2, dot3MauType10Broad36=dot3MauType10Broad36, dot3MauType10G1GbasePRXD1=dot3MauType10G1GbasePRXD1, dot3MauType1000BaseBX10D=dot3MauType1000BaseBX10D, dot3MauType40GbaseSR4=dot3MauType40GbaseSR4, dot3MauType10GigBaseLR=dot3MauType10GigBaseLR, dot3MauType100BaseLX10=dot3MauType100BaseLX10, dot3MauType10GbaseLRM=dot3MauType10GbaseLRM, dot3MauType100GbaseER4=dot3MauType100GbaseER4, dot3MauType100BaseFXFD=dot3MauType100BaseFXFD, dot3MauType10BaseFLFD=dot3MauType10BaseFLFD, IANAifJackType=IANAifJackType, IANAifMauMediaAvailable=IANAifMauMediaAvailable, dot3MauType100BaseBX10D=dot3MauType100BaseBX10D, dot3MauType10GbasePRU1=dot3MauType10GbasePRU1, dot3MauType1000BaseXFD=dot3MauType1000BaseXFD, dot3MauType1000BaseLXHD=dot3MauType1000BaseLXHD, dot3MauType10BaseFP=dot3MauType10BaseFP, dot3MauType10GigBaseR=dot3MauType10GigBaseR, dot3MauType1000BaseBX10U=dot3MauType1000BaseBX10U, dot3MauType10Base5=dot3MauType10Base5, dot3MauType10PassTS=dot3MauType10PassTS, dot3MauType10GbaseKX4=dot3MauType10GbaseKX4, dot3MauType10GbasePRD3=dot3MauType10GbasePRD3, dot3MauType1000BasePX10U=dot3MauType1000BasePX10U, dot3MauType100BaseT4=dot3MauType100BaseT4, dot3MauType100GbaseCR10=dot3MauType100GbaseCR10, dot3MauType100GbaseLR4=dot3MauType100GbaseLR4, dot3MauType10GigBaseX=dot3MauType10GigBaseX, dot3MauType100GbaseSR10=dot3MauType100GbaseSR10, dot3MauType1000BaseCXHD=dot3MauType1000BaseCXHD, dot3MauType1000baseKX=dot3MauType1000baseKX, dot3MauType10GigBaseER=dot3MauType10GigBaseER, dot3MauType10BaseFLHD=dot3MauType10BaseFLHD, PYSNMP_MODULE_ID=ianaMauMIB, dot3MauType100BaseBX10U=dot3MauType100BaseBX10U, dot3MauType1000BaseCXFD=dot3MauType1000BaseCXFD, dot3MauType10GigBaseEW=dot3MauType10GigBaseEW, dot3MauType10GbaseT=dot3MauType10GbaseT, dot3MauType1000BasePX10D=dot3MauType1000BasePX10D, dot3MauType40GbaseFR=dot3MauType40GbaseFR, dot3MauType10G1GbasePRXU1=dot3MauType10G1GbasePRXU1, dot3MauType10GigBaseSR=dot3MauType10GigBaseSR, dot3MauType1000BaseTFD=dot3MauType1000BaseTFD, dot3MauType2BaseTL=dot3MauType2BaseTL, dot3MauType10GbasePRU3=dot3MauType10GbasePRU3, dot3MauType100BaseFXHD=dot3MauType100BaseFXHD, dot3MauType100BaseTXHD=dot3MauType100BaseTXHD, dot3MauType10Base2=dot3MauType10Base2, dot3MauType10GigBaseSW=dot3MauType10GigBaseSW, dot3MauType10GigBaseW=dot3MauType10GigBaseW, dot3MauType10GbasePRD1=dot3MauType10GbasePRD1, dot3MauType10G1GbasePRXU2=dot3MauType10G1GbasePRXU2, dot3MauType1000BaseLX10=dot3MauType1000BaseLX10, dot3MauType1000BasePX20U=dot3MauType1000BasePX20U, dot3MauType10BaseFB=dot3MauType10BaseFB, dot3MauType1000BaseSXHD=dot3MauType1000BaseSXHD, IANAifMauAutoNegCapBits=IANAifMauAutoNegCapBits, dot3MauType10BaseT=dot3MauType10BaseT, dot3MauType40GbaseLR4=dot3MauType40GbaseLR4, dot3MauType10G1GbasePRXD2=dot3MauType10G1GbasePRXD2, dot3MauType10GigBaseLX4=dot3MauType10GigBaseLX4, ianaMauMIB=ianaMauMIB, dot3MauType1000BaseXHD=dot3MauType1000BaseXHD, dot3MauType100BaseT2HD=dot3MauType100BaseT2HD, dot3MauType1000BaseSXFD=dot3MauType1000BaseSXFD, dot3MauType1000BaseTHD=dot3MauType1000BaseTHD, IANAifMauTypeListBits=IANAifMauTypeListBits, dot3MauType100BaseT2FD=dot3MauType100BaseT2FD, dot3MauType1000BaseLXFD=dot3MauType1000BaseLXFD, dot3MauType10GigBaseCX4=dot3MauType10GigBaseCX4, dot3MauType40GbaseCR4=dot3MauType40GbaseCR4, dot3MauType10BaseFL=dot3MauType10BaseFL, dot3MauType10G1GbasePRXD3=dot3MauType10G1GbasePRXD3, dot3MauType10G1GbasePRXU3=dot3MauType10G1GbasePRXU3, 
dot3MauType10GbaseKR=dot3MauType10GbaseKR, dot3MauType1000BasePX20D=dot3MauType1000BasePX20D, dot3MauType10BaseTFD=dot3MauType10BaseTFD, dot3MauType100BaseTXFD=dot3MauType100BaseTXFD, dot3MauType40GbaseKR4=dot3MauType40GbaseKR4, dot3MauType10BaseTHD=dot3MauType10BaseTHD, dot3MauTypeAUI=dot3MauTypeAUI, dot3MauTypeFoirl=dot3MauTypeFoirl, dot3MauType10GigBaseLW=dot3MauType10GigBaseLW)
|
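# Emits two 14x18 blocks of hexadecimal constants (starting at 0x4100 and 0x6200)
# formatted as the body of a nested C-style array initializer.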
print('{{')
for y in range(14):
print('\t{ ', end='')
for x in range(18):
print(hex(0x4100 + x + y * 18) + ', ', end='')
print(' },')
print('},{')
for y in range(14):
print('\t{ ', end='')
for x in range(18):
print(hex(0x6200 + x + y * 18) + ', ', end='')
print(' },')
print('}};')
|
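# Demonstrates str.istitle(): it returns True only when the string contains at
# least one cased character and every cased run starts with an uppercase or
# titlecase letter followed only by lowercase letters.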
s = 'A Big Cat'
print(s.istitle())
s = 'A BIG Cat'
print(s.istitle())
s = 'A big Cat'
print(s.istitle())
s = 'a big cat'
print(s.istitle())
s = 'A'
print(s.istitle())
s = ''
print(s.istitle())
s = '2019'
print(s.istitle())
# special characters
s = 'Â Ɓig Ȼat'
print(s.istitle())
import unicodedata
count = 0
for codepoint in range(2 ** 16):
ch = chr(codepoint)
if ch.istitle():
print(u'{:04x}: {} ({})'.format(codepoint, ch, unicodedata.name(ch, 'UNNAMED')))
count = count + 1
print(f'Total Number of Title Unicode Characters = {count}')
|
import config_main
from Application.Schedulers.simple_RR import run_rr
from Application.Frame.transferJobPorts import prepare_ports_new_wave, log_to_console_exchange_ports
from Utils.log_handler import log_to_console, log_setup_info_to_console
from Application.Utils.TimeLogger import Timer
from Application.Utils.parseInputFolder import get_picture_size_and_number, get_video_capture, get_camera_capture, release_video, \
clear_input_img_dir
from Application.Utils.parseJsonFile import get_jobs
from Application.Utils.image_handler import show_pictures, save_pict_to_file
from Application.Frame.job_handler import job_creation, init_jobs, log_to_console_avg_time, terminate_jobs
from Application.Frame.global_variables import global_var_handler
from Application.Jobs.get_image import get_used_size_values
def run_application():
"""
Main function of system
:return: None
"""
# initialize timers
timer_setup = Timer()
timer_init = Timer()
timer_application = Timer()
timer_wave = Timer()
timer_post_processing = Timer()
if config_main.APPL_INPUT is not None:
log_setup_info_to_console("SETUP STEP")
timer_setup.start_cycle_timer()
global_var_handler()
if config_main.APPL_INPUT == config_main.IMAGE_INPUT:
get_picture_size_and_number()
elif config_main.APPL_INPUT == config_main.IMAGE_TXT_INPUT:
get_picture_size_and_number(is_txt=True)
elif config_main.APPL_INPUT == config_main.VIDEO_INPUT:
get_video_capture()
elif config_main.APPL_INPUT == config_main.CAMERA_INPUT:
get_camera_capture()
log_setup_info_to_console("JOB CREATION STEP")
job_list = job_creation(job_description=get_jobs(json_file=config_main.APPL_INPUT_JOB_LIST))
timer_setup.end_cycle_timer()
timer_setup.cycle_updater()
log_setup_info_to_console("JOB INIT STEP")
timer_init.start_cycle_timer()
init_jobs(list_jobs=job_list)
timer_init.end_cycle_timer()
timer_init.cycle_updater()
log_setup_info_to_console("JOB RUN STEP")
timer_application.start_cycle_timer()
# noinspection PyUnresolvedReferences,PyUnresolvedReferences
while global_var_handler.FRAME < global_var_handler.NR_PICTURES:
timer_wave.start_cycle_timer()
# noinspection PyUnresolvedReferences
log_to_console('FRAME {}'.format(global_var_handler.FRAME))
run_rr(jobs=job_list)
timer_wave.end_cycle_timer()
timer_wave.cycle_updater()
timer_post_processing.start_cycle_timer()
show_pictures()
save_pict_to_file()
timer_post_processing.end_cycle_timer()
timer_post_processing.cycle_updater()
# noinspection PyUnresolvedReferences
global_var_handler.FRAME += 1
# noinspection PyUnresolvedReferences
prepare_ports_new_wave(frame=global_var_handler.FRAME)
timer_application.end_cycle_timer()
timer_application.cycle_updater()
log_setup_info_to_console("TERMINATE STEP")
terminate_jobs(job_list)
log_to_console("IMAGE SIZE USED IN APPLICATION: {}".format(get_used_size_values()))
if config_main.APPL_INPUT == config_main.VIDEO_INPUT or config_main.APPL_INPUT == config_main.CAMERA_INPUT:
release_video()
else:
clear_input_img_dir()
log_setup_info_to_console("PHASE SETUP AVERAGE TIME[s] : {time:10.10f}".format(time=timer_setup.__average_time_sum__))
log_setup_info_to_console("PHASE INIT AVERAGE TIME[s] : {time:10.10f}".format(time=timer_init.__average_time_sum__))
log_setup_info_to_console(
"PHASE WAVE AVERAGE TIME[s] : {time:10.10f}".format(time=timer_wave.get_average_time_seconds()))
log_setup_info_to_console(
"PHASE POST PROCESSING AVERAGE TIME[s] : {time:10.10f}".format(time=timer_post_processing.get_average_time_seconds()))
log_setup_info_to_console(
"PHASE RUN AVERAGE TIME[s] : {time:10.10f}".format(time=timer_application.__average_time_sum__))
log_to_console_avg_time(job_list)
log_to_console_exchange_ports()
else:
log_setup_info_to_console('NO INPUT FOR APPLICATION')
if __name__ == "__main__":
config_main.APPL_INPUT_DIR = '../' + config_main.APPL_INPUT_DIR
config_main.APPL_INPUT_JOB_LIST = config_main.APPL_INPUT_JOB_LIST.replace('Application/', '')
config_main.APPL_SAVE_LOCATION = '../' + config_main.APPL_SAVE_LOCATION
run_application()
|
# -*- coding: utf-8 -*-
from abc import ABC
from dataclasses import dataclass, field
from enum import Enum
from immutabledict import immutabledict
@dataclass(frozen=True)
class VariableFactory:
"""Creation of related Variables based on given shifts (in months)."""
rank: int
name: str
units: str
def __getitem__(self, months):
if months not in lags:
raise ValueError(f"Unsupported months '{months}'. Expected one of {lags}.")
return StandardVariable(
rank=self.rank, name=self.name, shift=months, units=self.units, parent=self
)
def __str__(self):
return self.name
@dataclass(frozen=True, order=True, init=False)
class Variable(ABC):
"""A variable with its associated name, shift (in months), and units."""
rank: int
name: str = field(compare=False)
shift: int
units: str = field(compare=False)
parent: VariableFactory = field(compare=False)
def get_offset(self):
"""Return a transformed Variable if there is a large (>12) shift."""
if self.shift >= 12 and not isinstance(self, OffsetVariable):
return OffsetVariable(
rank=self.rank,
name=self.name,
shift=self.shift,
units=self.units,
parent=self.parent,
)
return self
def get_standard(self):
"""The inverse of `get_offset()`."""
if self.shift >= 12 and isinstance(self, OffsetVariable):
return StandardVariable(
rank=self.rank,
name=self.name,
shift=self.shift,
units=self.units,
parent=self.parent,
)
return self
@dataclass(frozen=True, order=True)
class StandardVariable(Variable):
@property
def _fill_root(self):
"""Add the fill params if needed."""
if self.parent in filled_variables:
return f"{self.name} {st_persistent_perc}P {st_k}k"
return self.name
@property
def _nn_fill_root(self):
"""Add the fill params if needed."""
if self.parent in filled_variables:
return f"{self.name} {nn_n_months}NN"
return self.name
@property
def filled(self):
"""Filled name (if applicable)."""
if self.parent in filled_variables:
return f"{self._fill_root} {self.shift}M"
return self._fill_root
@property
def nn_filled(self):
"""Filled name (if applicable)."""
if self.parent in filled_variables:
return f"{self._nn_fill_root} {self.shift}M"
return self._nn_fill_root
def __str__(self):
if self.shift != 0:
return f"{self.name} {self.shift}M"
return self.name
@property
def raw(self):
if self.shift != 0:
return f"{self.name} -{self.shift} Month"
return self.name
@property
def raw_filled(self):
if self.shift != 0:
return f"{self._fill_root} -{self.shift} Month"
return self._fill_root
@property
def raw_nn_filled(self):
if self.shift != 0:
return f"{self._nn_fill_root} -{self.shift} Month"
return self._nn_fill_root
@dataclass(frozen=True, order=True)
class OffsetVariable(Variable):
"""A variable with its associated name, shift (in months), and units.
This variable represents an anomaly from its own values shift % 12 months ago.
"""
comp_shift: int = field(init=False, compare=False)
def __post_init__(self):
if self.shift < 12:
raise ValueError(f"Expected a shift >= 12, got '{self.shift}'.")
object.__setattr__(self, "comp_shift", self.shift % 12)
def __str__(self):
return f"{self.name} Δ{self.shift}M"
def sort_variables(variables):
"""Sort variables based on their rank and shift.
Note that this relies on all variables having a unique rank.
"""
return tuple(sorted(variables, key=lambda v: (v.rank, v.shift)))
def get_matching(variables, strict=True, single=True, **criteria):
"""Given a set of criteria, find the matching variables(s).
Args:
variables (iterable of Variable): Variables to match against.
strict (bool): If True, require that at least one match is found (see
`single`).
single (bool): If True, require that exactly one variable is found.
**criteria: Criteria to match against, e.g. {'name': 'FAPAR'}.
Returns:
Variable: If `single` is True.
tuple of Variable: Otherwise.
Raises:
RuntimeError: If no matching variable was found.
RuntimeError: If `single` is True and more than a single matching variable was
found.
"""
matching = []
for var in variables:
for crit_name, crit_info in criteria.items():
if getattr(var, crit_name) == crit_info:
continue
else:
break
else:
matching.append(var)
if not matching and strict:
raise RuntimeError("No matching variables were found.")
if single:
if len(matching) > 1:
raise RuntimeError(
f"Expected to find 1 matching variable. Found '{matching}'."
)
if not matching:
return ()
return matching[0]
return tuple(matching)
def match_factory(variable, factories):
"""Match variable to VariableFactory using rank, name, and units.
Args:
variable (Variable): Variable to match.
factories (VariableFactory or tuple of VariableFactory): VariableFactory to
check against.
Returns:
bool: True if a match was found against one of the given VariableFactory.
"""
if not isinstance(factories, tuple):
factories = (factories,)
for factory in factories:
if (
variable.rank == factory.rank
and variable.name == factory.name
and variable.units == factory.units
):
return True
return False
def get_variable_lags(var_factory):
"""Get the lags for a given VariableFactory.
Args:
var_factory (VariableFactory): VariableFactory to retrieve lags for.
Returns:
tuple of int: All possible lags corresponding to the given variable.
"""
if var_factory in shifted_variables:
return lags
return (0,)
def get_shifted_variables(var_factory):
"""Get all possible shifted variables given a VariableFactory.
Args:
var_factory (VariableFactory): Basis for shifted Variable copies.
Returns:
tuple of Variable: All possible shifted variables.
"""
shifted = []
for lag in get_variable_lags(var_factory):
shifted.append(var_factory[lag])
return tuple(shifted)
DRY_DAY_PERIOD = VariableFactory(
rank=1,
name="Dry Day Period",
units="days",
)
SWI = VariableFactory(
rank=2,
name="SWI(1)",
units="$\mathrm{m}^3 \mathrm{m}^{-3}$",
)
MAX_TEMP = VariableFactory(
rank=3,
name="Max Temp",
units="K",
)
DIURNAL_TEMP_RANGE = VariableFactory(
rank=4,
name="Diurnal Temp Range",
units="K",
)
LIGHTNING = VariableFactory(
rank=5,
name="lightning",
units="$\mathrm{strokes}\ \mathrm{km}^{-2}$",
)
PFT_CROP = VariableFactory(
rank=6,
name="pftCrop",
units="1",
)
POPD = VariableFactory(
rank=7,
name="popd",
units="$\mathrm{inh}\ \mathrm{km}^{-2}$",
)
PFT_HERB = VariableFactory(
rank=8,
name="pftHerb",
units="1",
)
SHRUB_ALL = VariableFactory(
rank=9,
name="ShrubAll",
units="1",
)
TREE_ALL = VariableFactory(
rank=10,
name="TreeAll",
units="1",
)
AGB_TREE = VariableFactory(
rank=11,
name="AGB Tree",
units="r$\mathrm{kg}\ \mathrm{m}^{-2}$",
)
VOD = VariableFactory(
rank=12,
name="VOD Ku-band",
units="1",
)
FAPAR = VariableFactory(
rank=13,
name="FAPAR",
units="1",
)
LAI = VariableFactory(
rank=14,
name="LAI",
units="$\mathrm{m}^2\ \mathrm{m}^{-2}$",
)
SIF = VariableFactory(
rank=15,
name="SIF",
units="r$\mathrm{mW}\ \mathrm{m}^{-2}\ \mathrm{sr}^{-1}\ \mathrm{nm}^{-1}$",
)
GFED4_BA = StandardVariable(rank=0, name="GFED4 BA", shift=0, units="1", parent=None)
MCD64CMQ_BA = StandardVariable(
rank=-1, name="MCD64CMQ BA", shift=0, units="1", parent=None
)
# Investigated lags.
lags = (0, 1, 3, 6, 9, 12, 18, 24)
shifted_variables = (DRY_DAY_PERIOD, LAI, FAPAR, VOD, SIF)
# Data filling params.
filled_variables = (SWI, FAPAR, LAI, VOD, SIF)
# Season-trend & minima.
st_persistent_perc = 50
st_k = 4
# NN.
nn_n_months = 3
Category = Enum(
"Category",
[
"METEOROLOGY",
"HUMAN",
"LANDCOVER",
"VEGETATION",
],
)
feature_categories = immutabledict(
{
Category.METEOROLOGY: (
DRY_DAY_PERIOD,
SWI,
MAX_TEMP,
DIURNAL_TEMP_RANGE,
LIGHTNING,
),
Category.HUMAN: (PFT_CROP, POPD),
Category.LANDCOVER: (PFT_HERB, SHRUB_ALL, TREE_ALL, AGB_TREE),
Category.VEGETATION: (VOD, FAPAR, LAI, SIF),
}
)
# The units below are used instead of the ones associated with the Variable instances
# themselves, because the units below are not tied into the caching mechanism. This
# means they are easier to change should this be necessary.
units = {
DRY_DAY_PERIOD: "days",
SWI: r"$\mathrm{m}^3 \mathrm{m}^{-3}$",
MAX_TEMP: "K",
DIURNAL_TEMP_RANGE: "K",
LIGHTNING: r"$\mathrm{strokes}\ \mathrm{km}^{-2}$",
PFT_CROP: "1",
POPD: r"$\mathrm{inh}\ \mathrm{km}^{-2}$",
PFT_HERB: "1",
SHRUB_ALL: "1",
TREE_ALL: "1",
AGB_TREE: r"$\mathrm{kg}\ \mathrm{m}^{-2}$",
VOD: "1",
FAPAR: "%",
LAI: r"$\mathrm{m}^2\ \mathrm{m}^{-2}$",
SIF: r"$\mathrm{mW}\ \mathrm{m}^{-2}\ \mathrm{sr}^{-1}\ \mathrm{nm}^{-1}$",
}
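# --- Minimal usage sketch (assumption: this module is run directly and `immutabledict` is installed) ---
if __name__ == "__main__":
    # Indexing a factory with a supported lag yields a shifted StandardVariable.
    fapar_3m = FAPAR[3]
    print(fapar_3m)       # "FAPAR 3M"
    print(fapar_3m.raw)   # "FAPAR -3 Month"
    # Shifts >= 12 months can be viewed as offset (anomaly) variables.
    print(FAPAR[18].get_offset())  # "FAPAR Δ18M"
    # Select the 3-month shifted variable from all shifted FAPAR copies.
    print(get_matching(get_shifted_variables(FAPAR), shift=3))  # "FAPAR 3M"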
|
import os
from pprint import pprint
from invoke import run, task
# You can edit these variables
SETTINGS = dict(
SPHINXOPTS="",
SPHINXBUILD="sphinx-build",
SOURCEDIR=".",
BUILDDIR="_build",
SPHINXINTL="sphinx-intl",
LOCALE="ja",
LOCALEDIR="_locales",
)
@task(help={"target": "target used by sphinx-build"})
def build(ctx, target):
"""Run sphinx-build with provided target"""
SETTINGS["target"] = target
print("### SETTING ###")
pprint(SETTINGS)
run(
"{SPHINXBUILD} -M {target} {SOURCEDIR} {BUILDDIR} {SPHINXOPTS}".format(
**SETTINGS
)
)
@task
def list_target(ctx):
"""List targets available by sphinx-build"""
build(ctx, "help")
@task
def clean(ctx):
"""Remove everything under BUILD ({BUILD}) directory"""
build(ctx, "clean")
@task
def gettext(ctx):
"""Generate pot files (run: sphinx-build -b gettext)"""
run(
(
"{SPHINXBUILD} -b gettext "
+ "{SOURCEDIR} {LOCALEDIR}/pot "
+ "{SPHINXOPTS}"
).format(**SETTINGS)
)
@task
def locale(ctx):
"""Generate locale mo,po files (run: sphinx-intl update)"""
run(
(
"{SPHINXINTL} update "
+ "-p {LOCALEDIR}/pot -l {LOCALE} "
+ "{SPHINXOPTS}"
).format(**SETTINGS)
)
@task
def trans_stat(ctx):
"""Output translation stat (run: sphinx-intl stat)"""
run(
(
"{SPHINXINTL} stat "
+ "-d {LOCALEDIR} -l {LOCALE} "
+ "{SPHINXOPTS}"
).format(**SETTINGS)
)
@task
def html_trans(ctx):
"""Generate ja translated html files"""
SETTINGS["SPHINXOPTS"] += " -D language={LOCALE} ".format(**SETTINGS)
build(ctx, target="html")
@task
def rtd_trans(ctx):
"""Generate ja translated html files with rtd extensions"""
SETTINGS["SPHINXOPTS"] += " -D language={LOCALE} ".format(**SETTINGS)
build(ctx, target="readthedocs")
# task(s) for git / work environment
@task
def update_git_submodule(ctx):
"""Update git submodule then sync with its requirements.txt"""
run("git submodule update")
#flask_docs_req = open(os.path.join("flask", "docs", "requirements.txt"))
#with open("requirements.txt", "w") as f_out:
# f_out.write(flask_docs_req.read())
# f_out.write(
# "# install current flask under 'flask' submodule\n"
# "-e .\n"
# "sphinx-intl\n"
# "readthedocs-sphinx-ext\n"
# "invoke\n"
# "git+https://github.com/msiz07/sphinx-hoverorig.git@main\n"
# )
@task(update_git_submodule)
def update_tools(ctx):
"""Update tools with requirements.txt content"""
run("pip install -U -r requirements.txt --progress-bar=off")
|
from rlkit.misc.data_processing import Experiment
import matplotlib.pyplot as plt
import numpy as np
def main():
ddpg_trials = Experiment(
"/home/vitchyr/git/railrl/data/doodads3/12-23-ddpg-nupo-sweep-ant/",
criteria={
'exp_id': '16',
},
).get_trials()
her_andrychowicz_trials = Experiment(
"/home/vitchyr/git/railrl/data/doodads3/12-23-her-andrychowicz-ant-rebutal/",
criteria={
'exp_id': '14',
},
).get_trials()
# Ant results with batch size of 128
tdm_trials = Experiment(
"/home/vitchyr/git/railrl/data/doodads3/12-24-ddpg-nupo-sweep-ant/",
criteria={
'exp_id': '16',
}
).get_trials()
# Accidentally called this pusher, but it's really ant
# Here, x-axis is 10k steps.
# tdm_trials = Experiment(
# "/home/vitchyr/git/rlkit/data/doodads3/12-27-pusher-reward-scale-tau-uniform-or-truncated-geo-sweep-2/",
# criteria={
# 'ddpg_tdm_kwargs.base_kwargs.reward_scale': 100,
# 'ddpg_tdm_kwargs.tdm_kwargs.tau_sample_strategy':
# 'truncated_geometric',
# }
# ).get_trials()
ddpg_indicator_trials = Experiment(
"/home/vitchyr/git/railrl/data/doodads3/12-23-ddpg-sparse-sweep-4/",
criteria={
'env_class.$class': 'railrl.envs.multitask.ant_env.GoalXYPosAnt',
'ddpg_tdm_kwargs.base_kwargs.num_updates_per_env_step': 1,
},
).get_trials()
mb_trials = Experiment(
"/home/vitchyr/git/railrl/data/doodads3/12-24-dagger-mb-ant-cheetah-pos-and-vel/",
criteria={
'exp_id': '1',
},
).get_trials()
# MAX_ITERS = 10001
MAX_ITERS = 200
plt.figure()
base_key = 'Final Distance to goal Mean'
for trials, name, key in [
(tdm_trials, 'TDM', base_key),
(mb_trials, 'Model-Based', base_key),
(ddpg_trials, 'DDPG', base_key),
(her_andrychowicz_trials, 'HER', base_key),
(ddpg_indicator_trials, 'DDPG-Sparse', base_key),
]:
key = key.replace(" ", "_")
all_values = []
for trial in trials:
try:
values_ts = trial.data[key]
except:
import ipdb; ipdb.set_trace()
all_values.append(values_ts)
min_len = min(map(len, all_values))
costs = np.vstack([
values[:min_len]
for values in all_values
])
costs = costs[:, :min(costs.shape[1], MAX_ITERS)]
mean = np.mean(costs, axis=0)
std = np.std(costs, axis=0)
epochs = np.arange(0, len(costs[0]))
plt.fill_between(epochs, mean - std, mean + std, alpha=0.1)
plt.plot(epochs, mean, label=name)
plt.xlabel("Environment Samples (x1,000)")
plt.ylabel("Final Euclidean Distance to Goal Position")
plt.legend()
plt.savefig('results/iclr2018/ant.jpg')
# plt.show()
def average_every_n_elements(arr, n):
return np.nanmean(
np.pad(
arr.astype(float),
(0, -arr.size % n),  # pad up to a multiple of n (no extra all-NaN row when already aligned)
mode='constant',
constant_values=np.NaN,
).reshape(-1, n),
axis=1
)
if __name__ == '__main__':
main()
|
from app import app
from flask import render_template
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html')
@app.route('/contato.html')
def contato():
return render_template('contato.html')
@app.route('/features.html')
def features():
return render_template('features.html')
|
#!/usr/bin/env python3
import numpy as np
import ot
from numba import njit
from scipy.spatial.distance import cdist
from sklearn.utils import check_array
from .transmorph import Transmorph
from .tdata import TData
from .density import kernel_H
def sigma_analysis(dataset,
layer='raw',
subsample=False,
sigmas: np.ndarray=None,
sigma_min: float=1e-10,
sigma_max: float=1e10,
nmarks: int=20,
log_scale: bool=True,
use_reference_if_transmorph: bool=False,
normalize_if_raw: bool=True,
return_sigmas: bool=False):
"""
Returns the discretized mapping sigma -> KL(u_\\sigma, Uniform).
The module uses
argmax_\\sigma KL(u_\\sigma, Uniform)
as a proxy for the optimal Gaussian kernel bandwidth.
See more details in Fouché, bioRxiv 2021.
Parameters:
-----------
dataset: Transmorph, TData or np.ndarray
Dataset to use for bandwidth selection. If a Transmorph
object is passed, source TData is used by default. Reference
TData can be used setting to True the parameter
`use_reference_if_transmorph`.
layer: str, default = 'raw'
If dataset is Transmorph or TData, which layer to use,
e.g. 'raw', 'pca'...
subsample: bool, default = False
If dataset is Transmorph or TData, use a subsample of the
vertices. Warning: this changes the optimal value for sigma.
sigmas: np.ndarray
The set of sigmas to be tested. If None, a set of sigmas
is selected automatically using the following parameters.
sigma_min: float, default = 1e-10
Lower bound for sigma search (included)
sigma_max: float, default = 1e10
Upper bound for sigma search (excluded)
nmarks: int, default = 20
Number of values to test.
log_scale: bool, default = True
Log-scaled grid.
use_reference_if_transmorph: bool, default = False
Use reference TData instead of source one, if dataset
is a Transmorph.
normalize_if_raw: bool, default = True
Normalize columns if dataset is a np.ndarray.
return_sigmas: bool, default = False
Returns {sigmas}, {values} instead of {values}.
"""
# Handling Transmorph, TData & np.ndarray
if isinstance(dataset, Transmorph):
assert dataset.fitted, \
"Error: Transmorph is not fitted."
if use_reference_if_transmorph:
dataset = dataset.tdata_y
else:
dataset = dataset.tdata_x
elif isinstance(dataset, np.ndarray):
dataset = check_array(dataset, dtype=np.float32, order='C')
dataset = TData(
dataset,
weights=None,
labels=None,
normalize=normalize_if_raw
)
else:
assert isinstance(dataset, TData),\
"Error: Unrecognized dataset type."
# Computing an evenly spaced grid if none is provided
if sigmas is None:
if log_scale:
sigma_min, sigma_max =\
np.log(sigma_min), np.log(sigma_max)
sigmas = np.arange(
sigma_min,
sigma_max,
(sigma_max - sigma_min) / nmarks
)
if log_scale:
sigmas = np.exp(sigmas)
# Delegating to numba-accelerated function
values = _sigma_analysis(
dataset.distance(
metric="euclidean",
layer=layer,
subsample=subsample,
return_full_size=False
),
sigmas
)
if return_sigmas:
return sigmas, values
return values
@njit
def _sigma_analysis(D, sigmas):
values = []
for sigma in sigmas:
values.append(kernel_H(D, sigma))
return values
def wasserstein_distance(tr: Transmorph,
x_integrated: np.ndarray = None,
use_labels: bool = False,
coefficient_labels: float = 1,
categorical_labels: bool = False,
xs_labels: np.ndarray = None,
yt_labels: np.ndarray = None,
layer: str = 'raw',
metric: str = 'sqeuclidean'):
"""
Returns the total cost of transport matrix from a fitted
Transmorph.
Parameters:
-----------
tr: Transmorph
Transmorph, must be fitted
x_integrated: np.ndarray, default = None
Integrated dataset
layer: str, default = 'raw'
In 'raw', 'pca'. Representation to use, must have been
precomputed in Transmorph.
metric: str, default = 'sqeuclidean'
Metric to use for cost matrix.
"""
assert tr.fitted, \
"Error: Transmorph not fitted."
assert layer in ('raw', 'pca'), \
"Layer %s not handled." % layer
if x_integrated is None:
x_integrated = tr.transform(jitter=False)
yt = tr.tdata_y.get_layer(layer)
xt = (
x_integrated if layer == 'raw'
else x_integrated @ tr.tdata_y.extras['pca'].T
)
M = cdist(xt, yt, metric=metric)
if use_labels:
if xs_labels is None:
assert tr.tdata_x.labels is not None, \
"Error: no labels in source dataset."
xs_labels = tr.tdata_x.labels
else:
assert len(xs_labels) == len(tr.tdata_x), \
"Error: Inconsistency between source dataset size and \
labels size (%i != %i)" \
% (len(xs_labels), len(tr.tdata_x))
if yt_labels is None:
assert tr.tdata_y.labels is not None, \
"Error: no labels in reference dataset."
yt_labels = tr.tdata_y.labels
else:
assert len(yt_labels) == len(tr.tdata_y), \
"Error: Inconsistency between reference dataset size and \
labels size (%i != %i)" \
% (len(yt_labels), len(tr.tdata_y))
assert coefficient_labels >= 0, \
"Label coefficient must be positive, found %f" % coefficient_labels
if categorical_labels:
L = (xs_labels[:,None] != yt_labels)
else:
L = (xs_labels[:,None] - yt_labels)**2
M += coefficient_labels * L
M /= np.max(M)
M = check_array(M, dtype=np.float64, order='C')
return ot.lp.emd2(
np.ones(len(xt))/len(xt),
np.ones(len(yt))/len(yt),
M,
numItermax=1e6
)
wd = wasserstein_distance # alias
def distance_label_continuous(tr: Transmorph,
x_integrated: np.ndarray = None,
xs_labels: np.ndarray = None,
yt_labels: np.ndarray = None,
layer: str = 'raw',
metric: str = 'sqeuclidean',
cost_per_point: bool = False):
"""
Returns the total cost of transport matrix from a fitted
Transmorph.
Parameters:
-----------
tr: Transmorph
Transmorph, must be fitted
x_integrated: np.ndarray, default = None
Integrated dataset
layer: str, default = 'raw'
In 'raw', 'pca'. Representation to use, must have been
precomputed in Transmorph.
metric: str, default = 'sqeuclidean'
Metric to use for cost matrix.
"""
assert tr.fitted, \
"Error: Transmorph not fitted."
assert layer in ('raw', 'pca'), \
"Layer %s not handled." % layer
if xs_labels is None:
assert tr.tdata_x.labels is not None, \
"Error: no labels in source dataset."
xs_labels = tr.tdata_x.labels
else:
assert len(xs_labels) == len(tr.tdata_x), \
"Error: Inconsistency between source dataset size and \
labels size (%i != %i)" \
% (len(xs_labels), len(tr.tdata_x))
if yt_labels is None:
assert tr.tdata_y.labels is not None, \
"Error: no labels in reference dataset."
yt_labels = tr.tdata_y.labels
else:
assert len(yt_labels) == len(tr.tdata_y), \
"Error: Inconsistency between reference dataset size and \
labels size (%i != %i)" \
% (len(yt_labels), len(tr.tdata_y))
if x_integrated is None:
x_integrated = tr.transform(jitter=False)
yt = tr.tdata_y.get_layer(layer)
xt = (
x_integrated if layer == 'raw'
else x_integrated @ tr.tdata_y.extras['pca'].T
)
diff = np.abs(
yt_labels - xs_labels[:,None]
)
yt_matched = yt[np.argsort(diff, axis=1)[:,0]]
distances = np.diag(cdist(xt, yt_matched, metric=metric))
if cost_per_point:
return distances
return np.sum(distances)
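# --- Minimal usage sketch for sigma_analysis (illustrative; assumes the package is importable) ---
# rng = np.random.default_rng(0)
# X = rng.normal(size=(200, 5)).astype(np.float32)
# sigmas, values = sigma_analysis(X, nmarks=30, return_sigmas=True)
# best_sigma = sigmas[int(np.argmax(values))]  # argmax_sigma KL(u_sigma, Uniform), per the docstring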
|
# import your libraries
import altair as alt
from vega_datasets import data
# your data
source = data.us_employment()
# create a visualization
alt.Chart(source).mark_bar().encode(
x="month:T",
y="nonfarm_change:Q",
color=alt.condition(
alt.datum.nonfarm_change > 0,
alt.value("steelblue"), # The positive color
alt.value("orange") # The negative color
)
).properties(width=500).configure(background="#FFFFFF")
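# Note: outside a notebook the chart object would need to be assigned and saved
# to be viewed, e.g. chart.save("us_employment.html") (hypothetical filename).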
|
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtWebEngineWidgets import *
class jkweb():
def __init__(self):
#create the main window widget
self.window = QWidget()
#set the title of web browser
self.window.setWindowTitle("JehanKandy Web Browser")
#get box layout for web browser
self.layout = QVBoxLayout()
#buttons in horizontal
self.horizontal = QHBoxLayout()
#for Url Bar
self.url_bar = QTextEdit()
self.url_bar.setMaximumHeight(30) #size of url bar
#for Go button
self.go_btn = QPushButton("GO")
self.go_btn.setMinimumHeight(30) #size of GO button
#for Back button
self.back_btn = QPushButton("<-")
self.back_btn.setMinimumHeight(30) #size of Back button
#for Forward button
self.forward_btn = QPushButton("->")
self.forward_btn.setMinimumHeight(30) #size of Forward button
#now add buttons and url box on the browser
self.horizontal.addWidget(self.url_bar) #for url bar
self.horizontal.addWidget(self.go_btn) #for GO Button
self.horizontal.addWidget(self.back_btn) #for Back Button
self.horizontal.addWidget(self.forward_btn) #for Forward Button
#now create the browser
self.browser = QWebEngineView()
#when someone click the go button
self.go_btn.clicked.connect(lambda: self.navigation(self.url_bar.toPlainText()))
#when someone click the back button
self.back_btn.clicked.connect(self.browser.back)
#when someone click the forward button
self.forward_btn.clicked.connect(self.browser.forward)
#add horizontal Layout
self.layout.addLayout(self.horizontal)
#add all Widget
self.layout.addWidget(self.browser)
#set the starting page
self.browser.setUrl(QUrl("http://google.com"))
#display the browser
self.window.setLayout(self.layout)
self.window.show()
#now create browser functions
def navigation(self, url):
#check if the url does not start with http
if not url.startswith("http"):
url = "http://" + url
self.url_bar.setText(url)
self.browser.setUrl(QUrl(url))
app = QApplication([])
window = jkweb()
app.exec_()
|
"""Compute optimal and average improvement for different parameters."""
import csv
from warnings import warn
import numpy as np
from bound_evaluation.change_enum import ChangeEnum
from bound_evaluation.manipulate_data import remove_full_nan_rows
from bound_evaluation.mc_enum import MCEnum
from bound_evaluation.mc_enum_to_dist import mc_enum_to_dist
from bound_evaluation.monte_carlo_dist import MonteCarloDist
from nc_arrivals.arrival_enum import ArrivalEnum
from nc_arrivals.iid import DM1, MD1, DGamma1, DWeibull1
from nc_arrivals.markov_modulated import MMOODisc, MMOOCont
from nc_arrivals.regulated_arrivals import (DetermTokenBucket,
LeakyBucketMassoulie)
from nc_operations.perform_enum import PerformEnum
from nc_server.constant_rate_server import ConstantRateServer
from optimization.opt_method import OptMethod
from tqdm import tqdm
from utils.exceptions import NotEnoughResults
from utils.perform_parameter import PerformParameter
from h_mitigator.array_to_results import two_col_array_to_results
from h_mitigator.compare_mitigator import compare_mitigator
from h_mitigator.fat_cross_perform import FatCrossPerform
def csv_fat_cross_param_power(name: str, arrival_enum: ArrivalEnum,
number_flows: int, number_servers: int,
perform_param: PerformParameter,
opt_method: OptMethod, mc_dist: MonteCarloDist,
compare_metric: ChangeEnum,
total_iterations: int,
target_util: float) -> dict:
"""Chooses parameters by Monte Carlo type random choice."""
param_array = mc_enum_to_dist(arrival_enum=arrival_enum,
mc_dist=mc_dist,
number_flows=number_flows,
number_servers=number_servers,
total_iterations=total_iterations)
res_array = np.empty([total_iterations, 2])
# print(res_array)
for i in tqdm(range(total_iterations), total=total_iterations):
if arrival_enum == ArrivalEnum.DM1:
arr_list = [
DM1(lamb=param_array[i, j]) for j in range(number_flows)
]
elif arrival_enum == ArrivalEnum.DGamma1:
arr_list = [
DGamma1(alpha_shape=param_array[i, j],
beta_rate=param_array[i, number_flows + j])
for j in range(number_flows)
]
elif arrival_enum == ArrivalEnum.DWeibull1:
arr_list = [
DWeibull1(lamb=param_array[i, j]) for j in range(number_flows)
]
elif arrival_enum == ArrivalEnum.MD1:
arr_list = [
MD1(lamb=param_array[i, j], mu=1.0)
for j in range(number_flows)
]
elif arrival_enum == ArrivalEnum.MMOODisc:
arr_list = [
MMOODisc(stay_on=param_array[i, j],
stay_off=param_array[i, number_flows + j],
peak_rate=param_array[i, 2 * number_flows + j])
for j in range(number_flows)
]
elif arrival_enum == ArrivalEnum.MMOOFluid:
arr_list = [
MMOOCont(mu=param_array[i, j],
lamb=param_array[i, number_flows + j],
peak_rate=param_array[i, 2 * number_flows + j])
for j in range(number_flows)
]
elif arrival_enum == ArrivalEnum.Massoulie:
arr_list = [
LeakyBucketMassoulie(sigma_single=param_array[i, j],
rho_single=param_array[i,
number_flows + j],
m=20) for j in range(number_flows)
]
# NOTE: n is fixed
elif arrival_enum == ArrivalEnum.TBConst:
arr_list = [
DetermTokenBucket(sigma_single=param_array[i, j],
rho_single=param_array[i, number_flows + j],
m=1) for j in range(number_flows)
]
else:
raise NotImplementedError(f"Arrival parameter {arrival_enum.name} "
f"is infeasible")
ser_list = [
ConstantRateServer(
rate=param_array[i,
arrival_enum.number_parameters() *
number_flows + j])
for j in range(number_servers)
]
fat_cross_setting = FatCrossPerform(arr_list=arr_list,
ser_list=ser_list,
perform_param=perform_param)
computation_necessary = True
if target_util > 0.0:
util = fat_cross_setting.approximate_utilization()
if util < target_util or util > 1:
res_array[i, ] = np.nan
computation_necessary = False
if computation_necessary:
# standard_bound, h_mit_bound = compare_mitigator()
res_array[i, 0], res_array[i, 1] = compare_mitigator(
setting=fat_cross_setting,
opt_method=opt_method,
number_l=number_servers - 1)
if (perform_param.perform_metric == PerformEnum.DELAY_PROB
and res_array[i, 1] > 1.0):
# write as nan if second (in particular both) value(s) are > 1.0
res_array[i, ] = np.nan
if np.isnan(res_array[i, 0]) or np.isnan(res_array[i, 1]):
res_array[i, ] = np.nan
res_array_no_full_nan = remove_full_nan_rows(full_array=res_array)
valid_iterations = res_array_no_full_nan.shape[0]
if valid_iterations < total_iterations * 0.2:
warn(f"Many nan's: {total_iterations - valid_iterations} nans "
f"out of {total_iterations}!")
if valid_iterations < 100:
raise NotEnoughResults("result is useless")
res_dict = two_col_array_to_results(arrival_enum=arrival_enum,
param_array=param_array,
res_array=res_array,
number_servers=number_servers,
compare_metric=compare_metric)
res_dict.update({
"iterations": total_iterations,
"PerformParamValue": perform_param.value,
"optimization": opt_method.name,
"compare_metric": compare_metric.name,
"MCDistribution": mc_dist.to_name(),
"MCParam": mc_dist.param_to_string(),
"number_servers": number_servers
})
filename = name
filename += f"_results_{perform_param.to_name()}_{arrival_enum.name}_" \
f"MC{mc_dist.to_name()}_{opt_method.name}_" \
f"{compare_metric.name}_util_{target_util}"
with open(filename + ".csv", 'w') as csv_file:
writer = csv.writer(csv_file)
for key, value in res_dict.items():
writer.writerow([key, value])
return res_dict
if __name__ == '__main__':
COMMON_PERFORM_PARAM = PerformParameter(
perform_metric=PerformEnum.DELAY_PROB, value=10)
# COMMON_PERFORM_PARAM = PerformParameter(perform_metric=PerformEnum.DELAY,
# value=1e-6)
COMMON_OPTIMIZATION = OptMethod.GRID_SEARCH
COMMON_METRIC = ChangeEnum.RATIO_REF_NEW
TARGET_UTIL = 0.7
# MC_UNIF20 = MonteCarloDist(mc_enum=MCEnum.UNIFORM, param_list=[20.0])
MC_UNIF10 = MonteCarloDist(mc_enum=MCEnum.UNIFORM, param_list=[10.0])
MC_EXP1 = MonteCarloDist(mc_enum=MCEnum.EXPONENTIAL, param_list=[1.0])
# ARRIVAL_PROCESSES = [
# ArrivalEnum.DM1, ArrivalEnum.MMOOFluid, ArrivalEnum.MD1
# ]
ARRIVAL_PROCESSES = [
ArrivalEnum.DM1, ArrivalEnum.DWeibull1, ArrivalEnum.DGamma1,
ArrivalEnum.MMOODisc
]
for PROCESS in ARRIVAL_PROCESSES:
print(
csv_fat_cross_param_power(name="simple_setting",
arrival_enum=PROCESS,
number_flows=2,
number_servers=2,
perform_param=COMMON_PERFORM_PARAM,
opt_method=COMMON_OPTIMIZATION,
mc_dist=MC_EXP1,
compare_metric=COMMON_METRIC,
total_iterations=10**5,
target_util=TARGET_UTIL))
print(
csv_fat_cross_param_power(name="simple_setting",
arrival_enum=PROCESS,
number_flows=2,
number_servers=2,
perform_param=COMMON_PERFORM_PARAM,
opt_method=COMMON_OPTIMIZATION,
mc_dist=MC_UNIF10,
compare_metric=COMMON_METRIC,
total_iterations=10**5,
target_util=TARGET_UTIL))
|
import torch.nn as nn
from torch.nn.utils import weight_norm
from torch.nn import functional as F
import torch
from model.TemporalBlock import TemporalBlock
class pulsar_encoder_block(nn.Module):
# Single block of the pulsar encoder
def __init__(self, n_inputs, n_outputs, kernel_size, stride=2, pool=1, conv_groups=1, norm_groups=4, no_pad=False):
super().__init__()
# Set padding in a way that the length is divided by the stride
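        # Illustrative example: kernel_size=8, stride=2 with no_pad=False gives padding_1=3,
        # so the convolution halves an even-length input before pooling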
if no_pad:
padding_1 = 0
padding_2 = 0
else:
if (kernel_size - stride) % 2 == 0:
padding_1 = int((kernel_size - stride) / 2)
padding_2 = 0
else:
# Only works for ker=4,8 and stride=1, pool=4
padding_1 = int((kernel_size - stride) / 2)
# padding_1 = int(kernel_size / 2)
padding_2 = int(kernel_size / pool)
self.pool = pool
self.kernel_size = kernel_size
self.stride = stride
# The net reduces the length input by stride * pool
self.net = nn.Sequential(weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size, padding=padding_1, stride=stride, bias=True, groups=conv_groups)),
nn.MaxPool1d(self.pool, padding=padding_2),
nn.LeakyReLU(),
nn.GroupNorm(norm_groups, n_outputs,
affine=True),
)
def forward(self, x):
out = self.net(x)
# if hasattr(self, 'dm0'):
# if self.dm0=='concat':
# pooled = F.avg_pool2d(x.unsqueeze(1), (x.shape[1], self.stride*self.pool), stride=(1,self.stride*self.pool))
# out = torch.cat((out, pooled[:,0,:,:]), dim=1)
# print(x.shape, out.shape)
return out
class pool_block(nn.Module):
    # 2D max-pooling block used by the pulsar encoder
def __init__(self, kernel_size):
super().__init__()
# 2D pooling block
self.pool = nn.MaxPool2d(kernel_size)
def forward(self, x):
out = self.pool(x.unsqueeze(1))
return out.squeeze(1)
class pulsar_encoder(nn.Module):
    # Whole pulsar encoder built from several encoder blocks
# channel_list is a list of output channels which defines the amount of blocks
def __init__(self, input_shape, model_para, no_pad=False):
super().__init__()
# layers = []
channel_list = model_para.encoder_channels
layers = [nn.Dropout(model_para.initial_dropout), ]
levels = len(model_para.encoder_channels)
self.input_channels = input_shape[0]
# self.dm0 = dm0
out_channels = input_shape[0]
for i in range(levels):
in_channels = int(input_shape[0]) if i == 0 else int(
channel_list[i - 1])
out_channels = int(channel_list[i])
layers += [pulsar_encoder_block(in_channels, out_channels, model_para.encoder_kernel,
stride=model_para.encoder_stride, pool=model_para.encoder_pooling,
conv_groups=model_para.encoder_conv_groups, norm_groups=model_para.encoder_norm_groups,
no_pad=no_pad)]
if model_para.tcn_1_layers != 0:
for i in range(model_para.tcn_1_layers):
dil = model_para.tcn_1_dilation ** i
# if i ==0 and self.dm0 == 'concat':
# added_chan=1
# else:
# added_chan=0
layers += [TemporalBlock(out_channels, out_channels, model_para.tcn_1_kernel, stride=1, dilation=dil,
norm_groups=model_para.tcn_1_norm_groups, conv_groups=model_para.tcn_1_conv_groups, residual=True, dropout=model_para.tcn_1_dropout)]
if out_channels != model_para.tcn_2_channels:
layers += [weight_norm(nn.Conv1d(out_channels, model_para.tcn_2_channels, 1)),
nn.LeakyReLU(),
nn.GroupNorm(model_para.tcn_2_norm_groups, model_para.tcn_2_channels, affine=True)]
else:
if model_para.tcn_2_channels != out_channels:
if levels==0:
conv_groups = model_para.encoder_conv_groups
else:
conv_groups = 1
layers += [weight_norm(nn.Conv1d(out_channels, model_para.tcn_2_channels, 1, groups=conv_groups)),
nn.LeakyReLU(),
nn.GroupNorm(model_para.tcn_2_norm_groups, model_para.tcn_2_channels, affine=True)]
# if num_inputs != self.ini_channels:
# self.first_conv.add_module('downsample', nn.Conv1d(
# num_inputs, self.ini_channels, 1))
# self.first_conv.add_module('relu', nn.LeakyReLU())
self.network = nn.Sequential(*layers)
def forward(self, x):
# Input is expected to be of shape (batch_size, channel_in, time_steps)
return self.network(x)
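# Usage sketch (hedged): `model_para` can be any object exposing the hyper-parameters
# read above; the values below are illustrative only, not a recommended configuration.
if __name__ == "__main__":
    from types import SimpleNamespace
    para = SimpleNamespace(initial_dropout=0.0, encoder_channels=[32, 64],
                           encoder_kernel=8, encoder_stride=2, encoder_pooling=1,
                           encoder_conv_groups=1, encoder_norm_groups=4,
                           tcn_1_layers=0, tcn_2_channels=64, tcn_2_norm_groups=4)
    encoder = pulsar_encoder(input_shape=(1, 4096), model_para=para)
    dummy = torch.randn(2, 1, 4096)  # (batch_size, channel_in, time_steps)
    print(encoder(dummy).shape)  # two stride-2 blocks: torch.Size([2, 64, 1024])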
|
def read_number():
    # Python 3 version: input() already returns a string, so just convert to int
    # (the original Python 2 code wrapped raw_input() and shadowed the builtin).
    return int(input())
number = read_number()
if number == 8:
    print("Oxygen")
elif number == 1:
    print("Hydrogen")
elif number == 2:
    print("Helium")
elif number == 11:
    print("Sodium")
else:
    print("I have no idea what %d is" % number)
# Alternative solution
number = read_number()
db = {
    1: "Hydrogen",
    2: "Helium",
    8: "Oxygen",
    11: "Sodium",
}
print(db.get(number, "I have no idea what %d is" % number))
|
from django.contrib.admin import register, ModelAdmin
from {{project_name}}.apps.employee.models import EmployeeModel
@register(EmployeeModel)
class EmployeeAdmin(ModelAdmin):
list_display = ('dni', 'phone_number', 'labor_specialty')
|
import datetime
import unittest
from rdr_service import singletons
from rdr_service.clock import FakeClock
TIME_1 = datetime.datetime(2016, 1, 1)
TIME_2 = datetime.datetime(2016, 1, 2)
TIME_3 = datetime.datetime(2016, 1, 4)
# TODO: represent in new test suite
class SingletonsTest(unittest.TestCase):
foo_count = 0
@staticmethod
def foo():
SingletonsTest.foo_count += 1
return SingletonsTest.foo_count
def setUp(self):
SingletonsTest.foo_count = 0
singletons.reset_for_tests()
def test_get_no_ttl(self):
with FakeClock(TIME_1):
self.assertEqual(1, singletons.get(123, SingletonsTest.foo))
with FakeClock(TIME_2):
self.assertEqual(1, singletons.get(123, SingletonsTest.foo))
def test_get_ttl(self):
with FakeClock(TIME_1):
self.assertEqual(1, singletons.get(123, SingletonsTest.foo, 86401))
with FakeClock(TIME_2):
self.assertEqual(1, singletons.get(123, SingletonsTest.foo, 86401))
with FakeClock(TIME_3):
self.assertEqual(2, singletons.get(123, SingletonsTest.foo, 86401))
|
import os
import pickle
import tkinter
from tkinter import ttk
import cv2
import random
import string
import shutil
from tkinter import filedialog
import tkinter.messagebox
import threading
import PIL
import PIL.ImageTk
import time
class define():
def GetRandomStr(num):
dat = string.digits + string.ascii_lowercase + string.ascii_uppercase
return ''.join([random.choice(dat) for i in range(num)])
class edit():
def get_info(argv):
if argv == "enable":
return 1
if argv == "addon_var":
return 0
elif argv == "template_var":
return "b0.0.3"
elif argv == "__built_in__":
return 0
elif argv == "type":
return ".mvid"
elif argv == "type_ex":
return "Maru video file"
else:
return 0
def file_new(open_pas):
pass
def file_open(open_path, ofps, master):
master.open_path = open_path
master.directory = "./tmp"+define.GetRandomStr(5)
if not os.path.exists(master.directory):
os.mkdir(master.directory)
else:
master.directory = "./tmp"+define.GetRandomStr(5)
os.mkdir(master.directory)
if ofps == 0:
pass
else:
pass
return [open_path]
def file_save(master):
if os.path.exists(master.open_path):
os.remove(master.open_path)
pickle.dump(master.t.get('1.0', 'end -1c'),open(master.open_path,"wb"))
def file_main(master):
def show_menu(event, name):
if name == "track":
master.f3.f1.menu.post(event.x_root,event.y_root)
if event.widget in master.tracks1:
print(master.tracks1.index(event.widget))
elif event.widget in master.tracks2:
print(master.tracks2.index(event.widget))
elif name == "file":
master.f1.lb1.menu.post(event.x_root,event.y_root)
def add_file(path=None):
if path != None:
file = path
else:
file = filedialog.askopenfilename()
if os.path.exists(file):
if os.path.splitext(file)[1] in [".mp4"]:
if os.path.getsize(file):
if tkinter.messagebox.askyesno("確認","このファイルをプロジェクトファイルの内部に\n配置しますか?"):
shutil.copy(file,master.directory)
                            file = os.path.join(master.directory, os.path.basename(file))
else:
pass
master.main[0]["files"].append(["video",cv2.VideoCapture(file)])
if master.main[0]["files"][-1][1].isOpened():
master.f1.lb1.insert("end",master.main[0]["files"][-1][0]+" : "+file)
else:
master.main[0]["files"].pop(-1)
def preview(p):
class Preview:
def __init__(self, window, window_title, video_source=0):
self.window = window
self.window.title(window_title)
self.video_source = video_source
self.vid = MyVideoCapture(self.video_source)
self.canvas = tkinter.Canvas(window, width = self.vid.width, height = self.vid.height)
self.canvas.pack(fill="both")
self.l = ttk.Label(window,text="0")
self.l.pack()
self.video_source.set(cv2.CAP_PROP_POS_FRAMES, 0)
self.btn_snapshot=tkinter.Button(window, text="Snapshot", width=50, command=self.snapshot)
self.btn_snapshot.pack(anchor=tkinter.CENTER, expand=True)
self.btn_reset=ttk.Button(window, text="reset", command=lambda: self.video_source.set(cv2.CAP_PROP_POS_FRAMES, 0))
self.btn_reset.pack()
self.update()
self.window.mainloop()
def snapshot(self):
ret, frame = self.vid.get_frame()
if ret:
cv2.imwrite("frame-" + time.strftime("%d-%m-%Y-%H-%M-%S") + ".jpg", cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
def update(self):
ret, frame = self.vid.get_frame()
if ret:
self.photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(frame), master=self.window)
self.canvas.create_image(0, 0, image = self.photo, anchor = tkinter.NW)
self.l.configure(text=str(self.video_source.get(cv2.CAP_PROP_POS_FRAMES)))
self.window.after(int(self.video_source.get(5)), self.update)
class MyVideoCapture:
def __init__(self, video_source=0):
# Open the video source
self.vid = video_source
if not self.vid.isOpened():
raise ValueError("Unable to open video source", video_source)
self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)
self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
def get_frame(self):
if self.vid.isOpened():
ret, frame = self.vid.read()
if ret:
return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
else:
return (ret, None)
                    else:
                        # Video source not opened: no frame could be read
                        return (False, None)
def __del__(self):
if self.vid.isOpened():
self.vid.release()
Preview(tkinter.Tk(),"Preview",p[1])
master.images = []
master.files = {}
master.files.update(movie=[])
master.files.update(music=[])
master.files.update(picture=[])
master.files.update(movie_music=[])
master.f0 = ttk.Frame(master)
master.f1 = ttk.Frame(master)
master.f2 = ttk.Frame(master)
master.f3 = ttk.Frame(master)
master.f0.grid(column=0,row=0, sticky = 'nsew', columnspan=2)
master.f1.grid(column=0,row=1, sticky = 'nsew')
master.f2.grid(column=1,row=1, sticky = 'nsew')
master.f3.grid(column=0,row=2, sticky = 'nsew', columnspan=2)
if os.path.exists(master.open_path):
master.t.insert("end",pickle.load(open(master.open_path,"rb")))
master.f0.b1 = ttk.Button(master.f0, text="ファイル追加", command=add_file)
master.f0.b1.pack(side="left")
master.f1.l1 = ttk.Label(master.f1,text="ファイル:")
master.f1.l1.pack(side="top",fill="both")
master.f1.lb1 = tkinter.Listbox(master.f1)
master.f1.lb1.pack(fill="both")
master.f1.lb1.menu = tkinter.Menu(master,tearoff=False)
master.f1.lb1.menu.add_command(label="ファイル追加", command=add_file)
master.f1.lb1.menu.add_command(label="プレビュー", command=lambda: preview(master.main[0]["files"][master.f1.lb1.curselection()[0]]))
master.f1.lb1.bind('<Button-3>',lambda event:show_menu(event, "file"))
master.images.append(tkinter.PhotoImage(file='./image/empty_dark.png', master=master))
master.f2.l1 = ttk.Label(master.f2,text="プレビュー:")
master.f2.l1.pack(side="top",fill="both")
master.f2.c1 = tkinter.Canvas(master.f2)
master.f2.c1.pack()
master.f2.c1.create_image(200, 150, image=master.images[-1])
master.f3.f1 = ttk.Frame(master.f3)
master.f3.f1.grid(column=0,row=0, sticky = 'nsew')
master.f3.f2 = ttk.Frame(master.f3)
master.f3.f2.grid(column=1,row=0, sticky = 'nsew')
master.f3.f1.menu = tkinter.Menu(master,tearoff=False)
master.f3.f1.menu.add_command(label="dummy")
if os.path.exists(master.directory+"/main.conf"):
master.main = pickle.load(open(master.directory+"/main.conf","rb"))
else:
master.main = []
master.tmp = {}
master.tmp.update(files=list())
master.main.append(master.tmp)
pickle.dump(master.main,open(master.directory+"/main.conf","wb"))
master.track = 5
master.tracks1 = []
master.tracks2 = []
for i in range(master.track):
master.tracks1.append(ttk.Button(master.f3.f1, text="track"+str(i)))
master.tracks1[-1].pack()
master.tracks1[-1].bind("<1>",lambda event:show_menu(event, "track"))
master.tracks1[-1].bind('<Button-3>',lambda event:show_menu(event, "track"))
master.tracks2.append(ttk.Frame(master.f3.f2, width = 1000, height = 26, relief="ridge"))
master.tracks2[-1].pack()
master.tracks2[-1].bind('<Button-3>',lambda event:show_menu(event, "track"))
def file_exit(master):
if os.path.exists(master.directory):
shutil.rmtree(master.directory)
if __name__ == "__main__":
os.chdir("../")
master = tkinter.Tk()
edit.file_open("./test.mvid",0,master)
edit.file_main(master)
edit.file_exit(master)
|
__author__ = 'Alex Ge, alexgecontrol@qq.com'
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
def cuda_detect():
'''Attempt to detect the version of CUDA present in the operating system.
On Windows and Linux, the CUDA library is installed by the NVIDIA
driver package, and is typically found in the standard library path,
rather than with the CUDA SDK (which is optional for running CUDA apps).
On macOS, the CUDA library is only installed with the CUDA SDK, and
might not be in the library path.
Returns: version string (Ex: '9.2') or None if CUDA not found.
'''
# platform specific libcuda location
import platform
system = platform.system()
if system == 'Darwin':
lib_filenames = [
'libcuda.dylib', # check library path first
'/usr/local/cuda/lib/libcuda.dylib'
]
elif system == 'Linux':
lib_filenames = [
'libcuda.so', # check library path first
'/usr/lib64/nvidia/libcuda.so', # Redhat/CentOS/Fedora
'/usr/lib/x86_64-linux-gnu/libcuda.so', # Ubuntu
]
elif system == 'Windows':
lib_filenames = ['nvcuda.dll']
else:
return None # CUDA not available for other operating systems
# open library
import ctypes
if system == 'Windows':
dll = ctypes.windll
else:
dll = ctypes.cdll
libcuda = None
for lib_filename in lib_filenames:
try:
libcuda = dll.LoadLibrary(lib_filename)
break
except:
pass
if libcuda is None:
return None
# Get CUDA version
try:
cuInit = libcuda.cuInit
flags = ctypes.c_uint(0)
ret = cuInit(flags)
if ret != 0:
return None
cuDriverGetVersion = libcuda.cuDriverGetVersion
version_int = ctypes.c_int(0)
ret = cuDriverGetVersion(ctypes.byref(version_int))
if ret != 0:
return None
# Convert version integer to version string
value = version_int.value
return '%d.%d' % (value // 1000, (value % 1000) // 10)
except:
return None
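# Usage sketch: running this module directly prints the detected driver CUDA
# version (e.g. '11.2') or None when no usable libcuda/nvcuda library is found.
if __name__ == '__main__':
    print(cuda_detect())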
|
import itertools
from helper import get_input
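# Advent of Code 2020, day 14 (part 2): '1' bits in the mask force the matching
# address bit to 1 and 'X' bits are "floating", so the value is written to every
# combination of 0/1 at those positions. For example, mask
# 000000000000000000000000000000X1001X applied to address 42 writes to
# addresses 26, 27, 58 and 59.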
def main():
data = [x.split(' = ') for x in get_input(14).split('\n') if x]
one_mask = int('000000000000000000000000000000000000', 2)
x_positions = []
mem = {}
for operation, value in data:
if operation == 'mask':
tmp_one_mask = [x for x in '000000000000000000000000000000000000']
x_positions = []
for i, char in enumerate(value):
if char == '1':
tmp_one_mask[i] = '1'
elif char == 'X':
x_positions.append(i)
one_mask = int(''.join(tmp_one_mask), 2)
else:
pos = int(operation[:-1].split('[')[1])
masked_pos = (int(pos) | one_mask)
for l in range(0, len(x_positions) + 1):
for subset in itertools.combinations(x_positions, l):
tmp_0 = [x for x in '000000000000000000000000000000000000']
tmp_1 = [x for x in '111111111111111111111111111111111111']
for x in subset:
tmp_0[x] = '1'
for x in x_positions:
if x not in subset:
tmp_1[x] = '0'
pos = (masked_pos | int(''.join(tmp_0), 2)) & int(''.join(tmp_1), 2)
mem[pos] = int(value)
print(sum(mem.values()))
if __name__ == '__main__':
main()
|
# This file is part of the P3IV Simulator (https://github.com/fzi-forschungszentrum-informatik/P3IV),
# copyright by FZI Forschungszentrum Informatik, licensed under the BSD-3 license (see LICENSE file in main directory)
from __future__ import division
import os
import yaml
from datetime import datetime
pkg_path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../.."))
pkg_path = os.path.realpath(pkg_path)
def read_yaml(filename):
    with open(filename, "r") as stream:
        try:
            return yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)
            return None
def get_settings():
fpath = os.path.join(pkg_path, "p3iv/configurations/settings.yaml")
settings = read_yaml(fpath)
settings["dataset"] = os.path.normpath(os.path.join(pkg_path, "../.", settings["dataset"]))
settings["start_time"] = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
# Number of timesteps (horizon is defined in seconds, not ms)
N = int(settings["temporal"]["horizon"] / (settings["temporal"]["dt"] / 1000))
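    # e.g. a horizon of 4 (s) with dt = 100 (ms) yields N = 40 timesteps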
settings["temporal"]["N"] = N
return settings
def load_configurations(test_case_id):
fpath = os.path.join(pkg_path, "p3iv/configurations/test_cases.yaml")
test_cases = read_yaml(fpath)
try:
configurations = test_cases[test_case_id]
except KeyError:
        msg = "The test case '" + test_case_id + "' is not found in p3iv/configurations/test_cases.yaml"
raise KeyError(msg)
s = get_settings()
configurations.update(s)
return configurations
|
from __future__ import print_function
import os
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cdutil
import matplotlib
import numpy as np
import scipy.stats
from cartopy.mpl.ticker import LatitudeFormatter, LongitudeFormatter
from e3sm_diags.derivations.default_regions import regions_specs
from e3sm_diags.driver.utils.general import get_output_dir
matplotlib.use("Agg")
import matplotlib.colors as colors # isort:skip # noqa: E402
import matplotlib.lines as lines # isort:skip # noqa: E402
import matplotlib.pyplot as plt # isort:skip # noqa: E402
plotTitle = {"fontsize": 11.5}
plotSideTitle = {"fontsize": 9.5}
# Border padding relative to subplot axes for saving individual panels
# (left, bottom, width, height) in page coordinates
border = (-0.14, -0.06, 0.04, 0.08)
def add_cyclic(var):
lon = var.getLongitude()
return var(longitude=(lon[0], lon[0] + 360.0, "coe"))
def get_ax_size(fig, ax):
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
width *= fig.dpi
height *= fig.dpi
return width, height
def determine_tick_step(degrees_covered):
if degrees_covered > 180:
return 60
if degrees_covered > 60:
return 30
elif degrees_covered > 20:
return 10
else:
return 1
def plot_panel_seasonality_map(
plot_type, fig, proj, export, color_list, panel, parameter
):
if plot_type == "test":
panel_index = 0
seasonality_index_export_index = 5
peak_month_export_index = 6
title = (None, parameter.test_title, None)
elif plot_type == "ref":
panel_index = 1
seasonality_index_export_index = 3
peak_month_export_index = 4
title = (None, parameter.reference_title, None)
else:
raise Exception("Invalid plot_type={}".format(plot_type))
# Plot of streamflow gauges. Color -> peak month, marker size -> seasonality index.
ax = fig.add_axes(panel[panel_index], projection=proj)
region_str = parameter.regions[0]
region = regions_specs[region_str]
if "domain" in region.keys(): # type: ignore
# Get domain to plot
domain = region["domain"] # type: ignore
else:
# Assume global domain
domain = cdutil.region.domain(latitude=(-90.0, 90, "ccb"))
kargs = domain.components()[0].kargs
# lon_west, lon_east, lat_south, lat_north = (0, 360, -90, 90)
lon_west, lon_east, lat_south, lat_north = (-180, 180, -90, 90)
if "longitude" in kargs:
lon_west, lon_east, _ = kargs["longitude"]
if "latitude" in kargs:
lat_south, lat_north, _ = kargs["latitude"]
lon_covered = lon_east - lon_west
lon_step = determine_tick_step(lon_covered)
xticks = np.arange(lon_west, lon_east, lon_step)
# Subtract 0.50 to get 0 W to show up on the right side of the plot.
# If less than 0.50 is subtracted, then 0 W will overlap 0 E on the left side of the plot.
# If a number is added, then the value won't show up at all.
xticks = np.append(xticks, lon_east - 0.50)
lat_covered = lat_north - lat_south
lat_step = determine_tick_step(lat_covered)
yticks = np.arange(lat_south, lat_north, lat_step)
yticks = np.append(yticks, lat_north)
ax.set_extent([lon_west, lon_east, lat_south, lat_north], crs=proj)
proj_function = ccrs.PlateCarree
# Stream gauges
si_2 = 2
si_4 = 3
si_6 = 4
si_large = 5
# `export` is the array of gauges. Each gauge has multiple fields -- e.g., lat is index 7
for gauge in export:
lat = gauge[7]
lon = gauge[8]
seasonality_index = gauge[seasonality_index_export_index]
if seasonality_index < 2:
markersize = si_2
elif seasonality_index < 4:
markersize = si_4
elif seasonality_index < 6:
markersize = si_6
elif seasonality_index <= 12:
markersize = si_large
else:
raise Exception("Invalid seasonality index={}".format(seasonality_index))
if seasonality_index == 1:
color = "black"
else:
peak_month = int(gauge[peak_month_export_index])
color = color_list[peak_month]
# https://scitools.org.uk/iris/docs/v1.9.2/examples/General/projections_and_annotations.html
# Place a single marker point for each gauge.
plt.plot(
lon,
lat,
marker="o",
color=color,
markersize=markersize,
transform=proj_function(),
)
# NOTE: the "plt.annotate call" does not have a "transform=" keyword,
# so for this one we transform the coordinates with a Cartopy call.
at_x, at_y = ax.projection.transform_point(lon, lat, src_crs=proj_function())
# https://matplotlib.org/3.1.1/gallery/text_labels_and_annotations/custom_legends.html
legend_elements = [
lines.Line2D(
[0],
[0],
marker="o",
color="w",
label="1 <= SI < 2",
markerfacecolor="black",
markersize=si_2,
),
lines.Line2D(
[0],
[0],
marker="o",
color="w",
label="2 <= SI < 4",
markerfacecolor="black",
markersize=si_4,
),
lines.Line2D(
[0],
[0],
marker="o",
color="w",
label="4 <= SI < 6",
markerfacecolor="black",
markersize=si_6,
),
lines.Line2D(
[0],
[0],
marker="o",
color="w",
label="6 <= SI <= 12",
markerfacecolor="black",
markersize=si_large,
),
]
seasonality_legend_title = "Seasonality (SI)"
plt.legend(
handles=legend_elements,
title=seasonality_legend_title,
prop={"size": 8},
)
# Full world would be aspect 360/(2*180) = 1
ax.set_aspect((lon_east - lon_west) / (2 * (lat_north - lat_south)))
ax.coastlines(lw=0.3)
ax.add_feature(cfeature.RIVERS)
if title[0] is not None:
ax.set_title(title[0], loc="left", fontdict=plotSideTitle)
if title[1] is not None:
ax.set_title(title[1], fontdict=plotTitle)
if title[2] is not None:
ax.set_title(title[2], loc="right", fontdict=plotSideTitle)
ax.set_xticks(xticks, crs=proj_function())
ax.set_yticks(yticks, crs=proj_function())
lon_formatter = LongitudeFormatter(zero_direction_label=True, number_format=".0f")
lat_formatter = LatitudeFormatter()
ax.xaxis.set_major_formatter(lon_formatter)
ax.yaxis.set_major_formatter(lat_formatter)
ax.tick_params(labelsize=8.0, direction="out", width=1)
ax.xaxis.set_ticks_position("bottom")
ax.yaxis.set_ticks_position("left")
# Color bar
cbax = fig.add_axes(
(
panel[panel_index][0] + 0.7535,
panel[panel_index][1] + 0.0515,
0.0326,
0.1792,
)
)
# https://matplotlib.org/tutorials/colors/colorbar_only.html
num_colors = len(color_list)
if parameter.print_statements:
print("num_colors={}".format(num_colors))
cmap = colors.ListedColormap(color_list)
cbar_label = "Peak month"
bounds = list(range(num_colors))
# Set ticks to be in between the bounds
ticks = list(map(lambda bound: bound + 0.5, bounds))
# Add one more bound at the bottom of the colorbar.
# `bounds` should be one longer than `ticks`.
bounds += [bounds[-1] + 1]
if parameter.print_statements:
print("bounds={}".format(bounds))
norm = colors.BoundaryNorm(bounds, cmap.N)
cbar = fig.colorbar(
matplotlib.cm.ScalarMappable(cmap=cmap, norm=norm),
cax=cbax,
boundaries=bounds,
ticks=ticks,
spacing="uniform",
orientation="vertical",
label=cbar_label,
)
# https://matplotlib.org/3.1.1/gallery/ticks_and_spines/colorbar_tick_labelling_demo.html
months = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
cbar.ax.set_yticklabels(months)
cbar.ax.invert_yaxis()
w, h = get_ax_size(fig, cbax)
cbar.ax.tick_params(labelsize=9.0, length=0)
def plot_seasonality_map(export, parameter):
if parameter.backend not in ["cartopy", "mpl", "matplotlib"]:
return
# Position and sizes of subplot axes in page coordinates (0 to 1)
# (left, bottom, width, height) in page coordinates
panel = [
(0.0900, 0.5500, 0.7200, 0.3000),
(0.0900, 0.1300, 0.7200, 0.3000),
]
# Create figure, projection
fig = plt.figure(figsize=parameter.figsize, dpi=parameter.dpi)
proj = ccrs.PlateCarree(central_longitude=0)
# test and ref color lists
# Selected from 'hsv' colormap:
color_list = [
(0.05, 0.00, 0.99),
(0.03, 0.30, 0.98),
(0.12, 0.92, 0.99),
(0.13, 1.00, 0.65),
(0.14, 1.00, 0.05),
(0.98, 0.99, 0.04),
(0.99, 0.67, 0.04),
(0.99, 0.34, 0.03),
(0.99, 0.07, 0.03),
(0.99, 0.00, 0.53),
(0.68, 0.00, 1.00),
(0.29, 0.00, 1.00),
]
# First panel
plot_panel_seasonality_map("test", fig, proj, export, color_list, panel, parameter)
# Second panel
plot_panel_seasonality_map("ref", fig, proj, export, color_list, panel, parameter)
# Figure title
fig.suptitle(parameter.main_title_seasonality_map, x=0.5, y=0.97, fontsize=15)
# Prepare to save figure
# get_output_dir => {parameter.results_dir}/{set_name}/{parameter.case_id}
# => {parameter.results_dir}/streamflow/{parameter.case_id}
output_dir = get_output_dir(parameter.current_set, parameter)
if parameter.print_statements:
print("Output dir: {}".format(output_dir))
# get_output_dir => {parameter.orig_results_dir}/{set_name}/{parameter.case_id}
# => {parameter.orig_results_dir}/streamflow/{parameter.case_id}
original_output_dir = get_output_dir(
parameter.current_set, parameter, ignore_container=True
)
if parameter.print_statements:
print("Original output dir: {}".format(original_output_dir))
# parameter.output_file_seasonality_map is defined in e3sm_diags/parameter/streamflow_parameter.py
# {parameter.results_dir}/streamflow/{parameter.case_id}/{parameter.output_file_seasonality_map}
file_path = os.path.join(output_dir, parameter.output_file_seasonality_map)
# {parameter.orig_results_dir}/streamflow/{parameter.case_id}/{parameter.output_file_seasonality_map}
original_file_path = os.path.join(
original_output_dir, parameter.output_file_seasonality_map
)
# Save figure
for f in parameter.output_format:
f = f.lower().split(".")[-1]
plot_suffix = "." + f
plot_file_path = file_path + plot_suffix
plt.savefig(plot_file_path)
# Get the filename that the user has passed in and display that.
# When running in a container, the paths are modified.
original_plot_file_path = original_file_path + plot_suffix
# Always print, even without `parameter.print_statements`
print("Plot saved in: " + original_plot_file_path)
# Save individual subplots
for f in parameter.output_format_subplot:
page = fig.get_size_inches()
i = 0
for p in panel:
# Extent of subplot
subpage = np.array(p).reshape(2, 2)
subpage[1, :] = subpage[0, :] + subpage[1, :]
subpage = subpage + np.array(border).reshape(2, 2)
subpage = list((subpage * page).flatten())
extent = matplotlib.transforms.Bbox.from_extents(*subpage)
# Save subplot
subplot_suffix = ".%i." % i + f
subplot_file_path = file_path + subplot_suffix
plt.savefig(subplot_file_path, bbox_inches=extent)
# Get the filename that the user has passed in and display that.
# When running in a container, the paths are modified.
original_subplot_file_path = original_file_path + subplot_suffix
# Always print, even without `parameter.print_statements`
print("Sub-plot saved in: " + original_subplot_file_path)
i += 1
plt.close()
def plot_panel_annual_map(panel_index, fig, proj, export, bias_array, panel, parameter):
if panel_index == 0:
panel_type = "test"
elif panel_index == 1:
panel_type = "ref"
elif panel_index == 2:
panel_type = "bias"
else:
raise Exception("Invalid panel_index={}".format(panel_index))
# Plot of streamflow gauges. Color -> peak month, marker size -> seasonality index.
# Position and sizes of subplot axes in page coordinates (0 to 1)
ax = fig.add_axes(panel[panel_index], projection=proj)
region_str = parameter.regions[0]
region = regions_specs[region_str]
if "domain" in region.keys(): # type: ignore
# Get domain to plot
domain = region["domain"] # type: ignore
else:
# Assume global domain
domain = cdutil.region.domain(latitude=(-90.0, 90, "ccb"))
kargs = domain.components()[0].kargs
# lon_west, lon_east, lat_south, lat_north = (0, 360, -90, 90)
lon_west, lon_east, lat_south, lat_north = (-180, 180, -90, 90)
if "longitude" in kargs:
lon_west, lon_east, _ = kargs["longitude"]
if "latitude" in kargs:
lat_south, lat_north, _ = kargs["latitude"]
lon_covered = lon_east - lon_west
lon_step = determine_tick_step(lon_covered)
xticks = np.arange(lon_west, lon_east, lon_step)
# Subtract 0.50 to get 0 W to show up on the right side of the plot.
# If less than 0.50 is subtracted, then 0 W will overlap 0 E on the left side of the plot.
# If a number is added, then the value won't show up at all.
xticks = np.append(xticks, lon_east - 0.50)
lat_covered = lat_north - lat_south
lat_step = determine_tick_step(lat_covered)
yticks = np.arange(lat_south, lat_north, lat_step)
yticks = np.append(yticks, lat_north)
ax.set_extent([lon_west, lon_east, lat_south, lat_north], crs=proj)
proj_function = ccrs.PlateCarree
# Stream gauges
color_list, value_min, value_max, norm = setup_annual_map(
parameter, panel_type, bias_array
)
plot_gauges_annual_map(
panel_type,
export,
bias_array,
value_min,
value_max,
color_list,
proj_function,
ax,
)
# Full world would be aspect 360/(2*180) = 1
ax.set_aspect((lon_east - lon_west) / (2 * (lat_north - lat_south)))
ax.coastlines(lw=0.3)
ax.add_feature(cfeature.RIVERS)
if panel_type == "test":
title = parameter.test_title
elif panel_type == "ref":
title = parameter.reference_title
elif panel_type == "bias":
title = "Relative Bias"
else:
raise Exception("Invalid panel_type={}".format(panel_type))
title = (None, title, None)
if title[0] is not None:
ax.set_title(title[0], loc="left", fontdict=plotSideTitle)
if title[1] is not None:
ax.set_title(title[1], fontdict=plotTitle)
if title[2] is not None:
ax.set_title(title[2], loc="right", fontdict=plotSideTitle)
ax.set_xticks(xticks, crs=proj_function())
ax.set_yticks(yticks, crs=proj_function())
lon_formatter = LongitudeFormatter(zero_direction_label=True, number_format=".0f")
lat_formatter = LatitudeFormatter()
ax.xaxis.set_major_formatter(lon_formatter)
ax.yaxis.set_major_formatter(lat_formatter)
ax.tick_params(labelsize=8.0, direction="out", width=1)
ax.xaxis.set_ticks_position("bottom")
ax.yaxis.set_ticks_position("left")
# Color bar
# Position and sizes of subplot axes in page coordinates (0 to 1)
# (left, bottom, width, height) in page coordinates
cbax = fig.add_axes(
(
panel[panel_index][0] + 0.6635,
panel[panel_index][1] + 0.0115,
0.0326,
0.1792,
)
)
cmap = colors.ListedColormap(color_list)
if panel_type in ["test", "ref"]:
cbar_label = "Mean annual discharge ($m^3$/$s$)"
elif panel_type == "bias":
cbar_label = "Bias of mean annual discharge (%)\n(test-ref)/ref"
else:
raise Exception("Invalid panel_type={}".format(panel_type))
cbar = fig.colorbar(
matplotlib.cm.ScalarMappable(cmap=cmap, norm=norm),
cax=cbax,
label=cbar_label,
extend="both",
)
w, h = get_ax_size(fig, cbax)
if panel_type in ["test", "ref"]:
pass
elif panel_type == "bias":
step_size = (value_max - value_min) // 5
ticks = np.arange(int(value_min), int(value_max) + step_size, step_size)
cbar.ax.tick_params(labelsize=9.0, length=0)
cbar.ax.set_yticklabels(ticks)
else:
raise Exception("Invalid panel_type={}".format(panel_type))
def setup_annual_map(parameter, panel_type, bias_array):
# Continuous colormap
colormap = plt.get_cmap("jet_r")
color_list = list(map(lambda index: colormap(index)[:3], range(colormap.N)))
if panel_type in ["test", "ref"]:
value_min, value_max = 1, 1e4
# https://matplotlib.org/3.2.1/tutorials/colors/colormapnorms.html
norm = matplotlib.colors.LogNorm(vmin=value_min, vmax=value_max)
elif panel_type == "bias":
if parameter.print_statements:
value_min = np.floor(np.min(bias_array))
value_max = np.ceil(np.max(bias_array))
print(
"Bias of mean annual discharge {} min={}, max={}".format(
panel_type, value_min, value_max
)
)
value_min = -100
value_max = 100
norm = matplotlib.colors.Normalize()
else:
raise Exception("Invalid panel_type={}".format(panel_type))
return color_list, value_min, value_max, norm
def plot_gauges_annual_map(
panel_type, export, bias_array, value_min, value_max, color_list, proj_function, ax
):
# `export` is the array of gauges. Each gauge has multiple fields -- e.g., lat is index 7
for gauge, i in zip(export, range(len(export))):
if panel_type == "test":
# Test mean annual discharge
value = gauge[1]
elif panel_type == "ref":
# Ref mean annual discharge
value = gauge[0]
elif panel_type == "bias":
# Bias
value = bias_array[i]
else:
raise Exception("Invalid panel_type={}".format(panel_type))
if np.isnan(value):
continue
if value < value_min:
value = value_min
elif value > value_max:
value = value_max
if panel_type in ["test", "ref"]:
            # Logarithmic rescale (min-max normalization) to the [0, 1] range
normalized_value = (np.log10(value) - np.log10(value_min)) / (
np.log10(value_max) - np.log10(value_min)
)
elif panel_type == "bias":
            # Rescale (min-max normalization) to the [0, 1] range
normalized_value = (value - value_min) / (value_max - value_min)
else:
raise Exception("Invalid panel_type={}".format(panel_type))
lat = gauge[7]
lon = gauge[8]
color = color_list[int(normalized_value * (len(color_list) - 1))]
# https://scitools.org.uk/iris/docs/v1.9.2/examples/General/projections_and_annotations.html
# Place a single marker point for each gauge.
plt.plot(
lon,
lat,
marker="o",
markersize=2,
color=color,
transform=proj_function(),
)
# NOTE: the "plt.annotate call" does not have a "transform=" keyword,
# so for this one we transform the coordinates with a Cartopy call.
at_x, at_y = ax.projection.transform_point(lon, lat, src_crs=proj_function())
def plot_annual_map(export, bias, parameter):
if parameter.backend not in ["cartopy", "mpl", "matplotlib"]:
return
# Position and sizes of subplot axes in page coordinates (0 to 1)
# (left, bottom, width, height) in page coordinates
panel = [
(0.1691, 0.6810, 0.6465, 0.2258),
(0.1691, 0.3961, 0.6465, 0.2258),
(0.1691, 0.1112, 0.6465, 0.2258),
]
# Create figure, projection
fig = plt.figure(figsize=parameter.figsize, dpi=parameter.dpi)
proj = ccrs.PlateCarree(central_longitude=0)
# First panel
plot_panel_annual_map(0, fig, proj, export, bias, panel, parameter)
# Second panel
plot_panel_annual_map(1, fig, proj, export, bias, panel, parameter)
# Third panel
plot_panel_annual_map(2, fig, proj, export, bias, panel, parameter)
# Figure title
fig.suptitle(parameter.main_title_annual_map, x=0.5, y=0.97, fontsize=15)
# Prepare to save figure
# get_output_dir => {parameter.results_dir}/{set_name}/{parameter.case_id}
# => {parameter.results_dir}/streamflow/{parameter.case_id}
output_dir = get_output_dir(parameter.current_set, parameter)
if parameter.print_statements:
print("Output dir: {}".format(output_dir))
# get_output_dir => {parameter.orig_results_dir}/{set_name}/{parameter.case_id}
# => {parameter.orig_results_dir}/streamflow/{parameter.case_id}
original_output_dir = get_output_dir(
parameter.current_set, parameter, ignore_container=True
)
if parameter.print_statements:
print("Original output dir: {}".format(original_output_dir))
# parameter.output_file_annual_map is defined in e3sm_diags/parameter/streamflow_parameter.py
# {parameter.results_dir}/streamflow/{parameter.case_id}/{parameter.output_file_annual_map}
file_path = os.path.join(output_dir, parameter.output_file_annual_map)
# {parameter.orig_results_dir}/streamflow/{parameter.case_id}/{parameter.output_file_annual_map}
original_file_path = os.path.join(
original_output_dir, parameter.output_file_annual_map
)
# Save figure
for f in parameter.output_format:
f = f.lower().split(".")[-1]
plot_suffix = "." + f
plot_file_path = file_path + plot_suffix
plt.savefig(plot_file_path)
# Get the filename that the user has passed in and display that.
# When running in a container, the paths are modified.
original_plot_file_path = original_file_path + plot_suffix
# Always print, even without `parameter.print_statements`
print("Plot saved in: " + original_plot_file_path)
# Save individual subplots
for f in parameter.output_format_subplot:
page = fig.get_size_inches()
i = 0
for p in panel:
# Extent of subplot
subpage = np.array(p).reshape(2, 2)
subpage[1, :] = subpage[0, :] + subpage[1, :]
subpage = subpage + np.array(border).reshape(2, 2)
subpage = list((subpage * page).flatten())
extent = matplotlib.transforms.Bbox.from_extents(*subpage)
# Save subplot
subplot_suffix = ".%i." % i + f
subplot_file_path = file_path + subplot_suffix
plt.savefig(subplot_file_path, bbox_inches=extent)
# Get the filename that the user has passed in and display that.
# When running in a container, the paths are modified.
original_subplot_file_path = original_file_path + subplot_suffix
# Always print, even without `parameter.print_statements`
print("Sub-plot saved in: " + original_subplot_file_path)
i += 1
plt.close()
def plot_annual_scatter(xs, ys, zs, parameter):
# Position and sizes of subplot axes in page coordinates (0 to 1)
# (left, bottom, width, height) in page coordinates
panel = [(0.0900, 0.2000, 0.7200, 0.6000)]
fig = plt.figure(figsize=parameter.figsize, dpi=parameter.dpi)
ax = fig.add_axes(panel[0])
cmap = plt.get_cmap("jet")
ax.scatter(xs, ys, label="Scatterplot", marker="o", s=10, c=zs, cmap=cmap)
r, _ = scipy.stats.pearsonr(xs, ys)
r2 = r * r
r2_str = "{0:.2f}".format(r2)
bounds = [0.01, 100000]
ax.plot(bounds, bounds, color="red", linestyle="-")
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel(
"{} streamflow ($m^3$/$s$)".format(parameter.reference_title),
fontsize=12,
)
ax.set_ylabel("{} streamflow ($m^3$/$s$)".format(parameter.test_title), fontsize=12)
ax.set_xlim(bounds[0], bounds[1])
ax.set_ylim(bounds[0], bounds[1])
ax.tick_params(axis="both", labelsize=12)
# Color bar
# Position and sizes of subplot axes in page coordinates (0 to 1)
# (left, bottom, width, height) in page coordinates
cbax = fig.add_axes(
(panel[0][0] + 0.7535, panel[0][1] + 0.0515, 0.0326, 0.1792 * 2)
)
cbar_label = "Drainage area bias (%)"
cbar = fig.colorbar(matplotlib.cm.ScalarMappable(cmap=cmap), cax=cbax)
cbar.ax.set_ylabel(cbar_label, fontsize=12)
w, h = get_ax_size(fig, cbax)
zs_max = np.ceil(np.max(zs))
zs_min = np.floor(np.min(zs))
step_size = (zs_max - zs_min) // 5
try:
ticks = np.arange(zs_min, zs_max + step_size, step_size)
cbar.ax.set_yticklabels(ticks)
except ValueError:
# `zs` has invalid values (likely from no area_upstream being found).
# Just use default colorbar.
pass
cbar.ax.tick_params(labelsize=12.0, length=0)
# Figure title
if parameter.main_title_annual_scatter == "":
main_title_annual_scatter = "Annual mean streamflow\n{} vs {}".format(
parameter.test_title, parameter.reference_title
)
else:
main_title_annual_scatter = parameter.main_title_annual_scatter
ax.set_title(main_title_annual_scatter, loc="center", y=1.05, fontsize=15)
legend_title = "$R^2$={}, (n={})".format(r2_str, xs.shape[0])
ax.legend(handles=[], title=legend_title, loc="upper left", prop={"size": 12})
# Prepare to save figure
# get_output_dir => {parameter.results_dir}/{set_name}/{parameter.case_id}
# => {parameter.results_dir}/streamflow/{parameter.case_id}
output_dir = get_output_dir(parameter.current_set, parameter)
if parameter.print_statements:
print("Output dir: {}".format(output_dir))
# get_output_dir => {parameter.orig_results_dir}/{set_name}/{parameter.case_id}
# => {parameter.orig_results_dir}/streamflow/{parameter.case_id}
original_output_dir = get_output_dir(
parameter.current_set, parameter, ignore_container=True
)
if parameter.print_statements:
print("Original output dir: {}".format(original_output_dir))
# parameter.output_file_annual_scatter is defined in e3sm_diags/parameter/streamflow_parameter.py
# {parameter.results_dir}/streamflow/{parameter.case_id}/{parameter.output_file_annual_scatter}
file_path = os.path.join(output_dir, parameter.output_file_annual_scatter)
# {parameter.orig_results_dir}/streamflow/{parameter.case_id}/{parameter.output_file_annual_scatter}
original_file_path = os.path.join(
original_output_dir, parameter.output_file_annual_scatter
)
# Save figure
for f in parameter.output_format:
f = f.lower().split(".")[-1]
plot_suffix = "." + f
plot_file_path = file_path + plot_suffix
plt.savefig(plot_file_path)
# Get the filename that the user has passed in and display that.
# When running in a container, the paths are modified.
original_plot_file_path = original_file_path + plot_suffix
print("Plot saved in: " + original_plot_file_path)
plt.close()
|
import sys
import csv
# Set up CL args
# Define header
def main():
rows = []
# Read in rows
# Process rows
# Write rows to file
pass
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
"""Top-level package for Trips."""
__author__ = """Anders K. Pettersen"""
__email__ = 'andstatical@gmail.com'
__version__ = '0.1.0'
|
"""
django_simple_slack_app URL Configuration
"""
from django.conf import settings
from django.urls import path
from django.views.generic import TemplateView, RedirectView
from django_simple_slack_app.views import SlackCommandView, SlackEventView, SlackOAuthView
event_url = getattr(settings, "SLACK_EVENT_URL", "events")
command_url = getattr(settings, "SLACK_COMMAND_URL", "command")
oauth_url = getattr(settings, "SLACK_OAUTH_URL", "oauth")
urlpatterns = [
path(event_url, SlackEventView.as_view()),
path(command_url, SlackCommandView.as_view()),
path(oauth_url, SlackOAuthView.as_view()),
path("install", RedirectView.as_view(url=f"https://slack.com/oauth/v2/authorize?client_id={settings.SLACK_CLIENT_ID}&scope=channels:history,chat:write,commands&user_scope=chat:write"), name="install"),
path("done", TemplateView.as_view(template_name="django_simple_slack_app/oauth_done.html"), name="oauth_done"),
]
|
import logging
import random
import re
import string
from kubernetes import client
from kube_api.config import core_v1_api as api
from .utils import api_request, get_dict_value
logger = logging.getLogger(__name__)
class VolumeClaim:
"""Represents a volume claim.
"""
def __init__(self, claim_name, disk_space, namespace='default', storage_class="standard-fc"):
""" Initialize a VolumeClaim object
"""
self.name = claim_name
self.namespace = namespace
self.vc_spec = dict(access_modes=['ReadWriteOnce'], storage_class_name=storage_class)
self.disk_space = disk_space
@staticmethod
def generate_claim_name(prefix, n=6):
"""Generates a volume claim name by concatenating a prefix and a string of random lower case letters.
The prefix will be converted to lower case string.
Non-alphanumeric characters in the prefix will be replaced by "-".
Args:
prefix (string): Prefix of the claim name.
n (int): The number of random lower case letters to be appended to the prefix.
Returns: A string like "prefix-abcdef"
"""
# Replace non alpha numeric and convert to lower case.
if prefix:
claim_name = re.sub('[^0-9a-zA-Z]+', '-', str(prefix).strip()).lower() + "-"
else:
claim_name = ""
if len(claim_name) > (48 - n):
claim_name = claim_name[:48 - n]
# Append a random string
claim_name += ''.join(random.choice(string.ascii_lowercase) for _ in range(n))
return claim_name
def create(self, vc_spec=None):
"""Creates a volume claim on the cluster.
Args:
vc_spec: A dictionary of keyword arguments that will be passed to V1PersistentVolumeClaimSpec()
Returns: A dictionary containing the results of creating the volume claim on the cluster.
"""
if vc_spec is None:
vc_spec = self.vc_spec
# Convert claim name to lower case
claim_name = str(self.name).lower()
vc_body = client.V1PersistentVolumeClaim()
vc_body.metadata = client.V1ObjectMeta(namespace=self.namespace, name=claim_name)
resources = client.V1ResourceRequirements(requests={'storage': str(self.disk_space)+'Gi'})
vc_body.spec = client.V1PersistentVolumeClaimSpec(resources=resources, **vc_spec)
self.creation_response = api_request(api.create_namespaced_persistent_volume_claim, self.namespace, vc_body)
return self.creation_response
def delete(self):
body = client.V1DeleteOptions(propagation_policy='Foreground')
return api_request(api.delete_namespaced_persistent_volume_claim,
name=self.name,
namespace=self.namespace,
body=body)
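# Usage sketch (assumes a reachable cluster configured for kube_api.config):
#   claim = VolumeClaim(VolumeClaim.generate_claim_name("scratch"), disk_space=10)
#   claim.create()   # submits the PersistentVolumeClaim and returns the API response
#   claim.delete()   # removes it again with a Foreground deletion policy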
|
import pytest
from requests import Request
from trycourier.session import CourierAPISession
@pytest.fixture
def _request():
return Request('GET', 'http://someurl')
def test_request_headers(_request):
s = CourierAPISession()
s.init_library_version('1.0.0')
r = s.prepare_request(_request)
assert r.headers['Content-Type'] == 'application/json'
assert r.headers['User-Agent'] == 'courier-python/1.0.0'
def test_token_auth(_request):
s = CourierAPISession()
s.init_token_auth('123456789ABCDF')
r = s.prepare_request(_request)
assert r.headers['Authorization'] == 'Bearer 123456789ABCDF'
def test_basic_auth(_request):
s = CourierAPISession()
s.init_basic_auth('user', 'pass')
r = s.prepare_request(_request)
assert r.headers['Authorization'] == 'Basic dXNlcjpwYXNz'
|
#!/usr/bin/env python
"""
futurize.py
===========
This script is only used by the unit tests. Another script called
"futurize" is created automatically (without the .py extension) by
setuptools.
futurize.py attempts to turn Py2 code into valid, clean Py3 code that is
also compatible with Py2 when using the ``future`` package.
Licensing
---------
Copyright 2013-2015 Python Charmers Pty Ltd, Australia.
The software is distributed under an MIT licence. See LICENSE.txt.
"""
import sys
from libfuturize.main import main
sys.exit(main())
|
from arrview.tools.base import MouseButtons, MouseState, ToolSet
from arrview.tools.colormap import ColorMapTool
from arrview.tools.cursor_info import CursorInfoTool
from arrview.tools.pan_zoom import PanTool, ZoomTool
from arrview.tools.roi import ROITool
|
from common_utils_py.agreements.service_agreement import ServiceAgreement
from common_utils_py.agreements.service_agreement_template import ServiceAgreementTemplate
from common_utils_py.agreements.service_types import ServiceTypes, ServiceTypesIndices
from common_utils_py.agreements.utils import get_sla_template
from common_utils_py.ddo.service import Service
from common_utils_py.did import did_to_id
class ServiceDescriptor(object):
"""Tuples of length 2. The first item must be one of ServiceTypes and the second
item is a dict of parameters and values required by the service"""
@staticmethod
def metadata_service_descriptor(attributes, service_endpoint):
"""
Metadata service descriptor.
:param attributes: conforming to the Metadata accepted by Nevermined, dict
:param service_endpoint: identifier of the service inside the asset DDO, str
:return: Service descriptor.
"""
return (ServiceTypes.METADATA,
{'attributes': attributes, 'serviceEndpoint': service_endpoint})
@staticmethod
def authorization_service_descriptor(attributes, service_endpoint):
"""
Authorization service descriptor.
:param attributes: attributes of the authorization service, dict
:param service_endpoint: identifier of the service inside the asset DDO, str
:return: Service descriptor.
"""
return (ServiceTypes.AUTHORIZATION,
{'attributes': attributes, 'serviceEndpoint': service_endpoint})
@staticmethod
def access_service_descriptor(attributes, service_endpoint):
"""
Access service descriptor.
:param attributes: attributes of the access service, dict
:param service_endpoint: identifier of the service inside the asset DDO, str
:return: Service descriptor.
"""
return (
ServiceTypes.ASSET_ACCESS,
{'attributes': attributes, 'serviceEndpoint': service_endpoint}
)
@staticmethod
def access_proof_service_descriptor(attributes, service_endpoint):
"""
Access service with data transfer proof descriptor.
:param attributes: attributes of the access service, dict
:param service_endpoint: identifier of the service inside the asset DDO, str
:return: Service descriptor.
"""
return (
ServiceTypes.ASSET_ACCESS_PROOF,
{'attributes': attributes, 'serviceEndpoint': service_endpoint}
)
@staticmethod
def compute_service_descriptor(attributes, service_endpoint):
"""
Compute service descriptor.
:param attributes: attributes of the compute service, dict
:param service_endpoint: identifier of the service inside the asset DDO, str
:return: Service descriptor.
"""
return (
ServiceTypes.CLOUD_COMPUTE,
{'attributes': attributes, 'serviceEndpoint': service_endpoint}
)
@staticmethod
def did_sales_service_descriptor(attributes, service_endpoint):
"""
DID Sales service descriptor.
:param attributes: attributes of the did sales service, dict
:param service_endpoint: identifier of the service inside the asset DDO, str
:return: Service descriptor.
"""
return (
ServiceTypes.DID_SALES,
{'attributes': attributes, 'serviceEndpoint': service_endpoint}
)
@staticmethod
def nft_sales_service_descriptor(attributes, service_endpoint):
"""
NFT Sales service descriptor.
:param attributes: attributes of the nft sales service, dict
:param service_endpoint: identifier of the service inside the asset DDO, str
:return: Service descriptor.
"""
return (
ServiceTypes.NFT_SALES,
{'attributes': attributes, 'serviceEndpoint': service_endpoint}
)
@staticmethod
def nft_access_service_descriptor(attributes, service_endpoint):
"""
NFT Access service descriptor.
:param attributes: attributes of the nft access service, dict
:param service_endpoint: identifier of the service inside the asset DDO, str
:return: Service descriptor.
"""
return (
ServiceTypes.NFT_ACCESS,
{'attributes': attributes, 'serviceEndpoint': service_endpoint}
)
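# Example (sketch): every descriptor is simply a (service_type, params) tuple, e.g.
#   ServiceDescriptor.access_service_descriptor(attributes, endpoint)
#   == (ServiceTypes.ASSET_ACCESS,
#       {'attributes': attributes, 'serviceEndpoint': endpoint})
# where `attributes` and `endpoint` are caller-supplied values.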
class ServiceFactory(object):
"""Factory class to create Services."""
@staticmethod
def build_services(service_descriptors):
"""
Build a list of services.
:param service_descriptors: List of tuples of length 2. The first item must be one of
ServiceTypes
and the second item is a dict of parameters and values required by the service
:return: List of Services
"""
services = []
for i, service_desc in enumerate(service_descriptors):
service = ServiceFactory.build_service(service_desc)
# set index for each service
service.update_value(ServiceAgreement.SERVICE_INDEX, int(i))
services.append(service)
return services
@staticmethod
def build_service(service_descriptor):
"""
Build a service.
:param service_descriptor: Tuples of length 2. The first item must be one of ServiceTypes
and the second item is a dict of parameters and values required by the service
:return: Service
"""
assert isinstance(service_descriptor, tuple) and len(
service_descriptor) == 2, 'Unknown service descriptor format.'
service_type, kwargs = service_descriptor
if service_type == ServiceTypes.METADATA:
return ServiceFactory.build_metadata_service(
kwargs['attributes'],
kwargs['serviceEndpoint']
)
elif service_type == ServiceTypes.AUTHORIZATION:
return ServiceFactory.build_authorization_service(
kwargs['attributes'],
kwargs['serviceEndpoint']
)
elif service_type == ServiceTypes.ASSET_ACCESS:
return ServiceFactory.build_access_service(
kwargs['attributes'],
kwargs['serviceEndpoint']
)
elif service_type == ServiceTypes.ASSET_ACCESS_PROOF:
return ServiceFactory.build_access_proof_service(
kwargs['attributes'],
kwargs['serviceEndpoint']
)
elif service_type == ServiceTypes.CLOUD_COMPUTE:
return ServiceFactory.build_compute_service(
kwargs['attributes'],
kwargs['serviceEndpoint']
)
elif service_type == ServiceTypes.DID_SALES:
return ServiceFactory.build_did_sales_service(
kwargs['attributes'],
kwargs['serviceEndpoint']
)
elif service_type == ServiceTypes.NFT_SALES:
return ServiceFactory.build_nft_sales_service(
kwargs['attributes'],
kwargs['serviceEndpoint']
)
elif service_type == ServiceTypes.NFT_ACCESS:
return ServiceFactory.build_nft_access_service(
kwargs['attributes'],
kwargs['serviceEndpoint']
)
raise ValueError(f'Unknown service type {service_type}')
@staticmethod
def build_metadata_service(metadata, service_endpoint):
"""
Build a metadata service.
:param metadata: conforming to the Metadata accepted by Nevermined, dict
:param service_endpoint: identifier of the service inside the asset DDO, str
:return: Service
"""
return Service(service_endpoint,
ServiceTypes.METADATA,
values={'attributes': metadata},
index=ServiceTypesIndices.DEFAULT_METADATA_INDEX
)
@staticmethod
def build_authorization_service(attributes, service_endpoint):
"""
Build an authorization service.
:param attributes: attributes of authorization service, dict
:param service_endpoint: identifier of the service inside the asset DDO, str
:return: Service
"""
return Service(service_endpoint, ServiceTypes.AUTHORIZATION,
values={'attributes': attributes},
index=ServiceTypesIndices.DEFAULT_AUTHORIZATION_INDEX)
@staticmethod
def build_access_service(attributes, service_endpoint):
"""
Build an access service.
:param attributes: attributes of access service, dict
:param service_endpoint: identifier of the service inside the asset DDO, str
:return: Service
"""
return Service(service_endpoint, ServiceTypes.ASSET_ACCESS,
values={'attributes': attributes},
index=ServiceTypesIndices.DEFAULT_ACCESS_INDEX)
@staticmethod
def build_access_proof_service(attributes, service_endpoint):
"""
Build an access service.
:param attributes: attributes of access service, dict
:param service_endpoint: identifier of the service inside the asset DDO, str
:return: Service
"""
return Service(service_endpoint, ServiceTypes.ASSET_ACCESS_PROOF,
values={'attributes': attributes},
index=ServiceTypesIndices.DEFAULT_ACCESS_PROOF_INDEX)
@staticmethod
def build_compute_service(attributes, service_endpoint):
"""
Build a compute service.
:param attributes: attributes of compute service, dict
:param service_endpoint: identifier of the service inside the asset DDO, str
:return: Service
"""
return Service(service_endpoint, ServiceTypes.CLOUD_COMPUTE,
values={'attributes': attributes},
index=ServiceTypesIndices.DEFAULT_COMPUTING_INDEX)
@staticmethod
def build_did_sales_service(attributes, service_endpoint):
"""
Build a did sales service.
:param attributes: attributes of did sales service, dict
:param service_endpoint: identifier of the service inside the asset DDO, str
:return: Service
"""
return Service(service_endpoint, ServiceTypes.DID_SALES,
values={'attributes': attributes},
index=ServiceTypesIndices.DEFAULT_DID_SALES_INDEX)
@staticmethod
def build_nft_sales_service(attributes, service_endpoint):
"""
Build a nft sales service.
:param attributes: attributes of nft sales service, dict
:param service_endpoint: identifier of the service inside the asset DDO, str
:return: Service
"""
return Service(service_endpoint, ServiceTypes.NFT_SALES,
values={'attributes': attributes},
index=ServiceTypesIndices.DEFAULT_NFT_SALES_INDEX)
@staticmethod
def build_nft_access_service(attributes, service_endpoint):
"""
        Build an NFT access service.
:param attributes: attributes of nft sales service, dict
:param service_endpoint: identifier of the service inside the asset DDO, str
:return: Service
"""
return Service(service_endpoint, ServiceTypes.NFT_ACCESS,
values={'attributes': attributes},
index=ServiceTypesIndices.DEFAULT_NFT_ACCESS_INDEX)
@staticmethod
def complete_access_service(did, service_endpoint, attributes, template_id,
reward_contract_address=None, service_type=ServiceTypes.ASSET_ACCESS):
"""
Build the access service.
:param did: DID, str
:param service_endpoint: identifier of the service inside the asset DDO, str
:param template_id: id of the template use to create the service, str
:param reward_contract_address: hex str ethereum address of deployed reward condition
smart contract
:return: ServiceAgreement
"""
param_map = {
'_documentId': did_to_id(did),
'_amount': attributes['main']['price']
}
if reward_contract_address is not None:
            param_map['_rewardAddress'] = reward_contract_address
try:
param_map['_did'] = did_to_id(did)
param_map['_amounts'] = attributes['main']['_amounts']
param_map['_receivers'] = attributes['main']['_receivers']
param_map['_numberNfts'] = attributes['main']['_numberNfts']
param_map['_tokenAddress'] = attributes['main']['_tokenAddress']
except KeyError:
pass
sla_template_dict = get_sla_template(service_type)
sla_template = ServiceAgreementTemplate(template_id, service_type,
attributes['main']['creator'], sla_template_dict)
sla_template.template_id = template_id
conditions = sla_template.conditions[:]
for cond in conditions:
for param in cond.parameters:
param.value = param_map.get(param.name, '')
if cond.timeout > 0:
cond.timeout = attributes['main']['timeout']
sla_template.set_conditions(conditions)
sa = ServiceAgreement(
attributes,
sla_template,
service_endpoint,
service_type
)
return sa
@staticmethod
def complete_compute_service(did, service_endpoint, attributes, template_id,
reward_contract_address):
"""
Build the access service.
:param did: DID, str
:param service_endpoint: identifier of the service inside the asset DDO, str
:param template_id: id of the template use to create the service, str
:param reward_contract_address: hex str ethereum address of deployed reward condition
smart contract
:return: ServiceAgreement
"""
param_map = {
'_documentId': did_to_id(did),
'_amount': attributes['main']['price'],
'_rewardAddress': reward_contract_address
}
try:
param_map['_amounts'] = attributes['main']['_amounts']
param_map['_receivers'] = attributes['main']['_receivers']
except KeyError:
pass
sla_template_dict = get_sla_template(ServiceTypes.CLOUD_COMPUTE)
sla_template = ServiceAgreementTemplate(template_id, ServiceTypes.CLOUD_COMPUTE,
attributes['main']['creator'], sla_template_dict)
sla_template.template_id = template_id
conditions = sla_template.conditions[:]
for cond in conditions:
for param in cond.parameters:
param.value = param_map.get(param.name, '')
if cond.timeout > 0:
cond.timeout = attributes['main']['timeout']
sla_template.set_conditions(conditions)
sa = ServiceAgreement(
attributes,
sla_template,
service_endpoint,
ServiceTypes.CLOUD_COMPUTE
)
return sa
@staticmethod
def complete_nft_sales_service(did, service_endpoint, attributes, template_id,
reward_contract_address=None, service_type=ServiceTypes.NFT_SALES):
"""
        Build the nft sales service.
        :param did: DID, str
        :param service_endpoint: identifier of the service inside the asset DDO, str
        :param attributes: attributes of the nft sales service, dict
        :param template_id: id of the template used to create the service, str
        :param reward_contract_address: hex str ethereum address of deployed reward condition
        smart contract
        :param service_type: type of the service, defaults to ServiceTypes.NFT_SALES, str
        :return: ServiceAgreement
"""
param_map = {
'_documentId': did_to_id(did),
'_amount': attributes['main']['price']
}
if reward_contract_address is not None:
            param_map['_rewardAddress'] = reward_contract_address
try:
param_map['_amounts'] = attributes['main']['_amounts']
param_map['_receivers'] = attributes['main']['_receivers']
param_map['_numberNfts'] = attributes['main']['_numberNfts']
param_map['_nftHolder'] = attributes['main']['_nftHolder']
except KeyError:
pass
sla_template_dict = get_sla_template(service_type)
sla_template = ServiceAgreementTemplate(template_id, service_type,
attributes['main']['creator'], sla_template_dict)
sla_template.template_id = template_id
conditions = sla_template.conditions[:]
for cond in conditions:
for param in cond.parameters:
param.value = param_map.get(param.name, '')
if cond.timeout > 0:
cond.timeout = attributes['main']['timeout']
sla_template.set_conditions(conditions)
sa = ServiceAgreement(
attributes,
sla_template,
service_endpoint,
service_type
)
return sa
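# Illustrative note (not part of the original module): the complete_* builders above
# read their agreement parameters from attributes['main'].  A minimal, hypothetical
# attributes dict for complete_access_service could look like:
#   attributes = {
#       'main': {
#           'creator': '0x...creator address...',
#           'price': '10',
#           'timeout': 3600,
#           '_amounts': ['7', '3'],
#           '_receivers': ['0x...receiver 1...', '0x...receiver 2...'],
#       }
#   }
# The keys read inside the try/except KeyError blocks are optional, but note that the
# first missing key stops the remaining optional parameters from being filled.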
|
'''
@Author: Yingshi Chen
@Date: 2020-04-27 18:30:01
@Description:
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from operations import *
from sparse_max import sparsemax, sparsemoid, entmoid15, entmax15
from genotypes import Genotype
import time
from MixedOp import *
from torch.autograd import Variable
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
class BasicConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
super(BasicConv, self).__init__()
self.out_channels = out_planes
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
self.bn = nn.BatchNorm2d(out_planes,eps=1e-5, momentum=0.01, affine=True) if bn else None
self.relu = nn.ReLU() if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class ChannelGate(nn.Module):
def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max']):
super(ChannelGate, self).__init__()
self.gate_channels = gate_channels
self.mlp = nn.Sequential(
Flatten(),
nn.Linear(gate_channels, gate_channels // reduction_ratio),
nn.ReLU(),
nn.Linear(gate_channels // reduction_ratio, gate_channels)
)
self.pool_types = pool_types
def forward(self, x):
channel_att_sum = None
for pool_type in self.pool_types:
if pool_type=='avg':
avg_pool = F.avg_pool2d( x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
channel_att_raw = self.mlp( avg_pool )
elif pool_type=='max':
max_pool = F.max_pool2d( x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
channel_att_raw = self.mlp( max_pool )
elif pool_type=='lp':
lp_pool = F.lp_pool2d( x, 2, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
channel_att_raw = self.mlp( lp_pool )
elif pool_type=='lse':
# LSE pool only
lse_pool = logsumexp_2d(x)
channel_att_raw = self.mlp( lse_pool )
if channel_att_sum is None:
channel_att_sum = channel_att_raw
else:
channel_att_sum = channel_att_sum + channel_att_raw
        scale = torch.sigmoid(channel_att_sum).unsqueeze(2).unsqueeze(3).expand_as(x)
return x * scale
def logsumexp_2d(tensor):
tensor_flatten = tensor.view(tensor.size(0), tensor.size(1), -1)
s, _ = torch.max(tensor_flatten, dim=2, keepdim=True)
outputs = s + (tensor_flatten - s).exp().sum(dim=2, keepdim=True).log()
return outputs
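# Added note: logsumexp_2d is the numerically stable log-sum-exp over the flattened
# spatial dimensions, using the identity log(sum_i exp(x_i)) = m + log(sum_i exp(x_i - m))
# with m = max_i x_i, so the 'lse' pooling branch in ChannelGate cannot overflow exp().
# For example, logsumexp_2d(torch.ones(1, 3, 4, 4)) has shape (1, 3, 1) and every entry
# equals 1 + log(16).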
class ChannelPool(nn.Module):
def forward(self, x):
return torch.cat( (torch.max(x,1)[0].unsqueeze(1), torch.mean(x,1).unsqueeze(1)), dim=1 )
class SpatialGate(nn.Module):
def __init__(self):
super(SpatialGate, self).__init__()
kernel_size = 7
self.compress = ChannelPool()
self.spatial = BasicConv(2, 1, kernel_size, stride=1, padding=(kernel_size-1) // 2, relu=False)
def forward(self, x):
x_compress = self.compress(x)
x_out = self.spatial(x_compress)
        scale = torch.sigmoid(x_out)  # broadcasting
return x * scale
class CBAM(nn.Module):
def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max'], no_spatial=False):
super(CBAM, self).__init__()
self.ChannelGate = ChannelGate(gate_channels, reduction_ratio, pool_types)
self.no_spatial=no_spatial
if not no_spatial:
self.SpatialGate = SpatialGate()
def forward(self, x):
x_out = self.ChannelGate(x)
if not self.no_spatial:
x_out = self.SpatialGate(x_out)
return x_out
class se_channels(nn.Module):
def __init__(self, channel, reduction=16):
super(se_channels, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid()
)
#einsum is more elegant than code at https://github.com/moskomule/senet.pytorch/blob/master/senet/se_module.py
def forward_verify(self, x,out_0):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y)
out = torch.einsum('bcxy,bc->bcxy', x,y)
dist = torch.dist(out,out_0,2)
assert dist==0
return
#elegant code from https://github.com/moskomule/senet.pytorch/blob/master/senet/se_module.py
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
out = x * y.expand_as(x)
self.forward_verify(x,out)
return out
class eca_channel(nn.Module):
def __init__(self, channel, k_size=3):
super(eca_channel, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
b, c, h, w = x.size()
y = self.avg_pool(x)
# Two different branches of ECA module
y0 = y.squeeze(-1).transpose(-1, -2)
y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
# Multi-scale information fusion
y = self.sigmoid(y)
return x * y.expand_as(x)
class se_operate(nn.Module):
def __init__(self, nOP, reduction=2):
super(se_operate, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1) #The number of output features is equal to the number of input planes.
        self.nOP = nOP
self.fc = nn.Sequential(
nn.Linear(nOP, nOP // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(nOP // reduction, nOP, bias=False),
#nn.Sigmoid()
            nn.Softmax(dim=1)
)
self.desc=f"se_operate_{reduction}"
self.nStep = 0
#self.cur_alpha = torch.zeros(self.nOP).cuda()
self.alpha_sum = torch.zeros(self.nOP)
def __repr__(self):
return self.desc
# def InitAlpha(self):
# self.nStep = 0
# self.alpha = torch.zeros(self.nOP)
def UpdateAlpha(self):
if self.nStep==0:
print(f"\tnStep=0")
self.alpha=self.alpha_sum*1
return
self.alpha=self.alpha_sum/self.nStep
#print(f"\tnStep={self.nStep}",end="")
a = torch.sum(self.alpha).item()
self.alpha_sum.fill_(0)
self.nStep = 0
assert np.isclose(a, 1)
#elegant code from https://github.com/moskomule/senet.pytorch/blob/master/senet/se_module.py
def forward(self, listOPX):
assert len(listOPX)==self.nOP
y_list=[]
for i,opx in enumerate(listOPX):
y = torch.mean(self.avg_pool(opx).squeeze(),dim=1)
y_list.append(y)
y = torch.stack( y_list ,dim=1)
w = self.fc(y)
m_ = torch.mean(w,dim=0).detach()
#assert np.isclose(torch.sum(m_).item(), 1)
self.alpha_sum += m_.cpu()
self.nStep = self.nStep+1
        if False:  # both branches seem to work, oddly enough
out = 0
for i,opx in enumerate(listOPX):
w_i = w[:,i:i+1].squeeze()
out = out+torch.einsum('bcxy,b->bcxy',opx,w_i)
else:
out = sum(w * opx for w, opx in zip(m_, listOPX))
return out
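# Added note: se_operate is a squeeze-and-excitation gate over candidate operations
# rather than channels: each op's feature map is average-pooled to one scalar per
# sample, the fc+softmax stack produces per-op mixing weights, and the batch mean of
# those weights is accumulated in alpha_sum so that UpdateAlpha() can expose a
# running-average architecture weight per operation.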
# alpha parameters can be shared across multiple cells!!!
class Alpha4Cell(object):
def __init__(self,config, nOP,topo,isReduce=False):
super(Alpha4Cell, self).__init__()
self.config = config
self.nets = None
self.topo = topo
self.isReduce = isReduce
self.hasBeta = self.config.op_struc == "PCC" or self.config.op_struc == "pair"
if self.config.topo_edges == "2":
self.hasBeta = True
#k = sum(1 for i in range(self.nNode) for n in range(2+i))
k = self.topo.hGraph[-1]
self.desc = f"W[edges={k},nOP={nOP}]"
if self.config.op_struc=="se":
pass
else:
self.alphas_ = Variable(1e-3*torch.randn((k,nOP)).cuda(), requires_grad=True)
if self.hasBeta:
self.betas_ = Variable(1e-3*torch.randn(k).cuda(), requires_grad=True)
if self.hasBeta: self.desc+="\tbeta"
if self.isReduce: self.desc+="\treduce"
def __repr__(self):
return self.desc
# def BeforeEpoch(self):
# pass
# def AfterEpoch(self):
# pass
def step(self):
pass
def get_weight(self,purpose="get_gene"):
if not hasattr(self,"alphas_"):
# assert False
return [None,None]
w_a,w_b = F.softmax(self.alphas_, dim=-1),None
if self.hasBeta:
if False:
n = 3
start = 2
weights2 = F.softmax(self.betas_[0:2], dim=-1)
for i in range(self.nNode-1):
end = start + n
tw2 = F.softmax(self.betas_[start:end], dim=-1)
start = end
n += 1
weights2 = torch.cat([weights2,tw2],dim=0)
assert end==len(self.betas_)
else:
I_I = self.topo.I_I
weights2 = torch.cat( [F.softmax(self.betas_[I_I(id)], dim=-1) for id in range(self.topo.nNode)] ,dim=0)
w_b = weights2
return [w_a,w_b]
def get_gene(self,plot_path=""):
[weights,weights2] = self.get_weight()
if weights is None:
return ""
weights = weights.detach().cpu().numpy()
if weights2 is not None:
weights2 = weights2.detach().cpu().numpy()
nNode = self.topo.nNode
nEdges = self.topo.nMostEdge()
PRIMITIVES_pool = self.config.PRIMITIVES_pool
gene = []
none_index = PRIMITIVES_pool.index('none')
for i in range(nNode):
II = self.topo.I_I(i)
            start = self.topo.hGraph[i]  # analogous to local vs. global stiffness-matrix indexing
nEdge = len(II)
#W = weights[II].copy()
if weights2 is not None:
#W2 = weights2[II].copy()
for j in II:
weights[j, :] = weights[j, :]*weights2[j]
edges,cur_gene = [],[]
for edge in II:
W_edge = weights[edge].copy() #print(W_edge)
cur_nz = len(weights[edge])
k_sort = sorted(range(cur_nz), key=lambda k:W_edge[k])
k_sort.remove(none_index)
k_best = k_sort[cur_nz-2]
cur_min, cur_max = W_edge[k_sort[0]], W_edge[k_best]
edges.append(-cur_max)
cur_gene.append((PRIMITIVES_pool[k_best], edge-start))
edges = sorted(range(nEdge), key=lambda k:edges[k]) #Default is ascending
gene.extend([cur_gene[edges[0]], cur_gene[edges[1]]])
        if plot_path is not None:  # the influence of weights2 has already been folded into weights
sns.set(font_scale=1)
fig, ax = plt.subplots(figsize=(8,3))
g = sns.heatmap(weights.T,square=True, cmap='coolwarm', ax=ax) #, annot=True
g.set_yticklabels(PRIMITIVES_pool, rotation=0)
g.set_xticklabels([i+1 for i in range(nEdges)],rotation=0) #rotation=45
fig.savefig(plot_path, bbox_inches='tight', pad_inches=0)
#plt.show()
plt.close("all")
return gene
def get_param(self):
param_list = []
if self.nets is not None:
for net in self.nets:
for name, param in net.named_parameters():
param_list.append(param)
if self.hasBeta:
param_list.append(self.betas_)
return param_list
if self.hasBeta:
return [self.alphas_,self.betas_]
else:
return [self.alphas_]
class Alpha_se(Alpha4Cell):
def __init__(self,config, nOP,topo,isReduce=False):
super(Alpha_se, self).__init__(config, nOP,topo,isReduce)
self.nets = [se_operate(nOP) for i in range(self.topo.nMostEdge())]
self.nets = nn.ModuleList(self.nets)
self.desc+=f"\t\"{self.nets[0]}\"x{len(self.nets)}"
def __repr__(self):
return self.desc
# def BeforeEpoch(self):
# for net in self.nets:
# net.BeforeEpoch()
def step(self):
list_alpha=[]
nNet = len(self.nets)
for i,net in enumerate(self.nets):
net.UpdateAlpha()
list_alpha.append(net.alpha)
self.alphas_ = torch.stack(list_alpha,dim=0)
#print("")
        # for net in self.nets:  # reset
# net.InitAlpha()
def get_weight(self):
if not hasattr(self,"alphas_"):
# assert False
return [None,None]
return [self.alphas_,None]
|
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import spacy
import re
from gensim.corpora import Dictionary
class ElasticCorpus:
def __init__(self, host=None, port=None, username=None, password=None, index=None, debug_limit=None,
dictionary=Dictionary()):
if host:
self.elastic = Elasticsearch([{'host': host, 'port': port}], http_auth=(username, password))
self.index = index
self.debug_limit = debug_limit
self.space = spacy.load('en_core_web_sm')
self.dictionary = dictionary
def __get_document_count(self):
return self.elastic.count(index=self.index, body={'query': {"match_all": {}}})["count"]
def __iter__(self):
counter = 0
if self.debug_limit:
document_counter = self.debug_limit
else:
document_counter = self.__get_document_count()
steps = document_counter // 100 if document_counter > 100 else 1
for entry in helpers.scan(self.elastic, query={"query": {"match_all": {}}}, _source=["content"],
index=self.index, size=2000):
text = entry["_source"]["content"]
text = re.sub('[^A-Za-z0-9 ]+', '', text)
text = re.sub(' +', ' ', text)
doc = self.space(text)
tokens = [t.lemma_.upper() for t in doc if not t.is_stop]
self.dictionary.add_documents([tokens])
if counter % steps == 0:
print(f"Progress: {(counter / document_counter) * 100} %")
if self.debug_limit and self.debug_limit == counter:
break
counter = counter + 1
yield self.dictionary.doc2bow(tokens)
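# Minimal usage sketch (added; the host/index values are hypothetical and assume a
# reachable Elasticsearch instance plus the spaCy 'en_core_web_sm' model):
if __name__ == '__main__':
    corpus = ElasticCorpus(host='localhost', port=9200, username='elastic',
                           password='changeme', index='documents', debug_limit=10)
    for bow in corpus:  # each item is a gensim bag-of-words vector
        print(bow)
    print(corpus.dictionary)  # tokens accumulated while iterating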
|
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
import mock
import six
import nailgun
from nailgun import consts
from nailgun.db.sqlalchemy import models
from nailgun import objects
from nailgun import rpc
from nailgun.orchestrator import deployment_serializers
from nailgun.orchestrator.deployment_serializers import \
deployment_info_to_legacy
from nailgun.orchestrator.deployment_serializers import \
get_serializer_for_cluster
from nailgun.extensions.network_manager.serializers.neutron_serializers import \
NeutronNetworkDeploymentSerializer80
from nailgun.extensions.network_manager.serializers.neutron_serializers import \
NeutronNetworkTemplateSerializer80
from nailgun.test.integration.test_orchestrator_serializer import \
BaseDeploymentSerializer
from nailgun.test.integration.test_orchestrator_serializer import \
TestSerializeInterfaceDriversData
from nailgun.test.integration.test_orchestrator_serializer_70 import \
TestDeploymentHASerializer70
from nailgun.test.integration.test_orchestrator_serializer_80 import \
TestSerializer80Mixin
class TestSerializerWrapper(deployment_serializers.DeploymentLCMSerializer):
def serialize(self, cluster, nodes, ignore_customized=False):
return deployment_serializers.serialize_for_lcm(
cluster, nodes, ignore_customized=ignore_customized)
def get_net_provider_serializer(cls, cluster):
return deployment_serializers\
.DeploymentHASerializer80.get_net_provider_serializer(cluster)
class TestSerializerConverter80To90MixIn(TestSerializer80Mixin):
env_version = "liberty-8.0"
task_deploy = True
is_propagate_task_deploy = True
enforce_lcm = True
@classmethod
def create_serializer(cls, cluster):
serializer_type = TestSerializerWrapper
return serializer_type(None)
class TestNetworkTemplateSerializer80MixIn(
TestSerializerConverter80To90MixIn,
BaseDeploymentSerializer
):
legacy_serializer = NeutronNetworkDeploymentSerializer80
template_serializer = NeutronNetworkTemplateSerializer80
def setUp(self, *args):
super(TestNetworkTemplateSerializer80MixIn, self).setUp()
self.env.create(
release_kwargs={'version': self.env_version},
cluster_kwargs={
'mode': consts.CLUSTER_MODES.ha_compact,
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan})
self.net_template = self.env.read_fixtures(['network_template_80'])[0]
self.cluster = self.env.clusters[-1]
self.cluster.extensions = ['volume_manager', 'converted_serializers']
self.serializer = self.create_serializer(self.cluster)
def test_get_net_provider_serializer(self):
self.cluster.network_config.configuration_template = None
net_serializer = self.serializer.\
get_net_provider_serializer(self.cluster)
self.assertIs(net_serializer, self.legacy_serializer)
self.cluster.network_config.configuration_template = \
self.net_template
net_serializer = self.serializer.\
get_net_provider_serializer(self.cluster)
self.assertIs(net_serializer, self.template_serializer)
def test_baremetal_neutron_attrs(self):
brmtl_template = deepcopy(
self.net_template['adv_net_template']['default'])
brmtl_template['network_assignments']['baremetal'] = {
'ep': 'br-baremetal'}
brmtl_template['templates_for_node_role']['controller'].append(
'baremetal')
brmtl_template['nic_mapping']['default']['if8'] = 'eth7'
brmtl_template['network_scheme']['baremetal'] = {
'endpoints': ['br-baremetal'],
'transformations': [],
'roles': {'baremetal': 'br-baremetal'}}
self.cluster.network_config.configuration_template = {
'adv_net_template': {'default': brmtl_template}, 'pk': 1}
self._check_baremetal_neutron_attrs(self.cluster)
def test_network_schemes_priorities(self):
expected = [
{
"action": "add-br",
"name": "br-prv",
"provider": "ovs"
},
{
"action": "add-br",
"name": "br-aux"
},
{
"action": "add-patch",
"bridges": [
"br-prv",
"br-aux"
],
"provider": "ovs",
"mtu": 65000
},
{
"action": "add-port",
"bridge": "br-aux",
"name": "eth3.101"
},
{
"action": "add-br",
"name": "br-fw-admin"
},
{
"action": "add-port",
"bridge": "br-fw-admin",
"name": "eth0"
},
{
"action": "add-br",
"name": "br-mgmt"
},
{
"action": "add-port",
"bridge": "br-mgmt",
"name": "eth1.104"
},
{
"action": "add-br",
"name": "br-storage"
},
{
"action": "add-port",
"bridge": "br-storage",
"name": "eth2"
}
]
objects.Cluster.set_network_template(
self.cluster,
self.net_template
)
node = self.env.create_nodes_w_interfaces_count(
1, 8, roles=['compute', 'cinder'],
cluster_id=self.cluster.id
)[0]
self.serializer = get_serializer_for_cluster(self.cluster)
net_serializer = self.serializer.get_net_provider_serializer(
self.cluster)
nm = objects.Cluster.get_network_manager(self.cluster)
network_scheme = net_serializer.generate_network_scheme(
node, nm.get_node_networks(node))
self.assertEqual(expected, network_scheme['transformations'])
class TestDeploymentTasksSerialization80MixIn(
TestSerializerConverter80To90MixIn,
BaseDeploymentSerializer
):
tasks_for_rerun = {"globals", "netconfig"}
def setUp(self):
super(TestDeploymentTasksSerialization80MixIn, self).setUp()
self.env.create(
release_kwargs={'version': self.env_version},
cluster_kwargs={
'mode': consts.CLUSTER_MODES.ha_compact,
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan,
'status': consts.CLUSTER_STATUSES.operational},
nodes_kwargs=[
{'roles': ['controller'],
'status': consts.NODE_STATUSES.ready}]
)
self.cluster = self.env.clusters[-1]
self.cluster.extensions = ['volume_manager', 'converted_serializers']
if not self.task_deploy:
self.env.disable_task_deploy(self.cluster)
def add_node(self, role):
return self.env.create_node(
cluster_id=self.cluster.id,
pending_roles=[role],
pending_addition=True
)
def get_rpc_args(self):
self.env.launch_deployment()
args, kwargs = nailgun.task.manager.rpc.cast.call_args
return args[1][1]['args']
def check_add_node_for_task_deploy(self, rpc_message):
tasks_graph = rpc_message['tasks_graph']
for node_id, tasks in six.iteritems(tasks_graph):
if node_id is None or node_id == consts.MASTER_NODE_UID:
# skip virtual node
continue
task_ids = {
t['id'] for t in tasks
if t['type'] != consts.ORCHESTRATOR_TASK_TYPES.skipped
}
# all tasks are run on all nodes
self.assertTrue(self.tasks_for_rerun.issubset(task_ids))
def check_add_compute_for_granular_deploy(self, new_node_uid, rpc_message):
for node in rpc_message['deployment_info']:
task_ids = {t['id'] for t in node['tasks']}
if node['tasks'][0]['uids'] == [new_node_uid]:
# all tasks are run on a new node
self.assertTrue(
self.tasks_for_rerun.issubset(task_ids))
else:
# only selected tasks are run on a deployed node
self.assertItemsEqual(self.tasks_for_rerun, task_ids)
def check_add_controller_for_granular_deploy(self, rpc_message):
for node in rpc_message['deployment_info']:
task_ids = {t['id'] for t in node['tasks']}
# controller is redeployed when other one is added
# so all tasks are run on all nodes
self.assertTrue(
self.tasks_for_rerun.issubset(task_ids))
@mock.patch('nailgun.rpc.cast')
def test_add_compute(self, _):
new_node = self.add_node('compute')
rpc_deploy_message = self.get_rpc_args()
if self.task_deploy:
self.check_add_node_for_task_deploy(rpc_deploy_message)
else:
self.check_add_compute_for_granular_deploy(
new_node.uid, rpc_deploy_message
)
@mock.patch('nailgun.rpc.cast')
def test_add_controller(self, _):
self.add_node('controller')
rpc_deploy_message = self.get_rpc_args()
if self.task_deploy:
self.check_add_node_for_task_deploy(rpc_deploy_message)
else:
self.check_add_controller_for_granular_deploy(rpc_deploy_message)
class TestDeploymentAttributesSerialization80MixIn(
TestSerializerConverter80To90MixIn,
BaseDeploymentSerializer
):
def setUp(self):
super(TestDeploymentAttributesSerialization80MixIn, self).setUp()
self.cluster = self.env.create(
release_kwargs={
'version': self.env_version,
'operating_system': consts.RELEASE_OS.ubuntu},
cluster_kwargs={
'mode': consts.CLUSTER_MODES.ha_compact,
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan})
self.cluster_db = self.db.query(models.Cluster).get(self.cluster['id'])
self.cluster.extensions = ['volume_manager', 'converted_serializers']
self.serializer = self.create_serializer(self.cluster_db)
def test_neutron_attrs(self):
self.env.create_node(
cluster_id=self.cluster_db.id,
roles=['controller'], primary_roles=['controller']
)
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
serialized_for_astute = deployment_info_to_legacy(
serialized_for_astute)
for node in serialized_for_astute:
self.assertEqual(
{
"bridge": consts.DEFAULT_BRIDGES_NAMES.br_floating,
"vlan_range": None
},
node['quantum_settings']['L2']['phys_nets']['physnet1']
)
l2 = (node["quantum_settings"]["predefined_networks"]
[self.cluster_db.network_config.floating_name]["L2"])
self.assertEqual("physnet1", l2["physnet"])
self.assertEqual("flat", l2["network_type"])
def test_baremetal_transformations(self):
self.env._set_additional_component(self.cluster_db, 'ironic', True)
self.env.create_node(cluster_id=self.cluster_db.id,
roles=['primary-controller'])
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
for node in serialized_for_astute:
if node['uid'] == 'master':
continue
transformations = node['network_scheme']['transformations']
baremetal_brs = filter(lambda t: t.get('name') ==
consts.DEFAULT_BRIDGES_NAMES.br_baremetal,
transformations)
baremetal_ports = filter(lambda t: t.get('name') == "eth0.104",
transformations)
expected_patch = {
'action': 'add-patch',
'bridges': [consts.DEFAULT_BRIDGES_NAMES.br_ironic,
consts.DEFAULT_BRIDGES_NAMES.br_baremetal],
'provider': 'ovs'}
self.assertEqual(len(baremetal_brs), 1)
self.assertEqual(len(baremetal_ports), 1)
self.assertEqual(baremetal_ports[0]['bridge'],
consts.DEFAULT_BRIDGES_NAMES.br_baremetal)
self.assertIn(expected_patch, transformations)
def test_disks_attrs(self):
disks = [
{
"model": "TOSHIBA MK1002TS",
"name": "sda",
"disk": "sda",
"size": 1004886016
},
]
expected_node_volumes_hash = [
{
u'name': u'sda',
u'bootable': True,
u'extra': [],
u'free_space': 330,
u'volumes': [
{
u'type': u'boot',
u'size': 300
},
{
u'mount': u'/boot',
u'type': u'partition',
u'file_system': u'ext2',
u'name': u'Boot',
u'size': 200
},
{
u'type': u'lvm_meta_pool',
u'size': 64
},
{
u'vg': u'os',
u'type': u'pv',
u'lvm_meta_size': 64,
u'size': 394
},
{
u'vg': u'vm',
u'type': u'pv',
u'lvm_meta_size': 0,
u'size': 0
}
],
u'type': u'disk',
u'id': u'sda',
u'size': 958
},
{
u'_allocate_size': u'min',
u'label': u'Base System',
u'min_size': 19456,
u'volumes': [
{
u'mount': u'/',
u'size': -3766,
u'type': u'lv',
u'name': u'root',
u'file_system': u'ext4'
},
{
u'mount': u'swap',
u'size': 4096,
u'type': u'lv',
u'name': u'swap',
u'file_system': u'swap'
}
],
u'type': u'vg',
u'id': u'os'
},
{
u'_allocate_size': u'all',
u'label': u'Virtual Storage',
u'min_size': 5120,
u'volumes': [
{
u'mount': u'/var/lib/nova',
u'size': 0,
u'type': u'lv',
u'name': u'nova',
u'file_system': u'xfs'
}
],
u'type': u'vg',
u'id': u'vm'
}
]
self.env.create_node(
cluster_id=self.cluster_db.id,
roles=['compute'],
meta={"disks": disks},
)
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
for node in serialized_for_astute:
if node['uid'] == 'master':
continue
self.assertIn("node_volumes", node)
self.assertItemsEqual(
expected_node_volumes_hash, node["node_volumes"])
def test_attributes_contains_plugins(self):
self.env.create_plugin(
cluster=self.cluster_db,
name='plugin_1',
attributes_metadata={'attributes': {'name': 'plugin_1'}},
package_version='4.0.0',
fuel_version=['8.0'])
self.env.create_plugin(
cluster=self.cluster_db,
name='plugin_2',
attributes_metadata={'attributes': {'name': 'plugin_2'}},
package_version='4.0.0',
fuel_version=['8.0'])
self.env.create_plugin(
cluster=self.cluster_db,
enabled=False,
name='plugin_3',
attributes_metadata={'attributes': {'name': 'plugin_3'}},
package_version='4.0.0',
fuel_version=['8.0'])
expected_plugins_list = ['plugin_1', 'plugin_2']
self.env.create_node(
cluster_id=self.cluster_db.id,
roles=['compute']
)
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
for node in serialized_for_astute:
if node['uid'] == 'master':
continue
self.assertIn('plugins', node)
self.assertItemsEqual(
expected_plugins_list, node['plugins'])
self.assertTrue(all(name in node for name
in expected_plugins_list))
def test_common_attributes_contains_plugin_metadata(self):
expected_value = 'check_value'
plugin = self.env.create_plugin(
cluster=self.cluster_db,
name='test_plugin',
package_version='4.0.0',
fuel_version=['8.0'],
attributes_metadata={
'attributes': {
'config': {
'description': "Description",
'weight': 52,
'value': expected_value
}
}
}
)
attrs = self.serializer.get_common_attrs(self.cluster_db)
self.assertIn('test_plugin', attrs)
self.assertIn('metadata', attrs['test_plugin'])
self.assertEqual(
plugin.id, attrs['test_plugin']['metadata']['plugin_id']
)
self.assertEqual(expected_value, attrs['test_plugin']['config'])
class TestMultiNodeGroupsSerialization80MixIn(
TestSerializerConverter80To90MixIn,
BaseDeploymentSerializer
):
def setUp(self):
super(TestMultiNodeGroupsSerialization80MixIn, self).setUp()
cluster = self.env.create(
release_kwargs={'version': self.env_version},
cluster_kwargs={
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan}
)
self.env.create_nodes_w_interfaces_count(
nodes_count=3,
if_count=2,
roles=['controller', 'cinder'],
pending_addition=True,
cluster_id=cluster['id'])
self.cluster_db = self.db.query(models.Cluster).get(cluster['id'])
cluster.extensions = ['volume_manager', 'converted_serializers']
self.serializer = self.create_serializer(cluster)
def _add_node_group_with_node(self, cidr_start, node_address):
node_group = self.env.create_node_group(
api=False, cluster_id=self.cluster_db.id,
name='ng_' + cidr_start + '_' + str(node_address))
with mock.patch.object(rpc, 'cast'):
resp = self.env.setup_networks_for_nodegroup(
cluster_id=self.cluster_db.id, node_group=node_group,
cidr_start=cidr_start)
self.assertEqual(resp.status_code, 200)
self.db.query(models.Task).filter_by(
name=consts.TASK_NAMES.update_dnsmasq
).delete(synchronize_session=False)
self.env.create_nodes_w_interfaces_count(
nodes_count=1,
if_count=2,
roles=['compute'],
pending_addition=True,
cluster_id=self.cluster_db.id,
group_id=node_group.id,
ip='{0}.9.{1}'.format(cidr_start, node_address))
def _check_routes_count(self, count):
objects.Cluster.prepare_for_deployment(self.cluster_db)
facts = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
for node in facts:
if node['uid'] == 'master':
continue
endpoints = node['network_scheme']['endpoints']
for name, descr in six.iteritems(endpoints):
if descr['IP'] == 'none':
self.assertNotIn('routes', descr)
else:
self.assertEqual(len(descr['routes']), count)
def test_routes_with_no_shared_networks_2_nodegroups(self):
self._add_node_group_with_node('199.99', 3)
# all networks have different CIDRs
self._check_routes_count(1)
def test_routes_with_no_shared_networks_3_nodegroups(self):
self._add_node_group_with_node('199.99', 3)
self._add_node_group_with_node('199.77', 3)
# all networks have different CIDRs
self._check_routes_count(2)
def test_routes_with_shared_networks_3_nodegroups(self):
self._add_node_group_with_node('199.99', 3)
self._add_node_group_with_node('199.99', 4)
# networks in two racks have equal CIDRs
self._check_routes_count(1)
class TestBlockDeviceDevicesSerialization80MixIn(
TestSerializerConverter80To90MixIn,
BaseDeploymentSerializer
):
def setUp(self):
super(TestBlockDeviceDevicesSerialization80MixIn, self).setUp()
self.cluster = self.env.create(
release_kwargs={'version': self.env_version},
cluster_kwargs={
'mode': consts.CLUSTER_MODES.ha_compact,
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan})
self.cluster_db = self.db.query(models.Cluster).get(self.cluster['id'])
self.cluster.extensions = ['volume_manager', 'converted_serializers']
self.serializer = self.create_serializer(self.cluster_db)
def test_block_device_disks(self):
self.env.create_node(
cluster_id=self.cluster_db.id,
roles=['cinder-block-device']
)
self.env.create_node(
cluster_id=self.cluster_db.id,
roles=['controller']
)
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
for node in serialized_for_astute:
if node['uid'] == 'master':
continue
self.assertIn("node_volumes", node)
for node_volume in node["node_volumes"]:
if node_volume["id"] == "cinder-block-device":
self.assertEqual(node_volume["volumes"], [])
else:
self.assertNotEqual(node_volume["volumes"], [])
class TestSerializeInterfaceDriversData80MixIn(
TestSerializerConverter80To90MixIn,
TestSerializeInterfaceDriversData
):
pass
class TestDeploymentHASerializer80MixIn(
TestSerializerConverter80To90MixIn,
TestDeploymentHASerializer70
):
pass
|
from setuptools import setup, find_packages
with open("README.md", "r") as readme_file:
readme = readme_file.read()
requirements = ["casbin==0.8.4", "psycopg2-binary==2.8.6", "black==20.8b1"]
setup(
name="casbin-postgresql-watcher",
version="0.0.1",
author="hsluoyz",
author_email="hsluoyz@gmail.com",
description="Casbin role watcher to be used for monitoring updates to policies for PyCasbin",
long_description=readme,
long_description_content_type="text/markdown",
url="https://github.com/pycasbin/postgresql-watcher",
packages=find_packages(),
install_requires=requirements,
classifiers=[
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: Apache Software License",
],
)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file ui_hw_wipe_device_wdg.ui
#
# Created by: PyQt5 UI code generator
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_WdgWipeHwDevice(object):
def setupUi(self, WdgWipeHwDevice):
WdgWipeHwDevice.setObjectName("WdgWipeHwDevice")
WdgWipeHwDevice.resize(505, 357)
self.verticalLayout = QtWidgets.QVBoxLayout(WdgWipeHwDevice)
self.verticalLayout.setObjectName("verticalLayout")
self.lblMessage = QtWidgets.QLabel(WdgWipeHwDevice)
self.lblMessage.setMinimumSize(QtCore.QSize(0, 0))
self.lblMessage.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.lblMessage.setAlignment(QtCore.Qt.AlignCenter)
self.lblMessage.setObjectName("lblMessage")
self.verticalLayout.addWidget(self.lblMessage)
self.retranslateUi(WdgWipeHwDevice)
QtCore.QMetaObject.connectSlotsByName(WdgWipeHwDevice)
def retranslateUi(self, WdgWipeHwDevice):
_translate = QtCore.QCoreApplication.translate
WdgWipeHwDevice.setWindowTitle(_translate("WdgWipeHwDevice", "Form"))
self.lblMessage.setText(_translate("WdgWipeHwDevice", "..."))
|
import random
import shutil
import os.path as osp
from typing import Optional, Callable, List, Tuple
import torch
from torch_geometric.datasets import FAUST
from torch.utils.data import DataLoader
from torch_geometric.data import InMemoryDataset, Data, extract_zip
from torch_geometric.io import read_ply
class FullFAUST(InMemoryDataset):
"""
Adapted from Pytorch Geometric FAUST dataloader
https://pytorch-geometric.readthedocs.io/en/latest/_modules/torch_geometric/datasets/faust.html#FAUST
"""
url = 'http://faust.is.tue.mpg.de/'
def __init__(self, root: str, train: bool = True,
transform: Optional[Callable] = None,
pre_transform: Optional[Callable] = None,
pre_filter: Optional[Callable] = None):
super().__init__(root, transform, pre_transform, pre_filter)
# path = self.processed_paths[0] if train else self.processed_paths[1]
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self) -> str:
return 'MPI-FAUST.zip'
@property
def processed_file_names(self) -> List[str]:
return ['all_faust.pt']
def download(self):
raise RuntimeError(
f"Dataset not found. Please download '{self.raw_file_names}' from "
f"'{self.url}' and move it to '{self.raw_dir}'")
def process(self):
extract_zip(self.raw_paths[0], self.raw_dir, log=False)
path = osp.join(self.raw_dir, 'MPI-FAUST', 'training', 'registrations')
path = osp.join(path, 'tr_reg_{0:03d}.ply')
data_list = []
for i in range(100):
data = read_ply(path.format(i))
data.person = torch.tensor([i % 10], dtype=torch.long)
data.pose = torch.tensor([i // 10], dtype=torch.long)
if self.pre_filter is not None and not self.pre_filter(data):
continue
if self.pre_transform is not None:
data = self.pre_transform(data)
data_list.append(data)
torch.save(self.collate(data_list), self.processed_paths[0])
shutil.rmtree(osp.join(self.raw_dir, 'MPI-FAUST'))
def split_faust_by_person(dataset: FullFAUST, test_people_ids: List[int]) -> Tuple[List[Data], List[Data]]:
train = []
test = []
for data in dataset:
if data.person in test_people_ids:
test.append(data)
else:
train.append(data)
return train, test
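# Added sketch: split_faust_by_person holds out whole subjects; the ids below are an
# illustrative choice, not prescribed by the original code.
#   train_data, test_data = split_faust_by_person(FullFAUST('.'), test_people_ids=[8, 9])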
# TODO: Refactor BatchWrapper
from collections import namedtuple
BatchWrapper = namedtuple('BatchWrapper', ['x', 'pose'])
class FAUSTDataLoader(DataLoader):
def __init__(self, dataset: FAUST, batch_size=1, shuffle=False, onehot=False, **kwargs):
def collate_fn(data_list: List[Data]):
batch = torch.vstack([data.pos for data in data_list])
batch = batch.reshape(-1, *data_list[0].pos.shape).double()
pose = torch.vstack([data.pose for data in data_list])
if onehot:
pose = torch.nn.functional.one_hot(pose.flatten(), num_classes=10)
else:
pose = pose.reshape(-1, *data_list[0].pose.shape).double()
return BatchWrapper(x=batch, pose=pose)
super(FAUSTDataLoader, self).__init__(
dataset,
batch_size,
shuffle,
collate_fn=collate_fn,
**kwargs,
)
if __name__ == '__main__':
d = FullFAUST('.')
loader = FAUSTDataLoader(d, 5, onehot=True, shuffle=True)
for x in loader:
print(x)
|
"""
The Fibonacci sequence is defined by the recurrence relation:
Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1.
Hence the first 12 terms will be:
F1 = 1
F2 = 1
F3 = 2
F4 = 3
F5 = 5
F6 = 8
F7 = 13
F8 = 21
F9 = 34
F10 = 55
F11 = 89
F12 = 144
The 12th term, F12, is the first term to contain three digits.
What is the index of the first term in the Fibonacci sequence to contain
1000 digits?
"""
def fib():
last = 1
penultimate = 1
yield last
yield penultimate
while True:
ret = last + penultimate
penultimate = last
yield ret
last = ret
f = fib()
index = 1
while True:
ret = next(f)
    if len(str(ret)) >= 1000:
print(index, ret)
break
index += 1
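# Added cross-check (not part of the original solution): by Binet's formula
# F(n) is approximately phi**n / sqrt(5), so the first index with at least 1000
# digits satisfies n*log10(phi) - log10(sqrt(5)) >= 999.
import math
phi = (1 + math.sqrt(5)) / 2
n_estimate = math.ceil((999 + math.log10(math.sqrt(5))) / math.log10(phi))
print('closed-form estimate of the index:', n_estimate)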
|
#!/usr/bin/env python3
from __future__ import print_function
def main():
digits = 10
infile = 'input/Euler013.txt'
nums = parse_file(infile)
numsum = sum(nums)
print(str(numsum)[:digits])
def parse_file(path):
with open(path) as f:
return [int(num) for num in f.readlines()]
if __name__ == '__main__':
main()
|
from .linear_constraint import LinearConstraint, canlinear_colloc_to_interpolate
from ..constraint import DiscretizationType
import numpy as np
class JointTorqueConstraint(LinearConstraint):
"""Joint Torque Constraint.
A joint torque constraint is given by
.. math::
A(q) \ddot q + \dot q^\\top B(q) \dot q + C(q) + D( \dot q )= w,
where w is a vector that satisfies the polyhedral constraint:
.. math::
F(q) w \\leq g(q).
Notice that `inv_dyn(q, qd, qdd) = w` and that `cnsf_coeffs(q) =
F(q), g(q)`.
To evaluate the constraint on a geometric path `p(s)`, multiple
calls to `inv_dyn` and `const_coeff` are made. Specifically one
can derive the second-order equation as follows
.. math::
A(q) p'(s) \ddot s + [A(q) p''(s) + p'(s)^\\top B(q) p'(s)] \dot s^2 + C(q) + D( \dot q ) = w,
a(s) \ddot s + b(s) \dot s ^2 + c(s) = w
To evaluate the coefficients a(s), b(s), c(s), inv_dyn is called
repeatedly with appropriate arguments.
Parameters
----------
inv_dyn: (array, array, array) -> array
The "inverse dynamics" function that receives joint position, velocity and
acceleration as inputs and ouputs the "joint torque". See notes for more
details.
tau_lim: array
Shape (dof, 2). The lower and upper torque bounds of the
j-th joint are tau_lim[j, 0] and tau_lim[j, 1] respectively.
fs_coef: array
Shape (dof). The coefficients of dry friction of the
joints.
discretization_scheme: :class:`.DiscretizationType`
Can be either Collocation (0) or Interpolation
(1). Interpolation gives more accurate results with slightly
higher computational cost.
"""
def __init__(
self,
inv_dyn,
tau_lim,
fs_coef,
discretization_scheme=DiscretizationType.Collocation,
):
super(JointTorqueConstraint, self).__init__()
self.inv_dyn = inv_dyn
self.tau_lim = np.array(tau_lim, dtype=float)
self.fs_coef = np.array(fs_coef)
self.dof = self.tau_lim.shape[0]
self.set_discretization_type(discretization_scheme)
assert self.tau_lim.shape[1] == 2, "Wrong input shape."
self._format_string = " Torque limit: \n"
for i in range(self.tau_lim.shape[0]):
self._format_string += (
" J{:d}: {:}".format(i + 1, self.tau_lim[i]) + "\n"
)
self.identical = True
def compute_constraint_params(self, path, gridpoints):
if path.dof != self.get_dof():
raise ValueError(
"Wrong dimension: constraint dof ({:d}) not equal to path dof ({:d})".format(
self.get_dof(), path.dof
)
)
v_zero = np.zeros(path.dof)
p = path.eval(gridpoints)
ps = path.evald(gridpoints)
pss = path.evaldd(gridpoints)
N = gridpoints.shape[0] - 1
dof = path.dof
I_dof = np.eye(dof)
F = np.zeros((dof * 2, dof))
g = np.zeros(dof * 2)
g[0:dof] = self.tau_lim[:, 1]
g[dof:] = -self.tau_lim[:, 0]
F[0:dof, :] = I_dof
F[dof:, :] = -I_dof
c = np.array([self.inv_dyn(p_, v_zero, v_zero) for p_ in p])
a = np.array([self.inv_dyn(p_, v_zero, ps_) for p_, ps_ in zip(p, ps)]) - c
b = (
np.array([self.inv_dyn(p_, ps_, pss_) for p_, ps_, pss_ in zip(p, ps, pss)])
- c
)
# dry friction
for i in range(0, dof):
c[:, i] += self.fs_coef[i] * np.sign(ps[:, i])
if self.discretization_type == DiscretizationType.Collocation:
return a, b, c, F, g, None, None
elif self.discretization_type == DiscretizationType.Interpolation:
return canlinear_colloc_to_interpolate(
a, b, c, F, g, None, None, gridpoints, identical=True
)
else:
raise NotImplementedError("Other form of discretization not supported!")
|
'''
EnzymeML (c) University of Manchester 2018
EnzymeML is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=too-many-arguments
import re
import uuid
from libsbml import SBMLDocument, BIOLOGICAL_QUALIFIER, BQB_IS, CVTerm
def get_document():
'''Get SBMLDocument.'''
document = SBMLDocument()
# Create model with default units:
model = document.createModel()
model.setExtentUnits('mole')
model.setTimeUnits('second')
# Create default compartment of size 1 litre:
compartment = model.createCompartment()
compartment.setId('c')
compartment.setConstant(True)
compartment.setSize(1)
compartment.setSpatialDimensions(3)
compartment.setUnits('litre')
return document, model, compartment
def add_reaction(model, name, reversible=True):
'''Add reaction.'''
reaction = model.createReaction()
reaction.setId(get_id(uuid.uuid4()))
reaction.setName(name)
reaction.setSBOTerm('SBO:0000176')
reaction.setReversible(reversible)
reaction.setFast(False)
return reaction
def add_substrate(model, reaction, species_id, comp_id,
name=None, stoichiometry=1):
'''Add substrate.'''
return _add_substrate_product(model, reaction, species_id, name,
comp_id, stoichiometry, True)
def add_product(model, reaction, species_id, comp_id,
name=None, stoichiometry=1):
'''Add product.'''
return _add_substrate_product(model, reaction, species_id, name,
comp_id, stoichiometry, False)
def add_enzyme(model, reaction, species_id, comp_id,
name=None, uniprot_id=None):
'''Add enzyme.'''
species = _add_species(model, species_id, name, comp_id, 252,
constant=True, boundary_condition=False)
if uniprot_id:
add_annotation(species, 'http://identifiers.org/uniprot/' + uniprot_id)
spec_ref = reaction.createModifier()
spec_ref.setSpecies(species_id)
spec_ref.setSBOTerm(460)
return species
def add_non_participant(model, species_id, comp_id,
name=None, sbo_term=0):
'''Add non-participating species.'''
return _add_species(model, species_id, name, comp_id, sbo_term,
constant=True, boundary_condition=True)
def add_parameter(kinetic_law, value, units, name, sbo_term=0):
'''Add parameter.'''
parameter = kinetic_law.createLocalParameter()
parameter.setValue(float(value))
parameter.setUnits(units)
parameter.setId(get_id(uuid.uuid4()))
parameter.setName(name)
if sbo_term:
parameter.setSBOTerm(sbo_term)
def set_notes(elem, notes):
'''Set notes.'''
elem.setNotes('<body xmlns=\'http://www.w3.org/1999/xhtml\'>' +
'<pre>' + notes + '</pre></body>')
def get_id(id_in):
'''Format id.'''
return '_' + re.sub(r'\W+', '_', str(id_in))
def add_annotation(obj, resource, qualifier_type=BIOLOGICAL_QUALIFIER,
qualifier_sub_type=BQB_IS):
'''Add an annotation.'''
cv_term = CVTerm()
cv_term.setQualifierType(qualifier_type)
if qualifier_type is BIOLOGICAL_QUALIFIER:
cv_term.setBiologicalQualifierType(qualifier_sub_type)
cv_term.addResource(resource)
obj.setMetaId('_meta' + obj.getId())
obj.addCVTerm(cv_term)
def _add_substrate_product(model, reaction, species_id, name, comp_id,
stoichiometry, is_substrate):
'''Add reaction participant.'''
species = _add_species(model, species_id, name, comp_id, 247,
constant=False, boundary_condition=False)
spec_ref = reaction.createReactant() if is_substrate \
else reaction.createProduct()
spec_ref.setSpecies(species_id)
spec_ref.setStoichiometry(stoichiometry)
spec_ref.setConstant(False)
spec_ref.setSBOTerm(10 if is_substrate else 11)
return species, spec_ref
def _add_species(model, species_id, name, comp_id, sbo_term,
constant, boundary_condition):
'''Add species.'''
species = model.createSpecies()
species.setId(species_id)
species.setCompartment(comp_id)
species.setHasOnlySubstanceUnits(True)
species.setConstant(constant)
species.setBoundaryCondition(boundary_condition)
if name:
species.setName(name)
if sbo_term:
species.setSBOTerm(sbo_term)
return species
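# Minimal usage sketch (added; species ids and names below are illustrative only and
# assume python-libsbml is installed):
if __name__ == '__main__':
    _doc, _model, _compartment = get_document()
    _reaction = add_reaction(_model, 'glucose phosphorylation')
    add_substrate(_model, _reaction, 's_glc', _compartment.getId(), name='glucose')
    add_product(_model, _reaction, 'p_g6p', _compartment.getId(), name='glucose 6-phosphate')
    add_enzyme(_model, _reaction, 'e_hk', _compartment.getId(), name='hexokinase')
    set_notes(_reaction, 'example reaction added for illustration')
    print(_model.getNumSpecies(), 'species created')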
|
"""This module contains the general information for SesDiskSlotEp ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class SesDiskSlotEpConsts():
DISK_PRESENT_FALSE = "false"
DISK_PRESENT_NO = "no"
DISK_PRESENT_TRUE = "true"
DISK_PRESENT_YES = "yes"
LC_ALLOCATED = "allocated"
LC_AVAILABLE = "available"
LC_DEALLOCATED = "deallocated"
LC_REPURPOSED = "repurposed"
SCSI_DISK_STATE_BAD = "bad"
SCSI_DISK_STATE_GOOD = "good"
SCSI_DISK_STATE_UNKNOWN = "unknown"
class SesDiskSlotEp(ManagedObject):
"""This is SesDiskSlotEp class."""
consts = SesDiskSlotEpConsts()
naming_props = set([u'encId', u'id'])
mo_meta = MoMeta("SesDiskSlotEp", "sesDiskSlotEp", "disk-slot-ep-[enc_id]-id-[id]", VersionMeta.Version151a, "InputOutput", 0x3f, [], ["read-only"], [u'sesEnclosure'], [], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"disk_dn": MoPropertyMeta("disk_dn", "diskDn", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"disk_present": MoPropertyMeta("disk_present", "diskPresent", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"enc_id": MoPropertyMeta("enc_id", "encId", "uint", VersionMeta.Version151a, MoPropertyMeta.NAMING, 0x4, None, None, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version151a, MoPropertyMeta.NAMING, 0x8, None, None, None, [], []),
"lc": MoPropertyMeta("lc", "lc", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["allocated", "available", "deallocated", "repurposed"], []),
"model": MoPropertyMeta("model", "model", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"revision": MoPropertyMeta("revision", "revision", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"scsi_disk_state": MoPropertyMeta("scsi_disk_state", "scsiDiskState", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["bad", "good", "unknown"], []),
"serial": MoPropertyMeta("serial", "serial", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"slot_dn": MoPropertyMeta("slot_dn", "slotDn", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
}
prop_map = {
"childAction": "child_action",
"diskDn": "disk_dn",
"diskPresent": "disk_present",
"dn": "dn",
"encId": "enc_id",
"id": "id",
"lc": "lc",
"model": "model",
"revision": "revision",
"rn": "rn",
"scsiDiskState": "scsi_disk_state",
"serial": "serial",
"slotDn": "slot_dn",
"status": "status",
"vendor": "vendor",
}
def __init__(self, parent_mo_or_dn, enc_id, id, **kwargs):
self._dirty_mask = 0
self.enc_id = enc_id
self.id = id
self.child_action = None
self.disk_dn = None
self.disk_present = None
self.lc = None
self.model = None
self.revision = None
self.scsi_disk_state = None
self.serial = None
self.slot_dn = None
self.status = None
self.vendor = None
ManagedObject.__init__(self, "SesDiskSlotEp", parent_mo_or_dn, **kwargs)
|
# -*- flake8: noqa -*-
from metrology.instruments.counter import Counter
from metrology.instruments.derive import Derive
from metrology.instruments.gauge import Gauge
from metrology.instruments.histogram import Histogram, HistogramUniform, HistogramExponentiallyDecaying
from metrology.instruments.meter import Meter
from metrology.instruments.profiler import Profiler
from metrology.instruments.timer import Timer, UtilizationTimer
|
# NLP written by GAMS Convert at 04/21/18 13:55:06
#
# Equation counts
# Total E G L N X C B
# 43 35 0 8 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 119 119 0 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 403 277 126 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x1 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x2 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x3 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x4 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x5 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x6 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x7 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x8 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x9 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x10 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x11 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x12 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x13 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x14 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x15 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x16 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x17 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x18 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x19 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x20 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x21 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x22 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x23 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x24 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x25 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x26 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x27 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x28 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x29 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x30 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x31 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x32 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x33 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x34 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x35 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x36 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x37 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x38 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x39 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x40 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x41 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x42 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x43 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x44 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x45 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x46 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x47 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x48 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x49 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x50 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x51 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x52 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x53 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x54 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x55 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x56 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x57 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x58 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x59 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x60 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x61 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x62 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x63 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x64 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x65 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x66 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x67 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x68 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x69 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x70 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x71 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x72 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x73 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x74 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x75 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x76 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x77 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x78 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x79 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x80 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x81 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x82 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x83 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x84 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x85 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x86 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x87 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x88 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x89 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x90 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x91 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x92 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x93 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x94 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x95 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x96 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x97 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x98 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x99 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x100 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x101 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x102 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x103 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x104 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x105 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x106 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x107 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x108 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x109 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x110 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x111 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x112 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x113 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x114 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x115 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x116 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x117 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.x118 = Var(within=Reals,bounds=(0,1000000),initialize=0)
m.obj = Objective(expr= m.x112 + m.x113 + m.x114 + m.x115 + m.x116 + m.x117 + m.x118, sense=minimize)
m.c2 = Constraint(expr= - m.x64 - m.x76 - m.x77 - m.x78 - m.x79 - m.x80 - m.x81 - m.x82 == -80)
m.c3 = Constraint(expr= - m.x65 - m.x83 - m.x84 - m.x85 - m.x86 - m.x87 - m.x88 - m.x89 == -450)
m.c4 = Constraint(expr= - m.x66 - m.x90 - m.x91 - m.x92 - m.x93 - m.x94 - m.x95 - m.x96 == -230)
m.c5 = Constraint(expr= - m.x67 - m.x97 - m.x98 - m.x99 - m.x100 - m.x101 - m.x102 - m.x103 == -90)
m.c6 = Constraint(expr= - m.x68 - m.x104 - m.x105 - m.x106 - m.x107 - m.x108 - m.x109 - m.x110 == -330)
m.c7 = Constraint(expr= - m.x15 - m.x22 - m.x29 - m.x36 - m.x43 - m.x50 - m.x57 - m.x76 - m.x83 - m.x90 - m.x97 - m.x104
+ m.x112 == 0)
m.c8 = Constraint(expr= - m.x16 - m.x23 - m.x30 - m.x37 - m.x44 - m.x51 - m.x58 - m.x77 - m.x84 - m.x91 - m.x98 - m.x105
+ m.x113 == 0)
m.c9 = Constraint(expr= - m.x17 - m.x24 - m.x31 - m.x38 - m.x45 - m.x52 - m.x59 - m.x78 - m.x85 - m.x92 - m.x99 - m.x106
+ m.x114 == 0)
m.c10 = Constraint(expr= - m.x18 - m.x25 - m.x32 - m.x39 - m.x46 - m.x53 - m.x60 - m.x79 - m.x86 - m.x93 - m.x100
- m.x107 + m.x115 == 0)
m.c11 = Constraint(expr= - m.x19 - m.x26 - m.x33 - m.x40 - m.x47 - m.x54 - m.x61 - m.x80 - m.x87 - m.x94 - m.x101
- m.x108 + m.x116 == 0)
m.c12 = Constraint(expr= - m.x20 - m.x27 - m.x34 - m.x41 - m.x48 - m.x55 - m.x62 - m.x81 - m.x88 - m.x95 - m.x102
- m.x109 + m.x117 == 0)
m.c13 = Constraint(expr= - m.x21 - m.x28 - m.x35 - m.x42 - m.x49 - m.x56 - m.x63 - m.x82 - m.x89 - m.x96 - m.x103
- m.x110 + m.x118 == 0)
m.c14 = Constraint(expr= - m.x15 - m.x16 - m.x17 - m.x18 - m.x19 - m.x20 - m.x21 - m.x69 + m.x112 == 0)
m.c15 = Constraint(expr= - m.x22 - m.x23 - m.x24 - m.x25 - m.x26 - m.x27 - m.x28 - m.x70 + m.x113 == 0)
m.c16 = Constraint(expr= - m.x29 - m.x30 - m.x31 - m.x32 - m.x33 - m.x34 - m.x35 - m.x71 + m.x114 == 0)
m.c17 = Constraint(expr= - m.x36 - m.x37 - m.x38 - m.x39 - m.x40 - m.x41 - m.x42 - m.x72 + m.x115 == 0)
m.c18 = Constraint(expr= - m.x43 - m.x44 - m.x45 - m.x46 - m.x47 - m.x48 - m.x49 - m.x73 + m.x116 == 0)
m.c19 = Constraint(expr= - m.x50 - m.x51 - m.x52 - m.x53 - m.x54 - m.x55 - m.x56 - m.x74 + m.x117 == 0)
m.c20 = Constraint(expr= - m.x57 - m.x58 - m.x59 - m.x60 - m.x61 - m.x62 - m.x63 - m.x75 + m.x118 == 0)
m.c21 = Constraint(expr= - m.x64 - m.x65 - m.x66 - m.x67 - m.x68 - m.x69 - m.x70 - m.x71 - m.x72 - m.x73 - m.x74 - m.x75
+ m.x111 == 0)
m.c22 = Constraint(expr=m.x15*m.x8 + m.x22*m.x9 + m.x29*m.x10 + m.x36*m.x11 + m.x43*m.x12 + m.x50*m.x13 + m.x57*m.x14 -
m.x112*m.x1 + 12*m.x76 + 50*m.x83 + 500*m.x90 + 400*m.x97 + 120*m.x104 == 0)
m.c23 = Constraint(expr=m.x16*m.x8 + m.x23*m.x9 + m.x30*m.x10 + m.x37*m.x11 + m.x44*m.x12 + m.x51*m.x13 + m.x58*m.x14 -
m.x113*m.x2 + 12*m.x77 + 50*m.x84 + 500*m.x91 + 400*m.x98 + 120*m.x105 == 0)
m.c24 = Constraint(expr=m.x17*m.x8 + m.x24*m.x9 + m.x31*m.x10 + m.x38*m.x11 + m.x45*m.x12 + m.x52*m.x13 + m.x59*m.x14 -
m.x114*m.x3 + 12*m.x78 + 50*m.x85 + 500*m.x92 + 400*m.x99 + 120*m.x106 == 0)
m.c25 = Constraint(expr=m.x18*m.x8 + m.x25*m.x9 + m.x32*m.x10 + m.x39*m.x11 + m.x46*m.x12 + m.x53*m.x13 + m.x60*m.x14 -
m.x115*m.x4 + 12*m.x79 + 50*m.x86 + 500*m.x93 + 400*m.x100 + 120*m.x107 == 0)
m.c26 = Constraint(expr=m.x19*m.x8 + m.x26*m.x9 + m.x33*m.x10 + m.x40*m.x11 + m.x47*m.x12 + m.x54*m.x13 + m.x61*m.x14 -
m.x116*m.x5 + 12*m.x80 + 50*m.x87 + 500*m.x94 + 400*m.x101 + 120*m.x108 == 0)
m.c27 = Constraint(expr=m.x20*m.x8 + m.x27*m.x9 + m.x34*m.x10 + m.x41*m.x11 + m.x48*m.x12 + m.x55*m.x13 + m.x62*m.x14 -
m.x117*m.x6 + 12*m.x81 + 50*m.x88 + 500*m.x95 + 400*m.x102 + 120*m.x109 == 0)
m.c28 = Constraint(expr=m.x21*m.x8 + m.x28*m.x9 + m.x35*m.x10 + m.x42*m.x11 + m.x49*m.x12 + m.x56*m.x13 + m.x63*m.x14 -
m.x118*m.x7 + 12*m.x82 + 50*m.x89 + 500*m.x96 + 400*m.x103 + 120*m.x110 == 0)
m.c29 = Constraint(expr= m.x1 <= 400)
m.c30 = Constraint(expr= m.x2 <= 100)
m.c31 = Constraint(expr= m.x3 <= 50)
m.c32 = Constraint(expr= m.x4 <= 570)
m.c33 = Constraint(expr= m.x5 <= 100)
m.c34 = Constraint(expr= m.x6 <= 30)
m.c35 = Constraint(expr= m.x7 <= 640)
m.c36 = Constraint(expr= - 0.9*m.x1 + m.x8 == 0)
m.c37 = Constraint(expr= - 0.6*m.x2 + m.x9 == 0)
m.c38 = Constraint(expr= - 0.15*m.x3 + m.x10 == 0)
m.c39 = Constraint(expr= - 0.26*m.x4 + m.x11 == 0)
m.c40 = Constraint(expr= - 0.1*m.x5 + m.x12 == 0)
m.c41 = Constraint(expr= - 0.4*m.x6 + m.x13 == 0)
m.c42 = Constraint(expr= - 0.3*m.x7 + m.x14 == 0)
m.c43 = Constraint(expr=m.x69*m.x8 + m.x70*m.x9 + m.x71*m.x10 + m.x72*m.x11 + m.x73*m.x12 + m.x74*m.x13 + m.x75*m.x14
+ 12*m.x64 + 50*m.x65 + 500*m.x66 + 400*m.x67 + 120*m.x68 - 4*m.x111 <= 0)
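# Added note (not part of the generated model): constraints c22-c28 and c43
# multiply decision variables (e.g. m.x15*m.x8), so this is a nonconvex NLP
# and needs a nonlinear solver. A minimal solve sketch, assuming a local
# Ipopt installation is available:
#
#   from pyomo.environ import SolverFactory
#   results = SolverFactory('ipopt').solve(m, tee=True)
#   print(results.solver.termination_condition, m.obj())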
|
import traceback
class ErrorHandler:
def __init__(self, form_name):
self.form_name = form_name
def __call__(self, exctype, value, tb, *args, **kwargs):
for trace in traceback.format_tb(tb):
print(trace)
print('Exception Information')
print('Type:', exctype)
print('Value:', value)
print('Exception occurred in: ' + self.form_name)
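# Usage sketch (added; 'MainWindow' is a hypothetical form name): __call__
# matches the sys.excepthook signature (exctype, value, tb), so the handler
# can be installed globally to log uncaught exceptions:
#
#   import sys
#   sys.excepthook = ErrorHandler('MainWindow')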
|
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# custom plot colors
GLD = [1.0, 0.698, 0.063]
# read in data
df = pd.read_csv(r'./data/Baltimore_City_Employees_Salaries.csv')
# years included in the data; shorten 'FY20XX' labels to just 'XX'
years = df['FiscalYear'].unique()
years = sorted(years)
yrs = [year[4:] for year in years]
# total gross pay in billions
gross_sum = [df.loc[df['FiscalYear']==year,'GrossPay'].sum() for year in years]
gross_sum_billion = np.array(gross_sum)/1E9
fig, ax = plt.subplots(nrows=1, ncols=1)
fig.patch.set_facecolor('k')
ax.set_facecolor('k')
ax.plot(yrs, gross_sum_billion, color=GLD, linewidth=5, marker='o', markersize=15)
ax.set_xlabel('Fiscal Year', color='w', fontsize=16)
ax.set_ylabel('Billions of US Dollars', color='w', fontsize=16)
ax.set_title('Baltimore City Total Employees Gross Pay', color='w', fontsize=16, fontweight='bold')
# Style the tick marks and labels, then recolor the axis borders (spines) below
ax.tick_params(color='w', labelcolor='w', length=10.0, labelsize=14, width=3, direction='in')
for spine in ax.spines.values():
spine.set_edgecolor('w')
spine.set_linewidth(3.0)
plt.savefig('./figs/gross_sum_billions.png', facecolor=fig.get_facecolor(), edgecolor='none')
plt.show()
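# Added note (sketch): plt.savefig fails if the ./figs directory does not
# already exist; a guard placed before the savefig call would make the
# script more robust, e.g.
#
#   import os
#   os.makedirs('./figs', exist_ok=True)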
|
# Copyright 2022 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install prompt-tuning."""
import ast
import setuptools
def get_version(file_name: str, version_name: str = "__version__") -> str:
"""Find version by AST parsing to avoid needing to import this package."""
with open(file_name) as f:
tree = ast.parse(f.read())
# Look for all assignment nodes in the AST; if the variable name is the one
# we assigned the version number to, grab the value (the version).
for node in ast.walk(tree):
if isinstance(node, ast.Assign):
if node.targets[0].id == version_name:
return node.value.s
raise ValueError(f"Couldn't find assignment to variable {version_name} "
f"in file {file_name}")
with open("README.md") as fp:
LONG_DESCRIPTION = fp.read()
_jax_version = "0.2.27"
setuptools.setup(
name="prompt-tuning",
version=get_version("prompt_tuning/__init__.py"),
description="Prompt Tuning from Lester et al., 2021",
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
author="Google Inc.",
author_email="no-reply@google.com",
url="http://github.com/google-research/prompt-tuning",
license="Apache 2.0",
packages=setuptools.find_packages(),
include_package_data=True,
package_data={
"": ["**/*.gin"],
},
scripts=[],
install_requires=[
"absl-py",
"flax",
"gin-config",
f"jax>={_jax_version}",
"numpy",
"seqio-nightly",
"t5",
"tensorflow",
"tensorflow_datasets",
# Install from git as they have setup.pys but are not on PyPI.
"t5x @ git+https://github.com/google-research/t5x@main#egg=t5x",
"flaxformer @ git+https://github.com/google/flaxformer@main#egg=flaxformer",
],
extras_require={
"test": ["pytest>=6.0"],
# TODO: mt5 and byt5 are not set up as python packages.
# Figure out best way to bring them in as dependencies.
"mt5": [],
"byt5": [],
"mrqa": ["pandas"],
"tpu": [f"jax[tpu]>={_jax_version}"]
},
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords=[
"prompt tuning",
"machine learning",
"transformers",
"neural networks",
"pre-trained language models",
"nlp",
"jax",
"flax",
"t5",
"t5x",
]
)
|
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
import json
import pytest
import requests
import requests_mock
from pkg_resources import resource_filename
from scriptabit.habitica_service import HabiticaService
from scriptabit.utility_functions import UtilityFunctions
from .fake_data import get_fake_stats
class MockConfig(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
class TestUtilityFunctions(object):
hs = None
@classmethod
def setup_class(cls):
cls.hs = HabiticaService(
{},
'https://habitica.com/api/v3/')
def test_set_health(self):
with requests_mock.mock() as m:
m.get('https://habitica.com/api/v3/user',
text=get_fake_stats()[1])
m.put('https://habitica.com/api/v3/user',
text=get_fake_stats(hp=39)[1])
uf = UtilityFunctions(MockConfig(), self.hs)
uf.set_health(39)
history = m.request_history
assert history[1].method == 'PUT'
assert history[1].text == 'stats.hp=39'
def test_inc_health(self):
with requests_mock.mock() as m:
m.get('https://habitica.com/api/v3/user',
text=get_fake_stats(hp=1)[1])
m.put('https://habitica.com/api/v3/user',
text=get_fake_stats(hp=8)[1])
uf = UtilityFunctions(MockConfig(), self.hs)
uf.set_health(7, True)
history = m.request_history
assert history[1].method == 'PUT'
assert history[1].text == 'stats.hp=8'
def test_dec_health(self):
with requests_mock.mock() as m:
m.get('https://habitica.com/api/v3/user',
text=get_fake_stats(hp=50)[1])
m.put('https://habitica.com/api/v3/user',
text=get_fake_stats(hp=29)[1])
uf = UtilityFunctions(MockConfig(), self.hs)
uf.set_health(-21, True)
history = m.request_history
assert history[1].method == 'PUT'
assert history[1].text == 'stats.hp=29'
def test_set_mana(self):
with requests_mock.mock() as m:
m.get('https://habitica.com/api/v3/user',
text=get_fake_stats()[1])
m.put('https://habitica.com/api/v3/user',
text=get_fake_stats(mp=100)[1])
uf = UtilityFunctions(MockConfig(), self.hs)
uf.set_mana(100)
history = m.request_history
assert history[1].method == 'PUT'
assert history[1].text == 'stats.mp=100'
def test_inc_mana(self):
with requests_mock.mock() as m:
m.get('https://habitica.com/api/v3/user',
text=get_fake_stats(mp=30)[1])
m.put('https://habitica.com/api/v3/user',
text=get_fake_stats(mp=99)[1])
uf = UtilityFunctions(MockConfig(), self.hs)
uf.set_mana(69, True)
history = m.request_history
assert history[1].method == 'PUT'
assert history[1].text == 'stats.mp=99'
def test_dec_mana(self):
with requests_mock.mock() as m:
m.get('https://habitica.com/api/v3/user',
text=get_fake_stats(mp=50)[1])
m.put('https://habitica.com/api/v3/user',
text=get_fake_stats(mp=39)[1])
uf = UtilityFunctions(MockConfig(), self.hs)
uf.set_mana(-11, True)
history = m.request_history
assert history[1].method == 'PUT'
assert history[1].text == 'stats.mp=39'
def test_set_xp(self):
with requests_mock.mock() as m:
m.get('https://habitica.com/api/v3/user',
text=get_fake_stats()[1])
m.put('https://habitica.com/api/v3/user',
text=get_fake_stats(exp=1009)[1])
uf = UtilityFunctions(MockConfig(), self.hs)
uf.set_xp(1009)
history = m.request_history
assert history[1].method == 'PUT'
assert history[1].text == 'stats.exp=1009'
def test_inc_xp(self):
with requests_mock.mock() as m:
m.get('https://habitica.com/api/v3/user',
text=get_fake_stats(exp=30)[1])
m.put('https://habitica.com/api/v3/user',
text=get_fake_stats(exp=39)[1])
uf = UtilityFunctions(MockConfig(), self.hs)
uf.set_xp(9, True)
history = m.request_history
assert history[1].method == 'PUT'
assert history[1].text == 'stats.exp=39'
def test_dec_xp(self):
with requests_mock.mock() as m:
m.get('https://habitica.com/api/v3/user',
text=get_fake_stats(exp=500)[1])
m.put('https://habitica.com/api/v3/user',
text=get_fake_stats(exp=120)[1])
uf = UtilityFunctions(MockConfig(), self.hs)
uf.set_xp(-380, True)
history = m.request_history
assert history[1].method == 'PUT'
assert history[1].text == 'stats.exp=120'
def test_set_gold(self):
with requests_mock.mock() as m:
m.get('https://habitica.com/api/v3/user',
text=get_fake_stats()[1])
m.put('https://habitica.com/api/v3/user',
text=get_fake_stats(gp=9009)[1])
uf = UtilityFunctions(MockConfig(), self.hs)
uf.set_gold(9009)
history = m.request_history
assert history[1].method == 'PUT'
assert history[1].text == 'stats.gp=9009'
def test_inc_gold(self):
with requests_mock.mock() as m:
m.get('https://habitica.com/api/v3/user',
text=get_fake_stats(gp=30)[1])
m.put('https://habitica.com/api/v3/user',
text=get_fake_stats(gp=39)[1])
uf = UtilityFunctions(MockConfig(), self.hs)
uf.set_gold(9, True)
history = m.request_history
assert history[1].method == 'PUT'
assert history[1].text == 'stats.gp=39'
def test_dec_gold(self):
with requests_mock.mock() as m:
m.get('https://habitica.com/api/v3/user',
text=get_fake_stats(gp=50)[1])
m.put('https://habitica.com/api/v3/user',
text=get_fake_stats(gp=39)[1])
uf = UtilityFunctions(MockConfig(), self.hs)
uf.set_gold(-11, True)
history = m.request_history
assert history[1].method == 'PUT'
assert history[1].text == 'stats.gp=39'
def test_set_health_dry_run(self):
with requests_mock.mock() as m:
m.get('https://habitica.com/api/v3/user', text=get_fake_stats()[1])
uf = UtilityFunctions(MockConfig(dry_run=True), self.hs)
uf.set_health(39)
history = m.request_history
# the put method to set HP should not be called
assert history[0].method == 'GET'
assert len(history) == 1
def test_set_mana_dry_run(self):
with requests_mock.mock() as m:
m.get('https://habitica.com/api/v3/user', text=get_fake_stats()[1])
uf = UtilityFunctions(MockConfig(dry_run=True), self.hs)
uf.set_mana(39)
history = m.request_history
# the put method to set mana should not be called
assert history[0].method == 'GET'
assert len(history) == 1
def test_set_xp_dry_run(self):
with requests_mock.mock() as m:
m.get('https://habitica.com/api/v3/user', text=get_fake_stats()[1])
uf = UtilityFunctions(MockConfig(dry_run=True), self.hs)
uf.set_xp(39)
history = m.request_history
# the put method to set XP should not be called
assert history[0].method == 'GET'
assert len(history) == 1
def test_set_gold_dry_run(self):
with requests_mock.mock() as m:
m.get('https://habitica.com/api/v3/user', text=get_fake_stats()[1])
uf = UtilityFunctions(MockConfig(dry_run=True), self.hs)
uf.set_gold(39)
history = m.request_history
# the put method to set gold should not be called
assert history[0].method == 'GET'
assert len(history) == 1
|
"""Read genome build configurations from Galaxy *.loc and bcbio-nextgen resource files.
"""
import six
from six.moves import configparser
import glob
import os
import sys
from xml.etree import ElementTree
import toolz as tz
import yaml
from bcbio import utils
from bcbio.cwl import cwlutils
from bcbio.distributed import objectstore
from bcbio.log import logger
from bcbio.ngsalign import star
from bcbio.pipeline import alignment
from bcbio.provenance import do
from bcbio.rnaseq import gtf
# ## bcbio-nextgen genome resource files
def get_resources(genome, ref_file, data):
"""Retrieve genome information from a genome-references.yaml file.
"""
base_dir = os.path.normpath(os.path.dirname(ref_file))
resource_file = os.path.join(base_dir, "%s-resources.yaml" % genome.replace("-test", ""))
if not os.path.exists(resource_file):
raise IOError("Did not find resource file for %s: %s\n"
"To update bcbio_nextgen.py with genome resources for standard builds, run:\n"
"bcbio_nextgen.py upgrade -u skip"
% (genome, resource_file))
with open(resource_file) as in_handle:
resources = yaml.safe_load(in_handle)
def resource_file_path(x):
if isinstance(x, six.string_types) and os.path.exists(os.path.join(base_dir, x)):
return os.path.normpath(os.path.join(base_dir, x))
return x
cleaned = utils.dictapply(resources, resource_file_path)
return ensure_annotations(cleaned, data)
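# Illustrative (hypothetical) excerpt of a <genome>-resources.yaml file read by
# get_resources; relative paths are resolved against the reference directory,
# and rnaseq/transcripts feeds ensure_annotations below:
#
#   rnaseq:
#     transcripts: ../rnaseq/ref-transcripts.gtf
#   variation:
#     dbsnp: ../variation/dbsnp.vcf.gz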
def add_required_resources(resources):
"""Add default or empty values for required resources referenced in CWL
"""
required = [["variation", "cosmic"], ["variation", "clinvar"], ["variation", "dbsnp"],
["variation", "lcr"], ["variation", "polyx"],
["variation", "encode_blacklist"],
["variation", "train_hapmap"], ["variation", "train_indels"],
["variation", "editing"], ["variation", "exac"], ["variation", "esp"],
["variation", "1000g"]]
for key in required:
if not tz.get_in(key, resources):
resources = tz.update_in(resources, key, lambda x: None)
return resources
def ensure_annotations(resources, data):
"""Prepare any potentially missing annotations for downstream processing in a local directory.
"""
transcript_gff = tz.get_in(["rnaseq", "transcripts"], resources)
if transcript_gff and utils.file_exists(transcript_gff):
out_dir = os.path.join(tz.get_in(["dirs", "work"], data),
"inputs", "data", "annotations")
resources["rnaseq"]["gene_bed"] = gtf.gtf_to_bed(transcript_gff, out_dir)
return resources
# ## Utilities
def abs_file_paths(xs, base_dir=None, ignore_keys=None, fileonly_keys=None, cur_key=None,
do_download=True):
"""Normalize any file paths found in a subdirectory of configuration input.
base_dir -- directory to normalize relative paths to
ignore_keys -- algorithm key names to skip normalization for (keywords, not files/directories)
fileonly_keys -- algorithm key names to only expand files (not directories)
cur_key -- current key when calling recursively
"""
ignore_keys = set([]) if ignore_keys is None else set(ignore_keys)
fileonly_keys = set([]) if fileonly_keys is None else set(fileonly_keys)
if base_dir is None:
base_dir = os.getcwd()
orig_dir = os.getcwd()
os.chdir(base_dir)
input_dir = os.path.join(base_dir, "inputs")
if isinstance(xs, dict):
out = {}
for k, v in xs.items():
if k not in ignore_keys and v and isinstance(v, six.string_types):
if v.lower() == "none":
out[k] = None
else:
out[k] = abs_file_paths(v, base_dir, ignore_keys, fileonly_keys, k, do_download=do_download)
elif isinstance(v, (list, tuple)):
out[k] = [abs_file_paths(x, base_dir, ignore_keys, fileonly_keys, k, do_download=do_download)
for x in v]
else:
out[k] = v
elif isinstance(xs, six.string_types):
if os.path.exists(xs) or (do_download and objectstore.is_remote(xs)):
dl = objectstore.download(xs, input_dir)
if dl and cur_key not in ignore_keys and not (cur_key in fileonly_keys and not os.path.isfile(dl)):
out = os.path.normpath(os.path.join(base_dir, dl))
else:
out = xs
else:
out = xs
else:
out = xs
os.chdir(orig_dir)
return out
# ## Galaxy integration -- *.loc files
def _get_galaxy_loc_file(name, galaxy_dt, ref_dir, galaxy_base):
"""Retrieve Galaxy *.loc file for the given reference/aligner name.
First tries to find an aligner specific *.loc file. If not defined
or does not exist, then we need to try and remap it from the
default reference file
"""
if "file" in galaxy_dt and os.path.exists(os.path.join(galaxy_base, galaxy_dt["file"])):
loc_file = os.path.join(galaxy_base, galaxy_dt["file"])
need_remap = False
elif alignment.TOOLS[name].galaxy_loc_file is None:
loc_file = os.path.join(ref_dir, alignment.BASE_LOCATION_FILE)
need_remap = True
else:
loc_file = os.path.join(ref_dir, alignment.TOOLS[name].galaxy_loc_file)
need_remap = False
if not os.path.exists(loc_file):
loc_file = os.path.join(ref_dir, alignment.BASE_LOCATION_FILE)
need_remap = True
return loc_file, need_remap
def _galaxy_loc_iter(loc_file, galaxy_dt, need_remap=False):
"""Iterator returning genome build and references from Galaxy *.loc file.
"""
if "column" in galaxy_dt:
dbkey_i = galaxy_dt["column"].index("dbkey")
path_i = galaxy_dt["column"].index("path")
else:
dbkey_i = None
if os.path.exists(loc_file):
with open(loc_file) as in_handle:
for line in in_handle:
if line.strip() and not line.startswith("#"):
parts = [x.strip() for x in line.strip().split("\t")]
# Detect and report spaces instead of tabs
if len(parts) == 1:
parts = [x.strip() for x in line.strip().split(" ") if x.strip()]
if len(parts) > 1:
raise IOError("Galaxy location file uses spaces instead of "
"tabs to separate fields: %s" % loc_file)
if dbkey_i is not None and not need_remap:
dbkey = parts[dbkey_i]
cur_ref = parts[path_i]
else:
if parts[0] == "index":
parts = parts[1:]
dbkey = parts[0]
cur_ref = parts[-1]
yield (dbkey, cur_ref)
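# Illustrative (hypothetical) *.loc line handled by _galaxy_loc_iter: fields are
# tab separated, with one column holding the dbkey and another the path, e.g.
#
#   hg19<TAB>hg19<TAB>Human (hg19)<TAB>/path/to/genomes/Hsapiens/hg19/seq/hg19.fa
#
# A line that splits into a single tab field but multiple space fields triggers
# the IOError about spaces above.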
def _get_ref_from_galaxy_loc(name, genome_build, loc_file, galaxy_dt, need_remap,
galaxy_config, data):
"""Retrieve reference genome file from Galaxy *.loc file.
Reads index information from tool_data_table_conf.xml if it exists,
otherwise uses heuristics to find the line based on the most common setups.
"""
refs = [ref for dbkey, ref in _galaxy_loc_iter(loc_file, galaxy_dt, need_remap)
if dbkey == genome_build]
remap_fn = alignment.TOOLS[name].remap_index_fn
need_remap = remap_fn is not None
if len(refs) == 0:
raise ValueError("Did not find genome build %s in bcbio installation: %s" %
(genome_build, os.path.normpath(loc_file)))
else:
cur_ref = refs[-1]
# Find genome directory and check for packed wf tarballs
cur_ref_norm = os.path.normpath(utils.add_full_path(cur_ref, galaxy_config["tool_data_path"]))
base_dir_i = cur_ref_norm.find("/%s/" % genome_build)
base_dir = os.path.join(cur_ref_norm[:base_dir_i], genome_build)
for tarball in glob.glob(os.path.join(base_dir, "*-wf.tar.gz")):
cwlutils.unpack_tarballs(tarball, {"dirs": {"work": base_dir}}, use_subdir=False)
if need_remap:
assert remap_fn is not None, "%s requires remapping function from base location file" % name
cur_ref = os.path.normpath(utils.add_full_path(cur_ref, galaxy_config["tool_data_path"]))
cur_ref = remap_fn(os.path.abspath(cur_ref))
return cur_ref
def _get_galaxy_tool_info(galaxy_base):
"""Retrieve Galaxy tool-data information from defaults or galaxy config file.
"""
ini_file = os.path.join(galaxy_base, "universe_wsgi.ini")
info = {"tool_data_table_config_path": os.path.join(galaxy_base, "tool_data_table_conf.xml"),
"tool_data_path": os.path.join(galaxy_base, "tool-data")}
config = configparser.ConfigParser()
config.read(ini_file)
if "app:main" in config.sections():
for option in config.options("app:main"):
if option in info:
info[option] = os.path.join(galaxy_base, config.get("app:main", option))
return info
def _get_galaxy_data_table(name, dt_config_file):
"""Parse data table config file for details on tool *.loc location and columns.
"""
out = {}
if os.path.exists(dt_config_file):
tdtc = ElementTree.parse(dt_config_file)
for t in tdtc.iter("table"):
if t.attrib.get("name", "") in [name, "%s_indexes" % name]:
out["column"] = [x.strip() for x in t.find("columns").text.split(",")]
out["file"] = t.find("file").attrib.get("path", "")
return out
def get_refs(genome_build, aligner, galaxy_base, data):
"""Retrieve the reference genome file location from galaxy configuration.
"""
out = {}
name_remap = {"samtools": "fasta"}
if genome_build:
galaxy_config = _get_galaxy_tool_info(galaxy_base)
for name in [x for x in ("samtools", aligner) if x]:
galaxy_dt = _get_galaxy_data_table(name, galaxy_config["tool_data_table_config_path"])
loc_file, need_remap = _get_galaxy_loc_file(name, galaxy_dt, galaxy_config["tool_data_path"],
galaxy_base)
cur_ref = _get_ref_from_galaxy_loc(name, genome_build, loc_file, galaxy_dt, need_remap,
galaxy_config, data)
base = os.path.normpath(utils.add_full_path(cur_ref, galaxy_config["tool_data_path"]))
# Expand directories unless we are an aligner like minimap2 that uses the seq directory
if os.path.isdir(base) and not (need_remap and os.path.basename(base) == "seq"):
indexes = sorted(glob.glob(os.path.join(base, "*")))
elif name != "samtools":
indexes = sorted(glob.glob("%s*" % utils.splitext_plus(base)[0]))
else:
indexes = []
name = name_remap.get(name, name)
out[name] = {}
if os.path.exists(base) and os.path.isfile(base):
out[name]["base"] = base
if indexes:
out[name]["indexes"] = indexes
# For references, add compressed inputs and indexes if they exist
if name == "fasta" and "base" in out[name] and os.path.exists(out[name]["base"] + ".gz"):
indexes = [out[name]["base"] + ".gz.fai", out[name]["base"] + ".gz.gzi",
utils.splitext_plus(out[name]["base"])[0] + ".dict"]
out[name + "gz"] = {"base": out[name]["base"] + ".gz",
"indexes": [x for x in indexes if os.path.exists(x)]}
# add additional indices relative to the base
if tz.get_in(["fasta", "base"], out):
ref_dir, ref_filebase = os.path.split(out["fasta"]["base"])
rtg_dir = os.path.normpath(os.path.join(ref_dir, os.path.pardir, "rtg",
"%s.sdf" % (os.path.splitext(ref_filebase)[0])))
out["rtg"] = {"base": os.path.join(rtg_dir, "mainIndex"),
"indexes": [x for x in glob.glob(os.path.join(rtg_dir, "*"))
if not x.endswith("/mainIndex")]}
twobit = os.path.normpath(os.path.join(ref_dir, os.path.pardir, "ucsc",
"%s.2bit" % (os.path.splitext(ref_filebase)[0])))
if os.path.exists(twobit):
out["twobit"] = twobit
return out
def get_builds(galaxy_base):
"""Retrieve configured genome builds and reference files, using Galaxy configuration files.
Allows multiple dbkey specifications in the same file, using the most recently added.
"""
name = "samtools"
galaxy_config = _get_galaxy_tool_info(galaxy_base)
galaxy_dt = _get_galaxy_data_table(name, galaxy_config["tool_data_table_config_path"])
loc_file, need_remap = _get_galaxy_loc_file(name, galaxy_dt, galaxy_config["tool_data_path"],
galaxy_base)
assert not need_remap, "Should not need to remap reference files"
fnames = {}
for dbkey, fname in _galaxy_loc_iter(loc_file, galaxy_dt):
fnames[dbkey] = fname
out = []
for dbkey in sorted(fnames.keys()):
out.append((dbkey, fnames[dbkey]))
return out
|
'''
Created on Jun 8, 2021
@author: mballance
'''
import os
import stat
from string import Template
class CmdInit(object):
def __init__(self):
pass
def __call__(self, args):
params = dict(
name=args.name,
version=args.version
)
# TODO: allow override
proj = os.getcwd()
ivpm_dir = os.path.dirname(os.path.realpath(__file__))
templates_dir = os.path.join(ivpm_dir, "templates")
for src,dir in zip(["ivpm.yaml"], [""]):
with open(os.path.join(templates_dir, src), "r") as fi:
content = fi.read()
outdir = os.path.join(proj, dir)
if not os.path.isdir(outdir):
print("Note: Creating directory " + str(outdir))
os.mkdir(outdir)
content_t = Template(content)
content = content_t.safe_substitute(params)
dest = os.path.join(proj, dir, src)
if os.path.isfile(dest) and not args.force:
raise Exception("File " + str(dest) + " exists and --force not specified")
with open(dest, "w") as fo:
fo.write(content)
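# Illustrative (hypothetical) templates/ivpm.yaml content: string.Template
# placeholders are filled from the command-line 'name' and 'version' values via
# safe_substitute, which leaves any unknown placeholders untouched:
#
#   package:
#     name: ${name}
#     version: ${version}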
|
#!/usr/bin/env python3
import tempfile, sys, os, re
import traceback
import custom_arguments
from argparse_compat import argparse
from replace_imports import include_imports, normalize_requires, get_required_contents, recursively_get_requires_from_file, absolutize_and_mangle_libname
from import_util import get_file, get_recursive_require_names, run_recursively_get_imports
from strip_comments import strip_comments
from strip_newlines import strip_newlines
from split_file import split_coq_file_contents, split_leading_comments_and_whitespace
from split_definitions import split_statements_to_definitions, join_definitions
from admit_abstract import transform_abstract_to_admit
from import_util import lib_of_filename, clear_libimport_cache, IMPORT_ABSOLUTIZE_TUPLE, ALL_ABSOLUTIZE_TUPLE
from import_util import split_requires_of_statements, get_file_statements_insert_references
from memoize import memoize
from coq_version import get_coqc_version, get_coqtop_version, get_coqc_help, get_coq_accepts_top, get_coq_native_compiler_ondemand_fragment, group_coq_args, get_coqc_coqlib, get_coq_accepts_compile
from coq_running_support import get_ltac_support_snippet
from custom_arguments import add_libname_arguments, add_passing_libname_arguments, update_env_with_libnames, update_env_with_coqpath_folders, add_logging_arguments, process_logging_arguments, DEFAULT_LOG, LOG_ALWAYS
from binding_util import has_dir_binding, deduplicate_trailing_dir_bindings, process_maybe_list
from file_util import clean_v_file, read_from_file, write_to_file, restore_file
from util import yes_no_prompt, PY3
import util
if PY3: raw_input = util.raw_input
import diagnose_error
# {Windows,Python,coqtop} is terrible; we fail to write to (or read
# from?) coqtop. But we can wrap it in a batch script, and it works
# fine.
SCRIPT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
DEFAULT_COQTOP = 'coqtop' if os.name != 'nt' else os.path.join(SCRIPT_DIRECTORY, 'coqtop.bat')
parser = custom_arguments.ArgumentParser(description='Attempt to create a small file which reproduces a bug found in a large development.')
parser.add_argument('bug_file', metavar='BUGGY_FILE', type=argparse.FileType('r'),
help='a .v file which displays the bug')
parser.add_argument('output_file', metavar='OUT_FILE', type=str,
help='a .v file which will hold intermediate results, as well as the final reduced file')
parser.add_argument('temp_file', metavar='TEMP_FILE', nargs='?', type=str, default='',
help='a .v file which will be used to build up intermediate files while they are being tested')
parser.add_argument('--fast-merge-imports', dest='fast_merge_imports',
action='store_const', const=True, default=False,
help='Use a faster method for combining imports')
parser.add_argument('--no-wrap-modules', dest='wrap_modules',
action='store_const', const=False, default=True,
help=("Don't wrap imports in Modules. By default, the " +
"contents of each file is wrapped in its own " +
"module to deal with renaming issues. This " +
"can cause issues with subdirectories."))
parser.add_argument('--absolutize-constants', dest='absolutize',
action='store_const', default=IMPORT_ABSOLUTIZE_TUPLE, const=ALL_ABSOLUTIZE_TUPLE,
help=("Replace constants with fully qualified versions. " +
"By default, all constants are not fully qualified. If you have " +
"many overlapping file names in different directories " +
"and use partially qualified names that differ depending " +
"on which files have been Required, not absolutizing constants " +
"may cause name resolution to fail."))
parser.add_argument('--strip-newlines', dest='max_consecutive_newlines',
metavar='N', nargs='?', type=int, default=2,
help=("Passing `--strip-newlines N` will cause the " +
"program to, for all M > N, replace any " +
"instances of M consecutive newlines with N " +
"consecutive newlines. The result will be a " +
"file with no more than N consecutive newlines. " +
"Passing a negative number will disable this option. " +
"(Default: 2)"))
parser.add_argument('--no-admit-opaque', dest='admit_opaque',
action='store_const', const=False, default=True,
help=("Don't try to replace opaque things ([Qed] and [abstract])" +
"with [admit]s."))
parser.add_argument('--no-admit-transparent', dest='admit_transparent',
action='store_const', const=False, default=True,
help=("Don't try to replace transparent things with [admit]s."))
parser.add_argument('--no-admit-obligations', dest='admit_obligations',
action='store_const', const=False, default=True,
help=("Don't try to replace obligations with [Admit Obligations]."))
parser.add_argument('--no-admit', dest='admit_any',
action='store_const', const=False, default=True,
help=("Don't try to replace things with [admit]s."))
parser.add_argument('--no-aggressive', dest='aggressive',
action='store_const', const=False, default=True,
help=("Be less aggressive; don't try to remove _all_ definitions/lines."))
parser.add_argument('--no-remove-typeclasses', dest='save_typeclasses',
action='store_const', const=True, default=False,
help=("Don't remove Hints, Instances, or Canonical Structures; " +
"this should mostly preserve typeclass logs, and can be useful " +
"for debugging slow typeclass bugs."))
parser.add_argument('--ignore-coq-prog-args', dest='use_coq_prog_args',
action='store_const', const=False, default=True,
help=("Don't add extra arguments from a coq-prog-args file header."))
parser.add_argument('--dynamic-header', dest='dynamic_header', nargs='?', type=str,
default='(* File reduced by coq-bug-minimizer from %(old_header)s, then from %(original_line_count)d lines to %(final_line_count)d lines *)',
help=("A line to be placed at the top of the " +
"output file, followed by a newline. The " +
"variables original_line_count and " +
"final_line_count will be available for " +
"substitution. The variable old_header will" +
"have the previous contents of this comment. " +
"The default is " +
"`(* File reduced by coq-bug-minimizer from %%(old_header)s, then from %%(original_line_count)d lines to %%(final_line_count)d lines *)'"))
parser.add_argument('--header', dest='header', nargs='?', type=str,
default='(* coqc version %(coqc_version)s\n coqtop version %(coqtop_version)s%(module_inline_failure_string)s\n Expected coqc runtime on this file: %(recent_runtime).3f sec *)',
help=("A line to be placed at the top of the " +
"output file, below the dynamic header, " +
"followed by a newline. The variables " +
"coqtop_version and coqc_version will be " +
"available for substitution. The default is " +
"`(* coqc version %%(coqc_version)s\\n coqtop version %%(coqtop_version)s%%(module_inline_failure_string)s\\n Expected coqc runtime on this file: %%(recent_runtime).3f sec *)'"))
parser.add_argument('--no-strip-trailing-space', dest='strip_trailing_space',
action='store_const', const=False, default=True,
help=("Don't strip trailing spaces. By default, " +
"trailing spaces on each line are removed."))
parser.add_argument('--strict-whitespace', dest='strict_whitespace',
action='store_const', const=True, default=False,
help=("Strictly enforce whitespace matching in error " +
"messages. By default, locations where there " +
"are newlines followed by spaces are interchangable " +
"with any amount of spacing."))
parser.add_argument('--no-deps', dest='walk_tree',
action='store_const', const=False, default=True,
help=("Don't do dependency analysis on all files in the current " +
"file tree."))
parser.add_argument('--inline-coqlib', dest='inline_coqlib',
metavar='COQLIB', nargs='?', type=str,
help=("Attempt to inline requires from Coq's standard library,\n" +
"passing `-coqlib COQLIB' to coqc"))
parser.add_argument('--inline-user-contrib', dest='inline_user_contrib',
action='store_const', const=True, default=False,
help=("Attempt to inline requires from the user-contrib folder"))
parser.add_argument('--timeout', dest='timeout', metavar='SECONDS', type=int, default=-1,
help=("Use a timeout; make sure Coq is " +
"killed after running for this many seconds. " +
"If 0, there is no timeout. If negative, then " +
"twice the initial run of the script is used.\n\n" +
"Default: -1"))
parser.add_argument('--no-timeout', dest='timeout', action='store_const', const=0,
help=("Do not use a timeout"))
parser.add_argument('--passing-timeout', dest='passing_timeout', metavar='SECONDS', type=int, default=-1,
help=("Like --timeout, but only for the passing Coq"))
parser.add_argument('--nonpassing-timeout', dest='nonpassing_timeout', metavar='SECONDS', type=int, default=-1,
help=("Like --timeout, but only for the non-passing Coq"))
parser.add_argument('--no-minimize-before-inlining', dest='minimize_before_inlining',
action='store_const', const=False, default=True,
help=("Don't run the full minimization script before inlining [Requires], " +
"and between the inlining of every individual [Require].\n\n" +
"Note that this option will not work well in conjunction with " +
"--passing-coqc.\n"
"Passing this option results in a much more robust " +
"run; it removes the requirement that the compiled dependencies " +
"of the file being debugged remain in place for the duration of the run."))
parser.add_argument('--coqbin', metavar='COQBIN', dest='coqbin', type=str, default='',
help='The path to a folder containing the coqc and coqtop programs.')
parser.add_argument('--coqc', metavar='COQC', dest='coqc', type=str, default='coqc',
help='The path to the coqc program.')
parser.add_argument('--coqtop', metavar='COQTOP', dest='coqtop', type=str, default=DEFAULT_COQTOP,
help=('The path to the coqtop program (default: %s).' % DEFAULT_COQTOP))
parser.add_argument('--coqc-is-coqtop', dest='coqc_is_coqtop', default=False, action='store_const', const=True,
help="Strip the .v and pass -load-vernac-source to the coqc programs; this allows you to pass `--coqc coqtop'")
parser.add_argument('--coqc-args', metavar='ARG', dest='coqc_args', type=str, action='append',
help=('Arguments to pass to coqc; e.g., " -indices-matter" (leading and trailing spaces are stripped)\n' +
'NOTE: If you want to pass an argument to both coqc and coqtop, use --arg="-indices-matter", not --coqc-args="-indices-matter"'))
parser.add_argument('--coqtop-args', metavar='ARG', dest='coqtop_args', type=str, action='append',
help=('Arguments to pass to coqtop; e.g., " -indices-matter" (leading and trailing spaces are stripped)\n' +
'NOTE: If you want to pass an argument to both coqc and coqtop, use --arg="-indices-matter", not --coqc-args="-indices-matter"'))
parser.add_argument('--coq_makefile', metavar='COQ_MAKEFILE', dest='coq_makefile', type=str, default='coq_makefile',
help='The path to the coq_makefile program.')
parser.add_argument('--passing-coqc', metavar='COQC', dest='passing_coqc', type=str, default='',
help='The path to the coqc program that should compile the file successfully.')
parser.add_argument('--base-dir', metavar='DIR', dest='base_dir', type=str, default='',
help='The path to the base directory from which coqc should be run')
parser.add_argument('--passing-base-dir', metavar='DIR', dest='passing_base_dir', type=str, default='',
help='The path to the base directory from which the passing coqc should be run')
parser.add_argument('--passing-coqc-args', metavar='ARG', dest='passing_coqc_args', type=str, action='append',
help='Arguments to pass to coqc so that it compiles the file successfully; e.g., " -indices-matter" (leading and trailing spaces are stripped)')
parser.add_argument('--nonpassing-coqc-args', metavar='ARG', dest='nonpassing_coqc_args', type=str, action='append',
help='Arguments to pass to the failing coqc, i.e., the one that reproduces the error; e.g., " -indices-matter" (leading and trailing spaces are stripped)')
parser.add_argument('--passing-coqc-is-coqtop', dest='passing_coqc_is_coqtop', default=False, action='store_const', const=True,
help="Strip the .v and pass -load-vernac-source to the coqc programs; this allows you to pass `--passing-coqc coqtop'")
parser.add_argument('--error-log', metavar='ERROR_LOG', dest='error_log', type=argparse.FileType('r'), default=None,
help='If given, ensure that the computed error message occurs in this log.')
parser.add_argument('-y', '--yes', '--assume-yes', dest='yes', action='store_true',
help='Automatic yes to prompts. Assume "yes" as answer to all prompts and run non-interactively.')
add_libname_arguments(parser)
add_passing_libname_arguments(parser)
add_logging_arguments(parser)
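# Illustrative invocation (added; the script filename is hypothetical):
#
#   python3 find-bug.py failing_file.v bug_minimized.v --coqc coqc --timeout 300
#
# BUGGY_FILE is the reproducer that is read, OUT_FILE accumulates the
# progressively smaller reproduction, and the optional TEMP_FILE holds
# candidate files while they are being tested.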
SENSITIVE_TIMEOUT_RETRY_COUNT=3
@memoize
def re_compile(pattern, *args):
return re.compile(pattern, *args)
# memoize the compilation
def re_search(pattern, string, flags=0):
return re_compile(pattern, flags).search(string)
def ask(query, **kwargs):
if kwargs['yes']:
print(query)
return 'y'
else:
return raw_input(query)
def get_error_reg_string_of_output(output, **kwargs):
error_reg_string = ''
if diagnose_error.has_error(output):
error_string = diagnose_error.get_error_string(output)
error_reg_string = diagnose_error.make_reg_string(output, strict_whitespace=kwargs['strict_whitespace'])
kwargs['log']("\nI think the error is '%s'.\nThe corresponding regular expression is '%s'.\n" % (error_string, error_reg_string.replace('\\\n', '\\n').replace('\n', '\\n')), force_stdout=True, level=LOG_ALWAYS)
result = ''
while result not in ('y', 'n', 'yes', 'no'):
result = ask('Is this correct? [(y)es/(n)o] ', **kwargs).lower().strip()
if result in ('no', 'n'):
error_reg_string = ''
else:
kwargs['log']('\nThe current state of the file does not have a recognizable error.', level=LOG_ALWAYS)
if error_reg_string == '':
success = False
while not success:
error_reg_string = raw_input('\nPlease enter a regular expression which matches on the output. Leave blank to re-coq the file.\n')
try:
re.compile(error_reg_string)
except Exception as e:
kwargs['log']('\nThat regular expression does not compile: %s' % e, force_stdout=True, level=LOG_ALWAYS)
success = False
else:
success = True
while (error_reg_string != ''
and (not re.search(error_reg_string, output)
or len(re.search(error_reg_string, output).groups()) != 2)):
if not re.search(error_reg_string, output):
kwargs['log']('\nThe given regular expression does not match the output.', force_stdout=True, level=LOG_ALWAYS)
elif len(re.search(error_reg_string, output).groups()) != 2:
kwargs['log']('\nThe given regular expression does not have two groups.', force_stdout=True, level=LOG_ALWAYS)
kwargs['log']('It must first have one integer group which matches on the line number,', force_stdout=True, level=LOG_ALWAYS)
kwargs['log']('and second a group which matches on the error string.', force_stdout=True, level=LOG_ALWAYS)
error_reg_string = raw_input('Please enter a valid regular expression which matches on the output. Leave blank to re-coq the file.\n')
return error_reg_string
def get_error_reg_string(output_file_name, **kwargs):
error_reg_string = ''
while error_reg_string == '':
kwargs['log']('\nCoqing the file (%s)...' % output_file_name)
contents = read_from_file(output_file_name)
diagnose_error.reset_timeout()
kwargs['log']('\nContents:\n\n%s\n\n' % contents, level=3)
output, cmds, retcode, runtime = diagnose_error.get_coq_output(kwargs['coqc'], kwargs['coqc_args'], contents, kwargs['timeout'], is_coqtop=kwargs['coqc_is_coqtop'], verbose_base=1, **kwargs)
result = ''
kwargs['log']("\nThis file produces the following output when Coq'ed:\n%s" % output, force_stdout=True, level=LOG_ALWAYS)
while result not in ('y', 'n', 'yes', 'no'):
result = ask('Does this output display the correct error? [(y)es/(n)o] ', **kwargs).lower().strip()
if result in ('n', 'no'):
raw_input('Please modify the file (%s) so that it errors correctly, and then press ENTER to continue, or ^C to break.' % output_file_name)
continue
error_reg_string = get_error_reg_string_of_output(output, **kwargs)
if error_reg_string == '':
continue
return error_reg_string
def escape_coq_prog_args(coq_prog_args):
return ' '.join('"' + arg.replace('\\', '\\\\').replace('"', r'\"') + '"'
for arg in coq_prog_args)
def unescape_coq_prog_args(coq_prog_args):
ret = []
cur = None
in_string = False
idx = 0
while idx < len(coq_prog_args):
cur_char = coq_prog_args[idx]
idx += 1
if not in_string:
if cur_char == '"':
in_string = True
cur = ''
elif cur_char not in ' \t':
DEFAULT_LOG("Warning: Invalid unquoted character '%s' at index %d in coq-prog-args '%s'" % (cur_char, idx - 1, coq_prog_args), level=LOG_ALWAYS)
return tuple(ret)
else:
if cur_char == '"':
in_string = False
ret.append(cur)
cur = None
elif cur_char == '\\':
if idx < len(coq_prog_args):
# take the next character
cur += coq_prog_args[idx]
idx += 1
else:
DEFAULT_LOG("Warning: Invalid backslash at end of coq-prog-args '%s'" % coq_prog_args, level=LOG_ALWAYS)
else:
cur += cur_char
return tuple(ret)
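# Worked example (added) of the escape/unescape round trip defined above:
#
#   escape_coq_prog_args(['-R', '.', 'Top'])  -> '"-R" "." "Top"'
#   unescape_coq_prog_args('"-R" "." "Top"')  -> ('-R', '.', 'Top')
#
# Double quotes and backslashes inside an argument are backslash-escaped on the
# way out and unescaped again on the way back in.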
COQ_PROG_ARGS_REG = re.compile(r'coq-prog-args\s*:\s*\(([^\)]+)\)')
def get_coq_prog_args(contents):
return tuple(arg
for args in COQ_PROG_ARGS_REG.findall(contents)
for arg in unescape_coq_prog_args(args)
if arg not in ("-emacs", "-emacs-U"))
COQ_PROG_ARGS_REP = re.compile(r'[ \t]*\(\*+\s+-\*-\s+.*?\s-\*-\s+\*+\)\s*')
def strip_coq_prog_args(contents):
return COQ_PROG_ARGS_REP.sub('', contents)
def get_old_header(contents, header=''):
contents = strip_coq_prog_args(contents)
if header[:2] == '(*' and header[-2:] == '*)' and '*)' not in header[2:-2]:
pre_header = header[:header.index('%')]
if pre_header in contents and contents.index('*)') > contents.index(pre_header):
return contents[contents.index(pre_header)+len(pre_header):contents.index('*)')].strip()
return 'original input'
def prepend_header(contents, dynamic_header='', header='', header_dict={}, **kwargs):
"""Fills in the variables in the header for output files"""
contents = strip_coq_prog_args(contents)
if dynamic_header[:2] == '(*' and dynamic_header[-2:] == '*)' and '*)' not in dynamic_header[2:-2]:
pre_header = dynamic_header[:dynamic_header.index('%')]
if contents[:len(pre_header)] == pre_header:
# strip the old header
contents = contents[contents.index('*)')+2:]
if contents[0] == '\n': contents = contents[1:]
if header[:2] == '(*' and header[-2:] == '*)' and '*)' not in header[2:-2]:
pre_header = header[:header.index('%')]
if contents[:len(pre_header)] == pre_header:
# strip the old header
contents = contents[contents.index('*)')+2:]
if contents[0] == '\n': contents = contents[1:]
final_line_count = len(contents.split('\n'))
header_dict = dict(header_dict) # clone the dict
header_dict['final_line_count'] = final_line_count
header_dict['inline_failure_libnames'] = ', '.join(kwargs['inline_failure_libnames'])
header_dict['module_inline_failure_string'] = ('\n Modules that could not be inlined: %s' % header_dict['inline_failure_libnames'] if header_dict['inline_failure_libnames'] else '')
if 'old_header' not in header_dict.keys():
header_dict['old_header'] = 'original input'
use_header = (dynamic_header + '\n' + header) % header_dict
coq_prog_args = ('(* -*- mode: coq; coq-prog-args: ("-emacs" %s) -*- *)\n' % escape_coq_prog_args(kwargs['coqc_args'])
if len(kwargs['coqc_args']) > 0
else '')
## de-duplicate things in a list
## XXX This is a hack to deal with things like "from x lines to y lines, from x lines to y lines"
#if use_header[-3:] == ' *)':
# use_header = ','.join(OrderedSet(use_header[:-3].split(','))) + ' *)'
return '%s%s\n%s' % (coq_prog_args, use_header, contents)
INSTANCE_REG = re.compile(r"(?<![\w'])Instance\s")
CANONICAL_STRUCTURE_REG = re.compile(r"(?<![\w'])Canonical\s+Structure\s")
TC_HINT_REG = re.compile(r"(?<![\w'])Hint\s")
def get_header_dict(contents, old_header=None, original_line_count=0, **env):
coqc_version = get_coqc_version(env['coqc'], **env)
coqtop_version = get_coqtop_version(env['coqtop'], **env)
if old_header is None: old_header = get_old_header(contents, env['dynamic_header'])
return {'original_line_count':original_line_count,
'old_header':old_header,
'coqc_version':coqc_version,
'coqtop_version':coqtop_version,
'recent_runtime':0}
CONTENTS_UNCHANGED, CHANGE_SUCCESS, CHANGE_FAILURE = 'contents_unchanged', 'change_success', 'change_failure'
def classify_contents_change(old_contents, new_contents, ignore_coq_output_cache=False, **kwargs):
# returns (RESULT_TYPE, PADDED_CONTENTS, OUTPUT_LIST, option BAD_INDEX, DESCRIPTION_OF_FAILURE_MODE, RUNTIME, EXTRA_VERBOSE_DESCRIPTION_OF_FAILURE_MODE_TUPLE_LIST)
kwargs['header_dict'] = kwargs.get('header_dict', get_header_dict(new_contents, original_line_count=len(old_contents.split('\n')), **env))
# this is a function, so that once we update the header dict with the runtime, we get the right header
def get_padded_contents(): return prepend_header(new_contents, **kwargs)
if new_contents == old_contents:
return (CONTENTS_UNCHANGED, get_padded_contents(), tuple(), None, 'No change. ', None, [])
if ignore_coq_output_cache: diagnose_error.reset_coq_output_cache(kwargs['coqc'], kwargs['coqc_args'], new_contents, kwargs['timeout'], cwd=kwargs['base_dir'], is_coqtop=kwargs['coqc_is_coqtop'], verbose_base=2, **kwargs)
output, cmds, retcode, runtime = diagnose_error.get_coq_output(kwargs['coqc'], kwargs['coqc_args'], new_contents, kwargs['timeout'], cwd=kwargs['base_dir'], is_coqtop=kwargs['coqc_is_coqtop'], verbose_base=2, **kwargs)
if diagnose_error.has_error(output, kwargs['error_reg_string']):
if kwargs['passing_coqc']:
if ignore_coq_output_cache: diagnose_error.reset_coq_output_cache(kwargs['passing_coqc'], kwargs['passing_coqc_args'], new_contents, kwargs['passing_timeout'], cwd=kwargs['passing_base_dir'], is_coqtop=kwargs['passing_coqc_is_coqtop'], verbose_base=2, **kwargs)
passing_output, cmds, passing_retcode, passing_runtime = diagnose_error.get_coq_output(kwargs['passing_coqc'], kwargs['passing_coqc_args'], new_contents, kwargs['passing_timeout'], cwd=kwargs['passing_base_dir'], is_coqtop=kwargs['passing_coqc_is_coqtop'], verbose_base=2, **kwargs)
if not (diagnose_error.has_error(passing_output) or diagnose_error.is_timeout(passing_output)):
# we return passing_runtime, under the presumption
# that in Coq's test-suite, the file should pass, and
# so this is a better indicator of how long it'll take
kwargs['header_dict']['recent_runtime'] = passing_runtime
return (CHANGE_SUCCESS, get_padded_contents(), (output, passing_output), None, 'Change successful. ', passing_runtime, [])
else:
return (CHANGE_FAILURE, get_padded_contents(), (output, passing_output), 1, 'The alternate coqc (%s) was supposed to pass, but instead emitted an error. ' % kwargs['passing_coqc'], runtime, [])
else:
kwargs['header_dict']['recent_runtime'] = runtime
return (CHANGE_SUCCESS, get_padded_contents(), (output,), None, 'Change successful. ', runtime, [])
else:
extra_desc = ''
extra_desc_list = [(2, 'The error was:\n%s\n' % output)]
return (CHANGE_FAILURE, get_padded_contents(), (output,), 0, extra_desc, runtime, extra_desc_list)
def check_change_and_write_to_file(old_contents, new_contents, output_file_name,
unchanged_message='No change.', success_message='Change successful.',
failure_description='make a change', changed_description='Changed file',
timeout_retry_count=1, ignore_coq_output_cache=False,
verbose_base=1, display_source_to_error=False,
**kwargs):
kwargs['log']('Running coq on the file\n"""\n%s\n"""' % new_contents, level=2 + verbose_base)
change_result, contents, outputs, output_i, error_desc, runtime, error_desc_verbose_list = classify_contents_change(old_contents, new_contents, ignore_coq_output_cache=ignore_coq_output_cache, **kwargs)
if change_result == CONTENTS_UNCHANGED:
kwargs['log']('\n%s' % unchanged_message, level=verbose_base)
return False
elif change_result == CHANGE_SUCCESS:
kwargs['log']('\n%s' % success_message, level=verbose_base)
write_to_file(output_file_name, contents)
return True
elif change_result == CHANGE_FAILURE:
kwargs['log']('\nNon-fatal error: Failed to %s and preserve the error. %s' % (failure_description, error_desc), level=verbose_base)
for lvl, msg in error_desc_verbose_list: kwargs['log'](msg, level=lvl)
if not kwargs['remove_temp_file']: kwargs['log']('Writing %s to %s.' % (changed_description.lower(), kwargs['temp_file_name']), level=verbose_base)
kwargs['log']('The new error was:', level=verbose_base)
kwargs['log'](outputs[output_i], level=verbose_base)
kwargs['log']('All Outputs:\n%s' % '\n'.join(outputs), level=verbose_base+2)
if kwargs['remove_temp_file']: kwargs['log']('%s not saved.' % changed_description, level=verbose_base)
if not kwargs['remove_temp_file']:
write_to_file(kwargs['temp_file_name'], contents)
if timeout_retry_count > 1 and diagnose_error.is_timeout(outputs[output_i]):
kwargs['log']('\nRetrying another %d time%s...' % (timeout_retry_count - 1, 's' if timeout_retry_count > 2 else ''), level=verbose_base)
return check_change_and_write_to_file(old_contents, new_contents, output_file_name,
unchanged_message=unchanged_message, success_message=success_message,
failure_description=failure_description, changed_description=changed_description,
timeout_retry_count=timeout_retry_count-1, ignore_coq_output_cache=True,
verbose_base=verbose_base,
**kwargs)
elif display_source_to_error and diagnose_error.has_error(outputs[output_i]):
new_line = diagnose_error.get_error_line_number(outputs[output_i])
new_start, new_end = diagnose_error.get_error_byte_locations(outputs[output_i])
new_contents_lines = new_contents.split('\n')
new_contents_to_error, new_contents_rest = '\n'.join(new_contents_lines[:new_line-1]), '\n'.join(new_contents_lines[new_line-1:])
kwargs['log']('The file generating the error was:', level=verbose_base)
kwargs['log']('%s\n%s\n' % (new_contents_to_error,
new_contents_rest.encode('utf-8')[:new_end].decode('utf-8')), level=verbose_base)
return False
else:
kwargs['log']('ERROR: Unrecognized change result %s on\nclassify_contents_change(\n %s\n ,%s\n)\n%s'
% (change_result, repr(old_contents), repr(new_contents),
repr((change_result, contents, outputs, output_i, error_desc, runtime, error_desc_verbose_list))),
level=LOG_ALWAYS)
return None
def try_transform_each(definitions, output_file_name, transformer, skip_n=1, **kwargs):
"""Tries to apply transformer to each definition in definitions,
additionally passing in the list of subsequent definitions. If
the returned value of the 'statement' key is not equal to the old
value, or if the return value is a false-y value (indicating that
we should remove the line) then we see if the error is still
present. If it is, we keep the change; otherwise, we discard it.
The order in which definitions are passed in is guaranteed to be
reverse-order.
Returns updated definitions."""
kwargs['log']('try_transform_each', level=3)
original_definitions = [dict(i) for i in definitions]
# TODO(jgross): Use coqtop and [BackTo] to do incremental checking
success = False
i = len(definitions) - 1 - skip_n
while i >= 0:
old_definition = definitions[i]
new_definition = transformer(old_definition, definitions[i + 1:])
if not new_definition:
if kwargs['save_typeclasses'] and \
(INSTANCE_REG.search(old_definition['statement']) or
CANONICAL_STRUCTURE_REG.search(old_definition['statement']) or
TC_HINT_REG.search(old_definition['statement'])):
kwargs['log']('Ignoring Instance/Canonical Structure/Hint: %s' % old_definition['statement'], level=3)
i -= 1
continue
new_definitions = []
elif isinstance(new_definition, dict):
if not new_definition['statement'].strip(): new_definitions = []
else: new_definitions = [new_definition]
else: new_definitions = list(new_definition)
if len(new_definitions) != 1 or \
re.sub(r'\s+', ' ', old_definition['statement']).strip() != re.sub(r'\s+', ' ', new_definitions[0]['statement']).strip():
if len(new_definitions) == 0:
kwargs['log']('Attempting to remove %s' % repr(old_definition['statement']),
level=2)
try_definitions = definitions[:i] + definitions[i + 1:]
else:
kwargs['log']('Attempting to transform %s\ninto\n%s' % (old_definition['statement'], ''.join(defn['statement'] for defn in new_definitions)),
level=2)
if len(new_definitions) > 1: kwargs['log']('Splitting definition: %s' % repr(new_definitions), level=2)
try_definitions = definitions[:i] + new_definitions + definitions[i + 1:]
if check_change_and_write_to_file('', join_definitions(try_definitions), output_file_name, verbose_base=2, **kwargs):
success = True
definitions = try_definitions
# make a copy for saving
save_definitions = [dict(defn) for defn in try_definitions]
else:
kwargs['log']('No change to %s' % old_definition['statement'], level=3)
i -= 1
if success:
kwargs['log'](kwargs['noun_description'] + ' successful')
if join_definitions(save_definitions) != join_definitions(definitions):
kwargs['log']('Probably fatal error: definitions != save_definitions', level=LOG_ALWAYS)
else:
contents = prepend_header(join_definitions(definitions), **kwargs)
write_to_file(output_file_name, contents)
return definitions
else:
kwargs['log'](kwargs['noun_description'] + ' unsuccessful.')
return original_definitions
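# Added sketch of the transformer contract used by try_transform_each and
# try_transform_reversed (these toy transformers are hypothetical; the real ones
# appear later in this script). A transformer receives the current definition
# dict plus the later definitions, and returns a replacement dict, a list of
# dicts, or a falsy value to request removal:
#
#   def drop_everything(cur_definition, rest_definitions):
#       # aggressive pass: ask to remove every statement outright
#       return None
#
#   def keep_unchanged(cur_definition, rest_definitions):
#       return cur_definition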
def try_transform_reversed(definitions, output_file_name, transformer, skip_n=1, **kwargs):
"""Replaces each definition in definitions, with transformer
applied to that definition and the subsequent (transformed)
definitions. If transformer returns a false-y value, the
definition is removed. After transforming the entire list, we see
if the error is still present. If it is, we keep the change;
otherwise, we discard it. The order in which definitions are
passed in is guaranteed to be reverse-order.
Returns updated definitions."""
kwargs['log']('try_transform_reversed', level=3)
definitions = list(definitions) # clone the list of definitions
original_definitions = list(definitions)
kwargs['log'](len(definitions), level=3)
kwargs['log'](definitions, level=3)
for i in reversed(list(range(len(definitions) - skip_n))):
new_definition = transformer(definitions[i], definitions[i + 1:])
if new_definition:
if definitions[i] != new_definition:
kwargs['log']('Transforming %s into %s' % (definitions[i]['statement'], new_definition['statement']),
level=2)
else:
kwargs['log']('No change to %s' % new_definition['statement'], level=3)
definitions[i] = new_definition
else:
if kwargs['save_typeclasses'] and \
(INSTANCE_REG.search(definitions[i]['statement']) or
CANONICAL_STRUCTURE_REG.search(definitions[i]['statement']) or
TC_HINT_REG.search(definitions[i]['statement'])):
kwargs['log']('Ignoring Instance/Canonical Structure/Hint: %s' % definitions[i]['statement'], level=3)
pass
else:
kwargs['log']('Removing %s' % definitions[i]['statement'], level=2)
definitions = definitions[:i] + definitions[i + 1:]
if check_change_and_write_to_file('', join_definitions(definitions), output_file_name,
success_message=kwargs['noun_description']+' successful.', failure_description=kwargs['verb_description'],
changed_description='Intermediate code', **kwargs):
return definitions
return original_definitions
def try_transform_reversed_or_else_each(definitions, *args, **kwargs):
"""Invokes try_transform_reversed. If there are no changes, then try_transform_each is tried."""
    old_definitions = join_definitions(definitions) # for comparison, to see if things have changed
    # first, try to do everything at once; python cycles are assumed to
    # be cheap in comparison to coq cycles
definitions = try_transform_reversed(definitions, *args, **kwargs)
new_definitions = join_definitions(definitions)
if new_definitions == old_definitions:
# we failed to do everything at once, try the simple thing and
# try to admit each individually
kwargs['log']('Failed to do everything at once; trying one at a time.')
definitions = try_transform_each(definitions, *args, **kwargs)
new_definitions = join_definitions(definitions)
if new_definitions == old_definitions:
kwargs['log']('No successful changes.')
else:
kwargs['log']('Success!')
return definitions
def try_remove_if_not_matches_transformer(definition_found_in, **kwargs):
def transformer(cur_definition, rest_definitions):
if any(definition_found_in(cur_definition, future_definition)
for future_definition in rest_definitions):
kwargs['log']('Definition found; found:\n%s\nin\n%s'
% (cur_definition,
[future_definition['statement']
for future_definition in rest_definitions
if definition_found_in(cur_definition, future_definition)][0]),
level=3)
return cur_definition
else:
return None
return transformer
# don't count things like [Section ...], [End ...]
EXCLUSION_REG = re.compile(r"^\s*Section\s+[^\.]+\.\s*$" +
r"|^\s*Module\s+[^\.]+\.\s*$" +
r"|^\s*End\s+[^\.]+\.\s*$" +
r"|^\s*Require\s+[^\.]+\.\s*$" +
r"|^\s*Import\s+[^\.]+\.\s*$" +
r"|^\s*Export\s+[^\.]+\.\s*$")
def try_remove_if_name_not_found_in_transformer(get_names, **kwargs):
def definition_found_in(cur_definition, future_definition):
names = get_names(cur_definition)
if len(names) == 0:
return True
elif EXCLUSION_REG.search(future_definition['statement']):
return False # we don't care if the name is found in a
# statement like [Section ...] or [End ...]
return any(re_search(r"(?<![\w'])%s(?![\w'])" % re.escape(name), future_definition['statement'])
for name in names)
return try_remove_if_not_matches_transformer(definition_found_in, **kwargs)
def try_remove_if_name_not_found_in_section_transformer(get_names, **kwargs):
SECTION_BEGIN_REG = re.compile(r'^\s*(?:Section|Module)\s+[^\.]+\.\s*$')
SECTION_END_REG = re.compile(r'^\s*End\s+[^\.]+\.\s*$')
def transformer(cur_definition, rest_definitions):
names = get_names(cur_definition)
if len(names) == 0:
return cur_definition
section_level = 0
for future_definition in rest_definitions:
if section_level < 0:
break
if SECTION_BEGIN_REG.match(future_definition['statement']):
section_level += 1
elif SECTION_END_REG.match(future_definition['statement']):
section_level -= 1
elif any(re_search(r"(?<![\w'])%s(?![\w'])" % re.escape(name), future_definition['statement'])
for name in names):
return cur_definition
# we didn't find the name, so we can safely remove it
return None
return transformer
def try_remove_non_instance_definitions(definitions, output_file_name, **kwargs):
def get_names(definition):
if INSTANCE_REG.search(definition['statements'][0]):
return tuple()
elif CANONICAL_STRUCTURE_REG.search(definition['statements'][0]):
return tuple()
else:
return definition.get('terms_defined', tuple())
return try_transform_reversed(definitions, output_file_name,
try_remove_if_name_not_found_in_transformer(get_names, **kwargs),
noun_description='Non-instance definition removal',
verb_description='remove non-instance definitions',
**kwargs)
def try_remove_definitions(definitions, output_file_name, **kwargs):
return try_transform_reversed(definitions, output_file_name,
try_remove_if_name_not_found_in_transformer(lambda definition: definition.get('terms_defined', tuple()), **kwargs),
noun_description='Definition removal',
verb_description='remove definitions',
**kwargs)
def try_remove_each_definition(definitions, output_file_name, **kwargs):
return try_transform_each(definitions, output_file_name,
try_remove_if_name_not_found_in_transformer(lambda definition: definition.get('terms_defined', tuple()), **kwargs),
noun_description='Definition removal',
verb_description='remove definitions',
**kwargs)
def try_remove_each_and_every_line(definitions, output_file_name, **kwargs):
return try_transform_each(definitions, output_file_name,
(lambda cur_definition, rest_definitions: False),
noun_description='Line removal',
verb_description='remove lines',
**kwargs)
ABORT_REG = re.compile(r'\sAbort\s*\.\s*$')
def try_remove_aborted(definitions, output_file_name, **kwargs):
return try_transform_reversed(definitions, output_file_name,
(lambda definition, rest:
None if ABORT_REG.search(definition['statement']) else definition),
noun_description='Aborted removal',
verb_description='remove Aborts',
**kwargs)
LTAC_REG = re.compile(r'^\s*(?:Local\s+|Global\s+)?Ltac\s+([^\s]+)', flags=re.MULTILINE)
def try_remove_ltac(definitions, output_file_name, **kwargs):
return try_transform_reversed(definitions, output_file_name,
                                  try_remove_if_name_not_found_in_transformer(lambda definition: LTAC_REG.findall(definition['statement'].replace(':', ' : ')),
                                                                              **kwargs),
noun_description='Ltac removal',
verb_description='remove Ltac',
**kwargs)
DEFINITION_ISH = r'Variables|Variable|Hypotheses|Hypothesis|Parameters|Parameter|Axioms|Axiom|Conjectures|Conjecture'
HINT_REG = re.compile(r'^\s*' +
r'(?:Local\s+|Global\s+|Polymorphic\s+|Monomorphic\s+)*' +
r'(?:' +
r'Definition|Fixpoint|Record|Inductive' +
r'|Coinductive|CoFixpoint' +
r'|Set\s+Universe\s+Polymorphism' +
                      r'|Unset\s+Universe\s+Polymorphism' +
r'|' + DEFINITION_ISH +
r')\.?(?:\s+|$)')
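# Note: try_remove_hints below drops single-statement commands that do *not*
# match HINT_REG (e.g. [Hint Resolve foo.], [Notation ...], [Arguments ...]);
# anything that looks like a definition is kept.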
def try_remove_hints(definitions, output_file_name, **kwargs):
return try_transform_each(definitions, output_file_name,
(lambda definition, rest:
(None
if len(definition['statements']) == 1 and \
not HINT_REG.match(definition['statement'])
else definition)),
noun_description='Hint removal',
verb_description='remove hints',
**kwargs)
VARIABLE_REG = re.compile(r'^\s*' +
r'(?:Local\s+|Global\s+|Polymorphic\s+|Monomorphic\s+)*' +
r'(?:' + DEFINITION_ISH + r')\s+' +
r'([^\.:]+)',
flags=re.MULTILINE)
def try_remove_variables(definitions, output_file_name, **kwargs):
def get_names(definition):
terms = VARIABLE_REG.findall(definition['statement'])
return [i for i in sorted(set(j
for term in terms
for j in term.split(' ')))]
return try_transform_reversed(definitions, output_file_name,
try_remove_if_name_not_found_in_section_transformer(get_names, **kwargs),
noun_description='Variable removal',
verb_description='remove variables',
**kwargs)
CONTEXT_REG = re.compile(r'^\s*' +
r'(?:Local\s+|Global\s+|Polymorphic\s+|Monomorphic\s+)*' +
r'Context\s*`\s*[\({]\s*([^:\s]+)\s*:',
flags=re.MULTILINE)
def try_remove_contexts(definitions, output_file_name, **kwargs):
return try_transform_reversed(definitions, output_file_name,
try_remove_if_name_not_found_in_section_transformer(lambda definition: CONTEXT_REG.findall(definition['statement'].replace(':', ' : ')), **kwargs),
noun_description='Context removal',
verb_description='remove Contexts',
**kwargs)
def try_admit_abstracts(definitions, output_file_name, **kwargs):
def do_call(method, definitions, agressive):
return method(definitions, output_file_name,
(lambda definition, rest_definitions:
transform_abstract_to_admit(definition, rest_definitions, agressive=agressive, log=kwargs['log'])),
noun_description='Admitting [abstract ...]',
verb_description='[abstract ...] admits',
**kwargs)
    old_definitions = join_definitions(definitions) # for comparison, to see if things have changed
    # first, try to do everything at once; python cycles are assumed to
    # be cheap in comparison to coq cycles
definitions = do_call(try_transform_reversed, definitions, True)
new_definitions = join_definitions(definitions)
if new_definitions != old_definitions:
kwargs['log']('Success with [abstract ...] admits on try_transform_reversed, agressive: True, definitions:\n%s'
% new_definitions,
level=3)
return definitions
    # try the other options, each less aggressive than the last
definitions = do_call(try_transform_reversed, definitions, False)
new_definitions = join_definitions(definitions)
if new_definitions != old_definitions:
kwargs['log']('Success with [abstract ...] admits on try_transform_reversed, agressive: False, definitions:\n%s'
% new_definitions,
level=3)
return definitions
definitions = do_call(try_transform_each, definitions, True)
new_definitions = join_definitions(definitions)
if new_definitions != old_definitions:
kwargs['log']('Success with [abstract ...] admits on try_transform_each, agressive: True, definitions:\n%s'
% new_definitions,
level=3)
return definitions
definitions = do_call(try_transform_each, definitions, False)
new_definitions = join_definitions(definitions)
if new_definitions != old_definitions:
kwargs['log']('Success with [abstract ...] admits on try_transform_each, agressive: False, definitions:\n%s'
% new_definitions, level=3)
else:
kwargs['log']('Failure with [abstract ...] admits.', level=3)
return definitions
def make_try_admit_matching_definitions(matcher, use_admitted=False, **kwargs):
def transformer(cur_definition, rest_definitions):
if len(cur_definition['statements']) > 2 and matcher(cur_definition):
statements = (cur_definition['statements'][0], 'Admitted.') if use_admitted else (cur_definition['statements'][0], 'admit.', 'Defined.')
return {'statements':statements,
'statement':'\n'.join(statements),
'terms_defined':cur_definition['terms_defined']}
else:
return cur_definition
def try_admit_matching_definitions(definitions, output_file_name, **kwargs2):
return try_transform_reversed_or_else_each(definitions, output_file_name, transformer, **dict(list(kwargs.items()) + list(kwargs2.items())))
return try_admit_matching_definitions
def make_try_admit_qeds(**kwargs):
QED_REG = re.compile(r"(?<![\w'])Qed\s*\.\s*$", flags=re.MULTILINE)
return make_try_admit_matching_definitions((lambda definition: QED_REG.search(definition['statement'])),
noun_description='Admitting Qeds',
verb_description='admit Qeds',
**kwargs)
def make_try_admit_lemmas(**kwargs):
LEMMA_REG = re.compile(r'^\s*' +
r'(?:Local\s+|Global\s+|Polymorphic\s+|Monomorphic\s+)*' +
r'(?:Lemma|Remark|Fact|Corollary|Proposition)\s*', flags=re.MULTILINE)
return make_try_admit_matching_definitions((lambda definition: LEMMA_REG.search(definition['statement'])),
noun_description='Admitting lemmas',
verb_description='admit lemmas',
**kwargs)
def make_try_admit_definitions(**kwargs):
return make_try_admit_matching_definitions((lambda definition: True),
noun_description='Admitting definitions',
verb_description='admit definitions',
**kwargs)
def try_split_imports(definitions, output_file_name, **kwargs):
def transformer(cur_definition, rest_definitions):
if (len(cur_definition['statements']) > 1
or any(ch in cur_definition['statement'] for ch in '*()')
or cur_definition['statement'].strip()[-1] != '.'
or cur_definition['statement'].strip().replace('\n', ' ').split(' ')[0] not in ('Import', 'Export')):
return cur_definition
else:
terms = [i for i in cur_definition['statement'].strip().replace('\n', ' ')[:-1].split(' ') if i != '']
import_or_export, terms = terms[0], terms[1:]
pat = import_or_export + ' %s.'
rtn_part = dict(cur_definition)
rtn = []
for term in terms:
rtn_part['statement'] = pat % term
rtn_part['statements'] = (pat % term,)
rtn.append(dict(rtn_part))
return tuple(rtn)
return try_transform_each(definitions, output_file_name,
transformer,
noun_description='Import/Export splitting',
verb_description='split Imports/Exports',
**kwargs)
def try_admit_matching_obligations(definitions, output_file_name, matcher, **kwargs):
OBLIGATION_REG = re.compile(r"^\s*(Next\s+Obligation|Obligation\s+[0-9]+)\b", flags=re.DOTALL)
def transformer(cur_definition, rest_definitions):
if len(cur_definition['statements']) > 1 and OBLIGATION_REG.match(cur_definition['statements'][0]) and matcher(cur_definition):
statements = ('Admit Obligations.',)
return {'statements':statements,
'statement':'\n'.join(statements),
'terms_defined':cur_definition['terms_defined']}
else:
return cur_definition
return try_transform_reversed_or_else_each(definitions, output_file_name, transformer, **kwargs)
def try_admit_qed_obligations(definitions, output_file_name, **kwargs):
QED_REG = re.compile(r"(?<![\w'])(Qed|Admitted)\s*\.\s*$", flags=re.MULTILINE)
return try_admit_matching_obligations(definitions, output_file_name,
(lambda definition: QED_REG.search(definition['statement'])),
noun_description='Admitting Qed Obligations',
verb_description='admit Qed Obligations',
**kwargs)
def try_admit_obligations(definitions, output_file_name, **kwargs):
return try_admit_matching_obligations(definitions, output_file_name,
(lambda definition: True),
noun_description='Admitting Obligations',
verb_description='admit Obligations',
**kwargs)
def try_split_oneline_definitions(definitions, output_file_name, **kwargs):
def update_paren(in_string, paren_count, new_string):
for ch in new_string:
if in_string:
if ch == '"': in_string = False
else:
if ch == '"':
in_string = True
elif ch == '(':
paren_count += 1
elif ch == ')':
paren_count -= 1
return (in_string, paren_count)
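    # For example, update_paren(False, 0, 'f (x := "a(b"') returns (False, 1):
    # the '(' inside the string literal is ignored while the outer '(' stays open,
    # so a ':=' seen in that state is not treated as the start of the body.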
def transformer(cur_definition, rest_definitions):
if (len(cur_definition['statements']) > 1
or cur_definition['statement'].strip()[-1] != '.'
or ':=' not in cur_definition['statement']
or len(cur_definition['terms_defined']) != 1):
return cur_definition
else:
terms = cur_definition['statement'].strip()[:-1].split(':=')
pre_statement = terms[0]
in_string, paren_count = update_paren(False, 0, pre_statement)
for i, term in list(enumerate(terms))[1:]:
if not in_string and paren_count == 0:
rtn_part = dict(cur_definition)
rtn_part['statements'] = (pre_statement.rstrip() + '.',
'exact (%s).' % ':='.join(terms[i:]).strip(),
'Defined.')
rtn_part['statement'] = ' '.join(rtn_part['statements'])
return rtn_part
else:
in_string, paren_count = update_paren(in_string, paren_count, term)
                    pre_statement += ':=' + term
return cur_definition
return try_transform_each(definitions, output_file_name,
transformer,
noun_description='One-line definition splitting',
verb_description='split one-line definitions',
**kwargs)
MODULE_REG = re.compile(r'^(\s*Module)(\s+[^\s\.]+\s*\.\s*)$')
def try_export_modules(definitions, output_file_name, **kwargs):
def transformer(cur_definition, rest_definitions):
if (len(cur_definition['statements']) > 1 or
not MODULE_REG.match(cur_definition['statement'])):
return cur_definition
else:
new_statement = MODULE_REG.sub(r'\1 Export\2', cur_definition['statement'])
rtn = dict(cur_definition)
rtn['statement'] = new_statement
rtn['statements'] = (new_statement, )
return rtn
return try_transform_each(definitions, output_file_name,
transformer,
noun_description='Module exportation',
verb_description='export modules',
**kwargs)
def try_strip_comments(output_file_name, **kwargs):
contents = read_from_file(output_file_name)
old_contents = contents
new_contents = strip_comments(contents)
check_change_and_write_to_file(old_contents, new_contents, output_file_name,
unchanged_message='No strippable comments.',
success_message='Succeeded in stripping comments.',
failure_description='strip comments',
                                   changed_description='Stripped comments file',
**kwargs)
def try_normalize_requires(output_file_name, **kwargs):
contents = read_from_file(output_file_name)
old_contents = contents
# we need to clear the libimport cache to get an accurate list of requires
clear_libimport_cache(lib_of_filename(output_file_name, **kwargs))
    new_contents = normalize_requires(output_file_name, update_globs=True, **kwargs)
check_change_and_write_to_file(old_contents, new_contents, output_file_name,
unchanged_message='No Requires to normalize.',
success_message='Succeeded in normalizing Requires.',
failure_description='normalize Requires',
                                   changed_description='Normalized Requires file',
**kwargs)
def try_split_requires(output_file_name, **kwargs):
contents = read_from_file(output_file_name)
old_contents = contents
annotated_contents = get_file_statements_insert_references(output_file_name, update_globs=True, types=('lib',), appends=('<>',), **kwargs)
if annotated_contents is None:
        kwargs['log']('\nNon-fatal error: Failed to get references for %s' % output_file_name, level=LOG_ALWAYS)
return False
    annotated_contents = split_requires_of_statements(annotated_contents, **kwargs)
new_contents = ''.join(v[0].decode('utf-8') for v in annotated_contents)
return check_change_and_write_to_file(old_contents, new_contents, output_file_name,
unchanged_message='No Requires to split.',
success_message='Succeeded in splitting Requires.',
failure_description='split Requires',
                                          changed_description='Split Requires file',
**kwargs)
def try_strip_newlines(output_file_name, max_consecutive_newlines, strip_trailing_space, **kwargs):
contents = read_from_file(output_file_name)
old_contents = contents
if strip_trailing_space:
contents = '\n'.join(line.rstrip() for line in contents.split('\n'))
new_contents = strip_newlines(contents, max_consecutive_newlines)
check_change_and_write_to_file(old_contents, new_contents, output_file_name,
unchanged_message='No strippable newlines or spaces.',
success_message='Succeeded in stripping newlines and spaces.',
failure_description='strip newlines and spaces',
                                   changed_description='Stripped file',
**kwargs)
def try_strip_extra_lines(output_file_name, line_num, **kwargs):
contents = read_from_file(output_file_name)
statements = split_coq_file_contents(contents)
cur_line_num = 0
new_statements = statements
for statement_num, statement in enumerate(statements):
cur_line_num += statement.count('\n') + 1 # +1 for the extra newline between each statement
if cur_line_num >= line_num:
new_statements = statements[:statement_num + 1]
break
if check_change_and_write_to_file('\n'.join(statements), '\n'.join(new_statements), output_file_name,
unchanged_message='No lines to trim.',
success_message=('Trimming successful. We removed all lines after %d; the error was on line %d.' % (cur_line_num, line_num)),
failure_description='trim file',
                                      changed_description='Trimmed file',
**kwargs):
kwargs['log']('Trimmed file:\n%s' % read_from_file(output_file_name), level=3)
EMPTY_SECTION_REG = re.compile(r'(\.\s+|^\s*)(?:Section|Module\s+Export|Module)\s+([^ \.]+)\.' +
r'(?:\s' +
                               r'|Local\s' +
r'|Set\s+Universe\s+Polymorphism\s*\.\s' +
r'|Unset\s+Universe\s+Polymorphism\s*\.\s)+End\s+([^ \.]+)\.(\s+|$)', flags=re.MULTILINE)
def try_strip_empty_sections(output_file_name, **kwargs):
contents = read_from_file(output_file_name)
old_contents = contents
new_contents = EMPTY_SECTION_REG.sub(r'\1', old_contents)
while new_contents != old_contents:
old_contents, new_contents = new_contents, EMPTY_SECTION_REG.sub(r'\1', new_contents)
check_change_and_write_to_file(contents, new_contents, output_file_name,
unchanged_message='No empty sections to remove.',
success_message='Empty section removal successful.',
failure_description='remove empty sections',
                                   changed_description='Trimmed file',
**kwargs)
def add_admit_tactic(contents, **kwargs):
before, after = get_ltac_support_snippet(**kwargs)
tac_code = r"""%sModule Export AdmitTactic.
Module Import LocalFalse.
Inductive False : Prop := .
End LocalFalse.
Axiom proof_admitted : False.
%sTactic Notation "admit" := abstract case proof_admitted.
End AdmitTactic.
""" % (before, after)
tac_code_re = r"""\s*Module Export AdmitTactic\.
?(?:Module Import LocalFalse\.
?(?:Inductive False : Prop := \.)?
?End LocalFalse\.)?
?(?:Axiom proof_admitted : False\.)?
?(?:%s)?(?:Tactic Notation "admit" := abstract case proof_admitted\.)?
?End AdmitTactic\.\n*""" % re.escape(after)
header, contents = split_leading_comments_and_whitespace(contents)
return '%s%s%s' % (header, tac_code, re.sub(tac_code_re, '\n', contents.replace(before, ''), flags=re.DOTALL|re.MULTILINE))
def default_on_fatal(message, log=DEFAULT_LOG, **env):
if message is not None: log(message, level=0, force_stderr=True)
sys.exit(1)
def minimize_file(output_file_name, die=default_on_fatal, **env):
"""The workhorse of bug minimization. The only thing it doesn't handle is inlining [Require]s and other preprocesing"""
contents = read_from_file(output_file_name)
coqc_help = get_coqc_help(env['coqc'], **env)
env['header_dict'] = get_header_dict(contents, **env)
if not check_change_and_write_to_file('', contents, output_file_name,
unchanged_message='Invalid empty file!', success_message='Sanity check passed.',
failure_description='validate all coq runs', changed_description='File',
timeout_retry_count=SENSITIVE_TIMEOUT_RETRY_COUNT,
**env):
return die('Fatal error: Sanity check failed.', **env)
if env['max_consecutive_newlines'] >= 0 or env['strip_trailing_space']:
env['log']('\nNow, I will attempt to strip repeated newlines and trailing spaces from this file...')
try_strip_newlines(output_file_name, **env)
contents = read_from_file(output_file_name)
original_line_count = len(contents.split('\n'))
env['header_dict']['original_line_count'] = original_line_count
env['log']('\nNow, I will attempt to strip the comments from this file...')
try_strip_comments(output_file_name, **env)
env['log']('\nNow, I will attempt to factor out all of the [Require]s...')
try_normalize_requires(output_file_name, **env)
env['log']('\nNow, I will attempt to split up [Require] statements...')
try_split_requires(output_file_name, **env)
contents = read_from_file(output_file_name)
env['log']('\nIn order to efficiently manipulate the file, I have to break it into statements. I will attempt to do this by matching on periods.')
strings = re.findall(r'"[^"\n\r]+"', contents)
bad_strings = [i for i in strings if re.search(r'(?<=[^\.]\.\.\.)\s|(?<=[^\.]\.)\s', i)]
if bad_strings:
env['log']('If you have periods in strings, and these periods are essential to generating the error, then this process will fail. Consider replacing the string with some hack to get around having a period and then a space, like ["a. b"%string] with [("a." ++ " b")%string].')
env['log']('You have the following strings with periods in them:\n%s' % '\n'.join(bad_strings))
statements = split_coq_file_contents(contents)
if not check_change_and_write_to_file('', '\n'.join(statements), output_file_name,
unchanged_message='Invalid empty file!',
success_message='Splitting successful.',
failure_description='split file to statements',
changed_description='Split',
timeout_retry_count=SENSITIVE_TIMEOUT_RETRY_COUNT,
**env):
env['log']('I will not be able to proceed.')
env['log']('re.search(' + repr(env['error_reg_string']) + ', <output above>)', level=2)
return die(None, **env)
env['log']('\nI will now attempt to remove any lines after the line which generates the error.')
output, cmds, retcode, runtime = diagnose_error.get_coq_output(env['coqc'], env['coqc_args'], '\n'.join(statements), env['timeout'], is_coqtop=env['coqc_is_coqtop'], verbose_base=2, **env)
line_num = diagnose_error.get_error_line_number(output, env['error_reg_string'])
try_strip_extra_lines(output_file_name, line_num, **env)
env['log']('\nIn order to efficiently manipulate the file, I have to break it into definitions. I will now attempt to do this.')
contents = read_from_file(output_file_name)
statements = split_coq_file_contents(contents)
env['log']('I am using the following file: %s' % '\n'.join(statements), level=3)
definitions = split_statements_to_definitions(statements, **env)
if not check_change_and_write_to_file('', join_definitions(definitions), output_file_name,
unchanged_message='Invalid empty file!',
success_message='Splitting to definitions successful.',
failure_description='split file to definitions',
changed_description='Split',
timeout_retry_count=SENSITIVE_TIMEOUT_RETRY_COUNT,
**env):
env['log']('I will not be able to proceed.')
env['log']('re.search(' + repr(env['error_reg_string']) + ', <output above>)', level=2)
return die(None, **env)
recursive_tasks = (('remove goals ending in [Abort.]', try_remove_aborted),
('remove unused Ltacs', try_remove_ltac),
('remove unused definitions', try_remove_definitions),
('remove unused non-instance, non-canonical structure definitions', try_remove_non_instance_definitions),
('remove unused variables', try_remove_variables),
('remove unused contexts', try_remove_contexts))
tasks = recursive_tasks
if env['admit_opaque']:
if env['admit_obligations']:
tasks += (('replace Qed Obligation with Admit Obligations', try_admit_qed_obligations),)
tasks += ((('replace Qeds with Admitteds', make_try_admit_qeds(use_admitted=True)),
('replace Qeds with admit. Defined.', make_try_admit_qeds(use_admitted=False))) +
# we've probably just removed a lot, so try to remove definitions again
recursive_tasks +
(('admit [abstract ...]s', try_admit_abstracts),) +
# we've probably just removed a lot, so try to remove definitions again
recursive_tasks)
if not env['aggressive']:
tasks += (('remove unused definitions, one at a time', try_remove_each_definition),)
if env['admit_transparent']:
if env['admit_obligations']:
tasks += (('replace Obligation with Admit Obligations', try_admit_obligations),)
tasks += (('admit lemmas with Admitted', make_try_admit_lemmas(use_admitted=True)),
('admit definitions with Admitted', make_try_admit_definitions(use_admitted=True)),
('admit lemmas with admit. Defined', make_try_admit_lemmas(use_admitted=False)),
('admit definitions with admit. Defined', make_try_admit_definitions(use_admitted=False)))
if not env['aggressive'] and not env['save_typeclasses']:
tasks += (('remove hints', try_remove_hints),)
tasks += (('export modules', try_export_modules),
('split imports and exports', try_split_imports),
('split := definitions', try_split_oneline_definitions))
if env['aggressive']:
tasks += ((('remove all lines, one at a time', try_remove_each_and_every_line),) +
# we've probably just removed a lot, so try to remove definitions again
recursive_tasks)
old_definitions = ''
while old_definitions != join_definitions(definitions):
old_definitions = join_definitions(definitions)
env['log']('Definitions:', level=2)
env['log'](definitions, level=2)
for description, task in tasks:
env['log']('\nI will now attempt to %s' % description)
definitions = task(definitions, output_file_name, **env)
env['log']('\nI will now attempt to remove empty sections')
try_strip_empty_sections(output_file_name, **env)
if env['max_consecutive_newlines'] >= 0 or env['strip_trailing_space']:
env['log']('\nNow, I will attempt to strip repeated newlines and trailing spaces from this file...')
try_strip_newlines(output_file_name, **env)
return True
def maybe_add_coqlib_import(contents, **env):
if env['inline_coqlib']:
contents = 'Require Coq.Init.Prelude.\nImport Coq.Init.Prelude.\n' + contents
return contents
if __name__ == '__main__':
try:
args = process_logging_arguments(parser.parse_args())
except argparse.ArgumentError as exc:
if exc.message == 'expected one argument':
exc.reraise('\nNote that argparse does not accept arguments with leading dashes.\nTry --foo=bar or --foo " -bar", if this was your intent.\nSee Python issue 9334.')
else:
exc.reraise()
def prepend_coqbin(prog):
if args.coqbin != '':
return os.path.join(args.coqbin, prog)
else:
return prog
bug_file_name = args.bug_file.name
output_file_name = args.output_file
env = {
'fast_merge_imports': args.fast_merge_imports,
'log': args.log,
'coqc': prepend_coqbin(args.coqc),
'coqtop': prepend_coqbin(args.coqtop),
'as_modules': args.wrap_modules,
'max_consecutive_newlines': args.max_consecutive_newlines,
'header': args.header,
'dynamic_header': args.dynamic_header,
'strip_trailing_space': args.strip_trailing_space,
'timeout': (args.nonpassing_timeout if args.nonpassing_timeout != -1 else args.timeout),
'passing_timeout': (args.passing_timeout if args.passing_timeout != -1 else args.timeout),
'absolutize': args.absolutize,
'minimize_before_inlining': args.minimize_before_inlining,
'save_typeclasses': args.save_typeclasses,
'admit_opaque': args.admit_opaque and args.admit_any,
'admit_obligations': args.admit_obligations and args.admit_any,
'aggressive': args.aggressive,
'admit_transparent': args.admit_transparent and args.admit_any,
'coqc_args': tuple(i.strip()
for i in (list(process_maybe_list(args.nonpassing_coqc_args, log=args.log))
+ list(process_maybe_list(args.nonpassing_coq_args, log=args.log))
+ list(process_maybe_list(args.coq_args, log=args.log)))),
'coqtop_args': tuple(i.strip()
for i in (list(process_maybe_list(args.coqtop_args, log=args.log))
+ list(process_maybe_list(args.nonpassing_coq_args, log=args.log))
+ list(process_maybe_list(args.coq_args, log=args.log)))),
'coq_makefile': prepend_coqbin(args.coq_makefile),
'passing_coqc_args': tuple(i.strip()
for i in (list(process_maybe_list(args.passing_coqc_args, log=args.log))
+ list(process_maybe_list(args.passing_coq_args, log=args.log))
+ list(process_maybe_list(args.coq_args, log=args.log)))),
'passing_coqc' : (prepend_coqbin(args.passing_coqc)
if args.passing_coqc != ''
else (prepend_coqbin(args.coqc)
if args.passing_coqc_args is not None
else None)),
'passing_base_dir': (os.path.abspath(args.passing_base_dir)
if args.passing_base_dir != ''
else None),
'base_dir': (os.path.abspath(args.base_dir)
if args.base_dir != ''
else None),
'walk_tree': args.walk_tree,
'strict_whitespace': args.strict_whitespace,
'temp_file_name': args.temp_file,
'coqc_is_coqtop': args.coqc_is_coqtop,
'passing_coqc_is_coqtop': args.passing_coqc_is_coqtop,
'inline_coqlib': args.inline_coqlib,
'yes': args.yes,
'inline_failure_libnames': [],
}
if bug_file_name[-2:] != '.v':
env['log']('\nError: BUGGY_FILE must end in .v (value: %s)' % bug_file_name, force_stdout=True, level=LOG_ALWAYS)
sys.exit(1)
if output_file_name[-2:] != '.v':
env['log']('\nError: OUT_FILE must end in .v (value: %s)' % output_file_name, force_stdout=True, level=LOG_ALWAYS)
sys.exit(1)
if os.path.exists(output_file_name):
env['log']('\nWarning: OUT_FILE (%s) already exists. Would you like to overwrite?\n' % output_file_name, force_stdout=True, level=LOG_ALWAYS)
if not yes_no_prompt(yes=env['yes']):
sys.exit(1)
for k, arg in (('base_dir', '--base-dir'), ('passing_base_dir', '--passing-base-dir')):
if env[k] is not None and not os.path.isdir(env[k]):
env['log']('\nError: Argument to %s (%s) must exist and be a directory.' % (arg, env[k]), force_stdout=True, level=LOG_ALWAYS)
sys.exit(1)
env['remove_temp_file'] = False
if env['temp_file_name'] == '':
temp_file = tempfile.NamedTemporaryFile(suffix='.v', dir='.', delete=False)
env['temp_file_name'] = temp_file.name
temp_file.close()
env['remove_temp_file'] = True
def make_make_coqc(coqc_prog, **kwargs):
if get_coq_accepts_compile(coqc_prog): return os.path.join(SCRIPT_DIRECTORY, 'coqtop-as-coqc.sh') + ' ' + coqc_prog
if 'coqtop' in coqc_prog: return coqc_prog.replace('coqtop', 'coqc')
return 'coqc'
if env['coqc_is_coqtop']:
if env['coqc'] == 'coqc': env['coqc'] = env['coqtop']
env['make_coqc'] = make_make_coqc(env['coqc'], **env)
if env['passing_coqc_is_coqtop']:
if env['passing_coqc'] == 'coqc': env['passing_coqc'] = env['coqtop']
env['passing_make_coqc'] = make_make_coqc(env['passing_coqc'], **env)
coqc_help = get_coqc_help(env['coqc'], **env)
coqc_version = get_coqc_version(env['coqc'], **env)
update_env_with_libnames(env, args, include_passing=env['passing_coqc'],
                             use_default=not has_dir_binding(env['coqc_args'], coqc_help=coqc_help, file_name=bug_file_name),
use_passing_default=not has_dir_binding(env['passing_coqc_args'], coqc_help=coqc_help, file_name=bug_file_name))
if args.inline_user_contrib:
for passing_prefix in ('', 'passing_'):
if env[passing_prefix + 'coqc']:
update_env_with_coqpath_folders(passing_prefix, env, os.path.join(get_coqc_coqlib(env[passing_prefix + 'coqc'], **env), 'user-contrib'))
env['log']('{', level=2)
for k, v in sorted(list(env.items())):
env['log'](' %s: %s' % (repr(k), repr(v)), level=2)
env['log']('}', level=2)
for passing_prefix in ('', 'passing_'):
if env[passing_prefix + 'coqc']:
args_key = passing_prefix + 'coqc_args'
if '-native-compiler' not in env[args_key]:
env[args_key] = tuple(list(env[args_key]) + list(get_coq_native_compiler_ondemand_fragment(env[passing_prefix + 'coqc'], **env)))
try:
if env['temp_file_name'][-2:] != '.v':
env['log']('\nError: TEMP_FILE must end in .v (value: %s)' % env['temp_file_name'], force_stdout=True, level=LOG_ALWAYS)
sys.exit(1)
env['log']('\nCoq version: %s\n' % coqc_version)
extra_args = get_coq_prog_args(get_file(bug_file_name, **env)) if args.use_coq_prog_args else []
for args_name, coq_prog, passing_prefix in (('coqc_args', env['coqc'], ''), ('coqtop_args', env['coqtop'], ''), ('passing_coqc_args', env['passing_coqc'] if env['passing_coqc'] else env['coqc'], 'passing_')):
env[args_name] = tuple(list(env[args_name]) + list(extra_args))
for dirname, libname in env.get(passing_prefix + 'libnames', []):
env[args_name] = tuple(list(env[args_name]) + ['-R', dirname, libname])
for dirname, libname in env.get(passing_prefix + 'non_recursive_libnames', []):
env[args_name] = tuple(list(env[args_name]) + ['-Q', dirname, libname])
for dirname in env.get(passing_prefix + 'ocaml_dirnames', []):
env[args_name] = tuple(list(env[args_name]) + ['-I', dirname])
env[args_name] = deduplicate_trailing_dir_bindings(env[args_name], coqc_help=coqc_help, file_name=bug_file_name, coq_accepts_top=get_coq_accepts_top(coq_prog))
for arg in group_coq_args(extra_args, coqc_help):
for passing_prefix in ('passing_', ''):
if arg[0] == '-R': env.get(passing_prefix + 'libnames', []).append((arg[1], arg[2]))
if arg[0] == '-Q': env.get(passing_prefix + 'non_recursive_libnames', []).append((arg[1], arg[2]))
if arg[0] == '-I': env.get(passing_prefix + 'ocaml_dirnames', []).append(arg[1])
if env['minimize_before_inlining']:
env['log']('\nFirst, I will attempt to absolutize relevant [Require]s in %s, and store the result in %s...' % (bug_file_name, output_file_name))
inlined_contents = get_file(bug_file_name, update_globs=True, **env)
args.bug_file.close()
inlined_contents = maybe_add_coqlib_import(inlined_contents, **env)
inlined_contents = add_admit_tactic(inlined_contents, **env)
write_to_file(output_file_name, inlined_contents)
else:
if env['inline_coqlib']:
env['log']('\nError: --inline-coqlib is incompatible with --no-minimize-before-inlining;\nthe Coq standard library is not suited for inlining all-at-once.', force_stdout=True, level=LOG_ALWAYS)
sys.exit(1)
env['log']('\nFirst, I will attempt to inline all of the inputs in %s, and store the result in %s...' % (bug_file_name, output_file_name))
inlined_contents = include_imports(bug_file_name, **env)
args.bug_file.close()
if inlined_contents:
inlined_contents = add_admit_tactic(inlined_contents, **env)
env['log']('Stripping trailing ends')
while re.search(r'End [^ \.]*\.\s*$', inlined_contents):
inlined_contents = re.sub(r'End [^ \.]*\.\s*$', '', inlined_contents)
write_to_file(output_file_name, inlined_contents)
else:
env['log']('Failed to inline inputs.')
sys.exit(1)
if env['inline_coqlib']:
for key in ('coqc_args', 'coqtop_args', 'passing_coqc_args'):
env[key] = tuple(list(env[key]) + ['-nois', '-coqlib', env['inline_coqlib']])
env['libnames'] = tuple(list(env['libnames']) + [(os.path.join(env['inline_coqlib'], 'theories'), 'Coq')])
env['log']('\nNow, I will attempt to coq the file, and find the error...')
env['error_reg_string'] = get_error_reg_string(output_file_name, **env)
if args.error_log:
env['log']('\nNow, I will attempt to find the error message in the log...')
error_log = args.error_log.read()
args.error_log.close()
if not diagnose_error.has_error(error_log, env['error_reg_string']):
default_on_fatal('The computed error message was not present in the given error log.', **env)
# initial run before we (potentially) do fancy things with the requires
minimize_file(output_file_name, **env)
if env['minimize_before_inlining']: # if we've not already inlined everything
# so long as we keep changing, we will pull all the
# requires to the top, then try to replace them in reverse
# order. As soon as we succeed, we reset the list
last_output = get_file(output_file_name, **env)
clear_libimport_cache(lib_of_filename(output_file_name, **env))
cur_output_gen = (lambda mod_remap: add_admit_tactic(get_file(output_file_name, mod_remap=mod_remap, **env), **env).strip() + '\n')
cur_output = cur_output_gen(dict())
# keep a list of libraries we've already tried to inline, and don't try them again
libname_blacklist = []
first_run = True
while cur_output != last_output or first_run:
first_run = False
last_output = cur_output
requires = recursively_get_requires_from_file(output_file_name, update_globs=True, **env)
for req_module in reversed(requires):
if req_module in libname_blacklist:
continue
else:
libname_blacklist.append(req_module)
rep = '\nRequire %s.\n' % req_module
if rep not in '\n' + cur_output:
env['log']('\nWarning: I cannot find Require %s.' % req_module)
env['log']('in contents:\n' + cur_output, level=3)
continue
try:
# we prefer wrapping modules via Include,
# because this is a bit more robust against
# future module inlining (see example test 45)
def get_test_output(absolutize_mods=False, first_wrap_then_include=True, without_require=True, insert_at_top=False):
test_output = cur_output if not absolutize_mods else cur_output_gen({req_module: absolutize_and_mangle_libname(req_module, first_wrap_then_include=first_wrap_then_include)})
replacement = '\n' + get_required_contents(req_module, first_wrap_then_include=first_wrap_then_include, without_require=without_require, **env).strip() + '\n'
if without_require:
all_imports = run_recursively_get_imports(req_module, **env) # like get_recursive_require_names, but with better sorting properties, I think, and also automatically strips off the self module
replacement = '\n' + ''.join('Require %s.\n' % i for i in all_imports) + replacement
if insert_at_top:
header, test_output = split_leading_comments_and_whitespace(test_output)
return add_admit_tactic((
header +
replacement + '\n' +
('\n' + test_output).replace(rep, '\n')).strip() + '\n',
**env)
else:
return ('\n' + test_output).replace(rep, replacement, 1).replace(rep, '\n').strip() + '\n'
test_output_alts = [
(((' without Include' if not first_wrap_then_include else ' via Include')
+ (', absolutizing mod references' if absolutize_mods else '')
+ (', stripping Requires' if without_require else ', with Requires')),
get_test_output(absolutize_mods=absolutize_mods, first_wrap_then_include=first_wrap_then_include, without_require=without_require, insert_at_top=insert_at_top))
for absolutize_mods in (False, True)
for first_wrap_then_include in (True, False)
for without_require, insert_at_top in ((True, False),
(False, True),
(False, False))
]
(test_output_descr, test_output), test_output_alts = test_output_alts[0], test_output_alts[1:]
except IOError as e:
env['log']('\nWarning: Cannot inline %s (%s)\nRecursively Searched: %s\nNonrecursively Searched: %s' % (req_module, str(e), str(tuple(env['libnames'])), str(tuple(env['non_recursive_libnames']))))
continue
diagnose_error.reset_timeout()
if not check_change_and_write_to_file(
cur_output, test_output, output_file_name,
unchanged_message='Invalid empty file!', success_message=('Inlining %s%s succeeded.' % (req_module, test_output_descr)),
failure_description=('inline %s%s' % (req_module, test_output_descr)), changed_description='File',
timeout_retry_count=SENSITIVE_TIMEOUT_RETRY_COUNT, # is this the right retry count?
display_source_to_error=False,
**env):
# any lazily evaluates the iterator, so we'll
# only run the check up to the point of the
# first success
if not any(check_change_and_write_to_file(
cur_output, test_output_alt, output_file_name,
unchanged_message='Invalid empty file!', success_message=('Inlining %s%s succeeded.' % (req_module, descr)),
failure_description=('inline %s%s' % (req_module, descr)), changed_description='File',
timeout_retry_count=SENSITIVE_TIMEOUT_RETRY_COUNT, # is this the right retry count?
display_source_to_error=True,
**env)
for descr, test_output_alt in test_output_alts):
# let's also display the error and source
# for the original failure to inline,
# without the Include, so we can see
# what's going wrong in both cases
check_change_and_write_to_file(
cur_output, test_output, output_file_name,
unchanged_message='Invalid empty file!', success_message=('Inlining %s%s succeeded.' % (req_module, test_output_descr)),
failure_description=('inline %s%s' % (req_module, test_output_descr)), changed_description='File',
timeout_retry_count=SENSITIVE_TIMEOUT_RETRY_COUNT, # is this the right retry count?
display_source_to_error=True,
**env)
extra_blacklist = [r for r in get_recursive_require_names(req_module, **env) if r not in libname_blacklist]
if extra_blacklist:
env['log']('\nWarning: Preemptively skipping recursive dependency module%s: %s\n'
% (('' if len(extra_blacklist) == 1 else 's'), ', '.join(extra_blacklist)))
libname_blacklist.extend(extra_blacklist)
env['inline_failure_libnames'].append(req_module)
continue
if minimize_file(output_file_name, die=(lambda *args, **kargs: False), **env):
break
clear_libimport_cache(lib_of_filename(output_file_name, libnames=tuple(env['libnames']), non_recursive_libnames=tuple(env['non_recursive_libnames'])))
cur_output = cur_output_gen(dict())
# and we make one final run, or, in case there are no requires, one run
minimize_file(output_file_name, **env)
except EOFError:
env['log'](traceback.format_exc(), level=LOG_ALWAYS)
raise
except Exception:
if hasattr(traceback, 'TracebackException'):
etype, value, tb = sys.exc_info()
env['log'](''.join(traceback.TracebackException(type(value), value, tb, capture_locals=True).format()), level=LOG_ALWAYS)
else:
env['log'](traceback.format_exc(), level=LOG_ALWAYS)
raise
finally:
if env['remove_temp_file']:
clean_v_file(env['temp_file_name'])
|
import pytest
from pages.BaseApp import BasePage
from selenium.webdriver.common.by import By
class YandexSeacrhLocators:
URL = "https://ya.ru/"
LOCATOR_YANDEX_SEARCH_FIELD = (By.ID, "text")
LOCATOR_YANDEX_SEARCH_BUTTON = (By.CLASS_NAME, "search2__button")
LOCATOR_YANDEX_NAVIGATION_BAR = (By.CSS_SELECTOR, ".service__name")
class YandexPage(BasePage):
def go_to_site(self):
return self.driver.get(YandexSeacrhLocators.URL)
def enter_word(self, word):
search_field = self.find_element(YandexSeacrhLocators.LOCATOR_YANDEX_SEARCH_FIELD)
search_field.click()
search_field.send_keys(word)
return search_field
def click_on_the_search_button(self):
return self.find_element(YandexSeacrhLocators.LOCATOR_YANDEX_SEARCH_BUTTON,time=2).click()
def check_navigation_bar(self):
all_list = self.find_elements(YandexSeacrhLocators.LOCATOR_YANDEX_NAVIGATION_BAR,time=2)
nav_bar_menu = [x.text for x in all_list if len(x.text) > 0]
return nav_bar_menu
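# Usage sketch (the "browser" fixture name is hypothetical; it is assumed to be a
# pytest fixture yielding a Selenium WebDriver, as in the rest of this page-object suite):
#   def test_yandex_search(browser):
#       page = YandexPage(browser)
#       page.go_to_site()
#       page.enter_word("python")
#       page.click_on_the_search_button()
#       assert len(page.check_navigation_bar()) > 0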
|
__all__ = ['pca']
import numpy as np
from sklearn.decomposition import PCA
def pca(data, *, n_components):
"""the PCA dimensionality reduction algorithms
Parameters
----------
data: tuple
conclude the train set and test set
n_components: int
the parameter of the pca
Returns
-------
return the train set and test set after dimensionality
"""
train = data[0]
test = data[1]
train_feature_lable = np.array(train)
test_feature_lable = np.array(test)
train_set = train_feature_lable[:, 0:-1]
test_set = test_feature_lable[:, 0:-1]
    pca_model = PCA(n_components=n_components).fit(train_set)  # fit the PCA model on the (standardized) training set
    # apply the fitted model to the training set
    train_pca = pca_model.transform(train_set)
    # apply the fitted model to the test set
    test_pca = pca_model.transform(test_set)
train_pca = np.hstack((train_pca, train_feature_lable[:, -1].reshape((-1, 1))))
test_pca = np.hstack((test_pca, test_feature_lable[:, -1].reshape((-1, 1))))
return train_pca, test_pca
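if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): synthetic data in
    # which the last column of each set is the label, reduced to 2 components.
    rng = np.random.RandomState(0)
    train = np.hstack((rng.rand(20, 5), rng.randint(0, 2, (20, 1))))
    test = np.hstack((rng.rand(10, 5), rng.randint(0, 2, (10, 1))))
    train_pca, test_pca = pca((train, test), n_components=2)
    print(train_pca.shape, test_pca.shape)  # expected: (20, 3) (10, 3)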
|
import datetime
from database_setup import *
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///catalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
session.query(Catagory).delete()
session.query(Items).delete()
session.query(User).delete()
user1 = User(name="Vincent Wang",
email="vincentwang1130@gmail.com",
picture="https://plus.google.com/u/0/photos/"
"108682973839556116579/albums/profile/6387569522704376114")
session.add(user1)
session.commit()
catagory1 = Catagory(name='Soccer', user_id=1)
session.add(catagory1)
session.commit()
catagory2 = Catagory(name='Basketball', user_id=1)
session.add(catagory2)
session.commit()
catagory3 = Catagory(name='Baseball', user_id=1)
session.add(catagory3)
session.commit()
catagory4 = Catagory(name='Frisbee', user_id=1)
session.add(catagory4)
session.commit()
catagory5 = Catagory(name='Snowboarding', user_id=1)
session.add(catagory5)
session.commit()
catagory6 = Catagory(name='Rock Climbing', user_id=1)
session.add(catagory6)
session.commit()
catagory7 = Catagory(name='Foosball', user_id=1)
session.add(catagory7)
session.commit()
catagory8 = Catagory(name='Skating', user_id=1)
session.add(catagory8)
session.commit()
catagory9 = Catagory(name='Hockey', user_id=1)
session.add(catagory9)
session.commit()
item1 = Items(name='Stick',
date=datetime.datetime.now(),
description="Our selection of hockey sticks "
"will have you shooting better and scoring more. "
"High quality composite, wood and goalie sticks.",
catagory_id=9,
user_id=1)
session.add(item1)
session.commit()
item2 = Items(name='Goggles',
date=datetime.datetime.now(),
description="Superior color and contrast enhancement "
"comes standard in our lineup of Happy Lens snow goggles.",
catagory_id=5,
user_id=1)
session.add(item2)
session.commit()
item3 = Items(name='Snowboard',
date=datetime.datetime.now(),
description="There is no mountain large enough to scare away "
"the K2 Ultra Splitboard. "
"This burly, stiff deck has a directional shape "
"tuned for the best descents of your life.",
catagory_id=5,
user_id=1)
session.add(item3)
session.commit()
item4 = Items(name='Two shinguards',
date=datetime.datetime.now(),
description=" Shin guards and socks are required "
"for legal play in nearly all soccer leagues.",
catagory_id=1,
user_id=1)
session.add(item4)
session.commit()
item5 = Items(name='Shinguards',
date=datetime.datetime.now(),
description="Shin guards and socks are required "
"for legal play in nearly all soccer leagues.",
catagory_id=1,
user_id=1)
session.add(item5)
session.commit()
item6 = Items(name='Frisbee',
date=datetime.datetime.now(),
description="We spent 30 hours testing flying discs and "
"decided that the Discraft UltraStar is the best "
"recreational flying disc.",
catagory_id=4,
user_id=1)
session.add(item6)
session.commit()
item7 = Items(name='Bat',
date=datetime.datetime.now(),
description="A baseball bat is a smooth wooden or metal club "
"used in the sport of baseball to hit the ball after "
"it is thrown by the pitcher.",
catagory_id=3,
user_id=1)
session.add(item7)
session.commit()
item8 = Items(name='Jersey',
date=datetime.datetime.now(),
description="We have it here! Get a matching jersey "
"to go along with your kids favorite team, "
"check out our entire collection of Soccer Jerseys.",
catagory_id=1,
user_id=1)
session.add(item8)
session.commit()
item9 = Items(name='Soccer Cleats',
date=datetime.datetime.now(),
description="urf shoes have identical uppers to traditional "
"soccer boots, but the outsoles differ in "
"that they make up the cleat portion.",
catagory_id=1,
user_id=1)
session.add(item9)
print "Data base dummy data injected"
|
class FileInfo:
def __init__(self, file_info):
self.components = file_info["components"]
self.parent = file_info["parent"]
self.name = file_info["name"]
self.extension = file_info.get("extension","")
self.toString = file_info["toString"]
def _get_components(self):
return self._components
def _set_components(self, value):
self._components = value
def _get_parent(self):
return self._parent
def _set_parent(self, value):
self._parent = value
def _get_name(self):
return self._name
def _set_name(self, value):
self._name = value
def _get_extension(self):
return self._extension
def _set_extension(self, value):
self._extension = value
def _get_to_string(self):
return self._toString
def _set_to_string(self, value):
self._toString = value
    components = property(_get_components, _set_components, doc="The components the file resides in.")
parent = property(_get_parent, _set_parent, doc="The parent folder the file resides in.")
name = property(_get_name, _set_name, doc="The name of the file.")
extension = property(_get_extension, _set_extension, doc="The extension of the file.")
toString = property(_get_to_string, _set_to_string,
doc="A string value representing the full path from repository root.")
|
import os
import build_utils
import build_config
import shutil
def get_supported_targets(platform):
if platform == 'win32':
return ['win32']
elif platform == 'darwin':
return ['macos']
else:
return []
def get_dependencies_for_target(target):
return []
def build_for_target(target, working_directory_path, root_project_path):
if target == 'win32':
_build_win32(working_directory_path, root_project_path)
elif target == 'macos':
_build_macos(working_directory_path, root_project_path)
def get_download_info():
return 'Libs/bullet'
def _build_win32(working_directory_path, root_project_path):
source_folder_path=os.path.join(root_project_path, get_download_info())
build_x86_folder, build_x64_folder = (
build_utils.build_and_copy_libraries_win32_cmake(
os.path.join(working_directory_path, 'gen'),
source_folder_path,
root_project_path,
'bullet.sln', 'bullet',
'bullet.lib', 'bullet.lib',
'bullet.lib', 'bullet.lib',
'bullet.lib', 'bullet.lib',
static_runtime=False))
def _build_macos(working_directory_path, root_project_path):
source_folder_path=os.path.join(root_project_path, get_download_info())
build_utils.build_and_copy_libraries_macos_cmake(
os.path.join(working_directory_path, 'gen'),
source_folder_path,
root_project_path,
'bullet.xcodeproj', 'bullet',
'libbullet.a',
'libbullet.a')
|
from pycircuit.circuit import *
from myTabVCCS import myVCCS
import pylab
circuit.default_toolkit = numeric
Rs=50.
Vs=1e-4
def build_lna_degen_gm():
c = SubCircuit()
c['Rs'] = R(1, gnd, r=Rs)
c['vs'] = VS(2, 1, vac=Vs)
c['vccs'] = VCCS(2, 4, 3, 4, gm=0.1)
c['Cgs'] = C(2, 4, c=1e-9)
c['rl'] = R(3, gnd, r=200.)
c['rd'] = R(4, gnd, r=10.)
return c
def wave_conv(w1,w2):
    '''Convolution of two waveforms.
    Convolves the two waveforms frequency-by-frequency: amplitudes are
    multiplied and their frequencies are added.
    '''
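    # Example: convolving tones at {-f1, f1} with tones at {-f2, f2} yields
    # components at +-(f1 + f2) and +-(f1 - f2); these are the mixing products
    # the Volterra analysis below relies on.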
x1vec=w1.get_x()[0]#frequency array
y1vec=w1.get_y() #values
x2vec=w2.get_x()[0]#frequency array
y2vec=w2.get_y() #values
wdict={}
for x1,y1 in zip(x1vec,y1vec):
for x2,y2 in zip(x2vec,y2vec):
            if (x1 + x2) in wdict:
wdict[x1+x2]+=y1*y2
else:
wdict[x1+x2]=y1*y2
    newx=np.sort(array(list(wdict.keys())))
newy=array([])
for x in newx:
newy=np.append(newy,wdict[x])
newwave=Waveform(newx,newy)
return newwave
def solve_by_freq(w,c,vac=True):
ac=AC(c)
xvec=w.get_x()[0]
vc=array([])
vout=array([])
if vac:
c['inl']=IS(3,4,iac=0.)
c['vs']=VS(2,1,vac=1.)
else:
c['vs']=VS(2,1,vac=0.)
c['inl']=IS(3,4,iac=1.)
res=ac.solve(xvec)
wvc=res.v(2,4)*w
wvout=res.v(3)*w
return wvc,wvout
def lna_volterra(w):
'''Convolution based 2nd and 3rd order volterra analysis.
w is a Waveform object with stimuli frequencies and amplitudes
Using (sparse) convolution to calculate nonlinear currents.
'''
c=build_lna_degen_gm()
K2gm=0.01
K3gm=0.001
# 1st order response of controlling voltage and output
vc1,vout_1=solve_by_freq(w,c)
#self convolution of 1st order spectrum
v1_c_v1=wave_conv(vc1,vc1)
#nonlinear current per frequency
INL2=K2gm*v1_c_v1
# 2nd order response of controlling voltage and output
vc2,vout_2=solve_by_freq(INL2,c,vac=False)
    #calculate 3rd order from K3gm
INL31=K3gm*wave_conv(vc1,v1_c_v1)
vc31,vout_31=solve_by_freq(INL31,c,vac=False)
#calculate 3rd order from K2gm
INL32=K2gm*wave_conv(vc2,vc1)
vc32,vout_32=solve_by_freq(INL32,c,vac=False)
return vout_1,vout_2,vout_31,vout_32
def test_volterra():
f=array([-1200.,-1000.,1000.,1200.])
v=array([2.,2.,2.,2.])
w=Waveform(f,v)
v1,v2,v31,v32=lna_volterra(w)
pylab.subplot(4,1,1)
pylab.stem(v1.get_x()[0],v1.get_y())
pylab.subplot(4,1,2)
pylab.stem(v2.get_x()[0],v2.get_y())
pylab.subplot(4,1,3)
pylab.stem(v31.get_x()[0],v31.get_y())
pylab.subplot(4,1,4)
pylab.stem(v32.get_x()[0],v32.get_y())
pylab.show()
if __name__ == '__main__':
test_volterra()
|
# -*- coding: UTF-8 -*-
from django.db import models
# Create your models here.
class Person(models.Model):
student_number = models.CharField(verbose_name = '学号', max_length = 12, unique = True)
name = models.CharField(verbose_name = '姓名', max_length = 10)
pinyin = models.CharField(verbose_name = '拼音', max_length = 25)
gender = models.CharField(verbose_name = '性别', choices = (('F', 'Female'), ('M', 'Male')), max_length = 2)
native_province = models.CharField(verbose_name = '籍贯', max_length = 10, blank = True)
dormitory = models.CharField(verbose_name = '寝室', blank = True, max_length = 7)
birthday = models.DateField(verbose_name = '生日', blank = True)
phone_number = models.CharField(verbose_name = '手机号码', max_length = 11, blank = True)
position = models.CharField(verbose_name = '职务', max_length = 20, blank = True)
participation = models.PositiveSmallIntegerField(verbose_name = '活动参与分', default = 0)
def __unicode__(self):
return self.name
def __str__(self):
return self.name
class Activity(models.Model):
name = models.CharField(verbose_name = '活动名称', max_length = 30)
date = models.DateField(verbose_name = '日期', blank = True)
time = models.TimeField(verbose_name = '开始时间', blank = True)
place = models.CharField(verbose_name = '地点', max_length = 15, blank = True)
content = models.TextField(verbose_name = '内容', blank = True)
participation = models.SmallIntegerField(verbose_name = '参与得分', default = 0)
participator = models.TextField(verbose_name = '参与者学号', blank = True)
images = models.TextField(verbose_name = '相关图片urls', blank = True)
def __unicode__(self):
return self.name
def __str__(self):
return self.name
class Feedback(models.Model):
summary = models.TextField(verbose_name = '反馈总结', max_length = 50)
details = models.TextField(verbose_name = '详细信息', max_length = 500, blank = True)
category = models.CharField(verbose_name = '反馈分类', choices = (('A', 'Advice'), ('B', 'Bug')),
max_length = 2, default = 'B')
contact = models.EmailField(verbose_name = '联系邮箱')
def __unicode__(self):
return self.summary
def __str__(self):
return self.summary
class Finance(models.Model):
"""
班费的单次记录
"""
income = models.FloatField(verbose_name = '收入', default = 0)
expense = models.FloatField(verbose_name = '支出', default = 0)
date = models.DateField(verbose_name = '日期')
event = models.TextField(verbose_name = '相关事件')
details = models.TextField(verbose_name = '详细信息', blank = True)
def __unicode__(self):
return '%s +%.2f -%.2f' % (self.event, self.income, self.expense)
# return '{} +{:.2f}, -{:.2f}'.format(self.event.encode('utf-8'), self.income, self.expense).encode('utf-8')
def __str__(self):
return '%s +%.2f -%.2f' % (self.event, self.income, self.expense)
# return '{} +{:.2f}, -{:.2f}'.format(self.event.encode('utf-8'), self.income, self.expense).encode('utf-8')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import numpy as np
from nominal.chi_squared_test import ChiSquaredTest
from nominal.fisher_test import FisherTest
class UnpairedTwoSampleTestOfNominalScale:
def test(self, data):
# check data length
if len(data.keys()) != 2:
print "len(data.keys()) should be two"
sys.exit()
elif len(data[(data.keys())[0]]) != len(data[(data.keys())[1]]):
print "len(data[(data.keys())[0]]) and len(data[(data.keys())[1]]) should be same"
sys.exit()
else:
"""
            Is there any difference between the number of people who satisfy Condition1 and answer Yes (a) and the number of people who satisfy Condition2 and answer Yes (c)?
            data = {"Condition1": [a, b], "Condition2": [c, d]}
            e.g. OrderedDict([('Illness', [52, 8]), ('Healthy', [48, 42])])
Yes No Total <= sum_row
--------------------------------------
Condition1 a b a+b
Condition2 c d c+d
--------------------------------------
Total a+c b+d n (= a+b+c+d)
^
|_ sum_column
"""
# calculate n
n = sum(data[(data.keys())[0]]) + sum(data[(data.keys())[1]])
sum_row = []
sum_column = []
# calculate sum_column
for i in range(len(data[(data.keys())[0]])):
tmp = 0.0
for j in data.keys():
tmp += data[j][i]
sum_column.append(tmp)
# calculate sum_row
for i in data.keys():
sum_row.append(sum(data[i]))
# calculate expected data
data_exp = []
for i in range(len(data[(data.keys())[0]])):
for j in range(len(data.keys())):
data_exp.append(sum_row[j] * sum_column[i] / float(n))
# select the way of calculation based on the minimum expected data (fisher's test or chi-square test)
"""
fisher test is used in a certain condition (Cochran's rule);
see http://aoki2.si.gunma-u.ac.jp/lecture/Cross/warning.html and
http://drmagician.exblog.jp/22086293/
"""
if min(data_exp) < 5:
# use fisher's test
                # followed this link: http://aoki2.si.gunma-u.ac.jp/lecture/Cross/Fisher.html
fisher_test = FisherTest()
p = fisher_test.test(data)
return p
else:
# use chi-square test
chi_squared_test = ChiSquaredTest()
                '''
                squared: test statistic (chi-squared value)
                p: p value
                dof: degrees of freedom
                ef: expected frequencies
                '''
squared, p, dof, ef = chi_squared_test.test(data)
return squared, p, dof, ef
if __name__ == '__main__':
pass
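    # A minimal usage sketch (the OrderedDict keys and counts are illustrative
    # and follow the 2x2 layout described in the docstring above):
    # from collections import OrderedDict
    # data = OrderedDict([('Illness', [52, 8]), ('Healthy', [48, 42])])
    # result = UnpairedTwoSampleTestOfNominalScale().test(data)
    # print(result)  # p value from Fisher's test, or (chi2, p, dof, expected) from the chi-squared test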
|
# Module to draw circles, rectangles and lines
import cv2
image = cv2.imread('lena.jpg')
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
cv2.line(image, pt1=(0, 0), pt2=(100, 200), color=GREEN)
cv2.line(image, pt1=(300, 200), pt2=(150, 150), color=RED, thickness=5)
cv2.rectangle(image, pt1=(20, 20), pt2=(120, 120), color=BLUE, thickness=10)
cv2.rectangle(image, pt1=(200, 50), pt2=(225, 125), color=GREEN, thickness=-1)
height, width, _ = image.shape
# image center
center = (width // 2, height // 2)
for radius in range(0, 175, 15):
cv2.circle(image, center, radius, RED)
cv2.imshow('Drawing over image', image)
cv2.waitKey(0)
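# Optionally persist the annotated image and clean up the display window
# (the output filename below is illustrative):
# cv2.imwrite('lena_annotated.jpg', image)
cv2.destroyAllWindows()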
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2018, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
from operator import or_
from os.path import join as path_join, dirname
from py2neo.matching import NodeMatcher
from py2neo.testing import IntegrationTestCase
from py2neo.data import Node, Relationship
class NodeMatcherTestCase(IntegrationTestCase):
def setUp(self):
self.graph.delete_all()
with open(path_join(dirname(__file__), "..", "resources", "movies.cypher")) as f:
cypher = f.read()
self.graph.run(cypher)
self.matcher = NodeMatcher(self.graph)
def tearDown(self):
self.graph.delete_all()
def test_can_match_by_label_key_value(self):
found = list(self.matcher.match("Person", name="Keanu Reeves"))
assert len(found) == 1
first = found[0]
assert isinstance(first, Node)
assert first["name"] == "Keanu Reeves"
assert first["born"] == 1964
def test_can_match_by_label_only(self):
found = list(self.matcher.match("Person"))
assert len(found) == 131
def test_can_match_all_nodes(self):
found = list(self.matcher.match())
assert len(found) == 169
def test_can_count_all_nodes(self):
count = len(self.matcher.match())
self.assertEqual(count, 169)
def test_can_match_by_label_and_multiple_values(self):
found = list(self.matcher.match("Person", name="Keanu Reeves", born=1964))
assert len(found) == 1
first = found[0]
assert isinstance(first, Node)
assert first["name"] == "Keanu Reeves"
assert first["born"] == 1964
def test_multiple_values_must_intersect(self):
found = list(self.matcher.match("Person", name="Keanu Reeves", born=1963))
assert len(found) == 0
def test_custom_conditions(self):
found = list(self.matcher.match("Person").where("_.name =~ 'K.*'"))
found_names = {actor["name"] for actor in found}
assert found_names == {'Keanu Reeves', 'Kelly McGillis', 'Kevin Bacon',
'Kevin Pollak', 'Kiefer Sutherland', 'Kelly Preston'}
def test_custom_conditions_with_parameters(self):
found = list(self.matcher.match("Person").where(("_.name = {1}", {"1": "Keanu Reeves"})))
assert len(found) == 1
first = found[0]
assert isinstance(first, Node)
assert first["name"] == "Keanu Reeves"
assert first["born"] == 1964
def test_special_parameters_gt(self):
year = 1985
found = list(self.matcher.match("Person", born__gt=year))
assert found
for actor in found:
assert actor["born"] > year
def test_special_parameters_gte(self):
year = 1985
found = list(self.matcher.match("Person", born__gte=year))
assert found
for actor in found:
assert actor["born"] >= year
def test_special_parameters_lt(self):
year = 1985
found = list(self.matcher.match("Person", born__lt=year))
assert found
for actor in found:
assert actor["born"] < year
def test_special_parameters_lte(self):
year = 1985
found = list(self.matcher.match("Person", born__lte=year))
assert found
for actor in found:
assert actor["born"] <= year
def test_special_parameters_exact(self):
year = 1985
found = list(self.matcher.match("Person", born__exact=year))
assert found
for actor in found:
assert actor["born"] == year
def test_special_parameters_not(self):
year = 1985
found = list(self.matcher.match("Person", born__not=year))
assert found
for actor in found:
assert actor["born"] != year
def test_special_parameters_regex(self):
found = list(self.matcher.match("Person", name__regex='K.*'))
found_names = {actor["name"] for actor in found}
assert found_names == {'Keanu Reeves', 'Kelly McGillis', 'Kevin Bacon',
'Kevin Pollak', 'Kiefer Sutherland', 'Kelly Preston'}
def test_special_parameters_startswith(self):
found = list(self.matcher.match("Person", name__startswith='K'))
for actor in found:
assert actor["name"].startswith("K")
def test_special_parameters_endswith(self):
found = list(self.matcher.match("Person", name__endswith='eeves'))
for actor in found:
assert actor["name"].endswith("eeves")
def test_special_parameters_contains(self):
found = list(self.matcher.match("Person", name__contains='shall'))
for actor in found:
assert "shall" in actor["name"]
def test_order_by(self):
found = list(self.matcher.match("Person").where("_.name =~ 'K.*'").order_by("_.name"))
found_names = [actor["name"] for actor in found]
assert found_names == ['Keanu Reeves', 'Kelly McGillis', 'Kelly Preston',
'Kevin Bacon', 'Kevin Pollak', 'Kiefer Sutherland']
def test_skip(self):
found = list(self.matcher.match("Person").where("_.name =~ 'K.*'").order_by("_.name").skip(2))
found_names = [actor["name"] for actor in found]
assert found_names == ['Kelly Preston', 'Kevin Bacon', 'Kevin Pollak', 'Kiefer Sutherland']
def test_limit(self):
found = list(self.matcher.match("Person").where("_.name =~ 'K.*'").order_by("_.name").skip(2).limit(2))
found_names = [actor["name"] for actor in found]
assert found_names == ['Kelly Preston', 'Kevin Bacon']
def test_multiple_custom_conditions(self):
found = list(self.matcher.match("Person").where("_.name =~ 'J.*'", "_.born >= 1960", "_.born < 1970"))
found_names = {actor["name"] for actor in found}
assert found_names == {'James Marshall', 'John Cusack', 'John Goodman', 'John C. Reilly', 'Julia Roberts'}
def test_one(self):
the_one = self.matcher.match("Person").where("_.name =~ 'K.*'").order_by("_.name").first()
assert the_one["name"] == 'Keanu Reeves'
def test_tuple_property_value(self):
found = list(self.matcher.match("Person", name=("Kevin Bacon", "Kiefer Sutherland")))
found_names = {actor["name"] for actor in found}
assert found_names == {"Kevin Bacon", "Kiefer Sutherland"}
def test_set_property_value(self):
found = list(self.matcher.match("Person", name={"Kevin Bacon", "Kiefer Sutherland"}))
found_names = {actor["name"] for actor in found}
assert found_names == {"Kevin Bacon", "Kiefer Sutherland"}
def test_frozenset_property_value(self):
found = list(self.matcher.match("Person", name=frozenset(["Kevin Bacon", "Kiefer Sutherland"])))
found_names = {actor["name"] for actor in found}
assert found_names == {"Kevin Bacon", "Kiefer Sutherland"}
class RelationshipMatchNodeCombinationsTestCase(IntegrationTestCase):
def setUp(self):
TO = Relationship.type("TO")
self.graph.delete_all()
a = self.a = Node()
b = self.b = Node()
c = self.c = Node()
d = self.d = Node()
self.r = [TO(a, b), TO(b, a), TO(b, c), TO(b, b), TO(c, d), TO(a, d)]
self.graph.create(reduce(or_, self.r))
def test_a_to_b(self):
match = self.graph.match(nodes=(self.a, self.b))
self.assertEqual(len(match), 1)
r = list(match)
self.assertEqual(len(r), 1)
self.assertSetEqual(set(r), {self.r[0]})
def test_a_to_x(self):
match = self.graph.match(nodes=(self.a, None))
self.assertEqual(len(match), 2)
r = list(match)
self.assertEqual(len(r), 2)
self.assertSetEqual(set(r), {self.r[0], self.r[5]})
def test_x_to_b(self):
match = self.graph.match(nodes=(None, self.b))
self.assertEqual(len(match), 2)
r = list(match)
self.assertEqual(len(r), 2)
self.assertSetEqual(set(r), {self.r[0], self.r[3]})
def test_x_to_x(self):
match = self.graph.match(nodes=(None, None))
self.assertEqual(len(match), 6)
r = list(match)
self.assertEqual(len(r), 6)
self.assertSetEqual(set(r), {self.r[0], self.r[1], self.r[2], self.r[3], self.r[4], self.r[5]})
def test_a_and_b(self):
match = self.graph.match(nodes={self.a, self.b})
self.assertEqual(len(match), 2)
r = list(match)
self.assertEqual(len(r), 2)
self.assertSetEqual(set(r), {self.r[0], self.r[1]})
def test_a_only(self):
match = self.graph.match(nodes={self.a})
self.assertEqual(len(match), 3)
r = list(match)
self.assertEqual(len(r), 3)
self.assertSetEqual(set(r), {self.r[0], self.r[1], self.r[5]})
def test_b_only(self):
match = self.graph.match(nodes={self.b})
self.assertEqual(len(match), 4)
r = list(match)
self.assertEqual(len(r), 4)
self.assertSetEqual(set(r), {self.r[0], self.r[1], self.r[2], self.r[3]})
def test_any(self):
match = self.graph.match(nodes=set())
self.assertEqual(len(match), 6)
r = list(match)
self.assertEqual(len(r), 6)
self.assertSetEqual(set(r), {self.r[0], self.r[1], self.r[2], self.r[3], self.r[4], self.r[5]})
|
#FastMGWR MPI Script
#Author: Ziqi Li
#Email: liziqi1992@gmail.com
import math
import numpy as np
from mpi4py import MPI
from scipy.spatial.distance import cdist,pdist
import argparse
from copy import deepcopy
from FastGWR import FastGWR
class FastMGWR(FastGWR):
"""
FastMGWR class.
Parameters
----------
    comm : MPI communicator initialized with mpi4py.
    parser : The parser object containing the model arguments.
Attributes
----------
    comm : MPI communicator initialized with mpi4py.
    parser : The parser object containing the model arguments.
y : array
n*1, dependent variable
X : array
n*k, independent variables (include constant, if any)
coords : array
n*2, collection of n sets of (x,y) coordinates used for
calibration locations
n : int
number of observations
k : int
number of independent variables
minbw : float
lower-bound bandwidth in the search range
maxbw : float
upper-bound bandwidth in the search range
"""
def __init__(self, comm, parser):
"""
Initialize class
"""
FastGWR.__init__(self, comm, parser)
        # Standardizing the data
if self.constant:
stds = np.std(self.X, axis=0)
stds[0] = 1
self.X = (self.X - np.mean(self.X,axis=0))/stds
self.X[:,0] = 1
else:
self.X = (self.X - np.mean(self.X,axis=0))/np.std(self.X, axis=0)
self.y = (self.y - np.mean(self.y,axis=0))/np.std(self.y, axis=0)
def backfitting(self):
"""
Backfitting MGWR model and obtain parameter estimates
and covariate-specific bandwidths.
see Fotheringham et al. 2017. Annals of AAG.
"""
if self.comm.rank ==0:
print("MGWR Backfitting...",flush=True)
print("Data are standardized",flush=True)
        # Initialization
betas,bw = self.fit(init_mgwr=True,mgwr=True)
self.bw_init = bw
if self.comm.rank ==0:
print("Initialization Done...",flush=True)
XB = betas*self.X
err = self.y.reshape(-1) - np.sum(XB,axis=1)
bws = [None]*self.k
bw_stable_counter = 0
bws_history = []
for mgwr_iters in range(1,201):
newXB = np.empty(XB.shape, dtype=np.float64)
newbetas = np.empty(XB.shape, dtype=np.float64)
for j in range(self.k):
temp_y = (XB[:,j] + err).reshape(-1,1)
temp_X = self.X[:,j].reshape(-1,1)
if bw_stable_counter >= 5:
                    # If all bandwidths have stayed unchanged for bws_same_times (default 5) consecutive backfitting iterations, reuse them instead of searching again.
bw_j = bws[j]
betas = self.mpi_gwr_fit(temp_y,temp_X,bw_j,final=True,mgwr=True)
else:
betas,bw_j = self.fit(y=temp_y,X=temp_X,init_mgwr=False,mgwr=True)
XB_j = (betas*temp_X).reshape(-1)
err = temp_y.reshape(-1) - XB_j
newXB[:,j] = XB_j
newbetas[:,j] = betas.reshape(-1)
bws[j] = bw_j
if (mgwr_iters > 1) and np.all(bws_history[-1] == bws):
bw_stable_counter += 1
else:
bw_stable_counter = 0
bws_history.append(deepcopy(bws))
num = np.sum((newXB - XB)**2) / self.n
den = np.sum(np.sum(newXB, axis=1)**2)
score = (num / den)**0.5
XB = newXB
if self.comm.rank ==0:
print("Iter:",mgwr_iters,"SOC:","{:.2e}".format(score),flush=True)
print("bws:",bws,flush=True)
if score < 1e-5:
break
self.bws_history = np.array(bws_history)
self.RSS = np.sum(err**2)
self.TSS = np.sum((self.y - np.mean(self.y))**2)
self.R2 = 1 - self.RSS/self.TSS
self.err = err
self.params = newbetas
if self.comm.rank == 0 and self.estonly:
header="index,residual,"
varNames = np.genfromtxt(self.fname, dtype=str, delimiter=',',names=True, max_rows=1).dtype.names[3:]
if self.constant:
varNames = ['intercept'] + list(varNames)
for x in varNames:
header += ("b_"+x+',')
self.output_diag(None,None,self.R2)
index = np.arange(self.n).reshape(-1,1)
output = np.hstack([index,self.err.reshape(-1,1),self.params])
self.save_results(output,header)
def _chunk_compute_R(self, chunk_id=0):
"""
Compute MGWR inference by chunks to reduce memory footprint.
See Li and Fotheringham, 2020. IJGIS and Yu et al., 2019. GA.
"""
n = self.n
k = self.k
n_chunks = self.n_chunks
chunk_size = int(np.ceil(float(n / n_chunks)))
ENP_j = np.zeros(k)
CCT = np.zeros((n, k))
chunk_index = np.arange(n)[chunk_id * chunk_size:(chunk_id + 1) *
chunk_size]
init_pR = np.zeros((n, len(chunk_index)))
init_pR[chunk_index, :] = np.eye(len(chunk_index))
pR = np.zeros((n, len(chunk_index),k)) #partial R: n by chunk_size by k
for i in range(n):
wi = self.build_wi(i, self.bw_init).reshape(-1, 1)
xT = (self.X * wi).T
P = np.linalg.solve(xT.dot(self.X), xT).dot(init_pR).T
pR[i, :, :] = P * self.X[i]
err = init_pR - np.sum(pR, axis=2) #n by chunk_size
for iter_i in range(self.bws_history.shape[0]):
for j in range(k):
pRj_old = pR[:, :, j] + err
Xj = self.X[:, j]
n_chunks_Aj = n_chunks
chunk_size_Aj = int(np.ceil(float(n / n_chunks_Aj)))
for chunk_Aj in range(n_chunks_Aj):
chunk_index_Aj = np.arange(n)[chunk_Aj * chunk_size_Aj:(
chunk_Aj + 1) * chunk_size_Aj]
pAj = np.empty((len(chunk_index_Aj), n))
for i in range(len(chunk_index_Aj)):
index = chunk_index_Aj[i]
wi = self.build_wi(index, self.bws_history[iter_i, j]).reshape(-1)
xw = Xj * wi
pAj[i, :] = Xj[index] / np.sum(xw * Xj) * xw
pR[chunk_index_Aj, :, j] = pAj.dot(pRj_old)
err = pRj_old - pR[:, :, j]
for j in range(k):
CCT[:, j] += ((pR[:, :, j] / self.X[:, j].reshape(-1, 1))**2).sum(axis=1)
for i in range(len(chunk_index)):
ENP_j += pR[chunk_index[i], i, :]
return ENP_j, CCT
def mgwr_fit(self,n_chunks=2):
"""
Fit MGWR model and output results
"""
if self.estonly:
return
if self.comm.rank ==0:
print("Computing Inference with",n_chunks,"Chunk(s)",flush=True)
self.n_chunks = self.comm.size * n_chunks
self.chunks = np.arange(self.comm.rank*n_chunks, (self.comm.rank+1)*n_chunks)
ENP_list = []
CCT_list = []
for r in self.chunks:
ENP_j_r, CCT_r = self._chunk_compute_R(r)
ENP_list.append(ENP_j_r)
CCT_list.append(CCT_r)
ENP_list = np.array(self.comm.gather(ENP_list, root=0))
CCT_list = np.array(self.comm.gather(CCT_list, root=0))
if self.comm.rank == 0:
ENP_j = np.sum(np.vstack(ENP_list), axis=0)
CCT = np.sum(np.vstack(CCT_list), axis=0)
header="index,residual,"
varNames = np.genfromtxt(self.fname, dtype=str, delimiter=',',names=True, max_rows=1).dtype.names[3:]
if self.constant:
varNames = ['intercept'] + list(varNames)
for x in varNames:
header += ("b_"+x+',')
for x in varNames:
header += ("se_"+x+',')
trS = np.sum(ENP_j)
sigma2_v1 = self.RSS/(self.n-trS)
aicc = self.compute_aicc(self.RSS, trS)
self.output_diag(aicc,ENP_j,self.R2)
bse = np.sqrt(CCT*sigma2_v1)
index = np.arange(self.n).reshape(-1,1)
output = np.hstack([index,self.err.reshape(-1,1),self.params,bse])
self.save_results(output,header)
return
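# A minimal usage sketch (the launch command and argument handling are
# assumptions; the actual CLI options come from FastGWR's parser, which is
# defined elsewhere):
#   mpiexec -np 4 python fastmgwr_script.py ...
# comm = MPI.COMM_WORLD
# parser = argparse.ArgumentParser()
# model = FastMGWR(comm, parser)
# model.backfitting()
# model.mgwr_fit(n_chunks=2)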
|
from aiogram.dispatcher import FSMContext
from aiogram.types import CallbackQuery, InlineKeyboardMarkup, InlineKeyboardButton
from data.text import text, confirm_payment_button_text
from keyboards.inline.plan_keyboards import plansMenu
from loader import dp, bot
from utils.paypal import create_token_paypal
from utils.stripe import create_link_stripe
@dp.callback_query_handler(text='paypal')
async def paypal(call: CallbackQuery, state: FSMContext):
async with state.proxy() as data:
bot_name = dict(await bot.get_me())['username']
token = await create_token_paypal(data['plans_price'], bot_name, data['currency'])
pay_button = InlineKeyboardMarkup(inline_keyboard=[
[
InlineKeyboardButton(text=confirm_payment_button_text['subscribe'], url=f'https://www.paypal.com/checkoutnow?token={token[0]}'),
InlineKeyboardButton(text=confirm_payment_button_text['confirm'], callback_data='confirm_paypal')
]
])
data['pay-id'] = token[-1]
data['payment_method'] = 'paypal'
await call.message.answer(text=text['pay_text'], reply_markup=pay_button)
@dp.callback_query_handler(text='stripe')
async def stripe(call: CallbackQuery, state: FSMContext):
async with state.proxy() as data:
bot_name = dict(await bot.get_me())['username']
print(data)
link = await create_link_stripe(int(float(data['plans_price']) * 100), bot_name, data['currency'], data['plan_type'])
pay_button = InlineKeyboardMarkup(inline_keyboard=[
[
InlineKeyboardButton(text=confirm_payment_button_text['subscribe'], url=link[0]),
InlineKeyboardButton(text=confirm_payment_button_text['confirm'], callback_data='confirm_stripe')
]
])
data['intenet_id'] = link[-1]
data['payment_method'] = 'stripe'
await call.message.answer(text=text['pay_text'], reply_markup=pay_button)
@dp.callback_query_handler(text='back')
async def back_button(call: CallbackQuery):
await call.message.edit_text(text=text['plan'], reply_markup=await plansMenu())
await call.answer("Cancel")
# await call.answer(cache_time=60)
|
from bungiesearch.fields import DateField, StringField
from bungiesearch.indices import ModelIndex
from core.models import Article, ManangedButEmpty, User
class ArticleIndex(ModelIndex):
effective_date = DateField(eval_as='obj.created if obj.created and obj.published > obj.created else obj.published')
meta_data = StringField(eval_as='" ".join([fld for fld in [obj.link, str(obj.tweet_count), obj.raw] if fld])')
more_fields = StringField(eval_as='"some value"')
class Meta:
model = Article
updated_field = 'updated'
exclude = ('raw', 'missing_data', 'negative_feedback', 'positive_feedback', 'popularity_index', 'source_hash')
hotfixes = {'updated': {'null_value': '2013-07-01'},
'title': {'boost': 1.75},
'description': {'boost': 1.35},
'full_text': {'boost': 1.125}}
default = False
class UserIndex(ModelIndex):
effective_date = DateField(eval_as='obj.created if obj.created and obj.published > obj.created else obj.published')
meta_data = StringField(eval_as='" ".join([fld for fld in [obj.link, str(obj.tweet_count), obj.raw] if fld])')
more_fields = StringField(eval_as='"some value"')
class Meta:
model = User
id_field = 'user_id'
updated_field = 'updated'
exclude = ('raw', 'missing_data', 'negative_feedback', 'positive_feedback', 'popularity_index', 'source_hash')
hotfixes = {'updated': {'null_value': '2013-07-01'},
'title': {'boost': 1.75},
'about': {'boost': 1.35},
'full_text': {'boost': 1.125}}
default = False
class EmptyIndex(ModelIndex):
def matches_indexing_condition(self, item):
return False
class Meta:
model = ManangedButEmpty
exclude = ('field_description',)
optimize_queries = True
|
from warnings import warn
import kts.stl.misc
from kts.validation.leaderboard import leaderboard as lb
from kts.validation.split import Refiner
from kts.validation.validator import Validator
def assert_splitters(exps):
"""
Args:
exps:
Returns:
"""
all_splitters = set()
for exp in exps:
all_splitters.add(repr(exp.splitter))
if len(all_splitters) > 1:
raise Exception(
f"Experiment {repr(exp.identifier)} has {exp.splitter} instead of {all_splitters.pop()}"
)
def assert_metrics(exps):
"""
Args:
exps:
Returns:
"""
all_metrics = set()
for exp in exps:
if "source" in dir(exp.validator.metric):
all_metrics.add(exp.validator.metric.source)
else:
all_metrics.add(exp.validator.metric.__name__)
if len(all_metrics) > 1:
warn(
f"Different metrics were used for scoring provided experiments: {all_metrics}."
f" The first one will be used unless you specify it explicitly.")
def stack(ids,
safe=True,
inner_splitter=None,
metric=None,
validator_class=Validator):
"""
Args:
ids:
safe: (Default value = True)
inner_splitter: (Default value = None)
metric: (Default value = None)
validator_class: (Default value = Validator)
Returns:
"""
experiments = lb[ids]
if safe:
assert_splitters(experiments)
outer_splitter = experiments[0].validator.splitter
assert_metrics(experiments)
if inner_splitter is None:
inner_splitter = experiments[0].validator.splitter
refiner = Refiner(outer_splitter, inner_splitter)
if metric is None:
metric = experiments[0].validator.metric
fc_stack = kts.stl.misc.stack(ids)
val_stack = validator_class(refiner, metric)
return val_stack, fc_stack
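# A minimal usage sketch (the experiment identifiers are illustrative and must
# exist on the local leaderboard):
# val_stack, fc_stack = stack(['ABCDEF', 'GHIJKL'])
# val_stack and fc_stack can then be used like any other validator / feature constructor pair.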
|
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.views import generic
from django.conf import settings
from django.utils import timezone
import datetime
from .models import Artist
from centinela.models import Slider
from .forms import ArtistForm
class ArtistCreate(generic.CreateView):
form_class = ArtistForm
template_name = 'megaXID/artist_form.html'
success_url = '/megaXID/artist/success'
def get_context_data(self, **kwargs):
context = super(ArtistCreate, self).get_context_data(**kwargs)
sliders = Slider.objects.filter(location='news').order_by('order')
context['active_sliders_list'] = sliders
context['site'] = settings.CENTINELA
return context
class SuccessView(generic.TemplateView):
template_name = 'megaXID/register_artist_success.html'
def get_context_data(self):
context = super(SuccessView, self).get_context_data()
sliders = Slider.objects.filter(location='news').order_by('order')
context['active_sliders_list'] = sliders
context['site'] = settings.CENTINELA
return context
# Create your views here.
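# A minimal URLconf sketch for wiring these views (paths and names are
# illustrative; the project's actual routing lives in its urls.py):
# from django.conf.urls import url
# urlpatterns = [
#     url(r'^megaXID/artist/add/$', ArtistCreate.as_view(), name='artist-add'),
#     url(r'^megaXID/artist/success$', SuccessView.as_view(), name='artist-success'),
# ]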
|
"""module containing all testcases"""
|
import serial, pygame, json, os
with open("full_map_list.json", 'r') as fd:
mapping=json.load(fd)
pressed = []
shift_key = mapping["default"].index("shift")
code_key = mapping["default"].index("code")
text=""
with serial.Serial("COM10", 2000000) as conn:
conn.readline()
conn.readline()
while 1:
        try:
            data = [not int(a) for a in conn.readline().decode("ascii", "ignore").strip().split(" ")]
except ValueError as e:
print("WARN: RX Error: "+str(e))
continue
if len(data)!=64:
print("WARN: Bad packet")
continue
matrix = []
for i in range(64):
lc_key = mapping["default"][i]
if data[i] and lc_key not in pressed:
if lc_key=="backspace":
text = text[:-1]
elif lc_key=="return":
text+="\n"
elif len(lc_key)==1:
text+=mapping["shifted" if "shift" in pressed else "default"][i]
os.system("cls")
print(text)
# print(pressed)
pressed.append(lc_key)
elif not data[i] and lc_key in pressed:
pressed.remove(lc_key)
|
#!/usr/local/bin/python3
import socket
from random import randint
print(0)
HOST = '' # Symbolic name meaning all available interfaces
print(1)
PORT = 50007 # Arbitrary non-privileged port
print(2)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print(3)
s.bind((HOST, PORT))
print(4)
s.listen(1)
print(5)
conn, addr = s.accept()
print(6)
print('Connected by: ' + str(addr))
while True:
data = conn.recv(1024)
if randint(0,1000) == 423:
print(type(data))
if not data:
break
conn.sendall("You sent: " + str(data))
conn.close()
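# A minimal client sketch for exercising the echo server above (run in a
# separate process; host/port mirror the values defined at the top):
# with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as c:
#     c.connect(('127.0.0.1', 50007))
#     c.sendall(b'hello')
#     print(c.recv(1024))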
|
import logging
from functools import wraps
from bs4 import BeautifulSoup
from telegram import ParseMode, Update
from telegram.ext import CallbackContext
# Require x non-command messages between each /rules etc.
RATE_LIMIT_SPACING = 5
def get_reply_id(update):
if update.message and update.message.reply_to_message:
return update.message.reply_to_message.message_id
return None
def reply_or_edit(update, context, text):
chat_data = context.chat_data
if update.edited_message:
chat_data[update.edited_message.message_id].edit_text(text,
parse_mode=ParseMode.HTML,
disable_web_page_preview=True)
else:
issued_reply = get_reply_id(update)
if issued_reply:
chat_data[update.message.message_id] = context.bot.sendMessage(update.message.chat_id, text,
reply_to_message_id=issued_reply,
parse_mode=ParseMode.HTML,
disable_web_page_preview=True)
else:
chat_data[update.message.message_id] = update.message.reply_text(text,
parse_mode=ParseMode.HTML,
disable_web_page_preview=True)
def get_text_not_in_entities(html):
soup = BeautifulSoup(html, 'html.parser')
return ' '.join(soup.find_all(text=True, recursive=False))
def build_menu(buttons,
n_cols,
header_buttons=None,
footer_buttons=None):
menu = [buttons[i:i + n_cols] for i in range(0, len(buttons), n_cols)]
if header_buttons:
menu.insert(0, header_buttons)
if footer_buttons:
menu.append(footer_buttons)
return menu
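# Example (illustrative): build_menu([b1, b2, b3], n_cols=2) -> [[b1, b2], [b3]],
# which can be passed to InlineKeyboardMarkup(menu) to lay the buttons out in rows.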
def set_rate_limit(rate):
global RATE_LIMIT_SPACING
RATE_LIMIT_SPACING = rate
def rate_limit_tracker(update: Update, context: CallbackContext):
data = context.chat_data.get('rate_limit', {})
for key in data.keys():
data[key] += 1
def rate_limit(f):
"""
Rate limit command so that RATE_LIMIT_SPACING non-command messages are
required between invocations.
"""
@wraps(f)
def wrapper(update, context, *args, **kwargs):
# Get rate limit data
try:
data = context.chat_data['rate_limit']
except KeyError:
data = context.chat_data['rate_limit'] = {}
# If we have not seen two non-command messages since last of type `f`
if data.get(f, RATE_LIMIT_SPACING) < RATE_LIMIT_SPACING:
logging.debug('Ignoring due to rate limit!')
return
data[f] = 0
return f(update, context, *args, **kwargs)
return wrapper
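# Example (illustrative): applying the decorator to a command handler so the
# command is silently ignored until enough non-command messages have passed:
# @rate_limit
# def rules(update: Update, context: CallbackContext):
#     update.message.reply_text('...')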
|
import numpy as np
import pickle
import glob
import cv2
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
'''
# Applying the EigenFaces technique to cat faces
Some reference sources:
https://www.youtube.com/watch?v=_lY74pXWlS8&list=PLye4FtK3gI1Szyc_i8ygGVDiQqV_c0o6j&index=90
https://medium.com/@williangp/reconhecimento-de-padr%C3%B5es-eigenfaces-e4cef8f04919
'''
def adapta_imagem(img, shape, max_val=1):
    '''
    Adapts the image to the correct range and dtype.
    '''
img = (img - img.min())/(img.max() - img.min())
if max_val > 1:
img *= max_val
return img.reshape(shape).astype(np.uint8)
else:
return img.reshape(shape)
# Loading the images -> using only 5000 images [faster/lighter]
images = np.load('cats_rgb.npy')[:5000]
# Flattening the images into vectors. In the resulting matrix, each row is an image and the columns are its pixels
images = np.array([file.flatten() for file in images])
# Displaying a sample image
plt.figure()
plt.imshow(images[5].reshape(64,64,3), cmap='gray')
plt.show()
# Computing and displaying the mean image
mean_img = np.mean(images, axis=0)
plt.figure()
plt.imshow(adapta_imagem(mean_img, (64, 64, 3), 255))
plt.show()
# Standardizing the pixel values [pre-processing for PCA]
scaler = StandardScaler()
images = scaler.fit_transform(images)
# Instantiating and fitting the PCA and obtaining the eigenvectors. 10 eigenvectors are chosen here, but 143 would be
# needed to capture 90% of the total variance; with 10 eigenvectors, 68.89% is captured.
pca = PCA(n_components=10)
imgs_pca = pca.fit_transform(images)
autovetores = pca.components_  # Eigenvectors
print(autovetores.shape)
print(f"Total variance explained by the components/eigenvectors: {pca.explained_variance_ratio_.sum()*100}[%]")
# Displaying the first three eigenvectors and the last one
plt.figure()
plt.subplot(221)
plt.imshow(adapta_imagem(autovetores[0], (64, 64, 3), 255), cmap='gray')
plt.subplot(222)
plt.imshow(adapta_imagem(autovetores[1], (64, 64, 3), 255), cmap='gray')
plt.subplot(223)
plt.imshow(adapta_imagem(autovetores[2], (64, 64, 3), 255), cmap='gray')
plt.subplot(224)
plt.imshow(adapta_imagem(autovetores[-1], (64, 64, 3), 255), cmap='gray')
plt.show()
# Saving the mean image and the eigenvectors
np.save('eigenvectors.npy', autovetores)
np.save('mean.npy', mean_img)
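# A minimal reconstruction sketch (assumes `scaler`, `pca` and `imgs_pca` are
# still in memory from the steps above): project one image onto the 10
# eigenvectors and map it back to pixel space.
# rec = scaler.inverse_transform(pca.inverse_transform(imgs_pca[5:6]))[0]
# plt.imshow(adapta_imagem(rec, (64, 64, 3)))
# plt.show()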
|
from tests.customlist_tests.base.customlist_test_base import CustomListTestBase
class CustomListUnderboundTests(CustomListTestBase):
def test_customListUnderbound_whenMultipleNumbers_shouldReturnTheIndexOfTheMinElement(self):
custom_list = self.setup_list(1, 2, 3)
result = custom_list.underbound()
self.assertEqual(0, result)
def test_customListUnderbound_whenNumbersAndLenObjects_shouldReturnTheIndexOfTheMinElement(self):
custom_list = self.setup_list('1', 2, 3)
result = custom_list.underbound()
self.assertEqual(0, result)
|
#
# Copyright John Reid 2009
#
"""
Code to model transcriptional programs.
"""
from shared import *
import os, csv
def yield_genes_from_csv(f):
"""
Load the gene names from the file object.
"""
for l in csv.reader(f, delimiter=','):
yield l[0]
class BasicTranscriptionalProgram(object):
"""
Holds basic information about a transcriptional program.
"""
def __init__(self, k, factors, targets):
"Construct a transcriptional program."
self.k = k
"The index of this transcriptional program."
self.factors = factors
"The factors in this program."
self.targets = targets
"The targets of this program."
def write_files(self, ensembl_names):
"""
Write the factors and the targets to csv files with their names.
"""
write_gene_set_with_names(
open(os.path.join(get_programs_dir(), '%03d-factors.csv' % self.k), 'w'),
self.factors,
ensembl_names
)
write_gene_set_with_names(
open(os.path.join(get_programs_dir(), '%03d-targets.csv' % self.k), 'w'),
self.targets,
ensembl_names
)
def tp_from_directory(directory, k):
"""
Construct a transcriptional program from the factor and target files in the given directory.
"""
return BasicTranscriptionalProgram(
k,
set(yield_genes_from_csv(open(os.path.join(directory, '%03d-factors.csv' % k)))),
set(yield_genes_from_csv(open(os.path.join(directory, '%03d-targets.csv' % k))))
)
def tp_from_dpm_summary(summariser, factor_universe, target_universe, k):
"""
Construct a transcriptional program from the summary of a DPM.
"""
return BasicTranscriptionalProgram(
k,
set(str(factor_universe[w]) for w in summariser.statistics.words_for_topic(k)),
set(str(target_universe[d]) for d in summariser.statistics.documents_for_topic(k))
)
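# A minimal usage sketch (the program index and the ensembl_names mapping are
# illustrative; get_programs_dir() is assumed to come from the star import above):
# program = tp_from_directory(get_programs_dir(), 3)
# program.write_files(ensembl_names)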
|
# Two modes:
# (1) interactive: Everything runs in one thread. Long-running event handlers block the update.
# (2) separated: non-main thread for all non-mainloop code.
"""
We match the Tk hierarchy of classes onto the ACM hierarchy as follows:
Each application (i.e. instance of TkBackend) has exactly one Tk root (i.e. one tkinter.Tk()).
This top widget takes care of everything.
After instantiation of the first instance of a TkBackend, the tkinter._default_root will be set.
Except in extreme circumstances, there should only ever be one instance of TkBackend.
Each GWindow is associated to a single Canvas. GWindows beyond the first will open a new Toplevel window.
## EXIT BEHAVIOR
(A) At program termination, Tk object is not destroyed, and "program" isn't done until Tk window is closed.
(B) At program termination, Tk object is destroyed and program is finished.
"""
# TODO(sredmond): For all methods that implicitly operate on the most recent
# GWindow, allow the client to pass an optional GWindow on which to operate.
# It is discouraged to instantiate multiple instances of Tk graphics
from campy.private.backends.backend_base import GraphicsBackendBase
from campy.private.backends.tk.menu import setup_menubar
import atexit
import functools
import logging
import pathlib
import tkinter as tk
import tkinter.font as tkfont
import tkinter.filedialog as tkfiledialog
import tkinter.messagebox as tkmessagebox
import tkinter.simpledialog as tksimpledialog
import threading
import sys
# TODO(sredmond): What magic is this?
try:
from _tkinter import DONT_WAIT
except ImportError:
DONT_WAIT = 2
# Load the PIL PhotoImage class if possible, otherwise use tkinter's.
try:
from PIL.ImageTk import PhotoImage
except ImportError:
from tkinter import PhotoImage
# Module-level logger.
logger = logging.getLogger(__name__)
class TkWindow:
"""The Tk equivalent to a :class:`GWindow`."""
def __init__(self, root, width, height, parent):
self._parent = parent
self._master = tk.Toplevel(root)
self._closed = False
self._master.protocol("WM_DELETE_WINDOW", self._close)
self._master.resizable(width=False, height=False) # Disable resizing by default.
# Raise the master to be the top window.
self._master.wm_attributes("-topmost", 1) # TODO(sredmond): Is this really necessary?
self._master.lift()
self._master.focus_force()
# TODO(sredmond): On macOS, multiple backends might race to set the process-level menu bar.
setup_menubar(self._master)
self._frame = tk.Frame(self._master) #, bd=2, bg='red'
self._canvas = tk.Canvas(self._frame, width=width, height=height, highlightthickness=0, bd=0)
self._canvas.pack(fill=tk.BOTH, expand=True)
self._frame.pack(fill=tk.BOTH, expand=True)
self._frame.update()
self._master.update()
        # Empty side regions for interaction. Their ordering and layout depend
        # on the order of construction, so start them off empty.
self._top = None
self._bottom = None
self._left = None
self._right = None
@property
def canvas(self):
return self._canvas
@property
def top(self):
"""Get the top bar for interactors, creating it if needed."""
if not self._top:
self._top = tk.Frame(self._master)
self._top.pack(fill=tk.X, side=tk.TOP)
self._frame.pack_forget()
self._frame.pack(fill=tk.BOTH, expand=True)
return self._top
@property
def bottom(self):
"""Get the bottom bar for interactors, creating it if needed."""
if not self._bottom:
self._bottom = tk.Frame(self._master)
self._bottom.pack(fill=tk.X, side=tk.BOTTOM)
self._frame.pack_forget()
self._frame.pack(fill=tk.BOTH, expand=True)
return self._bottom
@property
def left(self):
"""Get the left bar for interactors, creating it if needed."""
if not self._left:
self._left = tk.Frame(self._master)
self._left.pack(fill=tk.Y, side=tk.LEFT)
self._frame.pack_forget()
self._frame.pack(fill=tk.BOTH, expand=True)
return self._left
@property
def right(self):
"""Get the right bar for interactors, creating it if needed."""
if not self._right:
self._right = tk.Frame(self._master)
self._right.pack(fill=tk.Y, side=tk.LEFT)
self._frame.pack_forget()
self._frame.pack(fill=tk.BOTH, expand=True)
return self._right
def clear(self):
        # Delete all canvas elements and all interactors, but leave the canvas and interactor regions in place.
self.clear_canvas()
        # ``.children`` maps widget names to widgets, so use winfo_children() to get the widgets themselves.
        if self._top:
            for child in self._top.winfo_children():
                child.destroy()
        if self._bottom:
            for child in self._bottom.winfo_children():
                child.destroy()
        if self._left:
            for child in self._left.winfo_children():
                child.destroy()
        if self._right:
            for child in self._right.winfo_children():
                child.destroy()
def clear_canvas(self):
# Delete all canvas elements, but leave the canvas (and all interactor regions) in place.
self.canvas.delete('all')
def _close(self):
if self._closed: return
self._closed = True
self._master.destroy()
# TODO(sredmond): Consider autoflushing like Zelle.
self._parent._remove_tkwin(self) # Tell the parent that we have closed.
class TkBackend(GraphicsBackendBase):
def __init__(self):
self._root = tk.Tk() # A wrapper around a new Tcl interpreter.
self._root.withdraw() # Removes the window from the screen (without destroying it).
atexit.register(self._root.mainloop) # TODO(sredmond): For debugging only.
self._windows = [] # TODO(sredmond): Use winfo_children().
def _update_active_window(self, window):
# Optimization: Don't mess with the windows when there's only one.
if len(self._windows) == 1: return
window._master.lift()
# Move the window to top of the stack.
self._windows.remove(window)
self._windows.append(window)
def _remove_tkwin(self, window):
if not window._closed:
window._close()
try:
self._windows.remove(window)
except ValueError:
pass
if not self._windows:
self._shutdown()
def _shutdown(self):
self._root.destroy()
######################
# GWindow lifecycle. #
######################
def gwindow_constructor(self, gwindow, width, height, top_compound, visible=True):
gwindow._tkwin = TkWindow(self._root, width, height, parent=self)
gwindow._tkwin._gwindow = gwindow # Circular reference so a TkWindow knows its originating GWindow.
self._windows.append(gwindow._tkwin)
# HACK: Get nice titles built in.
self.gwindow_set_window_title(gwindow, gwindow.title)
def gwindow_close(self, gwindow):
self._remove_tkwin(gwindow._tkwin)
def gwindow_delete(self, gwindow):
self._remove_tkwin(gwindow._tkwin)
    def gwindow_exit_graphics(self):
        # Close every remaining window; _remove_tkwin shuts the backend down once none are left.
        for window in list(self._windows):
            self._remove_tkwin(window)
def gwindow_set_exit_on_close(self, gwindow, exit_on_close): pass
####################
# GWindow drawing. #
####################
def gwindow_clear(self, gwindow):
self._update_active_window(gwindow._tkwin)
gwindow._tkwin.clear()
def gwindow_clear_canvas(self, gwindow):
self._update_active_window(gwindow._tkwin)
gwindow._tkwin.clear_canvas()
def gwindow_repaint(self, gwindow):
# Update any unresolved tasks.
gwindow._tkwin._master.update_idletasks()
def gwindow_draw(self, gwindow, gobject): pass
####################
# GWindow drawing. #
####################
def gwindow_request_focus(self, gwindow):
self._update_active_window(gwindow._tkwin)
gwindow._tkwin._master.focus_force()
def gwindow_set_visible(self, gwindow, flag):
self._update_active_window(gwindow._tkwin)
        if flag:  # Show the window.
            gwindow._tkwin._master.deiconify()
        else:  # Hide the window.
            gwindow._tkwin._master.withdraw()
def gwindow_set_window_title(self, gwindow, title):
self._update_active_window(gwindow._tkwin)
gwindow._tkwin._master.title(title)
    def gwindow_get_width(self, gwindow):
        self._update_active_window(gwindow._tkwin)
        # geometry() returns a string like '400x300+20+20', so query the width directly.
        return gwindow._tkwin._master.winfo_width()
    def gwindow_get_height(self, gwindow):
        self._update_active_window(gwindow._tkwin)
        return gwindow._tkwin._master.winfo_height()
######################
# GWindow alignment. #
######################
def gwindow_add_to_region(self, gwindow, gobject, region):
from campy.graphics.gwindow import Region
if region == Region.NORTH:
self._ginteractor_add(gobject, gwindow._tkwin.top)
if region == Region.EAST:
self._ginteractor_add(gobject, gwindow._tkwin.right)
if region == Region.SOUTH:
self._ginteractor_add(gobject, gwindow._tkwin.bottom)
if region == Region.WEST:
self._ginteractor_add(gobject, gwindow._tkwin.left)
def gwindow_remove_from_region(self, gwindow, gobject, region): pass
def gwindow_set_region_alignment(self, gwindow, region, align): pass
##############################
# Shared GObject operations. #
##############################
def gobject_set_location(self, gobject, x, y):
if not hasattr(gobject, '_tkid'): return
tkid = gobject._tkid
win = gobject._tkwin
coords = win.canvas.coords(tkid)
win.canvas.move(tkid, x - coords[0], y - coords[1])
win._master.update_idletasks()
def gobject_set_filled(self, gobject, flag):
from campy.graphics.gobjects import GArc
if not hasattr(gobject, '_tkid'): return
tkid = gobject._tkid
win = gobject._tkwin
        if flag:
            win.canvas.itemconfig(tkid, fill=gobject.fill_color.hex)
            if isinstance(gobject, GArc):
                win.canvas.itemconfig(tkid, style=tk.PIESLICE)
        else:
            win.canvas.itemconfig(tkid, fill='')
            if isinstance(gobject, GArc):
                win.canvas.itemconfig(tkid, style=tk.ARC)
win._master.update_idletasks()
def gobject_remove(self, gobject):
if not hasattr(gobject, '_tkid'): return
tkid = gobject._tkid
win = gobject._tkwin
win.canvas.delete(tkid)
delattr(gobject, '_tkid')
delattr(gobject, '_tkwin')
win._master.update_idletasks()
def gobject_set_color(self, gobject, color):
if not hasattr(gobject, '_tkid'): return
tkid = gobject._tkid
win = gobject._tkwin
# Awkward import.
from campy.graphics.gobjects import GLabel, GLine
if not isinstance(gobject, GLabel) and not isinstance(gobject, GLine):
win.canvas.itemconfig(tkid, outline=color.hex)
else:
# GLabels and GLines are special because their "color" is actually a fill color.
win.canvas.itemconfig(tkid, fill=color.hex)
win._master.update_idletasks()
def gobject_set_fill_color(self, gobject, color):
if not hasattr(gobject, '_tkid'): return
tkid = gobject._tkid
win = gobject._tkwin
win.canvas.itemconfig(tkid, fill=color.hex)
win._master.update_idletasks()
def gobject_send_forward(self, gobject): pass
def gobject_send_to_front(self, gobject):
if not hasattr(gobject, '_tkid'): return
tkid = gobject._tkid
win = gobject._tkwin
win.canvas.tag_raise(tkid)
win._master.update_idletasks()
def gobject_send_backward(self, gobject): pass
def gobject_send_to_back(self, gobject):
if not hasattr(gobject, '_tkid'): return
tkid = gobject._tkid
win = gobject._tkwin
win.canvas.tag_lower(tkid)
win._master.update_idletasks()
def gobject_set_size(self, gobject, width, height): pass
def gobject_get_size(self, gobject):
if not hasattr(gobject, '_tkid'): return
tkid = gobject._tkid
win = gobject._tkwin
x0, y0, x1, y1 = win.canvas.bbox(tkid)
return x1 - x0, y1 - y0
def gobject_get_bounds(self, gobject): pass
def gobject_set_line_width(self, gobject, line_width): pass
def gobject_contains(self, gobject, x, y): pass
def gobject_set_visible(self, gobject, flag):
if not hasattr(gobject, '_tkid'): return
tkid = gobject._tkid
win = gobject._tkwin
if not flag:
win.canvas.itemconfig(tkid, state=tk.HIDDEN)
else:
win.canvas.itemconfig(tkid, state=tk.NORMAL)
def gobject_scale(self, gobject, sx, sy): pass
def gobject_rotate(self, gobject, theta): pass
########################
# Rectangular regions. #
########################
def grect_constructor(self, grect):
if hasattr(grect, '_tkwin'):
return
win = self._windows[-1]
grect._tkwin = win
grect._tkid = win.canvas.create_rectangle(
grect.x, grect.y, grect.x + grect.width, grect.y + grect.height,
outline=grect.color.hex, fill=grect.fill_color.hex if grect.filled else '',
state=tk.NORMAL if grect.visible else tk.HIDDEN)
win._master.update_idletasks()
def groundrect_constructor(self, gobject, width, height, corner): pass
def g3drect_constructor(self, gobject, width, height, raised): pass
def g3drect_set_raised(self, gobject, raised): pass
#######################
# Elliptical regions. #
#######################
def goval_constructor(self, goval):
if hasattr(goval, '_tkwin'):
return
win = self._windows[-1]
goval._tkwin = win
goval._tkid = win.canvas.create_oval(
goval.x, goval.y, goval.x + goval.width, goval.y + goval.height,
outline=goval.color.hex, fill=goval.fill_color.hex if goval.filled else '',
state=tk.NORMAL if goval.visible else tk.HIDDEN)
win._master.update_idletasks()
def garc_constructor(self, garc):
if hasattr(garc, '_tkwin'):
return
win = self._windows[-1]
garc._tkwin = win
garc._tkid = win.canvas.create_arc(
garc.x, garc.y, garc.x + garc.width, garc.y + garc.height,
start=garc.start, extent=garc.sweep,
outline=garc.color.hex, fill=garc.fill_color.hex if garc.filled else '',
style=tk.PIESLICE if garc.filled else tk.ARC,
state=tk.NORMAL if garc.visible else tk.HIDDEN)
win._master.update_idletasks()
def garc_set_start_angle(self, garc, angle):
if not hasattr(garc, '_tkid'): return
tkid = garc._tkid
win = garc._tkwin
win.canvas.itemconfig(tkid, start=angle)
win._master.update_idletasks()
def garc_set_sweep_angle(self, garc, angle):
if not hasattr(garc, '_tkid'): return
tkid = garc._tkid
win = garc._tkwin
win.canvas.itemconfig(tkid, extent=angle)
win._master.update_idletasks()
def garc_set_frame_rectangle(self, garc, x, y, width, height): pass
##########
# GLines #
##########
def gline_constructor(self, gline):
if hasattr(gline, '_tkwin'):
return
win = self._windows[-1]
gline._tkwin = win
gline._tkid = win.canvas.create_line(
gline.start.x, gline.start.y, gline.end.x, gline.end.y,
fill=gline.color.hex,
state=tk.NORMAL if gline.visible else tk.HIDDEN)
win._master.update_idletasks()
def gline_set_start_point(self, gline, x, y):
if not hasattr(gline, '_tkid'): return
tkid = gline._tkid
win = gline._tkwin
win.canvas.coords(tkid, x, y, gline.end.x, gline.end.y,)
win._master.update_idletasks()
def gline_set_end_point(self, gline, x, y):
if not hasattr(gline, '_tkid'): return
tkid = gline._tkid
win = gline._tkwin
win.canvas.coords(tkid, gline.start.x, gline.start.y, x, y)
win._master.update_idletasks()
##############
# GCompounds #
##############
def gcompound_constructor(self, gobject): pass
def gcompound_add(self, compound, gobject): pass
#########
# Fonts #
#########
# See: https://www.astro.princeton.edu/~rhl/Tcl-Tk_docs/tk/font.n.html
def gfont_default_attributes(self):
# Resolves to the platform-specific default.
font = tkfont.nametofont('TkDefaultFont')
return font.config()
def gfont_attributes_from_system_name(self, font_name):
# Attempt to load the font with the given name.
font = tkfont.nametofont(font_name)
return font.config()
def gfont_get_font_metrics(self, gfont):
font = tkfont.Font(family=gfont.family, size=gfont.size,
weight=tkfont.BOLD if gfont.weight else tkfont.NORMAL,
slant=tkfont.ITALIC if gfont.slant else tkfont.ROMAN)
if not hasattr(gfont, '_tkfont'):
gfont._tkfont = font
return font.metrics()
def gfont_measure_text_width(self, gfont, text):
if not hasattr(gfont, '_tkfont'):
gfont._tkfont = tkfont.Font(family=gfont.family, size=gfont.size,
weight=tkfont.BOLD if gfont.weight else tkfont.NORMAL,
slant=tkfont.ITALIC if gfont.slant else tkfont.ROMAN)
font = gfont._tkfont
return font.measure(text)
##########
# Labels #
##########
def glabel_constructor(self, glabel):
if hasattr(glabel, '_tkwin'):
return
win = self._windows[-1]
glabel._tkwin = win
# TODO(sredmond): Document that we're putting the anchor at the NW corner.
# TODO(sredmond): Respect the font that's been set.
glabel._tkid = win.canvas.create_text(
glabel.x, glabel.y,
text=glabel.text,
fill=glabel.color.hex, anchor=tk.SW,
state=tk.NORMAL if glabel.visible else tk.HIDDEN)
self.glabel_set_font(glabel, glabel.font)
win._master.update_idletasks()
def glabel_set_font(self, glabel, gfont):
if not hasattr(glabel, '_tkid'): return
if not hasattr(gfont, '_tkfont'):
gfont._tkfont = tkfont.Font(family=gfont.family, size=gfont.size,
weight=tkfont.BOLD if gfont.weight else tkfont.NORMAL,
slant=tkfont.ITALIC if gfont.slant else tkfont.ROMAN)
font = gfont._tkfont
tkid = glabel._tkid
win = glabel._tkwin
win.canvas.itemconfig(tkid, font=font)
def glabel_set_label(self, glabel, text):
if not hasattr(glabel, '_tkid'): return
tkid = glabel._tkid
win = glabel._tkwin
win.canvas.itemconfig(tkid, text=text)
def glabel_get_font_ascent(self, glabel): pass
def glabel_get_font_descent(self, glabel): pass
def glabel_get_size(self, glabel):
# TODO(sredmond): This is currently broken.
if not hasattr(glabel, '_tkid'): return 0, 0
tkid = glabel._tkid
win = glabel._tkwin
x0, y0, x1, y1 = win.canvas.bbox(tkid)
return x1 - x0, y1 - y0
# Polygons
def gpolygon_constructor(self, gpolygon):
if hasattr(gpolygon, '_tkwin'):
return
win = self._windows[-1]
gpolygon._tkwin = win
coords = sum(((v.x + gpolygon.x, v.y + gpolygon.y) for v in gpolygon.vertices), ())
gpolygon._tkid = win.canvas.create_polygon(coords, # Not the fastest, but it'll do
outline=gpolygon.color.hex, fill=gpolygon.fill_color.hex if gpolygon.filled else '',
state=tk.NORMAL if gpolygon.visible else tk.HIDDEN)
win._master.update_idletasks()
    def gpolygon_add_vertex(self, gpolygon, x, y):
        if not hasattr(gpolygon, '_tkid'): return
        tkid = gpolygon._tkid
        win = gpolygon._tkwin
        # Rebuild the full coordinate list from the polygon's vertices (assuming the new
        # vertex has already been appended), instead of the GLine-style endpoints used before.
        coords = sum(((v.x + gpolygon.x, v.y + gpolygon.y) for v in gpolygon.vertices), ())
        win.canvas.coords(tkid, *coords)
        win._master.update_idletasks()
##########
# Images #
##########
def image_find(self, filename):
# TODO(sredmond): Couple image file searching and image file loading.
path = pathlib.Path(filename)
if path.is_absolute():
if path.is_file():
return path
return None
# For relative paths, search for images in the following places.
# (1) The actual relative path to the scripts current directory.
# (1) An `images/` subfolder in the scripts current directory.
# TODO(sredmond): Read in a path environmental variable for searching.
if path.is_file():
return path.resolve() # We found it, even though it's relative!
if (path.parent / 'images' / path.name).is_file():
return (path.parent / 'images' / path.name).resolve()
# TODO(sredmond): Also search through library-specific images.
return None
def image_load(self, filename):
try:
from PIL import Image
logger.info('Loading image using PIL.')
im = Image.open(filename)
im = im.convert('RGB') # This is an unfortunate conversion, in that it kills transparent images.
return im, im.width, im.height
except ImportError:
im = tk.PhotoImage(file=filename)
return im, im.width(), im.height()
def gimage_constructor(self, gimage):
"""Try to create some sort of Tk Photo Image."""
if hasattr(gimage, '_tkwin'):
return
win = self._windows[-1]
gimage._tkwin = win
image = gimage._data # Either a tk.PhotoImage or a PIL.Image
# This is an awkward state, since ImageTk.PhotoImage isn't a subclass.
if not isinstance(image, tk.PhotoImage):
image = PhotoImage(image=image)
gimage._tkid = win.canvas.create_image(
gimage.x, gimage.y, anchor=tk.NW, image=image)
# Keep a reference to the PhotoImage object so that the Python GC
# doesn't destroy the data.
gimage._tkim = image
win._master.update_idletasks()
def gimage_blank(self, gimage, width, height): pass
def gimage_get_pixel(self, gimage, row, col):
from campy.graphics.gcolor import Pixel
try:
# Using Tk.PhotoImage.
value = gimage._data.get(col, row)
return Pixel(*map(int, value.split(' '))) # TODO(sredmond): Make sure Tk always returns 'r g b' and not 'a' or a single channel.
except AttributeError: # No get method on ImageTk.PhotoImage.
value = gimage._data.getpixel((col, row)) # Should be an (r, g, b) tuple
return Pixel(*value)
def gimage_set_pixel(self, gimage, row, col, rgb):
try: # Default to using PIL
gimage._data.putpixel((col, row), rgb)
# Oh no... Look at this abuse of Python. This is the type of thing they warn you about in school.
# TODO(sredmond): Move this into the hex method of colors.
r, g, b = rgb
hexcolor = '#{:02x}{:02x}{:02x}'.format(r, g, b)
gimage._tkim._PhotoImage__photo.put(hexcolor, (col, row))
except AttributeError: # No putpixel in Tk, so try to fall back.
r, g, b = rgb
hexcolor = '#{:02x}{:02x}{:02x}'.format(r, g, b)
gimage._tkim.put(hexcolor, (col, row))
def gimage_preview(self, gimage): pass
##########
# Events #
##########
def set_action_command(self, gobject, cmd): pass
def get_next_event(self, mask): pass
def wait_for_event(self, mask): pass
def event_add_keypress_handler(self, event, handler): pass
def event_generate_keypress(self, event): pass
@staticmethod
def _wrap_mouse_event(event, window, event_type):
from campy.gui.events.mouse import GMouseEvent
# TODO(sredmond): As written, this joins the TkWindow, not the GWindow, to this event.
return GMouseEvent(event_type=event_type, gwindow=window._gwindow, x=event.x, y=event.y)
def event_add_mouse_handler(self, event, handler):
from campy.gui.events.mouse import MouseEventType
if not self._windows:
logger.warning('Refusing to add a mouse listener before any windows are created.')
return
win = self._windows[-1]
if event == MouseEventType.MOUSE_CLICKED:
win._master.bind('<Button-1>', lambda cb_event: handler(self._wrap_mouse_event(cb_event, win, event)))
elif event == MouseEventType.MOUSE_RELEASED:
win._master.bind('<ButtonRelease-1>', lambda cb_event: handler(self._wrap_mouse_event(cb_event, win, event)))
elif event == MouseEventType.MOUSE_MOVED:
win._master.bind('<Motion>', lambda cb_event: handler(self._wrap_mouse_event(cb_event, win, event)))
elif event == MouseEventType.MOUSE_DRAGGED:
win._master.bind('<B1-Motion>', lambda cb_event: handler(self._wrap_mouse_event(cb_event, win, event)))
else:
logger.warning('Unrecognized event type: {}. Quietly passing.'.format(event))
def event_generate_mouse(self, event): pass
@staticmethod
def _wrap_window_event(event, window):
from campy.gui.events.window import GWindowEvent
return GWindowEvent(window._gwindow, x=event.x, y=event.y, width=event.width, height=event.height)
def event_add_window_changed_handler(self, handler):
if not self._windows:
logger.warning('Refusing to add a window listener before any windows are created.')
return
win = self._windows[-1]
win._master.bind('<Configure>', lambda cb_event: handler(self._wrap_window_event(cb_event, win)))
def event_set_window_closed_handler(self, handler):
# TODO(sredmond): Don't allow this method to set a handler multiple times, or warn about replacing the old one.
if not self._windows:
logger.warning('Refusing to add a window listener before any windows are created.')
return
win = self._windows[-1]
@functools.wraps(handler)
def wrapper():
result = handler()
# Perform the default action when the handler returns a False value.
if not result:
win._close()
# Unlike some of the other event methods, this callback is bound via protocol.
win._master.protocol("WM_DELETE_WINDOW", wrapper)
def event_pump_one(self):
# Forcibly process queued tasks, but don't process newly queued ones.
self._root.update_idletasks()
self._root.dooneevent(DONT_WAIT)
# TODO(sredmond): Rename these backend events for consistency.
def timer_pause(self, event): pass
def timer_schedule(self, function, delay_ms):
self._root.after(delay_ms, function)
###############
# Interactors #
###############
def _ginteractor_add(self, gint, frame):
from campy.gui.ginteractors import GButton
if isinstance(gint, GButton):
# TODO(sredmond): Wrap up a GActionEvent on the Tk side to supply.
gint._tkobj = tk.Button(frame, text=gint.label, command=gint.click,
state=tk.NORMAL if not gint.disabled else tk.DISABLED)
gint._tkobj.pack()
frame.update_idletasks()
def gbutton_constructor(self, gbutton):
if hasattr(gbutton, '_tkwin'):
return
win = self._windows[-1]
gbutton._tkwin = win
def gbutton_set_label(self, gbutton):
if not hasattr(gbutton, '_tkobj'): return
gbutton._tkobj.config(text=gbutton.label)
def gbutton_set_disabled(self, gbutton):
if not hasattr(gbutton, '_tkobj'): return
gbutton._tkobj.config(state=tk.NORMAL if not gbutton.disabled else tk.DISABLED)
def gcheckbox_constructor(self, gcheckbox):
if hasattr(gcheckbox, '_tkwin'):
return
win = self._windows[-1]
gcheckbox._tkwin = win
# TODO(sredmond): Wrap up a GActionEvent on the Tk side to supply.
        var = tk.IntVar()
        gcheckbox._tkobj = tk.Checkbutton(win._master, text=gcheckbox.label, command=gcheckbox.select,
                                          variable=var,
                                          state=tk.NORMAL if not gcheckbox.disabled else tk.DISABLED)
gcheckbox._tkobj.var = var
gcheckbox._tkobj.pack()
win._master.update_idletasks()
def gcheckbox_is_selected(self, gcheckbox):
return bool(gcheckbox._tkobj.var.get())
def gcheckbox_set_selected(self, gcheckbox, state):
return gcheckbox._tkobj.var.set(1 if state else 0)
def gslider_constructor(self, gslider, min, max, value): pass
def gslider_get_value(self, gslider): pass
def gslider_set_value(self, gslider, value): pass
def gtextfield_constructor(self, gtextfield, num_chars): pass
def gtextfield_get_text(self, gtextfield): pass
def gtextfield_set_text(self, gtextfield, str): pass
def gchooser_constructor(self, gchooser): pass
def gchooser_add_item(self, gchooser, item): pass
def gchooser_get_selected_item(self, gchooser): pass
def gchooser_set_selected_item(self, gchooser, item): pass
###########
# Dialogs #
###########
# TODO(sredmond): Make these dialogs steal focus.
def gfilechooser_show_open_dialog(self, current_dir, file_filter):
logger.debug('Ignoring file_filter argument to gfilechooser_show_open_dialog.')
parent = None
if self._windows:
parent=self._windows[-1]
return tkfiledialog.askopenfilename(initialdir=current_dir, title='Select File to Open', parent=parent) or None
def gfilechooser_show_save_dialog(self, current_dir, file_filter):
logger.debug('Ignoring file_filter argument to gfilechooser_show_save_dialog.')
parent = None
if self._windows:
parent=self._windows[-1]
return tkfiledialog.asksaveasfilename(initialdir=current_dir, title='Select File to Save', parent=parent) or None
def goptionpane_show_confirm_dialog(self, message, title, confirm_type):
from campy.graphics.goptionpane import ConfirmType
if confirm_type == ConfirmType.YES_NO:
return tkmessagebox.askyesno(title, message)
elif confirm_type == ConfirmType.YES_NO_CANCEL:
return tkmessagebox.askyesnocancel(title, message)
elif confirm_type == ConfirmType.OK_CANCEL:
return tkmessagebox.askokcancel(title, message)
else:
logger.debug('Unrecognized confirm_type {!r}'.format(confirm_type))
def goptionpane_show_input_dialog(self, message, title):
return tksimpledialog.askstring(title, message, parent=self._root)
def goptionpane_show_message_dialog(self, message, title, message_type):
from campy.graphics.goptionpane import MessageType
# TODO(sredmond): The icons aren't appearing correctly.
if message_type == MessageType.ERROR:
tkmessagebox.showerror(title, message, icon=tkmessagebox.ERROR)
elif message_type == MessageType.INFORMATION:
tkmessagebox.showinfo(title, message, icon=tkmessagebox.INFO)
elif message_type == MessageType.WARNING:
tkmessagebox.showwarning(title, message, icon=tkmessagebox.WARNING)
elif message_type == MessageType.QUESTION:
tkmessagebox.showinfo(title, message, icon=tkmessagebox.QUESTION)
elif message_type == MessageType.PLAIN:
tkmessagebox.showinfo(title, message)
else:
logger.debug('Unrecognized message_type {!r}'.format(message_type))
def goptionpane_show_option_dialog(self, message, title, options, initially_selected): pass
def goptionpane_show_text_file_dialog(self, message, title, rows, cols): pass
if __name__ == '__main__':
# Quick tests.
from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GRect, GPolygon
from campy.graphics.gfilechooser import show_open_dialog, show_save_dialog
from campy.graphics.goptionpane import *
from campy.graphics.gtypes import *
from campy.gui.interactors import *
import math
print('{!r}'.format(show_open_dialog()))
print('{!r}'.format(show_save_dialog()))
window = GWindow()
rect = GRect(100, 200, x=50, y=60)
window.add(rect)
rect.location = 300, 300
button = GButton('Button')
window.add(button)
# Add a polygon.
edge_length = 75
stop_sign = GPolygon()
start = GPoint(-edge_length / 2, edge_length / 2 + edge_length / math.sqrt(2.0))
stop_sign.add_vertex(start)
for edge in range(8):
stop_sign.add_polar_edge(edge_length, 45*edge)
stop_sign.filled = True
stop_sign.color = "BLACK"
stop_sign.fill_color = "RED"
window.add(stop_sign, window.width / 2, window.height / 2)
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
version = {}
with open("./vortexasdk/version.py") as fp:
exec(fp.read(), version)
setuptools.setup(
name="vortexasdk",
version=version["__version__"],
author="Vortexa Developers",
author_email="developers@vortexa.com",
description="Vortexa SDK",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
url="https://github.com/vortechsa/python-sdk",
license="Apache License 2.0",
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
python_requires=">=3.7",
install_requires=[
"flatten-dict==0.2.0",
"jsons==1.0.0",
"jupyter==1.0.0",
"matplotlib==3.3.4",
"pandas>=0.25.2",
"requests==2.27.1",
"statsmodels==0.12.2",
"tqdm==4.38.0",
],
extras_require={
"tests": [
"flake8==3.7.9",
"mkdocs==1.2.4",
"mypy==0.770",
"pre-commit==1.20.0",
"pytest==5.2.4",
"pydoc-markdown==2.0.5",
"pyyaml<6.0.0",
"six==1.12.0",
"tabulate==0.8.7",
"xlrd==1.2.0",
"openpyxl==3.0.7",
],
"deploy": ["wheel==0.36.2", "twine==3.3.0"],
},
)
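# Hedged usage note (not part of the original metadata): installing with the test
# toolchain is typically done via `pip install -e ".[tests]"`, while the "deploy"
# extra pulls in the wheel/twine release tooling.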
|
from anonymization.base_anonymization import BaseAnonymization
from PIL import ImageFilter, Image
def find_boxes(bbox):
"""Return the detection boxes from the parsed response as a plain list."""
return list(bbox)
class DetectionAnonymization(BaseAnonymization):
def __init__(self):
pass
def blurring(self, image, response, degree=None, id=None, mask=None):
"""
Blur the detected objects based on the user's requirements
:param image: input image
:param response: The response parsed from the object detection api
:param degree: The degree of the anonymization (specified in the user_configuration file)
:param id:
:param mask:
:return: The anonymized image
"""
boxes = find_boxes(response)
for i in boxes:
cropped_image = image.crop((i[0], i[1], i[2], i[3]))
blurred_image = cropped_image.filter(ImageFilter.GaussianBlur(25*float(degree)))
image.paste(blurred_image, (i[0], i[1], i[2], i[3]))
return image
def pixelating(self, image, response, degree=None, id=None, mask=None):
"""
Pixelate the detected objects based on the user's requirements
:param image: input image
:param response: The response parsed from the object detection api
:param degree: The degree of the anonymization (specified in the user_configuration file)
:param id:
:param mask:
:return: The anonymized image
"""
boxes = find_boxes(response)
for i in boxes:
cropped_image = image.crop((i[0], i[1], i[2], i[3]))
w, h = cropped_image.size
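# Downscale then upscale with NEAREST to pixelate. Note that w / (degree * w)
# reduces to 1 / degree, so the intermediate image is roughly 1/degree pixels
# per side regardless of the crop's size.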
small = cropped_image.resize((int(w / (float(degree) * w)), int(h / (float(degree) * h))), Image.BILINEAR)
result = small.resize(cropped_image.size, Image.NEAREST)
image.paste(result, (i[0], i[1], i[2], i[3]))
return image
def blackening(self, image, response, degree=None, id=None, mask=None):
"""
Blacken the detected objects based on the user's requirements
:param image: input image
:param response: The response parsed from the object detection api
:param degree: The degree of the anonymization (specified in the user_configuration file)
:param id:
:param mask:
:return: The anonymized image
"""
boxes = find_boxes(response)
for i in boxes:
cropped = image.crop((i[0], i[1], i[2], i[3]))
w, h = cropped.size
black = Image.new(str(image.mode), (w, h), 'black')
result = Image.blend(cropped, black, float(degree))
cropped.paste(result)
image.paste(cropped, (i[0], i[1], i[2], i[3]))
return image
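# Hedged usage sketch (not part of the module above): the image path and box
# coordinates are hypothetical; any RGB image and pixel-space boxes work.
if __name__ == "__main__":
from PIL import Image
anonymizer = DetectionAnonymization()
image = Image.open("example.jpg")  # hypothetical input file
boxes = [(10, 10, 110, 110)]  # one detection box as (left, upper, right, lower)
blurred = anonymizer.blurring(image.copy(), boxes, degree=0.5)
pixelated = anonymizer.pixelating(image.copy(), boxes, degree=0.1)
blackened = anonymizer.blackening(image.copy(), boxes, degree=1.0)
blurred.save("example_blurred.jpg")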
|
from django.contrib import admin
from .models import Search
@admin.register(Search)
class SearchAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'started_at', 'finished_at', 'failed',)
|
#!/usr/bin/env python
import rosbag
import argparse
parser = argparse.ArgumentParser(description='Extract vehicle data from ROS bag file, using python\'s rosbag API')
parser.add_argument('bag', action='store', type=str, help='Bag filename')
parser.add_argument('--output', type=str, default='out.txt', help='Output file name (default is out.txt)')
args = parser.parse_args()
if __name__ == "__main__":
with open(args.output, 'w') as outfile:
with rosbag.Bag(args.bag, 'r') as bag:
msgOdom = None
msgCostmap = None
msgPlan = None
msgStatus = None
msgVel = None
msgGoal = None
for topic, msg, t in bag.read_messages(topics=['/move_base/local_costmap/costmap', '/move_base/DWAPlannerROS/global_plan', '/odom', '/move_base/goal', '/mobile_base/commands/velocity', '/move_base/status']):
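# Keep only the most recent message of each type; a synchronized row is emitted
# once every stream of interest has produced at least one message.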
if msg._type == "nav_msgs/Odometry":
msgOdom = msg
elif msg._type == "nav_msgs/Path":
msgPlan = msg
elif msg._type == "nav_msgs/OccupancyGrid":
msgCostmap = msg
elif msg._type == "actionlib_msgs/GoalStatusArray":
msgStatus = msg
elif msg._type == "move_base_msgs/MoveBaseActionGoal":
msgGoal = msg
elif msg._type == "geometry_msgs/Twist":
msgVel = msg
if (msgOdom is not None and msgCostmap is not None and msgPlan is not None and msgStatus is not None and msgGoal is not None and msgVel is not None):
outfile.write("%f %f %f %f %f %f %f %f %f %f %d %f %f %f %f %f %f %f %f %f %f %f %f %f %s\n" % (
msgOdom.pose.pose.position.x, msgOdom.pose.pose.position.y, msgOdom.pose.pose.position.z,
msgGoal.goal.target_pose.pose.position.x, msgGoal.goal.target_pose.pose.position.y, msgGoal.goal.target_pose.pose.position.z, msgGoal.goal.target_pose.pose.orientation.x, msgGoal.goal.target_pose.pose.orientation.y, msgGoal.goal.target_pose.pose.orientation.z, msgGoal.goal.target_pose.pose.orientation.w,
msgStatus.status_list[0].status,
msgVel.linear.x, msgVel.linear.y, msgVel.linear.z, msgVel.angular.x, msgVel.angular.y, msgVel.angular.z,
msgPlan.poses[0].pose.position.x, msgPlan.poses[0].pose.position.y, msgPlan.poses[0].pose.position.z, msgPlan.poses[0].pose.orientation.x, msgPlan.poses[0].pose.orientation.y, msgPlan.poses[0].pose.orientation.z, msgPlan.poses[0].pose.orientation.w,
msgCostmap.data))
else:
print(msg._type)
# outfile.write("%d %f %f %f\n" % (msg.status.status, msg.latitude, msg.longitude, msg.altitude))
|
# -*- coding:utf-8 -*-
from __future__ import division
import sys
sys.path.append('../../../../../rtl/udm/sw')
import time
import udm
from udm import *
sys.path.append('..')
import sigma
from sigma import *
def hw_test_dhrystone(sigma, dhrystone_filename):
print("#### DHRYSTONE TEST STARTED ####")
print("Clearing buffer")
sigma.reset_buf()
print("Loading test program...")
sigma.tile.loadelf(dhrystone_filename)
print("Test program written!")
time.sleep(0.1)
rdarr = sigma.tile.udm.rdarr32(0x6000, 2)
Microseconds = rdarr[0]
Dhrystones_Per_Second = rdarr[1]
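# 1757 Dhrystones per second is the score of the reference VAX 11/780, so this ratio is the conventional DMIPS figure.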
DMIPS = Dhrystones_Per_Second / 1757
print("Microseconds: ", Microseconds)
print("Dhrystones_Per_Second: ", Dhrystones_Per_Second)
print("DMIPS: ", DMIPS)
if Microseconds != 0 and Dhrystones_Per_Second != 0:
test_succ_flag = 1
print("#### DHRYSTONE TEST PASSED! ####")
else:
test_succ_flag = 0
print("#### DHRYSTONE TEST FAILED! ####")
print("")
return test_succ_flag
|
# Copyright 2019-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for explicit client-side field level encryption."""
import contextlib
import enum
import socket
import uuid
import weakref
from typing import Any, Mapping, Optional, Sequence
try:
from pymongocrypt.auto_encrypter import AutoEncrypter
from pymongocrypt.errors import MongoCryptError # noqa: F401
from pymongocrypt.explicit_encrypter import ExplicitEncrypter
from pymongocrypt.mongocrypt import MongoCryptOptions
from pymongocrypt.state_machine import MongoCryptCallback
_HAVE_PYMONGOCRYPT = True
except ImportError:
_HAVE_PYMONGOCRYPT = False
MongoCryptCallback = object
from bson import _dict_to_bson, decode, encode
from bson.binary import STANDARD, UUID_SUBTYPE, Binary
from bson.codec_options import CodecOptions
from bson.errors import BSONError
from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson
from bson.son import SON
from pymongo import _csot
from pymongo.daemon import _spawn_daemon
from pymongo.encryption_options import AutoEncryptionOpts
from pymongo.errors import (
ConfigurationError,
EncryptionError,
InvalidOperation,
ServerSelectionTimeoutError,
)
from pymongo.mongo_client import MongoClient
from pymongo.network import BLOCKING_IO_ERRORS
from pymongo.pool import PoolOptions, _configured_socket
from pymongo.read_concern import ReadConcern
from pymongo.ssl_support import get_ssl_context
from pymongo.uri_parser import parse_host
from pymongo.write_concern import WriteConcern
_HTTPS_PORT = 443
_KMS_CONNECT_TIMEOUT = 10 # TODO: CDRIVER-3262 will define this value.
_MONGOCRYPTD_TIMEOUT_MS = 10000
_DATA_KEY_OPTS: CodecOptions = CodecOptions(document_class=SON, uuid_representation=STANDARD)
# Use RawBSONDocument codec options to avoid needlessly decoding
# documents from the key vault.
_KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument, uuid_representation=STANDARD)
@contextlib.contextmanager
def _wrap_encryption_errors():
"""Context manager to wrap encryption related errors."""
try:
yield
except BSONError:
# BSON encoding/decoding errors are unrelated to encryption so
# we should propagate them unchanged.
raise
except Exception as exc:
raise EncryptionError(exc)
class _EncryptionIO(MongoCryptCallback): # type: ignore
def __init__(self, client, key_vault_coll, mongocryptd_client, opts):
"""Internal class to perform I/O on behalf of pymongocrypt."""
self.client_ref: Any
# Use a weak ref to break reference cycle.
if client is not None:
self.client_ref = weakref.ref(client)
else:
self.client_ref = None
self.key_vault_coll = key_vault_coll.with_options(
codec_options=_KEY_VAULT_OPTS,
read_concern=ReadConcern(level="majority"),
write_concern=WriteConcern(w="majority"),
)
self.mongocryptd_client = mongocryptd_client
self.opts = opts
self._spawned = False
def kms_request(self, kms_context):
"""Complete a KMS request.
:Parameters:
- `kms_context`: A :class:`MongoCryptKmsContext`.
:Returns:
None
"""
endpoint = kms_context.endpoint
message = kms_context.message
provider = kms_context.kms_provider
ctx = self.opts._kms_ssl_contexts.get(provider)
if ctx is None:
# Enable strict certificate verification, OCSP, match hostname, and
# SNI using the system default CA certificates.
ctx = get_ssl_context(
None, # certfile
None, # passphrase
None, # ca_certs
None, # crlfile
False, # allow_invalid_certificates
False, # allow_invalid_hostnames
False,
) # disable_ocsp_endpoint_check
# CSOT: set timeout for socket creation.
connect_timeout = max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0.001)
opts = PoolOptions(
connect_timeout=connect_timeout,
socket_timeout=connect_timeout,
ssl_context=ctx,
)
host, port = parse_host(endpoint, _HTTPS_PORT)
conn = _configured_socket((host, port), opts)
try:
conn.sendall(message)
while kms_context.bytes_needed > 0:
# CSOT: update timeout.
conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0))
data = conn.recv(kms_context.bytes_needed)
if not data:
raise OSError("KMS connection closed")
kms_context.feed(data)
except BLOCKING_IO_ERRORS:
raise socket.timeout("timed out")
finally:
conn.close()
def collection_info(self, database, filter):
"""Get the collection info for a namespace.
The returned collection info is passed to libmongocrypt which reads
the JSON schema.
:Parameters:
- `database`: The database on which to run listCollections.
- `filter`: The filter to pass to listCollections.
:Returns:
The first document from the listCollections command response as BSON.
"""
with self.client_ref()[database].list_collections(filter=RawBSONDocument(filter)) as cursor:
for doc in cursor:
return _dict_to_bson(doc, False, _DATA_KEY_OPTS)
def spawn(self):
"""Spawn mongocryptd.
Note this method is thread safe; at most one mongocryptd will start
successfully.
"""
self._spawned = True
args = [self.opts._mongocryptd_spawn_path or "mongocryptd"]
args.extend(self.opts._mongocryptd_spawn_args)
_spawn_daemon(args)
def mark_command(self, database, cmd):
"""Mark a command for encryption.
:Parameters:
- `database`: The database on which to run this command.
- `cmd`: The BSON command to run.
:Returns:
The marked command response from mongocryptd.
"""
if not self._spawned and not self.opts._mongocryptd_bypass_spawn:
self.spawn()
# Database.command only supports mutable mappings so we need to decode
# the raw BSON command first.
inflated_cmd = _inflate_bson(cmd, DEFAULT_RAW_BSON_OPTIONS)
try:
res = self.mongocryptd_client[database].command(
inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS
)
except ServerSelectionTimeoutError:
if self.opts._mongocryptd_bypass_spawn:
raise
self.spawn()
res = self.mongocryptd_client[database].command(
inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS
)
return res.raw
def fetch_keys(self, filter):
"""Yields one or more keys from the key vault.
:Parameters:
- `filter`: The filter to pass to find.
:Returns:
A generator which yields the requested keys from the key vault.
"""
with self.key_vault_coll.find(RawBSONDocument(filter)) as cursor:
for key in cursor:
yield key.raw
def insert_data_key(self, data_key):
"""Insert a data key into the key vault.
:Parameters:
- `data_key`: The data key document to insert.
:Returns:
The _id of the inserted data key document.
"""
raw_doc = RawBSONDocument(data_key, _KEY_VAULT_OPTS)
data_key_id = raw_doc.get("_id")
if not isinstance(data_key_id, uuid.UUID):
raise TypeError("data_key _id must be a UUID")
self.key_vault_coll.insert_one(raw_doc)
return Binary(data_key_id.bytes, subtype=UUID_SUBTYPE)
def bson_encode(self, doc):
"""Encode a document to BSON.
A document can be any mapping type (like :class:`dict`).
:Parameters:
- `doc`: mapping type representing a document
:Returns:
The encoded BSON bytes.
"""
return encode(doc)
def close(self):
"""Release resources.
Note it is not safe to call this method from __del__ or any GC hooks.
"""
self.client_ref = None
self.key_vault_coll = None
if self.mongocryptd_client:
self.mongocryptd_client.close()
self.mongocryptd_client = None
class _Encrypter(object):
"""Encrypts and decrypts MongoDB commands.
This class is used to support automatic encryption and decryption of
MongoDB commands."""
def __init__(self, client, opts):
"""Create a _Encrypter for a client.
:Parameters:
- `client`: The encrypted MongoClient.
- `opts`: The encrypted client's :class:`AutoEncryptionOpts`.
"""
if opts._schema_map is None:
schema_map = None
else:
schema_map = _dict_to_bson(opts._schema_map, False, _DATA_KEY_OPTS)
if opts._encrypted_fields_map is None:
encrypted_fields_map = None
else:
encrypted_fields_map = _dict_to_bson(opts._encrypted_fields_map, False, _DATA_KEY_OPTS)
self._bypass_auto_encryption = opts._bypass_auto_encryption
self._internal_client = None
def _get_internal_client(encrypter, mongo_client):
if mongo_client.options.pool_options.max_pool_size is None:
# Unlimited pool size, use the same client.
return mongo_client
# Else - limited pool size, use an internal client.
if encrypter._internal_client is not None:
return encrypter._internal_client
internal_client = mongo_client._duplicate(minPoolSize=0, auto_encryption_opts=None)
encrypter._internal_client = internal_client
return internal_client
if opts._key_vault_client is not None:
key_vault_client = opts._key_vault_client
else:
key_vault_client = _get_internal_client(self, client)
if opts._bypass_auto_encryption:
metadata_client = None
else:
metadata_client = _get_internal_client(self, client)
db, coll = opts._key_vault_namespace.split(".", 1)
key_vault_coll = key_vault_client[db][coll]
mongocryptd_client: MongoClient = MongoClient(
opts._mongocryptd_uri, connect=False, serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS
)
io_callbacks = _EncryptionIO(metadata_client, key_vault_coll, mongocryptd_client, opts)
self._auto_encrypter = AutoEncrypter(
io_callbacks,
MongoCryptOptions(
opts._kms_providers,
schema_map,
crypt_shared_lib_path=opts._crypt_shared_lib_path,
crypt_shared_lib_required=opts._crypt_shared_lib_required,
bypass_encryption=opts._bypass_auto_encryption,
encrypted_fields_map=encrypted_fields_map,
bypass_query_analysis=opts._bypass_query_analysis,
),
)
self._closed = False
def encrypt(self, database, cmd, codec_options):
"""Encrypt a MongoDB command.
:Parameters:
- `database`: The database for this command.
- `cmd`: A command document.
- `codec_options`: The CodecOptions to use while encoding `cmd`.
:Returns:
The encrypted command to execute.
"""
self._check_closed()
encoded_cmd = _dict_to_bson(cmd, False, codec_options)
with _wrap_encryption_errors():
encrypted_cmd = self._auto_encrypter.encrypt(database, encoded_cmd)
# TODO: PYTHON-1922 avoid decoding the encrypted_cmd.
encrypt_cmd = _inflate_bson(encrypted_cmd, DEFAULT_RAW_BSON_OPTIONS)
return encrypt_cmd
def decrypt(self, response):
"""Decrypt a MongoDB command response.
:Parameters:
- `response`: A MongoDB command response as BSON.
:Returns:
The decrypted command response.
"""
self._check_closed()
with _wrap_encryption_errors():
return self._auto_encrypter.decrypt(response)
def _check_closed(self):
if self._closed:
raise InvalidOperation("Cannot use MongoClient after close")
def close(self):
"""Cleanup resources."""
self._closed = True
self._auto_encrypter.close()
if self._internal_client:
self._internal_client.close()
self._internal_client = None
class Algorithm(str, enum.Enum):
"""An enum that defines the supported encryption algorithms."""
AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic"
"""AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic."""
AEAD_AES_256_CBC_HMAC_SHA_512_Random = "AEAD_AES_256_CBC_HMAC_SHA_512-Random"
"""AEAD_AES_256_CBC_HMAC_SHA_512_Random."""
INDEXED = "Indexed"
"""Indexed.
.. note:: Support for Queryable Encryption is in beta.
Backwards-breaking changes may be made before the final release.
.. versionadded:: 4.2
"""
UNINDEXED = "Unindexed"
"""Unindexed.
.. note:: Support for Queryable Encryption is in beta.
Backwards-breaking changes may be made before the final release.
.. versionadded:: 4.2
"""
class QueryType(enum.IntEnum):
"""**(BETA)** An enum that defines the supported values for explicit encryption query_type.
.. note:: Support for Queryable Encryption is in beta.
Backwards-breaking changes may be made before the final release.
.. versionadded:: 4.2
"""
EQUALITY = 1
"""Used to encrypt a value for an equality query."""
class ClientEncryption(object):
"""Explicit client-side field level encryption."""
def __init__(
self,
kms_providers: Mapping[str, Any],
key_vault_namespace: str,
key_vault_client: MongoClient,
codec_options: CodecOptions,
kms_tls_options: Optional[Mapping[str, Any]] = None,
) -> None:
"""Explicit client-side field level encryption.
The ClientEncryption class encapsulates explicit operations on a key
vault collection that cannot be done directly on a MongoClient. Similar
to configuring auto encryption on a MongoClient, it is constructed with
a MongoClient (to a MongoDB cluster containing the key vault
collection), KMS provider configuration, and keyVaultNamespace. It
provides an API for explicitly encrypting and decrypting values, and
creating data keys. It does not provide an API to query keys from the
key vault collection, as this can be done directly on the MongoClient.
See :ref:`explicit-client-side-encryption` for an example.
:Parameters:
- `kms_providers`: Map of KMS provider options. The `kms_providers`
map values differ by provider:
- `aws`: Map with "accessKeyId" and "secretAccessKey" as strings.
These are the AWS access key ID and AWS secret access key used
to generate KMS messages. An optional "sessionToken" may be
included to support temporary AWS credentials.
- `azure`: Map with "tenantId", "clientId", and "clientSecret" as
strings. Additionally, "identityPlatformEndpoint" may also be
specified as a string (defaults to 'login.microsoftonline.com').
These are the Azure Active Directory credentials used to
generate Azure Key Vault messages.
- `gcp`: Map with "email" as a string and "privateKey"
as `bytes` or a base64 encoded string.
Additionally, "endpoint" may also be specified as a string
(defaults to 'oauth2.googleapis.com'). These are the
credentials used to generate Google Cloud KMS messages.
- `kmip`: Map with "endpoint" as a host with required port.
For example: ``{"endpoint": "example.com:443"}``.
- `local`: Map with "key" as `bytes` (96 bytes in length) or
a base64 encoded string which decodes
to 96 bytes. "key" is the master key used to encrypt/decrypt
data keys. This key should be generated and stored as securely
as possible.
- `key_vault_namespace`: The namespace for the key vault collection.
The key vault collection contains all data keys used for encryption
and decryption. Data keys are stored as documents in this MongoDB
collection. Data keys are protected with encryption by a KMS
provider.
- `key_vault_client`: A MongoClient connected to a MongoDB cluster
containing the `key_vault_namespace` collection.
- `codec_options`: An instance of
:class:`~bson.codec_options.CodecOptions` to use when encoding a
value for encryption and decoding the decrypted BSON value. This
should be the same CodecOptions instance configured on the
MongoClient, Database, or Collection used to access application
data.
- `kms_tls_options` (optional): A map of KMS provider names to TLS
options to use when creating secure connections to KMS providers.
Accepts the same TLS options as
:class:`pymongo.mongo_client.MongoClient`. For example, to
override the system default CA file::
kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}}
Or to supply a client certificate::
kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}}
.. versionchanged:: 4.0
Added the `kms_tls_options` parameter and the "kmip" KMS provider.
.. versionadded:: 3.9
"""
if not _HAVE_PYMONGOCRYPT:
raise ConfigurationError(
"client-side field level encryption requires the pymongocrypt "
"library: install a compatible version with: "
"python -m pip install 'pymongo[encryption]'"
)
if not isinstance(codec_options, CodecOptions):
raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions")
self._kms_providers = kms_providers
self._key_vault_namespace = key_vault_namespace
self._key_vault_client = key_vault_client
self._codec_options = codec_options
db, coll = key_vault_namespace.split(".", 1)
key_vault_coll = key_vault_client[db][coll]
opts = AutoEncryptionOpts(
kms_providers, key_vault_namespace, kms_tls_options=kms_tls_options
)
self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO(
None, key_vault_coll, None, opts
)
self._encryption = ExplicitEncrypter(
self._io_callbacks, MongoCryptOptions(kms_providers, None)
)
def create_data_key(
self,
kms_provider: str,
master_key: Optional[Mapping[str, Any]] = None,
key_alt_names: Optional[Sequence[str]] = None,
) -> Binary:
"""Create and insert a new data key into the key vault collection.
:Parameters:
- `kms_provider`: The KMS provider to use. Supported values are
"aws", "azure", "gcp", "kmip", and "local".
- `master_key`: Identifies a KMS-specific key used to encrypt the
new data key. If the kmsProvider is "local" the `master_key` is
not applicable and may be omitted.
If the `kms_provider` is "aws" it is required and has the
following fields::
- `region` (string): Required. The AWS region, e.g. "us-east-1".
- `key` (string): Required. The Amazon Resource Name (ARN) to
the AWS customer.
- `endpoint` (string): Optional. An alternate host to send KMS
requests to. May include port number, e.g.
"kms.us-east-1.amazonaws.com:443".
If the `kms_provider` is "azure" it is required and has the
following fields::
- `keyVaultEndpoint` (string): Required. Host with optional
port, e.g. "example.vault.azure.net".
- `keyName` (string): Required. Key name in the key vault.
- `keyVersion` (string): Optional. Version of the key to use.
If the `kms_provider` is "gcp" it is required and has the
following fields::
- `projectId` (string): Required. The Google cloud project ID.
- `location` (string): Required. The GCP location, e.g. "us-east1".
- `keyRing` (string): Required. Name of the key ring that contains
the key to use.
- `keyName` (string): Required. Name of the key to use.
- `keyVersion` (string): Optional. Version of the key to use.
- `endpoint` (string): Optional. Host with optional port.
Defaults to "cloudkms.googleapis.com".
If the `kms_provider` is "kmip" it is optional and has the
following fields::
- `keyId` (string): Optional. `keyId` is the KMIP Unique
Identifier to a 96 byte KMIP Secret Data managed object. If
keyId is omitted, the driver creates a random 96 byte KMIP
Secret Data managed object.
- `endpoint` (string): Optional. Host with optional
port, e.g. "example.vault.azure.net:".
- `key_alt_names` (optional): An optional list of string alternate
names used to reference a key. If a key is created with alternate
names, then encryption may refer to the key by the unique alternate
name instead of by ``key_id``. The following example shows creating
and referring to a data key by alternate name::
client_encryption.create_data_key("local", keyAltNames=["name1"])
# reference the key with the alternate name
client_encryption.encrypt("457-55-5462", keyAltName="name1",
algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random)
:Returns:
The ``_id`` of the created data key document as a
:class:`~bson.binary.Binary` with subtype
:data:`~bson.binary.UUID_SUBTYPE`.
"""
self._check_closed()
with _wrap_encryption_errors():
return self._encryption.create_data_key(
kms_provider, master_key=master_key, key_alt_names=key_alt_names
)
def encrypt(
self,
value: Any,
algorithm: str,
key_id: Optional[Binary] = None,
key_alt_name: Optional[str] = None,
index_key_id: Optional[Binary] = None,
query_type: Optional[int] = None,
contention_factor: Optional[int] = None,
) -> Binary:
"""Encrypt a BSON value with a given key and algorithm.
Note that exactly one of ``key_id`` or ``key_alt_name`` must be
provided.
:Parameters:
- `value`: The BSON value to encrypt.
- `algorithm` (string): The encryption algorithm to use. See
:class:`Algorithm` for some valid options.
- `key_id`: Identifies a data key by ``_id`` which must be a
:class:`~bson.binary.Binary` with subtype 4 (
:attr:`~bson.binary.UUID_SUBTYPE`).
- `key_alt_name`: Identifies a key vault document by 'keyAltName'.
- `index_key_id`: **(BETA)** The index key id to use for Queryable Encryption. Must be
a :class:`~bson.binary.Binary` with subtype 4 (:attr:`~bson.binary.UUID_SUBTYPE`).
- `query_type` (int): **(BETA)** The query type to execute. See
:class:`QueryType` for valid options.
- `contention_factor` (int): **(BETA)** The contention factor to use
when the algorithm is :attr:`Algorithm.INDEXED`.
.. note:: `index_key_id`, `query_type`, and `contention_factor` are part of the
Queryable Encryption beta. Backwards-breaking changes may be made before the
final release.
:Returns:
The encrypted value, a :class:`~bson.binary.Binary` with subtype 6.
.. versionchanged:: 4.2
Added the `index_key_id`, `query_type`, and `contention_factor` parameters.
"""
self._check_closed()
if key_id is not None and not (
isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE
):
raise TypeError("key_id must be a bson.binary.Binary with subtype 4")
if index_key_id is not None and not (
isinstance(index_key_id, Binary) and index_key_id.subtype == UUID_SUBTYPE
):
raise TypeError("index_key_id must be a bson.binary.Binary with subtype 4")
doc = encode({"v": value}, codec_options=self._codec_options)
with _wrap_encryption_errors():
encrypted_doc = self._encryption.encrypt(
doc,
algorithm,
key_id=key_id,
key_alt_name=key_alt_name,
index_key_id=index_key_id,
query_type=query_type,
contention_factor=contention_factor,
)
return decode(encrypted_doc)["v"] # type: ignore[index]
def decrypt(self, value: Binary) -> Any:
"""Decrypt an encrypted value.
:Parameters:
- `value` (Binary): The encrypted value, a
:class:`~bson.binary.Binary` with subtype 6.
:Returns:
The decrypted BSON value.
"""
self._check_closed()
if not (isinstance(value, Binary) and value.subtype == 6):
raise TypeError("value to decrypt must be a bson.binary.Binary with subtype 6")
with _wrap_encryption_errors():
doc = encode({"v": value})
decrypted_doc = self._encryption.decrypt(doc)
return decode(decrypted_doc, codec_options=self._codec_options)["v"]
def __enter__(self) -> "ClientEncryption":
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
self.close()
def _check_closed(self):
if self._encryption is None:
raise InvalidOperation("Cannot use closed ClientEncryption")
def close(self) -> None:
"""Release resources.
Note that using this class in a with-statement will automatically call
:meth:`close`::
with ClientEncryption(...) as client_encryption:
encrypted = client_encryption.encrypt(value, ...)
decrypted = client_encryption.decrypt(encrypted)
"""
if self._io_callbacks:
self._io_callbacks.close()
self._encryption.close()
self._io_callbacks = None
self._encryption = None
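# Hedged usage sketch (not part of the module above): requires the pymongocrypt
# package and a reachable MongoDB deployment; the connection string and key vault
# namespace below are hypothetical.
if __name__ == "__main__":
import os
client = MongoClient("mongodb://localhost:27017")  # hypothetical deployment
kms_providers = {"local": {"key": os.urandom(96)}}  # throwaway local master key
opts = CodecOptions(uuid_representation=STANDARD)
with ClientEncryption(kms_providers, "encryption.__keyVault", client, opts) as client_encryption:
key_id = client_encryption.create_data_key("local", key_alt_names=["demo_key"])
ciphertext = client_encryption.encrypt("457-55-5462", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id)
print(client_encryption.decrypt(ciphertext))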
|
#!/usr/bin/python
import logging
import csv
import re
import sys
# create logger
logger = logging.getLogger("extract_experience")
logger.setLevel(logging.INFO)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(name)s - %(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
def main(input_file, output_file):
with open(input_file, 'r') as f:
reader = csv.reader(f, quoting=csv.QUOTE_ALL)
for job in reader:
logger.info("Processing job: {}".format(job[0]))
matches = re.findall(r"(\d+).? (years|yrs)", job[-1])
experience = [int(exp[0]) for exp in matches]
max_exp = None
if len(experience) > 0:
max_exp = max(experience)
logger.info(max_exp)
job.append(max_exp)
with open(output_file, 'a') as o:
writer = csv.writer(o, quoting=csv.QUOTE_ALL)
writer.writerow(job)
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2])
|